content | type
---|---
from django.urls import path, include
from mighty.functions import setting
from mighty.applications.user import views
urlpatterns = [
path('user/', include([
path('create/', views.CreateUserView.as_view(), name="use-create"),
])),
]
api_urlpatterns = [
path('user/', include([
path('form/', include([
path('create/', views.CreatUserFormView.as_view(), name="api-user-form-create"),
])),
path('check/', include([
path('email/', views.UserEmailCheckView.as_view(), name="api-user-check-email"),
path('phone/', views.UserPhoneCheckView.as_view(), name="api-user-check-phone"),
])),
path('', views.CreateUserView.as_view(), name="api-user-profile"),
path('profile/', views.ProfileView.as_view(), name="api-user-profile"),
path('invitation/', include([
path('<uuid:uid>/', views.InvitationDetailView.as_view(), name="api-user-invitation"),
path('<uuid:uid>/<str:action>/', views.InvitationDetailView.as_view(), name="api-user-invitation-action"),
]))
]))
]
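# Illustrative wiring sketch (an assumption, not part of this module): in a
# project's root urls.py the API patterns above might be mounted under an
# "api/" prefix, e.g.
#
#   from mighty.applications.user.urls import api_urlpatterns as user_api
#
#   urlpatterns = [
#       path('api/', include(user_api)),
#   ]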
|
python
|
from django.core.management.base import BaseCommand, CommandError
from openfood.models import Product, Category, Position
from django.db import models
from datetime import datetime
import sys
import requests
class Collector:
"""
    Fetch products from the Open Food Facts database and register fields for
    'Product' and 'Category' records. The many-to-many join table 'Position'
    stores a 'rank' field corresponding to the position of each category in
    the product hierarchy.
"""
def __init__(self, url="https://fr.openfoodfacts.org/cgi/search.pl",
number_by_grade=[
('a', 150), ('b', 150), ('c', 150), ('d', 150), ('e', 150)
],
categories=[
"Salty snacks", "Cheeses", "Beverage", "Sauces",
"Biscuits", "Frozen foods", "pizzas", "chocolats",
"Candies", "Snacks sucrés",]
):
self.url = url
self.grades = number_by_grade
self.categories = categories
self.products = []
def fetch(self, category="Cheese", grade="a", products_number=50,
product_keys = [ 'product_name', 'nutrition_grades',
'url', 'code', 'brands', 'stores', 'categories_hierarchy',
'image_url', ]):
"""
        Get [products_number] products in [category] with grade [grade],
        keeping only the fields listed in [product_keys].
"""
args = {
'action': "process",
'tagtype_0': "categories",
'tag_contains_0': "contains",
'tag_0': category,
'nutrition_grades': grade,
'json': 1,
'page_size': 1000,
}
response = requests.get(self.url, params=args)
products = response.json()["products"]
products_to_store = []
for product in products:
product_to_store = {}
try:
for key in product_keys:
product_to_store[key] = product[key]
products_to_store.append(product_to_store)
except KeyError:
# print("Key Error on {}.".format(key))
pass
if len(products_to_store) == products_number:
print("Number reached !!!")
break
self.products.extend(products_to_store)
def register(self):
for product in self.products:
new_product = Product()
new_product.product_name = product['product_name']
new_product.grade = product['nutrition_grades']
new_product.url = product['url']
new_product.barcode = product['code']
new_product.brand = product['brands']
new_product.store = product['stores']
new_product.product_img_url = product['image_url']
new_product.save()
for i, category in enumerate(product['categories_hierarchy'][::-1]):
new_category = Category.objects.get_or_create(
category_name=category,
)
new_position = Position()
new_position.product = new_product
new_position.category = new_category[0]
new_position.rank = i
new_position.save()
def populate(self):
for category in self.categories:
for grade in self.grades:
self.fetch(category=category, grade=grade[0],
products_number=grade[1])
print("Products:", len(self.products))
print("Registering products in database...")
self.register()
print("{} products registered in database.".format(len(self.products)))
def empty(self):
products_to_delete = Product.objects.filter(favorized=0)
products_to_delete_number = len(products_to_delete)
total_products = len(Product.objects.all())
products_to_delete.delete()
print("-\n{} deleted on a total of {}.-\n".format(
products_to_delete_number,
total_products,
)
)
class Command(BaseCommand):
"""
Django command to refresh data.
"""
def handle(self, *args, **options):
collector = Collector()
orig_stdout = sys.stdout
        if sys.platform.startswith('win'):
filename = 'refresh_logs/refresh-{}.txt'.format(datetime.strftime(datetime.now(), "%d-%m-%Y@%H-%M-%S"))
else:
filename = '/home/gil/oc-projet-10/refresh_logs/refresh-{}.txt'.format(datetime.strftime(datetime.now(), "%d-%m-%Y@%H-%M-%S"))
log = open(filename, 'w')
sys.stdout = log
print("Operation started at {}.\n-".format(datetime.strftime(datetime.now(), "%H:%M:%S")))
collector.empty()
collector.populate()
print("-\nOperation ended at {}.".format(datetime.strftime(datetime.now(), "%H:%M:%S")))
        sys.stdout = orig_stdout
        log.close()
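# Usage sketch (illustrative): assuming this file is saved as
# <app>/management/commands/refresh_db.py, the refresh can be run with
# "python manage.py refresh_db", or programmatically:
#
#   from django.core.management import call_command
#   call_command('refresh_db')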
|
python
|
import altair as alt
from model import data_wrangle
def return_fatality_bar_chart(value=None):
"""
    Creates an Altair chart object for the fatality bar chart.
Parameters
----------
value: the value passed in from the radio buttons.
"0", "1", and "2" for "first-world", "non-first-world", and "both" respectively.
Returns
-------
    altair.Chart object of the bar chart.
"""
value = '0' if value is None else value
plot2 = alt.Chart(data_wrangle.chart_2_data,
title="Number of fatalities for airlines that had an incident between 1985 and 2014"
).mark_bar().encode(
alt.Y("airline:N",
title="Airline (* includes regional subsidiaries)",
sort=alt.EncodingSortField(
field="total_fatalities_per_b_avail_seat_km",
order="ascending")),
alt.X("total_fatalities_per_b_avail_seat_km:Q",
axis=alt.Axis(
title="Rate of fatalities per billion available seat kilometers")),
tooltip=[alt.Tooltip(shorthand="total_fatalities_per_b_avail_seat_km:Q",
title="count of fatalities")]
).configure_mark(color="blue"
).configure_title(fontSize=18
).configure_legend(labelFontSize=13
).configure_axis(labelFontSize=11,
titleFontSize=14
).properties(width=800,
height=600)
if value != "2":
if value == "0":
color_range = ["blue", "grey"]
else: # To remove 'Non First World', we do not need to do anything here
color_range = ["gray", "blue"]
plot2 = alt.Chart(data_wrangle.chart_2_data,
title="Rate of fatal incidents for airlines between 1985 and 2014"
).mark_bar().encode(
alt.Y("airline:N",
title="Airline (* includes regional subsidiaries)",
sort=alt.EncodingSortField(
field="total_fatalities_per_b_avail_seat_km",
order="ascending")),
alt.X("total_fatalities_per_b_avail_seat_km:Q",
axis=alt.Axis(
title="Normalized Rate of fatal incidents (incident/billion km/seat)")),
alt.Color("first_world",
title=None,
scale=alt.Scale(domain=["First World", "Other"],
range=color_range)),
tooltip=[alt.Tooltip(shorthand="total_fatalities_per_b_avail_seat_km:Q",
title="count of fatalities")]
).configure_title(fontSize=18
).configure_legend(labelFontSize=13
).configure_axis(labelFontSize=11,
titleFontSize=14
).properties(width=800, height=600)
return plot2
return_fatality_bar_chart("0")
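# Example of rendering the chart to a standalone HTML file (illustrative; the
# output filename is arbitrary). Saving to .html needs no extra dependencies.
chart = return_fatality_bar_chart("2")
chart.save("fatality_bar_chart.html")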
|
python
|
import sys
from collections import OrderedDict
import calplot
import pandas as pd
def loadData(filename: str):
df = pd.read_csv(filename, usecols=['DateTime', 'Open', 'High', 'Low', 'Close', 'Volume'], na_values=['nan'])
df['DateTime'] = pd.to_datetime(df['DateTime'], utc=True).dt.tz_convert('US/Eastern')
df = df.set_index('DateTime')
return df
def resample(df):
return df.resample('1min').agg(
OrderedDict([
('Open', 'first'),
('High', 'max'),
('Low', 'min'),
('Close', 'last'),
('Volume', 'sum'),
])
).dropna()
if __name__ == '__main__':
file_path = sys.argv[1]
data_frame = loadData(file_path)
# data_frame = resample(data_frame)
data_frame['hasDay'] = 1
fig, _ = calplot.calplot(data_frame['hasDay'], cmap='Blues', colorbar=False)
print(f"Calendar hitmap has been saved to {file_path}_hitmap.png")
fig.savefig(f"{file_path}_hitmap.png")
|
python
|
from aiohttp import web, test_utils
import typing
import asyncio
import functools
from .const import InputQuery
import attr
@attr.s
class AppContainer:
host: typing.Optional[str] = attr.ib(default=None)
port: typing.Optional[int] = attr.ib(default=None)
_app: web.Application = attr.ib(factory=web.Application)
_route: web.RouteTableDef = attr.ib(factory=web.RouteTableDef, init=False)
appRunner = attr.ib(type=web.AppRunner)
@appRunner.default
def app_runner_def(self):
return web.AppRunner(self._app)
site = attr.ib(type=web.TCPSite, default=None)
def get(self, path, **kwargs):
return self._route.get(path, **kwargs)
def put(self, path, **kwargs):
return self._route.put(path, **kwargs)
async def start(self):
self._app.add_routes(self._route)
await self.appRunner.setup()
self.site = web.TCPSite(self.appRunner, self.host, self.port)
await self.site.start()
def get_app(self):
self._app.add_routes(self._route)
return self._app
async def stop(self):
await self.site.stop()
def test_client(self) -> test_utils.TestClient:
return test_utils.TestClient(test_utils.TestServer(self.get_app()), loop=asyncio.get_event_loop())
async def make_server(handle: typing.Callable[[web.Request], typing.Awaitable[web.Response]], port: int) -> typing.Tuple[web.Server, web.TCPSite]:
"""
    Make a server and start it immediately
:param handle: handler coroutinefunction
    :param port: port on which the server will be started
:return:
"""
    assert asyncio.iscoroutinefunction(handle), 'handle must be a coroutine function'
server = web.Server(handle)
runner = web.ServerRunner(server)
await runner.setup()
site = web.TCPSite(runner, 'localhost', port)
await site.start()
return server, site
def cancelok(foo):
"""
    Decorate foo so that it does not raise on cancellation
:param foo:
:return:
"""
@functools.wraps(foo)
async def wrapper(*args, **kwargs):
try:
return await foo(*args, **kwargs)
except asyncio.CancelledError:
return
return wrapper
def make_query(query: dict):
if isinstance(query, InputQuery):
query = query._asdict()
return '&'.join([f'{x}={y}' for x, y in query.items()])
|
python
|
'''OpenGL extension SGIS.texture_color_mask
Overview (from the spec)
This extension implements the same functionality for texture
updates that glColorMask implements for color buffer updates.
Masks for updating textures with indexed internal formats
(the analog for glIndexMask) should be supported by a separate extension.
The extension allows an application to update a subset of
components in an existing texture. The masks are applied after
all pixel transfer operations have been performed, immediately
prior to writing the texel value into texture memory. They
apply to all texture updates.
The official definition of this extension is available here:
http://oss.sgi.com/projects/ogl-sample/registry/SGIS/texture_color_mask.txt
Automatically generated by the get_gl_extensions script, do not edit!
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_SGIS_texture_color_mask'
GL_TEXTURE_COLOR_WRITEMASK_SGIS = constant.Constant( 'GL_TEXTURE_COLOR_WRITEMASK_SGIS', 0x81EF )
glTextureColorMaskSGIS = platform.createExtensionFunction(
'glTextureColorMaskSGIS', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLboolean, constants.GLboolean, constants.GLboolean, constants.GLboolean,),
doc = 'glTextureColorMaskSGIS( GLboolean(red), GLboolean(green), GLboolean(blue), GLboolean(alpha) ) -> None',
argNames = ('red', 'green', 'blue', 'alpha',),
)
def glInitTextureColorMaskSGIS():
'''Return boolean indicating whether this extension is available'''
return extensions.hasGLExtension( EXTENSION_NAME )
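def _example_mask_red_and_alpha():
    '''Illustrative only (assumes a current GL context has already been created
    elsewhere, e.g. via GLUT or pygame): restrict subsequent texture updates to
    the red and alpha channels.'''
    if glInitTextureColorMaskSGIS():
        glTextureColorMaskSGIS( True, False, False, True )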
|
python
|
from .api import AppSyncAPI
|
python
|
# -*- coding: utf-8 -*-
import time
from typing import List, Dict, Any
from chaosaws import aws_client
from chaoslib.exceptions import FailedActivity
from chaosaws.types import AWSResponse
from chaoslib.types import Configuration, Secrets
from logzero import logger
from .constants import OS_LINUX, OS_WINDOWS, GREP_PROCESS
from chaosaws.ec2_os import construct_script_content
__all__ = ["describe_os_type", "describe_instance",
"ensure_tc_installed", "ensure_tc_uninstalled",
"grep_process_exist"]
def describe_os_type(instance_id, configuration, secrets):
res = describe_instance(instance_id, configuration, secrets)
os = "linux"
try:
os = res['Reservations'][0]['Instances'][0]['Platform']
except KeyError:
logger.warning("No Platform key, so it is Linux")
return os
def describe_instance(instance_id: str,
configuration: Configuration = None,
secrets: Secrets = None) -> AWSResponse:
client = aws_client('ec2', configuration, secrets)
return client.describe_instances(InstanceIds=[
instance_id,
])
def ensure_tc_installed(instance_ids: List[str] = None,
configuration: Configuration = None,
secrets: Secrets = None) -> List[AWSResponse]:
response = []
for instance_id in instance_ids:
response.append(
__simple_ssm_helper(
instance_id=instance_id,
configuration=configuration,
secrets=secrets,
default_timeout=30,
action="ensure_tc_installed",
failure_matcher="Install iproute-tc package failed."
)
)
return response
def ensure_tc_uninstalled(instance_ids: List[str] = None,
configuration: Configuration = None,
secrets: Secrets = None) -> List[AWSResponse]:
response = []
for instance_id in instance_ids:
response.append(
__simple_ssm_helper(
instance_id=instance_id,
configuration=configuration,
secrets=secrets,
default_timeout=30,
action="ensure_tc_uninstalled",
failure_matcher="Remove iproute-tc package failed."
)
)
return response
def grep_process_exist(instance_ids: List[str] = None,
process_name: str = None,
configuration: Configuration = None,
secrets: Secrets = None) -> List[AWSResponse]:
"""
Grep pid of process name
Parameters
----------
instance_ids : List[str]
        List of EC2 instance ids on which to check for the process.
process_name : str
        Name of the process to look for
configuration : Configuration
Chaostoolkit Configuration
secrets : Secrets
Chaostoolkit Secrets
"""
logger.debug(
"Start network_latency: configuration='{}', instance_ids='{}'".format(
configuration, instance_ids))
response = []
try:
for instance in instance_ids:
param = dict()
param["duration"] = "1"
param["instance_id"] = instance
param["process_name"] = process_name
response.append(
__simple_ssm_helper(instance_id=instance,
configuration=configuration,
secrets=secrets,
action=GREP_PROCESS,
parameters=param)
)
return response
except Exception as x:
raise FailedActivity(
"failed issuing a execute of shell script via AWS SSM {}".format(
str(x)
))
###############################################################################
# Private helper functions
###############################################################################
def __simple_ssm_helper(instance_id: str,
configuration: Configuration = None,
secrets: Secrets = None,
default_timeout: int = 30,
action: str = None,
parameters: Dict[str, Any] = None,
failure_matcher: str = "failed") -> AWSResponse:
client = aws_client("ssm", configuration, secrets)
if not instance_id:
raise FailedActivity(
"you must specify the instance_id"
)
try:
if describe_os_type(instance_id, configuration, secrets) == "windows":
os_type = OS_WINDOWS
# TODO with PowerShell
cmd = ""
document_name = ""
else:
os_type = OS_LINUX
document_name = "AWS-RunShellScript"
res_send_command = client.send_command(
InstanceIds=[instance_id],
DocumentName=document_name,
Parameters={
'commands':
[construct_script_content(action, os_type, parameters)]
},
)
cmd_id = res_send_command["Command"]["CommandId"]
logger.info("ssm run command is sent, id {}".format(cmd_id))
totalwait = 0
interval = 1
while True:
res_list = client.list_command_invocations(
CommandId=cmd_id,
Details=True
)
try:
cp = res_list['CommandInvocations'][0]['CommandPlugins'][0]
status = cp['Status']
if status == "InProgress":
time.sleep(interval)
totalwait += interval
if totalwait > default_timeout:
raise FailedActivity(
"Script exceeded default timeout {}".format(
default_timeout
)
)
continue
elif status == "Failed":
break
elif status == "Success":
break
else:
break
except IndexError:
time.sleep(1)
continue
for command_invocation in res_list['CommandInvocations']:
for invocation in command_invocation['CommandPlugins']:
if invocation['Name'] == 'aws:runShellScript':
if failure_matcher in invocation['Output']:
raise FailedActivity(
"The result of command failed as:\n{}".format(
failure_matcher
)
)
logger.info("ssm run command status {}"
.format(invocation['Status']))
logger.info("ssm rum command result \n{}"
.format(invocation['Output'].rstrip('\n')))
return invocation['Output'].rstrip('\n')
except Exception as x:
raise FailedActivity(
"failed issuing a execute of shell script:\n{}".format(x))
|
python
|
import torch as T
import dataclasses as dc
from typing import Optional, Callable
def vanilla_gradient(
output, input,
        filter_outliers_quantiles:tuple[float,float]=(.005, .995)):
map = T.autograd.grad(output, input)
assert isinstance(map, tuple) and len(map) == 1, 'sanity check'
map = map[0]
# --> filter the bottom 0.5% and top 0.5% of gradient values since
# SmoothGrad paper suggests they are outliers
low, hi = filter_outliers_quantiles
map.clamp_(map.quantile(low), map.quantile(hi))
return map
@dc.dataclass
class SmoothGrad:
"""Wrap a model. Instead of outputting a prediction, generate SmoothGrad
saliency maps for given image and an output class index to explain.
>>> sg = SmoothGrad(model)
>>> explanation = sg(x, index_of_class_to_explain=0)
"""
model: T.nn.Module
layer: Optional[T.nn.Module] = None # defaults to a saliency map w.r.t. input
saliency_method:str = 'vanilla_gradient'
# smoothgrad hyperparameters
nsamples:int = 30 # paper suggests less than 50
std_spread:float = .15 # paper suggests values satisfying std / (max-min intensities) in [.1,.2], so std = .15*(max-min)
apply_before_mean:Callable = lambda x: x**2 # apply a function (like absolute value or magnitude or clip extreme values) before computing mean over samples.
def __call__(self, x: T.Tensor, index_of_class_to_explain:T.Tensor):
explanations = []
B,C = x.shape[:2]
# --> compute the standard deviation per image and color channel.
_intensity_range = x.reshape(B,C,-1).max(-1).values - x.reshape(B,C,-1).min(-1).values
std = self.std_spread * _intensity_range
# --> smoothgrad. just an average of saliency maps perturbed by noise
for i in range(self.nsamples):
self.model.zero_grad()
_noise = T.randn_like(x) * std.reshape(B,C,*(1 for _ in x.shape[2:]))
x_plus_noise = (x.detach() + _noise).requires_grad_()
yhat = self.model(x_plus_noise)
if self.saliency_method == 'vanilla_gradient':
map = vanilla_gradient(
input=self.layer if self.layer is not None else x_plus_noise,
output=yhat[:, index_of_class_to_explain],
)
else:
raise NotImplementedError()
map = self.apply_before_mean(map)
explanations.append(map)
return T.stack(explanations).mean(0)
# notes from paper
# maybe take absolute value
# consider .99 percentile of gradient values, because extreme values throw off input color and result in black map.
# noise, N(0, sigma^2): 10 to 20% noise?
if __name__ == "__main__":
# cfg = ...
sg = SmoothGrad(cfg.model.cpu())
x,y = cfg.train_dset[0]
x = x.unsqueeze_(0).to(cfg.device, non_blocking=True)
# explanations = [sg(x, i) for i in range(y.shape[0])]
# e = explanations[0]
e = sg(x,6)
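# Self-contained smoke test (illustrative assumptions: a tiny untrained CNN and
# a random image stand in for the project's cfg.model and dataset).
def _smoothgrad_demo():
    tiny_cnn = T.nn.Sequential(
        T.nn.Conv2d(3, 8, kernel_size=3, padding=1),
        T.nn.ReLU(),
        T.nn.AdaptiveAvgPool2d(1),
        T.nn.Flatten(),
        T.nn.Linear(8, 10),
    )
    sg_demo = SmoothGrad(tiny_cnn, nsamples=5)
    x_demo = T.rand(1, 3, 32, 32)  # one fake RGB image, batch size 1
    heatmap = sg_demo(x_demo, index_of_class_to_explain=3)
    print(heatmap.shape)  # same shape as the input: (1, 3, 32, 32)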
|
python
|
import argparse
import torch
import wandb
wandb.login()
from dataloader import get_dataloaders
from utils import get_model
from train import Trainer
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', required=True, choices=['c10', 'c100', 'svhn'])
parser.add_argument('--model', required=True, choices=['mlp_mixer'])
parser.add_argument('--batch-size', type=int, default=128)
parser.add_argument('--eval-batch-size', type=int, default=1024)
parser.add_argument('--num-workers', type=int, default=4)
parser.add_argument('--seed', type=int, default=3407)
parser.add_argument('--epochs', type=int, default=300)
# parser.add_argument('--precision', type=int, default=16)
parser.add_argument('--patch-size', type=int, default=4)
parser.add_argument('--hidden-size', type=int, default=128)
parser.add_argument('--hidden-c', type=int, default=512)
parser.add_argument('--hidden-s', type=int, default=64)
parser.add_argument('--num-layers', type=int, default=8)
parser.add_argument('--drop-p', type=float, default=0.)
parser.add_argument('--off-act', action='store_true', help='Disable activation function')
parser.add_argument('--is-cls-token', action='store_true', help='Introduce a class token.')
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--min-lr', type=float, default=1e-6)
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--optimizer', default='adam', choices=['adam', 'sgd'])
parser.add_argument('--scheduler', default='cosine', choices=['step', 'cosine'])
parser.add_argument('--beta1', type=float, default=0.9)
parser.add_argument('--beta2', type=float, default=0.99)
parser.add_argument('--weight-decay', type=float, default=5e-5)
parser.add_argument('--off-nesterov', action='store_true')
parser.add_argument('--label-smoothing', type=float, default=0.1)
parser.add_argument('--gamma', type=float, default=0.1)
parser.add_argument('--warmup-epoch', type=int, default=5)
parser.add_argument('--autoaugment', action='store_true')
parser.add_argument('--clip-grad', type=float, default=0, help="0 means disabling clip-grad")
parser.add_argument('--cutmix-beta', type=float, default=1.0)
parser.add_argument('--cutmix-prob', type=float, default=0.)
args = parser.parse_args()
args.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
args.nesterov = not args.off_nesterov
torch.random.manual_seed(args.seed)
experiment_name = f"{args.model}_{args.dataset}_{args.optimizer}_{args.scheduler}"
if args.autoaugment:
experiment_name += "_aa"
if args.clip_grad:
experiment_name += f"_cg{args.clip_grad}"
if args.off_act:
experiment_name += f"_noact"
if args.cutmix_prob>0.:
experiment_name += f'_cm'
if args.is_cls_token:
experiment_name += f"_cls"
if __name__=='__main__':
with wandb.init(project='mlp_mixer', config=args, name=experiment_name):
train_dl, test_dl = get_dataloaders(args)
model = get_model(args)
trainer = Trainer(model, args)
trainer.fit(train_dl, test_dl)
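# Example invocation (illustrative; assumes this file is the training entry
# point, e.g. saved as main.py):
#   python main.py --dataset c10 --model mlp_mixer --autoaugment --cutmix-prob 0.5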
|
python
|
def divide(num):
try:
return 42 / num
except ZeroDivisionError:
print('Error: Invalid argument')
print(divide(2))
print(divide(12))
print(divide(0))
print(divide(1))
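# Expected output of the four calls above:
#   21.0
#   3.5
#   Error: Invalid argument
#   None   <- divide(0) handles ZeroDivisionError, prints the error, and returns None
#   42.0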
|
python
|
# coding=utf-8
#
# created by kpe on 28.Mar.2019 at 15:56
#
from __future__ import absolute_import, division, print_function
|
python
|
import graphviz
dot = graphviz.Digraph(comment='GIADog system overview')
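# Illustrative nodes and edges (component names are placeholders, not the real
# GIADog modules); without at least one node the rendered diagram is empty.
dot.node('sim', 'Simulation')
dot.node('ctrl', 'Controller')
dot.edge('sim', 'ctrl', label='state')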
dot.render('output/system.gv', view=True)
|
python
|
# The MIT License (MIT)
# Copyright (c) 2020 Mike Teachman
# https://opensource.org/licenses/MIT
# Platform-independent MicroPython code for the rotary encoder module
# Documentation:
# https://github.com/MikeTeachman/micropython-rotary
import micropython
from micropython import const
_DIR_CW = const(0x10) # Clockwise step
_DIR_CCW = const(0x20) # Counter-clockwise step
# Rotary Encoder States
_R_START = const(0x0)
_R_CW_1 = const(0x1)
_R_CW_2 = const(0x2)
_R_CW_3 = const(0x3)
_R_CCW_1 = const(0x4)
_R_CCW_2 = const(0x5)
_R_CCW_3 = const(0x6)
_R_ILLEGAL = const(0x7)
_transition_table = [
# |------------- NEXT STATE -------------| |CURRENT STATE|
# CLK/DT CLK/DT CLK/DT CLK/DT
# 00 01 10 11
[_R_START, _R_CCW_1, _R_CW_1, _R_START], # _R_START
[_R_CW_2, _R_START, _R_CW_1, _R_START], # _R_CW_1
[_R_CW_2, _R_CW_3, _R_CW_1, _R_START], # _R_CW_2
[_R_CW_2, _R_CW_3, _R_START, _R_START | _DIR_CW], # _R_CW_3
[_R_CCW_2, _R_CCW_1, _R_START, _R_START], # _R_CCW_1
[_R_CCW_2, _R_CCW_1, _R_CCW_3, _R_START], # _R_CCW_2
[_R_CCW_2, _R_START, _R_CCW_3, _R_START | _DIR_CCW], # _R_CCW_3
[_R_START, _R_START, _R_START, _R_START]] # _R_ILLEGAL
_transition_table_half_step = [
[_R_CW_3, _R_CW_2, _R_CW_1, _R_START],
[_R_CW_3 | _DIR_CCW, _R_START, _R_CW_1, _R_START],
[_R_CW_3 | _DIR_CW, _R_CW_2, _R_START, _R_START],
[_R_CW_3, _R_CCW_2, _R_CCW_1, _R_START],
[_R_CW_3, _R_CW_2, _R_CCW_1, _R_START | _DIR_CW],
[_R_CW_3, _R_CCW_2, _R_CW_3, _R_START | _DIR_CCW]]
_STATE_MASK = const(0x07)
_DIR_MASK = const(0x30)
def _wrap(value, incr, lower_bound, upper_bound):
range = upper_bound - lower_bound + 1
value = value + incr
if value < lower_bound:
value += range * ((lower_bound - value) // range + 1)
return lower_bound + (value - lower_bound) % range
def _bound(value, incr, lower_bound, upper_bound):
return min(upper_bound, max(lower_bound, value + incr))
def _trigger(rotary_instance):
for listener in rotary_instance._listener:
listener()
class Rotary(object):
RANGE_UNBOUNDED = const(1)
RANGE_WRAP = const(2)
RANGE_BOUNDED = const(3)
def __init__(self, min_val, max_val, reverse, range_mode, half_step):
self._min_val = min_val
self._max_val = max_val
self._reverse = -1 if reverse else 1
self._range_mode = range_mode
self._value = min_val
self._state = _R_START
self._half_step = half_step
self._listener = []
def set(self, value=None, min_val=None,
max_val=None, reverse=None, range_mode=None):
# disable DT and CLK pin interrupts
self._hal_disable_irq()
if value is not None:
self._value = value
if min_val is not None:
self._min_val = min_val
if max_val is not None:
self._max_val = max_val
if reverse is not None:
self._reverse = -1 if reverse else 1
if range_mode is not None:
self._range_mode = range_mode
self._state = _R_START
# enable DT and CLK pin interrupts
self._hal_enable_irq()
def value(self):
return self._value
def reset(self):
self._value = 0
def close(self):
self._hal_close()
def add_listener(self, l):
self._listener.append(l)
def remove_listener(self, l):
if l not in self._listener:
raise ValueError('{} is not an installed listener'.format(l))
self._listener.remove(l)
def _process_rotary_pins(self, pin):
old_value = self._value
clk_dt_pins = (self._hal_get_clk_value() <<
1) | self._hal_get_dt_value()
# Determine next state
if self._half_step:
self._state = _transition_table_half_step[self._state &
_STATE_MASK][clk_dt_pins]
else:
self._state = _transition_table[self._state &
_STATE_MASK][clk_dt_pins]
direction = self._state & _DIR_MASK
incr = 0
if direction == _DIR_CW:
incr = 1
elif direction == _DIR_CCW:
incr = -1
incr *= self._reverse
if self._range_mode == self.RANGE_WRAP:
self._value = _wrap(
self._value,
incr,
self._min_val,
self._max_val)
elif self._range_mode == self.RANGE_BOUNDED:
self._value = _bound(
self._value,
incr,
self._min_val,
self._max_val)
else:
self._value = self._value + incr
try:
if old_value != self._value and len(self._listener) != 0:
micropython.schedule(_trigger, self)
except:
pass
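# Usage sketch (the platform-specific subclass below is an assumption; the
# project ships wrappers such as rotary_irq_esp that provide the _hal_* methods
# this base class expects):
#
#   from rotary_irq_esp import RotaryIRQ
#   r = RotaryIRQ(pin_num_clk=26, pin_num_dt=27,
#                 min_val=0, max_val=5,
#                 reverse=False, range_mode=RotaryIRQ.RANGE_WRAP)
#   r.add_listener(lambda: print('value =', r.value()))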
|
python
|
from flask import Flask
app = Flask(__name__)
import sqreen
sqreen.start()
from app import routes
if __name__ == '__main__':
app.run(debug=True)
|
python
|
"""Module to generate datasets for FROCC
"""
import os
import numpy as np
import sklearn.datasets as skds
import scipy.sparse as sp
def himoon(n_samples=1000, n_dims=1000, sparsity=0.01, dist=5):
# n_samples = 1000
# n_dims = 1000
# dist = 5
# sparsity = 0.01
x, y = skds.make_moons(n_samples=n_samples * 2)
x = np.hstack(
(x, dist * np.ones((n_samples * 2, int(n_dims * sparsity - x.shape[1]))))
)
x_p = x[y == 1]
x_pos = sp.csr_matrix((n_samples, n_dims))
x_pos[:, : x.shape[1]] = x_p
x_n = x[y == 0]
x_neg = sp.csr_matrix((int(n_samples * 0.3), n_dims))
x_neg[:, : x.shape[1]] = x_n[: int(n_samples * 0.3)]
x_train = x_pos[: int(n_samples * 0.7)]
x_val = sp.vstack(
(
x_pos[int(n_samples * 0.7) : int(n_samples * 0.9)],
x_neg[: int(n_samples * 0.2)],
),
)
x_test = sp.vstack((x_pos[int(n_samples * 0.9) :], x_neg[int(n_samples * 0.2) :]))
y_train = np.ones(int(n_samples * 0.7))
y_val = np.concatenate(
((np.ones(int(n_samples * 0.2)), np.zeros(int(n_samples * 0.2))))
)
y_test = np.concatenate(
((np.ones(int(n_samples * 0.1)), np.zeros(int(n_samples * 0.1))))
)
# x_train = sp.csc_matrix(x_train)
# x_val = sp.csc_matrix(x_val)
# x_test = sp.csc_matrix(x_test)
x_train.reshape(x_train.shape)
x_test.reshape(x_test.shape)
x_val.reshape(x_val.shape)
return x_train, y_train, x_val, y_val, x_test, y_test
def mmgauss(n_samples=1000, n_dims=1000, modes=5, sparsity=0.01, dist=5):
# n_samples = 10000
# n_dims = 10000
# modes = 5
# dist = 5
# sparsity = 0.01
pos_means = [(i + dist) * np.ones(int(n_dims * sparsity)) for i in range(modes)]
neg_means = dist * np.zeros((int(n_dims * sparsity), 1))
x_p, _ = skds.make_blobs(n_samples=n_samples, centers=pos_means)
x_pos = sp.csr_matrix((n_samples, n_dims))
x_pos[:, : int(n_dims * sparsity)] = x_p
x_n, _ = skds.make_blobs(n_samples=int(n_samples * 0.3), centers=neg_means)
x_neg = sp.csr_matrix((int(n_samples * 0.3), n_dims))
x_neg[:, : int(n_dims * sparsity)] = x_n
x_train = x_pos[: int(n_samples * 0.7)]
x_val = sp.vstack(
(
x_pos[int(n_samples * 0.7) : int(n_samples * 0.9)],
x_neg[: int(n_samples * 0.2)],
),
)
x_test = sp.vstack((x_pos[int(n_samples * 0.9) :], x_neg[int(n_samples * 0.2) :]))
y_train = np.ones(int(n_samples * 0.7))
y_val = np.concatenate(
((np.ones(int(n_samples * 0.2)), np.zeros(int(n_samples * 0.2))))
)
y_test = np.concatenate(
((np.ones(int(n_samples * 0.1)), np.zeros(int(n_samples * 0.1))))
)
# x_train = sp.csc_matrix(x_train)
# x_val = sp.csc_matrix(x_val)
# x_test = sp.csc_matrix(x_test)
x_train.reshape(x_train.shape)
x_test.reshape(x_test.shape)
x_val.reshape(x_val.shape)
return x_train, y_train, x_val, y_val, x_test, y_test
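if __name__ == "__main__":
    # Quick shape check (illustrative): generate the high-dimensional two-moons
    # data with the default sizes and report the split shapes.
    x_train, y_train, x_val, y_val, x_test, y_test = himoon()
    print("train:", x_train.shape, "val:", x_val.shape, "test:", x_test.shape)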
|
python
|
import pytest
from mixer.main import mixer
from smpa.models.address import Address, SiteAddress
@pytest.fixture
def address():
obj = Address()
obj.number = "42"
obj.property_name = "property name"
obj.address_line_1 = "address line 1"
obj.address_line_2 = "address line 2"
obj.address_line_3 = "address line 3"
obj.town_city = "town city"
obj.postcode = "postcode"
obj.validate()
return obj
@pytest.fixture
def site_address():
obj = SiteAddress()
obj.number = "42"
obj.property_name = "property name"
obj.address_line_1 = "address line 1"
obj.address_line_2 = "address line 2"
obj.address_line_3 = "address line 3"
obj.town_city = "town city"
obj.postcode = "postcode"
obj.validate()
return obj
|
python
|
# Russell Richardson
# Homework 2, problem 1
"""This reads from a text file and returns a string of the text"""
def read_from_a_file(the_file):
file=open(the_file,'r')
the_string=file.read()
file.close()
return the_string
"""This takes in a string and writes that string to a text file"""
def write_to_a_file(message, the_file):
file = open(the_file,"w")
file.write(message)
file.close()
"""Call main to run the main program"""
def main():
the_file = r"message.txt"
message = read_from_a_file(the_file)
print(message)
key = input("Enter a key for the cipher: ")
encrypted_message = encrypt(key,message)
print(encrypted_message)
new_file = the_file[:-4]
new_file = new_file + "-cipher.txt"
write_to_a_file(encrypted_message,new_file)
"""This encrypts the message, given a key"""
def encrypt(key,message):
encrypted_message = ""
alphabet = 'abcdefghijklmnopqrstuvwxyz'
key_index = 0
key = key.lower()
for symbol in message:
        encrypted_index = alphabet.find(symbol.lower())
if encrypted_index != -1:
encrypted_index += alphabet.find(key[key_index])
encrypted_index %= len(alphabet)
if symbol.islower():
encrypted_message += alphabet[encrypted_index]
elif symbol.isupper():
encrypted_message += alphabet[encrypted_index].upper()
key_index += 1
if key_index == len(key):
key_index = 0
else:
encrypted_message += symbol
return encrypted_message
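# Entry point (assumes "message.txt" exists in the working directory, as the
# comment above main() implies):
if __name__ == "__main__":
    main()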
|
python
|
"""
Perform inference on inputted text.
"""
import utils
import torch
from termcolor import cprint, colored as c
import model
import data
import re
import sys
source = sys.argv[1]
# get an edgt object
def get_edgt():
input_chars = list(" \nabcdefghijklmnopqrstuvwxyz01234567890")
output_chars = ["<nop>", "<cap>"] + list(".,;:?!\"'$")
# torch.set_num_threads(8)
batch_size = 128
char2vec = utils.Char2Vec(chars=input_chars, add_unknown=True)
output_char2vec = utils.Char2Vec(chars = output_chars)
input_size = char2vec.size
output_size = output_char2vec.size
hidden_size = input_size
layers = 1
rnn = model.GruRNN(input_size, hidden_size, output_size, batch_size=batch_size, layers=layers, bi=True)
egdt = model.Engadget(rnn, char2vec, output_char2vec)
egdt.load('./data/Gru_Engadget_epch-24.tar')
return egdt
def predict_next(source, in_edgt, gen_length=None, temperature=0.05):
input_chars = list(" \nabcdefghijklmnopqrstuvwxyz01234567890")
output_chars = ["<nop>", "<cap>"] + list(".,;:?!\"'$")
input_text, punc_target = data.extract_punc(source, input_chars, output_chars)
in_edgt.model.batch_size = 1
in_edgt.init_hidden_()
in_edgt.next_([input_text])
punc_output = in_edgt.output_chars(temperature=temperature)[0]
result = data.apply_punc(input_text, punc_output)
    # capitalize the first letter following each period
    for i in range(len(result) - 1):
        if result[i] == '.':
            for j in range(i + 1, len(result)):
                if result[j].isalpha():
                    result = result[:j] + result[j].upper() + result[j + 1:]
                    break
print(result)
predict_next(source, get_edgt())
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''checkplot.py - Waqas Bhatti ([email protected]) - Jan 2017
License: MIT.
Contains functions to make checkplots: quick views for determining periodic
variability for light curves and sanity-checking results from period-finding
functions (e.g., from periodbase).
The checkplot_png function makes the following 3 x 3 grid and writes to a PNG:
[LSP plot + objectinfo] [ unphased LC ] [ period 1 phased LC ]
[period 1 phased LC /2] [period 1 phased LC x2] [ period 2 phased LC ]
[ period 3 phased LC ] [period 4 phased LC ] [ period 5 phased LC ]
The twolsp_checkplot_png function makes a similar plot for two independent
period-finding routines and writes to a PNG:
[ pgram1 + objectinfo ] [ pgram2 ] [ unphased LC ]
[ pgram1 P1 phased LC ] [ pgram1 P2 phased LC ] [ pgram1 P3 phased LC ]
[ pgram2 P1 phased LC ] [ pgram2 P2 phased LC ] [ pgram2 P3 phased LC ]
where:
pgram1 is the plot for the periodogram in the lspinfo1 dict
pgram1 P1, P2, and P3 are the best three periods from lspinfo1
pgram2 is the plot for the periodogram in the lspinfo2 dict
pgram2 P1, P2, and P3 are the best three periods from lspinfo2
The checkplot_pickle function takes, for a single object, an arbitrary number of
results from independent period-finding functions (e.g. BLS, PDM, AoV, GLS) in
periodbase, and generates a gzipped pickle file that contains object and
variability information, finder chart, mag series plot, and for each
period-finding result: a periodogram and phased mag series plots for up to
arbitrary number of 'best periods'. This is intended for use with an external
checkplot viewer: the Tornado webapp checkplotserver.py, but you can also use
the checkplot_pickle_to_png function to render this to a PNG similar to those
above. In this case, the PNG will look something like:
[ finder ] [ objectinfo ] [ variableinfo ] [ unphased LC ]
[ periodogram1 ] [ phased LC P1 ] [ phased LC P2 ] [ phased LC P3 ]
[ periodogram2 ] [ phased LC P1 ] [ phased LC P2 ] [ phased LC P3 ]
.
.
[ periodogramN ] [ phased LC P1 ] [ phased LC P2 ] [ phased LC P3 ]
for N independent period-finding methods producing:
- periodogram1,2,3...N: the periodograms from each method
- phased LC P1,P2,P3: the phased lightcurves using the best 3 peaks in each
periodogram
'''
#############
## LOGGING ##
#############
import logging
from datetime import datetime
from traceback import format_exc
# setup a logger
LOGGER = None
LOGMOD = __name__
DEBUG = False
def set_logger_parent(parent_name):
globals()['LOGGER'] = logging.getLogger('%s.%s' % (parent_name, LOGMOD))
def LOGDEBUG(message):
if LOGGER:
LOGGER.debug(message)
elif DEBUG:
print('[%s - DBUG] %s' % (
datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
message)
)
def LOGINFO(message):
if LOGGER:
LOGGER.info(message)
else:
print('[%s - INFO] %s' % (
datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
message)
)
def LOGERROR(message):
if LOGGER:
LOGGER.error(message)
else:
print('[%s - ERR!] %s' % (
datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
message)
)
def LOGWARNING(message):
if LOGGER:
LOGGER.warning(message)
else:
print('[%s - WRN!] %s' % (
datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
message)
)
def LOGEXCEPTION(message):
if LOGGER:
LOGGER.exception(message)
else:
print(
'[%s - EXC!] %s\nexception was: %s' % (
datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
message, format_exc()
)
)
#############
## IMPORTS ##
#############
import os
import os.path
import gzip
import base64
import sys
import hashlib
import json
try:
import cPickle as pickle
from cStringIO import StringIO as strio
except:
import pickle
from io import BytesIO as strio
import numpy as np
from numpy import nan as npnan, median as npmedian, \
isfinite as npisfinite, min as npmin, max as npmax, abs as npabs, \
ravel as npravel
# we're going to plot using Agg only
import matplotlib
MPLVERSION = tuple([int(x) for x in matplotlib.__version__.split('.')])
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
# import this to check if stimes, smags, serrs are Column objects
from astropy.table import Column as astcolumn
# import this to get neighbors and their x,y coords from the Skyview FITS
from astropy.wcs import WCS
# import from Pillow to generate pngs from checkplot dicts
from PIL import Image, ImageDraw, ImageFont
# import sps.cKDTree for external catalog xmatches
from scipy.spatial import cKDTree
###################
## LOCAL IMPORTS ##
###################
from .lcmath import phase_magseries, phase_bin_magseries, \
normalize_magseries, sigclip_magseries
from .varbase.lcfit import spline_fit_magseries
from .varclass.varfeatures import all_nonperiodic_features
from .varclass.starfeatures import coord_features, color_features, \
color_classification, neighbor_gaia_features
from .plotbase import skyview_stamp, \
PLOTYLABELS, METHODLABELS, METHODSHORTLABELS
from .coordutils import total_proper_motion, reduced_proper_motion
#######################
## UTILITY FUNCTIONS ##
#######################
def _make_periodogram(axes,
lspinfo,
objectinfo,
findercmap,
finderconvolve,
verbose=True,
findercachedir='~/.astrobase/stamp-cache'):
'''makes the periodogram, objectinfo, and finder tile.
'''
# get the appropriate plot ylabel
pgramylabel = PLOTYLABELS[lspinfo['method']]
# get the periods and lspvals from lspinfo
periods = lspinfo['periods']
lspvals = lspinfo['lspvals']
bestperiod = lspinfo['bestperiod']
nbestperiods = lspinfo['nbestperiods']
nbestlspvals = lspinfo['nbestlspvals']
# make the LSP plot on the first subplot
axes.plot(periods,lspvals)
axes.set_xscale('log',basex=10)
axes.set_xlabel('Period [days]')
axes.set_ylabel(pgramylabel)
plottitle = '%s - %.6f d' % (METHODLABELS[lspinfo['method']],
bestperiod)
axes.set_title(plottitle)
# show the best five peaks on the plot
for bestperiod, bestpeak in zip(nbestperiods,
nbestlspvals):
axes.annotate('%.6f' % bestperiod,
xy=(bestperiod, bestpeak), xycoords='data',
xytext=(0.0,25.0), textcoords='offset points',
arrowprops=dict(arrowstyle="->"),fontsize='14.0')
# make a grid
axes.grid(color='#a9a9a9',
alpha=0.9,
zorder=0,
linewidth=1.0,
linestyle=':')
# if objectinfo is present, get things from it
if (objectinfo and isinstance(objectinfo, dict) and
('objectid' in objectinfo or 'hatid' in objectinfo)
and 'ra' in objectinfo and 'decl' in objectinfo and
objectinfo['ra'] and objectinfo['decl']):
if 'objectid' not in objectinfo:
objectid = objectinfo['hatid']
else:
objectid = objectinfo['objectid']
if verbose:
LOGINFO('adding in object information and '
'finder chart for %s at RA: %.3f, DEC: %.3f' %
(objectid, objectinfo['ra'], objectinfo['decl']))
# FIXME: get mag info from astroquery or HATDS if needed
# calculate colors
if ('bmag' in objectinfo and 'vmag' in objectinfo and
'jmag' in objectinfo and 'kmag' in objectinfo and
'sdssi' in objectinfo and
objectinfo['bmag'] and objectinfo['vmag'] and
objectinfo['jmag'] and objectinfo['kmag'] and
objectinfo['sdssi']):
bvcolor = objectinfo['bmag'] - objectinfo['vmag']
jkcolor = objectinfo['jmag'] - objectinfo['kmag']
ijcolor = objectinfo['sdssi'] - objectinfo['jmag']
else:
bvcolor = None
jkcolor = None
ijcolor = None
# bump the ylim of the LSP plot so that the overplotted finder and
# objectinfo can fit in this axes plot
lspylim = axes.get_ylim()
axes.set_ylim(lspylim[0], lspylim[1]+0.75*(lspylim[1]-lspylim[0]))
# get the stamp
try:
dss, dssheader = skyview_stamp(objectinfo['ra'],
objectinfo['decl'],
convolvewith=finderconvolve,
cachedir=findercachedir,
verbose=verbose)
stamp = dss
# inset plot it on the current axes
inset = inset_axes(axes, width="40%", height="40%", loc=1)
inset.imshow(stamp,cmap=findercmap)
inset.set_xticks([])
inset.set_yticks([])
inset.set_frame_on(False)
# grid lines pointing to the center of the frame
inset.axvline(x=150,ymin=0.2,ymax=0.4,linewidth=2.0,color='k')
inset.axhline(y=150,xmin=0.2,xmax=0.4,linewidth=2.0,color='k')
except Exception as e:
LOGEXCEPTION('could not fetch a DSS stamp for this '
'object %s using coords (%.3f,%.3f)' %
(objectid, objectinfo['ra'], objectinfo['decl']))
# annotate with objectinfo
axes.text(
0.05,0.95,
'%s' % objectid,
ha='left',va='center',transform=axes.transAxes,
fontsize=18.0
)
axes.text(
0.05,0.91,
'RA = %.3f, DEC = %.3f' % (objectinfo['ra'], objectinfo['decl']),
ha='left',va='center',transform=axes.transAxes,
fontsize=18.0
)
if bvcolor:
axes.text(0.05,0.87,
'$B - V$ = %.3f, $V$ = %.3f' % (bvcolor,
objectinfo['vmag']),
ha='left',va='center',transform=axes.transAxes,
fontsize=18.0)
elif 'vmag' in objectinfo and objectinfo['vmag']:
axes.text(0.05,0.87,
'$V$ = %.3f' % (objectinfo['vmag'],),
ha='left',va='center',transform=axes.transAxes,
fontsize=18.0)
if ijcolor:
axes.text(0.05,0.83,
'$i - J$ = %.3f, $J$ = %.3f' % (ijcolor,
objectinfo['jmag']),
ha='left',va='center',transform=axes.transAxes,
fontsize=18.0)
elif 'jmag' in objectinfo and objectinfo['jmag']:
axes.text(0.05,0.83,
'$J$ = %.3f' % (objectinfo['jmag'],),
ha='left',va='center',transform=axes.transAxes,
fontsize=18.0)
if jkcolor:
axes.text(0.05,0.79,
'$J - K$ = %.3f, $K$ = %.3f' % (jkcolor,
objectinfo['kmag']),
ha='left',va='center',transform=axes.transAxes,
fontsize=18.0)
elif 'kmag' in objectinfo and objectinfo['kmag']:
axes.text(0.05,0.79,
'$K$ = %.3f' % (objectinfo['kmag'],),
ha='left',va='center',transform=axes.transAxes,
fontsize=18.0)
if 'sdssr' in objectinfo and objectinfo['sdssr']:
axes.text(0.05,0.75,'SDSS $r$ = %.3f' % objectinfo['sdssr'],
ha='left',va='center',transform=axes.transAxes,
fontsize=18.0)
# add in proper motion stuff if available in objectinfo
if ('pmra' in objectinfo and objectinfo['pmra'] and
'pmdecl' in objectinfo and objectinfo['pmdecl']):
pm = total_proper_motion(objectinfo['pmra'],
objectinfo['pmdecl'],
objectinfo['decl'])
axes.text(0.05,0.67,'$\mu$ = %.2f mas yr$^{-1}$' % pm,
ha='left',va='center',transform=axes.transAxes,
fontsize=18.0)
if 'jmag' in objectinfo and objectinfo['jmag']:
rpm = reduced_proper_motion(objectinfo['jmag'],pm)
axes.text(0.05,0.63,'$H_J$ = %.2f' % rpm,
ha='left',va='center',transform=axes.transAxes,
fontsize=18.0)
def _make_magseries_plot(axes,
stimes,
smags,
serrs,
magsarefluxes=False):
'''makes the magseries plot tile.
'''
scaledplottime = stimes - npmin(stimes)
axes.plot(scaledplottime,
smags,
marker='o',
ms=2.0, ls='None',mew=0,
color='green',
rasterized=True)
# flip y axis for mags
if not magsarefluxes:
plot_ylim = axes.get_ylim()
axes.set_ylim((plot_ylim[1], plot_ylim[0]))
# set the x axis limit
plot_xlim = axes.get_xlim()
axes.set_xlim((npmin(scaledplottime)-1.0,
npmax(scaledplottime)+1.0))
# make a grid
axes.grid(color='#a9a9a9',
alpha=0.9,
zorder=0,
linewidth=1.0,
linestyle=':')
# make the x and y axis labels
plot_xlabel = 'JD - %.3f' % npmin(stimes)
if magsarefluxes:
plot_ylabel = 'flux'
else:
plot_ylabel = 'magnitude'
axes.set_xlabel(plot_xlabel)
axes.set_ylabel(plot_ylabel)
# fix the yaxis ticks (turns off offset and uses the full
# value of the yaxis tick)
axes.get_yaxis().get_major_formatter().set_useOffset(False)
axes.get_xaxis().get_major_formatter().set_useOffset(False)
def _make_phased_magseries_plot(axes,
periodind,
stimes, smags,
varperiod, varepoch,
phasewrap, phasesort,
phasebin, minbinelems,
plotxlim,
lspmethod,
xliminsetmode=False,
twolspmode=False,
magsarefluxes=False):
'''makes the phased magseries plot tile.
if xliminsetmode = True, then makes a zoomed-in plot with the provided
plotxlim as the main x limits, and the full plot as an inset.
'''
# phase the magseries
phasedlc = phase_magseries(stimes,
smags,
varperiod,
varepoch,
wrap=phasewrap,
sort=phasesort)
plotphase = phasedlc['phase']
plotmags = phasedlc['mags']
# if we're supposed to bin the phases, do so
if phasebin:
binphasedlc = phase_bin_magseries(plotphase,
plotmags,
binsize=phasebin,
minbinelems=minbinelems)
binplotphase = binphasedlc['binnedphases']
binplotmags = binphasedlc['binnedmags']
# finally, make the phased LC plot
axes.plot(plotphase,
plotmags,
marker='o',
ms=2.0, ls='None',mew=0,
color='gray',
rasterized=True)
# overlay the binned phased LC plot if we're making one
if phasebin:
axes.plot(binplotphase,
binplotmags,
marker='o',
ms=4.0, ls='None',mew=0,
color='#1c1e57',
rasterized=True)
# flip y axis for mags
if not magsarefluxes:
plot_ylim = axes.get_ylim()
axes.set_ylim((plot_ylim[1], plot_ylim[0]))
# set the x axis limit
if not plotxlim:
plot_xlim = axes.get_xlim()
axes.set_xlim((npmin(plotphase)-0.1,
npmax(plotphase)+0.1))
else:
axes.set_xlim((plotxlim[0],plotxlim[1]))
# make a grid
axes.grid(color='#a9a9a9',
alpha=0.9,
zorder=0,
linewidth=1.0,
linestyle=':')
# make the x and y axis labels
plot_xlabel = 'phase'
if magsarefluxes:
plot_ylabel = 'flux'
else:
plot_ylabel = 'magnitude'
axes.set_xlabel(plot_xlabel)
axes.set_ylabel(plot_ylabel)
# fix the yaxis ticks (turns off offset and uses the full
# value of the yaxis tick)
axes.get_yaxis().get_major_formatter().set_useOffset(False)
axes.get_xaxis().get_major_formatter().set_useOffset(False)
# make the plot title
if periodind == 0:
plottitle = '%s best period: %.6f d - epoch: %.5f' % (
METHODSHORTLABELS[lspmethod],
varperiod,
varepoch
)
elif periodind == 1 and not twolspmode:
plottitle = '%s best period x 0.5: %.6f d - epoch: %.5f' % (
METHODSHORTLABELS[lspmethod],
varperiod,
varepoch
)
elif periodind == 2 and not twolspmode:
plottitle = '%s best period x 2: %.6f d - epoch: %.5f' % (
METHODSHORTLABELS[lspmethod],
varperiod,
varepoch
)
elif periodind > 2 and not twolspmode:
plottitle = '%s peak %s: %.6f d - epoch: %.5f' % (
METHODSHORTLABELS[lspmethod],
periodind-1,
varperiod,
varepoch
)
elif periodind > 0:
plottitle = '%s peak %s: %.6f d - epoch: %.5f' % (
METHODSHORTLABELS[lspmethod],
periodind+1,
varperiod,
varepoch
)
axes.set_title(plottitle)
# if we're making an inset plot showing the full range
if (plotxlim and isinstance(plotxlim, list) and
len(plotxlim) == 2 and xliminsetmode is True):
# bump the ylim of the plot so that the inset can fit in this axes plot
axesylim = axes.get_ylim()
if magsarefluxes:
axes.set_ylim(axesylim[0],
axesylim[1] + 0.5*npabs(axesylim[1]-axesylim[0]))
else:
axes.set_ylim(axesylim[0],
axesylim[1] - 0.5*npabs(axesylim[1]-axesylim[0]))
# put the inset axes in
inset = inset_axes(axes, width="40%", height="40%", loc=1)
# make the scatter plot for the phased LC plot
inset.plot(plotphase,
plotmags,
marker='o',
ms=2.0, ls='None',mew=0,
color='gray',
rasterized=True)
# overlay the binned phased LC plot if we're making one
if phasebin:
inset.plot(binplotphase,
binplotmags,
marker='o',
ms=4.0, ls='None',mew=0,
color='#1c1e57',
rasterized=True)
        # show the full phase coverage
if phasewrap:
inset.set_xlim(-0.2,0.8)
else:
inset.set_xlim(-0.1,1.1)
# flip y axis for mags
if not magsarefluxes:
inset_ylim = inset.get_ylim()
inset.set_ylim((inset_ylim[1], inset_ylim[0]))
# set the plot title
inset.text(0.5,0.1,'full phased light curve',
ha='center',va='center',transform=inset.transAxes)
# don't show axes labels or ticks
inset.set_xticks([])
inset.set_yticks([])
############################################
## CHECKPLOT FUNCTIONS THAT WRITE TO PNGS ##
############################################
def checkplot_png(lspinfo,
times,
mags,
errs,
magsarefluxes=False,
objectinfo=None,
findercmap='gray_r',
finderconvolve=None,
findercachedir='~/.astrobase/stamp-cache',
normto='globalmedian',
normmingap=4.0,
outfile=None,
sigclip=4.0,
varepoch='min',
phasewrap=True,
phasesort=True,
phasebin=0.002,
minbinelems=7,
plotxlim=[-0.8,0.8],
xliminsetmode=False,
plotdpi=100,
bestperiodhighlight=None,
verbose=True):
'''This makes a checkplot for an info dict from a period-finding routine.
A checkplot is a 3 x 3 grid of plots like so:
[LSP plot + objectinfo] [ unphased LC ] [ period 1 phased LC ]
[period 1 phased LC /2] [period 1 phased LC x2] [ period 2 phased LC ]
[ period 3 phased LC ] [period 4 phased LC ] [ period 5 phased LC ]
This is used to sanity check the five best periods obtained from an LSP
function in periodbase.
lspinfo is either a dict or a Python pickle filename containing a dict that
should look something like the dict below, containing the output from your
period search routine. The key 'lspvals' is the spectral power or SNR
obtained from Lomb-Scargle, PDM, AoV, or BLS. The keys 'nbestperiods' and
'nbestlspvals' contain the best five periods and their respective peaks
chosen by your period search routine (usually the highest SNR or highest
power peaks in the spectrum).
{'bestperiod':7.7375425564838061,
'lspvals':array([ 0.00892461, 0.0091704 , 0.00913682,...]),
'periods':array([ 8. , 7.999936, 7.999872, ...]),
'nbestperiods':[7.7375425564838061,
7.6370856881010738,
7.837604827964415,
7.5367037472486667,
7.9377048920074627],
'nbestlspvals':[0.071409790831114872,
0.055157963469682415,
0.055126754408175715,
0.023441268126990749,
0.023239128705778048],
'method':'gls'}
The 'method' key-val pair decides what kind of period finding method was
run. This is used to label the periodogram plot correctly. The following
values are recognized.
'gls' -> generalized Lomb-Scargle (e.g., from periodbase.pgen_lsp)
'pdm' -> Stellingwerf PDM (e.g., from periodbase.stellingwerf_pdm)
'aov' -> Schwarzenberg-Czerny AoV (e.g., from periodbase.aov_periodfind)
'bls' -> Box Least-squared Search (e.g., from periodbase.bls_parallel_pfind)
'sls' -> Lomb-Scargle from Scipy (e.g., from periodbase.scipylsp_parallel)
magsarefluxes = True means the values provided in the mags input array are
actually fluxes; this affects the sigma-clipping and plotting of light
curves.
If a dict is passed to objectinfo, this function will use it to figure out
where in the sky the checkplotted object is, and put the finding chart plus
some basic info into the checkplot. The objectinfo dict should look
something like those produced for HAT light curves using the reader
functions in the astrobase.hatlc module, e.g.:
{'bmag': 17.669,
'decl': -63.933598,
'hatid': 'HAT-786-0021445',
'objectid': 'HAT-786-0021445',
'hmag': 13.414,
'jmag': 14.086,
'kmag': 13.255,
'ndet': 10850,
'network': 'HS',
'pmdecl': -19.4,
'pmdecl_err': 5.1,
'pmra': 29.3,
'pmra_err': 4.1,
'ra': 23.172678,
'sdssg': 17.093,
'sdssi': 15.382,
'sdssr': 15.956,
'stations': 'HS02,HS04,HS06',
'twomassid': '01324144-6356009 ',
'ucac4id': '12566701',
'vmag': 16.368}
At a minimum, you must have the following fields: 'objectid', 'ra',
'decl'. If 'jmag', 'kmag', 'bmag', 'vmag', 'sdssr', and 'sdssi' are also
present, the following quantities will be calculated: B-V, J-K, and i-J. If
'pmra' and 'pmdecl' are present as well, the total proper motion and reduced
J magnitude proper motion will be calculated.
findercmap sets the matplotlib colormap of the downloaded finder chart:
http://matplotlib.org/examples/color/colormaps_reference.html
finderconvolve convolves the finder FITS image with the given
astropy.convolution kernel:
http://docs.astropy.org/en/stable/convolution/kernels.html
This can be useful to see effects of wide-field telescopes with large pixel
sizes (like HAT) on the blending of sources.
findercachedir is the directory where the downloaded stamp FITS files
go. Repeated calls to this function will then use the cached version of the
stamp if the finder coordinates don't change.
bestperiodhighlight sets whether user wants a background on the phased light
curve from each periodogram type to distinguish them from others. this is an
HTML hex color specification. If this is None, no highlight will be added.
xliminsetmode = True sets up the phased mag series plot to show a zoomed-in
portion (set by plotxlim) as the main plot and an inset version of the full
phased light curve from phase 0.0 to 1.0. This can be useful if searching
for small dips near phase 0.0 caused by planetary transits for example.
verbose = False turns off many of the informational messages. Useful for
when an external function is driving lots of checkplot calls.
'''
if not outfile and isinstance(lspinfo,str):
# generate the plot filename
plotfpath = os.path.join(
os.path.dirname(lspinfo),
'checkplot-%s.png' % (
os.path.basename(lspinfo),
)
)
elif outfile:
plotfpath = outfile
else:
plotfpath = 'checkplot.png'
# get the lspinfo from a pickle file transparently
if isinstance(lspinfo,str) and os.path.exists(lspinfo):
if verbose:
LOGINFO('loading LSP info from pickle %s' % lspinfo)
if '.gz' in lspinfo:
with gzip.open(lspinfo,'rb') as infd:
lspinfo = pickle.load(infd)
else:
with open(lspinfo,'rb') as infd:
lspinfo = pickle.load(infd)
# get the things to plot out of the data
if ('periods' in lspinfo and
'lspvals' in lspinfo and
'bestperiod' in lspinfo):
periods = lspinfo['periods']
lspvals = lspinfo['lspvals']
bestperiod = lspinfo['bestperiod']
nbestperiods = lspinfo['nbestperiods']
nbestlspvals = lspinfo['nbestlspvals']
lspmethod = lspinfo['method']
else:
LOGERROR('could not understand lspinfo for this object, skipping...')
return None
if not npisfinite(bestperiod):
LOGWARNING('no best period found for this object, skipping...')
return None
# initialize the plot
fig, axes = plt.subplots(3,3)
axes = npravel(axes)
# this is a full page plot
fig.set_size_inches(30,24)
#######################
## PLOT 1 is the LSP ##
#######################
_make_periodogram(axes[0],lspinfo,objectinfo,
findercmap, finderconvolve,
verbose=verbose,
findercachedir=findercachedir)
######################################
## NOW MAKE THE PHASED LIGHT CURVES ##
######################################
stimes, smags, serrs = sigclip_magseries(times,
mags,
errs,
magsarefluxes=magsarefluxes,
sigclip=sigclip)
# take care of the normalization
if normto is not False:
stimes, smags = normalize_magseries(stimes, smags,
normto=normto,
magsarefluxes=magsarefluxes,
mingap=normmingap)
# make sure we have some lightcurve points to plot after sigclip
if len(stimes) >= 50:
##############################
## PLOT 2 is an unphased LC ##
##############################
_make_magseries_plot(axes[1], stimes, smags, serrs,
magsarefluxes=magsarefluxes)
###########################
### NOW PLOT PHASED LCS ###
###########################
# make the plot for each best period
lspbestperiods = nbestperiods[::]
lspperiodone = lspbestperiods[0]
lspbestperiods.insert(1,lspperiodone*2.0)
lspbestperiods.insert(1,lspperiodone*0.5)
for periodind, varperiod in enumerate(lspbestperiods):
# figure out the epoch, if it's None, use the min of the time
if varepoch is None:
varepoch = npmin(stimes)
# if the varepoch is 'min', then fit a spline to the light curve
# phased using the min of the time, find the fit mag minimum and use
# the time for that as the varepoch
elif isinstance(varepoch,str) and varepoch == 'min':
try:
spfit = spline_fit_magseries(stimes,
smags,
serrs,
varperiod,
sigclip=None,
magsarefluxes=magsarefluxes,
verbose=verbose)
varepoch = spfit['fitinfo']['fitepoch']
if len(varepoch) != 1:
varepoch = varepoch[0]
except Exception as e:
LOGEXCEPTION('spline fit failed, using min(times) as epoch')
varepoch = npmin(stimes)
if verbose:
LOGINFO('plotting phased LC with period %.6f, epoch %.5f' %
(varperiod, varepoch))
# make sure the best period phased LC plot stands out
if periodind == 0 and bestperiodhighlight:
if MPLVERSION >= (2,0,0):
axes[periodind+2].set_facecolor(bestperiodhighlight)
else:
axes[periodind+2].set_axis_bgcolor(bestperiodhighlight)
_make_phased_magseries_plot(axes[periodind+2],
periodind,
stimes, smags,
varperiod, varepoch,
phasewrap, phasesort,
phasebin, minbinelems,
plotxlim, lspmethod,
xliminsetmode=xliminsetmode,
magsarefluxes=magsarefluxes)
# end of plotting for each ax
# save the plot to disk
fig.set_tight_layout(True)
if plotfpath.endswith('.png'):
fig.savefig(plotfpath,dpi=plotdpi)
else:
fig.savefig(plotfpath)
plt.close('all')
if verbose:
LOGINFO('checkplot done -> %s' % plotfpath)
return plotfpath
# otherwise, there's no valid data for this plot
else:
LOGWARNING('no good data')
for periodind in range(5):
axes[periodind+2].text(
0.5,0.5,
('no best aperture light curve available'),
horizontalalignment='center',
verticalalignment='center',
transform=axes[periodind+2].transAxes
)
fig.set_tight_layout(True)
if plotfpath.endswith('.png'):
fig.savefig(plotfpath, dpi=plotdpi)
else:
fig.savefig(plotfpath)
plt.close('all')
if verbose:
LOGINFO('checkplot done -> %s' % plotfpath)
return plotfpath
def twolsp_checkplot_png(lspinfo1,
lspinfo2,
times,
mags,
errs,
magsarefluxes=False,
objectinfo=None,
findercmap='gray_r',
finderconvolve=None,
findercachedir='~/.astrobase/stamp-cache',
normto='globalmedian',
normmingap=4.0,
outfile=None,
sigclip=4.0,
varepoch='min',
phasewrap=True,
phasesort=True,
phasebin=0.002,
minbinelems=7,
plotxlim=[-0.8,0.8],
xliminsetmode=False,
plotdpi=100,
bestperiodhighlight=None,
verbose=True):
'''This makes a checkplot using results from two independent period-finders.
Adapted from Luke Bouma's implementation of the same. This makes a special
checkplot that uses two lspinfo dictionaries, from two independent
period-finding methods. For EBs, it's probably best to use Stellingwerf PDM
or Schwarzenberg-Czerny AoV as one of these, and the Box Least-Squares (BLS)
search method as the other one.
The checkplot layout in this case is:
[ pgram1 + objectinfo ] [ pgram2 ] [ unphased LC ]
[ pgram1 P1 phased LC ] [ pgram1 P2 phased LC ] [ pgram1 P3 phased LC ]
[ pgram2 P1 phased LC ] [ pgram2 P2 phased LC ] [ pgram2 P3 phased LC ]
where:
pgram1 is the plot for the periodogram in the lspinfo1 dict
pgram1 P1, P2, and P3 are the best three periods from lspinfo1
pgram2 is the plot for the periodogram in the lspinfo2 dict
pgram2 P1, P2, and P3 are the best three periods from lspinfo2
All other args and kwargs are the same as checkplot_png. Note that we take
the output file name from lspinfo1 if lspinfo1 is a string filename pointing
to a (gzipped) pickle containing the results dict from a period-finding
routine similar to those in periodbase.
'''
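# A minimal usage sketch (hypothetical inputs: 'pdm-result.pkl' and
# 'bls-result.pkl' are assumed period-finder result pickles, and times, mags,
# errs are assumed ndarrays defined by the caller):
#
# cpng = twolsp_checkplot_png('pdm-result.pkl', 'bls-result.pkl',
#                             times, mags, errs,
#                             objectinfo={'objectid': 'OBJ-0001',
#                                         'ra': 123.456, 'decl': -23.456},
#                             outfile='twolsp-checkplot-OBJ-0001.png')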
# generate the plot filename
if not outfile and isinstance(lspinfo1,str):
plotfpath = os.path.join(
    os.path.dirname(lspinfo1),
    'twolsp-checkplot-%s.png' % (
        os.path.basename(lspinfo1),
    )
)
elif outfile:
plotfpath = outfile
else:
plotfpath = 'twolsp-checkplot.png'
# get the first LSP from a pickle file transparently
if isinstance(lspinfo1,str) and os.path.exists(lspinfo1):
if verbose:
LOGINFO('loading LSP info from pickle %s' % lspinfo1)
if '.gz' in lspinfo1:
with gzip.open(lspinfo1,'rb') as infd:
lspinfo1 = pickle.load(infd)
else:
with open(lspinfo1,'rb') as infd:
lspinfo1 = pickle.load(infd)
# get the second LSP from a pickle file transparently
if isinstance(lspinfo2,str) and os.path.exists(lspinfo2):
if verbose:
LOGINFO('loading LSP info from pickle %s' % lspinfo2)
if '.gz' in lspinfo2:
with gzip.open(lspinfo2,'rb') as infd:
lspinfo2 = pickle.load(infd)
else:
with open(lspinfo2,'rb') as infd:
lspinfo2 = pickle.load(infd)
# get the things to plot out of the data
if ('periods' in lspinfo1 and 'periods' in lspinfo2 and
'lspvals' in lspinfo1 and 'lspvals' in lspinfo2 and
'bestperiod' in lspinfo1 and 'bestperiod' in lspinfo2):
periods1 = lspinfo1['periods']
lspvals1 = lspinfo1['lspvals']
bestperiod1 = lspinfo1['bestperiod']
nbestperiods1 = lspinfo1['nbestperiods']
nbestlspvals1 = lspinfo1['nbestlspvals']
lspmethod1 = lspinfo1['method']
periods2 = lspinfo2['periods']
lspvals2 = lspinfo2['lspvals']
bestperiod2 = lspinfo2['bestperiod']
nbestperiods2 = lspinfo2['nbestperiods']
nbestlspvals2 = lspinfo2['nbestlspvals']
lspmethod2 = lspinfo2['method']
else:
LOGERROR('could not understand lspinfo1 or lspinfo2 '
'for this object, skipping...')
return None
if (not npisfinite(bestperiod1)) or (not npisfinite(bestperiod2)):
LOGWARNING('no best period found for this object, skipping...')
return None
# initialize the plot
fig, axes = plt.subplots(3,3)
axes = npravel(axes)
# this is a full page plot
fig.set_size_inches(30,24)
######################################################################
## PLOT 1 is the LSP from lspinfo1, including objectinfo and finder ##
######################################################################
_make_periodogram(axes[0], lspinfo1, objectinfo,
findercmap, finderconvolve,
verbose=verbose,
findercachedir=findercachedir)
#####################################
## PLOT 2 is the LSP from lspinfo2 ##
#####################################
_make_periodogram(axes[1], lspinfo2, None,
findercmap, finderconvolve)
##########################################
## FIX UP THE MAGS AND REMOVE BAD STUFF ##
##########################################
# sigclip first
stimes, smags, serrs = sigclip_magseries(times,
mags,
errs,
magsarefluxes=magsarefluxes,
sigclip=sigclip)
# take care of the normalization
if normto is not False:
stimes, smags = normalize_magseries(stimes, smags,
normto=normto,
magsarefluxes=magsarefluxes,
mingap=normmingap)
# make sure we have some lightcurve points to plot after sigclip
if len(stimes) >= 50:
##############################
## PLOT 3 is an unphased LC ##
##############################
_make_magseries_plot(axes[2], stimes, smags, serrs,
magsarefluxes=magsarefluxes)
# make the plot for each best period
lspbestperiods1 = nbestperiods1[::]
lspbestperiods2 = nbestperiods2[::]
##########################################################
### NOW PLOT PHASED LCS FOR 3 BEST PERIODS IN LSPINFO1 ###
##########################################################
for periodind, varperiod, plotaxes in zip([0,1,2],
lspbestperiods1[:3],
[axes[3], axes[4], axes[5]]):
# figure out the epoch, if it's None, use the min of the time
if varepoch is None:
varepoch = npmin(stimes)
# if the varepoch is 'min', then fit a spline to the light curve
# phased using the min of the time, find the fit mag minimum and use
# the time for that as the varepoch
elif isinstance(varepoch,str) and varepoch == 'min':
try:
spfit = spline_fit_magseries(stimes,
smags,
serrs,
varperiod,
sigclip=None,
magsarefluxes=magsarefluxes,
verbose=verbose)
varepoch = spfit['fitinfo']['fitepoch']
if len(varepoch) != 1:
varepoch = varepoch[0]
except Exception as e:
LOGEXCEPTION('spline fit failed, using min(times) as epoch')
varepoch = npmin(stimes)
if verbose:
LOGINFO('plotting phased LC with period %.6f, epoch %.5f' %
(varperiod, varepoch))
# make sure the best period phased LC plot stands out
if periodind == 0 and bestperiodhighlight:
if MPLVERSION >= (2,0,0):
plotaxes.set_facecolor(bestperiodhighlight)
else:
plotaxes.set_axis_bgcolor(bestperiodhighlight)
_make_phased_magseries_plot(plotaxes,
periodind,
stimes, smags,
varperiod, varepoch,
phasewrap, phasesort,
phasebin, minbinelems,
plotxlim, lspmethod1,
twolspmode=True,
magsarefluxes=magsarefluxes,
xliminsetmode=xliminsetmode)
##########################################################
### NOW PLOT PHASED LCS FOR 3 BEST PERIODS IN LSPINFO2 ###
##########################################################
for periodind, varperiod, plotaxes in zip([0,1,2],
lspbestperiods2[:3],
[axes[6], axes[7], axes[8]]):
# figure out the epoch, if it's None, use the min of the time
if varepoch is None:
varepoch = npmin(stimes)
# if the varepoch is 'min', then fit a spline to the light curve
# phased using the min of the time, find the fit mag minimum and use
# the time for that as the varepoch
elif isinstance(varepoch,str) and varepoch == 'min':
try:
spfit = spline_fit_magseries(stimes,
smags,
serrs,
varperiod,
magsarefluxes=magsarefluxes,
sigclip=None,
verbose=verbose)
varepoch = spfit['fitinfo']['fitepoch']
if len(varepoch) != 1:
varepoch = varepoch[0]
except Exception as e:
LOGEXCEPTION('spline fit failed, using min(times) as epoch')
varepoch = npmin(stimes)
if verbose:
LOGINFO('plotting phased LC with period %.6f, epoch %.5f' %
(varperiod, varepoch))
# make sure the best period phased LC plot stands out
if periodind == 0 and bestperiodhighlight:
if MPLVERSION >= (2,0,0):
plotaxes.set_facecolor(bestperiodhighlight)
else:
plotaxes.set_axis_bgcolor(bestperiodhighlight)
_make_phased_magseries_plot(plotaxes,
periodind,
stimes, smags,
varperiod, varepoch,
phasewrap, phasesort,
phasebin, minbinelems,
plotxlim, lspmethod2,
twolspmode=True,
magsarefluxes=magsarefluxes,
xliminsetmode=xliminsetmode)
# end of plotting for each ax
# save the plot to disk
fig.set_tight_layout(True)
if plotfpath.endswith('.png'):
fig.savefig(plotfpath,dpi=plotdpi)
else:
fig.savefig(plotfpath)
plt.close()
if verbose:
LOGINFO('checkplot done -> %s' % plotfpath)
return plotfpath
# otherwise, there's no valid data for this plot
else:
LOGWARNING('no good data')
for periodind in range(5):
axes[periodind+2].text(
0.5,0.5,
('no best aperture light curve available'),
horizontalalignment='center',
verticalalignment='center',
transform=axes[periodind+2].transAxes
)
fig.set_tight_layout(True)
if plotfpath.endswith('.png'):
fig.savefig(plotfpath, dpi=plotdpi)
else:
fig.savefig(plotfpath)
plt.close()
if verbose:
LOGINFO('checkplot done -> %s' % plotfpath)
return plotfpath
#########################################
## PICKLE CHECKPLOT UTILITY FUNCTIONS ##
#########################################
def _xyzdist_to_distarcsec(xyzdist):
'''
This just inverts the xyz unit vector distance -> angular distance relation.
'''
return np.degrees(2.0*np.arcsin(xyzdist/2.0))*3600.0
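# worked example of the relation above: two unit vectors separated by an angle
# theta have chord length d = 2*sin(theta/2), so theta = 2*arcsin(d/2). for a
# small chord such as d = 1.0e-5, arcsin(d/2) ~ d/2, so
# _xyzdist_to_distarcsec(1.0e-5) ~ degrees(1.0e-5)*3600.0 ~ 2.06 arcsec.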
def _base64_to_file(b64str, outfpath, writetostrio=False):
'''
This converts the base64 encoded string to a file.
'''
try:
filebytes = base64.b64decode(b64str)
# if we're writing back to a stringio object
if writetostrio:
outobj = strio(filebytes)
return outobj
# otherwise, we're writing to an actual file
else:
with open(outfpath,'wb') as outfd:
outfd.write(filebytes)
if os.path.exists(outfpath):
return outfpath
else:
LOGERROR('could not write output file: %s' % outfpath)
return None
except Exception as e:
LOGEXCEPTION('failed while trying to convert '
'b64 string to file %s' % outfpath)
return None
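# usage sketch (the base64 string 'b64str' and the output path here are
# hypothetical placeholders):
#
# pngpath = _base64_to_file(b64str, '/tmp/finderchart.png')
# pngbuffer = _base64_to_file(b64str, None, writetostrio=True)  # in-memory buffer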
def _pkl_finder_objectinfo(objectinfo,
varinfo,
findercmap,
finderconvolve,
sigclip,
normto,
normmingap,
lclistpkl=None,
nbrradiusarcsec=30.0,
plotdpi=100,
findercachedir='~/.astrobase/stamp-cache',
verbose=True):
'''This returns the finder chart and object information as a dict.
'''
if (isinstance(objectinfo, dict) and
('objectid' in objectinfo or 'hatid' in objectinfo) and
'ra' in objectinfo and 'decl' in objectinfo and
objectinfo['ra'] and objectinfo['decl']):
if 'objectid' not in objectinfo:
objectid = objectinfo['hatid']
else:
objectid = objectinfo['objectid']
if verbose:
LOGINFO('adding in object information and '
'finder chart for %s at RA: %.3f, DEC: %.3f' %
(objectid, objectinfo['ra'], objectinfo['decl']))
# get the finder chart
try:
# generate the finder chart
finder, finderheader = skyview_stamp(objectinfo['ra'],
objectinfo['decl'],
convolvewith=finderconvolve,
verbose=verbose,
cachedir=findercachedir)
finderfig = plt.figure(figsize=(3,3),dpi=plotdpi,frameon=False)
plt.imshow(finder, cmap=findercmap)
# skip down to after nbr stuff for the rest of the finderchart...
# search around the target's location and get its neighbors if
# lclistpkl is provided and it exists
if (lclistpkl is not None and
os.path.exists(lclistpkl) and
nbrradiusarcsec is not None and
nbrradiusarcsec > 0.0):
if lclistpkl.endswith('.gz'):
infd = gzip.open(lclistpkl,'rb')
else:
infd = open(lclistpkl,'rb')
lclist = pickle.load(infd)
infd.close()
if 'kdtree' not in lclist:
    LOGERROR('neighbors within %.1f arcsec for %s could '
             'not be found, no kdtree in lclistpkl: %s'
             % (nbrradiusarcsec, objectid, lclistpkl))
neighbors = None
kdt = None
else:
kdt = lclist['kdtree']
obj_cosdecl = np.cos(np.radians(objectinfo['decl']))
obj_sindecl = np.sin(np.radians(objectinfo['decl']))
obj_cosra = np.cos(np.radians(objectinfo['ra']))
obj_sinra = np.sin(np.radians(objectinfo['ra']))
obj_xyz = np.column_stack((obj_cosra*obj_cosdecl,
obj_sinra*obj_cosdecl,
obj_sindecl))
match_xyzdist = (
2.0 * np.sin(np.radians(nbrradiusarcsec/3600.0)/2.0)
)
matchdists, matchinds = kdt.query(
obj_xyz,
k=6, # get closest 5 neighbors + tgt
distance_upper_bound=match_xyzdist
)
# sort by matchdist
mdsorted = np.argsort(matchdists[0])
matchdists = matchdists[0][mdsorted]
matchinds = matchinds[0][mdsorted]
# luckily, the indices to the kdtree are the same as that
# for the objects (I think)
neighbors = []
# initialize the finder WCS
finderwcs = WCS(finderheader)
nbrind = 0
for md, mi in zip(matchdists, matchinds):
if np.isfinite(md) and md > 0.0:
# generate the xy for the finder we'll use a HTML5
# canvas and these pixcoords to highlight each
# neighbor when we mouse over its row in the
# neighbors tab
pixcoords = finderwcs.all_world2pix(
np.array([[lclist['objects']['ra'][mi],
lclist['objects']['decl'][mi]]]),
1
)
# each elem is {'objectid',
# 'ra','decl',
# 'xpix','ypix',
# 'dist','lcfpath'}
thisnbr = {
'objectid':lclist['objects']['objectid'][mi],
'ra':lclist['objects']['ra'][mi],
'decl':lclist['objects']['decl'][mi],
'xpix':pixcoords[0,0],
'ypix':300.0 - pixcoords[0,1],
'dist':_xyzdist_to_distarcsec(md),
'lcfpath': lclist['objects']['lcfname'][mi]
}
neighbors.append(thisnbr)
nbrind = nbrind+1
# put in a nice marker for this neighbor into the
# overall finder chart
annotatex = pixcoords[0,0]
annotatey = 300.0 - pixcoords[0,1]
if ((300.0 - annotatex) > 50.0):
offx = annotatex + 30.0
xha = 'center'
else:
offx = annotatex - 30.0
xha = 'center'
if ((300.0 - annotatey) > 50.0):
offy = annotatey - 30.0
yha = 'center'
else:
offy = annotatey + 30.0
yha = 'center'
plt.annotate('N%s' % nbrind,
(annotatex, annotatey),
xytext=(offx, offy),
arrowprops={'facecolor':'blue',
'edgecolor':'blue',
'width':1.0,
'headwidth':1.0,
'headlength':0.1,
'shrink':0.0},
color='blue',
horizontalalignment=xha,
verticalalignment=yha)
# if there are no neighbors, set the 'neighbors' key to None
else:
neighbors = None
kdt = None
#
# finish up the finder chart after neighbors are processed
#
plt.xticks([])
plt.yticks([])
# grid lines pointing to the center of the frame
plt.axvline(x=150,ymin=0.2,ymax=0.4,linewidth=2.0,color='b')
plt.axhline(y=149,xmin=0.2,xmax=0.4,linewidth=2.0,color='b')
plt.gca().set_frame_on(False)
# this is the output instance
finderpng = strio()
finderfig.savefig(finderpng,
bbox_inches='tight',
pad_inches=0.0, format='png')
plt.close()
# encode the finderpng instance to base64
finderpng.seek(0)
finderb64 = base64.b64encode(finderpng.read())
# close the stringio buffer
finderpng.close()
except Exception as e:
LOGEXCEPTION('could not fetch a DSS stamp for this '
'object %s using coords (%.3f,%.3f)' %
(objectid, objectinfo['ra'], objectinfo['decl']))
finderb64 = None
neighbors = None
kdt = None
# now that we have the finder chart, get the rest of the object
# information
# first, the color features
colorfeat = color_features(objectinfo)
# next, get the coord features
coordfeat = coord_features(objectinfo)
# next, get the color classification
colorclass = color_classification(colorfeat, coordfeat)
# get the neighbor features and GAIA info
nbrfeat = neighbor_gaia_features(objectinfo, kdt, nbrradiusarcsec,
verbose=False)
# update the objectinfo dict with everything
objectinfo.update(colorfeat)
objectinfo.update(coordfeat)
objectinfo.update(colorclass)
objectinfo.update(nbrfeat)
# update GAIA info so it's available at the first level
if 'ok' in objectinfo['gaia_status']:
objectinfo['gaiamag'] = objectinfo['gaia_mags'][0]
objectinfo['gaia_absmag'] = objectinfo['gaia_absolute_mags'][0]
objectinfo['gaia_parallax'] = objectinfo['gaia_parallaxes'][0]
objectinfo['gaia_parallax_err'] = objectinfo['gaia_parallax_errs'][0]
else:
objectinfo['gaiamag'] = np.nan
objectinfo['gaia_absmag'] = np.nan
objectinfo['gaia_parallax'] = np.nan
objectinfo['gaia_parallax_err'] = np.nan
# put together the initial checkplot pickle dictionary
# this will be updated by the functions below as appropriate
# and will be written out as a (gzipped) pickle at the end of processing
checkplotdict = {'objectid':objectid,
'neighbors':neighbors,
'objectinfo':objectinfo,
'finderchart':finderb64,
'sigclip':sigclip,
'normto':normto,
'normmingap':normmingap}
# add the objecttags key to objectinfo
checkplotdict['objectinfo']['objecttags'] = None
# if there's no objectinfo, we can't do anything.
else:
# put together the initial checkplot pickle dictionary
# this will be updated by the functions below as appropriate
# and will be written out as a (gzipped) pickle at the end of processing
checkplotdict = {'objectid':None,
'neighbors':None,
'objectinfo':{'bmag':None,
'bvcolor':None,
'decl':None,
'hatid':None,
'hmag':None,
'ijcolor':None,
'jkcolor':None,
'jmag':None,
'kmag':None,
'ndet':None,
'network':None,
'objecttags':None,
'pmdecl':None,
'pmdecl_err':None,
'pmra':None,
'pmra_err':None,
'propermotion':None,
'ra':None,
'rpmj':None,
'sdssg':None,
'sdssi':None,
'sdssr':None,
'stations':None,
'twomassid':None,
'ucac4id':None,
'vmag':None},
'finderchart':None,
'sigclip':sigclip,
'normto':normto,
'normmingap':normmingap}
# end of objectinfo processing
# add the varinfo dict
if isinstance(varinfo, dict):
checkplotdict['varinfo'] = varinfo
else:
checkplotdict['varinfo'] = {
'objectisvar':None,
'vartags':None,
'varisperiodic':None,
'varperiod':None,
'varepoch':None,
}
return checkplotdict
def _pkl_periodogram(lspinfo,
plotdpi=100,
override_pfmethod=None):
'''This returns the periodogram plot PNG as base64, plus info as a dict.
'''
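# usage sketch ('gls' here is assumed to be a period-finder result dict
# containing the keys read below, e.g. the output of a GLS run whose
# 'method' key is 'gls'):
#
# pgramdict = _pkl_periodogram(gls, plotdpi=100)
# pgramdict['gls']['periodogram']  # -> base64-encoded periodogram PNG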
# get the appropriate plot ylabel
pgramylabel = PLOTYLABELS[lspinfo['method']]
# get the periods and lspvals from lspinfo
periods = lspinfo['periods']
lspvals = lspinfo['lspvals']
bestperiod = lspinfo['bestperiod']
nbestperiods = lspinfo['nbestperiods']
nbestlspvals = lspinfo['nbestlspvals']
# open the figure instance
pgramfig = plt.figure(figsize=(7.5,4.8),dpi=plotdpi)
# make the plot
plt.plot(periods,lspvals)
plt.xscale('log',basex=10)
plt.xlabel('Period [days]')
plt.ylabel(pgramylabel)
plottitle = '%s - %.6f d' % (METHODLABELS[lspinfo['method']],
bestperiod)
plt.title(plottitle)
# show the best five peaks on the plot
for xbestperiod, xbestpeak in zip(nbestperiods,
nbestlspvals):
plt.annotate('%.6f' % xbestperiod,
xy=(xbestperiod, xbestpeak), xycoords='data',
xytext=(0.0,25.0), textcoords='offset points',
arrowprops=dict(arrowstyle="->"),fontsize='14.0')
# make a grid
plt.grid(color='#a9a9a9',
alpha=0.9,
zorder=0,
linewidth=1.0,
linestyle=':')
# this is the output instance
pgrampng = strio()
pgramfig.savefig(pgrampng,
# bbox_inches='tight',
pad_inches=0.0, format='png')
plt.close()
# encode the pgrampng instance to base64
pgrampng.seek(0)
pgramb64 = base64.b64encode(pgrampng.read())
# close the stringio buffer
pgrampng.close()
if not override_pfmethod:
# this is the dict to return
checkplotdict = {
lspinfo['method']:{
'periods':periods,
'lspvals':lspvals,
'bestperiod':bestperiod,
'nbestperiods':nbestperiods,
'nbestlspvals':nbestlspvals,
'periodogram':pgramb64,
}
}
else:
# this is the dict to return
checkplotdict = {
override_pfmethod:{
'periods':periods,
'lspvals':lspvals,
'bestperiod':bestperiod,
'nbestperiods':nbestperiods,
'nbestlspvals':nbestlspvals,
'periodogram':pgramb64,
}
}
return checkplotdict
def _pkl_magseries_plot(stimes, smags, serrs,
plotdpi=100,
magsarefluxes=False):
'''This returns the magseries plot PNG as base64, plus arrays as dict.
'''
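# usage sketch (stimes, smags, serrs are assumed to be the sigma-clipped
# ndarrays produced earlier in the checkplot workflow):
#
# msdict = _pkl_magseries_plot(stimes, smags, serrs, magsarefluxes=False)
# msdict['magseries']['plot']  # -> base64-encoded PNG of the unphased LC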
scaledplottime = stimes - npmin(stimes)
# open the figure instance
magseriesfig = plt.figure(figsize=(7.5,4.8),dpi=plotdpi)
plt.plot(scaledplottime,
smags,
marker='o',
ms=2.0, ls='None',mew=0,
color='green',
rasterized=True)
# flip y axis for mags
if not magsarefluxes:
plot_ylim = plt.ylim()
plt.ylim((plot_ylim[1], plot_ylim[0]))
# set the x axis limit
plot_xlim = plt.xlim()
plt.xlim((npmin(scaledplottime)-2.0,
npmax(scaledplottime)+2.0))
# make a grid
plt.grid(color='#a9a9a9',
alpha=0.9,
zorder=0,
linewidth=1.0,
linestyle=':')
# make the x and y axis labels
plot_xlabel = 'JD - %.3f' % npmin(stimes)
if magsarefluxes:
plot_ylabel = 'flux'
else:
plot_ylabel = 'magnitude'
plt.xlabel(plot_xlabel)
plt.ylabel(plot_ylabel)
# fix the yaxis ticks (turns off offset and uses the full
# value of the yaxis tick)
plt.gca().get_yaxis().get_major_formatter().set_useOffset(False)
plt.gca().get_xaxis().get_major_formatter().set_useOffset(False)
# this is the output instance
magseriespng = strio()
magseriesfig.savefig(magseriespng,
# bbox_inches='tight',
pad_inches=0.05, format='png')
plt.close()
# encode the magseriespng instance to base64
magseriespng.seek(0)
magseriesb64 = base64.b64encode(magseriespng.read())
# close the stringio buffer
magseriespng.close()
checkplotdict = {
'magseries':{
'plot':magseriesb64,
'times':stimes,
'mags':smags,
'errs':serrs
}
}
return checkplotdict
def _pkl_phased_magseries_plot(checkplotdict, lspmethod, periodind,
stimes, smags, serrs,
varperiod, varepoch,
phasewrap=True,
phasesort=True,
phasebin=0.002,
minbinelems=7,
plotxlim=[-0.8,0.8],
plotdpi=100,
bestperiodhighlight=None,
xgridlines=None,
xliminsetmode=False,
magsarefluxes=False,
directreturn=False,
overplotfit=None,
verbose=True,
override_pfmethod=None):
'''This returns the phased magseries plot PNG as base64 plus info as a dict.
checkplotdict is an existing checkplotdict to update. If it's None or
directreturn = True, then the generated dict result for this magseries plot
will be returned directly.
lspmethod is a string indicating the type of period-finding algorithm that
produced the period. If this is not in METHODSHORTLABELS, it will be used
verbatim.
periodind is the index of the period.
If == 0 -> best period and bestperiodhighlight is applied if not None
If > 0 -> some other peak of the periodogram
If == -1 -> special mode w/ no periodogram labels and enabled highlight
overplotfit is a result dict returned from one of the XXXX_fit_magseries
functions in astrobase.varbase.lcfit. If this is not None, then the fit will
be overplotted on the phased light curve plot.
overplotfit must have the following structure and at least the keys below if
not originally from one of these functions:
{'fittype':<str: name of fit method>,
'fitchisq':<float: the chi-squared value of the fit>,
'fitredchisq':<float: the reduced chi-squared value of the fit>,
'fitinfo':{'fitmags':<ndarray: model mags or fluxes from fit function>},
'magseries':{'times':<ndarray: times at which the fitmags are evaluated>}}
fitmags and times should all be of the same size. overplotfit is copied over
to the checkplot dict for each specific phased LC plot to save all of this
information.
'''
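# usage sketch in directreturn mode (stimes, smags, serrs and the period and
# epoch values below are assumed inputs, not defined in this function):
#
# phdict = _pkl_phased_magseries_plot(None, 'gls', 0,
#                                     stimes, smags, serrs,
#                                     3.14159, 2455000.0,
#                                     directreturn=True)
# phdict['plot']  # -> base64-encoded PNG of the phased light curve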
# open the figure instance
phasedseriesfig = plt.figure(figsize=(7.5,4.8),dpi=plotdpi)
# figure out the epoch, if it's None, use the min of the time
if varepoch is None:
varepoch = npmin(stimes)
# if the varepoch is 'min', then fit a spline to the light curve
# phased using the min of the time, find the fit mag minimum and use
# the time for that as the varepoch
elif isinstance(varepoch,str) and varepoch == 'min':
try:
spfit = spline_fit_magseries(stimes,
smags,
serrs,
varperiod,
magsarefluxes=magsarefluxes,
sigclip=None,
verbose=verbose)
varepoch = spfit['fitinfo']['fitepoch']
if len(varepoch) != 1:
varepoch = varepoch[0]
except Exception as e:
LOGEXCEPTION('spline fit failed, using min(times) as epoch')
varepoch = npmin(stimes)
if verbose:
LOGINFO('plotting %s phased LC with period %s: %.6f, epoch: %.5f' %
(lspmethod, periodind, varperiod, varepoch))
# make the plot title based on the lspmethod
if periodind == 0:
plottitle = '%s best period: %.6f d - epoch: %.5f' % (
(METHODSHORTLABELS[lspmethod] if lspmethod in METHODSHORTLABELS
else lspmethod),
varperiod,
varepoch
)
elif periodind > 0:
plottitle = '%s peak %s: %.6f d - epoch: %.5f' % (
(METHODSHORTLABELS[lspmethod] if lspmethod in METHODSHORTLABELS
else lspmethod),
periodind+1,
varperiod,
varepoch
)
elif periodind == -1:
plottitle = '%s period: %.6f d - epoch: %.5f' % (
lspmethod,
varperiod,
varepoch
)
# phase the magseries
phasedlc = phase_magseries(stimes,
smags,
varperiod,
varepoch,
wrap=phasewrap,
sort=phasesort)
plotphase = phasedlc['phase']
plotmags = phasedlc['mags']
# if we're supposed to bin the phases, do so
if phasebin:
binphasedlc = phase_bin_magseries(plotphase,
plotmags,
binsize=phasebin,
minbinelems=minbinelems)
binplotphase = binphasedlc['binnedphases']
binplotmags = binphasedlc['binnedmags']
else:
binplotphase = None
binplotmags = None
# finally, make the phased LC plot
plt.plot(plotphase,
plotmags,
marker='o',
ms=2.0, ls='None',mew=0,
color='gray',
rasterized=True)
# overlay the binned phased LC plot if we're making one
if phasebin:
plt.plot(binplotphase,
binplotmags,
marker='o',
ms=4.0, ls='None',mew=0,
color='#1c1e57',
rasterized=True)
# if we're making a overplotfit, then plot the fit over the other stuff
if overplotfit and isinstance(overplotfit, dict):
fitmethod = overplotfit['fittype']
fitchisq = overplotfit['fitchisq']
fitredchisq = overplotfit['fitredchisq']
plotfitmags = overplotfit['fitinfo']['fitmags']
plotfittimes = overplotfit['magseries']['times']
# phase the fit magseries
fitphasedlc = phase_magseries(plotfittimes,
plotfitmags,
varperiod,
varepoch,
wrap=phasewrap,
sort=phasesort)
plotfitphase = fitphasedlc['phase']
plotfitmags = fitphasedlc['mags']
plotfitlabel = (r'%s fit ${\chi}^2/{\mathrm{dof}} = %.3f$' %
                (fitmethod, fitredchisq))
# plot the fit phase and mags
plt.plot(plotfitphase, plotfitmags,'k-',
linewidth=3, rasterized=True,label=plotfitlabel)
plt.legend(loc='upper left', frameon=False)
# flip y axis for mags
if not magsarefluxes:
plot_ylim = plt.ylim()
plt.ylim((plot_ylim[1], plot_ylim[0]))
# set the x axis limit
if not plotxlim:
plot_xlim = plt.xlim()
plt.xlim((npmin(plotphase)-0.1,
npmax(plotphase)+0.1))
else:
plt.xlim((plotxlim[0],plotxlim[1]))
# make a grid
ax = plt.gca()
if isinstance(xgridlines,list):
ax.set_xticks(xgridlines, minor=False)
plt.grid(color='#a9a9a9',
alpha=0.9,
zorder=0,
linewidth=1.0,
linestyle=':')
# make the x and y axis labels
plot_xlabel = 'phase'
if magsarefluxes:
plot_ylabel = 'flux'
else:
plot_ylabel = 'magnitude'
plt.xlabel(plot_xlabel)
plt.ylabel(plot_ylabel)
# fix the yaxis ticks (turns off offset and uses the full
# value of the yaxis tick)
plt.gca().get_yaxis().get_major_formatter().set_useOffset(False)
plt.gca().get_xaxis().get_major_formatter().set_useOffset(False)
# set the plot title
plt.title(plottitle)
# make sure the best period phased LC plot stands out
if (periodind == 0 or periodind == -1) and bestperiodhighlight:
if MPLVERSION >= (2,0,0):
plt.gca().set_facecolor(bestperiodhighlight)
else:
plt.gca().set_axis_bgcolor(bestperiodhighlight)
# if we're making an inset plot showing the full range
if (plotxlim and isinstance(plotxlim, list) and
len(plotxlim) == 2 and xliminsetmode is True):
# bump the ylim of the plot so that the inset can fit in this axes plot
axesylim = plt.gca().get_ylim()
if magsarefluxes:
plt.gca().set_ylim(
axesylim[0],
axesylim[1] + 0.5*npabs(axesylim[1]-axesylim[0])
)
else:
plt.gca().set_ylim(
axesylim[0],
axesylim[1] - 0.5*npabs(axesylim[1]-axesylim[0])
)
# put the inset axes in
inset = inset_axes(plt.gca(), width="40%", height="40%", loc=1)
# make the scatter plot for the phased LC plot
inset.plot(plotphase,
plotmags,
marker='o',
ms=2.0, ls='None',mew=0,
color='gray',
rasterized=True)
if phasebin:
# make the scatter plot for the phased LC plot
inset.plot(binplotphase,
binplotmags,
marker='o',
ms=4.0, ls='None',mew=0,
color='#1c1e57',
rasterized=True)
# show the full phase coverage
if phasewrap:
inset.set_xlim(-0.2,0.8)
else:
inset.set_xlim(-0.1,1.1)
# flip y axis for mags
if not magsarefluxes:
inset_ylim = inset.get_ylim()
inset.set_ylim((inset_ylim[1], inset_ylim[0]))
# set the plot title
inset.text(0.5,0.9,'full phased light curve',
ha='center',va='center',transform=inset.transAxes)
# don't show axes labels or ticks
inset.set_xticks([])
inset.set_yticks([])
# this is the output instance
phasedseriespng = strio()
phasedseriesfig.savefig(phasedseriespng,
# bbox_inches='tight',
pad_inches=0.0, format='png')
plt.close()
# encode the phasedseriespng instance to base64
phasedseriespng.seek(0)
phasedseriesb64 = base64.b64encode(phasedseriespng.read())
# close the stringio buffer
phasedseriespng.close()
# this includes a fitinfo dict if one is provided in overplotfit
retdict = {
'plot':phasedseriesb64,
'period':varperiod,
'epoch':varepoch,
'phase':plotphase,
'phasedmags':plotmags,
'binphase':binplotphase,
'binphasedmags':binplotmags,
'phasewrap':phasewrap,
'phasesort':phasesort,
'phasebin':phasebin,
'minbinelems':minbinelems,
'plotxlim':plotxlim,
'lcfit':overplotfit,
}
# if we're returning stuff directly, i.e. not being used embedded within
# the checkplot_dict function
if directreturn or checkplotdict is None:
return retdict
# this requires the checkplotdict to be present already, we'll just update
# it at the appropriate lspmethod and periodind
else:
if override_pfmethod:
checkplotdict[override_pfmethod][periodind] = retdict
else:
checkplotdict[lspmethod][periodind] = retdict
return checkplotdict
#########################################
## XMATCHING AGAINST EXTERNAL CATALOGS ##
#########################################
def _parse_xmatch_catalog_header(xc, xk):
'''
This parses the header for a catalog file.
'''
catdef = []
# read in this catalog and transparently handle gzipped files
if xc.endswith('.gz'):
infd = gzip.open(xc,'rb')
else:
infd = open(xc,'rb')
# read in the defs
for line in infd:
if line.decode().startswith('#'):
catdef.append(
line.decode().replace('#','').strip().rstrip('\n')
)
if not line.decode().startswith('#'):
break
if len(catdef) == 0:
LOGERROR("catalog definition not parseable "
"for catalog: %s, skipping..." % xc)
return None
catdef = ' '.join(catdef)
catdefdict = json.loads(catdef)
catdefkeys = [x['key'] for x in catdefdict['columns']]
catdefdtypes = [x['dtype'] for x in catdefdict['columns']]
catdefnames = [x['name'] for x in catdefdict['columns']]
catdefunits = [x['unit'] for x in catdefdict['columns']]
# get the correct column indices and dtypes for the requested columns
# from the catdefdict
catcolinds = []
catcoldtypes = []
catcolnames = []
catcolunits = []
for xkcol in xk:
if xkcol in catdefkeys:
xkcolind = catdefkeys.index(xkcol)
catcolinds.append(xkcolind)
catcoldtypes.append(catdefdtypes[xkcolind])
catcolnames.append(catdefnames[xkcolind])
catcolunits.append(catdefunits[xkcolind])
return (infd, catdefdict,
catcolinds, catcoldtypes, catcolnames, catcolunits)
def load_xmatch_external_catalogs(xmatchto, xmatchkeys, outfile=None):
'''This loads the external xmatch catalogs into a dict for use here.
xmatchto is a list of text files that contain each catalog.
the text files must be 'CSVs' that use the '|' character as the separator
between columns. These files should all begin with a header in JSON format on
lines starting with the '#' character. this header will define the catalog
and contains the name of the catalog and the column definitions. Column
definitions must have the column name and the numpy dtype of the columns (in
the same format as that expected for the numpy.genfromtxt function). Any
line that does not begin with '#' is assumed to be part of the columns in
the catalog. An example is shown below.
# {"name":"NSVS catalog of variable stars",
# "columns":[
# {"key":"objectid", "dtype":"U20", "name":"Object ID", "unit": null},
# {"key":"ra", "dtype":"f8", "name":"RA", "unit":"deg"},
# {"key":"decl","dtype":"f8", "name": "Declination", "unit":"deg"},
# {"key":"sdssr","dtype":"f8","name":"SDSS r", "unit":"mag"},
# {"key":"vartype","dtype":"U20","name":"Variable type", "unit":null}
# ],
# "colra":"ra",
# "coldec":"decl",
# "description":"Contains variable stars from the NSVS catalog"}
objectid1 | 45.0 | -20.0 | 12.0 | detached EB
objectid2 | 145.0 | 23.0 | 10.0 | RRab
objectid3 | 12.0 | 11.0 | 14.0 | Cepheid
.
.
.
xmatchkeys is the list of lists of columns to get out of each xmatchto
catalog. this should be the same length as xmatchto and each element here
will apply to the respective file in xmatchto.
if outfile is not None, set this to the name of the pickle to write the
collected xmatch catalogs to. this pickle can then be loaded transparently by
the checkplot_dict, checkplot_pickle functions to provide xmatch info to the
xmatch_external_catalogs function below.
'''
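# usage sketch (the catalog file names and column keys below are hypothetical
# and must match the JSON headers inside the actual catalog files):
#
# xmatchdict = load_xmatch_external_catalogs(
#     ['nsvs-variables.csv', 'gcvs-variables.csv'],
#     [['objectid', 'ra', 'decl', 'vartype'],
#      ['objectid', 'ra', 'decl']],
#     outfile='xmatch-catalogs.pkl'
# )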
outdict = {}
for xc, xk in zip(xmatchto, xmatchkeys):
parsed_catdef = _parse_xmatch_catalog_header(xc, xk)
if not parsed_catdef:
continue
(infd, catdefdict,
catcolinds, catcoldtypes,
catcolnames, catcolunits) = parsed_catdef
# get the specified columns out of the catalog
catarr = np.genfromtxt(infd,
usecols=catcolinds,
names=xk,
dtype=','.join(catcoldtypes),
comments='#',
delimiter='|',
autostrip=True)
infd.close()
catshortname = os.path.splitext(os.path.basename(xc))[0]
catshortname = catshortname.replace('.csv','')
#
# make a kdtree for this catalog
#
# get the ra and decl columns
objra, objdecl = (catarr[catdefdict['colra']],
catarr[catdefdict['coldec']])
# get the xyz unit vectors from ra,decl
cosdecl = np.cos(np.radians(objdecl))
sindecl = np.sin(np.radians(objdecl))
cosra = np.cos(np.radians(objra))
sinra = np.sin(np.radians(objra))
xyz = np.column_stack((cosra*cosdecl,sinra*cosdecl, sindecl))
# generate the kdtree
kdt = cKDTree(xyz,copy_data=True)
# generate the outdict element for this catalog
catoutdict = {'kdtree':kdt,
'data':catarr,
'columns':xk,
'colnames':catcolnames,
'colunits':catcolunits,
'name':catdefdict['name'],
'desc':catdefdict['description']}
outdict[catshortname] = catoutdict
if outfile is not None:
# if we're on OSX, we apparently need to save the file in chunks smaller
# than 2 GB to make it work right. can't load pickles larger than 4 GB
# either, but 3 GB < total size < 4 GB appears to be OK when loading.
# also see: https://bugs.python.org/issue24658.
# fix adopted from: https://stackoverflow.com/a/38003910
if sys.platform == 'darwin':
dumpbytes = pickle.dumps(outdict, protocol=pickle.HIGHEST_PROTOCOL)
max_bytes = 2**31 - 1
with open(outfile, 'wb') as outfd:
for idx in range(0, len(dumpbytes), max_bytes):
outfd.write(dumpbytes[idx:idx+max_bytes])
else:
with open(outfile, 'wb') as outfd:
pickle.dump(outdict, outfd, pickle.HIGHEST_PROTOCOL)
return outfile
else:
return outdict
def xmatch_external_catalogs(checkplotdict,
xmatchinfo,
xmatchradiusarcsec=2.0,
returndirect=False,
updatexmatch=True,
savepickle=None):
'''This matches the current object to the external match catalogs in
xmatchdict.
checkplotdict is the usual checkplot dict. this must contain at least
'objectid', and in the 'objectinfo' subdict: 'ra', and 'decl'. an 'xmatch'
key will be added to this dict, with something like the following dict as
the value:
{'xmatchradiusarcsec':xmatchradiusarcsec,
'catalog1':{'name':'Catalog of interesting things',
'found':True,
'distarcsec':0.7,
'info':{'objectid':...,'ra':...,'decl':...,'desc':...}},
'catalog2':{'name':'Catalog of more interesting things',
'found':False,
'distarcsec':nan,
'info':None},
.
.
.
....}
xmatchinfo is the either a dict produced by load_xmatch_external_catalogs or
the pickle produced by the same function.
xmatchradiusarcsec is the xmatch radius in arcseconds.
NOTE: this modifies checkplotdict IN PLACE if returndirect is False. If it
is True, then just returns the xmatch results as a dict.
If updatexmatch is True, any previous 'xmatch' elements in the checkplotdict
will be added on to instead of being overwritten.
If savepickle is not None, it should be the name of a checkplot pickle file
to write the pickle back to.
'''
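# usage sketch ('cpd' is assumed to be an existing checkplot dict and
# 'xmatch-catalogs.pkl' the pickle written by load_xmatch_external_catalogs;
# 'nsvs-variables' is the hypothetical catalog short name used above):
#
# cpd = xmatch_external_catalogs(cpd, 'xmatch-catalogs.pkl',
#                                xmatchradiusarcsec=3.0)
# cpd['xmatch']['nsvs-variables']['found']  # -> True or False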
# load the xmatch info
if isinstance(xmatchinfo, str) and os.path.exists(xmatchinfo):
with open(xmatchinfo,'rb') as infd:
xmatchdict = pickle.load(infd)
elif isinstance(xmatchinfo, dict):
xmatchdict = xmatchinfo
else:
LOGERROR("can't figure out xmatch info, can't xmatch, skipping...")
return checkplotdict
#
# generate the xmatch spec
#
# get our ra, decl
objra = checkplotdict['objectinfo']['ra']
objdecl = checkplotdict['objectinfo']['decl']
cosdecl = np.cos(np.radians(objdecl))
sindecl = np.sin(np.radians(objdecl))
cosra = np.cos(np.radians(objra))
sinra = np.sin(np.radians(objra))
objxyz = np.column_stack((cosra*cosdecl,
sinra*cosdecl,
sindecl))
# this is the search distance in xyz unit vectors
xyzdist = 2.0 * np.sin(np.radians(xmatchradiusarcsec/3600.0)/2.0)
#
# now search in each external catalog
#
xmatchresults = {}
extcats = sorted(list(xmatchdict.keys()))
for ecat in extcats:
# get the kdtree
kdt = xmatchdict[ecat]['kdtree']
# look up the coordinates
kdt_dist, kdt_ind = kdt.query(objxyz,
k=1,
distance_upper_bound=xyzdist)
# sort by matchdist
mdsorted = np.argsort(kdt_dist)
matchdists = kdt_dist[mdsorted]
matchinds = kdt_ind[mdsorted]
if matchdists[np.isfinite(matchdists)].size == 0:
xmatchresults[ecat] = {'name':xmatchdict[ecat]['name'],
'desc':xmatchdict[ecat]['desc'],
'found':False,
'distarcsec':None,
'info':None}
else:
for md, mi in zip(matchdists, matchinds):
if np.isfinite(md) and md < xyzdist:
infodict = {}
distarcsec = _xyzdist_to_distarcsec(md)
for col in xmatchdict[ecat]['columns']:
coldata = xmatchdict[ecat]['data'][col][mi]
if isinstance(coldata, str):
coldata = coldata.strip()
infodict[col] = coldata
xmatchresults[ecat] = {
'name':xmatchdict[ecat]['name'],
'desc':xmatchdict[ecat]['desc'],
'found':True,
'distarcsec':distarcsec,
'info':infodict,
'colkeys':xmatchdict[ecat]['columns'],
'colnames':xmatchdict[ecat]['colnames'],
'colunit':xmatchdict[ecat]['colunits'],
}
break
#
# should now have match results for all external catalogs
#
if returndirect:
return xmatchresults
else:
if updatexmatch and 'xmatch' in checkplotdict:
checkplotdict['xmatch'].update(xmatchresults)
else:
checkplotdict['xmatch'] = xmatchresults
if savepickle:
cpf = _write_checkplot_picklefile(checkplotdict,
outfile=savepickle,
protocol=4)
return cpf
else:
return checkplotdict
########################
## READ/WRITE PICKLES ##
########################
def _write_checkplot_picklefile(checkplotdict,
outfile=None,
protocol=2,
outgzip=False):
'''This writes the checkplotdict to a (gzipped) pickle file.
If outfile is None, writes a (gzipped) pickle file of the form:
checkplot-{objectid}.pkl(.gz)
to the current directory.
protocol sets the pickle protocol:
3 -> default in Python 3 - faster, but can't be read by Python 2
2 -> highest protocol supported by Python 2 - slower, but readable by
     both Python 2 and 3
the default protocol is 2 so that pickle files generated by newer Pythons
can still be read by older ones. if this isn't a concern, set protocol to 3.
'''
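# usage sketch ('cpd' is assumed to be a checkplot dict produced by
# checkplot_dict below; the output path is hypothetical):
#
# cpfpath = _write_checkplot_picklefile(cpd,
#                                       outfile='checkplot-OBJ-0001.pkl',
#                                       protocol=3)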
if outgzip:
if not outfile:
outfile = (
'checkplot-{objectid}.pkl.gz'.format(
objectid=checkplotdict['objectid']
)
)
with gzip.open(outfile,'wb') as outfd:
pickle.dump(checkplotdict,outfd,protocol=protocol)
else:
if not outfile:
outfile = (
'checkplot-{objectid}.pkl'.format(
objectid=checkplotdict['objectid']
)
)
# make sure to do the right thing if '.gz' is in the filename but
# outgzip was False
if outfile.endswith('.gz'):
LOGWARNING('output filename ends with .gz but kwarg outgzip=False. '
'will use gzip to compress the output pickle')
with gzip.open(outfile,'wb') as outfd:
pickle.dump(checkplotdict,outfd,protocol=protocol)
else:
with open(outfile,'wb') as outfd:
pickle.dump(checkplotdict,outfd,protocol=protocol)
return os.path.abspath(outfile)
def _read_checkplot_picklefile(checkplotpickle):
'''This reads a checkplot pickle file (gzipped or not) back into a dict.
NOTE: the try-except is for Python 2 pickles that have numpy arrays in
them. Apparently, these aren't compatible with Python 3. See here:
http://stackoverflow.com/q/11305790
The workaround is noted in this answer:
http://stackoverflow.com/a/41366785
But not sure how robust this is. We should probably move to another format
for these checkplots.
'''
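# usage sketch (the pickle path is hypothetical):
#
# cpd = _read_checkplot_picklefile('checkplot-OBJ-0001.pkl.gz')
# cpd['objectid'], cpd['status']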
if checkplotpickle.endswith('.gz'):
try:
with gzip.open(checkplotpickle,'rb') as infd:
cpdict = pickle.load(infd)
except UnicodeDecodeError:
with gzip.open(checkplotpickle,'rb') as infd:
cpdict = pickle.load(infd, encoding='latin1')
LOGWARNING('pickle %s was probably from Python 2 '
'and failed to load without using "latin1" encoding. '
'This is probably a numpy issue: '
'http://stackoverflow.com/q/11305790' % checkplotpickle)
else:
try:
with open(checkplotpickle,'rb') as infd:
cpdict = pickle.load(infd)
except UnicodeDecodeError:
with open(checkplotpickle,'rb') as infd:
cpdict = pickle.load(infd, encoding='latin1')
LOGWARNING('pickle %s was probably from Python 2 '
'and failed to load without using "latin1" encoding. '
'This is probably a numpy issue: '
'http://stackoverflow.com/q/11305790' % checkplotpickle)
return cpdict
#############################
## CHECKPLOT DICT FUNCTION ##
#############################
def checkplot_dict(lspinfolist,
times,
mags,
errs,
magsarefluxes=False,
nperiodstouse=3,
objectinfo=None,
varinfo=None,
getvarfeatures=True,
lclistpkl=None,
nbrradiusarcsec=60.0,
xmatchinfo=None,
xmatchradiusarcsec=3.0,
lcfitfunc=None,
lcfitparams={},
externalplots=None,
findercmap='gray_r',
finderconvolve=None,
findercachedir='~/.astrobase/stamp-cache',
normto='globalmedian',
normmingap=4.0,
sigclip=4.0,
varepoch='min',
phasewrap=True,
phasesort=True,
phasebin=0.002,
minbinelems=7,
plotxlim=[-0.8,0.8],
xliminsetmode=False,
plotdpi=100,
bestperiodhighlight=None,
xgridlines=None,
mindet=1000,
verbose=True):
'''This writes a multiple lspinfo checkplot to a dict.
This function can take input from multiple lspinfo dicts (e.g. a list of
output dicts or gzipped pickles of dicts from independent runs of BLS, PDM,
AoV, or GLS period-finders in periodbase).
NOTE: if lspinfolist contains more than one lspinfo object with the same
lspmethod ('pdm','gls','sls','aov','bls'), the latest one in the list will
overwrite the earlier ones.
The output dict contains all the plots (magseries and phased
magseries), periodograms, object information, variability information, light
curves, and phased light curves. This can be written to:
- a pickle with checkplot.checkplot_pickle below
- a PNG with checkplot.checkplot_dict_png below
All kwargs are the same as for checkplot_png, except for the following:
nperiodstouse controls how many 'best' periods to make phased LC plots
for. By default, this is the 3 best. If this is set to None, all 'best'
periods present in each lspinfo dict's 'nbestperiods' key will be plotted
(this is 5 according to periodbase functions' defaults).
varinfo is a dictionary with the following keys:
{'objectisvar': True if object is time-variable,
'vartags': list of variable type tags (strings),
'varisperiodic': True if object is a periodic variable,
'varperiod': variability period of the object,
'varepoch': epoch of variability in JD}
if varinfo is None, an initial empty dictionary of this form will be created
and written to the output pickle. This can be later updated using
checkplotviewer.py, etc.
If getvarfeatures is True, will use the function
varbase.features.all_nonperiodic_features to calculate several light curve
features such as the median, MAD, Stetson J index, CDPP, percentiles, etc.
lcfitfunc is a Python function that is used to fit a model to the light
curve. This is then overplotted for each phased light curve in the
checkplot. This function should have the following signature:
def lcfitfunc(times, mags, errs, period, **lcfitparams)
where lcfitparams encapsulates all external parameters (i.e. number of knots
for a spline function, the degree of a Legendre polynomial fit, etc.) This
function should return a Python dict with the following structure (similar
to the functions in astrobase.varbase.lcfit) and at least the keys below:
{'fittype':<str: name of fit method>,
'fitchisq':<float: the chi-squared value of the fit>,
'fitredchisq':<float: the reduced chi-squared value of the fit>,
'fitinfo':{'fitmags':<ndarray: model mags or fluxes from fit function>},
'magseries':{'times':<ndarray: times at which the fitmags are evaluated>}}
additional keys can include ['fitinfo']['finalparams'] for the final model
fit parameters (this will be used by the checkplotserver if present),
['fitinfo']['fitepoch'] for the minimum light epoch returned by the model
fit, among others. in any case, the output dict of lcfitfunc will be copied
to the output checkplot pickle's ['lcfit'][<fittype>] key:val dict for each
phased light curve.
externalplots is a list of 4-element tuples containing:
1. path to PNG of periodogram from an external period-finding method
2. path to PNG of best period phased light curve from external period-finder
3. path to PNG of 2nd-best phased light curve from external period-finder
4. path to PNG of 3rd-best phased light curve from external period-finder
This can be used to incorporate external period-finding method results into
the output checkplot pickle or exported PNG to allow for comparison with
astrobase results.
example of externalplots:
extrarows = [('/path/to/external/bls-periodogram.png',
'/path/to/external/bls-phasedlc-plot-bestpeak.png',
'/path/to/external/bls-phasedlc-plot-peak2.png',
'/path/to/external/bls-phasedlc-plot-peak3.png'),
('/path/to/external/pdm-periodogram.png',
'/path/to/external/pdm-phasedlc-plot-bestpeak.png',
'/path/to/external/pdm-phasedlc-plot-peak2.png',
'/path/to/external/pdm-phasedlc-plot-peak3.png'),
...]
If externalplots is provided, the checkplot_pickle_to_png function below
will automatically retrieve these plot PNGs and put them into the exported
checkplot PNG.
sigclip is either a single float or a list of two floats. in the first case,
the sigclip is applied symmetrically. in the second case, the first sigclip
in the list is applied to +ve magnitude deviations (fainter) and the second
sigclip in the list is applied to -ve magnitude deviations (brighter).
An example list would be `[10.,3.]` (for 10 sigma dimmings, 3 sigma
brightenings).
bestperiodhighlight sets whether user wants a background on the phased light
curve from each periodogram type to distinguish them from others. this is an
HTML hex color specification. If this is None, no highlight will be added.
xgridlines (default None) can be a list, e.g., [-0.5,0.,0.5] that sets the
x-axis grid lines on plotted phased LCs for easy visual identification of
important features.
xliminsetmode = True sets up the phased mag series plot to show a zoomed-in
portion (set by plotxlim) as the main plot and an inset version of the full
phased light curve from phase 0.0 to 1.0. This can be useful if searching
for small dips near phase 0.0 caused by planetary transits for example.
'''
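# usage sketch (the period-finder result pickles and the times, mags, errs
# arrays are hypothetical inputs; spline_fit_magseries is shown only as an
# example of a function with an lcfitfunc-compatible signature, and
# 'knotfraction' is assumed to be one of its keyword arguments):
#
# cpd = checkplot_dict(['gls-result.pkl', 'bls-result.pkl'],
#                      times, mags, errs,
#                      objectinfo={'objectid': 'OBJ-0001',
#                                  'ra': 123.456, 'decl': -23.456},
#                      lcfitfunc=spline_fit_magseries,
#                      lcfitparams={'knotfraction': 0.01})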
# 0. get the objectinfo and finder chart and initialize the checkplotdict
checkplotdict = _pkl_finder_objectinfo(objectinfo,
varinfo,
findercmap,
finderconvolve,
sigclip,
normto,
normmingap,
lclistpkl=lclistpkl,
nbrradiusarcsec=nbrradiusarcsec,
plotdpi=plotdpi,
verbose=verbose,
findercachedir=findercachedir)
# if an objectinfo dict is absent, we'll generate a fake objectid based on
# elements 5-9 of the time and mag arrays (times[5:10], mags[5:10]). this
# keeps the ID stable across repeated runs of this function with the same
# times, mags, errs, while still providing enough uniqueness across different
# times/mags array inputs. this is all done so we can still save checkplots
# correctly to pickles after reviewing them using checkplotserver
# first, try to get the objectid from the objectinfo kwarg
if (objectinfo and isinstance(objectinfo, dict) and
'objectid' in objectinfo and objectinfo['objectid']):
checkplotdict['objectid'] = objectinfo['objectid']
# if that doesn't work, generate an objectid from a hash of the time/mag arrays
if checkplotdict['objectid'] is None:
try:
objuuid = hashlib.sha512(times[5:10].tostring() +
mags[5:10].tostring()).hexdigest()[:5]
except Exception as e:
LOGWARNING('times, mags, and errs may have too few items')
objuuid = hashlib.sha512(times.tostring() +
mags.tostring()).hexdigest()[:5]
LOGWARNING('no objectid provided in objectinfo keyword arg, '
'generated from times[5:10] + mags[5:10]: %s' % objuuid)
checkplotdict['objectid'] = objuuid
# filter the input times, mags, errs; do sigclipping and normalization
stimes, smags, serrs = sigclip_magseries(times,
mags,
errs,
magsarefluxes=magsarefluxes,
sigclip=sigclip)
# fail early if not enough light curve points
if ((stimes is None) or (smags is None) or (serrs is None) or
(stimes.size < 49) or (smags.size < 49) or (serrs.size < 49)):
LOGERROR("one or more of times, mags, errs appear to be None "
"after sig-clipping. are the measurements all nan? "
"can't make a checkplot for this objectid: %s" %
checkplotdict['objectid'])
checkplotdict['magseries'] = None
checkplotdict['status'] = 'failed: LC points appear to be all nan'
return checkplotdict
# this may fix some unpickling issues for astropy.table.Column objects
# we convert them back to ndarrays
if isinstance(stimes, astcolumn):
stimes = stimes.data
LOGWARNING('times is an astropy.table.Column object, '
'changing to numpy array because of '
'potential unpickling issues')
if isinstance(smags, astcolumn):
smags = smags.data
LOGWARNING('mags is an astropy.table.Column object, '
'changing to numpy array because of '
'potential unpickling issues')
if isinstance(serrs, astcolumn):
serrs = serrs.data
LOGWARNING('errs is an astropy.table.Column object, '
'changing to numpy array because of '
'potential unpickling issues')
# report on how sigclip went
if verbose:
LOGINFO('sigclip = %s: before = %s observations, '
'after = %s observations' %
(sigclip, len(times), len(stimes)))
# take care of the normalization
if normto is not False:
stimes, smags = normalize_magseries(stimes, smags,
normto=normto,
magsarefluxes=magsarefluxes,
mingap=normmingap)
# make sure we have some lightcurve points to plot after sigclip
if len(stimes) > mindet:
# 1. get the mag series plot using these filtered stimes, smags, serrs
magseriesdict = _pkl_magseries_plot(stimes, smags, serrs,
plotdpi=plotdpi,
magsarefluxes=magsarefluxes)
# update the checkplotdict
checkplotdict.update(magseriesdict)
# 2. for each lspinfo in lspinfolist, read it in (from pkl or pkl.gz
# if necessary), make the periodogram, make the phased mag series plots
# for each of the nbestperiods in each lspinfo dict
checkplot_pfmethods = []
for lspind, lspinfo in enumerate(lspinfolist):
# get the LSP from a pickle file transparently
if isinstance(lspinfo,str) and os.path.exists(lspinfo):
LOGINFO('loading LSP info from pickle %s' % lspinfo)
if '.gz' in lspinfo:
with gzip.open(lspinfo,'rb') as infd:
lspinfo = pickle.load(infd)
else:
with open(lspinfo,'rb') as infd:
lspinfo = pickle.load(infd)
# make the periodogram first
# we'll prepend the lspmethod index to allow for multiple same
# lspmethods
override_pfmethod = '%s-%s' % (lspind, lspinfo['method'])
periodogramdict = _pkl_periodogram(
lspinfo,
plotdpi=plotdpi,
override_pfmethod=override_pfmethod
)
# update the checkplotdict.
checkplotdict.update(periodogramdict)
# now, make the phased light curve plots for each of the
# nbestperiods from this periodogram
for nbpind, nbperiod in enumerate(
lspinfo['nbestperiods'][:nperiodstouse]
):
# if there's a function to use for fitting, do the fit
if lcfitfunc:
try:
overplotfit = lcfitfunc(stimes,
smags,
serrs,
nbperiod,
**lcfitparams)
except Exception as e:
LOGEXCEPTION('the light curve fitting function '
'failed, not plotting a fit over the '
'phased light curve')
overplotfit = None
else:
overplotfit = None
# this updates things as it runs
checkplotdict = _pkl_phased_magseries_plot(
checkplotdict,
lspinfo['method'],
nbpind,
stimes, smags, serrs,
nbperiod, varepoch,
phasewrap=phasewrap,
phasesort=phasesort,
phasebin=phasebin,
minbinelems=minbinelems,
plotxlim=plotxlim,
overplotfit=overplotfit,
plotdpi=plotdpi,
bestperiodhighlight=bestperiodhighlight,
magsarefluxes=magsarefluxes,
xliminsetmode=xliminsetmode,
xgridlines=xgridlines,
verbose=verbose,
override_pfmethod=override_pfmethod,
)
# if there's an snr key for this lspmethod, add the info in it to
# the checkplotdict as well
if 'snr' in lspinfo:
checkplotdict[override_pfmethod]['snr'] = (
lspinfo['snr']
)
if 'altsnr' in lspinfo:
checkplotdict[override_pfmethod]['altsnr'] = (
lspinfo['altsnr']
)
if 'transitdepth' in lspinfo:
checkplotdict[override_pfmethod]['transitdepth'] = (
lspinfo['transitdepth']
)
if 'transitduration' in lspinfo:
checkplotdict[override_pfmethod]['transitduration'] = (
lspinfo['transitduration']
)
checkplot_pfmethods.append(override_pfmethod)
#
# end of processing each pfmethod
#
## update the checkplot dict with some other stuff that's needed by
## checkplotserver
# 3. add a comments key:val
checkplotdict['comments'] = None
# 4. calculate some variability features
if getvarfeatures is True:
checkplotdict['varinfo']['features'] = all_nonperiodic_features(
stimes,
smags,
serrs,
magsarefluxes=magsarefluxes,
)
# 5. add a signals key:val. this will be used by checkplotserver's
# pre-whitening and masking functions. these will write to
# checkplotdict['signals']['whiten'] and
# checkplotdict['signals']['mask'] respectively.
checkplotdict['signals'] = {}
# 6. add any externalplots if we have them
checkplotdict['externalplots'] = []
if (externalplots and
isinstance(externalplots, list) and
len(externalplots) > 0):
for externalrow in externalplots:
if all(os.path.exists(erowfile) for erowfile in externalrow):
if verbose:
LOGINFO('adding external plots: %s to checkplot dict' %
repr(externalrow))
checkplotdict['externalplots'].append(externalrow)
else:
LOGWARNING('could not add some external '
'plots in: %s to checkplot dict'
% repr(externalrow))
# 7. do any xmatches required
if xmatchinfo is not None:
checkplotdict = xmatch_external_catalogs(
checkplotdict,
xmatchinfo,
xmatchradiusarcsec=xmatchradiusarcsec
)
# the checkplotdict now contains everything we need
contents = sorted(list(checkplotdict.keys()))
checkplotdict['status'] = 'ok: contents are %s' % contents
if verbose:
LOGINFO('checkplot dict complete for %s' %
checkplotdict['objectid'])
LOGINFO('checkplot dict contents: %s' % contents)
# 8. update the pfmethods key
checkplotdict['pfmethods'] = checkplot_pfmethods
# otherwise, we don't have enough LC points, return nothing
else:
LOGERROR('not enough light curve points for %s, have %s, need %s' %
(checkplotdict['objectid'],len(stimes),mindet))
checkplotdict['magseries'] = None
checkplotdict['status'] = 'failed: not enough LC points'
# at the end, return the dict
return checkplotdict
################################
## CHECKPLOT PICKLE FUNCTIONS ##
################################
def checkplot_pickle(lspinfolist,
times,
mags,
errs,
magsarefluxes=False,
nperiodstouse=3,
objectinfo=None,
lcfitfunc=None,
lcfitparams={},
varinfo=None,
getvarfeatures=True,
lclistpkl=None,
nbrradiusarcsec=60.0,
xmatchinfo=None,
xmatchradiusarcsec=3.0,
externalplots=None,
findercmap='gray_r',
finderconvolve=None,
findercachedir='~/.astrobase/stamp-cache',
normto='globalmedian',
normmingap=4.0,
outfile=None,
outgzip=False,
sigclip=4.0,
varepoch='min',
phasewrap=True,
phasesort=True,
phasebin=0.002,
minbinelems=7,
plotxlim=[-0.8,0.8],
xliminsetmode=False,
plotdpi=100,
returndict=False,
pickleprotocol=None,
bestperiodhighlight=None,
xgridlines=None,
mindet=1000,
verbose=True):
'''This writes a multiple lspinfo checkplot to a (gzipped) pickle file.
This function can take input from multiple lspinfo dicts (e.g. a list of
output dicts or gzipped pickles of dicts from independent runs of BLS, PDM,
AoV, or GLS period-finders in periodbase).
NOTE: if lspinfolist contains more than one lspinfo object with the same
lspmethod ('pdm','gls','sls','aov','bls'), the latest one in the list will
overwrite the earlier ones.
The output pickle contains all the plots (magseries and phased magseries),
periodograms, object information, variability information, light curves, and
phased light curves. The pickle produced by this function can be used with
an external viewer app (e.g. checkplotserver.py), or by using the
checkplot_pickle_to_png function below.
All kwargs are the same as for checkplot_png, except for the following:
nperiodstouse controls how many 'best' periods to make phased LC plots
for. By default, this is the 3 best. If this is set to None, all 'best'
periods present in each lspinfo dict's 'nbestperiods' key will be plotted
(this is 5 according to periodbase functions' defaults).
varinfo is a dictionary with the following keys:
{'objectisvar': True if object is time-variable,
'vartags': list of variable type tags (strings),
'varisperiodic': True if object is a periodic variable,
'varperiod': variability period of the object,
'varepoch': epoch of variability in JD}
if varinfo is None, an initial empty dictionary of this form will be created
and written to the output pickle. This can be later updated using
checkplotviewer.py, etc.
If getvarfeatures is True, will use the function
varbase.features.all_nonperiodic_features to calculate several light curve
features such as the median, MAD, Stetson J index, CDPP, percentiles, etc.
lcfitfunc is a Python function that is used to fit a model to the light
curve. This is then overplotted for each phased light curve in the
checkplot. This function should have the following signature:
def lcfitfunc(times, mags, errs, period, **lcfitparams)
where lcfitparams encapsulates all external parameters (i.e. number of knots
for a spline function, the degree of a Legendre polynomial fit, etc.) This
function should return a Python dict with the following structure (similar
to the functions in astrobase.varbase.lcfit) and at least the keys below:
{'fittype':<str: name of fit method>,
'fitchisq':<float: the chi-squared value of the fit>,
'fitredchisq':<float: the reduced chi-squared value of the fit>,
'fitinfo':{'fitmags':<ndarray: model mags or fluxes from fit function>},
'magseries':{'times':<ndarray: times at which the fitmags are evaluated>}}
additional keys can include ['fitinfo']['finalparams'] for the final model
fit parameters, ['fitinfo']['fitepoch'] for the minimum light epoch returned
by the model fit, among others. the output dict of lcfitfunc will be copied
to the output checkplot dict's ['fitinfo'][<fittype>] key:val dict.
externalplots is a list of 4-element tuples containing:
1. path to PNG of periodogram from a external period-finding method
2. path to PNG of best period phased light curve from external period-finder
3. path to PNG of 2nd-best phased light curve from external period-finder
4. path to PNG of 3rd-best phased light curve from external period-finder
This can be used to incorporate external period-finding method results into
the output checkplot pickle or exported PNG to allow for comparison with
astrobase results.
example of externalplots:
extrarows = [('/path/to/external/bls-periodogram.png',
'/path/to/external/bls-phasedlc-plot-bestpeak.png',
'/path/to/external/bls-phasedlc-plot-peak2.png',
'/path/to/external/bls-phasedlc-plot-peak3.png'),
('/path/to/external/pdm-periodogram.png',
'/path/to/external/pdm-phasedlc-plot-bestpeak.png',
'/path/to/external/pdm-phasedlc-plot-peak2.png',
'/path/to/external/pdm-phasedlc-plot-peak3.png'),
...]
If externalplots is provided, the checkplot_pickle_to_png function below
will automatically retrieve these plot PNGs and put them into the exported
checkplot PNG.
sigclip is either a single float or a list of two floats. in the first case,
the sigclip is applied symmetrically. in the second case, the first sigclip
in the list is applied to +ve magnitude deviations (fainter) and the second
sigclip in the list is applied to -ve magnitude deviations (brighter).
An example list would be `[10.,3.]` (for 10 sigma dimmings, 3 sigma
brightenings).
bestperiodhighlight sets whether user wants a background on the phased light
curve from each periodogram type to distinguish them from others. this is an
HTML hex color specification. If this is None, no highlight will be added.
xgridlines (default None) can be a list, e.g., [-0.5,0.,0.5] that sets the
x-axis grid lines on plotted phased LCs for easy visual identification of
important features.
xliminsetmode = True sets up the phased mag series plot to show a zoomed-in
portion (set by plotxlim) as the main plot and an inset version of the full
phased light curve from phase 0.0 to 1.0. This can be useful if searching
for small dips near phase 0.0 caused by planetary transits for example.
outgzip controls whether to gzip the output pickle. it turns out that this
is the slowest bit in the output process, so if you're after speed, best not
to use this. this is False by default since it turns out that gzip actually
doesn't save that much space (29 MB vs. 35 MB for the average checkplot
pickle).
'''
if outgzip:
# generate the outfile filename
if not outfile and isinstance(lspinfolist[0],str):
plotfpath = os.path.join(
os.path.dirname(lspinfolist[0]),
'checkplot-%s.pkl.gz' % (
os.path.basename(
lspinfolist[0].replace('.pkl','').replace('.gz','')
)
)
)
elif outfile:
plotfpath = outfile
else:
plotfpath = 'checkplot.pkl.gz'
else:
# generate the outfile filename
if not outfile and isinstance(lspinfolist[0],str):
plotfpath = os.path.join(
os.path.dirname(lspinfolist[0]),
'checkplot-%s.pkl' % (
os.path.basename(
lspinfolist[0].replace('.pkl','').replace('.gz','')
)
)
)
elif outfile:
plotfpath = outfile
else:
plotfpath = 'checkplot.pkl'
# call checkplot_dict for most of the work
checkplotdict = checkplot_dict(
lspinfolist,
times,
mags,
errs,
magsarefluxes=magsarefluxes,
nperiodstouse=nperiodstouse,
objectinfo=objectinfo,
varinfo=varinfo,
getvarfeatures=getvarfeatures,
lclistpkl=lclistpkl,
nbrradiusarcsec=nbrradiusarcsec,
xmatchinfo=xmatchinfo,
xmatchradiusarcsec=xmatchradiusarcsec,
lcfitfunc=lcfitfunc,
lcfitparams=lcfitparams,
externalplots=externalplots,
findercmap=findercmap,
finderconvolve=finderconvolve,
findercachedir=findercachedir,
normto=normto,
normmingap=normmingap,
sigclip=sigclip,
varepoch=varepoch,
phasewrap=phasewrap,
phasesort=phasesort,
phasebin=phasebin,
minbinelems=minbinelems,
plotxlim=plotxlim,
xliminsetmode=xliminsetmode,
plotdpi=plotdpi,
bestperiodhighlight=bestperiodhighlight,
xgridlines=xgridlines,
mindet=mindet,
verbose=verbose
)
# figure out which protocol to use
# for Python >= 3.4; use v3
if ((sys.version_info[0:2] >= (3,4) and not pickleprotocol) or
(pickleprotocol == 3)):
pickleprotocol = 3
if verbose:
LOGWARNING('the output pickle uses protocol v3 '
'which IS NOT backwards compatible with Python 2.7')
# for Python == 2.7; use v2
elif sys.version_info[0:2] == (2,7) and not pickleprotocol:
pickleprotocol = 2
# otherwise, if left unspecified, use the slowest but most compatible
# protocol. this will be readable by all (most?) Pythons
elif not pickleprotocol:
pickleprotocol = 0
# write the completed checkplotdict to a gzipped pickle
picklefname = _write_checkplot_picklefile(checkplotdict,
outfile=plotfpath,
protocol=pickleprotocol,
outgzip=outgzip)
# at the end, return the dict and filename if asked for
if returndict:
if verbose:
LOGINFO('checkplot done -> %s' % picklefname)
return checkplotdict, picklefname
# otherwise, just return the filename
else:
# just to make sure: free up space
del checkplotdict
if verbose:
LOGINFO('checkplot done -> %s' % picklefname)
return picklefname
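# Illustrative sketch only (not part of astrobase): a minimal lcfitfunc that
# satisfies the interface documented in the checkplot_pickle docstring above.
# The function name, the legendredeg kwarg, and the 'example-legendre' fittype
# are hypothetical; numpy is assumed to be available as np, as it is elsewhere
# in this module.
def _example_legendre_lcfitfunc(times, mags, errs, period, legendredeg=10):
    '''Fits a Legendre series to the phased light curve (sketch only).'''
    # phase the light curve on the given period, using the first epoch as t0
    phase = ((times - times.min()) / period) % 1.0
    sortind = np.argsort(phase)
    sphase, smags, serrs = phase[sortind], mags[sortind], errs[sortind]
    # least-squares fit of a Legendre series to the phased mags
    coeffs = np.polynomial.legendre.legfit(sphase, smags, legendredeg)
    fitmags = np.polynomial.legendre.legval(sphase, coeffs)
    fitchisq = np.sum(((fitmags - smags)*(fitmags - smags))/(serrs*serrs))
    fitredchisq = fitchisq/(len(smags) - legendredeg - 1)
    # return the minimal structure expected by checkplot_dict/checkplot_pickle
    return {'fittype': 'example-legendre',
            'fitchisq': fitchisq,
            'fitredchisq': fitredchisq,
            'fitinfo': {'fitmags': fitmags, 'finalparams': coeffs},
            'magseries': {'times': times[sortind]}}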
def checkplot_pickle_update(currentcp, updatedcp,
outfile=None,
outgzip=False,
pickleprotocol=None,
verbose=True):
'''This updates the current checkplot dict with updated values provided.
current is either a checkplot dict produced by checkplot_pickle above or a
gzipped pickle file produced by the same function. updated is a dict or
pickle file with the same format as current.
Writes out the new checkplot gzipped pickle file to outfile. If current is a
file, updates it in place if outfile is None. Mostly only useful for
checkplotserver.py.
'''
# generate the outfile filename
if not outfile and isinstance(currentcp,str):
plotfpath = currentcp
elif outfile:
plotfpath = outfile
elif isinstance(currentcp, dict) and currentcp['objectid']:
if outgzip:
plotfpath = 'checkplot-%s.pkl.gz' % currentcp['objectid']
else:
plotfpath = 'checkplot-%s.pkl' % currentcp['objectid']
else:
# we'll get this later below
plotfpath = None
# break out python 2.7 and > 3 nonsense
if sys.version_info[:2] > (3,2):
if (isinstance(currentcp, str) and os.path.exists(currentcp)):
cp_current = _read_checkplot_picklefile(currentcp)
elif isinstance(currentcp, dict):
cp_current = currentcp
else:
LOGERROR('currentcp: %s of type %s is not a '
'valid checkplot filename (or does not exist), or a dict' %
(os.path.abspath(currentcp), type(currentcp)))
return None
if (isinstance(updatedcp, str) and os.path.exists(updatedcp)):
cp_updated = _read_checkplot_picklefile(updatedcp)
elif isinstance(updatedcp, dict):
cp_updated = updatedcp
else:
LOGERROR('updatedcp: %s of type %s is not a '
'valid checkplot filename (or does not exist), or a dict' %
(os.path.abspath(updatedcp), type(updatedcp)))
return None
# check for unicode in python 2.7
else:
# get the current checkplotdict
if ((isinstance(currentcp, str) or isinstance(currentcp, unicode))
and os.path.exists(currentcp)):
cp_current = _read_checkplot_picklefile(currentcp)
elif isinstance(currentcp,dict):
cp_current = currentcp
else:
LOGERROR('currentcp: %s of type %s is not a '
'valid checkplot filename (or does not exist), or a dict' %
(os.path.abspath(currentcp), type(currentcp)))
return None
# get the updated checkplotdict
if ((isinstance(updatedcp, str) or isinstance(updatedcp, unicode))
and os.path.exists(updatedcp)):
cp_updated = _read_checkplot_picklefile(updatedcp)
elif isinstance(updatedcp, dict):
cp_updated = updatedcp
else:
LOGERROR('updatedcp: %s of type %s is not a '
'valid checkplot filename (or does not exist), or a dict' %
(os.path.abspath(updatedcp), type(updatedcp)))
return None
# do the update using python's dict update mechanism
# this requires updated to be in the same checkplotdict format as current
# all keys in current will now be from updated
cp_current.update(cp_updated)
# figure out the plotfpath if we haven't by now
if not plotfpath and outgzip:
plotfpath = 'checkplot-%s.pkl.gz' % cp_current['objectid']
elif (not plotfpath) and (not outgzip):
plotfpath = 'checkplot-%s.pkl' % cp_current['objectid']
# make sure we write the correct postfix
if plotfpath.endswith('.gz'):
outgzip = True
# figure out which protocol to use
    # for Python >= 3.4; use v3 by default
if ((sys.version_info[0:2] >= (3,4) and not pickleprotocol) or
(pickleprotocol > 2)):
pickleprotocol = 3
if verbose:
LOGWARNING('the output pickle uses protocol v3 '
'which IS NOT backwards compatible with Python 2.7')
# for Python == 2.7; use v2
elif sys.version_info[0:2] == (2,7) and not pickleprotocol:
pickleprotocol = 2
# otherwise, if left unspecified, use the slowest but most compatible
# protocol. this will be readable by all (most?) Pythons
elif not pickleprotocol:
pickleprotocol = 0
# write the new checkplotdict
return _write_checkplot_picklefile(cp_current,
outfile=plotfpath,
outgzip=outgzip,
protocol=pickleprotocol)
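# Illustrative usage sketch (the pickle filename and values are hypothetical): merge
# reviewed variability info back into an existing checkplot pickle in place:
#
# reviewed = {'varinfo': {'objectisvar': True, 'varisperiodic': True,
#                         'varperiod': 1.2345, 'varepoch': 2455000.5,
#                         'vartags': 'detached EB'}}
# checkplot_pickle_update('checkplot-HAT-123-0001234.pkl', reviewed)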
def checkplot_pickle_to_png(checkplotin,
outfile,
extrarows=None):
'''This reads the pickle provided, and writes out a PNG.
checkplotin is either a checkplot dict produced by checkplot_pickle above or
a pickle file produced by the same function.
The PNG has 4 x N tiles, as below:
[ finder ] [ objectinfo ] [ varinfo/comments ] [ unphased LC ]
[ periodogram1 ] [ phased LC P1 ] [ phased LC P2 ] [ phased LC P3 ]
[ periodogram2 ] [ phased LC P1 ] [ phased LC P2 ] [ phased LC P3 ]
.
.
[ periodogramN ] [ phased LC P1 ] [ phased LC P2 ] [ phased LC P3 ]
for N independent period-finding methods producing:
- periodogram1,2,3...N: the periodograms from each method
- phased LC P1,P2,P3: the phased lightcurves using the best 3 peaks in each
periodogram
outfile is the output PNG file to generate.
extrarows is a list of 4-element tuples containing paths to PNG files that
will be added to the end of the rows generated from the checkplotin
pickle/dict. Each tuple represents a row in the final output PNG file. If
there are less than 4 elements per tuple, the missing elements will be
filled in with white-space. If there are more than 4 elements per tuple,
only the first four will be used.
The purpose of this kwarg is to incorporate periodograms and phased LC plots
(in the form of PNGs) generated from an external period-finding function or
program (like vartools) to allow for comparison with astrobase results.
Each external PNG will be resized to 750 x 480 pixels to fit into an output
image cell.
By convention, each 4-element tuple should contain:
a periodiogram PNG
phased LC PNG with 1st best peak period from periodogram
phased LC PNG with 2nd best peak period from periodogram
phased LC PNG with 3rd best peak period from periodogram
example of extrarows:
extrarows = [('/path/to/external/bls-periodogram.png',
'/path/to/external/bls-phasedlc-plot-bestpeak.png',
'/path/to/external/bls-phasedlc-plot-peak2.png',
'/path/to/external/bls-phasedlc-plot-peak3.png'),
('/path/to/external/pdm-periodogram.png',
'/path/to/external/pdm-phasedlc-plot-bestpeak.png',
'/path/to/external/pdm-phasedlc-plot-peak2.png',
'/path/to/external/pdm-phasedlc-plot-peak3.png'),
...]
'''
# figure out if the checkplotpickle is a filename
# python 3
if sys.version_info[:2] > (3,2):
if (isinstance(checkplotin, str) and os.path.exists(checkplotin)):
cpd = _read_checkplot_picklefile(checkplotin)
elif isinstance(checkplotin, dict):
cpd = checkplotin
else:
LOGERROR('checkplotin: %s of type %s is not a '
'valid checkplot filename (or does not exist), or a dict' %
(os.path.abspath(checkplotin), type(checkplotin)))
return None
# check for unicode in python 2.7
else:
# get the current checkplotdict
if ((isinstance(checkplotin, str) or isinstance(checkplotin, unicode))
and os.path.exists(checkplotin)):
cpd = _read_checkplot_picklefile(checkplotin)
elif isinstance(checkplotin,dict):
cpd = checkplotin
else:
LOGERROR('checkplotin: %s of type %s is not a '
'valid checkplot filename (or does not exist), or a dict' %
(os.path.abspath(checkplotin), type(checkplotin)))
return None
# figure out the dimensions of the output png
# each cell is 750 x 480 pixels
# a row is made of four cells
# - the first row is for object info
# - the rest are for periodograms and phased LCs, one row per method
# if there are more than three phased LC plots per method, we'll only plot 3
cplspmethods = cpd['pfmethods']
cprows = len(cplspmethods)
# add in any extra rows from neighbors
if 'neighbors' in cpd and cpd['neighbors'] and len(cpd['neighbors']) > 0:
nbrrows = len(cpd['neighbors'])
else:
nbrrows = 0
# add in any extra rows from keyword arguments
if extrarows and len(extrarows) > 0:
erows = len(extrarows)
else:
erows = 0
# add in any extra rows from the checkplot dict
if ('externalplots' in cpd and
cpd['externalplots'] and
len(cpd['externalplots']) > 0):
cpderows = len(cpd['externalplots'])
else:
cpderows = 0
totalwidth = 3000
totalheight = 480 + (cprows + erows + nbrrows + cpderows)*480
# this is the output PNG
outimg = Image.new('RGBA',(totalwidth, totalheight),(255,255,255,255))
# now fill in the rows of the output png. we'll use Pillow to build up the
# output image from the already stored plots and stuff in the checkplot
# dict.
###############################
# row 1, cell 1: finder chart #
###############################
if cpd['finderchart']:
finder = Image.open(
_base64_to_file(cpd['finderchart'], None, writetostrio=True)
)
bigfinder = finder.resize((450,450), Image.ANTIALIAS)
outimg.paste(bigfinder,(150,20))
#####################################
# row 1, cell 2: object information #
#####################################
# find the font we need from the package data
fontpath = os.path.join(os.path.dirname(__file__),
'cpserver',
'cps-assets',
'DejaVuSans.ttf')
# load the font
if os.path.exists(fontpath):
cpfontnormal = ImageFont.truetype(fontpath, 20)
cpfontlarge = ImageFont.truetype(fontpath, 28)
else:
LOGWARNING('could not find bundled '
'DejaVu Sans font in the astrobase package '
'data, using ugly defaults...')
cpfontnormal = ImageFont.load_default()
cpfontlarge = ImageFont.load_default()
# the image draw object
objinfodraw = ImageDraw.Draw(outimg)
# write out the object information
# objectid
objinfodraw.text(
(875, 25),
cpd['objectid'] if cpd['objectid'] else 'no objectid',
font=cpfontlarge,
fill=(0,0,255,255)
)
# twomass id
if 'twomassid' in cpd['objectinfo']:
objinfodraw.text(
(875, 60),
('2MASS J%s' % cpd['objectinfo']['twomassid']
if cpd['objectinfo']['twomassid']
else ''),
font=cpfontnormal,
fill=(0,0,0,255)
)
# ndet
if 'ndet' in cpd['objectinfo']:
objinfodraw.text(
(875, 85),
('LC points: %s' % cpd['objectinfo']['ndet']
if cpd['objectinfo']['ndet'] is not None
else ''),
font=cpfontnormal,
fill=(0,0,0,255)
)
else:
objinfodraw.text(
(875, 85),
('LC points: %s' % cpd['magseries']['times'].size),
font=cpfontnormal,
fill=(0,0,0,255)
)
# coords and PM
objinfodraw.text(
(875, 125),
('Coords and PM'),
font=cpfontnormal,
fill=(0,0,0,255)
)
if 'ra' in cpd['objectinfo'] and 'decl' in cpd['objectinfo']:
objinfodraw.text(
(1125, 125),
(('RA, Dec: %.3f, %.3f' %
(cpd['objectinfo']['ra'], cpd['objectinfo']['decl']))
if (cpd['objectinfo']['ra'] is not None and
cpd['objectinfo']['decl'] is not None)
else ''),
font=cpfontnormal,
fill=(0,0,0,255)
)
else:
objinfodraw.text(
(1125, 125),
'RA, Dec: nan, nan',
font=cpfontnormal,
fill=(0,0,0,255)
)
if 'propermotion' in cpd['objectinfo']:
objinfodraw.text(
(1125, 150),
(('Total PM: %.5f mas/yr' % cpd['objectinfo']['propermotion'])
if (cpd['objectinfo']['propermotion'] is not None)
else ''),
font=cpfontnormal,
fill=(0,0,0,255)
)
else:
objinfodraw.text(
(1125, 150),
'Total PM: nan',
font=cpfontnormal,
fill=(0,0,0,255)
)
if 'rpmj' in cpd['objectinfo']:
objinfodraw.text(
(1125, 175),
(('Reduced PM [Jmag]: %.3f' % cpd['objectinfo']['rpmj'])
if (cpd['objectinfo']['rpmj'] is not None)
else ''),
font=cpfontnormal,
fill=(0,0,0,255)
)
else:
objinfodraw.text(
(1125, 175),
'Reduced PM [Jmag]: nan',
font=cpfontnormal,
fill=(0,0,0,255)
)
# magnitudes
objinfodraw.text(
(875, 200),
('Magnitudes'),
font=cpfontnormal,
fill=(0,0,0,255)
)
objinfodraw.text(
(1125, 200),
('gri: %.3f, %.3f, %.3f' %
((cpd['objectinfo']['sdssg'] if
('sdssg' in cpd['objectinfo'] and
cpd['objectinfo']['sdssg'] is not None)
else npnan),
(cpd['objectinfo']['sdssr'] if
('sdssr' in cpd['objectinfo'] and
cpd['objectinfo']['sdssr'] is not None)
else npnan),
(cpd['objectinfo']['sdssi'] if
('sdssi' in cpd['objectinfo'] and
cpd['objectinfo']['sdssi'] is not None)
else npnan))),
font=cpfontnormal,
fill=(0,0,0,255)
)
objinfodraw.text(
(1125, 225),
('JHK: %.3f, %.3f, %.3f' %
((cpd['objectinfo']['jmag'] if
('jmag' in cpd['objectinfo'] and
cpd['objectinfo']['jmag'] is not None)
else npnan),
(cpd['objectinfo']['hmag'] if
('hmag' in cpd['objectinfo'] and
cpd['objectinfo']['hmag'] is not None)
else npnan),
(cpd['objectinfo']['kmag'] if
('kmag' in cpd['objectinfo'] and
cpd['objectinfo']['kmag'] is not None)
else npnan))),
font=cpfontnormal,
fill=(0,0,0,255)
)
objinfodraw.text(
(1125, 250),
('BV: %.3f, %.3f' %
((cpd['objectinfo']['bmag'] if
('bmag' in cpd['objectinfo'] and
cpd['objectinfo']['bmag'] is not None)
else npnan),
(cpd['objectinfo']['vmag'] if
('vmag' in cpd['objectinfo'] and
cpd['objectinfo']['vmag'] is not None)
else npnan))),
font=cpfontnormal,
fill=(0,0,0,255)
)
# colors
if ('dereddened' in cpd['objectinfo'] and
cpd['objectinfo']['dereddened'] == True):
deredlabel = "(dereddened)"
else:
deredlabel = ""
objinfodraw.text(
(875, 275),
'Colors %s' % deredlabel,
font=cpfontnormal,
fill=(0,0,0,255)
)
objinfodraw.text(
(1125, 275),
('B - V: %.3f, V - K: %.3f' %
( (cpd['objectinfo']['bvcolor'] if
('bvcolor' in cpd['objectinfo'] and
cpd['objectinfo']['bvcolor'] is not None)
else npnan),
(cpd['objectinfo']['vkcolor'] if
('vkcolor' in cpd['objectinfo'] and
cpd['objectinfo']['vkcolor'] is not None)
else npnan) )),
font=cpfontnormal,
fill=(0,0,0,255)
)
objinfodraw.text(
(1125, 300),
('i - J: %.3f, g - K: %.3f' %
( (cpd['objectinfo']['ijcolor'] if
('ijcolor' in cpd['objectinfo'] and
cpd['objectinfo']['ijcolor'] is not None)
else npnan),
(cpd['objectinfo']['gkcolor'] if
('gkcolor' in cpd['objectinfo'] and
cpd['objectinfo']['gkcolor'] is not None)
else npnan) )),
font=cpfontnormal,
fill=(0,0,0,255)
)
objinfodraw.text(
(1125, 325),
('J - K: %.3f' %
( (cpd['objectinfo']['jkcolor'] if
('jkcolor' in cpd['objectinfo'] and
cpd['objectinfo']['jkcolor'] is not None)
else npnan),) ),
font=cpfontnormal,
fill=(0,0,0,255)
)
# color classification
if ('color_classes' in cpd['objectinfo'] and
cpd['objectinfo']['color_classes']):
objinfodraw.text(
(875, 350),
('star classification by color: %s' %
(', '.join(cpd['objectinfo']['color_classes']))),
font=cpfontnormal,
fill=(0,0,0,255)
)
# GAIA neighbors
if ( ('gaia_neighbors' in cpd['objectinfo']) and
(cpd['objectinfo']['gaia_neighbors'] is not None) and
(np.isfinite(cpd['objectinfo']['gaia_neighbors'])) and
('searchradarcsec' in cpd['objectinfo']) and
(cpd['objectinfo']['searchradarcsec']) ):
objinfodraw.text(
(875, 375),
('%s GAIA close neighbors within %.1f arcsec' %
(cpd['objectinfo']['gaia_neighbors'],
cpd['objectinfo']['searchradarcsec'])),
font=cpfontnormal,
fill=(0,0,0,255)
)
# closest GAIA neighbor
if ( ('gaia_closest_distarcsec' in cpd['objectinfo']) and
(cpd['objectinfo']['gaia_closest_distarcsec'] is not None) and
(np.isfinite(cpd['objectinfo']['gaia_closest_distarcsec'])) and
('gaia_closest_gmagdiff' in cpd['objectinfo']) and
(cpd['objectinfo']['gaia_closest_gmagdiff'] is not None) and
(np.isfinite(cpd['objectinfo']['gaia_closest_gmagdiff'])) ):
objinfodraw.text(
(875, 400),
('closest GAIA neighbor is %.1f arcsec away, '
'GAIA mag (obj-nbr): %.3f' %
(cpd['objectinfo']['gaia_closest_distarcsec'],
cpd['objectinfo']['gaia_closest_gmagdiff'])),
font=cpfontnormal,
fill=(0,0,0,255)
)
# object tags
if 'objecttags' in cpd['objectinfo'] and cpd['objectinfo']['objecttags']:
objtagsplit = cpd['objectinfo']['objecttags'].split(',')
# write three tags per line
nobjtaglines = int(np.ceil(len(objtagsplit)/3.0))
for objtagline in range(nobjtaglines):
objtagslice = ','.join(objtagsplit[objtagline*3:objtagline*3+3])
objinfodraw.text(
(875, 450+objtagline*25),
objtagslice,
font=cpfontnormal,
fill=(135, 54, 0, 255)
)
################################################
# row 1, cell 3: variability info and comments #
################################################
# objectisvar
objisvar = cpd['varinfo']['objectisvar']
if objisvar == '0':
objvarflag = 'Variable star flag not set'
elif objisvar == '1':
objvarflag = 'Object is probably a variable star'
elif objisvar == '2':
objvarflag = 'Object is probably not a variable star'
elif objisvar == '3':
objvarflag = 'Not sure if this object is a variable star'
elif objisvar is None:
objvarflag = 'Variable star flag not set'
elif objisvar is True:
objvarflag = 'Object is probably a variable star'
elif objisvar is False:
objvarflag = 'Object is probably not a variable star'
else:
objvarflag = 'Variable star flag: %s' % objisvar
objinfodraw.text(
(1600, 125),
objvarflag,
font=cpfontnormal,
fill=(0,0,0,255)
)
# period
objinfodraw.text(
(1600, 150),
('Period [days]: %.6f' %
(cpd['varinfo']['varperiod']
if cpd['varinfo']['varperiod'] is not None
else np.nan)),
font=cpfontnormal,
fill=(0,0,0,255)
)
# epoch
objinfodraw.text(
(1600, 175),
('Epoch [JD]: %.6f' %
(cpd['varinfo']['varepoch']
if cpd['varinfo']['varepoch'] is not None
else np.nan)),
font=cpfontnormal,
fill=(0,0,0,255)
)
# variability tags
if cpd['varinfo']['vartags']:
vartagsplit = cpd['varinfo']['vartags'].split(',')
# write three tags per line
nvartaglines = int(np.ceil(len(vartagsplit)/3.0))
for vartagline in range(nvartaglines):
vartagslice = ','.join(vartagsplit[vartagline*3:vartagline*3+3])
objinfodraw.text(
(1600, 225+vartagline*25),
vartagslice,
font=cpfontnormal,
fill=(135, 54, 0, 255)
)
# object comments
if cpd['comments']:
commentsplit = cpd['comments'].split(' ')
# write 10 words per line
ncommentlines = int(np.ceil(len(commentsplit)/10.0))
for commentline in range(ncommentlines):
commentslice = ' '.join(
commentsplit[commentline*10:commentline*10+10]
)
objinfodraw.text(
(1600, 325+commentline*25),
commentslice,
font=cpfontnormal,
fill=(0,0,0,255)
)
#######################################
# row 1, cell 4: unphased light curve #
#######################################
if (cpd['magseries'] and
'plot' in cpd['magseries'] and
cpd['magseries']['plot']):
magseries = Image.open(
_base64_to_file(cpd['magseries']['plot'], None, writetostrio=True)
)
outimg.paste(magseries,(750*3,0))
###############################
# the rest of the rows in cpd #
###############################
for lspmethodind, lspmethod in enumerate(cplspmethods):
###############################
# the periodogram comes first #
###############################
if (cpd[lspmethod] and cpd[lspmethod]['periodogram']):
pgram = Image.open(
_base64_to_file(cpd[lspmethod]['periodogram'], None,
writetostrio=True)
)
outimg.paste(pgram,(0,480 + 480*lspmethodind))
#############################
# best phased LC comes next #
#############################
if (cpd[lspmethod] and 0 in cpd[lspmethod] and cpd[lspmethod][0]):
plc1 = Image.open(
_base64_to_file(cpd[lspmethod][0]['plot'], None, writetostrio=True)
)
outimg.paste(plc1,(750,480 + 480*lspmethodind))
#################################
# 2nd best phased LC comes next #
#################################
if (cpd[lspmethod] and 1 in cpd[lspmethod] and cpd[lspmethod][1]):
plc2 = Image.open(
_base64_to_file(cpd[lspmethod][1]['plot'], None, writetostrio=True)
)
outimg.paste(plc2,(750*2,480 + 480*lspmethodind))
#################################
# 3rd best phased LC comes next #
#################################
if (cpd[lspmethod] and 2 in cpd[lspmethod] and cpd[lspmethod][2]):
plc3 = Image.open(
_base64_to_file(cpd[lspmethod][2]['plot'], None, writetostrio=True)
)
outimg.paste(plc3,(750*3,480 + 480*lspmethodind))
################################
## ALL DONE WITH BUILDING PNG ##
################################
#########################
# add in any extra rows #
#########################
# from the keyword arguments
if erows > 0:
for erowind, erow in enumerate(extrarows):
# make sure we never go above 4 plots in a row
for ecolind, ecol in enumerate(erow[:4]):
eplot = Image.open(ecol)
eplotresized = eplot.resize((750,480), Image.ANTIALIAS)
outimg.paste(eplotresized,
(750*ecolind,
(cprows+1)*480 + 480*erowind))
# from the checkplotdict
if cpderows > 0:
for cpderowind, cpderow in enumerate(cpd['externalplots']):
# make sure we never go above 4 plots in a row
for cpdecolind, cpdecol in enumerate(cpderow[:4]):
cpdeplot = Image.open(cpdecol)
cpdeplotresized = cpdeplot.resize((750,480), Image.ANTIALIAS)
outimg.paste(cpdeplotresized,
(750*cpdecolind,
(cprows+1)*480 + (erows*480) + 480*cpderowind))
# from neighbors:
if nbrrows > 0:
# we have four tiles
# tile 1: neighbor objectid, ra, decl, distance, unphased LC
# tile 2: phased LC for gls
# tile 3: phased LC for pdm
# tile 4: phased LC for any other period finding method
# the priority is like so: ['bls','mav','aov','win']
for nbrind, nbr in enumerate(cpd['neighbors']):
# figure out which period finding methods are available for this
# neighbor. make sure to match the ones from the actual object in
# order of priority: 'gls','pdm','bls','aov','mav','acf','win'
nbrlspmethods = []
for lspmethod in cpd['pfmethods']:
if lspmethod in nbr:
nbrlspmethods.append(lspmethod)
# restrict to top three in priority
nbrlspmethods = nbrlspmethods[:3]
try:
# first panel: neighbor objectid, ra, decl, distance, unphased
# LC
nbrlc = Image.open(
_base64_to_file(
nbr['magseries']['plot'], None, writetostrio=True
)
)
outimg.paste(nbrlc,
(750*0,
(cprows+1)*480 + (erows*480) + (cpderows*480) +
480*nbrind))
# overlay the objectinfo
objinfodraw.text(
(98,
(cprows+1)*480 + (erows*480) + (cpderows*480) +
480*nbrind + 15),
('N%s: %s' % (nbrind + 1, nbr['objectid'])),
font=cpfontlarge,
fill=(0,0,255,255)
)
# overlay the objectinfo
objinfodraw.text(
(98,
(cprows+1)*480 + (erows*480) + (cpderows*480) +
480*nbrind + 50),
('(RA, DEC) = (%.3f, %.3f), distance: %.1f arcsec' %
(nbr['ra'], nbr['decl'], nbr['dist'])),
font=cpfontnormal,
fill=(0,0,255,255)
)
# second panel: phased LC for gls
lsp1lc = Image.open(
_base64_to_file(
nbr[nbrlspmethods[0]][0]['plot'], None,
writetostrio=True
)
)
outimg.paste(lsp1lc,
(750*1,
(cprows+1)*480 + (erows*480) + (cpderows*480) +
480*nbrind))
                # third panel: phased LC for the second available period-finder
lsp2lc = Image.open(
_base64_to_file(
nbr[nbrlspmethods[1]][0]['plot'], None,
writetostrio=True
)
)
outimg.paste(lsp2lc,
(750*2,
(cprows+1)*480 + (erows*480) + (cpderows*480) +
480*nbrind))
                # fourth panel: phased LC for the third available period-finder
lsp3lc = Image.open(
_base64_to_file(
nbr[nbrlspmethods[2]][0]['plot'], None,
writetostrio=True
)
)
outimg.paste(lsp3lc,
(750*3,
(cprows+1)*480 + (erows*480) + (cpderows*480) +
480*nbrind))
except Exception as e:
LOGERROR('neighbor %s does not have a magseries plot, '
'measurements are probably all nan' % nbr['objectid'])
# overlay the objectinfo
objinfodraw.text(
(98,
(cprows+1)*480 + (erows*480) + (cpderows*480) +
480*nbrind + 15),
('N%s: %s, no light curve!' %
(nbrind + 1, nbr['objectid'])),
font=cpfontlarge,
fill=(0,0,255,255)
)
# overlay the objectinfo
objinfodraw.text(
(98,
(cprows+1)*480 + (erows*480) + (cpderows*480) +
480*nbrind + 50),
('(RA, DEC) = (%.3f, %.3f), distance: %.1f arcsec' %
(nbr['ra'], nbr['decl'], nbr['dist'])),
font=cpfontnormal,
fill=(0,0,255,255)
)
#####################
## WRITE FINAL PNG ##
#####################
# check if we've stupidly copied over the same filename as the input pickle
# to expected output file
if outfile.endswith('pkl'):
LOGWARNING('expected output PNG filename ends with .pkl, '
'changed to .png')
outfile = outfile.replace('.pkl','.png')
outimg.save(outfile)
if os.path.exists(outfile):
LOGINFO('checkplot pickle -> checkplot PNG: %s OK' % outfile)
return outfile
else:
LOGERROR('failed to write checkplot PNG')
return None
def cp2png(checkplotin, extrarows=None):
'''
This is just a shortened form of the function above for convenience.
This only handles pickle files.
'''
if checkplotin.endswith('.gz'):
outfile = checkplotin.replace('.pkl.gz','.png')
else:
outfile = checkplotin.replace('.pkl','.png')
return checkplot_pickle_to_png(checkplotin, outfile, extrarows=extrarows)
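# Illustrative end-to-end sketch (variable names and the output path are
# hypothetical): given period-finder result dicts gls, pdm, bls from periodbase
# and the object's times, mags, errs arrays, write a checkplot pickle and then
# render it to a PNG:
#
# cpf = checkplot_pickle([gls, pdm, bls], times, mags, errs,
#                        objectinfo=objectinfo, outfile='checkplot-myobject.pkl')
# cp2png(cpf)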
################################
## POST-PROCESSING CHECKPLOTS ##
################################
def finalize_checkplot(cpx,
outdir,
all_lclistpkl,
objfits=None):
'''This is used to prevent any further changes to the checkplot.
cpx is the checkplot dict or pickle to process.
outdir is the directory to where the final pickle will be written. If this
is set to the same dir as cpx and cpx is a pickle, the function will return
a failure. This is meant to keep the in-process checkplots separate from the
finalized versions.
all_lclistpkl is a pickle created by lcproc.make_lclist above with no
restrictions on the number of observations (so ALL light curves in the
collection).
objfits if not None should be a file path to a FITS file containing a WCS
header and this object. This will be used to make a stamp cutout of the
object using the actual image it was detected on. This will be a useful
comparison to the usual DSS POSS-RED2 image used by the checkplots.
Use this function after all variable classification, period-finding, and
object xmatches are done. This function will add a 'final' key to the
checkplot, which will contain:
- a phased LC plot with the period and epoch set after review using the
times, mags, errs after any appropriate filtering and sigclip was done in
the checkplotserver UI
- The unphased LC using the times, mags, errs after any appropriate
filtering and sigclip was done in the checkplotserver UI
- the same plots for any LC collection neighbors
- the survey cutout for the object if objfits is provided and checks out
- a redone neighbor search using GAIA and all light curves in the collection
even if they don't have at least 1000 observations.
These items will be shown in a special 'Final' tab in the checkplotserver
webapp (this should be run in readonly mode as well). The final tab will
also contain downloadable links for the checkplot pickle in pkl and PNG
format, as well as the final times, mags, errs as a gzipped CSV with a
header containing all of this info (will be readable by the usual
astrobase.hatsurveys.hatlc module).
'''
def parallel_finalize_cplist(cplist,
outdir,
objfits=None):
'''This is a parallel driver for the function above, operating on list of
checkplots.
'''
def parallel_finalize_cpdir(cpdir,
outdir,
cpfileglob='checkplot-*.pkl*',
objfits=None):
'''This is a parallel driver for the function above, operating on a
directory of checkplots.
'''
|
python
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: lazy_read.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='lazy_read.proto',
package='mindinsight.summary',
syntax='proto2',
serialized_options=b'\370\001\001',
serialized_pb=b'\n\x0flazy_read.proto\x12\x13mindinsight.summary\"t\n\x05\x45vent\x12\x11\n\twall_time\x18\x01 \x02(\x01\x12\x0c\n\x04step\x18\x02 \x01(\x03\x12\x11\n\x07version\x18\x03 \x01(\tH\x00\x12/\n\x07summary\x18\x05 \x01(\x0b\x32\x1c.mindinsight.summary.SummaryH\x00\x42\x06\n\x04what\"\xc8\x01\n\x07Summary\x12\x31\n\x05value\x18\x01 \x03(\x0b\x32\".mindinsight.summary.Summary.Value\x1a\x1e\n\x05Image\x12\x15\n\rencoded_image\x18\x04 \x02(\x0c\x1aj\n\x05Value\x12\x0b\n\x03tag\x18\x01 \x02(\t\x12\x16\n\x0cscalar_value\x18\x03 \x01(\x02H\x00\x12\x33\n\x05image\x18\x04 \x01(\x0b\x32\".mindinsight.summary.Summary.ImageH\x00\x42\x07\n\x05valueB\x03\xf8\x01\x01'
)
_EVENT = _descriptor.Descriptor(
name='Event',
full_name='mindinsight.summary.Event',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='wall_time', full_name='mindinsight.summary.Event.wall_time', index=0,
number=1, type=1, cpp_type=5, label=2,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='step', full_name='mindinsight.summary.Event.step', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='version', full_name='mindinsight.summary.Event.version', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='summary', full_name='mindinsight.summary.Event.summary', index=3,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='what', full_name='mindinsight.summary.Event.what',
index=0, containing_type=None, fields=[]),
],
serialized_start=40,
serialized_end=156,
)
_SUMMARY_IMAGE = _descriptor.Descriptor(
name='Image',
full_name='mindinsight.summary.Summary.Image',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='encoded_image', full_name='mindinsight.summary.Summary.Image.encoded_image', index=0,
number=4, type=12, cpp_type=9, label=2,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=221,
serialized_end=251,
)
_SUMMARY_VALUE = _descriptor.Descriptor(
name='Value',
full_name='mindinsight.summary.Summary.Value',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='tag', full_name='mindinsight.summary.Summary.Value.tag', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='scalar_value', full_name='mindinsight.summary.Summary.Value.scalar_value', index=1,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image', full_name='mindinsight.summary.Summary.Value.image', index=2,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='value', full_name='mindinsight.summary.Summary.Value.value',
index=0, containing_type=None, fields=[]),
],
serialized_start=253,
serialized_end=359,
)
_SUMMARY = _descriptor.Descriptor(
name='Summary',
full_name='mindinsight.summary.Summary',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='mindinsight.summary.Summary.value', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_SUMMARY_IMAGE, _SUMMARY_VALUE, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=159,
serialized_end=359,
)
_EVENT.fields_by_name['summary'].message_type = _SUMMARY
_EVENT.oneofs_by_name['what'].fields.append(
_EVENT.fields_by_name['version'])
_EVENT.fields_by_name['version'].containing_oneof = _EVENT.oneofs_by_name['what']
_EVENT.oneofs_by_name['what'].fields.append(
_EVENT.fields_by_name['summary'])
_EVENT.fields_by_name['summary'].containing_oneof = _EVENT.oneofs_by_name['what']
_SUMMARY_IMAGE.containing_type = _SUMMARY
_SUMMARY_VALUE.fields_by_name['image'].message_type = _SUMMARY_IMAGE
_SUMMARY_VALUE.containing_type = _SUMMARY
_SUMMARY_VALUE.oneofs_by_name['value'].fields.append(
_SUMMARY_VALUE.fields_by_name['scalar_value'])
_SUMMARY_VALUE.fields_by_name['scalar_value'].containing_oneof = _SUMMARY_VALUE.oneofs_by_name['value']
_SUMMARY_VALUE.oneofs_by_name['value'].fields.append(
_SUMMARY_VALUE.fields_by_name['image'])
_SUMMARY_VALUE.fields_by_name['image'].containing_oneof = _SUMMARY_VALUE.oneofs_by_name['value']
_SUMMARY.fields_by_name['value'].message_type = _SUMMARY_VALUE
DESCRIPTOR.message_types_by_name['Event'] = _EVENT
DESCRIPTOR.message_types_by_name['Summary'] = _SUMMARY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Event = _reflection.GeneratedProtocolMessageType('Event', (_message.Message,), {
'DESCRIPTOR' : _EVENT,
'__module__' : 'lazy_read_pb2'
# @@protoc_insertion_point(class_scope:mindinsight.summary.Event)
})
_sym_db.RegisterMessage(Event)
Summary = _reflection.GeneratedProtocolMessageType('Summary', (_message.Message,), {
'Image' : _reflection.GeneratedProtocolMessageType('Image', (_message.Message,), {
'DESCRIPTOR' : _SUMMARY_IMAGE,
'__module__' : 'lazy_read_pb2'
# @@protoc_insertion_point(class_scope:mindinsight.summary.Summary.Image)
})
,
'Value' : _reflection.GeneratedProtocolMessageType('Value', (_message.Message,), {
'DESCRIPTOR' : _SUMMARY_VALUE,
'__module__' : 'lazy_read_pb2'
# @@protoc_insertion_point(class_scope:mindinsight.summary.Summary.Value)
})
,
'DESCRIPTOR' : _SUMMARY,
'__module__' : 'lazy_read_pb2'
# @@protoc_insertion_point(class_scope:mindinsight.summary.Summary)
})
_sym_db.RegisterMessage(Summary)
_sym_db.RegisterMessage(Summary.Image)
_sym_db.RegisterMessage(Summary.Value)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
python
|
import os
import sys
import socket
import time
from multiprocessing import Process
from pathlib import Path
from typing import Tuple, Union
import torch
from torch.utils.tensorboard import SummaryWriter
from super_gradients.training.exceptions.dataset_exceptions import UnsupportedBatchItemsFormat
# TODO: These utils should move to sg_model package as internal (private) helper functions
def try_port(port):
"""
try_port - Helper method for tensorboard port binding
:param port:
:return:
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
is_port_available = False
try:
sock.bind(("localhost", port))
is_port_available = True
except Exception as ex:
        print('Port ' + str(port) + ' is in use: ' + str(ex))
sock.close()
return is_port_available
def launch_tensorboard_process(checkpoints_dir_path: str, sleep_postpone: bool = True, port: int = None) -> Tuple[Process, int]:
"""
    launch_tensorboard_process - Default behavior is to scan ports 6006-6015 and use the first free one,
                                 unless port is defined by the user
:param checkpoints_dir_path:
:param sleep_postpone:
:param port:
:return: tuple of tb process, port
"""
logdir_path = str(Path(checkpoints_dir_path).parent.absolute())
tb_cmd = 'tensorboard --logdir=' + logdir_path + ' --bind_all'
if port is not None:
tb_ports = [port]
else:
tb_ports = range(6006, 6016)
for tb_port in tb_ports:
if not try_port(tb_port):
continue
else:
print('Starting Tensor-Board process on port: ' + str(tb_port))
tensor_board_process = Process(target=os.system, args=([tb_cmd + ' --port=' + str(tb_port)]))
tensor_board_process.daemon = True
tensor_board_process.start()
# LET THE TENSORBOARD PROCESS START
if sleep_postpone:
time.sleep(3)
return tensor_board_process, tb_port
# RETURNING IRRELEVANT VALUES
    print('Failed to initialize Tensor-Board process on ports: ' + ', '.join(map(str, tb_ports)))
return None, -1
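# Illustrative usage sketch (the checkpoint path is hypothetical): start TensorBoard
# for a run's checkpoint directory on the first free port in 6006-6015, skipping the
# startup sleep:
#
# tb_process, tb_port = launch_tensorboard_process('/experiments/run_1/ckpt',
#                                                  sleep_postpone=False)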
def init_summary_writer(tb_dir, checkpoint_loaded, user_prompt=False):
"""Remove previous tensorboard files from directory and launch a tensor board process"""
# If the training is from scratch, Walk through destination folder and delete existing tensorboard logs
user = ''
if not checkpoint_loaded:
for filename in os.listdir(tb_dir):
if 'events' in filename:
if not user_prompt:
print('"{}" will not be deleted'.format(filename))
continue
while True:
# Verify with user before deleting old tensorboard files
                    user = input('\nOLDER TENSORBOARD FILES EXIST IN EXPERIMENT FOLDER:\n"{}"\n'
                                 'DO YOU WANT TO DELETE THEM? [y/n]'
                                 .format(filename)) if (user != 'n' and user != 'y') else user
if user == 'y':
os.remove('{}/{}'.format(tb_dir, filename))
print('DELETED: {}!'.format(filename))
break
elif user == 'n':
print('"{}" will not be deleted'.format(filename))
break
print('Unknown answer...')
# Launch a tensorboard process
return SummaryWriter(tb_dir)
def add_log_to_file(filename, results_titles_list, results_values_list, epoch, max_epochs):
"""Add a message to the log file"""
    # Note: opening and closing the file every time is inefficient. It is done for experimental purposes
with open(filename, 'a') as f:
f.write('\nEpoch (%d/%d) - ' % (epoch, max_epochs))
for result_title, result_value in zip(results_titles_list, results_values_list):
if isinstance(result_value, torch.Tensor):
result_value = result_value.item()
f.write(result_title + ': ' + str(result_value) + '\t')
def write_training_results(writer, results_titles_list, results_values_list, epoch):
"""Stores the training and validation loss and accuracy for current epoch in a tensorboard file"""
for res_key, res_val in zip(results_titles_list, results_values_list):
# USE ONLY LOWER-CASE LETTERS AND REPLACE SPACES WITH '_' TO AVOID MANY TITLES FOR THE SAME KEY
corrected_res_key = res_key.lower().replace(' ', '_')
writer.add_scalar(corrected_res_key, res_val, epoch)
writer.flush()
def write_hpms(writer, hpmstructs=[], special_conf={}):
"""Stores the training and dataset hyper params in the tensorboard file"""
hpm_string = ""
for hpm in hpmstructs:
for key, val in hpm.__dict__.items():
hpm_string += '{}: {} \n '.format(key, val)
for key, val in special_conf.items():
hpm_string += '{}: {} \n '.format(key, val)
writer.add_text("Hyper_parameters", hpm_string)
writer.flush()
# TODO: This should probably move into datasets/datasets_utils.py?
def unpack_batch_items(batch_items: Union[tuple, torch.Tensor]):
"""
Adds support for unpacking batch items in train/validation loop.
@param batch_items: (Union[tuple, torch.Tensor]) returned by the data loader, which is expected to be in one of
the following formats:
1. torch.Tensor or tuple, s.t inputs = batch_items[0], targets = batch_items[1] and len(batch_items) = 2
2. tuple: (inputs, targets, additional_batch_items)
where inputs are fed to the network, targets are their corresponding labels and additional_batch_items is a
dictionary (format {additional_batch_item_i_name: additional_batch_item_i ...}) which can be accessed through
the phase context under the attribute additional_batch_item_i_name, using a phase callback.
@return: inputs, target, additional_batch_items
"""
additional_batch_items = {}
if len(batch_items) == 2:
inputs, target = batch_items
elif len(batch_items) == 3:
inputs, target, additional_batch_items = batch_items
else:
raise UnsupportedBatchItemsFormat()
return inputs, target, additional_batch_items
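# Illustrative sketch (tensor names are hypothetical) of the two accepted formats:
#
# inputs, target, extras = unpack_batch_items((images, labels))
#     # -> extras == {}
# inputs, target, extras = unpack_batch_items((images, labels, {'sample_ids': ids}))
#     # -> extras == {'sample_ids': ids}, later exposed on the phase context
#     #    under the attribute name 'sample_ids' via a phase callback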
def log_uncaught_exceptions(logger):
"""
Makes logger log uncaught exceptions
@param logger: logging.Logger
@return: None
"""
def handle_exception(exc_type, exc_value, exc_traceback):
if issubclass(exc_type, KeyboardInterrupt):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return
logger.error("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))
sys.excepthook = handle_exception
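# Illustrative usage sketch: route any uncaught exception through a module logger.
# import logging
# log_uncaught_exceptions(logging.getLogger(__name__))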
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright © 2016-2017 Stella/AboodXD
# Supported formats:
# -RGBA8
# -RGB10A2
# -RGB565
# -RGB5A1
# -RGBA4
# -L8/R8
# -L8A8/RG8
# -BC1
# -BC2
# -BC3
# -BC4U
# -BC4S
# -BC5U
# -BC5S
# Feel free to include this in your own program if you want, just give credits. :)
"""dds.py: DDS reader and header generator."""
import struct
try:
import form_conv_cy as form_conv
except ImportError:
import form_conv
def readDDS(f, SRGB):
with open(f, "rb") as inf:
inb = inf.read()
if len(inb) < 0x80 or inb[:4] != b'DDS ':
print("")
print(f + " is not a valid DDS file!")
return 0, 0, 0, b'', 0, [], 0, []
width = struct.unpack("<I", inb[16:20])[0]
height = struct.unpack("<I", inb[12:16])[0]
fourcc = inb[84:88]
if fourcc == b'DX10':
print("")
print("DX10 DDS files are not supported.")
return 0, 0, 0, b'', 0, [], 0, []
pflags = struct.unpack("<I", inb[80:84])[0]
bpp = struct.unpack("<I", inb[88:92])[0] >> 3
channel0 = struct.unpack("<I", inb[92:96])[0]
channel1 = struct.unpack("<I", inb[96:100])[0]
channel2 = struct.unpack("<I", inb[100:104])[0]
channel3 = struct.unpack("<I", inb[104:108])[0]
caps = struct.unpack("<I", inb[108:112])[0]
if caps not in [0x1000, 0x401008]:
print("")
print("Invalid texture.")
return 0, 0, 0, b'', 0, [], 0, []
abgr8_masks = {0xff: 0, 0xff00: 1, 0xff0000: 2, 0xff000000: 3, 0: 5}
bgr8_masks = {0xff: 0, 0xff00: 1, 0xff0000: 2, 0: 5}
a2rgb10_masks = {0x3ff00000: 0, 0xffc00: 1, 0x3ff: 2, 0xc0000000: 3, 0: 5}
bgr565_masks = {0x1f: 0, 0x7e0: 1, 0xf800: 2, 0: 5}
a1bgr5_masks = {0x1f: 0, 0x3e0: 1, 0x7c00: 2, 0x8000: 3, 0: 5}
abgr4_masks = {0xf: 0, 0xf0: 1, 0xf00: 2, 0xf000: 3, 0: 5}
l8_masks = {0xff: 0, 0: 5}
a8l8_masks = {0xff: 0, 0xff00: 1, 0: 5}
compressed = False
luminance = False
rgb = False
has_alpha = False
if pflags == 4:
compressed = True
elif pflags == 0x20000 or pflags == 2:
luminance = True
elif pflags == 0x20001:
luminance = True
has_alpha = True
elif pflags == 0x40:
rgb = True
elif pflags == 0x41:
rgb = True
has_alpha = True
else:
print("")
print("Invalid texture.")
return 0, 0, 0, b'', 0, [], 0, []
format_ = 0
if compressed:
compSel = [0, 1, 2, 3]
if fourcc == b'DXT1':
format_ = 0x42
bpp = 8
elif fourcc == b'DXT3':
format_ = 0x43
bpp = 16
elif fourcc == b'DXT5':
format_ = 0x44
bpp = 16
elif fourcc in [b'BC4U', b'ATI1']:
format_ = 0x49
bpp = 8
elif fourcc == b'BC4S':
format_ = 0x4a
bpp = 8
elif fourcc in [b'BC5U', b'ATI2']:
format_ = 0x4b
bpp = 16
elif fourcc == b'BC5S':
format_ = 0x4c
bpp = 16
size = ((width + 3) >> 2) * ((height + 3) >> 2) * bpp
else:
if luminance:
if has_alpha:
if channel0 in a8l8_masks and channel1 in a8l8_masks and channel2 in a8l8_masks and channel3 in a8l8_masks and bpp == 2:
format_ = 0xd
compSel = [a8l8_masks[channel0], a8l8_masks[channel1], a8l8_masks[channel2], a8l8_masks[channel3]]
else:
if channel0 in l8_masks and channel1 in l8_masks and channel2 in l8_masks and channel3 in l8_masks and bpp == 1:
format_ = 1
compSel = [l8_masks[channel0], l8_masks[channel1], l8_masks[channel2], l8_masks[channel3]]
elif rgb:
if has_alpha:
if bpp == 4:
if channel0 in abgr8_masks and channel1 in abgr8_masks and channel2 in abgr8_masks and channel3 in abgr8_masks:
format_ = 0x38 if SRGB else 0x25
compSel = [abgr8_masks[channel0], abgr8_masks[channel1], abgr8_masks[channel2], abgr8_masks[channel3]]
elif channel0 in a2rgb10_masks and channel1 in a2rgb10_masks and channel2 in a2rgb10_masks and channel3 in a2rgb10_masks:
format_ = 0x3d
compSel = [a2rgb10_masks[channel0], a2rgb10_masks[channel1], a2rgb10_masks[channel2], a2rgb10_masks[channel3]]
elif bpp == 2:
if channel0 in a1bgr5_masks and channel1 in a1bgr5_masks and channel2 in a1bgr5_masks and channel3 in a1bgr5_masks:
format_ = 0x3b
compSel = [a1bgr5_masks[channel0], a1bgr5_masks[channel1], a1bgr5_masks[channel2], a1bgr5_masks[channel3]]
elif channel0 in abgr4_masks and channel1 in abgr4_masks and channel2 in abgr4_masks and channel3 in abgr4_masks:
format_ = 0x39
compSel = [abgr4_masks[channel0], abgr4_masks[channel1], abgr4_masks[channel2], abgr4_masks[channel3]]
else:
if channel0 in bgr8_masks and channel1 in bgr8_masks and channel2 in bgr8_masks and channel3 == 0 and bpp == 3: # Kinda not looking good if you ask me
format_ = 0x38 if SRGB else 0x25
compSel = [bgr8_masks[channel0], bgr8_masks[channel1], bgr8_masks[channel2], 3]
if channel0 in bgr565_masks and channel1 in bgr565_masks and channel2 in bgr565_masks and channel3 in bgr565_masks and bpp == 2:
format_ = 0x3c
compSel = [bgr565_masks[channel0], bgr565_masks[channel1], bgr565_masks[channel2], bgr565_masks[channel3]]
size = width * height * bpp
if caps == 0x401008:
numMips = struct.unpack("<I", inb[28:32])[0] - 1
mipSize = get_mipSize(width, height, bpp, numMips, compressed)
else:
numMips = 0
mipSize = 0
if len(inb) < 0x80+size+mipSize:
print("")
print(f + " is not a valid DDS file!")
return 0, 0, 0, b'', 0, [], 0, []
if format_ == 0:
print("")
print("Unsupported DDS format!")
return 0, 0, 0, b'', 0, [], 0, []
data = inb[0x80:0x80+size+mipSize]
if format_ in [0x25, 0x38] and bpp == 3:
data = form_conv.rgb8torgbx8(data)
bpp += 1
size = width * height * bpp
return width, height, format_, fourcc, size, compSel, numMips, data
def get_mipSize(width, height, bpp, numMips, compressed):
size = 0
for i in range(numMips):
level = i + 1
if compressed:
size += ((max(1, width >> level) + 3) >> 2) * ((max(1, height >> level) + 3) >> 2) * bpp
else:
size += max(1, width >> level) * max(1, height >> level) * bpp
return size
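# Worked example of the size formula above (values chosen for illustration):
# a 256x256 BC1 texture (compressed, bpp=8 bytes per 4x4 block) with numMips=2:
#   level 1: ((128+3)>>2) * ((128+3)>>2) * 8 = 32*32*8 = 8192 bytes
#   level 2: ((64+3)>>2)  * ((64+3)>>2)  * 8 = 16*16*8 = 2048 bytes
# so get_mipSize(256, 256, 8, 2, True) should return 10240.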
def generateHeader(num_mipmaps, w, h, format_, compSel, size, compressed):
hdr = bytearray(128)
luminance = False
RGB = False
has_alpha = True
if format_ == 28: # ABGR8
RGB = True
compSels = {0: 0x000000ff, 1: 0x0000ff00, 2: 0x00ff0000, 3: 0xff000000, 5: 0}
fmtbpp = 4
elif format_ == 24: # A2RGB10
RGB = True
compSels = {0: 0x3ff00000, 1: 0x000ffc00, 2: 0x000003ff, 3: 0xc0000000, 5: 0}
fmtbpp = 4
elif format_ == 85: # BGR565
RGB = True
compSels = {0: 0x0000001f, 1: 0x000007e0, 2: 0x0000f800, 3: 0, 5: 0}
fmtbpp = 2
has_alpha = False
elif format_ == 86: # A1BGR5
RGB = True
compSels = {0: 0x0000001f, 1: 0x000003e0, 2: 0x00007c00, 3: 0x00008000, 5: 0}
fmtbpp = 2
elif format_ == 115: # ABGR4
RGB = True
compSels = {0: 0x0000000f, 1: 0x000000f0, 2: 0x00000f00, 3: 0x0000f000, 5: 0}
fmtbpp = 2
elif format_ == 61: # L8
luminance = True
compSels = {0: 0x000000ff, 1: 0, 2: 0, 3: 0, 5: 0}
fmtbpp = 1
if compSel[3] != 0:
has_alpha = False
elif format_ == 49: # A8L8
luminance = True
compSels = {0: 0x000000ff, 1: 0x0000ff00, 2: 0, 3: 0, 5: 0}
fmtbpp = 2
flags = 0x00000001 | 0x00001000 | 0x00000004 | 0x00000002
caps = 0x00001000
if num_mipmaps == 0:
num_mipmaps = 1
elif num_mipmaps != 1:
flags |= 0x00020000
caps |= 0x00000008 | 0x00400000
if not compressed:
flags |= 0x00000008
a = False
if compSel[0] != 0 and compSel[1] != 0 and compSel[2] != 0 and compSel[3] == 0: # ALPHA
a = True
pflags = 0x00000002
elif luminance: # LUMINANCE
pflags = 0x00020000
elif RGB: # RGB
pflags = 0x00000040
else: # Not possible...
return b''
if has_alpha and not a:
pflags |= 0x00000001
size = w * fmtbpp
else:
flags |= 0x00080000
pflags = 0x00000004
if format_ == "BC1":
fourcc = b'DXT1'
elif format_ == "BC2":
fourcc = b'DXT3'
elif format_ == "BC3":
fourcc = b'DXT5'
elif format_ == "BC4U":
fourcc = b'ATI1'
elif format_ == "BC4S":
fourcc = b'BC4S'
elif format_ == "BC5U":
fourcc = b'ATI2'
elif format_ == "BC5S":
fourcc = b'BC5S'
hdr[0:0 + 4] = b'DDS '
    hdr[4:4 + 4] = (124).to_bytes(4, 'little')
hdr[8:8 + 4] = flags.to_bytes(4, 'little')
hdr[12:12 + 4] = h.to_bytes(4, 'little')
hdr[16:16 + 4] = w.to_bytes(4, 'little')
hdr[20:20 + 4] = size.to_bytes(4, 'little')
hdr[28:28 + 4] = num_mipmaps.to_bytes(4, 'little')
    hdr[76:76 + 4] = (32).to_bytes(4, 'little')
hdr[80:80 + 4] = pflags.to_bytes(4, 'little')
if compressed:
hdr[84:84 + 4] = fourcc
else:
hdr[88:88 + 4] = (fmtbpp << 3).to_bytes(4, 'little')
hdr[92:92 + 4] = compSels[compSel[0]].to_bytes(4, 'little')
hdr[96:96 + 4] = compSels[compSel[1]].to_bytes(4, 'little')
hdr[100:100 + 4] = compSels[compSel[2]].to_bytes(4, 'little')
hdr[104:104 + 4] = compSels[compSel[3]].to_bytes(4, 'little')
hdr[108:108 + 4] = caps.to_bytes(4, 'little')
return hdr
|
python
|
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from polyaxon.env_vars.keys import (
POLYAXON_KEYS_ARCHIVE_ROOT,
POLYAXON_KEYS_ARTIFACTS_ROOT,
POLYAXON_KEYS_CONTEXT_ROOT,
POLYAXON_KEYS_OFFLINE_ROOT,
)
def polyaxon_user_path():
base_path = os.path.expanduser("~")
if not os.access(base_path, os.W_OK):
base_path = "/tmp"
return os.path.join(base_path, ".polyaxon")
CONTEXT_ROOT = os.environ.get(POLYAXON_KEYS_CONTEXT_ROOT, "/plx-context")
CONTEXT_MOUNT_GC = "{}/.gc/gc-secret.json".format(CONTEXT_ROOT)
CONTEXT_MOUNT_CONFIGS = "{}/.configs".format(CONTEXT_ROOT)
CONTEXT_MOUNT_AUTH = "{}/.auth".format(CONTEXT_MOUNT_CONFIGS)
CONTEXT_MOUNT_ARTIFACTS = "{}/artifacts".format(CONTEXT_ROOT)
CONTEXT_MOUNT_ARTIFACTS_FORMAT = "{}/{{}}".format(CONTEXT_MOUNT_ARTIFACTS)
CONTEXT_MOUNT_ARTIFACTS_RELATED = CONTEXT_MOUNT_ARTIFACTS_FORMAT.format("_related_runs")
CONTEXT_MOUNT_ARTIFACTS_RELATED_FORMAT = "{}/{{}}".format(
CONTEXT_MOUNT_ARTIFACTS_RELATED
)
CONTEXT_MOUNT_RUN_OUTPUTS_FORMAT = "{}/outputs".format(CONTEXT_MOUNT_ARTIFACTS_FORMAT)
CONTEXT_MOUNT_RUN_EVENTS_FORMAT = "{}/events".format(CONTEXT_MOUNT_ARTIFACTS_FORMAT)
CONTEXT_MOUNT_SHM = "/dev/shm"
CONTEXT_MOUNT_DOCKER = "/var/run/docker.sock"
CONTEXT_TMP_POLYAXON_PATH = "/tmp/.polyaxon/"
CONTEXT_USER_POLYAXON_PATH = polyaxon_user_path()
CONTEXT_ARCHIVE_ROOT = os.environ.get(POLYAXON_KEYS_ARCHIVE_ROOT, "/tmp/plx/archives")
CONTEXT_ARTIFACTS_ROOT = os.environ.get(
POLYAXON_KEYS_ARTIFACTS_ROOT, "/tmp/plx/artifacts"
)
CONTEXT_OFFLINE_ROOT = os.environ.get(POLYAXON_KEYS_OFFLINE_ROOT, "/tmp/plx/offline")
CONTEXT_OFFLINE_FORMAT = "{}/{{}}".format(CONTEXT_OFFLINE_ROOT)
CONTEXT_ARTIFACTS_FORMAT = "{}/{{}}".format(CONTEXT_ARTIFACTS_ROOT)
CONTEXTS_OUTPUTS_SUBPATH_FORMAT = "{}/outputs"
CONTEXTS_EVENTS_SUBPATH_FORMAT = "{}/events"
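# Illustrative sketch (the run uuid below is hypothetical) of how the escaped
# format strings above resolve, assuming the default CONTEXT_ROOT of "/plx-context":
# CONTEXT_MOUNT_ARTIFACTS_FORMAT.format("8a7b1c") -> "/plx-context/artifacts/8a7b1c"
# CONTEXTS_OUTPUTS_SUBPATH_FORMAT.format("8a7b1c") -> "8a7b1c/outputs"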
|
python
|
# Marcelo Campos de Medeiros
# ADS UNIFIP
# PYTHON REVIEW
# LESSON 10: CONDITIONALS - GUSTAVO GUANABARA
'''
Write a program that asks for the employee's salary and calculates their raise.
* For salaries above R$ 1,250.00, apply a 10% raise.
* For salaries at or below that amount, the raise is 15%.
'''
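# Worked example of the rule above: a salary of R$ 1000.00 gets the 15% raise
# (-> R$ 1150.00), while a salary of R$ 2000.00 gets the 10% raise (-> R$ 2200.00).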
print('='*30)
print('{:$^30}'.format(' AUMENTO DE SALÁRIO '))
print('='*30)
print()
salario = float(input('Qual valor do seu salário: '))
print()
if salario <= 1250:
reajuste = salario + (salario * 0.15)
print('Você tinha um salário de R$ %.2f.\nCom reajuste de seu novo salário é R$ %.2f'%(salario, reajuste))
else:
reajuste = salario + (salario * 0.10)
print('Você tinha um salário de R$ %.2f.\nCom reajuste de seu novo salário é R$ %.2f.'%(salario, reajuste))
print()
|
python
|
# global imports
from dash.dependencies import Input, Output, State, ALL # ClientsideFunction
from dash import html
# local imports
from ..dash_app import app
import pemfc_gui.input as gui_input
from .. import dash_layout as dl
tab_layout = html.Div(dl.frame(gui_input.main_frame_dicts[1]))
@app.callback(
Output({'type': ALL, 'id': ALL, 'specifier': 'disabled_manifolds'},
'disabled'),
Input({'type': ALL, 'id': ALL,
'specifier': 'checklist_activate_calculation'}, 'value'),
Input({'type': ALL, 'id': ALL, 'specifier': 'disabled_manifolds'}, 'value'),
)
def activate_column(input1, input2):
len_state = len(input2)
list_state = [True for x in range(len_state)] # disable=True for all inputs
for num, val in enumerate(input1): # 3 inputs in input1 for 3 rows
if val == [1]:
list_state[0 + num] = list_state[3 + num] = list_state[15 + num] = \
list_state[18 + num] = list_state[30 + num] = False
if input2[3+num] == 'circular':
list_state[6+num], list_state[9+num], list_state[12+num] = \
False, True, True
else:
list_state[6+num], list_state[9+num], list_state[12+num] = \
True, False, False
if input2[18+num] == 'circular':
list_state[21+num], list_state[24+num], list_state[27+num] = \
False, True, True
else:
list_state[21+num], list_state[24+num], list_state[27+num] = \
True, False, False
return list_state
|
python
|
import bitmex
import time
import pandas as pd
from keys import ID, SECRET, SLACK_TOKEN
from slackclient import SlackClient
sc = SlackClient(SLACK_TOKEN)
client = bitmex.bitmex(test=False, api_key=ID, api_secret=SECRET)
dfpair = []
def get_volume_data(client):
dfpair.clear()
dfxbt = client.Trade.Trade_getBucketed(
binSize='1h', reverse=True, symbol='XBTUSD', count=10, partial=True).result()[0]
dfeth = client.Trade.Trade_getBucketed(
binSize='1h', reverse=True, symbol='ETHUSD', count=10, partial=True).result()[0]
dftrx = client.Trade.Trade_getBucketed(
binSize='1h', reverse=True, symbol='TRX', count=10, partial=True).result()[0]
dfada = client.Trade.Trade_getBucketed(
binSize='1h', reverse=True, symbol='ADA', count=10, partial=True).result()[0]
dfbch = client.Trade.Trade_getBucketed(
binSize='1h', reverse=True, symbol='BCH', count=10, partial=True).result()[0]
dfeos = client.Trade.Trade_getBucketed(
binSize='1h', reverse=True, symbol='EOS', count=10, partial=True).result()[0]
dfltc = client.Trade.Trade_getBucketed(
binSize='1h', reverse=True, symbol='LTC', count=10, partial=True).result()[0]
dfxrp = client.Trade.Trade_getBucketed(
binSize='1h', reverse=True, symbol='XRP', count=10, partial=True).result()[0]
if len(dfxbt) != 0:
dfxbt = get_ohlcv(dfxbt)
dfpair.append(dfxbt)
else:
msg = 'Exception'
slack_msg(msg)
if len(dfeth) != 0:
dfeth = get_ohlcv(dfeth)
dfpair.append(dfeth)
else:
msg = 'Exception'
slack_msg(msg)
if len(dftrx) != 0:
dftrx = get_ohlcv(dftrx)
dfpair.append(dftrx)
else:
msg = 'Exception'
slack_msg(msg)
if len(dfada) != 0:
dfada = get_ohlcv(dfada)
dfpair.append(dfada)
else:
msg = 'Exception'
slack_msg(msg)
if len(dfbch) != 0:
dfbch = get_ohlcv(dfbch)
dfpair.append(dfbch)
else:
msg = 'Exception'
slack_msg(msg)
if len(dfltc) != 0:
dfltc = get_ohlcv(dfltc)
dfpair.append(dfltc)
else:
msg = 'Exception'
slack_msg(msg)
if len(dfeos) != 0:
dfeos = get_ohlcv(dfeos)
dfpair.append(dfeos)
else:
msg = 'Exception'
slack_msg(msg)
if len(dfxrp) != 0:
dfxrp = get_ohlcv(dfxrp)
dfpair.append(dfxrp)
else:
msg = 'Exception'
slack_msg(msg)
return dfpair
def get_ohlcv(df):
ohlcv = pd.DataFrame(df)
ohlcv.set_index(['timestamp'], inplace=True)
ohlcv.sort_values(by=['timestamp'], ascending=True, inplace=True)
return ohlcv
def xbt_cond(data):
if data['volume'][-2] > 100000000:
        msg = 'XBTUSD: 1 Hour: Volume above 100,000,000'
slack_msg(msg)
# def eth_cond(data):
# if data['volume'][-2] > 4*data['volume'][-3]:
# msg = 'ETHUSD: 1 Hour: Volume 20 times'
# slack_msg(msg)
# def trx_cond(data):
# if data['volume'][-2] > 20*data['volume'][-3]:
# msg = 'TRX: 1min: Volume 20 times'
# slack_msg(msg)
# def eos_cond(data):
# if data['volume'][-2] > 20*data['volume'][-3]:
# msg = 'EOS: 1min: Volume 20 times'
# slack_msg(msg)
# def ltc_cond(data):
# if data['volume'][-2] > 20*data['volume'][-3]:
# msg = 'LTC: 1min: Volume 20 times'
# slack_msg(msg)
# def bch_cond(data):
# if data['volume'][-2] > 20*data['volume'][-3]:
# msg = 'BCH: 1min: Volume 20 times'
# slack_msg(msg)
# def xrp_cond(data):
# if data['volume'][-2] > 20*data['volume'][-3]:
# msg = 'XRP: 1min: Volume 20 times'
# slack_msg(msg)
# def ada_cond(data):
# if data['volume'][-2] > 20*data['volume'][-3]:
# msg = 'ADA: 1min: Volume 20 times'
# slack_msg(msg)
def slack_msg(msg):
try:
sc.api_call(
"chat.postMessage",
channel="bitmex1hour",
text=msg+":smile:",
username='My Robot',
icon_emoji=':robot_face:')
# return True
except:
print('Exception in Slack API')
# return False
if __name__ == '__main__':
while True:
        on_the_hour = round(time.time()) % 3600 == 0  # true at the top of each hour
        if on_the_hour:
time.sleep(1)
dfpair = get_volume_data(client)
xbt_cond(dfpair[0])
# eth_cond(dfpair[1])
# trx_cond(dfpair[2])
# ada_cond(dfpair[3])
# bch_cond(dfpair[4])
# ltc_cond(dfpair[5])
# eos_cond(dfpair[6])
# xrp_cond(dfpair[7])
time.sleep(3598)
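# --- Hedged refactoring sketch (not used above) ---
# The per-symbol blocks in get_volume_data() repeat the same fetch/append
# pattern; they could be collapsed into a loop such as the one below, which
# mirrors the behaviour and append order of the code above:
#
#   def fetch_symbol(client, symbol):
#       raw = client.Trade.Trade_getBucketed(
#           binSize='1h', reverse=True, symbol=symbol, count=10,
#           partial=True).result()[0]
#       return get_ohlcv(raw) if len(raw) != 0 else None
#
#   for symbol in ['XBTUSD', 'ETHUSD', 'TRX', 'ADA', 'BCH', 'LTC', 'EOS', 'XRP']:
#       ohlcv = fetch_symbol(client, symbol)
#       if ohlcv is None:
#           slack_msg('Exception')
#       else:
#           dfpair.append(ohlcv)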
|
python
|
#!/usr/bin/env python3
import sys
if len(sys.argv) > 1:
infilename = sys.argv[1]
else:
infilename = 'input.txt'
with open(infilename, 'r') as infile:
    buf = [line.strip().split() for line in infile]
max_col = max([len(x) for x in buf])
print(max_col)
row_checksums = []
for line in buf:
maximum = 0
minimum = 1000000000
for i in range(len(line)):
maximum = max(maximum, int(line[i]))
minimum = min(minimum, int(line[i]))
row_checksums.append(int(maximum) - int(minimum))
print(maximum, minimum)
print(sum(row_checksums))
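# Equivalent one-liner for the same checksum (hedged alternative, not used
# above; assumes every row is non-empty):
#   print(sum(max(map(int, row)) - min(map(int, row)) for row in buf))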
|
python
|
import itertools
from typing import Optional, Sequence
from eth_typing import Address, BlockNumber, Hash32
from eth_utils import is_same_address
from sqlalchemy import orm
from sqlalchemy.orm.exc import NoResultFound
from cthaeh.filter import FilterParams
from cthaeh.loader import get_or_create_topics
from cthaeh.models import Header, Log
from .factories import (
AddressFactory,
BlockFactory,
BlockTransactionFactory,
HeaderFactory,
LogFactory,
LogTopicFactory,
)
def check_filter_results(params: FilterParams, results: Sequence[Log]) -> None:
for log in results:
check_log_matches_filter(params, log)
def check_log_matches_filter(params: FilterParams, log: Log) -> None:
# Check that log belongs to a canonical header
assert log.receipt.transaction.block is not None
header = log.receipt.transaction.block.header
# Check address matches
if isinstance(params.address, tuple):
assert any(
is_same_address(Address(log.address), Address(address))
for address in params.address
)
elif params.address is not None:
assert is_same_address(Address(log.address), Address(params.address))
# Check block number in range
if isinstance(params.from_block, int):
assert header.block_number >= params.from_block
if isinstance(params.to_block, int):
assert header.block_number <= params.to_block
# Check topics
zipped_topics = itertools.zip_longest(
params.topics, log.topics, fillvalue=None # type: ignore
)
for expected_topic, actual_topic in zipped_topics:
if expected_topic is None:
assert actual_topic is not None
elif actual_topic is None:
assert expected_topic is None
elif isinstance(expected_topic, tuple):
assert any(topic == actual_topic.topic for topic in expected_topic)
elif isinstance(expected_topic, bytes):
assert expected_topic == actual_topic.topic
else:
raise Exception("Invariant")
def construct_log(
session: orm.Session,
*,
block_number: Optional[BlockNumber] = None,
address: Optional[Address] = None,
topics: Sequence[Hash32] = (),
data: bytes = b"",
is_canonical: bool = True,
) -> Log:
if block_number is not None:
try:
header = (
session.query(Header) # type: ignore
.filter(Header.is_canonical.is_(is_canonical)) # type: ignore
.filter(Header.block_number == block_number)
.one()
)
except NoResultFound:
header = HeaderFactory(is_canonical=is_canonical, block_number=block_number)
else:
header = HeaderFactory(is_canonical=is_canonical)
if address is None:
address = AddressFactory()
session.add(header)
topic_objs = get_or_create_topics(session, topics)
session.add_all(topic_objs) # type: ignore
if is_canonical:
log = LogFactory(
receipt__transaction__block__header=header, address=address, data=data
)
block_transaction = BlockTransactionFactory(
idx=0,
block=log.receipt.transaction.block,
transaction=log.receipt.transaction,
)
session.add(block_transaction)
else:
log = LogFactory(receipt__transaction__block=None)
block = BlockFactory(header=header)
block_transaction = BlockTransactionFactory(
idx=0, block=block, transaction=log.receipt.transaction
)
session.add_all((block, block_transaction)) # type: ignore
log_topics = tuple(
LogTopicFactory(idx=idx, log=log, topic=topic)
for idx, topic in enumerate(topic_objs)
)
session.add(log)
session.add_all(log_topics) # type: ignore
return log
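# Hedged usage sketch (assumptions: an active SQLAlchemy `session` as provided
# by this project's test fixtures, and FilterParams keyword arguments matching
# the attributes referenced in check_log_matches_filter above):
#
#   log = construct_log(session, block_number=BlockNumber(123))
#   check_filter_results(FilterParams(from_block=100, to_block=200), (log,))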
|
python
|
import yaml
import contextlib
import logging
from teuthology import misc as teuthology
from teuthology.orchestra import run
log = logging.getLogger(__name__)
import os
import pwd
import time
import argparse
"""
# Test yaml to test script mapper for boto3
tests_mapper_v2 = {'test_Mbuckets_basic': 'test_Mbuckets_basic',
'test_Mbuckets_with_Nobjects_basic': 'test_Mbuckets_with_Nobjects_basic',
'test_Mbuckets_with_Nobjects_delete': 'test_Mbuckets_with_Nobjects',
'test_Mbuckets_with_Nobjects_download': 'test_Mbuckets_with_Nobjects',
'test_Mbuckets_with_Nobjects_sharding': 'test_Mbuckets_with_Nobjects',
'test_Mbuckets_with_Nobjects_encryption': 'test_Mbuckets_with_Nobjects',
'test_bucket_lifecycle_config_disable': 'test_bucket_lifecycle_config_ops',
'test_bucket_lifecycle_config_modify': 'test_bucket_lifecycle_config_ops',
'test_bucket_lifecycle_config_read': 'test_bucket_lifecycle_config_ops',
'test_bucket_lifecycle_config_versioning': 'test_bucket_lifecycle_config_ops',
'test_acls': 'test_acls',
'test_bucket_policy_delete': 'test_bucket_policy_ops',
'test_bucket_policy_modify': 'test_bucket_policy_ops',
'test_bucket_policy_replace': 'test_bucket_policy_ops',
'test_bucket_request_payer': 'test_bucket_request_payer',
'test_bucket_request_payer_download': 'test_bucket_request_payer',
'test_dynamic_sharding_offline': 'test_dynamic_bucket_resharding',
'test_dynamic_sharding_online': 'test_dynamic_bucket_resharding',
'test_multitenant_access': 'test_multitenant_user_access',
'test_storage_policy_s3': 'test_storage_policy',
'test_storage_policy_swift': 'test_storage_policy',
'test_swift_basic_ops': 'test_swift_basic_ops',
'test_versioning_enable': 'test_versioning_with_objects',
'test_versioning_objects_copy': 'test_versioning_copy_objects',
'test_versioning_objects_delete': 'test_versioning_with_objects',
'test_versioning_objects_enable': 'test_versioning_with_objects',
'test_versioning_objects_suspend': 'test_versioning_with_objects',
'test_versioning_objects_suspend_reupload': 'test_versioning_with_objects',
}
"""
def get_remotes(ctx):
rgws = ctx.cluster.only(teuthology.is_type('rgw'))
haproxys = ctx.cluster.only(teuthology.is_type('haproxy'))
remotes = []
for remote, roles_for_host in rgws.remotes.items():
remotes.append(remote)
for remote, roles_for_host in haproxys.remotes.items():
remotes.append(remote)
return remotes
def user_creation(ctx, user_config, mclient, version):
log.info('Create user on master client')
temp_yaml_file = 'user_create_' + str(os.getpid()) + pwd.getpwuid(os.getuid()).pw_name
# temp_yaml_file = 'user_create.yaml'
    assert isinstance(user_config, dict), "configuration not given"
log.info('creating yaml from the config: %s' % user_config)
local_file = '/tmp/' + temp_yaml_file
with open(local_file, 'w') as outfile:
outfile.write(yaml.dump(user_config, default_flow_style=False))
log.info('copying yaml to the client node')
destination_location = \
('rgw-tests/ceph-qe-scripts/rgw/%s/tests/multisite/yamls/' % version + temp_yaml_file)
mclient.put_file(local_file, destination_location)
mclient.run(args=['ls', '-lt',
'rgw-tests/ceph-qe-scripts/rgw/%s/tests/multisite/yamls/' % version])
mclient.run(args=['cat',
'rgw-tests/ceph-qe-scripts/rgw/%s/tests/multisite/yamls/' % version + temp_yaml_file])
# mclient.run(args=['sudo', 'rm', '-f', run.Raw('%s' % local_file)], check_status=False)
mclient.run(
args=[
run.Raw(
'sudo venv/bin/python2.7 rgw-tests/ceph-qe-scripts/rgw/%s/tests/multisite/%s '
'-c rgw-tests/ceph-qe-scripts/rgw/%s/tests/multisite/yamls/%s '
% (version, 'user_create.py', version, temp_yaml_file))])
log.info('copy user_details file from source client into local dir')
user_file = mclient.get_file('user_details', '/tmp')
time.sleep(10)
log.info('copy user_file to target client')
# if mclient != tclient:
# tclient.put_file(user_file, 'user_details')
remotes = get_remotes(ctx)
for remote in remotes:
if remote != mclient:
log.info('copy user_details to {}'.format(remote))
remote.put_file(user_file, 'user_details')
def test_data(tclient, test_name, script_name, version):
tclient.run(args=['ls', '-lt',
'rgw-tests/ceph-qe-scripts/rgw/%s/tests/multisite/yamls/' % version])
tclient.run(args=['cat',
'rgw-tests/ceph-qe-scripts/rgw/%s/tests/multisite/yamls/' % version + test_name])
tclient.run(
args=[
run.Raw(
'sudo venv/bin/python2.7 rgw-tests/ceph-qe-scripts/rgw/%s/tests/multisite/%s '
'-c rgw-tests/ceph-qe-scripts/rgw/%s/tests/multisite/yamls/%s '
% (version, script_name, version, test_name))])
def copy_file_from(src_node, dest_node, file_path='/home/ubuntu/io_info.yaml'):
# copies to /tmp dir and then puts it in destination machines
log.info('copy of io_info.yaml from %s initiated' % src_node)
# io_info_file = src_node.get_file(file_path, '/tmp')
io_info_file = teuthology.get_file(
remote=src_node,
path=file_path,
)
time.sleep(10)
log.info('copy io_info_file to %s' % dest_node)
teuthology.sudo_write_file(
remote=dest_node,
path=file_path,
data=io_info_file)
# dest_node.put_file(io_info_file, file_name)
log.info('copy of io_info.yaml completed')
@contextlib.contextmanager
def pull_io_info(ctx, config):
# copy file from the node running tests to all other rgw nodes
"""
- multisite_test.pull_io_info:
"""
log.info('starting the task')
log.info('config %s' % config)
if config is None:
config = {}
tclient = ctx.multisite_test.target
remotes = get_remotes(ctx)
for remote in remotes:
if remote != tclient:
copy_file_from(tclient, remote)
yield
def cleanup(ctx):
remotes = get_remotes(ctx)
for remote in remotes:
cleanup = lambda x: remote.run(args=[run.Raw('sudo rm -rf %s' % x)])
soot = ['venv', 'rgw-tests', '*.json', 'Download.*', 'Download', '*.mpFile', 'x*', 'key.*', 'Mp.*',
'*.key.*', 'user_details', 'io_info.yaml']
list(map(cleanup, soot))
def clone_repo(ctx):
remotes = get_remotes(ctx)
for remote in remotes:
remote.run(args=['mkdir', 'rgw-tests'])
remote.run(
args=[
'cd',
'rgw-tests',
run.Raw(';'),
'git',
'clone',
'-b',
'add-encryption',
'https://github.com/red-hat-storage/ceph-qe-scripts.git',
])
remote.run(args=['virtualenv', 'venv'])
remote.run(
args=[
'source',
'venv/bin/activate',
run.Raw(';'),
run.Raw('pip install boto boto3 names python-swiftclient PyYaml psutil ConfigParser simplejson'),
run.Raw(';'),
'deactivate'])
@contextlib.contextmanager
def userexec(ctx, config):
# Create user and copy the user_details to target client
"""
-multisite-test.userexec:
test_dir_version: v1
master_client: source.rgw.0
master_config:
user_count: 3
"""
log.info('starting the task')
log.info('config %s' % config)
if config is None:
config = {}
if not hasattr(ctx, 'userexec'):
ctx.userexec = argparse.Namespace()
assert isinstance(config, dict), \
"task userexec only supports a dictionary for configuration"
log.info('cloning the repo to client machines')
cleanup(ctx)
clone_repo(ctx)
master_client = config['master_client']
(mclient,) = iter(ctx.cluster.only(master_client).remotes.keys())
user_config = config['master_config']
user_data = None
user_data = dict(
config=dict(
user_count=user_config['user_count'],
)
)
if config['test_dir_version'] == 'v1':
user_creation(ctx, user_data, mclient, version='v1')
elif config['test_dir_version'] == 'v2':
user_creation(ctx, user_data, mclient, version='v2')
yield
def execute_v1(tclient, config):
# Tests using boto2 here
test_name = config['test-name'] + ".yaml"
script_name = config['test-name'] + ".py"
log.info('test name :%s' % config['test-name'])
# Execute test
test_data(tclient, test_name, script_name, version='v1')
def execute_v2(tclient, config):
# Tests using boto3 here
test_name = config['test_name'] + ".yaml"
script_name = config['script_name'] + ".py"
log.info('test name :%s' % config['test_name'])
# Execute test
test_data(tclient, test_name, script_name, version='v2')
@contextlib.contextmanager
def task(ctx, config):
"""
- multisite-test:
test-name: test_multipart_upload_download
test_dir_version: v1
test_client: c2.rgw.1
target_config:
bucket_count: 5
min_file_size: 100
max_file_size: 200
- multisite-test:
test_name: test_bucket_policy_replace
script_name: test_bucket_policy_ops
test_dir_version: v2
test_client: c1.rgw.0
"""
log.info('starting the task')
log.info('config %s' % config)
assert isinstance(config, dict), \
"task multisite_test only supports a dictionary for configuration"
# Target node where the tests will be run. Can be primary or secondary multisite zones.
target_client = config['test_client']
(tclient,) = iter(ctx.cluster.only(target_client).remotes.keys())
if not hasattr(ctx, 'multisite_test'):
ctx.multisite_test = argparse.Namespace()
ctx.multisite_test.target = tclient
ctx.multisite_test.version = config['test_dir_version']
if not hasattr(ctx, 'userexec'):
cleanup(ctx)
clone_repo(ctx)
log.info('test_dir_version: %s' % config['test_dir_version'])
if config['test_dir_version'] == 'v1':
execute_v1(tclient, config)
if config['test_dir_version'] == 'v2':
execute_v2(tclient, config)
yield
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#- Author : (DEK) Devendra Kavthekar
# program068:
# Please write a program using generator to print the even numbers between
# 0 and n in comma separated form while n is input by console.
# Example:
# If the following n is given as input to the program:
# 10
# Then, the output of the program should be:
# 0,2,4,6,8,10
# Hints:
# Use yield to produce the next value in generator.
# In case of input data being supplied to the question, it should be
# assumed to be a console input.
def evenGenerator(endValue):
eveniter = 0
while eveniter <= endValue:
if eveniter % 2 == 0:
yield eveniter
eveniter += 1
def main(endValue):
result = []
evenGen = evenGenerator(int(endValue))
for res in evenGen:
result.append(str(res))
# print result
print ",".join(result)
if __name__ == '__main__':
main(raw_input("Input endLimit: "))
|
python
|
import numpy as np
from typing import Union, Optional
from ray.rllib.models.action_dist import ActionDistribution
from ray.rllib.utils.annotations import override
from ray.rllib.utils.exploration.exploration import Exploration, TensorType
from ray.rllib.utils.framework import try_import_tf, try_import_torch, \
get_variable
from ray.rllib.utils.from_config import from_config
from ray.rllib.utils.schedules import Schedule, PiecewiseSchedule
from grl.rllib_tools.models.valid_actions_fcnet import ILLEGAL_ACTION_LOGITS_PENALTY
tf1, tf, tfv = try_import_tf()
torch, _ = try_import_torch()
import torch
class ValidActionsEpsilonGreedy(Exploration):
"""Epsilon-greedy Exploration class that produces exploration actions.
When given a Model's output and a current epsilon value (based on some
Schedule), it produces a random action (if rand(1) < eps) or
uses the model-computed one (if rand(1) >= eps).
Modified from the original RLlib implementation to never consider actions with logits
almost as low as ILLEGAL_ACTION_LOGITS_PENALTY. Those logits correspond to illegal action in the environment.
"""
def __init__(self,
action_space,
*,
framework: str,
initial_epsilon: float = 1.0,
final_epsilon: float = 0.05,
epsilon_timesteps: int = int(1e5),
epsilon_schedule: Optional[Schedule] = None,
**kwargs):
"""Create an EpsilonGreedy exploration class.
Args:
initial_epsilon (float): The initial epsilon value to use.
final_epsilon (float): The final epsilon value to use.
epsilon_timesteps (int): The time step after which epsilon should
always be `final_epsilon`.
epsilon_schedule (Optional[Schedule]): An optional Schedule object
to use (instead of constructing one from the given parameters).
"""
assert framework is not None
super().__init__(
action_space=action_space, framework=framework, **kwargs)
self.epsilon_schedule = \
from_config(Schedule, epsilon_schedule, framework=framework) or \
PiecewiseSchedule(
endpoints=[
(0, initial_epsilon), (epsilon_timesteps, final_epsilon)],
outside_value=final_epsilon,
framework=self.framework)
# The current timestep value (tf-var or python int).
self.last_timestep = get_variable(
np.array(0, np.int64),
framework=framework,
tf_name="timestep",
dtype=np.int64)
# Build the tf-info-op.
if self.framework in ["tf2", "tf", "tfe"]:
self._tf_info_op = self.get_info()
@override(Exploration)
def get_exploration_action(self,
*,
action_distribution: ActionDistribution,
timestep: Union[int, TensorType],
explore: bool = True):
q_values = action_distribution.inputs
return self._get_torch_exploration_action(q_values, explore,
timestep)
def _get_torch_exploration_action(self, q_values: TensorType,
explore: bool,
timestep: Union[int, TensorType]):
"""Torch method to produce an epsilon exploration action.
Args:
q_values (Tensor): The Q-values coming from some Q-model.
Returns:
torch.Tensor: The exploration-action.
"""
self.last_timestep = timestep
_, exploit_action = torch.max(q_values, 1)
action_logp = torch.zeros_like(exploit_action)
# Explore.
if explore:
# Get the current epsilon.
epsilon = self.epsilon_schedule(self.last_timestep)
batch_size = q_values.size()[0]
# Mask out actions whose Q-values are almost as low as ILLEGAL_ACTION_LOGITS_PENALTY so that we don't
# even consider them for exploration.
            # We compare to 0.01 * ILLEGAL_ACTION_LOGITS_PENALTY instead of just ILLEGAL_ACTION_LOGITS_PENALTY to avoid
# any ambiguity with floating point precision on extremely low numbers.
random_valid_action_logits = torch.where(
q_values <= 0.01 * ILLEGAL_ACTION_LOGITS_PENALTY,
torch.ones_like(q_values) * 0.0, torch.ones_like(q_values))
# A random action.
random_actions = torch.squeeze(
torch.multinomial(random_valid_action_logits, 1), axis=1)
# Pick either random or greedy.
action = torch.where(
torch.empty(
(batch_size,)).uniform_().to(self.device) < epsilon,
random_actions, exploit_action)
return action, action_logp
# Return the deterministic "sample" (argmax) over the logits.
else:
return exploit_action, action_logp
@override(Exploration)
def get_info(self, sess: Optional["tf.Session"] = None):
if sess:
return sess.run(self._tf_info_op)
eps = self.epsilon_schedule(self.last_timestep)
return {"cur_epsilon": eps}
|
python
|
version = "2021.11.29.01"
|
python
|
import logging
from itertools import chain
from pprint import pformat
from future.utils import lmap
from foxylib.tools.collections.collections_tool import merge_dicts, DictTool, lchain
from foxylib.tools.database.elasticsearch.elasticsearch_tool import ElasticsearchTool
from foxylib.tools.json.json_tool import JsonTool
class SampleIndex:
@classmethod
def query2hits(cls, client, index, query):
results = ElasticsearchTool.search_scroll2result_iter(
client,
{"index": index,
"body": {'query': query},
"request_timeout": 3,
},
scroll="60s",
)
hits = chain.from_iterable(map(ElasticsearchTool.result2hits, results))
return hits
@classmethod
def hit2v1(cls, hit):
return JsonTool.down(hit, ['_source', 'v1'])
@classmethod
def hit2v2(cls, hit):
return JsonTool.down(hit, ['_source', 'v2'])
@classmethod
def hits2replace_many_v2(cls, client, hits_in, index):
dict_v1_to_v2 = merge_dicts([
{cls.hit2v1(hit_in): cls.hit2v2(hit_in)} for hit_in in hits_in
], vwrite=DictTool.VWrite.no_duplicate_key)
queries_v1 = [ElasticsearchTool.key_value2query_match("v1", v1) for v1 in dict_v1_to_v2.keys()]
query = ElasticsearchTool.queries2should(queries_v1)
hits = cls.query2hits(client, index, query)
def hit2actions(hit):
if not hit:
return []
v1 = cls.hit2v1(hit)
v2 = dict_v1_to_v2.get(v1)
if not v2:
return []
id_this = ElasticsearchTool.hit2id(hit)
return [
{"update": {"_id": id_this, "_index": index, "_type": "doc"}},
{"doc": {"v2": v2}},
]
actions = lchain(*map(hit2actions, hits))
ElasticsearchTool.actions2execute_bulk(client, actions)
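# Hedged usage sketch (assumptions: a connected Elasticsearch `client` and an
# `index` whose documents carry "v1"/"v2" fields, as the helpers above expect):
#
#   hits_in = list(SampleIndex.query2hits(client, index, {"match_all": {}}))
#   SampleIndex.hits2replace_many_v2(client, hits_in, index)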
|
python
|
from django.shortcuts import render
from django.http import HttpResponse
from .models import User
# Create your views here.
def help(request):
helpdict = {'help_insert':'HELP PAGE'}
return render(request,'appTwo/help.html',context=helpdict)
def index(request):
return render(request,'appTwo/index.html')
def users(request):
user_list = User.objects.order_by('first_name')
user_dict = {"users":user_list}
return render(request,'appTwo/users.html',context=user_dict)
|
python
|
import wx
from wx.adv import CalendarCtrl, GenericCalendarCtrl
from get_file import get_file
from wx import adv
from static.MButton import MButton
class TimeFrame(wx.Frame):
    '''A schedule-like window: events and their time slots can be added, and the user is reminded when the corresponding time arrives'''
def __init__(self, parent, data=None):
wx.Frame.__init__(self, parent, title='日程表', size=(450, 450))
        self.date_event = {} if data is None else data  # date: list of events, like {'2018-12-12':[{'event':'event1', 'time':'12:00', 'place':'place1', 'remark':'remark1'}, {'event':'event2', 'time':'12:00', 'place':'place2', 'remark':'remark2'}]}
self.SetMinSize(wx.Size(450, 450))
self.SetMaxSize(wx.Size(450, 450))
self.SetBackgroundColour(wx.Colour(255, 255, 255))
self.init_ui()
self.SetSizer(self.main_sizer)
self.Center()
self.Show()
        # set the window icon
icon = wx.Icon()
icon.CopyFromBitmap(wx.Bitmap(get_file("\\images\\icon.ico"), wx.BITMAP_TYPE_ANY))
self.SetIcon(icon)
self.refresh_today_event_list()
def init_ui(self):
        # the sizer holds the calendar, an add-event button, a delete-event button, and today's schedule list
self.main_sizer = wx.GridBagSizer(5, 5)
self.main_sizer.Add(self.init_calendar(), pos=(0, 0), span=(4, 1), flag=wx.ALL, border=0)
self.main_sizer.Add(self.init_add_event_button(), pos=(0, 1), flag=wx.ALL, border=0)
self.main_sizer.Add(self.init_delete_event_button(), pos=(1, 1), flag=wx.ALL, border=0)
self.main_sizer.Add(self.init_to_today_event_button(), pos=(2, 1), flag=wx.ALL, border=0)
self.main_sizer.Add(wx.StaticText(self, label='温馨提示: 需要关闭此窗口\n才有提示功能哦\n关闭时会自动保存日程表的'), pos=(3, 1), flag=wx.ALL, border=0)
self.main_sizer.Add(self.init_today_event_list(), pos=(4, 0), span=(1, 2), flag=wx.EXPAND, border=0)
self.main_sizer.AddGrowableCol(1)
self.main_sizer.AddGrowableRow(2)
def init_to_today_event_button(self):
        # initialize the "back to today" button
self.to_today_event_button = MButton(self, '回到今天')
self.to_today_event_button.Bind(wx.EVT_BUTTON, self.to_today_event)
self.to_today_event_button.SetFont(wx.Font(11, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, '微软雅黑'))
return self.to_today_event_button
def init_calendar(self):
        # initialize the calendar
self.calendar = GenericCalendarCtrl(self, wx.ID_ANY, wx.DateTime.Today(), style=adv.CAL_SHOW_HOLIDAYS | adv.CAL_MONDAY_FIRST|adv.CAL_SHOW_SURROUNDING_WEEKS|adv.CAL_SEQUENTIAL_MONTH_SELECTION|wx.NO_BORDER)
self.calendar.SetFont(wx.Font(11, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, '微软雅黑'))
        # trigger on_calendar_sel_changed whenever the selected date changes
self.calendar.Bind(adv.EVT_CALENDAR_SEL_CHANGED, self.on_calendar_sel_changed)
self.calendar.Bind(wx.EVT_SIZE, self.on_calendar_size)
return self.calendar
def init_add_event_button(self):
        # initialize the add-event button
self.add_event_button = MButton(self, '添加事件')
self.add_event_button.Bind(wx.EVT_BUTTON, self.add_event)
self.add_event_button.SetFont(wx.Font(11, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, '微软雅黑'))
return self.add_event_button
def init_delete_event_button(self):
        # initialize the delete-event button
self.delete_event_button = MButton(self, '删除事件')
self.delete_event_button.Bind(wx.EVT_BUTTON, self.delete_event)
self.delete_event_button.SetFont(wx.Font(11, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, '微软雅黑'))
return self.delete_event_button
def init_today_event_list(self):
        # initialize today's event list
self.today_event_list = wx.ListCtrl(self, style=wx.LC_REPORT |wx.NO_BORDER)
self.today_event_list.InsertColumn(0, '事件')
self.today_event_list.InsertColumn(1, '时间')
self.today_event_list.InsertColumn(2, '地点')
self.today_event_list.InsertColumn(3, '备注')
        # set the width of the remark column
self.today_event_list.SetColumnWidth(3, 150)
self.today_event_list.SetFont(wx.Font(11, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, '微软雅黑'))
return self.today_event_list
def to_today_event(self, event):
        # jump back to today's events
self.calendar.SetDate(wx.DateTime.Today())
self.refresh_today_event_list()
def refresh_today_event_list(self):
print("refresh")
        # refresh today's event list
self.today_event_list.DeleteAllItems()
date = self.calendar.GetDate()
date_str = date.FormatISODate()
print(self.date_event)
print(date_str)
if date_str not in self.date_event:
return
for event in self.date_event[date_str]:
            # if the event's time (which contains both the date and the time of day) falls on self.calendar.GetDate(), add it to today's event list
if event["time"].startswith(date_str):
                # wxPython Phoenix API (InsertStringItem/SetStringItem are the removed classic names)
                self.today_event_list.InsertItem(0, event["event"])
                self.today_event_list.SetItem(0, 1, str(event["time"]))
                self.today_event_list.SetItem(0, 2, event["place"])
                self.today_event_list.SetItem(0, 3, event["remark"])
                self.today_event_list.SetItemData(0, 0)
self.today_event_list.SetColumnWidth(0, wx.LIST_AUTOSIZE)
self.today_event_list.SetColumnWidth(1, wx.LIST_AUTOSIZE)
self.today_event_list.SetColumnWidth(2, wx.LIST_AUTOSIZE)
self.today_event_list.SetColumnWidth(3, wx.LIST_AUTOSIZE)
def on_calendar_size(self, event):
        # refresh the calendar when its size changes
self.calendar.SetSize(event.GetSize())
self.calendar.Refresh()
event.Skip()
def add_event(self, event_):
        # add an event
self.add_event_dialog = AddEventDialog(self)
self.add_event_dialog.ShowModal()
self.add_event_dialog.Destroy()
if not self.add_event_dialog.is_ok:
return
event = self.add_event_dialog.event_text.GetValue()
        # there are two pickers, one for the time and one for the date; the picked date carries a 00:00:00 time part, so date and time are combined separately here
time = self.add_event_dialog.time_choose.GetValue()
date = self.add_event_dialog.date_choose.GetValue()
time_str = date.FormatISODate() + ' ' + time.FormatISOTime()
print(time_str)
place = self.add_event_dialog.place_text.GetValue()
remark = self.add_event_dialog.remark_text.GetValue()
self.add_event_to_date(event, time_str, place, remark)
def add_event_to_date(self, event, time, place, remark):
        # attach the event to its date
date = time.split(' ')[0]
if date in self.date_event:
self.date_event[date].append(dict(event=event, time=time, place=place, remark=remark))
else:
self.date_event[date] = [dict(event=event, time=time, place=place, remark=remark)]
self.refresh_today_event_list()
def delete_event(self, event):
        # delete an event
date = self.calendar.GetDate()
        # get the event selected in self.today_event_list
index = self.today_event_list.GetFirstSelected()
if index == -1:
return
event_str = self.today_event_list.GetItemText(index)
        # get the time of the selected event
time_str = self.today_event_list.GetItem(index, 1).GetText()
        # find the matching event in self.date_event and remove it
for event in self.date_event[date.FormatISODate()]:
if event["event"] == event_str and event["time"] == time_str:
self.date_event[date.FormatISODate()].remove(event)
break
self.refresh_today_event_list()
def on_calendar_sel_changed(self, event):
self.refresh_today_event_list()
class AddEventDialog(wx.Dialog):
    '''A dialog that lets the user enter an event, its time, a place (optional) and a remark (optional)'''
def __init__(self, parent):
wx.Dialog.__init__(self, parent, title='添加事件', size=(400, 250))
self.parent = parent
self.is_ok = False
self.SetMinSize(wx.Size(250, 250))
self.init_ui()
self.Center()
self.Show()
def init_ui(self):
        # initialize the UI
self.main_sizer = wx.GridBagSizer(5, 5)
self.main_sizer.Add(self.init_event_label(), pos=(0, 0), flag=wx.ALL, border=5)
self.main_sizer.Add(self.init_event_text(), pos=(0, 1), span=(1, 2), flag=wx.EXPAND, border=5)
self.main_sizer.Add(self.init_time_label(), pos=(1, 0), flag=wx.ALL, border=5)
self.main_sizer.Add(self.init_time_choose(), pos=(1, 1), span=(1, 2), flag=wx.EXPAND, border=5)
self.main_sizer.Add(self.init_place_label(), pos=(2, 0), flag=wx.ALL, border=5)
self.main_sizer.Add(self.init_place_text(), pos=(2, 1), span=(1, 2), flag=wx.EXPAND, border=5)
self.main_sizer.Add(self.init_remark_label(), pos=(3, 0), flag=wx.ALL, border=5)
self.main_sizer.Add(self.init_remark_text(), pos=(3, 1), span=(1, 2), flag=wx.EXPAND, border=5)
self.main_sizer.Add(self.init_ok_button(), pos=(4, 0), flag=wx.ALL, border=5)
self.main_sizer.Add(self.init_cancel_button(), pos=(4, 1), flag=wx.ALL, border=5)
self.SetSizer(self.main_sizer)
def init_event_label(self):
        # initialize the event label
self.event_label = wx.StaticText(self, label='事件')
self.event_label.SetFont(wx.Font(11, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, '微软雅黑'))
return self.event_label
def init_event_text(self):
        # initialize the event text box
self.event_text = wx.TextCtrl(self)
self.event_text.SetFont(wx.Font(11, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, '微软雅黑'))
return self.event_text
def init_time_label(self):
        # initialize the time label
self.time_label = wx.StaticText(self, label='时间')
self.time_label.SetFont(wx.Font(11, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, '微软雅黑'))
return self.time_label
def init_time_choose(self):
#time choice, both choose the time and the date
self.sizer = wx.BoxSizer(wx.HORIZONTAL)
self.date_choose = adv.DatePickerCtrl(self, size=(100, -1), style=adv.DP_DROPDOWN | adv.DP_SHOWCENTURY)
self.time_choose = adv.TimePickerCtrl(self, size=(100, -1))
self.date_choose.SetFont(wx.Font(11, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, '微软雅黑'))
self.time_choose.SetFont(wx.Font(11, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, '微软雅黑'))
self.sizer.Add(self.date_choose, 0, wx.ALL, 5)
self.sizer.Add(self.time_choose, 0, wx.ALL, 5)
return self.sizer
def init_place_label(self):
        # initialize the place label
self.place_label = wx.StaticText(self, label='地点(可选)')
self.place_label.SetFont(wx.Font(11, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, '微软雅黑'))
return self.place_label
def init_place_text(self):
        # initialize the place text box
self.place_text = wx.TextCtrl(self)
self.place_text.SetFont(wx.Font(11, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, '微软雅黑'))
return self.place_text
def init_remark_label(self):
        # initialize the remark label
self.remark_label = wx.StaticText(self, label='备注(可选)')
self.remark_label.SetFont(wx.Font(11, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, '微软雅黑'))
return self.remark_label
def init_remark_text(self):
        # initialize the remark text box
        # multi-line text box
self.remark_text = wx.TextCtrl(self, style=wx.TE_MULTILINE)
self.remark_text.SetFont(wx.Font(11, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, '微软雅黑'))
return self.remark_text
def init_ok_button(self):
        # initialize the OK button
self.ok_button = MButton(self, '确定')
self.ok_button.SetFont(wx.Font(11, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, '微软雅黑'))
self.ok_button.Bind(wx.EVT_BUTTON, self.on_ok_button)
return self.ok_button
def init_cancel_button(self):
        # initialize the cancel button
self.cancel_button = MButton(self, '取消')
self.cancel_button.SetFont(wx.Font(11, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, '微软雅黑'))
self.cancel_button.Bind(wx.EVT_BUTTON, self.on_cancel_button)
return self.cancel_button
def on_ok_button(self, event):
        # OK button handler
event_name = self.event_text.GetValue()
event_date = self.date_choose.GetValue()
print(event_date.FormatISODate())
if not event_name:
wx.MessageBox('事件不能为空!', '错误', wx.OK | wx.ICON_ERROR)
return
if event_date.FormatISODate() in self.parent.date_event:
for i in self.parent.date_event[event_date.FormatISODate()]:
if event_name == i["event"]:
wx.MessageBox('事件已存在!', '错误', wx.OK | wx.ICON_ERROR)
return
self.is_ok = True
self.Destroy()
def on_cancel_button(self, event):
        # cancel button handler
self.Destroy()
if __name__ == '__main__':
app = wx.App()
TimeFrame(None)
app.MainLoop()
|
python
|
import argparse
import cv2
# ap = argparse.ArgumentParser()
# ap.add_argument("--i", "--image", required=True, help="path to input image")
# args= vars(ap.parse_args())
# image = cv2.imread(args["image"])
image = cv2.imread("lena.png")
(h, w, c) = image.shape[:3]
# height = number of rows, width = number of columns, channels = number of channels
print("Width : {} pixels".format(w))
print("Height : {} pixels".format(h))
print("Channels : {}".format(c))
cv2.imshow("Image", image)
cv2.waitKey(0)
cv2.imwrite("New Image.jpg", image)
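# Hedged extra example (assumption: the same 3-channel image loaded above):
# OpenCV indexes pixels row-major and stores channels as (B, G, R), so a
# single pixel reads as:
(b, g, r) = image[0, 0]
print("Pixel at (0, 0) - Red: {}, Green: {}, Blue: {}".format(r, g, b))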
|
python
|
import discord
from discord.ext import commands
from discord.utils import get
class c259(commands.Cog, name="c259"):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command(name='Abyssal_Awakening', aliases=['c259', 'Abyssal_10'])
async def example_embed(self, ctx):
embed = discord.Embed(title='Abyssal Awakening',
color=0x1D9E74)
embed.set_thumbnail(url='https://www.duelingbook.com/images/custom-pics/2300000/2360321.jpg')
embed.add_field(name='Status (Archetype)', value='Casual:3/Tournament:3 (Abyssal)', inline=True)
embed.add_field(name='Type', value='Spell/Normal', inline=False)
embed.add_field(name='Card Effect', value='Send 1 "Abyssal" card from your hand or field to the GY, and if you do, Set 1 "Abyssal" card with a different type (Monster, Spell or Trap) from your Deck to your field, and if it was a Spell/Trap Card, you can activate it this turn. You can only activate 1 "Abyssal Awakening" per turn.', inline=False)
embed.set_footer(text='Set Code: ANCF')
await ctx.send(embed=embed)
def setup(bot: commands.Bot):
bot.add_cog(c259(bot))
|
python
|
# Imports
from django.db import models
from django.contrib.auth.models import User
# Lead
class Lead(models.Model):
name = models.CharField(max_length=255)
email = models.EmailField(max_length=255, unique=True)
message = models.CharField(max_length=500, blank=True)
owner = models.ForeignKey(User, related_name='leads', on_delete=models.CASCADE, null=True)
created_at = models.DateTimeField(auto_now_add=True)
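# Hedged usage note (not part of the original module): related_name='leads'
# exposes the reverse relation on User, so both of these return a user's leads:
#
#   some_user.leads.all()
#   Lead.objects.filter(owner=some_user)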
|
python
|
# ckwg +28
# Copyright 2018 by Kitware, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither name of Kitware, Inc. nor the names of any contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from PIL import Image
from torch.autograd import Variable
import numpy as np
from vital.types import BoundingBox
from vital.types import DetectedObject
from vital.types import DetectedObjectSet
from kwiver.arrows.pytorch.seg_utils import *
import cv2  # used below to recover bounding boxes from contours (reconstructed code)
# TODO Remove hardcoded class names
class_names = np.array([
'background',
'aeroplane',
'bicycle',
'bird',
'boat',
'bottle',
'bus',
'car',
'cat',
'chair',
'cow',
'diningtable',
'dog',
'horse',
'motorbike',
'person',
'potted plant',
'sheep',
'sofa',
'train',
'tv/monitor',
])
class FCNSegmentation(object):
def __init__(self, model, cuda=True):
self.cuda = cuda
self.model = model
def __call__(self, in_img, fcn_flag=True):
if fcn_flag:
return self._apply_fcn(in_img)
else:
return self._apply_contour(in_img)
    def _apply_fcn(self, in_img):
        self.model.eval()
        img = transform(in_img)
        if self.cuda:
            img = img.cuda()
        v_img = Variable(img[None], volatile=True)
        score = self.model(v_img)
        lbl_pred = score.data.max(1)[1].cpu().numpy()[:, :, :]
        # NOTE: the original code referenced `contours`/`x`/`y`/`w`/`h` without
        # defining them; the extraction below is a reconstruction (assumption):
        # bounding boxes are taken from contours of the non-background pixels.
        fg_mask = (lbl_pred[0] > 0).astype(np.uint8)
        contours = cv2.findContours(fg_mask, cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)[-2]
        # TODO Remove hardcoded number of labels
        lbl_pred_overlap = label2rgb(lbl_pred, img=in_img, n_labels=21,
                                     label_names=class_names)
        lbl_pred = label2rgb(lbl_pred, n_labels=21)
        dos = DetectedObjectSet()
        for cnt in contours:
            x, y, w, h = cv2.boundingRect(cnt)
            dobj = DetectedObject(bbox=BoundingBox(float(x), float(y),
                float(x + w), float(y + h)), confid=1.0)
            dos.add(dobj)
        return dos, in_img, lbl_pred_overlap
    def _apply_contour(self, in_img):
        # NOTE: reconstruction (assumption) -- the original referenced an undefined
        # `contours`; use contours of an Otsu-thresholded grayscale image instead.
        gray = cv2.cvtColor(in_img, cv2.COLOR_RGB2GRAY)
        _, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
        contours = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
        dos = DetectedObjectSet()
        for cnt in contours:
            x, y, w, h = cv2.boundingRect(cnt)
            dobj = DetectedObject(bbox=BoundingBox(float(x), float(y),
                float(x + w), float(y + h)), confid=1.0)
            dos.add(dobj)
        return dos, in_img, None
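# Hedged usage sketch (assumptions: `model` is a trained FCN producing per-pixel
# class scores and `frame` is an in-memory image array; names are illustrative):
#
#   segmenter = FCNSegmentation(model, cuda=True)
#   detections, frame_out, overlay = segmenter(frame, fcn_flag=True)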
|
python
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Detection model trainer.
This file provides a generic training method that can be used to train a
DetectionModel.
"""
import functools
import google.protobuf.text_format as text_format
import tensorflow as tf
from object_detection.builders import optimizer_builder
from object_detection.builders import preprocessor_builder
from object_detection.core import batcher
from object_detection.core import preprocessor
from object_detection.core import standard_fields as fields
from object_detection.utils import ops as util_ops
from object_detection.utils import variables_helper
from deployment import model_deploy
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.framework import ops
from tensorflow.python.framework import dtypes
from slim import learning as custom_learning
slim = tf.contrib.slim
def _create_input_queue(batch_size_per_clone, create_tensor_dict_fn,
batch_queue_capacity, num_batch_queue_threads,
prefetch_queue_capacity, data_augmentation_options,
ignore_options=None, mtl_window=False, mtl_edgemask=False):
"""Sets up reader, prefetcher and returns input queue.
Args:
batch_size_per_clone: batch size to use per clone.
create_tensor_dict_fn: function to create tensor dictionary.
batch_queue_capacity: maximum number of elements to store within a queue.
num_batch_queue_threads: number of threads to use for batching.
prefetch_queue_capacity: maximum capacity of the queue used to prefetch
assembled batches.
data_augmentation_options: a list of tuples, where each tuple contains a
data augmentation function and a dictionary containing arguments and their
values (see preprocessor.py).
ignore_options: exception condition of training loss
Returns:
input queue: a batcher.BatchQueue object holding enqueued tensor_dicts
(which hold images, boxes and targets). To get a batch of tensor_dicts,
call input_queue.Dequeue().
"""
tensor_dict = create_tensor_dict_fn()
tensor_dict[fields.InputDataFields.image] = tf.expand_dims(
tensor_dict[fields.InputDataFields.image], 0)
images = tensor_dict[fields.InputDataFields.image]
float_images = tf.to_float(images)
tensor_dict[fields.InputDataFields.image] = float_images
preprocessor.make_ignore_list(tensor_dict, ignore_options)
if mtl_window:
for option in data_augmentation_options:
if 'random_horizontal_flip' in option[0].func_name:
option[1][fields.InputDataFields.window_boxes] = tensor_dict[fields.InputDataFields.window_boxes]
if mtl_edgemask:
for option in data_augmentation_options:
if 'random_horizontal_flip' in option[0].func_name:
option[1][fields.InputDataFields.groundtruth_edgemask_masks] = tensor_dict[fields.InputDataFields.groundtruth_edgemask_masks]
if data_augmentation_options:
tensor_dict = preprocessor.preprocess(tensor_dict, data_augmentation_options,
mtl_window=mtl_window, mtl_edgemask=mtl_edgemask)
input_queue = batcher.BatchQueue(
tensor_dict,
batch_size=batch_size_per_clone,
batch_queue_capacity=batch_queue_capacity,
num_batch_queue_threads=num_batch_queue_threads,
prefetch_queue_capacity=prefetch_queue_capacity)
return input_queue
def _get_inputs(input_queue, num_classes, with_filename=False):
"""Dequeue batch and construct inputs to object detection model.
Args:
input_queue: BatchQueue object holding enqueued tensor_dicts.
num_classes: Number of classes.
Returns:
images: a list of 3-D float tensor of images.
locations_list: a list of tensors of shape [num_boxes, 4]
containing the corners of the groundtruth boxes.
classes_list: a list of padded one-hot tensors containing target classes.
masks_list: a list of 3-D float tensors of shape [num_boxes, image_height,
image_width] containing instance masks for objects if present in the
input_queue. Else returns None.
"""
read_data_list = input_queue.dequeue()
label_id_offset = 1
def extract_images_and_targets(read_data):
image = read_data[fields.InputDataFields.image]
location_gt = read_data[fields.InputDataFields.groundtruth_boxes]
classes_gt = tf.cast(read_data[fields.InputDataFields.groundtruth_classes], tf.int32)
edgemask_gt = tf.cast(read_data[fields.InputDataFields.groundtruth_edgemask_masks], tf.float32)
ignore_gt = read_data.get(fields.InputDataFields.groundtruth_ignore)
    if ignore_gt.get_shape() != classes_gt.get_shape():
ignore_gt = tf.zeros_like(classes_gt, dtype=tf.bool)
masks_gt = read_data.get(fields.InputDataFields.groundtruth_instance_masks)
classes_gt -= label_id_offset
classes_gt = util_ops.padded_one_hot_encoding(indices=classes_gt, depth=num_classes, left_pad=0)
filename = None
if with_filename:
filename = read_data[fields.InputDataFields.filename]
# window box gt
window_location_gt = read_data[fields.InputDataFields.window_boxes]
window_classes_gt_string = read_data[fields.InputDataFields.window_classes]
st = tf.string_split(window_classes_gt_string)
st_values_float = tf.string_to_number(st.values)
window_classes_gt = tf.sparse_to_dense(st.indices, st.dense_shape, st_values_float)
window_classes_gt = tf.reshape(window_classes_gt, [-1, num_classes + 1])
# closeness gt
object_closeness_gt_string = read_data[fields.InputDataFields.groundtruth_closeness]
st = tf.string_split(object_closeness_gt_string)
st_values_float = tf.string_to_number(st.values)
closeness_classes_gt = tf.sparse_to_dense(st.indices, st.dense_shape, st_values_float)
closeness_classes_gt = tf.reshape(closeness_classes_gt, [-1, num_classes + 1])
return image, location_gt, ignore_gt, classes_gt, masks_gt, filename, \
window_location_gt, window_classes_gt, closeness_classes_gt, edgemask_gt
return zip(*map(extract_images_and_targets, read_data_list))
def _create_losses(input_queue, create_model_fn, show_image_summary,
update_schedule, **kwargs):
"""Creates loss function for a DetectionModel.
Args:
input_queue: BatchQueue object holding enqueued tensor_dicts.
create_model_fn: A function to create the DetectionModel.
kwargs: Additional arguments to make model.
"""
if kwargs.has_key('mtl'):
mtl = kwargs['mtl']
del kwargs['mtl']
detection_model = create_model_fn()
(images, groundtruth_boxes_list, groundtruth_ignore_list,
groundtruth_classes_list, groundtruth_masks_list,
filenames, window_boxes_list, window_classes_list, groundtruth_closeness_list, groundtruth_edgemask_list) \
= _get_inputs(input_queue, detection_model.num_classes, with_filename=True)
if show_image_summary:
detection_model.provide_image_infos(images, filenames)
images = [detection_model.preprocess(image) for image in images]
images = tf.concat(images, 0)
if any(mask is None for mask in groundtruth_masks_list):
groundtruth_masks_list = None
detection_model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list,
groundtruth_closeness_list,
groundtruth_ignore_list,
groundtruth_masks_list)
detection_model.provide_window(window_boxes_list, window_classes_list)
detection_model.provide_edgemask(groundtruth_edgemask_list)
prediction_dict = detection_model.predict(images)
# TODO: implement joint training
if mtl.window:
prediction_dict = detection_model.predict_with_window(prediction_dict)
if mtl.edgemask:
prediction_dict = detection_model.predict_edgemask(prediction_dict)
if mtl.refine:
prediction_dict = detection_model.predict_with_mtl_results(prediction_dict)
losses_dict = detection_model.loss(prediction_dict, **kwargs)
for loss_name, loss_tensor in losses_dict.iteritems():
loss_tensor = tf.check_numerics(loss_tensor,
'%s is inf or nan.' % loss_name,
name='Loss/' + loss_name)
tf.losses.add_loss(loss_tensor)
if update_schedule is not None:
for name, _, losses in update_schedule:
if loss_name in losses:
tf.losses.add_loss(loss_tensor, loss_collection=name)
def train(create_tensor_dict_fn, create_model_fn, train_config, master, task,
num_clones, worker_replicas, clone_on_cpu, ps_tasks, worker_job_name,
is_chief, train_dir, num_examples, total_configs, model_config, is_first_training=True):
"""Training function for detection models.
Args:
create_tensor_dict_fn: a function to create a tensor input dictionary.
create_model_fn: a function that creates a DetectionModel and generates
losses.
train_config: a train_pb2.TrainConfig protobuf.
master: BNS name of the TensorFlow master to use.
task: The task id of this training instance.
num_clones: The number of clones to run per machine.
worker_replicas: The number of work replicas to train with.
clone_on_cpu: True if clones should be forced to run on CPU.
ps_tasks: Number of parameter server tasks.
worker_job_name: Name of the worker job.
is_chief: Whether this replica is the chief replica.
train_dir: Directory to write checkpoints and training summaries to.
num_examples: The number of examples in dataset for training.
total_configs: config list
"""
detection_model = create_model_fn()
data_augmentation_options = [
preprocessor_builder.build(step)
for step in train_config.data_augmentation_options]
with tf.Graph().as_default():
# Build a configuration specifying multi-GPU and multi-replicas.
deploy_config = model_deploy.DeploymentConfig(
num_clones=num_clones,
clone_on_cpu=clone_on_cpu,
replica_id=task,
num_replicas=worker_replicas,
num_ps_tasks=ps_tasks,
worker_job_name=worker_job_name)
# Place the global step on the device storing the variables.
with tf.device(deploy_config.variables_device()):
if is_first_training:
global_step = slim.create_global_step()
else:
prev_global_step = int(train_config.fine_tune_checkpoint.split('-')[-1])
global_step = variable_scope.get_variable(
ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64,
initializer=tf.constant(prev_global_step, dtype=dtypes.int64),
trainable=False,
collections=[ops.GraphKeys.GLOBAL_VARIABLES,
ops.GraphKeys.GLOBAL_STEP])
with tf.device(deploy_config.inputs_device()):
input_queue = _create_input_queue(train_config.batch_size // num_clones,
create_tensor_dict_fn,
train_config.batch_queue_capacity,
train_config.num_batch_queue_threads,
train_config.prefetch_queue_capacity,
data_augmentation_options,
ignore_options=train_config.ignore_options,
mtl_window=model_config.mtl.window,
mtl_edgemask=model_config.mtl.edgemask
)
# Gather initial summaries.
summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
global_summaries = set([])
kwargs = {}
kwargs['mtl'] = model_config.mtl
update_schedule = None
model_fn = functools.partial(_create_losses,
create_model_fn=create_model_fn,
show_image_summary=train_config.show_image_summary,
update_schedule=update_schedule,
**kwargs)
clones = model_deploy.create_clones(deploy_config, model_fn, [input_queue])
first_clone_scope = clones[0].scope
with tf.device(deploy_config.optimizer_device()):
training_optimizer = optimizer_builder.build(train_config.optimizer,
global_summaries)
sync_optimizer = None
if train_config.sync_replicas:
# TODO: support syncrhonous update for manual loss update
training_optimizer = tf.SyncReplicasOptimizer(
training_optimizer,
replicas_to_aggregate=train_config.replicas_to_aggregate,
total_num_replicas=train_config.worker_replicas)
sync_optimizer = training_optimizer
# Create ops required to initialize the model from a given checkpoint.
init_fn = None
if train_config.fine_tune_checkpoint:
var_map = detection_model.restore_map(
from_detection_checkpoint=train_config.from_detection_checkpoint,
restore_box_predictor=train_config.restore_box_predictor,
restore_window=train_config.restore_window,
restore_edgemask=train_config.restore_edgemask,
restore_closeness=train_config.restore_closeness,
restore_mtl_refine=train_config.restore_mtl_refine,
)
available_var_map = (variables_helper.
get_variables_available_in_checkpoint(
var_map, train_config.fine_tune_checkpoint))
init_saver = tf.train.Saver(available_var_map)
mtl = model_config.mtl
mtl_init_saver_list = []
def _get_mtl_init_saver(scope_name):
_var_map = detection_model._feature_extractor.mtl_restore_from_classification_checkpoint_fn(scope_name)
if train_config.from_detection_checkpoint:
_var_map_new = dict()
for name, val in _var_map.iteritems():
_var_map_new[detection_model.second_stage_feature_extractor_scope + '/' + name] = val
_var_map = _var_map_new
_available_var_map = (variables_helper.get_variables_available_in_checkpoint(
_var_map, train_config.fine_tune_checkpoint))
if _available_var_map:
return tf.train.Saver(_available_var_map)
else:
return None
# if mtl.share_second_stage_init and mtl.shared_feature == 'proposal_feature_maps':
if mtl.share_second_stage_init and train_config.from_detection_checkpoint == False:
if mtl.window:
mtl_init_saver_list.append(_get_mtl_init_saver(detection_model.window_box_predictor_scope))
if mtl.closeness:
mtl_init_saver_list.append(_get_mtl_init_saver(detection_model.closeness_box_predictor_scope))
if mtl.edgemask:
mtl_init_saver_list.append(_get_mtl_init_saver(detection_model.edgemask_predictor_scope))
def initializer_fn(sess):
init_saver.restore(sess, train_config.fine_tune_checkpoint)
for mtl_init_saver in mtl_init_saver_list:
if not mtl_init_saver == None:
mtl_init_saver.restore(sess, train_config.fine_tune_checkpoint)
init_fn = initializer_fn
def _get_trainable_variables(except_scopes=None):
trainable_variables = tf.trainable_variables()
if except_scopes is None:
return trainable_variables
for var in tf.trainable_variables():
if any([scope in var.name for scope in except_scopes]):
trainable_variables.remove(var)
return trainable_variables
def _get_update_ops(except_scopes=None):
# Gather update_ops from the first clone. These contain, for example,
# the updates for the batch_norm variables created by model_fn.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope)
if except_scopes is None:
return update_ops
for var in tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope):
if any([scope in var.name for scope in except_scopes]):
update_ops.remove(var)
return update_ops
with tf.device(deploy_config.optimizer_device()):
def _single_update():
kwargs = {}
_training_optimizer = training_optimizer
kwargs['var_list'] = None
update_ops = _get_update_ops()
total_loss, grads_and_vars = model_deploy.optimize_clones(
clones, _training_optimizer, regularization_losses=None, **kwargs)
# Optionaly multiply gradients by train_config.{grad_multiplier,
# divide_grad_by_batch}.
if train_config.grad_multiplier or train_config.divide_grad_by_batch:
base_multiplier = train_config.grad_multiplier \
if train_config.grad_multiplier else 1.0
batch_divider = float(train_config.batch_size) \
if train_config.divide_grad_by_batch else 1.0
total_multiplier = base_multiplier / batch_divider
grads_and_vars = variables_helper.multiply_gradients_by_scalar_multiplier(
grads_and_vars,
multiplier=total_multiplier)
# Optionally multiply bias gradients by train_config.bias_grad_multiplier.
if train_config.bias_grad_multiplier:
biases_regex_list = ['.*/biases']
grads_and_vars = variables_helper.multiply_gradients_matching_regex(
grads_and_vars,
biases_regex_list,
multiplier=train_config.bias_grad_multiplier)
# Optionally freeze some layers by setting their gradients to be zero.
if train_config.freeze_variables:
grads_and_vars = variables_helper.freeze_gradients_matching_regex(
grads_and_vars, train_config.freeze_variables)
# Optionally clip gradients
if train_config.gradient_clipping_by_norm > 0:
with tf.name_scope('clip_grads'):
grads_and_vars = slim.learning.clip_gradient_norms(
grads_and_vars, train_config.gradient_clipping_by_norm)
# Create gradient updates.
grad_updates = _training_optimizer.apply_gradients(grads_and_vars,
global_step=global_step)
# update_ops.append(grad_updates)
total_update_ops = update_ops + [grad_updates]
update_op = tf.group(*total_update_ops)
with tf.control_dependencies([update_op]):
train_tensor = tf.identity(total_loss, name=('train_op'))
return train_tensor
train_tensor = _single_update()
# Add summaries.
def _get_total_loss_with_collection(collection,
add_regularization_losses=True,
name="total_loss"):
losses = tf.losses.get_losses(loss_collection=collection)
if add_regularization_losses:
losses += tf.losses.get_regularization_losses()
return math_ops.add_n(losses, name=name)
for model_var in slim.get_model_variables():
global_summaries.add(tf.summary.histogram(model_var.op.name, model_var))
for loss_tensor in tf.losses.get_losses():
global_summaries.add(tf.summary.scalar(loss_tensor.op.name, loss_tensor))
global_summaries.add(
tf.summary.scalar('TotalLoss', tf.losses.get_total_loss()))
# Add the summaries from the first clone. These contain the summaries
# created by model_fn and either optimize_clones() or _gather_clone_loss().
summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES,
first_clone_scope))
summaries |= global_summaries
# Merge all summaries together.
summary_op = tf.summary.merge(list(summaries), name='summary_op')
# not contained in global_summaries
config_summary_list = select_config_summary_list(total_configs, as_matrix=False)
# Soft placement allows placing on CPU ops without GPU implementation.
session_config = tf.ConfigProto(allow_soft_placement=True,
log_device_placement=False)
# Save checkpoints regularly.
keep_checkpoint_every_n_hours = train_config.keep_checkpoint_every_n_hours
saver = tf.train.Saver(
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
custom_learning.train(
train_tensor,
logdir=train_dir,
master=master,
is_chief=is_chief,
global_step=(None if is_first_training else global_step),
session_config=session_config,
startup_delay_steps=train_config.startup_delay_steps,
init_fn=init_fn,
summary_op=summary_op,
number_of_steps=(
train_config.num_steps if train_config.num_steps else None),
log_every_n_steps=(
train_config.log_every_n_steps if train_config.log_every_n_steps else None),
save_summaries_secs=train_config.save_summaries_secs,
save_interval_secs=train_config.save_interval_secs,
sync_optimizer=sync_optimizer,
saver=saver,
batch_size=train_config.batch_size,
num_examples=num_examples,
config_summary_list=config_summary_list)
def select_config_summary_list(total_configs, as_matrix=True):
def as_text_matrix(dic):
return [[k, str(w)] for k, w in sorted(dic.items())]
def config_to_md_text(config, indent=2):
if config is None: return ""
text = text_format.MessageToString(config, indent=indent, float_format='.2g')
text = text.replace("\n", "<br>").replace(" ", "&nbsp;")
return text
if len(total_configs) == 5: # pipeline_config_path
model_config, train_config, input_config, eval_config, eval_input_config = total_configs
else:
model_config, train_config, input_config = total_configs
eval_config = None
eval_input_config = None
model_name = model_config.WhichOneof('model').lower()
if model_name == 'faster_rcnn':
model = model_config.faster_rcnn
elif model_name == 'ssd':
model = model_config.ssd
else:
raise ValueError('unknown model: %s'%(model_config.WhichOneof('model')))
if as_matrix:
resizer_name = model.image_resizer.WhichOneof('image_resizer_oneof')
if resizer_name == 'keep_aspect_ratio_resizer':
resizer = model.image_resizer.keep_aspect_ratio_resizer
val_resizer = 'min(%d), max(%d)'%(resizer.min_dimension, resizer.max_dimension)
elif resizer_name == 'fixed_shape_resizer':
resizer = model.image_resizer.fixed_shape_resizer
val_resizer = '(%d, %d)' % (resizer.width, resizer.height)
# model_config
model_dict = dict()
model_dict['feature_extractor'] = str(model.feature_extractor.type)
model_dict[resizer_name] = str(val_resizer)
model_dict['num_classes'] = str(model.num_classes)
model_config_text = as_text_matrix(model_dict)
# train_config
train_dict = dict()
train_dict['batch_size'] = str(train_config.batch_size)
train_dict['optimizer'] = str(train_config.optimizer.WhichOneof('optimizer'))
if train_config.gradient_clipping_by_norm > 0:
train_dict['grad_clip_norm'] = str(train_config.gradient_clipping_by_norm)
train_dict['data_augmentation'] = (', ').join([str(step.WhichOneof('preprocessing_step'))
for step
in train_config.data_augmentation_options])
train_config_text = as_text_matrix(train_dict)
# input_config
input_dict = dict()
input_dict['input_path'] = str(input_config.tf_record_input_reader.input_path)
train_input_config_text = as_text_matrix(input_dict)
# eval_config
eval_dict = dict()
if eval_config is not None:
eval_dict['num_examples'] = str(eval_config.num_examples)
eval_dict['eval_interval_secs'] = str(eval_config.eval_interval_secs)
eval_dict['nms_type'] = str(eval_config.nms_type)
eval_dict['nms_threshold'] = str(eval_config.nms_threshold)
eval_dict['soft_nms_sigma'] = str(eval_config.soft_nms_sigma)
eval_config_text = as_text_matrix(eval_dict)
# eval_input_config
eval_input_dict = dict()
if eval_input_config is not None:
eval_input_dict['input_path'] = str(eval_input_config.tf_record_input_reader.input_path)
eval_input_config_text = as_text_matrix(eval_input_dict)
else: # print all as json format
model_config_text = config_to_md_text(model_config)
train_config_text = config_to_md_text(train_config)
train_input_config_text = config_to_md_text(input_config)
eval_config_text = config_to_md_text(eval_config)
eval_input_config_text = config_to_md_text(eval_input_config)
model_config_summary = tf.summary.text('ModelConfig', tf.convert_to_tensor(model_config_text), collections=[])
train_config_summary = tf.summary.text('TrainConfig', tf.convert_to_tensor(train_config_text), collections=[])
train_input_config_summary = tf.summary.text('TrainInputConfig', tf.convert_to_tensor(train_input_config_text), collections=[])
eval_config_summary = tf.summary.text('EvalConfig', tf.convert_to_tensor(eval_config_text), collections=[])
eval_input_config_summary = tf.summary.text('EvalInputConfig', tf.convert_to_tensor(eval_input_config_text), collections=[])
return model_config_summary, train_config_summary, train_input_config_summary, eval_config_summary, eval_input_config_summary
|
python
|
# -*- coding: utf-8 -*-
r"""Consider the following properties of relation $r$. Because the corruption operations (see `Corruption`_)
are applied independently of triples, the resulting candidate corrupt triples could overlap with known positive
triples in $\mathcal{K}$.
===================== ============================================ ==============================================================
Property of :math:`r` Example pair of triples Implications
===================== ============================================ ==============================================================
one-to-many           :math:`(h,r,t_1), (h,r,t_2) \in \mathcal{K}` :math:`(h,r,t_2) \in T(h,r,t_1) \lor (h,r,t_1) \in T(h,r,t_2)`
multiple              :math:`(h,r_1,t), (h,r_2,t) \in \mathcal{K}` :math:`(h,r_2,t) \in R(h,r_1,t) \lor (h,r_1,t) \in R(h,r_2,t)`
many-to-one           :math:`(h_1,r,t), (h_2,r,t) \in \mathcal{K}` :math:`(h_2,r,t) \in H(h_1,r,t) \lor (h_1,r,t) \in H(h_2,r,t)`
===================== ============================================ ==============================================================
If no relations in $\mathcal{K}$ satisfy any of the relevant properties for the corruption schema chosen in negative
sampling, then there is guaranteed to be no overlap between $\mathcal{N}$ and $\mathcal{K}$, i.e.,
$\mathcal{N} \cap \mathcal{K} = \emptyset$. However, this scenario is very unlikely for real-world knowledge graphs.
The known positive triples that appear in $\mathcal{N}$ are known false negatives. Hence, we know that these are
incorrect (negative) training examples, and might want to exclude them to reduce the training noise.
.. warning::
It should be taken into account that a corrupted triple which is *not part*
of the knowledge graph can still represent a true fact. These "unknown" false negatives cannot
be removed *a priori* in the filtered setting. The methodology again relies
on the number of unknown false negatives being low enough for learning to take place.
However, in practice, $|\mathcal{N}| \gg |\mathcal{K}|$, so the likelihood of generating a false negative is rather low.
Therefore, the additional filter step is often omitted to lower computational cost. This general observation might not
hold for all entities; e.g., for a hub entity which is connected to many other entities, there may be a considerable
number of false negatives without filtering.
Identifying False Negatives During Training
-------------------------------------------
By default, PyKEEN does *not* filter false negatives from $\mathcal{N}$ during training. To enable filtering of
negative examples during training, the ``filtered`` keyword can be given to ``negative_sampler_kwargs`` like in:
.. code-block:: python
results = pipeline(
dataset='YAGO3-10',
model='PairRE',
training_loop='sLCWA',
negative_sampler='basic',
negative_sampler_kwargs=dict(
filtered=True,
),
)
PyKEEN implements several algorithms for filtering with different properties that can be chosen using the
``filterer`` keyword argument in ``negative_sampler_kwargs``. By default, a fast and approximate algorithm is used,
:class:`pykeen.sampling.filtering.BloomFilterer`, which is based on
`bloom filters <https://en.wikipedia.org/wiki/Bloom_filter>`_. The Bloom filterer also has a configurable desired error
rate, which can be lowered further at the cost of increased memory and computation.
.. code-block:: python
from pykeen.pipeline import pipeline
results = pipeline(
dataset='YAGO3-10',
model='PairRE',
training_loop='sLCWA',
negative_sampler='basic',
negative_sampler_kwargs=dict(
filtered=True,
filterer='bloom',
filterer_kwargs=dict(
error_rate=0.0001,
),
),
)
If you want to have a guarantee that all known false negatives are filtered, you can use a slower implementation based
on Python's built-in sets, the :class:`pykeen.sampling.filtering.PythonSetFilterer`. It can be activated with:
.. code-block:: python
from pykeen.pipeline import pipeline
results = pipeline(
dataset='YAGO3-10',
model='PairRE',
training_loop='sLCWA',
negative_sampler='basic',
negative_sampler_kwargs=dict(
filtered=True,
filterer='python-set',
),
)
Identifying False Negatives During Evaluation
---------------------------------------------
In contrast to training, PyKEEN **does** filter false negatives from $\mathcal{N}$ during evaluation by default.
To disable the "filtered setting" during evaluation, the ``filtered`` keyword can be given to ``evaluator_kwargs``
like in:
.. code-block:: python
from pykeen.pipeline import pipeline
results = pipeline(
dataset='YAGO3-10',
model='PairRE',
evaluator_kwargs=dict(
filtered=False,
),
)
Filtering during evaluation is implemented differently than in negative sampling:
First, there is no choice between an exact or an approximate algorithm via a
:class:`pykeen.sampling.filtering.Filterer`. Instead, the evaluation filtering modifies the
scores in-place rather than selecting only the non-filtered entries. The reason is
mainly that evaluation is always done in 1:n scoring, and thus we gain some efficiency
by keeping the tensor in "dense" shape ``(batch_size, num_entities)``.
Second, filtering during evaluation has to be correct, since it is crucial for reproducing results
from the filtered setting. For evaluation it makes sense to use all available information to obtain
evaluation results that are as solid as possible.
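The following minimal sketch (illustrative only, not PyKEEN's actual implementation) shows the idea of
in-place filtering on a dense score tensor of shape ``(batch_size, num_entities)``, where ``true_indices``
is an assumed tensor of (row index, entity index) pairs for known positive triples:
.. code-block:: python
    import torch
    scores = torch.randn(2, 5)                     # (batch_size, num_entities)
    true_indices = torch.tensor([[0, 3], [1, 1]])  # assumed known positives
    # mask known positives so they cannot outrank the test triple
    scores[true_indices[:, 0], true_indices[:, 1]] = float("-inf")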
""" # noqa
import math
from abc import abstractmethod
from typing import Iterable, Optional, Tuple
import torch
from class_resolver import Resolver
from torch import nn
from ..triples import CoreTriplesFactory
__all__ = [
"filterer_resolver",
"Filterer",
"BloomFilterer",
"PythonSetFilterer",
]
class Filterer(nn.Module):
"""An interface for filtering methods for negative triples."""
def forward(
self,
negative_batch: torch.LongTensor,
) -> Tuple[torch.LongTensor, Optional[torch.BoolTensor]]:
"""Filter all proposed negative samples that are positive in the training dataset.
Normally there is a low probability that a proposed negative sample is actually positive in the training dataset
and thus acts as a false negative. This is expected to act as a kind of regularization, since it adds a noise
signal to the training data. However, the degree of regularization is hard to control, since the added noise
signal depends on the ratio of true triples for a given entity-relation or entity-entity pair. Therefore, a
researcher might want to exclude the possibility of having false negatives in the proposed
negative triples.
.. note ::
Filtering is a very expensive task, since every proposed negative sample has to be checked against the
entire training dataset.
:param negative_batch: shape: ???
The batch of negative triples.
:return:
A pair (filtered_negative_batch, keep_mask) of shape ???
"""
keep_mask = ~self.contains(batch=negative_batch)
return negative_batch[keep_mask], keep_mask
@abstractmethod
def contains(self, batch: torch.LongTensor) -> torch.BoolTensor:
"""
Check whether a triple is contained.
Supports batching.
:param batch: shape (batch_size, 3)
The batch of triples.
:return: shape: (batch_size,)
Whether the triples are contained in the training triples.
"""
raise NotImplementedError
class PythonSetFilterer(Filterer):
"""A filterer using Python sets for filtering.
This filterer is expected to be rather slow due to the conversion from torch long tensors to Python tuples. It can
still serve as a baseline for performance comparison.
"""
def __init__(self, triples_factory: CoreTriplesFactory):
"""Initialize the filterer.
:param triples_factory:
The triples factory.
"""
super().__init__()
# store set of triples
self.triples = set(map(tuple, triples_factory.mapped_triples.tolist()))
def contains(self, batch: torch.LongTensor) -> torch.BoolTensor: # noqa: D102
return torch.as_tensor(
data=[tuple(triple) in self.triples for triple in batch.tolist()],
dtype=torch.bool,
device=batch.device,
)
class BloomFilterer(Filterer):
"""
A filterer for negative triples based on the Bloom filter.
Pure PyTorch, a proper module which can be moved to GPU, and supports batch-wise computation.
.. seealso ::
* https://github.com/hiway/python-bloom-filter/ - for calculation of sizes, and rough structure of code
* https://github.com/skeeto/hash-prospector#two-round-functions - for parts of the hash function
"""
#: some prime numbers for tuple hashing
mersenne: torch.LongTensor
#: The bit-array for the Bloom filter data structure
bit_array: torch.BoolTensor
def __init__(self, triples_factory: CoreTriplesFactory, error_rate: float = 0.001):
"""
Initialize the Bloom filter based filterer.
:param triples_factory:
The triples factory.
:param error_rate:
The desired error rate.
"""
super().__init__()
# Allocate bit array
self.ideal_num_elements = triples_factory.num_triples
size = self.num_bits(num=self.ideal_num_elements, error_rate=error_rate)
self.register_buffer(name="bit_array", tensor=torch.zeros(size, dtype=torch.bool))
self.register_buffer(
name="mersenne",
tensor=torch.as_tensor(
data=[2 ** x - 1 for x in [17, 19, 31]],
dtype=torch.long,
).unsqueeze(dim=0),
)
# calculate number of hashing rounds
self.rounds = self.num_probes(num_elements=self.ideal_num_elements, num_bits=size)
# index triples
self.add(triples=triples_factory.mapped_triples)
# Store some meta-data
self.error_rate = error_rate
def __repr__(self): # noqa:D105
return (
f"{self.__class__.__name__}("
f"error_rate={self.error_rate}, "
f"size={self.bit_array.shape[0]}, "
f"rounds={self.rounds}, "
f"ideal_num_elements={self.ideal_num_elements}, "
f")"
)
@staticmethod
def num_bits(num: int, error_rate: float = 0.01) -> int:
"""
Determine the required number of bits.
:param num:
The number of elements the Bloom filter shall store.
:param error_rate:
The desired error rate.
:return:
The required number of bits.
"""
numerator = -1 * num * math.log(error_rate)
denominator = math.log(2) ** 2
real_num_bits_m = numerator / denominator
return int(math.ceil(real_num_bits_m))
@staticmethod
def num_probes(num_elements: int, num_bits: int):
"""
Determine the number of probes / hashing rounds.
:param num_elements:
The number of elements.
:param num_bits:
The number of bits, i.e., the size of the Bloom filter.
:return:
The number of hashing rounds.
"""
real_num_probes_k = (num_bits / num_elements) * math.log(2)
return int(math.ceil(real_num_probes_k))
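# Worked sizing example (illustrative numbers only): for ideal_num_elements =
# 1_000_000 triples and error_rate = 0.001, num_bits = ceil(1e6 * ln(1000) /
# ln(2)**2) ≈ 14_377_588 bits (~1.7 MiB) and num_probes = ceil((14_377_588 /
# 1e6) * ln(2)) = 10 hashing rounds.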
def probe(
self,
batch: torch.LongTensor,
) -> Iterable[torch.LongTensor]:
"""
Iterate over indices from the probes.
:param batch: shape: (batch_size, 3)
A batch of elements.
:yields:
Indices of the k-th round, shape: (batch_size,).
"""
# pre-hash
x = (self.mersenne * batch).sum(dim=-1)
for _ in range(self.rounds):
# cf. https://github.com/skeeto/hash-prospector#two-round-functions
x = x ^ (x >> 16)
x = x * 0x7feb352d
x = x ^ (x >> 15)
x = x * 0x846ca68b
x = x ^ (x >> 16)
yield x % self.bit_array.shape[0]
def add(self, triples: torch.LongTensor) -> None:
"""Add triples to the Bloom filter."""
for i in self.probe(batch=triples):
self.bit_array[i] = True
def contains(self, batch: torch.LongTensor) -> torch.BoolTensor:
"""
Check whether a triple is contained.
:param batch: shape (batch_size, 3)
The batch of triples.
:return: shape: (batch_size,)
The result. False guarantees that the element was not contained in the indexed triples. True can be
erroneous.
"""
result = batch.new_ones(batch.shape[0], dtype=torch.bool)
for i in self.probe(batch):
result &= self.bit_array[i]
return result
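# Illustrative usage (assumes an existing CoreTriplesFactory `factory` and a
# LongTensor `negative_batch` of shape (batch_size, 3)):
#   filterer = BloomFilterer(triples_factory=factory, error_rate=0.001)
#   maybe_positive = filterer.contains(negative_batch)    # (batch_size,) bool
#   kept_negatives, keep_mask = filterer(negative_batch)  # drops suspected positives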
filterer_resolver = Resolver.from_subclasses(
base=Filterer,
default=BloomFilterer,
)
|
python
|
import statistics
import os
import jwql.instrument_monitors.miri_monitors.data_trending.utils.mnemonics as mn
import jwql.instrument_monitors.miri_monitors.data_trending.utils.sql_interface as sql
import jwql.instrument_monitors.miri_monitors.data_trending.utils.csv_to_AstropyTable as apt
from jwql.utils.utils import get_config, filename_parser
from jwql.instrument_monitors.miri_monitors.data_trending.utils.process_data import once_a_day_routine
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
#point to the directory where your files are located!
directory = '/home/daniel/STScI/trainigData/set_15_min/'
#here some files contain the same data but they are all incomplete
#in order to generate a full database we have to import all of them
filenames = [
'imir_190218_229_15mFOFTLM2019049190357389.CSV',
'imir_190218_242_15mFOFTLM2019049190828360.CSV',
'imir_190218_230_15mFOFTLM2019049190418712.CSV',
'imir_190218_243_15mFOFTLM2019049190849540.CSV',
'imir_190218_231_15mFOFTLM2019049190441348.CSV',
'imir_190218_244_15mFOFTLM2019049190905148.CSV',
'imir_190218_232_15mFOFTLM2019049190505181.CSV',
'imir_190218_245_15mFOFTLM2019049190928651.CSV',
'imir_190218_233_15mFOFTLM2019049190534172.CSV',
'imir_190218_246_15mFOFTLM2019049190944061.CSV',
'imir_190218_247_15mFOFTLM2019049191005149.CSV',
'imir_190218_235_15mFOFTLM2019049190616250.CSV',
'imir_190218_234_15mFOFTLM2019049190551817.CSV',
'imir_190218_248_15mFOFTLM2019049191021707.CSV',
'imir_190218_236_15mFOFTLM2019049190632019.CSV',
'imir_190218_249_15mFOFTLM2019049191042754.CSV',
'imir_190218_237_15mFOFTLM2019049190653391.CSV',
'imir_190218_250_15mFOFTLM2019049191100333.CSV',
'imir_190218_238_15mFOFTLM2019049190708898.CSV',
'imir_190218_251_15mFOFTLM2019049191121307.CSV',
'imir_190218_239_15mFOFTLM2019049190733579.CSV',
'imir_190218_252_15mFOFTLM2019049191135679.CSV',
'imir_190218_240_15mFOFTLM2019049190750440.CSV',
'imir_190218_253_15mFOFTLM2019049191156202.CSV',
'imir_190218_241_15mFOFTLM2019049190811168.CSV',
'imir_190218_254_15mFOFTLM2019049191211341.CSV',
'imir_190130_otis229FOFTLM2019030204146194.CSV',
'imir_190130_otis240FOFTLM2019030210631185.CSV',
'imir_190130_otis230FOFTLM2019030204240886.CSV',
'imir_190130_otis241FOFTLM2019030210651672.CSV',
'imir_190130_otis231FOFTLM2019030204334644.CSV',
'imir_190130_otis242FOFTLM2019030210728909.CSV',
'imir_190130_otis232FOFTLM2019030204455835.CSV',
'imir_190130_otis243FOFTLM2019030210744062.CSV',
'imir_190130_otis233FOFTLM2019030204521412.CSV',
'imir_190130_otis244FOFTLM2019030210809362.CSV',
'imir_190130_otis234FOFTLM2019030204555665.CSV',
'imir_190130_otis245FOFTLM2019030210828095.CSV',
'imir_190130_otis235FOFTLM2019030204617145.CSV',
'imir_190130_otis246FOFTLM2019030210852965.CSV',
'imir_190130_otis236FOFTLM2019030204651604.CSV',
'imir_190130_otis247FOFTLM2019030210914141.CSV',
'imir_190130_otis237FOFTLM2019030204712019.CSV',
'imir_190130_otis248FOFTLM2019030210940944.CSV',
'imir_190130_otis238FOFTLM2019030204738855.CSV',
'imir_190130_otis249FOFTLM2019030211002524.CSV',
'imir_190130_otis239FOFTLM2019030204805611.CSV',
'imir_190130_otis250FOFTLM2019030211032094.CSV']
def process_file(conn, path):
'''Parse CSV file, process data within and put to DB
Parameters
----------
conn : DBobject
Connection object to temporary database
path : str
defines path to the files
'''
#import mnemonic data and append dict to variable below
m_raw_data = apt.mnemonics(path)
#process raw data with once a day routine
cond1, cond2 = once_a_day_routine(m_raw_data)
#push extracted and filtered data to temporary database
for key, value in cond1.items():
#abbreviate data table
m = m_raw_data.mnemonic(key)
if key == "SE_ZIMIRICEA":
length = len(value)
mean = statistics.mean(value)
deviation = statistics.stdev(value)
dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation)
sql.add_data(conn, "SE_ZIMIRICEA_IDLE", dataset)
elif key == "IMIR_HK_ICE_SEC_VOLT4":
length = len(value)
mean = statistics.mean(value)
deviation = statistics.stdev(value)
dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation)
sql.add_data(conn, "IMIR_HK_ICE_SEC_VOLT4_IDLE", dataset)
else:
length = len(value)
mean = statistics.mean(value)
deviation = statistics.stdev(value)
dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation)
sql.add_data(conn, key, dataset)
for key, value in cond2.items():
#abbreviate data table of the current mnemonic
m = m_raw_data.mnemonic(key)
length = len(value)
mean = statistics.mean(value)
deviation = statistics.stdev(value)
dataset = (float(m.meta['start']), float(m.meta['end']), length, mean, deviation)
sql.add_data(conn, key, dataset)
def main():
#generate paths
DATABASE_LOCATION = os.path.join(get_config()['jwql_dir'], 'database')
DATABASE_FILE = os.path.join(DATABASE_LOCATION, 'miri_database.db')
#connect to temporary database
conn = sql.create_connection(DATABASE_FILE)
#do for every file in list above
for name in filenames:
path = directory + name
process_file(conn, path)
#close connection
sql.close_connection(conn)
print("done")
if __name__ == "__main__":
main()
|
python
|
import matplotlib.pyplot as plt
def show(*args, **kws):
"""Show a window with the given plot data. Blocks until window is closed.
Parameters
----------
*args : pyplot args
**kws : pyplot kw
Examples
--------
Plot a line.
>>> bplot.line(x, y)
>>> bplot.show()
"""
plt.show(*args, **kws)
|
python
|
import matplotlib.pyplot as plt
from keras.preprocessing import image
import warnings
warnings.filterwarnings("ignore")
import time
import os
import numpy as np
import pandas as pd
from tqdm import tqdm
import json
#from basemodels import VGGFace, OpenFace, Facenet, Age, Gender, Race, Emotion
#from extendedmodels import Age, Gender, Race, Emotion
#from commons import functions, distance as dst
from deepface.basemodels import VGGFace, OpenFace, Facenet
from deepface.extendedmodels import Age, Gender, Race, Emotion
from deepface.commons import functions, distance as dst
def verify(img1_path, img2_path
, model_name ='VGG-Face', distance_metric = 'cosine'):
tic = time.time()
if not os.path.isfile(img1_path):
raise ValueError("Confirm that " + img1_path + " exists")
if not os.path.isfile(img2_path):
raise ValueError("Confirm that " + img2_path + " exists")
#-------------------------
#tuned thresholds for model and metric pair
threshold = functions.findThreshold(model_name, distance_metric)
#-------------------------
if model_name == 'VGG-Face':
print("Using VGG-Face backend ", end='')
model = VGGFace.loadModel()
input_shape = (224, 224)
elif model_name == 'OpenFace':
print("Using OpenFace backend ", end='')
model = OpenFace.loadModel()
input_shape = (96, 96)
elif model_name == 'Facenet':
print("Using Facenet backend ", end='')
model = Facenet.loadModel()
input_shape = (160, 160)
else:
raise ValueError("Invalid model_name passed - ", model_name)
#-------------------------
#crop face
img1 = functions.detectFace(img1_path, input_shape)
img2 = functions.detectFace(img2_path, input_shape)
#-------------------------
#TO-DO: Apply face alignment here. Experiments show that alignment increases accuracy by 1%.
#-------------------------
#find embeddings
img1_representation = model.predict(img1)[0,:]
img2_representation = model.predict(img2)[0,:]
#-------------------------
#find distances between embeddings
if distance_metric == 'cosine':
print("and cosine similarity.")
distance = dst.findCosineDistance(img1_representation, img2_representation)
elif distance_metric == 'euclidean':
print("and euclidean distance.")
distance = dst.findEuclideanDistance(img1_representation, img2_representation)
elif distance_metric == 'euclidean_l2':
print("and euclidean distance l2 form.")
distance = dst.findEuclideanDistance(dst.l2_normalize(img1_representation), dst.l2_normalize(img2_representation))
else:
raise ValueError("Invalid distance_metric passed - ", distance_metric)
#-------------------------
#decision
if distance <= threshold:
identified = True
message = "Both face photos are of the same person."
else:
identified = False
message = "Both face photos are not of the same person!"
#-------------------------
plot = False
if plot:
label = "Distance is "+str(round(distance, 2))+"\nwhereas max threshold is "+ str(threshold)+ ".\n"+ message
fig = plt.figure()
fig.add_subplot(1,2, 1)
plt.imshow(img1[0][:, :, ::-1])
plt.xticks([]); plt.yticks([])
fig.add_subplot(1,2, 2)
plt.imshow(img2[0][:, :, ::-1])
plt.xticks([]); plt.yticks([])
fig.suptitle(label, fontsize=17)
plt.show(block=True)
#-------------------------
toc = time.time()
#print("identification lasts ",toc-tic," seconds")
#Return a tuple. First item is the verification result based on the tuned threshold,
#second item is the found distance, and third item is the threshold. You might want to customize this threshold to identify faces.
return (identified, distance, threshold)
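#Example usage (illustrative; the image paths below are placeholders):
# identified, distance, threshold = verify("img1.jpg", "img2.jpg",
# model_name='VGG-Face', distance_metric='cosine')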
def analyze(img_path, actions= []):
resp_obj = "{"
#if a specific target is not passed, then find them all
if len(actions) == 0:
actions= ['emotion', 'age', 'gender', 'race']
print("Actions to do: ", actions)
#TO-DO: do this in parallel
pbar = tqdm(range(0,len(actions)), desc='Finding actions')
action_idx = 0
#for action in actions:
for index in pbar:
action = actions[index]
pbar.set_description("Action: %s" % (action))
if action_idx > 0:
resp_obj += ", "
if action == 'emotion':
emotion_labels = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
img = functions.detectFace(img_path, (48, 48), True)
model = Emotion.loadModel()
emotion_predictions = model.predict(img)[0,:]
sum_of_predictions = emotion_predictions.sum()
emotion_obj = "\"emotion\": {"
for i in range(0, len(emotion_labels)):
emotion_label = emotion_labels[i]
emotion_prediction = 100 * emotion_predictions[i] / sum_of_predictions
if i > 0: emotion_obj += ", "
emotion_obj += "\"%s\": %s" % (emotion_label, emotion_prediction)
emotion_obj += "}"
emotion_obj += ", \"dominant_emotion\": \"%s\"" % (emotion_labels[np.argmax(emotion_predictions)])
resp_obj += emotion_obj
elif action == 'age':
img = functions.detectFace(img_path, (224, 224), False) #only the emotion model expects grayscale images
#print("age prediction")
model = Age.loadModel()
age_predictions = model.predict(img)[0,:]
apparent_age = Age.findApparentAge(age_predictions)
resp_obj += "\"age\": %s" % (apparent_age)
elif action == 'gender':
img = functions.detectFace(img_path, (224, 224), False) #only the emotion model expects grayscale images
#print("gender prediction")
model = Gender.loadModel()
gender_prediction = model.predict(img)[0,:]
if np.argmax(gender_prediction) == 0:
gender = "Woman"
elif np.argmax(gender_prediction) == 1:
gender = "Man"
resp_obj += "\"gender\": \"%s\"" % (gender)
elif action == 'race':
img = functions.detectFace(img_path, (224, 224), False) #only the emotion model expects grayscale images
model = Race.loadModel()
race_predictions = model.predict(img)[0,:]
race_labels = ['asian', 'indian', 'black', 'white', 'middle eastern', 'latino hispanic']
sum_of_predictions = race_predictions.sum()
race_obj = "\"race\": {"
for i in range(0, len(race_labels)):
race_label = race_labels[i]
race_prediction = 100 * race_predictions[i] / sum_of_predictions
if i > 0: race_obj += ", "
race_obj += "\"%s\": %s" % (race_label, race_prediction)
race_obj += "}"
race_obj += ", \"dominant_race\": \"%s\"" % (race_labels[np.argmax(race_predictions)])
resp_obj += race_obj
action_idx = action_idx + 1
resp_obj += "}"
resp_obj = json.loads(resp_obj)
return resp_obj
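#Example usage (illustrative; "img.jpg" is a placeholder path):
# demography = analyze("img.jpg", actions=['age', 'gender'])
# demography["age"] and demography["gender"] then hold the predictions.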
#---------------------------
functions.initializeFolder()
#---------------------------
|
python
|
"""Module containing a template class to generate counterfactual explanations.
Subclasses implement interfaces for different ML frameworks such as TensorFlow or PyTorch.
All methods are in dice_ml.explainer_interfaces"""
from abc import ABC, abstractmethod
from collections.abc import Iterable
import numpy as np
import pandas as pd
from sklearn.neighbors import KDTree
from tqdm import tqdm
from dice_ml.constants import ModelTypes, _PostHocSparsityTypes
from dice_ml.counterfactual_explanations import CounterfactualExplanations
from dice_ml.utils.exception import UserConfigValidationException
class ExplainerBase(ABC):
def __init__(self, data_interface, model_interface=None):
"""Init method
:param data_interface: an interface class to access data related params.
:param model_interface: an interface class to access trained ML model.
"""
# initiating data and model related parameters
self.data_interface = data_interface
if model_interface is not None:
# self.data_interface.create_ohe_params()
self.model = model_interface
self.model.load_model() # loading pickled trained model if applicable
self.model.transformer.feed_data_params(data_interface)
self.model.transformer.initialize_transform_func()
# moved the following snippet to a method in public_data_interface
# self.minx, self.maxx, self.encoded_categorical_feature_indexes = self.data_interface.get_data_params()
#
# # min and max for continuous features in original scale
# flattened_indexes = [item for sublist in self.encoded_categorical_feature_indexes for item in sublist]
# self.encoded_continuous_feature_indexes = [ix for ix in range(len(self.minx[0])) if ix not in flattened_indexes]
# org_minx, org_maxx = self.data_interface.get_minx_maxx(normalized=False)
# self.cont_minx = list(org_minx[0][self.encoded_continuous_feature_indexes])
# self.cont_maxx = list(org_maxx[0][self.encoded_continuous_feature_indexes])
#
# # decimal precisions for continuous features
# self.cont_precisions = \
# [self.data_interface.get_decimal_precisions()[ix] for ix in self.encoded_continuous_feature_indexes]
def _validate_counterfactual_configuration(
self, query_instances, total_CFs,
desired_class="opposite", desired_range=None,
permitted_range=None, features_to_vary="all",
stopping_threshold=0.5, posthoc_sparsity_param=0.1,
posthoc_sparsity_algorithm="linear", verbose=False, **kwargs):
if total_CFs <= 0:
raise UserConfigValidationException(
"The number of counterfactuals generated per query instance (total_CFs) should be a positive integer.")
if posthoc_sparsity_algorithm not in _PostHocSparsityTypes.ALL:
raise UserConfigValidationException(
'The posthoc_sparsity_algorithm should be {0} and not {1}'.format(
' or '.join(_PostHocSparsityTypes.ALL), posthoc_sparsity_algorithm)
)
if stopping_threshold < 0.0 or stopping_threshold > 1.0:
raise UserConfigValidationException('The stopping_threshold should lie between {0} and {1}'.format(
str(0.0), str(1.0)))
if posthoc_sparsity_param is not None and (posthoc_sparsity_param < 0.0 or posthoc_sparsity_param > 1.0):
raise UserConfigValidationException('The posthoc_sparsity_param should lie between {0} and {1}'.format(
str(0.0), str(1.0)))
if self.model is not None and self.model.model_type == ModelTypes.Classifier:
if desired_range is not None:
raise UserConfigValidationException(
'The desired_range parameter should not be set for classification task')
if self.model is not None and self.model.model_type == ModelTypes.Regressor:
if desired_range is None:
raise UserConfigValidationException(
'The desired_range parameter should be set for regression task')
if desired_range is not None:
if len(desired_range) != 2:
raise UserConfigValidationException(
"The parameter desired_range needs to have two numbers in ascending order.")
if desired_range[0] > desired_range[1]:
raise UserConfigValidationException(
"The range provided in desired_range should be in ascending order.")
def generate_counterfactuals(self, query_instances, total_CFs,
desired_class="opposite", desired_range=None,
permitted_range=None, features_to_vary="all",
stopping_threshold=0.5, posthoc_sparsity_param=0.1,
proximity_weight=0.2, sparsity_weight=0.2, diversity_weight=5.0,
categorical_penalty=0.1,
posthoc_sparsity_algorithm="linear", verbose=False, **kwargs):
"""General method for generating counterfactuals.
:param query_instances: Input point(s) for which counterfactuals are to be generated.
This can be a dataframe with one or more rows.
:param total_CFs: Total number of counterfactuals required.
:param desired_class: Desired counterfactual class - can take 0 or 1. Default value
is "opposite" to the outcome class of query_instance for binary classification.
:param desired_range: For regression problems. Contains the outcome range to
generate counterfactuals in. This should be a list of two numbers in
ascending order.
:param permitted_range: Dictionary with feature names as keys and permitted range in list as values.
Defaults to the range inferred from training data.
If None, uses the parameters initialized in data_interface.
:param features_to_vary: Either a string "all" or a list of feature names to vary.
:param stopping_threshold: Minimum threshold for counterfactuals target class probability.
:param proximity_weight: A positive float. The larger this weight, the closer the counterfactuals are to the
query_instance. Used by ['genetic', 'gradientdescent'],
ignored by ['random', 'kdtree'] methods.
:param sparsity_weight: A positive float. The larger this weight, the fewer features are changed from the query_instance.
Used by ['genetic', 'kdtree'], ignored by ['random', 'gradientdescent'] methods.
:param diversity_weight: A positive float. The larger this weight, the more diverse the counterfactuals are.
Used by ['genetic', 'gradientdescent'], ignored by ['random', 'kdtree'] methods.
:param categorical_penalty: A positive float. A weight to ensure that all levels of a categorical variable sum to 1.
Used by ['genetic', 'gradientdescent'], ignored by ['random', 'kdtree'] methods.
:param posthoc_sparsity_param: Parameter for the post-hoc operation on continuous features to enhance sparsity.
:param posthoc_sparsity_algorithm: Perform either linear or binary search. Takes "linear" or "binary".
Prefer binary search when a feature range is large (for instance,
income varying from 10k to 1000k) and only if the features share a
monotonic relationship with predicted outcome in the model.
:param verbose: Whether to output detailed messages.
:param sample_size: Sampling size
:param random_seed: Random seed for reproducibility
:param kwargs: Other parameters accepted by specific explanation method
:returns: A CounterfactualExplanations object that contains the list of
counterfactual examples per query_instance as one of its attributes.
"""
self._validate_counterfactual_configuration(
query_instances=query_instances,
total_CFs=total_CFs,
desired_class=desired_class,
desired_range=desired_range,
permitted_range=permitted_range, features_to_vary=features_to_vary,
stopping_threshold=stopping_threshold, posthoc_sparsity_param=posthoc_sparsity_param,
posthoc_sparsity_algorithm=posthoc_sparsity_algorithm, verbose=verbose,
kwargs=kwargs
)
cf_examples_arr = []
query_instances_list = []
if isinstance(query_instances, pd.DataFrame):
for ix in range(query_instances.shape[0]):
query_instances_list.append(query_instances[ix:(ix+1)])
elif isinstance(query_instances, Iterable):
query_instances_list = query_instances
for query_instance in tqdm(query_instances_list):
self.data_interface.set_continuous_feature_indexes(query_instance)
res = self._generate_counterfactuals(
query_instance, total_CFs,
desired_class=desired_class,
desired_range=desired_range,
permitted_range=permitted_range,
features_to_vary=features_to_vary,
stopping_threshold=stopping_threshold,
posthoc_sparsity_param=posthoc_sparsity_param,
posthoc_sparsity_algorithm=posthoc_sparsity_algorithm,
verbose=verbose,
**kwargs)
cf_examples_arr.append(res)
self._check_any_counterfactuals_computed(cf_examples_arr=cf_examples_arr)
return CounterfactualExplanations(cf_examples_list=cf_examples_arr)
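# Illustrative usage of a concrete explainer subclass (`exp` and `x_test` are
# assumed to exist and are not defined in this module):
#   explanations = exp.generate_counterfactuals(x_test, total_CFs=4,
#                                               desired_class="opposite")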
@abstractmethod
def _generate_counterfactuals(self, query_instance, total_CFs,
desired_class="opposite", desired_range=None,
permitted_range=None, features_to_vary="all",
stopping_threshold=0.5, posthoc_sparsity_param=0.1,
posthoc_sparsity_algorithm="linear", verbose=False, **kwargs):
"""Internal method for generating counterfactuals for a given query instance. Any explainerclass
inheriting from this class needs to implement this abstract method.
:param query_instance: Input point for which counterfactuals are to be generated.
This can be a dataframe with one row.
:param total_CFs: Total number of counterfactuals required.
:param desired_class: Desired counterfactual class - can take 0 or 1. Default value
is "opposite" to the outcome class of query_instance for binary classification.
:param desired_range: For regression problems. Contains the outcome range to
generate counterfactuals in.
:param permitted_range: Dictionary with feature names as keys and permitted range in list as values.
Defaults to the range inferred from training data.
If None, uses the parameters initialized in data_interface.
:param features_to_vary: Either a string "all" or a list of feature names to vary.
:param stopping_threshold: Minimum threshold for counterfactuals target class probability.
:param posthoc_sparsity_param: Parameter for the post-hoc operation on continuous features to enhance sparsity.
:param posthoc_sparsity_algorithm: Perform either linear or binary search. Takes "linear" or "binary".
Prefer binary search when a feature range is large (for instance,
income varying from 10k to 1000k) and only if the features share a
monotonic relationship with predicted outcome in the model.
:param verbose: Whether to output detailed messages.
:param sample_size: Sampling size
:param random_seed: Random seed for reproducibility
:param kwargs: Other parameters accepted by specific explanation method
:returns: A CounterfactualExplanations object that contains the list of
counterfactual examples per query_instance as one of its attributes.
"""
pass
def setup(self, features_to_vary, permitted_range, query_instance, feature_weights):
self.data_interface.check_features_to_vary(features_to_vary=features_to_vary)
self.data_interface.check_permitted_range(permitted_range)
if features_to_vary == 'all':
features_to_vary = self.data_interface.feature_names
if permitted_range is None: # use the precomputed default
self.feature_range = self.data_interface.permitted_range
feature_ranges_orig = self.feature_range
else: # compute the new ranges based on user input
self.feature_range, feature_ranges_orig = self.data_interface.get_features_range(permitted_range)
self.check_query_instance_validity(features_to_vary, permitted_range, query_instance, feature_ranges_orig)
# check feature MAD validity and throw warnings
self.data_interface.check_mad_validity(feature_weights)
return features_to_vary
def check_query_instance_validity(self, features_to_vary, permitted_range, query_instance, feature_ranges_orig):
for feature in query_instance:
if feature == self.data_interface.outcome_name:
raise ValueError("Target", self.data_interface.outcome_name, "present in query instance")
if feature not in self.data_interface.feature_names:
raise ValueError("Feature", feature, "not present in training data!")
for feature in self.data_interface.categorical_feature_names:
if query_instance[feature].values[0] not in feature_ranges_orig[feature] and \
str(query_instance[feature].values[0]) not in feature_ranges_orig[feature]:
raise ValueError("Feature", feature, "has a value outside the dataset.")
if feature not in features_to_vary and permitted_range is not None:
if feature in permitted_range and feature in self.data_interface.continuous_feature_names:
if not permitted_range[feature][0] <= query_instance[feature].values[0] <= permitted_range[feature][1]:
raise ValueError("Feature:", feature, "is outside the permitted range and isn't allowed to vary.")
elif feature in permitted_range and feature in self.data_interface.categorical_feature_names:
if query_instance[feature].values[0] not in self.feature_range[feature]:
raise ValueError("Feature:", feature, "is outside the permitted range and isn't allowed to vary.")
def local_feature_importance(self, query_instances, cf_examples_list=None,
total_CFs=10,
desired_class="opposite", desired_range=None, permitted_range=None,
features_to_vary="all", stopping_threshold=0.5,
posthoc_sparsity_param=0.1, posthoc_sparsity_algorithm="linear",
**kwargs):
""" Estimate local feature importance scores for the given inputs.
:param query_instances: A list of inputs for which to compute the
feature importances. These can be provided as a dataframe.
:param cf_examples_list: If precomputed, a list of counterfactual
examples for every input point. If cf_examples_list is provided, then
all the following parameters are ignored.
:param total_CFs: The number of counterfactuals to generate per input
(default is 10)
:param other_parameters: These are the same as the
generate_counterfactuals method.
:returns: An object of class CounterfactualExplanations that includes
the list of counterfactuals per input, local feature importances per
input, and the global feature importance summarized over all inputs.
"""
self._validate_counterfactual_configuration(
query_instances=query_instances,
total_CFs=total_CFs,
desired_class=desired_class,
desired_range=desired_range,
permitted_range=permitted_range, features_to_vary=features_to_vary,
stopping_threshold=stopping_threshold, posthoc_sparsity_param=posthoc_sparsity_param,
posthoc_sparsity_algorithm=posthoc_sparsity_algorithm,
kwargs=kwargs
)
if cf_examples_list is not None:
if any([len(cf_examples.final_cfs_df) < 10 for cf_examples in cf_examples_list]):
raise UserConfigValidationException(
"The number of counterfactuals generated per query instance should be "
"greater than or equal to 10 to compute feature importance for all query points")
elif total_CFs < 10:
raise UserConfigValidationException(
"The number of counterfactuals requested per "
"query instance should be greater than or equal to 10 "
"to compute feature importance for all query points")
importances = self.feature_importance(
query_instances,
cf_examples_list=cf_examples_list,
total_CFs=total_CFs,
local_importance=True,
global_importance=False,
desired_class=desired_class,
desired_range=desired_range,
permitted_range=permitted_range,
features_to_vary=features_to_vary,
stopping_threshold=stopping_threshold,
posthoc_sparsity_param=posthoc_sparsity_param,
posthoc_sparsity_algorithm=posthoc_sparsity_algorithm,
**kwargs)
return importances
def global_feature_importance(self, query_instances, cf_examples_list=None,
total_CFs=10, local_importance=True,
desired_class="opposite", desired_range=None, permitted_range=None,
features_to_vary="all", stopping_threshold=0.5,
posthoc_sparsity_param=0.1, posthoc_sparsity_algorithm="linear",
**kwargs):
""" Estimate global feature importance scores for the given inputs.
:param query_instances: A list of inputs for which to compute the
feature importances. These can be provided as a dataframe.
:param cf_examples_list: If precomputed, a list of counterfactual
examples for every input point. If cf_examples_list is provided, then
all the following parameters are ignored.
:param total_CFs: The number of counterfactuals to generate per input
(default is 10)
:param local_importance: Binary flag indicating whether local feature
importance values should also be returned for each query instance.
:param other_parameters: These are the same as the generate_counterfactuals method.
:returns: An object of class CounterfactualExplanations that includes
the list of counterfactuals per input, local feature importances per
input, and the global feature importance summarized over all inputs.
"""
self._validate_counterfactual_configuration(
query_instances=query_instances,
total_CFs=total_CFs,
desired_class=desired_class,
desired_range=desired_range,
permitted_range=permitted_range, features_to_vary=features_to_vary,
stopping_threshold=stopping_threshold, posthoc_sparsity_param=posthoc_sparsity_param,
posthoc_sparsity_algorithm=posthoc_sparsity_algorithm,
kwargs=kwargs
)
if query_instances is not None and len(query_instances) < 10:
raise UserConfigValidationException(
"The number of query instances should be greater than or equal to 10 "
"to compute global feature importance over all query points")
if cf_examples_list is not None:
if len(cf_examples_list) < 10:
raise UserConfigValidationException(
"The number of points for which counterfactuals generated should be "
"greater than or equal to 10 "
"to compute global feature importance")
elif any([len(cf_examples.final_cfs_df) < 10 for cf_examples in cf_examples_list]):
raise UserConfigValidationException(
"The number of counterfactuals generated per query instance should be "
"greater than or equal to 10 "
"to compute global feature importance over all query points")
elif total_CFs < 10:
raise UserConfigValidationException(
"The number of counterfactuals requested per query instance should be greater "
"than or equal to 10 "
"to compute global feature importance over all query points")
importances = self.feature_importance(
query_instances,
cf_examples_list=cf_examples_list,
total_CFs=total_CFs,
local_importance=local_importance,
global_importance=True,
desired_class=desired_class,
desired_range=desired_range,
permitted_range=permitted_range,
features_to_vary=features_to_vary,
stopping_threshold=stopping_threshold,
posthoc_sparsity_param=posthoc_sparsity_param,
posthoc_sparsity_algorithm=posthoc_sparsity_algorithm,
**kwargs)
return importances
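# Illustrative usage (`exp` and `x_test` are assumptions; at least 10 query
# instances are required, as validated above):
#   importances = exp.global_feature_importance(x_test[0:10], total_CFs=10)
# The returned object carries the per-query local importances and the global
# summary importance over all query points.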
def feature_importance(self, query_instances, cf_examples_list=None,
total_CFs=10, local_importance=True, global_importance=True,
desired_class="opposite", desired_range=None,
permitted_range=None, features_to_vary="all", stopping_threshold=0.5,
posthoc_sparsity_param=0.1, posthoc_sparsity_algorithm="linear", **kwargs):
""" Estimate feature importance scores for the given inputs.
:param query_instances: A list of inputs for which to compute the
feature importances. These can be provided as a dataframe.
:param cf_examples_list: If precomputed, a list of counterfactual
examples for every input point. If cf_examples_list is provided, then
all the following parameters are ignored.
:param total_CFs: The number of counterfactuals to generate per input
(default is 10)
:param other_parameters: These are the same as the generate_counterfactuals method.
:returns: An object of class CounterfactualExplanations that includes
the list of counterfactuals per input, local feature importances per
input, and the global feature importance summarized over all inputs.
"""
self._validate_counterfactual_configuration(
query_instances=query_instances,
total_CFs=total_CFs,
desired_class=desired_class,
desired_range=desired_range,
permitted_range=permitted_range, features_to_vary=features_to_vary,
stopping_threshold=stopping_threshold, posthoc_sparsity_param=posthoc_sparsity_param,
posthoc_sparsity_algorithm=posthoc_sparsity_algorithm,
kwargs=kwargs
)
if cf_examples_list is None:
cf_examples_list = self.generate_counterfactuals(
query_instances, total_CFs,
desired_class=desired_class,
desired_range=desired_range,
permitted_range=permitted_range,
features_to_vary=features_to_vary,
stopping_threshold=stopping_threshold,
posthoc_sparsity_param=posthoc_sparsity_param,
posthoc_sparsity_algorithm=posthoc_sparsity_algorithm,
**kwargs).cf_examples_list
allcols = self.data_interface.categorical_feature_names + self.data_interface.continuous_feature_names
summary_importance = None
local_importances = None
if global_importance:
summary_importance = {}
# Initializing importance vector
for col in allcols:
summary_importance[col] = 0
if local_importance:
local_importances = [{} for _ in range(len(cf_examples_list))]
# Initializing local importance for the ith query instance
for i in range(len(cf_examples_list)):
for col in allcols:
local_importances[i][col] = 0
overall_num_cfs = 0
# Summarizing the found counterfactuals
for i in range(len(cf_examples_list)):
cf_examples = cf_examples_list[i]
org_instance = cf_examples.test_instance_df
if cf_examples.final_cfs_df_sparse is not None:
df = cf_examples.final_cfs_df_sparse
else:
df = cf_examples.final_cfs_df
if df is None:
continue
per_query_point_cfs = 0
for _, row in df.iterrows():
per_query_point_cfs += 1
for col in self.data_interface.continuous_feature_names:
if not np.isclose(org_instance[col].iat[0], row[col]):
if summary_importance is not None:
summary_importance[col] += 1
if local_importances is not None:
local_importances[i][col] += 1
for col in self.data_interface.categorical_feature_names:
if org_instance[col].iat[0] != row[col]:
if summary_importance is not None:
summary_importance[col] += 1
if local_importances is not None:
local_importances[i][col] += 1
if local_importances is not None:
for col in allcols:
if per_query_point_cfs > 0:
local_importances[i][col] /= per_query_point_cfs
overall_num_cfs += per_query_point_cfs
if summary_importance is not None:
for col in allcols:
if overall_num_cfs > 0:
summary_importance[col] /= overall_num_cfs
return CounterfactualExplanations(
cf_examples_list,
local_importance=local_importances,
summary_importance=summary_importance)
def predict_fn(self, input_instance):
"""prediction function"""
return self.model.get_output(input_instance)
def predict_fn_for_sparsity(self, input_instance):
"""prediction function for sparsity correction"""
return self.model.get_output(input_instance)
def do_posthoc_sparsity_enhancement(self, final_cfs_sparse, query_instance, posthoc_sparsity_param,
posthoc_sparsity_algorithm, limit_steps_ls):
"""Post-hoc method to encourage sparsity in a generated counterfactuals.
:param final_cfs_sparse: Final CFs in original user-fed format, in a pandas dataframe.
:param query_instance: Query instance in original user-fed format, in a pandas dataframe.
:param posthoc_sparsity_param: Parameter for the post-hoc operation on continuous features to enhance sparsity.
:param posthoc_sparsity_algorithm: Perform either linear or binary search.
Prefer binary search when a feature range is
large (for instance, income varying from 10k to 1000k)
and only if the features share a monotonic relationship
with predicted outcome in the model.
:param limit_steps_ls: Defines the limit of steps to be done in the linear search,
necessary to avoid infinite loops
"""
if final_cfs_sparse is None:
return final_cfs_sparse
# quantiles of the deviation from median for every continuous feature
quantiles = self.data_interface.get_quantiles_from_training_data(quantile=posthoc_sparsity_param)
mads = self.data_interface.get_valid_mads()
# Setting the quantile of a feature to be the minimum of mad and quantile
# Thus, the maximum deviation can be mad.
for feature in quantiles:
quantiles[feature] = min(quantiles[feature], mads[feature])
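# Illustrative arithmetic (feature name and numbers assumed): if the
# posthoc_sparsity_param-quantile of the deviation from the median for 'age'
# is 7 but its MAD is 4, the allowed deviation used below is min(7, 4) = 4.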
# Sorting features such that the feature with the highest quantile deviation
# is first
features_sorted = sorted(quantiles.items(), key=lambda kv: kv[1], reverse=True)
for ix in range(len(features_sorted)):
features_sorted[ix] = features_sorted[ix][0]
precs = self.data_interface.get_decimal_precisions()
decimal_prec = dict(zip(self.data_interface.continuous_feature_names, precs))
cfs_preds_sparse = []
for cf_ix in list(final_cfs_sparse.index):
current_pred = self.predict_fn_for_sparsity(final_cfs_sparse.loc[[cf_ix]][self.data_interface.feature_names])
for feature in features_sorted:
# current_pred = self.predict_fn_for_sparsity(final_cfs_sparse.iat[[cf_ix]][self.data_interface.feature_names])
# feat_ix = self.data_interface.continuous_feature_names.index(feature)
diff = query_instance[feature].iat[0] - int(final_cfs_sparse.at[cf_ix, feature])
if(abs(diff) <= quantiles[feature]):
if posthoc_sparsity_algorithm == "linear":
final_cfs_sparse = self.do_linear_search(diff, decimal_prec, query_instance, cf_ix,
feature, final_cfs_sparse, current_pred, limit_steps_ls)
elif posthoc_sparsity_algorithm == "binary":
final_cfs_sparse = self.do_binary_search(
diff, decimal_prec, query_instance, cf_ix, feature, final_cfs_sparse, current_pred)
temp_preds = self.predict_fn_for_sparsity(final_cfs_sparse.loc[[cf_ix]][self.data_interface.feature_names])
cfs_preds_sparse.append(temp_preds)
final_cfs_sparse[self.data_interface.outcome_name] = self.get_model_output_from_scores(cfs_preds_sparse)
# final_cfs_sparse[self.data_interface.outcome_name] = np.round(final_cfs_sparse[self.data_interface.outcome_name], 3)
return final_cfs_sparse
def do_linear_search(self, diff, decimal_prec, query_instance, cf_ix, feature, final_cfs_sparse,
current_pred_orig, limit_steps_ls):
"""Performs a greedy linear search - moves the continuous features in CFs towards original values in
query_instance greedily until the prediction class changes, or it reaches the maximum number of steps"""
old_diff = diff
change = (10**-decimal_prec[feature]) # the minimal possible change for a feature
current_pred = current_pred_orig
count_steps = 0
if self.model.model_type == ModelTypes.Classifier:
while((abs(diff) > 10e-4) and (np.sign(diff*old_diff) > 0) and
self.is_cf_valid(current_pred)) and (count_steps < limit_steps_ls):
old_val = int(final_cfs_sparse.at[cf_ix, feature])
final_cfs_sparse.at[cf_ix, feature] += np.sign(diff)*change
current_pred = self.predict_fn_for_sparsity(final_cfs_sparse.loc[[cf_ix]][self.data_interface.feature_names])
old_diff = diff
if not self.is_cf_valid(current_pred):
final_cfs_sparse.at[cf_ix, feature] = old_val
diff = query_instance[feature].iat[0] - int(final_cfs_sparse.at[cf_ix, feature])
return final_cfs_sparse
diff = query_instance[feature].iat[0] - int(final_cfs_sparse.at[cf_ix, feature])
count_steps += 1
return final_cfs_sparse
def do_binary_search(self, diff, decimal_prec, query_instance, cf_ix, feature, final_cfs_sparse, current_pred):
"""Performs a binary search between continuous features of a CF and corresponding values
in query_instance until the prediction class changes."""
old_val = int(final_cfs_sparse.at[cf_ix, feature])
final_cfs_sparse.at[cf_ix, feature] = query_instance[feature].iat[0]
# Prediction of the query instance
current_pred = self.predict_fn_for_sparsity(final_cfs_sparse.loc[[cf_ix]][self.data_interface.feature_names])
# first check if assigning query_instance values to a CF is required.
if self.is_cf_valid(current_pred):
return final_cfs_sparse
else:
final_cfs_sparse.at[cf_ix, feature] = old_val
# move the CF values towards the query_instance
if diff > 0:
left = int(final_cfs_sparse.at[cf_ix, feature])
right = query_instance[feature].iat[0]
while left <= right:
current_val = left + ((right - left)/2)
current_val = round(current_val, decimal_prec[feature])
final_cfs_sparse.at[cf_ix, feature] = current_val
current_pred = self.predict_fn_for_sparsity(final_cfs_sparse.loc[[cf_ix]][self.data_interface.feature_names])
if current_val == right or current_val == left:
break
if self.is_cf_valid(current_pred):
left = current_val + (10 ** -decimal_prec[feature])
else:
right = current_val - (10 ** -decimal_prec[feature])
else:
left = query_instance[feature].iat[0]
right = int(final_cfs_sparse.at[cf_ix, feature])
while right >= left:
current_val = right - ((right - left)/2)
current_val = round(current_val, decimal_prec[feature])
final_cfs_sparse.at[cf_ix, feature] = current_val
current_pred = self.predict_fn_for_sparsity(final_cfs_sparse.loc[[cf_ix]][self.data_interface.feature_names])
if current_val == right or current_val == left:
break
if self.is_cf_valid(current_pred):
right = current_val - (10**-decimal_prec[feature])
else:
left = current_val + (10**-decimal_prec[feature])
return final_cfs_sparse
def misc_init(self, stopping_threshold, desired_class, desired_range, test_pred):
self.stopping_threshold = stopping_threshold
if self.model.model_type == ModelTypes.Classifier:
self.target_cf_class = np.array(
[[self.infer_target_cfs_class(desired_class, test_pred, self.num_output_nodes)]],
dtype=np.float32)
desired_class = int(self.target_cf_class[0][0])
if self.target_cf_class == 0 and self.stopping_threshold > 0.5:
self.stopping_threshold = 0.25
elif self.target_cf_class == 1 and self.stopping_threshold < 0.5:
self.stopping_threshold = 0.75
elif self.model.model_type == ModelTypes.Regressor:
self.target_cf_range = self.infer_target_cfs_range(desired_range)
return desired_class
def infer_target_cfs_class(self, desired_class_input, original_pred, num_output_nodes):
""" Infer the target class for generating CFs. Only called when
model_type=="classifier".
TODO: Add support for opposite desired class in multiclass.
Downstream methods should decide whether it is allowed or not.
"""
if desired_class_input == "opposite":
if num_output_nodes == 2:
original_pred_1 = np.argmax(original_pred)
target_class = int(1 - original_pred_1)
return target_class
elif num_output_nodes > 2:
raise UserConfigValidationException(
"Desired class cannot be opposite if the number of classes is more than 2.")
elif isinstance(desired_class_input, int):
if desired_class_input >= 0 and desired_class_input < num_output_nodes:
target_class = desired_class_input
return target_class
else:
raise UserConfigValidationException("Desired class not present in training data!")
else:
raise UserConfigValidationException("The target class for {0} could not be identified".format(
desired_class_input))
def infer_target_cfs_range(self, desired_range_input):
target_range = None
if desired_range_input is None:
raise ValueError("Need to provide a desired_range for the target counterfactuals for a regression model.")
else:
if desired_range_input[0] > desired_range_input[1]:
raise ValueError("Invalid Range!")
else:
target_range = desired_range_input
return target_range
def decide_cf_validity(self, model_outputs):
validity = np.zeros(len(model_outputs), dtype=np.int32)
for i in range(len(model_outputs)):
pred = model_outputs[i]
if self.model.model_type == ModelTypes.Classifier:
if self.num_output_nodes == 2: # binary
pred_1 = pred[self.num_output_nodes-1]
validity[i] = 1 if \
((self.target_cf_class == 0 and pred_1 <= self.stopping_threshold) or
(self.target_cf_class == 1 and pred_1 >= self.stopping_threshold)) else 0
else: # multiclass
if np.argmax(pred) == self.target_cf_class:
validity[i] = 1
elif self.model.model_type == ModelTypes.Regressor:
if self.target_cf_range[0] <= pred <= self.target_cf_range[1]:
validity[i] = 1
return validity
def is_cf_valid(self, model_score):
"""Check if a cf belongs to the target class or target range.
"""
# Converting to single prediction if the prediction is provided as a
# singleton array
correct_dim = 1 if self.model.model_type == ModelTypes.Classifier else 0
if hasattr(model_score, "shape") and len(model_score.shape) > correct_dim:
model_score = model_score[0]
# Converting target_cf_class to a scalar (tf/torch have it as (1,1) shape)
if self.model.model_type == ModelTypes.Classifier:
target_cf_class = self.target_cf_class
if hasattr(self.target_cf_class, "shape"):
if len(self.target_cf_class.shape) == 1:
target_cf_class = self.target_cf_class[0]
elif len(self.target_cf_class.shape) == 2:
target_cf_class = self.target_cf_class[0][0]
target_cf_class = int(target_cf_class)
if self.num_output_nodes == 1: # for tensorflow/pytorch models
pred_1 = model_score[0]
validity = True if \
((target_cf_class == 0 and pred_1 <= self.stopping_threshold) or
(target_cf_class == 1 and pred_1 >= self.stopping_threshold)) else False
return validity
if self.num_output_nodes == 2: # binary
pred_1 = model_score[self.num_output_nodes-1]
validity = True if \
((target_cf_class == 0 and pred_1 <= self.stopping_threshold) or
(target_cf_class == 1 and pred_1 >= self.stopping_threshold)) else False
return validity
else: # multiclass
return np.argmax(model_score) == target_cf_class
else:
            return self.target_cf_range[0] <= model_score <= self.target_cf_range[1]
def get_model_output_from_scores(self, model_scores):
if self.model.model_type == ModelTypes.Classifier:
output_type = np.int32
else:
output_type = np.float32
model_output = np.zeros(len(model_scores), dtype=output_type)
for i in range(len(model_scores)):
if self.model.model_type == ModelTypes.Classifier:
model_output[i] = np.argmax(model_scores[i])
elif self.model.model_type == ModelTypes.Regressor:
model_output[i] = model_scores[i]
return model_output
def check_permitted_range(self, permitted_range):
"""checks permitted range for continuous features
TODO: add comments as to where this is used if this function is necessary, else remove.
"""
if permitted_range is not None:
# if not self.data_interface.check_features_range(permitted_range):
# raise ValueError(
# "permitted range of features should be within their original range")
# else:
self.data_interface.permitted_range = permitted_range
self.minx, self.maxx = self.data_interface.get_minx_maxx(normalized=True)
self.cont_minx = []
self.cont_maxx = []
for feature in self.data_interface.continuous_feature_names:
self.cont_minx.append(self.data_interface.permitted_range[feature][0])
self.cont_maxx.append(self.data_interface.permitted_range[feature][1])
def sigmoid(self, z):
"""This is used in VAE-based CF explainers."""
return 1 / (1 + np.exp(-z))
def build_KD_tree(self, data_df_copy, desired_range, desired_class, predicted_outcome_name):
# Stores the predictions on the training data
dataset_instance = self.data_interface.prepare_query_instance(
query_instance=data_df_copy[self.data_interface.feature_names])
predictions = self.model.model.predict(dataset_instance)
# TODO: Is it okay to insert a column in the original dataframe with the predicted outcome? This is memory-efficient
data_df_copy[predicted_outcome_name] = predictions
# segmenting the dataset according to outcome
dataset_with_predictions = None
if self.model.model_type == ModelTypes.Classifier:
dataset_with_predictions = data_df_copy.loc[[i == desired_class for i in predictions]].copy()
elif self.model.model_type == ModelTypes.Regressor:
dataset_with_predictions = data_df_copy.loc[
[desired_range[0] <= pred <= desired_range[1] for pred in predictions]].copy()
KD_tree = None
# Prepares the KD trees for DiCE
if len(dataset_with_predictions) > 0:
dummies = pd.get_dummies(dataset_with_predictions[self.data_interface.feature_names])
KD_tree = KDTree(dummies)
return dataset_with_predictions, KD_tree, predictions
def round_to_precision(self):
# to display the values with the same precision as the original data
precisions = self.data_interface.get_decimal_precisions()
for ix, feature in enumerate(self.data_interface.continuous_feature_names):
self.final_cfs_df[feature] = self.final_cfs_df[feature].astype(float).round(precisions[ix])
if self.final_cfs_df_sparse is not None:
self.final_cfs_df_sparse[feature] = self.final_cfs_df_sparse[feature].astype(float).round(precisions[ix])
def _check_any_counterfactuals_computed(self, cf_examples_arr):
"""Check if any counterfactuals were generated for any query point."""
no_cf_generated = True
# Check if any counterfactuals were generated for any query point
for cf_examples in cf_examples_arr:
if cf_examples.final_cfs_df is not None and len(cf_examples.final_cfs_df) > 0:
no_cf_generated = False
break
if no_cf_generated:
raise UserConfigValidationException(
"No counterfactuals found for any of the query points! Kindly check your configuration.")
|
python
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class RelatedEventSummary(object):
"""
Event occurrence on managed instances.
"""
def __init__(self, **kwargs):
"""
Initializes a new RelatedEventSummary object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param id:
The value to assign to the id property of this RelatedEventSummary.
:type id: str
:param instance_id:
The value to assign to the instance_id property of this RelatedEventSummary.
:type instance_id: str
:param timestamp:
The value to assign to the timestamp property of this RelatedEventSummary.
:type timestamp: datetime
"""
self.swagger_types = {
'id': 'str',
'instance_id': 'str',
'timestamp': 'datetime'
}
self.attribute_map = {
'id': 'id',
'instance_id': 'instanceId',
'timestamp': 'timestamp'
}
self._id = None
self._instance_id = None
self._timestamp = None
@property
def id(self):
"""
**[Required]** Gets the id of this RelatedEventSummary.
OCID identifier of the event
:return: The id of this RelatedEventSummary.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this RelatedEventSummary.
OCID identifier of the event
:param id: The id of this RelatedEventSummary.
:type: str
"""
self._id = id
@property
def instance_id(self):
"""
**[Required]** Gets the instance_id of this RelatedEventSummary.
OCID identifier of the instance
:return: The instance_id of this RelatedEventSummary.
:rtype: str
"""
return self._instance_id
@instance_id.setter
def instance_id(self, instance_id):
"""
Sets the instance_id of this RelatedEventSummary.
OCID identifier of the instance
:param instance_id: The instance_id of this RelatedEventSummary.
:type: str
"""
self._instance_id = instance_id
@property
def timestamp(self):
"""
Gets the timestamp of this RelatedEventSummary.
        Time of occurrence of the event
:return: The timestamp of this RelatedEventSummary.
:rtype: datetime
"""
return self._timestamp
@timestamp.setter
def timestamp(self, timestamp):
"""
Sets the timestamp of this RelatedEventSummary.
        Time of occurrence of the event
:param timestamp: The timestamp of this RelatedEventSummary.
:type: datetime
"""
self._timestamp = timestamp
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
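# --- Illustrative usage sketch (not part of the generated SDK module) ---------
# Assumption: the @init_model_state_from_kwargs decorator allows the model to be
# populated via the keyword arguments documented in __init__; the OCID strings
# and timestamp below are placeholder values.
if __name__ == "__main__":
    import datetime

    event = RelatedEventSummary(
        id="ocid1.event.oc1..exampleuniqueid",
        instance_id="ocid1.instance.oc1..exampleuniqueid",
        timestamp=datetime.datetime(2021, 1, 1, 12, 0, 0),
    )
    print(event)  # __repr__ delegates to formatted_flat_dict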
|
python
|
from setuptools import setup
setup(name='my-package-ahadmushir',
version='0.0.1',
description='Testing installation of Package',
url='https://github.com/ahadmushir/my_first_package',
author='ahadmushir',
author_email='[email protected]',
license='MIT',
packages=['mypackage'],
zip_safe=False)
|
python
|
'''
Author : Bhishm Daslaniya [17CE023]
"Make it work, make it right, make it fast."
– Kent Beck
'''
n = int(input("Enter number: "))
print("Factors of " + str(n))
for i in range(1, n + 1):
    if n % i == 0:
        print(i, end=" ")
|
python
|
# coding: utf-8
from flask import Blueprint
scope = Blueprint("scope", __name__, url_prefix="/scope/")
|
python
|
__author__ = 'matth'
import unittest
import sys
from processfamily.test import ParentProcess, Config
import os
import subprocess
import requests
import time
import socket
import logging
import glob
from processfamily.processes import process_exists, kill_process, AccessDeniedError
from processfamily import _traceback_str
import signal
import threading
if sys.platform.startswith('win'):
from processfamily._winprocess_ctypes import CAN_USE_EXTENDED_STARTUPINFO, CREATE_BREAKAWAY_FROM_JOB
class _BaseProcessFamilyFunkyWebServerTestSuite(unittest.TestCase):
skip_crash_test = None
def setUp(self):
self.pid_dir = os.path.join(os.path.dirname(__file__), 'test', 'tmp', 'pid')
if not os.path.exists(self.pid_dir):
os.makedirs(self.pid_dir)
for pid_file in self.get_pid_files():
with open(pid_file, "r") as f:
pid = f.read().strip()
if pid and self.process_exists_or_access_denied(int(pid)):
logging.warning(
("Process with pid %s is stilling running. This could be a problem " + \
"(but it might be a new process with a recycled pid so I'm not killing it).") % pid )
else:
os.remove(pid_file)
self.check_server_ports_unbound()
def process_exists_or_access_denied(self, pid):
try:
return process_exists(pid)
except AccessDeniedError as e:
#It is most likely that this process does exist!
return True
def kill_process_ignore_access_denied(self, pid):
try:
return kill_process(pid)
except AccessDeniedError as e:
#Can't do anything about this
pass
def try_and_stop_everything_for_tear_down(self):
#Override this if you can do something about stopping everything
pass
def tearDown(self):
command_file = os.path.join(os.path.dirname(__file__), 'test', 'tmp', 'command.txt')
if os.path.exists(command_file):
os.remove(command_file)
self.wait_for_parent_to_stop(5)
#Now check that no processes are left over:
start_time = time.time()
processes_left_running = []
for pid_file in self.get_pid_files():
with open(pid_file, "r") as f:
pid = f.read().strip()
if pid:
while self.process_exists_or_access_denied(int(pid)) and time.time() - start_time < 5:
time.sleep(0.3)
if self.process_exists_or_access_denied(int(pid)):
processes_left_running.append(int(pid))
os.remove(pid_file)
if processes_left_running:
for pid in processes_left_running:
try:
self.kill_process_ignore_access_denied(pid)
except Exception as e:
logging.warning("Error killing process with pid %d: %s", pid, _traceback_str())
self.try_and_stop_everything_for_tear_down()
start_time = time.time()
for pid in processes_left_running:
while self.process_exists_or_access_denied(int(pid)) and time.time() - start_time < 40:
time.sleep(0.3)
self.check_server_ports_unbound()
self.assertFalse(processes_left_running, msg="There should have been no PIDs left running but there were: %s" % (', '.join([str(p) for p in processes_left_running])))
def start_up(self, test_command=None, wait_for_middle_child=True, wait_for_children=True, parent_timeout=None):
command_file = os.path.join(os.path.dirname(__file__), 'test', 'tmp', 'command.txt')
if test_command:
with open(command_file, "w") as f:
f.write(test_command)
elif os.path.exists(command_file):
os.remove(command_file)
self.start_parent_process(timeout=parent_timeout)
#Wait up to 15 secs for the all ports to be available (the parent might wait 10 for a middle child):
start_time = time.time()
still_waiting = True
        ports_to_wait = list(range(4)) if wait_for_children else [0]
        if not wait_for_middle_child:
            ports_to_wait.remove(2)
while still_waiting and time.time() - start_time < 15:
still_waiting = False
for i in ports_to_wait:
try:
s = socket.socket()
try:
s.connect(("localhost", Config.get_starting_port_nr()+i))
                    except socket.error as e:
still_waiting = True
break
finally:
s.close()
if still_waiting:
time.sleep(0.3)
        self.assertFalse(still_waiting, "Waited 15 seconds and some http ports are still not accessible")
def assert_middle_child_port_unbound(self):
port = Config.get_starting_port_nr()+2
logging.info("Checking for ability to bind to port %d", port)
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
if not sys.platform.startswith('win'):
#On linux I need this setting cos we are starting and stopping things
                #so frequently that they are still in a TIME_WAIT state when I get here
serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serversocket.bind(("", port))
except Exception as e:
self.fail("Middle child port is not unbound as expected")
finally:
serversocket.close()
def get_pid_files(self):
return glob.glob(os.path.join(self.pid_dir, "*.pid"))
def kill_parent(self):
for pid_file in self.get_pid_files():
if os.path.basename(pid_file).startswith('c'):
continue
with open(pid_file, "r") as f:
pid = f.read().strip()
kill_process(int(pid))
def check_stop(self, force_kills=0, timeout=None):
"""Checks that a stop succeeds, and that the number of child processes that had to be terminated is as expected"""
params = {"timeout": str(timeout)} if timeout else {}
child_processes_terminated = self.send_parent_http_command("stop", params=params)
if child_processes_terminated != str(force_kills):
raise ValueError("Stop received, but parent reports %r instead of %r child processes terminated",
child_processes_terminated, force_kills)
def test_parent_stop(self):
self.start_up()
self.check_stop()
def test_parent_exit(self):
self.start_up()
self.send_parent_http_command("exit")
def test_parent_crash(self):
if self.skip_crash_test:
self.skipTest(self.skip_crash_test)
self.start_up()
self.send_parent_http_command("crash")
def test_parent_interrupt_main(self):
self.start_up()
self.send_parent_http_command("interrupt_main")
def test_parent_kill(self):
self.start_up()
self.kill_parent()
def test_parent_stop_child_locked_up(self):
self.start_up()
self.freeze_up_middle_child()
self.check_stop(1, timeout=5)
#This needs time to wait for the child for 10 seconds:
self.wait_for_parent_to_stop(11)
def test_parent_exit_child_locked_up(self):
self.start_up()
self.freeze_up_middle_child()
self.send_parent_http_command("exit")
def test_parent_crash_child_locked_up(self):
if self.skip_crash_test:
self.skipTest(self.skip_crash_test)
self.start_up()
self.freeze_up_middle_child()
self.send_parent_http_command("crash")
def test_parent_interrupt_main_child_locked_up(self):
self.start_up()
self.freeze_up_middle_child()
self.send_parent_http_command("interrupt_main")
#This needs time to wait for the child for 10 seconds:
self.wait_for_parent_to_stop(11)
def test_parent_kill_child_locked_up(self):
self.start_up()
self.freeze_up_middle_child()
self.kill_parent()
def test_child_exit_on_start(self):
self.start_up(test_command='child_exit_on_start', wait_for_middle_child=False)
self.assert_middle_child_port_unbound()
self.check_stop()
def test_child_error_during_run(self):
self.start_up(test_command='child_error_during_run', wait_for_middle_child=False)
self.check_stop()
def test_child_freeze_on_start(self):
self.start_up(test_command='child_freeze_on_start', wait_for_middle_child=False, parent_timeout=2)
self.assert_middle_child_port_unbound()
self.check_stop(1, timeout=5)
def test_child_error_on_start(self):
self.start_up(test_command='child_error_on_start', wait_for_middle_child=False)
self.assert_middle_child_port_unbound()
self.check_stop()
def test_child_error_during_init(self):
self.start_up(test_command='child_error_during_init', wait_for_middle_child=False)
self.assert_middle_child_port_unbound()
self.check_stop()
def test_child_freeze_during_init(self):
self.start_up(test_command='child_freeze_during_init', wait_for_middle_child=False, parent_timeout=2)
self.assert_middle_child_port_unbound()
self.check_stop(1, timeout=5)
self.wait_for_parent_to_stop(11)
def test_child_crash_on_start(self):
if self.skip_crash_test:
self.skipTest(self.skip_crash_test)
self.start_up(test_command='child_crash_on_start', wait_for_middle_child=False)
self.assert_middle_child_port_unbound()
self.check_stop()
if not sys.platform.startswith('win'):
def test_sigint(self):
self.start_up()
os.kill(self.parent_process.pid, signal.SIGINT)
def test_sigint_child_locked_up(self):
self.start_up()
self.freeze_up_middle_child()
os.kill(self.parent_process.pid, signal.SIGINT)
#This needs time to wait for the child for 10 seconds:
self.wait_for_parent_to_stop(11)
def test_file_open_by_parent_before_fork_can_be_closed_and_deleted(self):
self.start_up()
result = self.send_parent_http_command("close_file_and_delete_it")
self.assertEqual("OK", result, "Command to close file and delete it failed (got response: %s)" % result)
self.check_stop()
def test_echo_std_err_on(self):
self.start_up(test_command='echo_std_err')
self.check_stop()
def test_handles_over_commandline_off(self):
if not sys.platform.startswith('win') or not CAN_USE_EXTENDED_STARTUPINFO:
self.skipTest("This test is not supported on this platform")
self.start_up(test_command='handles_over_commandline_off')
self.check_stop()
def test_handles_over_commandline_off_close_fds_off(self):
if not sys.platform.startswith('win') or not CAN_USE_EXTENDED_STARTUPINFO:
self.skipTest("This test is not supported on this platform")
self.start_up(test_command='handles_over_commandline_off_close_fds_off')
result = self.send_parent_http_command("close_file_and_delete_it")
self.assertEqual("FAIL", result, "Command to close file and delete it did not fail (got response: %s)" % result)
self.check_stop()
def test_close_fds_off(self):
self.start_up(test_command='close_fds_off')
result = self.send_parent_http_command("close_file_and_delete_it")
if sys.platform.startswith('win'):
#On linux this works fine
self.assertEqual("FAIL", result, "Command to close file and delete it did not fail (got response: %s)" % result)
else:
#TODO: a relevant test on linux?
pass
self.check_stop()
def test_child_comms_strategy_stdin_close(self):
self.start_up(test_command='use_cat', wait_for_children=False)
self.check_stop()
def test_child_comms_strategy_none(self):
self.start_up(test_command='use_cat_comms_none', wait_for_children=False)
# we don't actually have the ability to tell these children to stop
self.check_stop(3)
def test_child_comms_strategy_signal(self):
self.start_up(test_command='use_signal', wait_for_children=False)
# since we're not waiting for the children to start up, give them a chance to register signal handlers
time.sleep(0.5)
self.check_stop()
def test_use_job_object_off(self):
        self.start_up(test_command='use_job_object_off')
self.check_stop()
def test_cpu_affinity_off(self):
self.start_up(test_command='cpu_affinity_off')
self.check_stop()
def test_handles_over_commandline_off_file_open_by_parent(self):
if not sys.platform.startswith('win') or not CAN_USE_EXTENDED_STARTUPINFO:
self.skipTest("This test is not supported on this platform")
self.start_up(test_command='handles_over_commandline_off')
result = self.send_parent_http_command("close_file_and_delete_it")
self.assertEqual("OK", result, "Command to close file and delete it failed (got response: %s)" % result)
self.check_stop()
def freeze_up_middle_child(self):
#First check that we can do this fast (i.e. things aren't stuttering because of environment):
for i in range(5):
self.send_middle_child_http_command("", timeout=1)
self.send_middle_child_http_command("hold_gil?t=%d" % (60*10)) #Freeze up for 10 minutes
while True:
#Try and do this request until it takes longer than 1 sec - this would mean that we have successfully got stuck
try:
self.send_middle_child_http_command("", timeout=1)
except requests.exceptions.Timeout as t:
break
def check_server_ports_unbound(self):
bound_ports = []
for pnumber in range(4):
port = Config.get_starting_port_nr() + pnumber
#I just try and bind to the server port and see if I have a problem:
logging.info("Checking for ability to bind to port %d", port)
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
if not sys.platform.startswith('win'):
#On linux I need this setting cos we are starting and stopping things
                    #so frequently that they are still in a TIME_WAIT state when I get here
serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serversocket.bind(("", port))
except Exception as e:
bound_ports.append(port)
finally:
serversocket.close()
self.assertFalse(bound_ports, "The following ports are still bound: %s" % ', '.join([str(p) for p in bound_ports]))
def get_path_to_ParentProcessPy(self):
return os.path.join(os.path.dirname(__file__), 'test', 'ParentProcess.py')
def send_parent_http_command(self, command, params=None, **kwargs):
return self.send_http_command(Config.get_starting_port_nr(), command, params=params, **kwargs)
def send_middle_child_http_command(self, command, params=None, **kwargs):
return self.send_http_command(Config.get_starting_port_nr()+2, command, params=params, **kwargs)
def send_http_command(self, port, command, params=None, **kwargs):
r = requests.get('http://localhost:%d/%s' % (port, command), params=params, **kwargs)
j = r.json
if callable(j):
return j()
else:
#This is the old requests api:
return j
def wait_for_process_to_stop(self, process, timeout):
if process is None:
logging.info("No process to wait for")
return
logging.info("Waiting for process (%d) to finish", process.pid)
start_time = time.time()
while time.time()-start_time < timeout:
if process.poll() is None:
time.sleep(0.3)
else:
return
class NormalSubprocessTests(_BaseProcessFamilyFunkyWebServerTestSuite):
skip_crash_test = "The crash test throws up a dialog in this context" if sys.platform.startswith('win') else None
def start_parent_process(self, timeout=None):
kwargs={}
if sys.platform.startswith('win'):
kwargs['creationflags'] = CREATE_BREAKAWAY_FROM_JOB
environ = os.environ.copy()
if timeout:
environ["STARTUP_TIMEOUT"] = str(timeout)
self.parent_process = subprocess.Popen(
[sys.executable, self.get_path_to_ParentProcessPy()],
close_fds=True, env=environ, **kwargs)
threading.Thread(target=self.parent_process.communicate).start()
def wait_for_parent_to_stop(self, timeout):
self.wait_for_process_to_stop(getattr(self, 'parent_process', None), timeout)
if sys.platform.startswith('win'):
import win32service
import win32serviceutil
from processfamily.test.ExeBuilder import build_service_exe
from processfamily.processes import USE_PROCESS_QUERY_LIMITED_INFORMATION
class PythonWTests(_BaseProcessFamilyFunkyWebServerTestSuite):
skip_crash_test = "The crash test throws up a dialog in this context" if sys.platform.startswith('win') else None
def start_parent_process(self, timeout=None):
self.parent_process = subprocess.Popen(
[Config.pythonw_exe, self.get_path_to_ParentProcessPy()],
close_fds=True,
creationflags=CREATE_BREAKAWAY_FROM_JOB)
threading.Thread(target=self.parent_process.communicate).start()
def wait_for_parent_to_stop(self, timeout):
self.wait_for_process_to_stop(getattr(self, 'parent_process', None), timeout)
class WindowsServiceTests(_BaseProcessFamilyFunkyWebServerTestSuite):
@classmethod
def setUpClass(cls, service_username=None):
cls.send_stop_and_then_wait_for_service_to_stop_ignore_errors()
cls.service_exe = build_service_exe()
subprocess.check_call([cls.service_exe] + (["--username", service_username] if service_username else []) + ["install"])
@classmethod
def tearDownClass(cls):
if hasattr(cls, 'service_exe'):
subprocess.check_call([cls.service_exe, "remove"])
def try_and_stop_everything_for_tear_down(self):
self.send_stop_and_then_wait_for_service_to_stop_ignore_errors()
def start_parent_process(self, timeout=None):
win32serviceutil.StartService(Config.svc_name)
def wait_for_parent_to_stop(self, timeout):
self.wait_for_service_to_stop(timeout)
@classmethod
def wait_for_service_to_stop(cls, timeout):
start_time = time.time()
while time.time()-start_time < timeout:
if win32serviceutil.QueryServiceStatus(Config.svc_name)[1] != win32service.SERVICE_STOPPED:
time.sleep(0.3)
def test_parent_interrupt_main(self):
self.skipTest("Interrupt main doesn't do anything useful in a windows service")
def test_parent_interrupt_main_child_locked_up(self):
self.skipTest("Interrupt main doesn't do anything useful in a windows service")
def test_service_stop(self):
self.start_up()
win32serviceutil.StopService(Config.svc_name)
def test_service_stop_child_locked_up(self):
self.start_up()
self.freeze_up_middle_child()
win32serviceutil.StopService(Config.svc_name)
#This needs time to wait for the child for 10 seconds:
self.wait_for_parent_to_stop(11)
def test_service_stop_child_freeze_on_start(self):
self.start_up(test_command='child_freeze_on_start', wait_for_middle_child=False)
self.assert_middle_child_port_unbound()
win32serviceutil.StopService(Config.svc_name)
#This still needs time to wait for the child to stop for 10 seconds:
self.wait_for_parent_to_stop(11)
@classmethod
def send_stop_and_then_wait_for_service_to_stop_ignore_errors(cls):
try:
win32serviceutil.StopService(Config.svc_name)
cls.wait_for_service_to_stop(20)
except Exception as e:
pass
if not USE_PROCESS_QUERY_LIMITED_INFORMATION:
def test_parent_kill(self):
self.skipTest("I cannot kill a network service service from here - I get an access denied error")
def test_parent_kill_child_locked_up(self):
self.skipTest("I cannot kill a network service service from here - I get an access denied error")
class WindowsServiceNetworkServiceUserTests(WindowsServiceTests):
@staticmethod
def grant_network_service_rights(folder, rights):
try:
subprocess.check_call(["cmd.exe", "/C", "icacls", folder, "/grant", "NETWORK SERVICE:(OI)(CI)%s" % rights])
except Exception as e:
logging.warning("icacls command returned a non-zero response for folder/file '%s'")
@classmethod
def setUpClass(cls):
#I do this just in case we left the service running by interrupting the tests
cls.send_stop_and_then_wait_for_service_to_stop_ignore_errors()
tmp_dir = os.path.join(os.path.dirname(__file__), 'test', 'tmp')
if not os.path.exists(tmp_dir):
os.makedirs(tmp_dir)
#Make sure network service has full access to the tmp folder (and these are inheritable)
cls.grant_network_service_rights(tmp_dir, "F")
#And read / execute access to Python, and other folders on the python path:
cls.grant_network_service_rights(os.path.abspath(sys.prefix), "RX")
done_paths = [os.path.abspath(sys.prefix)]
for path_item in sorted(sys.path, key=lambda p: len(os.path.abspath(p))):
abspath_item = os.path.abspath(path_item)
already_done = False
for p in done_paths:
if abspath_item.startswith(p):
already_done = True
break
if not already_done:
cls.grant_network_service_rights(abspath_item, "RX")
done_paths.append(abspath_item)
super(WindowsServiceNetworkServiceUserTests, cls).setUpClass(service_username="NT AUTHORITY\\NetworkService")
def test_parent_kill(self):
self.skipTest("I cannot kill a network service service from here - I get an access denied error")
def test_parent_kill_child_locked_up(self):
self.skipTest("I cannot kill a network service service from here - I get an access denied error")
#Remove the base class from the module dict so it isn't smelled out by nose:
del(_BaseProcessFamilyFunkyWebServerTestSuite)
|
python
|
import os
import torch
from sklearn.manifold import TSNE
import matplotlib
matplotlib.use('Agg')  # select the non-interactive backend before pyplot is imported
from matplotlib import cm
import matplotlib.pyplot as plt
def plot_tsne(feat, index, output_dir):
print("feat: global mean = {:.4f}, {:.4f}".format(feat.mean(), feat.std()))
embed = TSNE(n_components=2).fit_transform(feat)
cm_subsection = torch.true_divide(index, index.max())
colors = cm.jet(cm_subsection)
fig = plt.figure(frameon=False)
fig.set_size_inches(6, 4)
ax = fig.add_subplot(1, 1, 1)
ax.scatter(embed[:, 0], embed[:, 1], c=colors, alpha=0.1)
ax.xaxis.set_major_formatter(matplotlib.ticker.NullFormatter())
ax.yaxis.set_major_formatter(matplotlib.ticker.NullFormatter())
ax.axis('tight')
plt.savefig(os.path.join(output_dir, 'tsne.png'), bbox_inches='tight', pad_inches=0)
plt.close(fig)
def plot_samples(primary, neighbor, goal, positive, negative, fname=None, window=3.0):
fig = plt.figure(frameon=False)
fig.set_size_inches(6, 4)
ax = fig.add_subplot(1, 1, 1)
ax.plot(neighbor[:, 0]-primary[0], neighbor[:, 1]-primary[1], 'bo')
ax.plot(goal[0]-primary[0], goal[1]-primary[1], 'k*')
for i in range(neighbor.size(0)):
ax.arrow(neighbor[i, 0]-primary[0], neighbor[i, 1]-primary[1], neighbor[i, 2]*0.1, neighbor[i, 3]*0.1, color='b', width=0.05)
if len(positive.shape) < 2:
positive = positive[None, :]
ax.plot(positive[:, 0], positive[:, 1], 'gs')
ax.plot(negative[:, 0], negative[:, 1], 'rx')
ax.plot(0, 0, 'ko')
ax.arrow(0, 0, primary[2]*0.1, primary[3]*0.1, color='k', width=0.05)
ax.set_xlim(-window, window)
ax.set_ylim(-window, window)
ax.set_aspect('equal')
plt.grid()
plt.savefig(fname, bbox_inches='tight', pad_inches=0)
plt.close(fig)
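# --- Illustrative usage sketch (not part of this module) ----------------------
# Feeds plot_tsne with synthetic data so the expected shapes are explicit:
# `feat` is an (N, D) float array and `index` a length-N tensor used only for
# colouring. The output directory "/tmp" and the random data are assumptions.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.default_rng(0)
    fake_feat = rng.normal(size=(200, 16)).astype(np.float32)
    fake_index = torch.arange(200)
    plot_tsne(fake_feat, fake_index, "/tmp")  # writes /tmp/tsne.png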
|
python
|
# pylint: skip-file
from yamicache import Cache
c = Cache()
class App1(object):
@c.cached()
def test1(self, argument, power):
"""running test1"""
return argument ** power
@c.clear_cache()
def test2(self):
return 0
def test_clear():
"""Make sure cache gets cleared"""
a1 = App1()
assert len(c) == 0
assert a1.test1(1, 2) == 1
assert a1.test1(1, 2) == 1
assert a1.test1(1, 2) == 1
assert len(c) == 1
a1.test2()
assert len(c) == 0
def main():
test_clear()
if __name__ == "__main__":
main()
|
python
|
import os
import argparse
import requests
from sanic import Sanic
from sanic.response import json
from osf_pigeon.pigeon import main, sync_metadata, get_id
from concurrent.futures import ThreadPoolExecutor
from sanic.log import logger
from asyncio import events
def run(main):
loop = events.new_event_loop()
try:
events.set_event_loop(loop)
return loop.run_until_complete(main)
finally:
try:
loop.run_until_complete(loop.shutdown_asyncgens())
finally:
events.set_event_loop(None)
loop.close()
app = Sanic("osf_pigeon")
pigeon_jobs = ThreadPoolExecutor(max_workers=10, thread_name_prefix="pigeon_jobs")
def task_done(future):
if future._exception:
exception = future._exception
exception = str(exception)
logger.debug(f"ERROR:{exception}")
if future._result:
guid, url = future._result
resp = requests.post(
f"{settings.OSF_API_URL}_/ia/{guid}/done/", json={"IA_url": url}
)
logger.debug(f"DONE:{future._result} Response:{resp}")
@app.route("/")
async def index(request):
return json({"🐦": "👍"})
@app.route("/archive/<guid>", methods=["GET", "POST"])
async def archive(request, guid):
future = pigeon_jobs.submit(run, main(guid))
future.add_done_callback(task_done)
return json({guid: future._state})
@app.route("/metadata/<guid>", methods=["POST"])
async def metadata(request, guid):
item_name = get_id(guid)
future = pigeon_jobs.submit(sync_metadata, item_name, request.json)
future.add_done_callback(task_done)
return json({guid: future._state})
parser = argparse.ArgumentParser(
description="Set the environment to run OSF pigeon in."
)
parser.add_argument(
"--env", dest="env", help="what environment are you running this for"
)
if __name__ == "__main__":
args = parser.parse_args()
if args.env:
os.environ["ENV"] = args.env
from osf_pigeon import settings
if args.env == "production":
app.run(host=settings.HOST, port=settings.PORT)
else:
app.run(host=settings.HOST, port=settings.PORT, auto_reload=True, debug=True)
|
python
|
import random
import requests
from bs4 import BeautifulSoup
from colorama import Fore
from colorama import Style
from blagues_api import BlaguesAPI
import asyncio
def faituneblague():
blagues = BlaguesAPI(
"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyX2lkIjoiMjAwNTQ5NjM1MDY3MDg0ODAwIiwibGltaXQiOjEwMCwia2V5IjoiM1lne"
"nMxdjNWV1NZSHhkcFR5U1FQSjFFOW9lTHd5aHNtY1JWbzl1dGRKTTU5NEZ2ejgiLCJjcmVhdGVkX2F0IjoiMjAyMS0wNy0xNlQxNTozNDoxNysw"
"MDowMCIsImlhdCI6MTYyNjQ0OTY1N30.0-ldZbArp0y0cjfi8kpDQhDXE2C6rxB3C5t9DbIvYeY")
async def blague():
blg = await blagues.random()
blg_joke = blg.joke
blg_answer = blg.answer
print(
Fore.RED + Style.BRIGHT + "IA > " + Fore.LIGHTGREEN_EX + blg_joke + "\n" + Fore.RED + Style.BRIGHT +
"IA > " + Fore.LIGHTMAGENTA_EX + blg_answer + Style.RESET_ALL)
blg_fin = "IA > " + blg_joke + "\n" + "IA > " + blg_answer
return blg_fin
return asyncio.run(blague())
def randomme():
print(Style.RESET_ALL + Fore.GREEN + "Bienvenue dans Random Me\n")
print("Options :\n" +
Fore.RED + "[1] " + Fore.YELLOW + " chiffre aléatoire entre 0 et 1000\n" +
Fore.RED + "[2] " + Fore.YELLOW + " chiffre aléatoire entre deux valeurs définit par vous\n" +
Fore.RED + "[3] " + Fore.YELLOW + " mot aléatoire en français\n" +
Fore.RED + "[4] " + Fore.YELLOW + " mot aléatoire en anglais\n" +
Fore.RED + "[5] " + Fore.YELLOW + " blague aléatoire\n")
choix = input(Fore.RED + Style.BRIGHT + "IA > " + Fore.LIGHTYELLOW_EX + "Votre choix : " + Fore.LIGHTCYAN_EX)
if choix == "1":
resrange = random.randint(0, 1000)
print(Fore.RED + Style.BRIGHT + "IA > " + Fore.LIGHTYELLOW_EX + "Résultat : "
+ Fore.GREEN + Style.BRIGHT + str(resrange) + Style.RESET_ALL)
if choix == "2":
min_val = input(Fore.RED + Style.BRIGHT + "IA > " + Fore.LIGHTYELLOW_EX + "Valeur minimum : " + Fore.LIGHTCYAN_EX)
max_val = input(Fore.RED + Style.BRIGHT + "IA > " + Fore.LIGHTYELLOW_EX + "Valeur maximum : " + Fore.LIGHTCYAN_EX)
resrange = random.randint(int(min_val), int(max_val))
print(Fore.RED + Style.BRIGHT + "IA > " + Fore.LIGHTYELLOW_EX + "Résultat : "
+ Fore.GREEN + Style.BRIGHT + str(resrange) + Style.RESET_ALL)
if choix == "3":
requete = requests.get("https://www.motsqui.com/mots-aleatoires.php?Submit=Nouveau+mot")
page = requete.content
soup = BeautifulSoup(page, "html.parser")
mot = soup.find("b")
mot_fin = str(mot).replace("<b>", "").replace("</b>", "")
print(Fore.RED + Style.BRIGHT + "IA > " + Fore.LIGHTYELLOW_EX + "Résultat : "
+ Fore.GREEN + Style.BRIGHT + mot_fin + Style.RESET_ALL)
if choix == "4":
requete = requests.get("https://randomword.com/")
page = requete.content
soup = BeautifulSoup(page, "html.parser")
mot = soup.find("div", {"id": "random_word"})
mot_fin = str(mot).replace('"', "").replace("</div>", "").replace("<div id=random_word>", "")
print(Fore.RED + Style.BRIGHT + "IA > " + Fore.LIGHTYELLOW_EX + "Résultat : "
+ Fore.GREEN + Style.BRIGHT + mot_fin + Style.RESET_ALL)
if choix == "5":
faituneblague()
|
python
|
from .weibo_h5 import WeiboH5API as WeiboAPI
from .base import WeiboVisible
|
python
|
class Cursor:
def __init__(self, wnd):
self.wnd = wnd
self.pos = 0
self.preferred_col = 0
self.preferred_linecol = 0
self.last_screenpos = (-1, -1)
def refresh(self, top=None, middle=None, bottom=None,
align_always=False):
self.pos, y, x = self.wnd.locate_cursor(
self.pos, top=top, middle=middle, bottom=bottom,
align_always=align_always)
assert self.pos is not None
self.last_screenpos = (x, y)
def setpos(self, pos, top=None, middle=None, bottom=None,
align_always=False):
assert pos is not None
self.pos = pos
self.refresh(top=top, middle=middle, bottom=bottom,
align_always=align_always)
def savecol(self):
"""Update current preferred column"""
y, x = self.wnd.screen.getrowcol(self.pos)
self.preferred_col = x
self.preferred_linecol = self.wnd.screen.get_cursorcol(self.pos)
def up(self):
# Ensure current position is displayed
self.wnd.screen.locate(self.pos, middle=True, align_always=False)
idx, row = self.wnd.screen.getrow(self.pos)
if row.posfrom == 0:
return
if idx == self.wnd.screen.portfrom:
# Scroll up a line
if not self.wnd.lineup():
# First line of text file
return False
idx, prevrow = self.wnd.screen.getrow(row.posfrom - 1)
else:
idx -= 1
pos = self.wnd.screen.get_pos_above(idx, self.preferred_col)
pos = self.adjust_nextpos(self.pos, pos)
self.setpos(pos)
return True
def down(self):
# Ensure current position is displayed
self.wnd.screen.locate(self.pos, middle=True, align_always=False)
idx, row = self.wnd.screen.getrow(self.pos)
if self.wnd.screen.is_lastrow(row):
return False
if idx + 1 >= self.wnd.screen.portto:
# Scroll down a line
if not self.wnd.linedown():
# last line of text file
return False
idx, nextrow = self.wnd.screen.getrow(row.posto)
else:
idx += 1
pos = self.wnd.screen.get_pos_under(idx, self.preferred_col)
pos = self.adjust_nextpos(self.pos, pos)
self.setpos(pos)
return True
def prev_line(self):
tol = self.wnd.document.gettol(self.pos)
if tol != 0:
self.wnd.screen.locate(tol - 1, top=True)
prev = self.wnd.document.gettol(tol - 1)
pos = self.wnd.screen.get_pos_at_cols(prev, self.preferred_linecol)
pos = self.adjust_nextpos(self.pos, pos)
self.wnd.screen.locate(pos, top=True)
self.setpos(pos)
return True
def next_line(self):
next = self.wnd.document.gettol(self.wnd.document.geteol(self.pos))
if self.pos < next:
self.wnd.screen.locate(next, bottom=True)
pos = self.wnd.screen.get_pos_at_cols(next, self.preferred_linecol)
pos = self.adjust_nextpos(self.pos, pos)
self.wnd.screen.locate(pos, bottom=True)
self.setpos(pos)
return True
def adjust_nextpos(self, curpos, nextpos):
return nextpos
def word_right_pos(self, pos):
# Get next word break
nextpos = self.wnd.document.endpos()
for word in self.wnd.document.mode.split_word(pos):
f, t, chars, cg = word
nextpos = t
if f == pos: # first word
# get next word
continue
if cg[0] == 'Z': # skip white space
continue
nextpos = f
break
return self.adjust_nextpos(pos, nextpos)
def word_left_pos(self, pos):
# Get previous word break
prevpos = tol = self.wnd.document.gettol(pos)
if pos == prevpos:
prevpos -= 1
else:
for f, t, chars, cg in self.wnd.document.mode.split_word(tol):
# This word is at after cursor pos
if pos <= f:
break
if cg[0] == 'Z': # skip white space
continue
prevpos = f
return self.adjust_nextpos(pos, prevpos)
def right(self, word=False):
# Ensure current position is displayed
self.wnd.screen.locate(self.pos, middle=True, align_always=False)
if self.pos == self.wnd.document.endpos():
return
if not word:
# Get right of current position
nextpos = self.wnd.document.get_nextpos(self.pos)
nextpos = self.adjust_nextpos(self.pos, nextpos)
else:
nextpos = self.word_right_pos(self.pos)
if nextpos != self.pos:
# Scroll down if next position is not visible
while not self.wnd.screen.is_visible(nextpos):
# scroll down
if not self.wnd.linedown():
break
self.setpos(nextpos)
self.savecol()
def left(self, word=False):
# Ensure current position is displayed
self.wnd.screen.locate(self.pos, middle=True, align_always=False)
if self.pos == 0:
return
if not word:
# Get left of current position
prevpos = self.wnd.document.get_prevpos(self.pos)
prevpos = self.adjust_nextpos(self.pos, prevpos)
else:
prevpos = self.word_left_pos(self.pos)
if prevpos != self.pos:
# Scroll up if next position is not visible
while not self.wnd.screen.is_visible(prevpos):
# scroll up
if not self.wnd.lineup():
break
self.setpos(prevpos)
self.savecol()
def pagedown(self):
# Ensure current position is displayed
self.wnd.screen.locate(self.pos, middle=True, align_always=False)
idx, row = self.wnd.screen.getrow(self.pos)
y = idx - self.wnd.screen.portfrom
lastrow = self.wnd.screen.rows[self.wnd.screen.portto - 1]
if lastrow.posto == self.wnd.document.endpos():
nextpos = self.adjust_nextpos(self.pos, lastrow.posto)
if self.pos != lastrow.posto:
self.setpos(nextpos)
return True
if self.wnd.pagedown():
idx = max(0, min(self.wnd.screen.portto - 1,
self.wnd.screen.portfrom + y))
pos = self.wnd.screen.get_pos_under(idx, self.preferred_col)
nextpos = self.adjust_nextpos(self.pos, pos)
self.setpos(nextpos)
return True
else:
pos = self.wnd.document.endpos()
nextpos = self.adjust_nextpos(self.pos, pos)
if nextpos != pos:
self.setpos(nextpos)
return True
def pageup(self):
# Ensure current position is displayed
self.wnd.screen.locate(self.pos, middle=True, align_always=False)
idx, row = self.wnd.screen.getrow(self.pos)
y = idx - self.wnd.screen.portfrom
if self.wnd.pageup():
idx = max(0, min(self.wnd.screen.portto - 1,
self.wnd.screen.portfrom + y))
pos = self.wnd.screen.get_pos_above(idx, self.preferred_col)
nextpos = self.adjust_nextpos(self.pos, pos)
self.setpos(nextpos)
return True
elif self.pos != 0:
nextpos = self.adjust_nextpos(self.pos, 0)
if nextpos != self.pos:
self.setpos(nextpos)
return True
def home(self):
# Ensure current position is displayed
self.wnd.screen.locate(self.pos, middle=True, align_always=False)
idx, row = self.wnd.screen.getrow(self.pos)
nextpos = self.adjust_nextpos(self.pos, row.posfrom)
self.setpos(nextpos)
self.savecol()
def end(self):
# Ensure current position is displayed
self.wnd.screen.locate(self.pos, middle=True, align_always=False)
idx, row = self.wnd.screen.getrow(self.pos)
if self.wnd.screen.is_lastrow(row):
pos = self.wnd.document.endpos()
else:
pos = row.posto - 1
nextpos = self.adjust_nextpos(self.pos, pos)
self.setpos(nextpos)
self.savecol()
def tol(self, pos):
tol = self.wnd.document.gettol(pos)
nextpos = self.adjust_nextpos(self.pos, tol)
self.wnd.screen.locate(nextpos, middle=True)
self.setpos(nextpos)
self.savecol()
def first_letter(self, pos):
f, tol = self.wnd.document.mode.get_indent_range(pos)
nextpos = self.adjust_nextpos(self.pos, tol)
self.wnd.screen.locate(nextpos, middle=True)
self.setpos(nextpos)
self.savecol()
def eol(self, pos):
eol = self.wnd.document.get_line_to(pos)
nextpos = self.adjust_nextpos(self.pos, eol)
self.wnd.screen.locate(nextpos, middle=True)
self.setpos(nextpos)
self.savecol()
def tof(self):
self.wnd.screen.locate(0, top=True)
nextpos = self.adjust_nextpos(self.pos, 0)
self.setpos(nextpos)
self.savecol()
def eof(self):
nextpos = self.wnd.document.endpos()
self.wnd.screen.locate(nextpos, middle=True)
nextpos = self.adjust_nextpos(self.pos, nextpos)
self.setpos(nextpos)
self.savecol()
|
python
|
import warnings
import numpy as np
import math
def ugly_number(n):
"""
Returns the n'th Ugly number.
Ugly Numbers are numbers whose only prime factors are 2,3 or 5.
Parameters
----------
n : int
represent the position of ugly number
"""
if(n<1):
raise NotImplementedError(
"Enter a valid natural number"
)
ugly = [0]*n
ugly[0] = 1
i2 = i3 = i5 = 0
next_multiple_of_2 = 2
next_multiple_of_3 = 3
next_multiple_of_5 = 5
for l in range(1, n):
ugly[l] = min(next_multiple_of_2,
next_multiple_of_3,
next_multiple_of_5)
if ugly[l] == next_multiple_of_2:
i2 += 1
next_multiple_of_2 = ugly[i2] * 2
if ugly[l] == next_multiple_of_3:
i3 += 1
next_multiple_of_3 = ugly[i3] * 3
if ugly[l] == next_multiple_of_5:
i5 += 1
next_multiple_of_5 = ugly[i5] * 5
return ugly[-1]
def ugly_series(n):
"""
Returns n Ugly numbers starting from 1.
Ugly Numbers are numbers whose only prime factors are 2,3 or 5.
Parameters
----------
n : int
represent the count of ugly numbers
return : array
return an array of length n
"""
if(n<1):
raise NotImplementedError(
"Enter a valid natural number"
)
arr = []
for i in range(0,n):
arr.append(ugly_number(i+1))
return arr
def fibonacci(n):
"""
Returns the n'th fibonacci number starting from 0
Parameters
----------
n : int
represent the position of fibonacci number
"""
a = 0
b = 1
if n < 0:
raise NotImplementedError(
"Enter a non-negative integer"
)
elif n == 0:
return a
elif n == 1:
return b
else:
for i in range(2,n+1):
c = a + b
a = b
b = c
return b
def fibonacci_series(n):
"""
Return a series of n fibonacci numbers
Parameters
----------
n : int
represent the count of fibonacci numbers
return : array
return an array of length n
"""
if(n<1):
raise NotImplementedError(
"Enter a valid input"
)
arr = []
for i in range(0,n):
arr.append(fibonacci(i))
return arr
def catalan(n):
"""
Returns n'th catalan number
Parameters
----------
n : int
represent the position of catalan number starting from zero
"""
if(n<0):
raise NotImplementedError(
"Enter a valid natural number"
)
if(n==0 or n==1):
return 1
catalan = [0]*(n+1)
catalan[0] = 1
catalan[1] = 1
for i in range(2,n+1):
for j in range(i):
catalan[i] += catalan[j]*catalan[i-j-1]
return catalan[n]
def catalan_series(n):
"""
Returns first n catalan numbers
Parameters
----------
n : int
denotes the count of catalan numbers
return : array
returns an array of n catalan numbers
"""
if(n<1):
raise NotImplementedError(
"Enter a positive integer"
)
arr = []
for i in range(0,n):
arr.append(catalan(i))
return arr
def factorial(n):
"""
Returns the factorial of n
Parameters
----------
n : int
denotes the non-negative integer for which factorial value is needed
"""
if(n<0):
raise NotImplementedError(
"Enter a valid non-negative integer"
)
if(n==0 or n==1):
return 1
elif(n==2):
return 2
return n*factorial(n-1)
def stirling_factorial(n):
"""
Returns the upper and lower bounds of Stirling's approximation of a factorial
Parameters
----------
n : int
denotes the positive integer for which factorial needs to be approximated
return : array
return an array of length 2
first element denotes lower bound
second element denotes upper bound
"""
if(n<0):
raise NotImplementedError(
"Enter a valid natural number"
)
arr = []
lower = (math.sqrt(2*math.pi))*(math.pow(n,n+0.5))*(math.pow(math.e,-1*n))
arr.append(lower)
upper = (math.e)*(math.pow(n,n+0.5))*(math.pow(math.e,-1*n))
arr.append(upper)
return arr
def bell_number(n):
"""
Returns the n'th bell number
Parameters
----------
n : int
denotes the number for which bell number needs to be calculated
"""
if(n<0):
raise NotImplementedError(
"Invalid Input"
)
bell = [[0 for i in range(n+1)] for j in range(n+1)]
bell[0][0] = 1
for i in range(1,n+1):
bell[i][0] = bell[i-1][i-1]
for j in range(1,i+1):
bell[i][j] = bell[i-1][j-1] + bell[i][j-1]
return bell[n][0]
def bell_series(n):
"""
Returns first n bell numbers
Parameters
----------
n : int
denotes the count of bell numbers
return : array
return an array of integers
"""
if(n<1):
raise NotImplementedError(
"Invalid Input"
)
arr = []
for i in range(n):
arr.append(bell_number(i))
return arr
def binomialCoef(n, k):
"""
Return the binomial coefficient nCk i.e., coefficient of x^k in (1+x)^n
Parameters
----------
n : int
denotes n in nCk
k : int
denotes k in nCk
return : int
return an integer
"""
if(n<k):
raise TypeError(
"Value of first argument cannot be smaller than second"
)
Coef = [[0 for x in range(k+1)] for x in range(n+1)]
for i in range(n+1):
for j in range(min(i, k)+1):
if j == 0 or j == i:
Coef[i][j] = 1
else:
Coef[i][j] = Coef[i-1][j-1] + Coef[i-1][j]
return Coef[n][k]
def nCkModp(n, k, p):
"""
Returns nCk % p
Parameters
----------
n : int
denotes n in nCk%p
k : int
denotes k in nCk%p
p : int
denotes p in nCk%p
return : int
returns an integer
"""
if (k > n- k):
k = n - k
Coef = [0 for i in range(k + 1)]
Coef[0] = 1
for i in range(1, n + 1):
for j in range(min(i, k), 0, -1):
Coef[j] = (Coef[j] + Coef[j-1]) % p
return Coef[k]
#def moser_de_bruijn(n):
#def moser_de_bruijn_series(n):
#def golomb(n):
#def golomb_series(n):
#def newman_conway(n):
#def newman_conway_series(n):
#def newman_prime(n):
#def newman_prime_series(n):
#def lobb(n):
#def lobb_series(n):
#def eulerian(n):
#def eulerian_series(n):
#def delannoy(n):
#def delannoy_series(n):
#def entringer(n):
#def entringer_series(n):
#def recontres(n):
#def recontres_series(n):
#def jacobsthal(n):
#def jacobsthal_series(n):
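# --- Illustrative sanity checks (example values chosen here, not part of the API) --
# Running this module directly prints a few known values of the sequences above.
if __name__ == "__main__":
    print(ugly_number(10))      # 12   (1, 2, 3, 4, 5, 6, 8, 9, 10, 12)
    print(fibonacci_series(8))  # [0, 1, 1, 2, 3, 5, 8, 13]
    print(catalan_series(5))    # [1, 1, 2, 5, 14]
    print(factorial(6))         # 720
    print(bell_number(4))       # 15
    print(binomialCoef(6, 2))   # 15
    print(nCkModp(10, 2, 13))   # 6    (45 % 13)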
|
python
|
"""
ARC's settings
"""
import os
import string
import sys
servers = {
'local': {
'path': '/storage/ce_dana/',
'cluster_soft': 'HTCondor',
'un': '[username]',
'cpus': 8,
'memory': 256,
},
}
# List here servers you'd like to associate with specific ESS.
# An ordered list of servers indicates priority
# Keeping this dictionary empty will cause ARC to scan for software on the servers defined above
global_ess_settings = {
'gaussian': 'local',
'orca': 'local',
'molpro': 'local',
}
# Electronic structure software ARC may access (use lowercase):
supported_ess = ['gaussian', 'molpro', 'orca']
# Default job memory, cpu, time settings
default_job_settings = {
'job_total_memory_gb': 6,
'job_cpu_cores': 8,
}
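# --- Illustrative lookup sketch (an assumption, not ARC's actual resolution code) --
# Shows one way a caller could resolve which server entry to use for a given ESS,
# falling back to the first server defined above when no explicit mapping exists
# (mirroring the "scan the servers" behaviour described in the comments above).
def _resolve_server(ess_name):
    server_name = global_ess_settings.get(ess_name.lower())
    if server_name is None:
        server_name = next(iter(servers))  # assumed fallback: first configured server
    return server_name, servers[server_name]
# e.g. _resolve_server('Gaussian') -> ('local', servers['local'])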
|
python
|
from setuptools import setup, find_packages
setup(
name='PyloXyloto',
version='1.0',
description='PyloXyloto is a simple Deep learning framework built from scratch with python that supports the main '
'functionalities needed for a deep learning project',
      py_modules=["activations", "losses", "layers", "data", "metrics", "utils", "visualization"],
package_dir={'': 'src'},
author='Ahmed Mohamed, Ghada Ahmed : AinShams University',
keywords=['DeepLearning', 'FrameWork', 'NeuralNetworks', 'TensorFlow', 'Pytorch', 'Python'],
url='https://github.com/ghada120/Deep-learning-framework',
download_url='https://pypi.org/project/PyloXyloto/',
)
|
python
|
import unittest
from libsaas.executors import test_executor
from libsaas.services import base, twilio
class TwilioTestCase(unittest.TestCase):
def setUp(self):
self.executor = test_executor.use()
self.executor.set_response(b'{}', 200, {})
self.service = twilio.Twilio('my-sid', 'my-token')
self.account = self.service.account('foo')
def expect(self, method=None, uri=None, params=None, headers=None):
if method is not None:
self.assertEqual(method, self.executor.request.method)
if uri is not None:
self.assertEqual(self.executor.request.uri,
'https://api.twilio.com/2010-04-01/' + uri)
if params is not None:
self.assertEqual(self.executor.request.params, params)
if headers is not None:
self.assertEqual(self.executor.request.headers, headers)
def test_translate_inequality(self):
self.assertEqual('NothingToDo',
twilio.resource.translate_inequality('NothingToDo'))
self.assertEqual('Greater>',
twilio.resource.translate_inequality('GreaterGT'))
self.assertEqual('Lower<',
twilio.resource.translate_inequality('LowerLT'))
def test_auth(self):
service = twilio.Twilio('a-sid', 'a-token')
service.accounts().get()
self.expect('GET', 'Accounts.json', {},
{'Authorization': 'Basic YS1zaWQ6YS10b2tlbg=='})
def test_accounts(self):
# Account resource
self.assertRaises(TypeError, self.service.account)
self.account.get()
self.expect('GET', 'Accounts/foo.json')
update_account_data = {'FriendlyName': 'updated-account'}
self.account.update(update_account_data)
self.expect('POST', 'Accounts/foo.json', update_account_data)
self.assertRaises(base.MethodNotSupported, self.account.create)
self.assertRaises(base.MethodNotSupported, self.account.delete)
# Accounts resource
accounts = self.service.accounts()
accounts.get()
self.expect('GET', 'Accounts.json')
accounts.get(FriendlyName='an-account')
self.expect('GET', 'Accounts.json', {'FriendlyName': 'an-account'})
new_account_data = {'FriendlyName': 'foo'}
accounts.create(new_account_data)
self.expect('POST', 'Accounts.json', new_account_data)
self.assertRaises(base.MethodNotSupported, accounts.update)
self.assertRaises(base.MethodNotSupported, accounts.delete)
def test_numbers(self):
# AvailablePhoneNumbers resource
available_phone_numbers = self.account.available_phone_numbers()
self.assertRaises(base.MethodNotSupported,
available_phone_numbers.get)
self.assertRaises(base.MethodNotSupported,
available_phone_numbers.create)
self.assertRaises(base.MethodNotSupported,
available_phone_numbers.update)
self.assertRaises(base.MethodNotSupported,
available_phone_numbers.delete)
# Local AvailablePhoneNumbers resource
local_us = available_phone_numbers.local('US')
self.assertRaises(TypeError, available_phone_numbers.local)
local_us.get()
self.expect('GET', 'Accounts/foo/AvailablePhoneNumbers/US/Local.json')
local_us.get(Contains='510555****')
self.expect('GET', 'Accounts/foo/AvailablePhoneNumbers/US/Local.json',
{'Contains': '510555****'})
self.assertRaises(base.MethodNotSupported, local_us.create)
self.assertRaises(base.MethodNotSupported, local_us.update)
self.assertRaises(base.MethodNotSupported, local_us.delete)
# Toll-Free AvailablePhoneNumbers resource
toll_free_us = available_phone_numbers.toll_free('US')
self.assertRaises(TypeError, available_phone_numbers.toll_free)
toll_free_us.get()
self.expect('GET', 'Accounts/foo/AvailablePhoneNumbers/US/TollFree.json')
toll_free_us.get(Contains='510555****')
self.expect('GET', 'Accounts/foo/AvailablePhoneNumbers/US/TollFree.json',
{'Contains': '510555****'})
self.assertRaises(base.MethodNotSupported, toll_free_us.create)
self.assertRaises(base.MethodNotSupported, toll_free_us.update)
self.assertRaises(base.MethodNotSupported, toll_free_us.delete)
# OutgoingCallerId resource
caller_id = self.account.outgoing_caller_id('a-caller')
caller_id.get()
self.expect('GET', 'Accounts/foo/OutgoingCallerIds/a-caller.json')
update_outgoing_data = {'FriendlyName': 'foo'}
caller_id.update(update_outgoing_data)
self.expect('POST', 'Accounts/foo/OutgoingCallerIds/a-caller.json',
update_outgoing_data)
caller_id.delete()
self.expect('DELETE', 'Accounts/foo/OutgoingCallerIds/a-caller.json')
self.assertRaises(base.MethodNotSupported, caller_id.create)
# OutgoingCallerIds resource
caller_ids = self.account.outgoing_caller_ids()
caller_ids.get()
self.expect('GET', 'Accounts/foo/OutgoingCallerIds.json')
caller_ids.get(FriendlyName='a-caller')
self.expect('GET', 'Accounts/foo/OutgoingCallerIds.json',
{'FriendlyName': 'a-caller'})
new_outgoing_data = {'PhoneNumber': 555, 'Extension': 123}
caller_ids.create(new_outgoing_data)
self.expect('POST', 'Accounts/foo/OutgoingCallerIds.json',
new_outgoing_data)
self.assertRaises(base.MethodNotSupported, caller_ids.update)
self.assertRaises(base.MethodNotSupported, caller_ids.delete)
# IncomingPhoneNumber resource
number = self.account.incoming_phone_number('55510')
number.get()
self.expect('GET', 'Accounts/foo/IncomingPhoneNumbers/55510.json')
update_number_data = {'FriendlyName': 'foo'}
number.update(update_number_data)
self.expect('POST', 'Accounts/foo/IncomingPhoneNumbers/55510.json',
update_number_data)
number.delete()
self.expect('DELETE', 'Accounts/foo/IncomingPhoneNumbers/55510.json')
self.assertRaises(base.MethodNotSupported, number.create)
# IncomingPhoneNumbers resource
numbers = self.account.incoming_phone_numbers()
numbers.get()
self.expect('GET', 'Accounts/foo/IncomingPhoneNumbers.json')
numbers.get(FriendlyName='a-number')
self.expect('GET', 'Accounts/foo/IncomingPhoneNumbers.json',
{'FriendlyName': 'a-number'})
new_number_data = {'PhoneNumber': 555, 'AreaCode': 123}
numbers.create(new_number_data)
self.expect('POST', 'Accounts/foo/IncomingPhoneNumbers.json',
new_number_data)
self.assertRaises(base.MethodNotSupported, numbers.update)
self.assertRaises(base.MethodNotSupported, numbers.delete)
# Local IncomingPhoneNumbers resource
local_numbers = self.account.incoming_phone_numbers().local()
local_numbers.get()
self.expect('GET', 'Accounts/foo/IncomingPhoneNumbers/Local.json')
local_numbers.get(FriendlyName='a-number')
self.expect('GET', 'Accounts/foo/IncomingPhoneNumbers/Local.json',
{'FriendlyName': 'a-number'})
new_number_data = {'PhoneNumber': 555}
local_numbers.create(new_number_data)
self.expect('POST', 'Accounts/foo/IncomingPhoneNumbers/Local.json',
new_number_data)
self.assertRaises(base.MethodNotSupported, local_numbers.update)
self.assertRaises(base.MethodNotSupported, local_numbers.delete)
# Toll-Free IncomingPhoneNumbers resource
toll_free_numbers = self.account.incoming_phone_numbers().toll_free()
toll_free_numbers.get()
self.expect('GET', 'Accounts/foo/IncomingPhoneNumbers/TollFree.json')
toll_free_numbers.get(FriendlyName='number')
self.expect('GET', 'Accounts/foo/IncomingPhoneNumbers/TollFree.json',
{'FriendlyName': 'number'})
new_number_data = {'PhoneNumber': 555}
toll_free_numbers.create(new_number_data)
self.expect('POST', 'Accounts/foo/IncomingPhoneNumbers/TollFree.json',
new_number_data)
self.assertRaises(base.MethodNotSupported, toll_free_numbers.update)
self.assertRaises(base.MethodNotSupported, toll_free_numbers.delete)
def test_applications(self):
# ConnectApp resource
connect_app = self.account.connect_app('app')
self.assertRaises(TypeError, self.account.connect_app)
connect_app.get()
self.expect('GET', 'Accounts/foo/ConnectApps/app.json')
update_app_data = {'FriendlyName': 'foo'}
connect_app.update(update_app_data)
self.expect('POST', 'Accounts/foo/ConnectApps/app.json',
update_app_data)
self.assertRaises(base.MethodNotSupported, connect_app.create)
self.assertRaises(base.MethodNotSupported, connect_app.delete)
# ConnectApps resource
connect_apps = self.account.connect_apps()
connect_apps.get()
self.expect('GET', 'Accounts/foo/ConnectApps.json')
self.assertRaises(base.MethodNotSupported, connect_apps.create)
self.assertRaises(base.MethodNotSupported, connect_apps.update)
self.assertRaises(base.MethodNotSupported, connect_apps.delete)
# AuthorizedConnectApp resource
authorized_connect_app = self.account.authorized_connect_app('app')
self.assertRaises(TypeError, self.account.authorized_connect_app)
authorized_connect_app.get()
self.expect('GET', 'Accounts/foo/AuthorizedConnectApps/app.json')
self.assertRaises(base.MethodNotSupported,
authorized_connect_app.create)
self.assertRaises(base.MethodNotSupported,
authorized_connect_app.update)
self.assertRaises(base.MethodNotSupported,
authorized_connect_app.delete)
# AuthorizedConnectApps resource
authorized_connect_apps = self.account.authorized_connect_apps()
authorized_connect_apps.get()
self.expect('GET', 'Accounts/foo/AuthorizedConnectApps.json')
self.assertRaises(base.MethodNotSupported,
authorized_connect_apps.create)
self.assertRaises(base.MethodNotSupported,
authorized_connect_apps.update)
self.assertRaises(base.MethodNotSupported,
authorized_connect_apps.delete)
# Application resource
application = self.account.application('app')
self.assertRaises(TypeError, self.account.application)
application.get()
self.expect('GET', 'Accounts/foo/Applications/app.json')
update_app_data = {'FriendlyName': 'foo', 'VoiceUrl': 'http://bar/'}
application.update(update_app_data)
self.expect('POST', 'Accounts/foo/Applications/app.json',
update_app_data)
application.delete()
self.expect('DELETE', 'Accounts/foo/Applications/app.json')
self.assertRaises(base.MethodNotSupported, application.create)
# Applications resource
applications = self.account.applications()
applications.get()
self.expect('GET', 'Accounts/foo/Applications.json')
applications.get(FriendlyName='foo')
self.expect('GET', 'Accounts/foo/Applications.json',
{'FriendlyName': 'foo'})
new_app_data = {'FriendlyName': 'foo'}
applications.create(new_app_data)
self.expect('POST', 'Accounts/foo/Applications.json', new_app_data)
self.assertRaises(base.MethodNotSupported, applications.update)
self.assertRaises(base.MethodNotSupported, applications.delete)
def test_calls(self):
# Call resource
call = self.account.call('a-call')
self.assertRaises(TypeError, self.account.call)
call.get()
self.expect('GET', 'Accounts/foo/Calls/a-call.json')
update_call_data = {'Url': 'http://bar/', 'Status': 'completed'}
call.update(update_call_data)
self.expect('POST', 'Accounts/foo/Calls/a-call.json', update_call_data)
self.assertRaises(base.MethodNotSupported, call.create)
self.assertRaises(base.MethodNotSupported, call.delete)
# Call Notifications resource
call_notifications = call.notifications()
call_notifications.get()
self.expect('GET', 'Accounts/foo/Calls/a-call/Notifications.json')
call_notifications.get(MessageDateGT='2012-06-06')
self.expect('GET', 'Accounts/foo/Calls/a-call/Notifications.json',
{'MessageDate>': '2012-06-06'})
self.assertRaises(base.MethodNotSupported, call_notifications.create)
self.assertRaises(base.MethodNotSupported, call_notifications.update)
self.assertRaises(base.MethodNotSupported, call_notifications.delete)
# Call Recordings resource
call_recordings = call.recordings()
call_recordings.get()
self.expect('GET', 'Accounts/foo/Calls/a-call/Recordings.json')
call_recordings.get(DateCreatedLT='2012-06-06')
self.expect('GET', 'Accounts/foo/Calls/a-call/Recordings.json',
{'DateCreated<': '2012-06-06'})
self.assertRaises(base.MethodNotSupported, call_recordings.create)
self.assertRaises(base.MethodNotSupported, call_recordings.update)
self.assertRaises(base.MethodNotSupported, call_recordings.delete)
# Calls resource
calls = self.account.calls()
calls.get()
self.expect('GET', 'Accounts/foo/Calls.json')
calls.get(StartTimeGT='2012-06-06')
self.expect('GET', 'Accounts/foo/Calls.json',
{'StartTime>': '2012-06-06'})
new_call_data = {'Url': 'http://bar/'}
calls.create(new_call_data)
self.expect('POST', 'Accounts/foo/Calls.json', new_call_data)
self.assertRaises(base.MethodNotSupported, calls.update)
self.assertRaises(base.MethodNotSupported, calls.delete)
def test_conferences(self):
# Conference resource
conference = self.account.conference('conf')
self.assertRaises(TypeError, self.account.conference)
conference.get()
self.expect('GET', 'Accounts/foo/Conferences/conf.json')
self.assertRaises(base.MethodNotSupported, conference.create)
self.assertRaises(base.MethodNotSupported, conference.update)
self.assertRaises(base.MethodNotSupported, conference.delete)
# Conference participant resource
participant = conference.participant('guy')
participant.get()
self.expect('GET',
'Accounts/foo/Conferences/conf/Participants/guy.json')
update_participant_data = {'Muted': True}
participant.update(update_participant_data)
self.expect('POST',
'Accounts/foo/Conferences/conf/Participants/guy.json',
update_participant_data)
participant.delete()
self.expect('DELETE',
'Accounts/foo/Conferences/conf/Participants/guy.json')
self.assertRaises(base.MethodNotSupported, participant.create)
# Conference participants resource
participants = conference.participants()
participants.get()
self.expect('GET', 'Accounts/foo/Conferences/conf/Participants.json')
participants.get(Muted=True)
self.expect('GET', 'Accounts/foo/Conferences/conf/Participants.json',
{'Muted': 'true'})
self.assertRaises(base.MethodNotSupported, participants.create)
self.assertRaises(base.MethodNotSupported, participants.update)
self.assertRaises(base.MethodNotSupported, participants.delete)
# Conference resource
conferences = self.account.conferences()
conferences.get()
self.expect('GET', 'Accounts/foo/Conferences.json')
conferences.get(DateUpdatedGT='2012-06-06')
self.expect('GET', 'Accounts/foo/Conferences.json',
{'DateUpdated>': '2012-06-06'})
self.assertRaises(base.MethodNotSupported, conferences.create)
self.assertRaises(base.MethodNotSupported, conferences.update)
self.assertRaises(base.MethodNotSupported, conferences.delete)
def test_queues(self):
# Queue resource
queue = self.account.queue('queue')
self.assertRaises(TypeError, self.account.queue)
queue.get()
self.expect('GET', 'Accounts/foo/Queues/queue.json')
update_queue_data = {'CurrentSize': 16}
queue.update(update_queue_data)
self.expect('POST', 'Accounts/foo/Queues/queue.json',
update_queue_data)
queue.delete()
self.expect('DELETE', 'Accounts/foo/Queues/queue.json')
self.assertRaises(base.MethodNotSupported, queue.create)
# Queue Member resource
member = queue.member('member')
member.get()
self.expect('GET', 'Accounts/foo/Queues/queue/Members/member.json')
update_member_data = {'Url': 'http://bar/'}
member.update(update_member_data)
self.expect('POST', 'Accounts/foo/Queues/queue/Members/member.json',
update_member_data)
self.assertRaises(base.MethodNotSupported, member.create)
self.assertRaises(base.MethodNotSupported, member.delete)
# Queue Members resource
members = queue.members()
members.get()
self.expect('GET', 'Accounts/foo/Queues/queue/Members.json')
self.assertRaises(base.MethodNotSupported, members.create)
self.assertRaises(base.MethodNotSupported, members.update)
self.assertRaises(base.MethodNotSupported, members.delete)
# Queues resource
queues = self.account.queues()
queues.get()
self.expect('GET', 'Accounts/foo/Queues.json')
new_queue_data = {'FriendlyName': 'foo', 'MaxSize': 12}
queues.create(new_queue_data)
self.expect('POST', 'Accounts/foo/Queues.json', new_queue_data)
self.assertRaises(base.MethodNotSupported, queues.update)
self.assertRaises(base.MethodNotSupported, queues.delete)
def test_sms(self):
# SMS resource
sms = self.account.sms()
self.assertRaises(base.MethodNotSupported, sms.get)
self.assertRaises(base.MethodNotSupported, sms.create)
self.assertRaises(base.MethodNotSupported, sms.update)
self.assertRaises(base.MethodNotSupported, sms.delete)
# SMS Message resource
message = sms.message('message')
self.assertRaises(TypeError, sms.message)
message.get()
self.expect('GET', 'Accounts/foo/SMS/Messages/message.json')
self.assertRaises(base.MethodNotSupported, message.create)
self.assertRaises(base.MethodNotSupported, message.update)
self.assertRaises(base.MethodNotSupported, message.delete)
# SMS Messages resource
messages = sms.messages()
messages.get()
self.expect('GET', 'Accounts/foo/SMS/Messages.json')
messages.get(DateSentLT='2012-06-06')
self.expect('GET', 'Accounts/foo/SMS/Messages.json',
{'DateSent<': '2012-06-06'})
new_message_data = {'To': '55510', 'Body': 'foo bar baz'}
messages.create(new_message_data)
self.expect('POST', 'Accounts/foo/SMS/Messages.json',
new_message_data)
self.assertRaises(base.MethodNotSupported, messages.update)
self.assertRaises(base.MethodNotSupported, messages.delete)
# ShortCode resource
short_code = sms.short_code('55510')
self.assertRaises(TypeError, sms.short_code)
short_code.get()
self.expect('GET', 'Accounts/foo/SMS/ShortCodes/55510.json')
update_code_data = {'FriendlyName': 'foo'}
short_code.update(update_code_data)
self.expect('POST', 'Accounts/foo/SMS/ShortCodes/55510.json',
update_code_data)
self.assertRaises(base.MethodNotSupported, short_code.create)
self.assertRaises(base.MethodNotSupported, short_code.delete)
# ShortCodes resource
short_codes = sms.short_codes()
short_codes.get()
self.expect('GET', 'Accounts/foo/SMS/ShortCodes.json')
short_codes.get(ShortCode='55510')
self.expect('GET', 'Accounts/foo/SMS/ShortCodes.json',
{'ShortCode': '55510'})
self.assertRaises(base.MethodNotSupported, short_codes.create)
self.assertRaises(base.MethodNotSupported, short_codes.update)
self.assertRaises(base.MethodNotSupported, short_codes.delete)
def test_recordings(self):
# Recording resource
recording = self.account.recording('rec')
self.assertRaises(TypeError, self.account.recording)
recording.get()
self.expect('GET', 'Accounts/foo/Recordings/rec.json')
recording.delete()
self.expect('DELETE', 'Accounts/foo/Recordings/rec.json')
self.assertRaises(base.MethodNotSupported, recording.create)
self.assertRaises(base.MethodNotSupported, recording.update)
# Recording Transcriptions resource
record_trans = recording.transcriptions()
record_trans.get()
self.expect('GET', 'Accounts/foo/Recordings/rec/Transcriptions.json')
self.assertRaises(base.MethodNotSupported, record_trans.create)
self.assertRaises(base.MethodNotSupported, record_trans.update)
self.assertRaises(base.MethodNotSupported, record_trans.delete)
# Recordings resource
recordings = self.account.recordings()
recordings.get()
self.expect('GET', 'Accounts/foo/Recordings.json')
recordings.get(DateCreatedGT='2012-06-06')
self.expect('GET', 'Accounts/foo/Recordings.json',
{'DateCreated>': '2012-06-06'})
self.assertRaises(base.MethodNotSupported, recordings.create)
self.assertRaises(base.MethodNotSupported, recordings.update)
self.assertRaises(base.MethodNotSupported, recordings.delete)
# Transcription resource
transcription = self.account.transcription('trans')
self.assertRaises(TypeError, self.account.transcription)
transcription.get()
self.expect('GET', 'Accounts/foo/Transcriptions/trans.json')
self.assertRaises(base.MethodNotSupported, transcription.create)
self.assertRaises(base.MethodNotSupported, transcription.update)
self.assertRaises(base.MethodNotSupported, transcription.delete)
# Transcriptions resource
transcriptions = self.account.transcriptions()
transcriptions.get()
self.expect('GET', 'Accounts/foo/Transcriptions.json')
self.assertRaises(base.MethodNotSupported, transcriptions.create)
self.assertRaises(base.MethodNotSupported, transcriptions.update)
self.assertRaises(base.MethodNotSupported, transcriptions.delete)
def test_notifications(self):
# Notification resource
notification = self.account.notification('noti')
self.assertRaises(TypeError, self.account.notification)
notification.get()
self.expect('GET', 'Accounts/foo/Notifications/noti.json')
notification.delete()
self.expect('DELETE', 'Accounts/foo/Notifications/noti.json')
self.assertRaises(base.MethodNotSupported, notification.create)
self.assertRaises(base.MethodNotSupported, notification.update)
# Notifications resource
notifications = self.account.notifications()
notifications.get()
self.expect('GET', 'Accounts/foo/Notifications.json')
notifications.get(MessageDateGT='2012-06-06')
self.expect('GET', 'Accounts/foo/Notifications.json',
{'MessageDate>': '2012-06-06'})
self.assertRaises(base.MethodNotSupported, notifications.create)
self.assertRaises(base.MethodNotSupported, notifications.update)
self.assertRaises(base.MethodNotSupported, notifications.delete)
def test_usage(self):
# Usage resource
usage = self.account.usage()
self.assertRaises(base.MethodNotSupported, usage.get)
self.assertRaises(base.MethodNotSupported, usage.create)
self.assertRaises(base.MethodNotSupported, usage.update)
self.assertRaises(base.MethodNotSupported, usage.delete)
# Records resource
records = usage.records()
records.get()
self.expect('GET', 'Accounts/foo/Usage/Records.json')
records.get(Category='calls', StartDate='-30days', Page=3)
self.expect('GET', 'Accounts/foo/Usage/Records.json',
{'Category': 'calls', 'StartDate': '-30days',
'Page': 3})
self.assertRaises(base.MethodNotSupported, records.create)
self.assertRaises(base.MethodNotSupported, records.update)
self.assertRaises(base.MethodNotSupported, records.delete)
# Records sub-resources
subresources = [
('daily', 'Daily'),
('monthly', 'Monthly'),
('yearly', 'Yearly'),
('all_time', 'AllTime'),
('today', 'Today'),
('yesterday', 'Yesterday'),
('this_month', 'ThisMonth'),
('last_month', 'LastMonth')
]
for subresource in subresources:
resource = getattr(records, subresource[0])()
url = 'Accounts/foo/Usage/Records/{0}.json'.format(subresource[1])
resource.get()
self.expect('GET', url)
resource.get(Category='calls', StartDate='-30days', Page=3)
self.expect('GET', url, {'Category': 'calls',
'StartDate': '-30days',
'Page': 3})
self.assertRaises(base.MethodNotSupported, resource.create)
self.assertRaises(base.MethodNotSupported, resource.update)
self.assertRaises(base.MethodNotSupported, resource.delete)
# Trigger resource
trigger = usage.trigger('trigger')
self.assertRaises(TypeError, self.account.usage().trigger)
trigger.get()
self.expect('GET', 'Accounts/foo/Usage/Triggers/trigger.json')
update_trigger_data = {'CallbackUrl': 'http://foo/bar/'}
trigger.update(update_trigger_data)
self.expect('POST', 'Accounts/foo/Usage/Triggers/trigger.json',
update_trigger_data)
trigger.delete()
self.expect('DELETE', 'Accounts/foo/Usage/Triggers/trigger.json')
self.assertRaises(base.MethodNotSupported, trigger.create)
# Triggers resource
triggers = usage.triggers()
triggers.get()
self.expect('GET', 'Accounts/foo/Usage/Triggers.json')
triggers.get(UsageCategory='calls', Page=3)
self.expect('GET', 'Accounts/foo/Usage/Triggers.json',
{'UsageCategory': 'calls', 'Page': 3})
new_trigger_data = {'UsageCategory': 'calls', 'TriggerValue': '+30'}
triggers.create(new_trigger_data)
self.expect('POST', 'Accounts/foo/Usage/Triggers.json',
new_trigger_data)
self.assertRaises(base.MethodNotSupported, triggers.update)
self.assertRaises(base.MethodNotSupported, triggers.delete)
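

# The assertions above lean on two pieces of scaffolding defined elsewhere in this
# test module: a base.MethodNotSupported exception and an expect(method, uri,
# params=None) helper that checks the last request issued through a mocked HTTP
# layer. That harness is not shown here; the classes below are only a minimal
# sketch, assuming the resources record each request on a shared fake client.
# All underscore-prefixed names are hypothetical and not part of the library under test.
class _FakeClientSketch(object):
    """Hypothetical client that records the last request instead of sending it."""

    def __init__(self):
        self.last_method = None
        self.last_uri = None
        self.last_params = None

    def request(self, method, uri, params=None):
        self.last_method, self.last_uri, self.last_params = method, uri, params


class _ExpectMixinSketch(object):
    """Hypothetical mixin (for a unittest.TestCase) providing the expect() used above."""

    def expect(self, method, uri, params=None):
        self.assertEqual(method, self.client.last_method)
        self.assertEqual(uri, self.client.last_uri)
        if params is not None:
            self.assertEqual(params, self.client.last_params)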
|
python
|
from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
import os
import pandas
from functions import getbinary
from sklearn.utils import shuffle
class LogisticModel(object):
def __init__(self):
pass
    def fit(self, X, Y, X_test, Y_test, lr=10e-7, reg=10e-22, epoch=120000, show_fig=False):
        N, D = X.shape
        # Initialize the weights with variance scaled by the number of features
        self.W = (np.random.randn(D) / np.sqrt(D)).reshape(D, 1)
        # Full-batch gradient descent on the L2-regularized cross-entropy.
        # No separate bias term is trained here; a bias column is prepended to X in main().
        cost = []
        for i in range(epoch):
            y_pred = self.forward(X)
            # Gradient of the cross-entropy w.r.t. W is X.T.dot(y_pred - Y); add the L2 penalty
            self.W = self.W - lr * (X.T.dot(y_pred - Y) + reg * self.W)
            if i % 20 == 0:
                yhat = self.forward(X_test)
                c = self.cross_entropy(Y_test, yhat)
                cost.append(c)
                r = self.classification_rate(Y_test, np.round(yhat))
                print("i:", i, "cost:", c, "rate:", r)
        if show_fig:
            # Plot the test-set cross-entropy collected every 20 iterations
            plt.plot(cost)
            plt.xlabel("iteration / 20")
            plt.ylabel("test cross-entropy")
            plt.show()
    def sigmoid(self, Z):
        return 1 / (1 + np.exp(-Z))

    def forward(self, X):
        return self.sigmoid(X.dot(self.W))

    def classification_rate(self, T, Y):
        return np.mean(T == Y)

    def cross_entropy(self, T, Y):
        return -(T * np.log(Y) + (1 - T) * np.log(1 - Y)).sum()
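

# Quick sanity check for LogisticModel on synthetic 2D data. This is an illustrative
# sketch only and is not part of the original pipeline; it avoids the project-specific
# getbinary() helper so it can be run standalone by calling _toy_demo() manually.
def _toy_demo():
    np.random.seed(0)
    N = 1000
    # Two overlapping Gaussian blobs in 2D, labelled 0 and 1
    X0 = np.random.randn(N // 2, 2) + np.array([-1.0, -1.0])
    X1 = np.random.randn(N // 2, 2) + np.array([1.0, 1.0])
    X = np.vstack([X0, X1])
    Y = np.vstack([np.zeros((N // 2, 1)), np.ones((N // 2, 1))])
    X, Y = shuffle(X, Y)
    # Prepend a bias column, mirroring what main() does below
    X = np.hstack([np.ones((X.shape[0], 1)), X])
    X_train, Y_train = X[:-200], Y[:-200]
    X_test, Y_test = X[-200:], Y[-200:]
    model = LogisticModel()
    # A larger learning rate and far fewer epochs are enough for this easy problem
    model.fit(X_train, Y_train, X_test, Y_test, lr=1e-4, reg=1e-3, epoch=2000)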
def main():
    # Extract the binary-class subset of the dataset using the helper getbinary()
    X, Y = getbinary()
    Y = Y.reshape(Y.shape[0], 1)
    # Only 547 samples belong to class 1 versus 4953 for class 0, so the classes are
    # imbalanced. We address this by oversampling class 1 to roughly 9x its original size.
    X_class1 = []
    for i in range(Y.shape[0]):
        if Y[i] == 1:
            X_class1.append(X[i, :])
    # Repeat each class-1 row 8 times, stacking vertically along the rows (4376 extra rows)
    X_class1 = np.repeat(X_class1, 8, axis=0)
    X = np.vstack((X, X_class1))
    Y_class1 = np.ones((X_class1.shape[0], 1))
    Y = np.vstack([Y, Y_class1])
    X, Y = shuffle(X, Y)
    # Now there are 4923 samples of class 1 and 4953 of class 0, so the imbalance is resolved.
    # Split into train and test sets
    X_test, Y_test = X[-1000:, :], Y[-1000:, :]
    X, Y = X[:-1000, :], Y[:-1000, :]
    # Prepend a bias column of ones to both sets
    bias = np.ones((X.shape[0], 1))
    X = np.hstack([bias, X])
    bias_test = np.ones((X_test.shape[0], 1))
    X_test = np.hstack([bias_test, X_test])
    model = LogisticModel()
    model.fit(X, Y, X_test, Y_test, show_fig=True)


if __name__ == '__main__':
    main()
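

# The class-imbalance handling in main() duplicates the minority-class rows by hand
# with np.repeat. As a point of comparison only, the sketch below expresses the same
# idea with sklearn's resample helper; it is a small, self-contained utility that the
# original script does not use or depend on.
def _oversample_minority(X, Y, factor=9, random_state=42):
    from sklearn.utils import resample
    minority_mask = (Y.ravel() == 1)
    X_min, Y_min = X[minority_mask], Y[minority_mask]
    # Draw factor * len(X_min) minority rows with replacement
    X_up, Y_up = resample(X_min, Y_min,
                          replace=True,
                          n_samples=factor * X_min.shape[0],
                          random_state=random_state)
    X_bal = np.vstack([X[~minority_mask], X_up])
    Y_bal = np.vstack([Y[~minority_mask], Y_up])
    return shuffle(X_bal, Y_bal, random_state=random_state)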
|
python
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Import Local Modules
from marvin.cloudstackTestCase import cloudstackTestCase, unittest
from marvin.cloudstackAPI import createVolume, createTemplate
from marvin.lib.utils import (cleanup_resources,
random_gen, validateList)
from marvin.lib.base import (Account,
VirtualMachine,
ServiceOffering,
Volume,
DiskOffering,
Snapshot,
Template,
SnapshotPolicy)
from marvin.lib.common import (get_domain,
get_zone,
get_template,
find_storage_pool_type)
from nose.plugins.attrib import attr
from marvin.codes import PASS
class TestVolumes(cloudstackTestCase):
@classmethod
def setUpClass(cls):
try:
cls._cleanup = []
cls.testClient = super(TestVolumes, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = cls.testClient.getParsedTestDataConfig()
cls.hypervisor = cls.testClient.getHypervisorInfo()
if cls.hypervisor.lower() == 'lxc':
                if not find_storage_pool_type(cls.api_client, storagetype='rbd'):
raise unittest.SkipTest("RBD storage type is required for data volumes for LXC")
# Get Domain, Zone, Template
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(
cls.api_client,
cls.testClient.getZoneForTests())
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
if cls.zone.localstorageenabled:
cls.storagetype = 'local'
cls.services["service_offerings"][
"tiny"]["storagetype"] = 'local'
cls.services["disk_offering"]["storagetype"] = 'local'
else:
cls.storagetype = 'shared'
cls.services["service_offerings"][
"tiny"]["storagetype"] = 'shared'
cls.services["disk_offering"]["storagetype"] = 'shared'
cls.services['mode'] = cls.zone.networktype
cls.services["virtual_machine"][
"hypervisor"] = cls.hypervisor
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = cls.template.id
cls.services["custom_volume"]["zoneid"] = cls.zone.id
# Creating Disk offering, Service Offering and Account
cls.disk_offering = DiskOffering.create(
cls.api_client,
cls.services["disk_offering"]
)
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offerings"]["tiny"]
)
cls._cleanup.append(cls.disk_offering)
cls._cleanup.append(cls.service_offering)
except Exception as e:
cls.tearDownClass()
raise Exception("Warning: Exception in setup : %s" % e)
return
def setUp(self):
self.apiClient = self.testClient.getApiClient()
self.account = Account.create(
self.apiClient,
self.services["account"],
domainid=self.domain.id
)
# Getting authentication for user in newly created Account
self.user = self.account.user[0]
self.userapiclient = self.testClient.getUserApiClient(
self.user.username,
self.domain.name)
# Creating Virtual Machine
self.virtual_machine = VirtualMachine.create(
self.userapiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
)
self.cleanup = [self.account, ]
def tearDown(self):
# Clean up, terminate the created volumes
cleanup_resources(self.apiClient, self.cleanup)
return
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
def __verify_values(self, expected_vals, actual_vals):
"""
@summary: Function to verify expected and actual values
Step1: Initializing return flag to True
Step1: Verifying length of expected and actual dictionaries is
matching
If not matching returning false
Step2: Listing all the keys from expected dictionary
Step3: Looping through each key from step2 and verifying expected
and actual dictionaries have same value
If not making return flag to False
Step4: returning the return flag after all the values are verified
"""
return_flag = True
if len(expected_vals) != len(actual_vals):
return False
keys = expected_vals.keys()
for i in range(0, len(expected_vals)):
exp_val = expected_vals[keys[i]]
act_val = actual_vals[keys[i]]
if exp_val == act_val:
return_flag = return_flag and True
else:
return_flag = return_flag and False
self.debug(
"expected Value: %s, is not matching with actual value:\
%s" %
(exp_val, act_val))
return return_flag
@attr(tags=["advanced", "basic"], required_hardware="true")
def test_01_list_volumes_pagination(self):
"""
@summary: Test List Volumes pagination
Step1: Listing all the volumes for a user
Step2: Verifying listed volumes for account created at class level
Step3: If number of volumes is less than (page size + 1),
then creating them
Step4: Listing all the volumes again after creation of volumes
Step5: Verifying the length of the volumes is (page size + 1)
Step6: Listing all the volumes in page1
Step7: Verifying that the length of the volumes in page 1 is
(page size)
Step8: Listing all the volumes in page2
Step9: Verifying that the length of the volumes in page 2 is 1
Step10: Deleting the volume present in page 2
Step11: Listing for the volumes on page 2
Step12: Verifying that there are no volumes present in page 2
"""
# Listing all the volumes for a user
list_volumes_before = Volume.list(
self.userapiclient,
listall=self.services["listall"])
# Verifying listed volumes for account created at class level
self.assertIsNotNone(
list_volumes_before,
"create volume from VM failed at class setup method"
)
self.assertEqual(
len(list_volumes_before),
1,
"more than 1 volume created from VM at class level"
)
# If number of volumes is less than (pagesize + 1), then creating them
for i in range(0, (self.services["pagesize"])):
volume_created = Volume.create(
self.userapiclient,
self.services["volume"],
zoneid=self.zone.id,
diskofferingid=self.disk_offering.id
)
self.assertIsNotNone(
volume_created,
"Volume is not created"
)
# Listing all the volumes again after creation of volumes
list_volumes_after = Volume.list(
self.userapiclient,
listall=self.services["listall"])
# Verifying the length of the volumes is (page size + 1)
self.assertEqual(
len(list_volumes_after),
(self.services["pagesize"] + 1),
"Number of volumes created is not matching expected"
)
# Listing all the volumes in page1
list_volumes_page1 = Volume.list(
self.userapiclient,
listall=self.services["listall"],
page=1,
pagesize=self.services["pagesize"]
)
self.assertIsNotNone(
list_volumes_page1,
"No volumes found in Page 1"
)
# Verifying that the length of the volumes in page 1 is (page size)
self.assertEqual(
len(list_volumes_page1),
self.services["pagesize"],
"List Volume response is not matching with\
the page size length for page 1"
)
# Listing all the volumes in page2
list_volumes_page2 = Volume.list(
self.userapiclient,
listall=self.services["listall"],
page=2,
pagesize=self.services["pagesize"]
)
self.assertIsNotNone(
list_volumes_page2,
"No volumes found in Page 2"
)
# Verifying that the length of the volumes in page 2 is 1
self.assertEqual(
len(list_volumes_page2),
1,
"List Volume response is not matching with\
the page size length for page 2"
)
volume_page2 = list_volumes_page2[0]
# Verifying that the volume on page 2 is not present in page1
for i in range(0, len(list_volumes_page1)):
volume_page1 = list_volumes_page1[i]
self.assertNotEquals(
volume_page2.id,
volume_page1.id,
"Volume listed in page 2 is also listed in page 1"
)
# Deleting a single volume
Volume.delete(volume_created, self.userapiclient)
# Listing the volumes in page 2
list_volume_response = Volume.list(
self.userapiclient,
listall=self.services["listall"],
page=2,
pagesize=self.services["pagesize"]
)
        # Verifying that the volume does not exist on page 2
self.assertEqual(
list_volume_response,
None,
"Volume was not deleted"
)
return
@attr(tags=["advanced", "basic"], required_hardware="true")
def test_02_list_volume_byid(self):
"""
@summary: Test List Volumes with Id
Step1: Listing all the volumes for a user before creating a data volume
Step2: Verifying the length of the list as 1
Step3: Creating a data volume
Step4: Listing all the volumes for a user after creating a data volume
Step5: Verifying the list volume size is increased by 1
Step6: List the volumes by specifying root volume Id
Step7: Verifying the details of the root volume
Step8: List the volumes by specifying data volume Id
Step9: Verifying the details of the data volume
"""
# Listing all the volumes for a user before creating a data volume
list_volumes_before = Volume.list(
self.userapiclient,
listall=self.services["listall"]
)
self.assertIsNotNone(
list_volumes_before,
"create volume from VM failed at class setup method")
# Verifying the length of the list as 1
self.assertEqual(
len(list_volumes_before),
1,
"more than 1 volume created at class level"
)
root_volume = list_volumes_before[0]
# Creating a data volume
volume_created = Volume.create(
self.userapiclient,
self.services["volume"],
zoneid=self.zone.id,
diskofferingid=self.disk_offering.id
)
self.assertIsNotNone(
volume_created,
"Volume is not created"
)
# Listing all the volumes for a user after creating a data volume
list_volumes_after = Volume.list(
self.userapiclient,
listall=self.services["listall"]
)
self.assertIsNotNone(
list_volumes_after,
"Volume creation failed"
)
# Verifying the list volume size is increased by 1
self.assertEqual(
len(list_volumes_before) + 1,
len(list_volumes_after),
"list volume is not matching with Number of volumes created"
)
# Listing a Root Volume by Id and verifying the volume details
list_volumes_by_id = Volume.list(
self.userapiclient,
listall=self.services["listall"],
id=root_volume.id
)
self.assertIsNotNone(
list_volumes_by_id,
"Root volume is not listed"
)
self.assertEqual(
1,
len(list_volumes_by_id),
"list volume is not matching with Number of volumes created"
)
obtained_volume = list_volumes_by_id[0]
# Creating expected and actual values dictionaries
expected_dict = {
"id": root_volume.id,
"name": root_volume.name,
"vmname": self.virtual_machine.name,
"state": "Ready",
"type": "ROOT",
"zoneid": self.zone.id,
"account": self.account.name,
"storagetype": self.storagetype,
"size": self.template.size
}
actual_dict = {
"id": obtained_volume.id,
"name": obtained_volume.name,
"vmname": obtained_volume.vmname,
"state": obtained_volume.state,
"type": obtained_volume.type,
"zoneid": obtained_volume.zoneid,
"account": obtained_volume.account,
"storagetype": obtained_volume.storagetype,
"size": obtained_volume.size,
}
root_volume_status = self.__verify_values(
expected_dict,
actual_dict
)
self.assertEqual(
True,
root_volume_status,
"Listed Root Volume details are not as expected"
)
# Listing a Data Volume by Id and verifying the volume details
list_volumes_by_id = Volume.list(
self.userapiclient,
listall=self.services["listall"],
id=volume_created.id
)
self.assertIsNotNone(
list_volumes_by_id,
"Data volume is not listed"
)
self.assertEqual(
len(list_volumes_by_id),
1,
"list volume is not matching with Number of volumes created"
)
obtained_volume = list_volumes_by_id[0]
# Creating expected and actual values dictionaries
expected_dict = {
"id": volume_created.id,
"name": volume_created.name,
"state": "Allocated",
"type": "DATADISK",
"zoneid": self.zone.id,
"account": self.account.name,
"storagetype": self.storagetype,
"size": self.disk_offering.disksize
}
actual_dict = {
"id": obtained_volume.id,
"name": obtained_volume.name,
"state": obtained_volume.state,
"type": obtained_volume.type,
"zoneid": obtained_volume.zoneid,
"account": obtained_volume.account,
"storagetype": obtained_volume.storagetype,
"size": obtained_volume.size / (1024 * 1024 * 1024),
}
root_volume_status = self.__verify_values(
expected_dict,
actual_dict
)
self.assertEqual(
True,
root_volume_status,
"Listed Data Volume details are not as expected"
)
return
@attr(tags=["advanced", "basic"], required_hardware="true")
def test_03_data_volume_resize(self):
"""
@summary: Test to verify creation and resize of data volume
Step1: Listing the volumes for a user before creating data volume
Step2: Creating a data volume
Step3: Listing the volumes for a user after creating data volume
Step4: Attaching and Detaching data volume created to Virtual Machine
Step5: Verifying if there exists a disk offering with higher size
If not present creating it
Step6: Resizing data volume
"""
if self.hypervisor.lower() in ['hyperv']:
            raise unittest.SkipTest(
                "This feature is not supported on the existing "
                "hypervisor. Hence, skipping the test")
# Listing volumes for a user before creating a volume
list_volumes_before = Volume.list(
self.userapiclient,
listall=self.services["listall"])
# Creating a data volume
volume_created = Volume.create(
self.userapiclient,
self.services["volume"],
zoneid=self.zone.id,
diskofferingid=self.disk_offering.id
)
self.assertIsNotNone(volume_created, "Data volume creation failed")
# Listing volumes for a user after creating data volume
list_volumes_after = Volume.list(
self.userapiclient,
listall=self.services["listall"])
self.assertEquals(
len(list_volumes_before) + 1,
len(list_volumes_after),
"Data volume creation failed"
)
# Attaching data volume created to Virtual Machine
self.virtual_machine.attach_volume(
self.userapiclient,
volume_created
)
list_volumes = Volume.list(
self.userapiclient,
listall=self.services["listall"],
id=volume_created.id
)
attached_volume = list_volumes[0]
self.assertIsNotNone(
attached_volume.vmname,
"VM is not attached to Volume"
)
self.assertEquals(
self.virtual_machine.name,
attached_volume.vmname,
"VM Name is not matching with attached vm"
)
# Detaching data volume from Virtual Machine
self.virtual_machine.detach_volume(
self.userapiclient,
volume_created
)
list_volumes = Volume.list(
self.userapiclient,
listall=self.services["listall"],
id=volume_created.id
)
detached_volume = list_volumes[0]
self.assertIsNone(
detached_volume.vmname,
"VM is not detached from volume"
)
# Verifying if there exists a disk offering with higher size. If not
# present creating it
list_disk_offerings = DiskOffering.list(self.apiClient)
large_disk_offering_exists = False
# Converting disk_size in bytes to GB
current_disk_size = volume_created.size / (1024 * 1024 * 1024)
for disk_offering in list_disk_offerings:
if ((disk_offering.disksize > current_disk_size) and (
not disk_offering.iscustomized) and
disk_offering.storagetype == self.storagetype):
new_disk_offering = disk_offering
large_disk_offering_exists = True
break
if not large_disk_offering_exists:
new_size = (volume_created.size / (1024 * 1024 * 1024)) + 1
self.services["disk_offering"]["disksize"] = new_size
new_disk_offering = DiskOffering.create(
self.apiClient,
self.services["disk_offering"]
)
if new_disk_offering is not None:
self.cleanup.append(new_disk_offering)
else:
new_size = new_disk_offering.disksize
# Resizing data volume
resized_volume = volume_created.resize(
self.userapiclient,
diskofferingid=new_disk_offering.id,
shrinkok='false',
)
self.assertIsNotNone(resized_volume, "Resize Volume failed")
# Verifying data volume size is increased
self.assertEquals(
new_size,
(resized_volume.size / (1024 * 1024 * 1024)),
"volume not resized to expected value"
)
return
@attr(tags=["advanced", "basic"], required_hardware="true")
def test_04_custom_volume_resize(self):
"""
@summary: Test to verify creation and resize of custom volume
Step1: Checking if Custom disk offering already exists.
If not present then creating custom Disk Offering
Step2: Listing the volumes for a user before creating custom volume
Step3: Creating a custom volume
Step4: Listing the volumes for a user after creating custom volume
Step5: Attaching and Detaching custom volume created to Virtual Machine
Step6: Resizing custom volume
"""
if self.hypervisor.lower() in ['hyperv']:
            raise unittest.SkipTest(
                "This feature is not supported on the existing "
                "hypervisor. Hence, skipping the test")
# Listing all the disk offerings
list_disk_offerings = DiskOffering.list(self.apiClient)
custom_disk_offering_exists = False
# Verifying if a custom disk offering already exists
if list_disk_offerings is not None:
for disk_offering in list_disk_offerings:
if (disk_offering.iscustomized and disk_offering.storagetype ==
self.storagetype):
custom_disk_offering = disk_offering
custom_disk_offering_exists = True
break
# If a custom disk offering does not exists, then creating a custom
# disk offering
if not custom_disk_offering_exists:
custom_disk_offering = DiskOffering.create(
self.apiClient,
self.services["disk_offering"],
custom=True
)
if custom_disk_offering is not None:
self.cleanup.append(custom_disk_offering)
# Listing the volumes for a user before creating custom volume
list_volumes_before = Volume.list(
self.userapiclient,
listall=self.services["listall"])
# Creating a custom volume
volume_created = Volume.create_custom_disk(
self.userapiclient,
self.services["custom_volume"],
account=self.account.name,
domainid=self.account.domainid,
diskofferingid=custom_disk_offering.id
)
self.assertIsNotNone(
volume_created,
"Custom volume did not get created"
)
# Listing the volumes for a user after creating custom volume
list_volumes_after = Volume.list(
self.userapiclient,
listall=self.services["listall"])
        # Verifying that the volume list is increased by 1 after creation of
        # the custom volume
self.assertEquals(
len(list_volumes_before) + 1,
len(list_volumes_after),
"Custom volume did not get created"
)
# Attaching custom volume created to Virtual Machine
self.virtual_machine.attach_volume(
self.userapiclient,
volume_created
)
list_volumes = Volume.list(
self.userapiclient,
listall=self.services["listall"],
id=volume_created.id
)
attached_volume = list_volumes[0]
self.assertIsNotNone(
attached_volume.vmname,
"VM is not attached to Volume"
)
self.assertEquals(
self.virtual_machine.name,
attached_volume.vmname,
"VM Name is not matching with attached vm"
)
# Detaching custom volume from Virtual Machine
self.virtual_machine.detach_volume(
self.userapiclient,
volume_created
)
list_volumes = Volume.list(
self.userapiclient,
listall=self.services["listall"],
id=volume_created.id
)
detached_volume = list_volumes[0]
self.assertIsNone(
detached_volume.vmname,
"VM is not detached from volume"
)
# Resizing custom volume
# Increasing custom disk size by 1
new_size = self.services["custom_volume"]["customdisksize"] + 1
resized_volume = volume_created.resize(
self.userapiclient,
diskofferingid=custom_disk_offering.id,
shrinkok='false',
size=new_size)
self.assertIsNotNone(resized_volume, "Resize Volume failed")
# Verifying that custom disk size is increased
self.assertEquals(
new_size,
(resized_volume.size / (1024 * 1024 * 1024)),
"volume not resized to expected value"
)
return
@attr(tags=["advanced", "basic"], required_hardware="true")
def test_05_volume_snapshot(self):
"""
@summary: Test to verify creation of snapshot from volume
and creation of template, volume from snapshot
Step1: Creating a volume
Step2: Attaching and Detaching custom volume created to Virtual Machine
Step3: Creating Snapshot from volume
Step4: Creating Volume from snapshot
Step5: Creating Template from Snapshot
"""
if self.hypervisor.lower() in ['hyperv']:
            raise unittest.SkipTest(
                "This feature is not supported on the existing "
                "hypervisor. Hence, skipping the test")
list_volumes_before = Volume.list(
self.userapiclient,
listall=self.services["listall"])
volume_created = Volume.create(
self.userapiclient,
self.services["volume"],
zoneid=self.zone.id,
diskofferingid=self.disk_offering.id
)
self.assertIsNotNone(volume_created, "Volume not created")
list_volumes_after = Volume.list(
self.userapiclient,
listall=self.services["listall"])
self.assertEquals(
len(list_volumes_before) + 1,
len(list_volumes_after),
"Volume not created"
)
# Attaching and Detaching custom volume created to Virtual Machine
self.virtual_machine.attach_volume(
self.userapiclient,
volume_created
)
list_volumes = Volume.list(
self.userapiclient,
listall=self.services["listall"],
id=volume_created.id
)
attached_volume = list_volumes[0]
self.assertIsNotNone(
attached_volume.vmname,
"VM is not attached to Volume"
)
self.assertEquals(
self.virtual_machine.name,
attached_volume.vmname,
"VM Name is not matching with attached vm"
)
self.virtual_machine.detach_volume(
self.userapiclient,
volume_created
)
list_volumes = Volume.list(
self.userapiclient,
listall=self.services["listall"],
id=volume_created.id
)
detached_volume = list_volumes[0]
self.assertIsNone(
detached_volume.vmname,
"VM is not detached from volume"
)
# Creating Snapshot from volume
snapshot_created = Snapshot.create(
self.userapiclient,
volume_created.id,
)
self.assertIsNotNone(snapshot_created, "Snapshot not created")
# Creating expected and actual values dictionaries
expected_dict = {
"id": volume_created.id,
"intervaltype": "MANUAL",
"snapshottype": "MANUAL",
"volumetype": volume_created.type,
"domain": self.domain.id
}
actual_dict = {
"id": snapshot_created.volumeid,
"intervaltype": snapshot_created.intervaltype,
"snapshottype": snapshot_created.snapshottype,
"volumetype": snapshot_created.volumetype,
"domain": snapshot_created.domainid,
}
status = self.__verify_values(
expected_dict,
actual_dict
)
self.assertEqual(
True,
status,
"Snapshot created from Volume details are not as expected"
)
# Creating Volume from snapshot
cmd = createVolume.createVolumeCmd()
cmd.name = "-".join([self.services["volume"]
["diskname"], random_gen()])
cmd.snapshotid = snapshot_created.id
volume_from_snapshot = Volume(
self.userapiclient.createVolume(cmd).__dict__)
self.assertIsNotNone(
volume_from_snapshot,
"Volume creation failed from snapshot"
)
# Creating expected and actual values dictionaries
expected_dict = {
"snapshotid": snapshot_created.id,
"volumetype": snapshot_created.volumetype,
"size": self.disk_offering.disksize,
"accounr": self.account.name,
"domain": self.domain.id,
"storagetype": self.storagetype,
"zone": self.zone.id
}
actual_dict = {
"snapshotid": volume_from_snapshot.snapshotid,
"volumetype": volume_from_snapshot.type,
"size": volume_from_snapshot.size / (1024 * 1024 * 1024),
"accounr": volume_from_snapshot.account,
"domain": volume_from_snapshot.domainid,
"storagetype": volume_from_snapshot.storagetype,
"zone": volume_from_snapshot.zoneid,
}
status = self.__verify_values(
expected_dict,
actual_dict
)
self.assertEqual(
True,
status,
"Volume created from Snapshot details are not as expected"
)
# Creating Template from Snapshot
list_templates_before = Template.list(
self.userapiclient,
templatefilter='self')
if list_templates_before is None:
templates_before_size = 0
else:
templates_before_size = len(list_templates_before)
cmd = createTemplate.createTemplateCmd()
cmd.name = self.services["ostype"]
cmd.displaytext = self.services["ostype"]
cmd.ostypeid = self.template.ostypeid
cmd.snapshotid = snapshot_created.id
cmd.ispublic = False
cmd.passwordenabled = False
template_from_snapshot = Template(
self.userapiclient.createTemplate(cmd).__dict__)
self.assertIsNotNone(
template_from_snapshot,
"Template creation failed from snapshot"
)
self.cleanup.append(template_from_snapshot)
# Creating expected and actual values dictionaries
expected_dict = {
"name": self.services["ostype"],
"ostypeid": self.template.ostypeid,
"type": "USER",
"zone": self.zone.id,
"domain": self.domain.id,
"account": self.account.name,
"passwordenabled": False,
"ispublic": False,
"size": self.disk_offering.disksize
}
actual_dict = {
"name": template_from_snapshot.name,
"ostypeid": template_from_snapshot.ostypeid,
"type": template_from_snapshot.templatetype,
"zone": template_from_snapshot.zoneid,
"domain": template_from_snapshot.domainid,
"account": template_from_snapshot.account,
"passwordenabled": template_from_snapshot.passwordenabled,
"ispublic": template_from_snapshot.ispublic,
"size": template_from_snapshot.size / (1024 * 1024 * 1024)
}
status = self.__verify_values(
expected_dict,
actual_dict
)
self.assertEqual(
True,
status,
"Template created from Snapshot details are not as expected"
)
list_templates_after = Template.list(
self.userapiclient,
templatefilter='self')
self.assertEquals(
templates_before_size + 1,
len(list_templates_after),
"Template creation failed from snapshot"
)
return
@attr(tags=["advanced", "basic"], required_hardware="true")
def test_06_volume_snapshot_policy_hourly(self):
"""
@summary: Test to verify creation of Hourly Snapshot policies
from volume
Step1: Creating a Volume.
        Step2: Attaching the volume created in Step1 to a virtual machine
        Step3: Detaching the volume created in Step1 from the virtual machine
        Step4: Listing snapshot policies for the volume created in Step1
        Step5: Creating Hourly snapshot policy
        Step6: Listing snapshot policies for the volume created in Step1 again
        Step7: Verifying that the list snapshot policy length is increased by 1
"""
list_volumes_before = Volume.list(
self.userapiclient,
listall=self.services["listall"])
volume_created = Volume.create(
self.userapiclient,
self.services["volume"],
zoneid=self.zone.id,
diskofferingid=self.disk_offering.id
)
self.assertIsNotNone(volume_created, "Volume not created")
list_volumes_after = Volume.list(
self.userapiclient,
listall=self.services["listall"])
self.assertEquals(
len(list_volumes_before) + 1,
len(list_volumes_after),
"Volume not created"
)
# Attaching volume created to Virtual Machine
self.virtual_machine.attach_volume(
self.userapiclient,
volume_created
)
list_volumes = Volume.list(
self.userapiclient,
listall=self.services["listall"],
id=volume_created.id
)
attached_volume = list_volumes[0]
self.assertIsNotNone(
attached_volume.vmname,
"VM is not attached to Volume"
)
self.assertEquals(
self.virtual_machine.name,
attached_volume.vmname,
"VM Name is not matching with attached vm"
)
# Detaching volume created from Virtual Machine
self.virtual_machine.detach_volume(
self.userapiclient,
volume_created
)
list_volumes = Volume.list(
self.userapiclient,
listall=self.services["listall"],
id=volume_created.id
)
detached_volume = list_volumes[0]
self.assertIsNone(
detached_volume.vmname,
"VM is not detached from volume"
)
# Creating Hourly Snapshot Policy from volume
self.services["recurring_snapshot"]["intervaltype"] = 'hourly'
self.services["recurring_snapshot"]["schedule"] = '1'
list_snapshot_policy_before = SnapshotPolicy.list(
self.userapiclient,
volumeid=volume_created.id)
snapshot_policy_before_size = 0
if list_snapshot_policy_before is not None:
snapshot_policy_before_size = len(list_snapshot_policy_before)
snapshot_policy_hourly = SnapshotPolicy.create(
self.userapiclient,
volume_created.id,
self.services["recurring_snapshot"]
)
self.assertIsNotNone(
snapshot_policy_hourly,
"Hourly Snapshot policy creation failed"
)
# Creating expected and actual values dictionaries
expected_dict = {
"schedule": self.services["recurring_snapshot"]["schedule"],
"intervaltype": 0,
"volumeid": volume_created.id
}
actual_dict = {
"schedule": snapshot_policy_hourly.schedule,
"intervaltype": snapshot_policy_hourly.intervaltype,
"volumeid": snapshot_policy_hourly.volumeid
}
status = self.__verify_values(
expected_dict,
actual_dict
)
self.assertEqual(
True,
status,
"Hourly Snapshot Policy details are not as expected"
)
list_snapshot_policy_after = SnapshotPolicy.list(
self.userapiclient,
volumeid=volume_created.id)
self.assertIsNotNone(
list_snapshot_policy_after,
"Hourly Snapshot policy creation failed"
)
self.assertEquals(
snapshot_policy_before_size + 1,
len(list_snapshot_policy_after),
"Hourly Snapshot policy creation failed"
)
return
@attr(tags=["advanced", "basic"], required_hardware="true")
def test_07_volume_snapshot_policy_daily(self):
"""
@summary: Test to verify creation of Daily Snapshot policies
from volume
Step1: Creating a Volume.
        Step2: Attaching the volume created in Step1 to a virtual machine
        Step3: Detaching the volume created in Step1 from the virtual machine
        Step4: Listing snapshot policies for the volume created in Step1
        Step5: Creating Daily snapshot policy
        Step6: Listing snapshot policies for the volume created in Step1 again
        Step7: Verifying that the list snapshot policy length is increased by 1
"""
list_volumes_before = Volume.list(
self.userapiclient,
listall=self.services["listall"])
volume_created = Volume.create(
self.userapiclient,
self.services["volume"],
zoneid=self.zone.id,
diskofferingid=self.disk_offering.id
)
self.assertIsNotNone(volume_created, "Volume not created")
list_volumes_after = Volume.list(
self.userapiclient,
listall=self.services["listall"])
self.assertEquals(
len(list_volumes_before) + 1,
len(list_volumes_after),
"Volume not created"
)
# Attaching volume created to Virtual Machine
self.virtual_machine.attach_volume(
self.userapiclient,
volume_created
)
list_volumes = Volume.list(
self.userapiclient,
listall=self.services["listall"],
id=volume_created.id
)
attached_volume = list_volumes[0]
self.assertIsNotNone(
attached_volume.vmname,
"VM is not attached to Volume"
)
self.assertEquals(
self.virtual_machine.name,
attached_volume.vmname,
"VM Name is not matching with attached vm"
)
# Detaching volume created from Virtual Machine
self.virtual_machine.detach_volume(
self.userapiclient,
volume_created
)
list_volumes = Volume.list(
self.userapiclient,
listall=self.services["listall"],
id=volume_created.id
)
detached_volume = list_volumes[0]
self.assertIsNone(
detached_volume.vmname,
"VM is not detached from volume"
)
# Creating Daily Snapshot Policy from volume
self.services["recurring_snapshot"]["intervaltype"] = 'daily'
self.services["recurring_snapshot"]["schedule"] = '00:00'
list_snapshot_policy_before = SnapshotPolicy.list(
self.userapiclient,
volumeid=volume_created.id)
snapshot_policy_before_size = 0
if list_snapshot_policy_before is not None:
snapshot_policy_before_size = len(list_snapshot_policy_before)
snapshot_policy_daily = SnapshotPolicy.create(
self.userapiclient,
volume_created.id,
self.services["recurring_snapshot"]
)
self.assertIsNotNone(
snapshot_policy_daily,
"Daily Snapshot policy creation failed"
)
# Creating expected and actual values dictionaries
expected_dict = {
"schedule": self.services["recurring_snapshot"]["schedule"],
"intervaltype": 1,
"volumeid": volume_created.id
}
actual_dict = {
"schedule": snapshot_policy_daily.schedule,
"intervaltype": snapshot_policy_daily.intervaltype,
"volumeid": snapshot_policy_daily.volumeid
}
status = self.__verify_values(
expected_dict,
actual_dict
)
self.assertEqual(
True,
status,
"Daily Snapshot Policy details are not as expected"
)
list_snapshot_policy_after = SnapshotPolicy.list(
self.userapiclient,
volumeid=volume_created.id)
self.assertIsNotNone(
list_snapshot_policy_after,
"Daily Snapshot policy creation failed"
)
self.assertEquals(
snapshot_policy_before_size + 1,
len(list_snapshot_policy_after),
"Daily Snapshot policy creation failed"
)
return
@attr(tags=["advanced", "basic"], required_hardware="true")
def test_08_volume_snapshot_policy_weekly(self):
"""
@summary: Test to verify creation of Weekly Snapshot policies
from volume
Step1: Creating a Volume.
        Step2: Attaching the volume created in Step1 to a virtual machine
        Step3: Detaching the volume created in Step1 from the virtual machine
        Step4: Listing snapshot policies for the volume created in Step1
        Step5: Creating Weekly snapshot policy
        Step6: Listing snapshot policies for the volume created in Step1 again
        Step7: Verifying that the list snapshot policy length is increased by 1
"""
list_volumes_before = Volume.list(
self.userapiclient,
listall=self.services["listall"])
volume_created = Volume.create(
self.userapiclient,
self.services["volume"],
zoneid=self.zone.id,
diskofferingid=self.disk_offering.id
)
self.assertIsNotNone(volume_created, "Volume not created")
list_volumes_after = Volume.list(
self.userapiclient,
listall=self.services["listall"])
self.assertEquals(
len(list_volumes_before) + 1,
len(list_volumes_after),
"Volume not created"
)
# Attaching volume created to Virtual Machine
self.virtual_machine.attach_volume(
self.userapiclient,
volume_created
)
list_volumes = Volume.list(
self.userapiclient,
listall=self.services["listall"],
id=volume_created.id
)
attached_volume = list_volumes[0]
self.assertIsNotNone(
attached_volume.vmname,
"VM is not attached to Volume"
)
self.assertEquals(
self.virtual_machine.name,
attached_volume.vmname,
"VM Name is not matching with attached vm"
)
# Detaching volume created to Virtual Machine
self.virtual_machine.detach_volume(
self.userapiclient,
volume_created
)
list_volumes = Volume.list(
self.userapiclient,
listall=self.services["listall"],
id=volume_created.id
)
detached_volume = list_volumes[0]
self.assertIsNone(
detached_volume.vmname,
"VM is not detached from volume"
)
# Creating Weekly Snapshot Policy from volume
self.services["recurring_snapshot"]["intervaltype"] = 'weekly'
self.services["recurring_snapshot"]["schedule"] = '00:00:1'
list_snapshot_policy_before = SnapshotPolicy.list(
self.userapiclient,
volumeid=volume_created.id)
snapshot_policy_before_size = 0
if list_snapshot_policy_before is not None:
snapshot_policy_before_size = len(list_snapshot_policy_before)
snapshot_policy_weekly = SnapshotPolicy.create(
self.userapiclient,
volume_created.id,
self.services["recurring_snapshot"]
)
self.assertIsNotNone(
snapshot_policy_weekly,
"Weekly Snapshot policy creation failed"
)
# Creating expected and actual values dictionaries
expected_dict = {
"schedule": self.services["recurring_snapshot"]["schedule"],
"intervaltype": 2,
"volumeid": volume_created.id
}
actual_dict = {
"schedule": snapshot_policy_weekly.schedule,
"intervaltype": snapshot_policy_weekly.intervaltype,
"volumeid": snapshot_policy_weekly.volumeid
}
status = self.__verify_values(
expected_dict,
actual_dict
)
self.assertEqual(
True,
status,
"Weekly Snapshot Policy details are not as expected"
)
list_snapshot_policy_after = SnapshotPolicy.list(
self.userapiclient,
volumeid=volume_created.id)
self.assertIsNotNone(
list_snapshot_policy_after,
"Weekly Snapshot policy creation failed"
)
self.assertEquals(
snapshot_policy_before_size + 1,
len(list_snapshot_policy_after),
"Weekly Snapshot policy creation failed"
)
return
@attr(tags=["advanced", "basic"], required_hardware="true")
def test_09_volume_snapshot_policy_monthly(self):
"""
@summary: Test to verify creation of Monthly Snapshot policies
from volume
Step1: Creating a Volume.
        Step2: Attaching the volume created in Step1 to a virtual machine
        Step3: Detaching the volume created in Step1 from the virtual machine
        Step4: Listing snapshot policies for the volume created in Step1
        Step5: Creating Monthly snapshot policy
        Step6: Listing snapshot policies for the volume created in Step1 again
        Step7: Verifying that the list snapshot policy length is increased by 1
        Step8: Deleting the monthly snapshot policy created in Step5
        Step9: Listing snapshot policies for the volume again
        Step10: Verifying that the list snapshot policy length is decreased by 1
"""
list_volumes_before = Volume.list(
self.userapiclient,
listall=self.services["listall"])
volume_created = Volume.create(
self.userapiclient,
self.services["volume"],
zoneid=self.zone.id,
diskofferingid=self.disk_offering.id
)
self.assertIsNotNone(volume_created, "Volume not created")
list_volumes_after = Volume.list(
self.userapiclient,
listall=self.services["listall"])
self.assertEquals(
len(list_volumes_before) + 1,
len(list_volumes_after),
"Volume not created"
)
# Attaching and Detaching custom volume created to Virtual Machine
self.virtual_machine.attach_volume(
self.userapiclient,
volume_created
)
list_volumes = Volume.list(
self.userapiclient,
listall=self.services["listall"],
id=volume_created.id
)
attached_volume = list_volumes[0]
self.assertIsNotNone(
attached_volume.vmname,
"VM is not attached to Volume"
)
self.assertEquals(
self.virtual_machine.name,
attached_volume.vmname,
"VM Name is not matching with attached vm"
)
self.virtual_machine.detach_volume(
self.userapiclient,
volume_created
)
list_volumes = Volume.list(
self.userapiclient,
listall=self.services["listall"],
id=volume_created.id
)
detached_volume = list_volumes[0]
self.assertIsNone(
detached_volume.vmname,
"VM is not detached from volume"
)
# Creating Monthly Snapshot Policy from volume
self.services["recurring_snapshot"]["intervaltype"] = 'monthly'
self.services["recurring_snapshot"]["schedule"] = '00:00:1'
list_snapshot_policy_before = SnapshotPolicy.list(
self.userapiclient,
volumeid=volume_created.id)
snapshot_policy_before_size = 0
if list_snapshot_policy_before is not None:
snapshot_policy_before_size = len(list_snapshot_policy_before)
snapshot_policy_monthly = SnapshotPolicy.create(
self.userapiclient,
volume_created.id,
self.services["recurring_snapshot"])
self.assertIsNotNone(
snapshot_policy_monthly,
"Monthly Snapshot policy creation failed"
)
# Creating expected and actual values dictionaries
expected_dict = {
"schedule": self.services["recurring_snapshot"]["schedule"],
"intervaltype": 3,
"volumeid": volume_created.id
}
actual_dict = {
"schedule": snapshot_policy_monthly.schedule,
"intervaltype": snapshot_policy_monthly.intervaltype,
"volumeid": snapshot_policy_monthly.volumeid
}
status = self.__verify_values(
expected_dict,
actual_dict
)
self.assertEqual(
True,
status,
"Monthly Snapshot Policy details are not as expected"
)
list_snapshot_policy_after = SnapshotPolicy.list(
self.userapiclient,
volumeid=volume_created.id)
self.assertIsNotNone(
list_snapshot_policy_after,
"Monthly Snapshot policy creation failed"
)
self.assertEquals(
snapshot_policy_before_size + 1,
len(list_snapshot_policy_after),
"Monthly Snapshot policy creation failed"
)
# Deleting monthly snapshot policy
SnapshotPolicy.delete(snapshot_policy_monthly, self.userapiclient)
list_snapshot_policies = SnapshotPolicy.list(
self.userapiclient,
volumeid=volume_created.id)
self.assertIsNone(
list_snapshot_policies,
"Deletion of Monthly Snapshot policy failed"
)
return
@attr(tags=["advanced", "basic"], required_hardware="true")
def test_10_volume_snapshots_pagination(self):
"""
@summary: Test to verify pagination of snapshots for Volume
Step1: Creating a Volume.
        Step2: Attaching the volume created in Step1 to a virtual machine
        Step3: Detaching the volume created in Step1 from the virtual machine
Step4: Listing all the snapshots for a volume
Step5: Creating Pagesize + 1 number of snapshots for a volume
Step6: Listing all the snapshots for a volume
Step7: Verifying that there are pagesize + 1 number of snapshots
listed
Step8: Listing all the snapshots in page 1
Step9: Listing all the snapshots in page 2
Step10: Deleting the snapshot present in page 2
        Step11: Listing the snapshots from page 2 again and verifying that
list returns none
"""
if self.hypervisor.lower() in ['hyperv']:
raise unittest.SkipTest(
"This featureis not supported on existing\
hypervisor. Hence, skipping the test")
list_volumes_before = Volume.list(
self.userapiclient,
listall=self.services["listall"])
# Creating a Volume
volume_created = Volume.create(
self.userapiclient,
self.services["volume"],
zoneid=self.zone.id,
diskofferingid=self.disk_offering.id
)
self.assertIsNotNone(volume_created, "Volume not created")
list_volumes_after = Volume.list(
self.userapiclient,
listall=self.services["listall"])
self.assertEquals(
len(list_volumes_before) + 1,
len(list_volumes_after),
"Volume not created"
)
# Attaching volume to virtual machine
self.virtual_machine.attach_volume(
self.userapiclient,
volume_created
)
list_volumes = Volume.list(
self.userapiclient,
listall=self.services["listall"],
id=volume_created.id
)
attached_volume = list_volumes[0]
self.assertIsNotNone(
attached_volume.vmname,
"VM is not attached to Volume"
)
self.assertEquals(
self.virtual_machine.name,
attached_volume.vmname,
"VM Name is not matching with attached vm"
)
# Detaching volume from virtual machine
self.virtual_machine.detach_volume(
self.userapiclient,
volume_created
)
list_volumes = Volume.list(
self.userapiclient,
listall=self.services["listall"],
id=volume_created.id
)
detached_volume = list_volumes[0]
self.assertIsNone(
detached_volume.vmname,
"VM is not detached from volume"
)
# Creating 3 Snapshots from volume
list_snapshot_before = Snapshot.list(
self.userapiclient,
volumeid=volume_created.id,
listall=self.services["listall"]
)
self.assertIsNone(
list_snapshot_before,
"Newly created volume is already having snapshots"
)
list_snapshot_before_size = 0
for i in range(0, 3):
snapshot_created = Snapshot.create(
self.userapiclient,
volume_created.id,
)
self.assertIsNotNone(snapshot_created, "Snapshot not created")
self.assertEquals(
volume_created.id,
snapshot_created.volumeid,
"Snapshot not created for given volume"
)
list_snapshot_after = Snapshot.list(
self.userapiclient,
volumeid=volume_created.id,
listall=self.services["listall"]
)
self.assertEqual(
list_snapshot_before_size + 3,
len(list_snapshot_after),
"Number of snapshots created is not matching expected"
)
# Listing all the snapshots in page1
list_snapshots_page1 = Snapshot.list(
self.userapiclient,
volumeid=volume_created.id,
listall=self.services["listall"],
page=1,
pagesize=2
)
self.assertEqual(
2,
len(list_snapshots_page1),
"List snapshots response is not matching with the\
page size length for page 1"
)
# Listing all the snapshots in page2 and ensuring only 1 snapshot is
# present
list_snapshots_page2 = Snapshot.list(
self.userapiclient,
volumeid=volume_created.id,
listall=self.services["listall"],
page=2,
pagesize=2
)
self.assertEqual(
len(list_snapshots_page2),
1,
"List snapshots response is not matching with\
the page size length for page 2"
)
snapshot_page2 = list_snapshots_page2[0]
# Verifying that the snapshot on page 2 is not present in page1
for i in range(0, len(list_snapshots_page1)):
snapshot_page1 = list_snapshots_page1[i]
self.assertNotEquals(
snapshot_page2.id,
snapshot_page1.id,
"Snapshot listed in page 2 is also listed in page 1"
)
# Deleting a single snapshot and verifying that snapshot does not
# exists on page 2
Snapshot.delete(snapshot_created, self.userapiclient)
list_snapshot_page2 = Snapshot.list(
self.userapiclient,
volumeid=volume_created.id,
listall=self.services["listall"],
page=2,
pagesize=2
)
self.assertEqual(
None,
list_snapshot_page2,
"Snapshot was not deleted"
)
list_snapshot_page1 = Snapshot.list(
self.userapiclient,
volumeid=volume_created.id,
listall=self.services["listall"],
page=1,
pagesize=2
)
self.assertEqual(
2,
len(list_snapshot_page1),
"Snapshots on page 1 are not matching"
)
return
@attr(tags=["advanced", "basic"], required_hardware="true")
def test_11_volume_extract(self):
"""
@summary: Test to verify extract/download a Volume
Step1: Listing Volumes before creating a Volume
Step2: Creating a Volume.
Step3: Verifying that created volume is not none and adding to clean up
Step4: Listing the volumes after creation
Step5: Verifying that the list volume size is increased by 1
Step6: Attaching volume created in Step2 to virtual machine
Step7: Detaching the volume created in step2 from virtual machine
        Step8: Extracting/Downloading the volume
        Step9: Verifying that a download URL is created for volume download
"""
list_volumes_before = Volume.list(
self.userapiclient,
listall=self.services["listall"])
self.assertIsNotNone(
list_volumes_before,
"volume not created for the vm launched at class level"
)
volume_created = Volume.create(
self.userapiclient,
self.services["volume"],
zoneid=self.zone.id,
diskofferingid=self.disk_offering.id
)
self.assertIsNotNone(volume_created, "Volume not created")
list_volumes_after = Volume.list(
self.userapiclient,
listall=self.services["listall"])
self.assertIsNotNone(
list_volumes_after,
"volume creation failed"
)
self.assertEquals(
len(list_volumes_before) + 1,
len(list_volumes_after),
"Volume not created"
)
# Attaching and Detaching volume created to Virtual Machine
self.virtual_machine.attach_volume(
self.userapiclient,
volume_created
)
list_volumes = Volume.list(
self.userapiclient,
listall=self.services["listall"],
id=volume_created.id
)
attached_volume = list_volumes[0]
self.assertIsNotNone(
attached_volume.vmname,
"VM is not attached to Volume"
)
self.assertEquals(
self.virtual_machine.name,
attached_volume.vmname,
"VM Name is not matching with attached vm"
)
self.virtual_machine.detach_volume(
self.userapiclient,
volume_created
)
list_volumes = Volume.list(
self.userapiclient,
listall=self.services["listall"],
id=volume_created.id
)
detached_volume = list_volumes[0]
self.assertIsNone(
detached_volume.vmname,
"VM is not detached from volume"
)
# Extract/Download the volume
self.services["mode"] = "HTTP_DOWNLOAD"
extract_volume_response = Volume.extract(
self.userapiclient,
volume_created.id,
self.zone.id,
self.services["mode"]
)
self.assertIsNotNone(
extract_volume_response,
"Extract/Download volume failed")
self.assertEquals(
"DOWNLOAD_URL_CREATED",
extract_volume_response.state,
"Failed to create Download URL"
)
self.assertIsNotNone(
extract_volume_response.url,
"Extract/Download volume URL is NULL"
)
self.assertTrue(
(extract_volume_response.url.find("http") != -1),
"Extract/Download volume URL doesnot contain http"
)
self.assertEquals(
volume_created.id,
extract_volume_response.id,
"Extracted/Downloaded volume is not matching with original volume"
)
return
@attr(tags=["advanced", "basic"], required_hardware="true")
def test_12_volume_upload(self):
"""
@summary: Test to verify upload volume
Step1: Listing the volumes for a user before uploading volume
Step2: Uploading a volume
Step3: Listing the volumes for a user after uploading data volume
Step4: Verifying that the list volume length after upload is
increased by 1
"""
list_volumes_before = Volume.list(
self.userapiclient,
listall=self.services["listall"])
self.assertIsNotNone(
list_volumes_before,
"volume not created for the vm launched at class level"
)
# Uploading a Volume
volume_uploaded = Volume.upload(
self.userapiclient,
self.services["configurableData"]["upload_volume"],
self.zone.id
)
self.assertIsNotNone(volume_uploaded, "volume uploading failed")
# Listing the volumes for a user after uploading data volume
list_volumes_after = Volume.list(
self.userapiclient,
listall=self.services["listall"])
self.assertIsNotNone(
list_volumes_after,
"volume not created for the vm launched at class level"
)
# Asserting that the list volume length after upload is increased by 1
self.assertEquals(
len(list_volumes_before) + 1,
len(list_volumes_after),
"upload volume failed"
)
return
@attr(tags=["advanced", "basic", "sg"], required_hardware="true")
def test_13_volume_custom_disk_size(self):
"""
        @Desc: Verify that creating volumes from a custom disk offering honors the requested custom size
        Step1: Create a custom disk offering
        Step2: Create a volume with size X
        Step3: Attach that volume to a VM
        Step4: Create another volume with size Y
        Step5: Verify that the new volume is created with size Y
               but not with size X
"""
if self.hypervisor.lower() in ['hyperv']:
raise unittest.SkipTest(
"This featureis not supported on existing\
hypervisor. Hence, skipping the test")
disk_offering = DiskOffering.create(
self.api_client,
self.services["disk_offering"],
custom=True
)
self.assertIsNotNone(
disk_offering,
"Failed to create custom disk offering")
self.cleanup.append(disk_offering)
self.services["custom_volume"]["customdisksize"] = 2
vol1 = Volume.create_custom_disk(
self.userapiclient,
self.services["custom_volume"],
account=self.account.name,
domainid=self.domain.id,
diskofferingid=disk_offering.id
)
self.assertIsNotNone(
vol1,
"Volume creation failed with custom disk size")
vol1_res = Volume.list(
self.userapiclient,
id=vol1.id
)
self.assertEqual(
validateList(vol1_res)[0],
PASS,
"Volume list returned invalid response")
vol1_size = vol1_res[0].size
try:
self.virtual_machine.attach_volume(self.userapiclient, vol1)
except Exception as e:
self.fail(
"Attaching custom data disk to vm failed\
with error{}".format(e))
self.services["custom_volume"]["customdisksize"] = 3
vol2 = Volume.create_custom_disk(
self.userapiclient,
self.services["custom_volume"],
account=self.account.name,
domainid=self.domain.id,
diskofferingid=disk_offering.id
)
self.assertIsNotNone(
vol2,
"Failed to create custom data disk with size %s" %
self.services["custom_volume"]["customdisksize"])
vol2_res = Volume.list(
self.userapiclient,
id=vol2.id
)
self.assertEqual(
validateList(vol2_res)[0],
PASS,
"Volume list returned invalid response")
vol2_size = vol2_res[0].size
self.assertNotEqual(
vol1_size,
vol2_size,
"Creating volume from custom disk offering does not work\
as expected"
)
try:
self.virtual_machine.detach_volume(self.userapiclient, vol1)
except Exception as e:
self.fail("Detaching volume failed with error %s" % e)
return
|
python
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from eventlet import greenthread
from nova.tests.virt.xenapi import stubs
from nova import utils
from nova.virt.xenapi import volume_utils
class CallXenAPIHelpersTestCase(stubs.XenAPITestBaseNoDB):
def test_vbd_plug(self):
session = mock.Mock()
volume_utils.vbd_plug(session, "vbd_ref", "vm_ref:123")
session.call_xenapi.assert_called_once_with("VBD.plug", "vbd_ref")
@mock.patch.object(utils, 'synchronized')
def test_vbd_plug_check_synchronized(self, mock_synchronized):
session = mock.Mock()
volume_utils.vbd_plug(session, "vbd_ref", "vm_ref:123")
mock_synchronized.assert_called_once_with("xenapi-events-vm_ref:123")
def test_vbd_unplug(self):
session = mock.Mock()
volume_utils.vbd_unplug(session, "vbd_ref", "vm_ref:123")
session.call_xenapi.assert_called_once_with("VBD.unplug", "vbd_ref")
@mock.patch.object(utils, 'synchronized')
def test_vbd_unplug_check_synchronized(self, mock_synchronized):
session = mock.Mock()
volume_utils.vbd_unplug(session, "vbd_ref", "vm_ref:123")
mock_synchronized.assert_called_once_with("xenapi-events-vm_ref:123")
class SROps(stubs.XenAPITestBaseNoDB):
def test_find_sr_valid_uuid(self):
self.session = mock.Mock()
self.session.call_xenapi.return_value = 'sr_ref'
self.assertEqual(volume_utils.find_sr_by_uuid(self.session,
'sr_uuid'),
'sr_ref')
def test_find_sr_invalid_uuid(self):
class UUIDException(Exception):
details = ["UUID_INVALID", "", "", ""]
self.session = mock.Mock()
self.session.XenAPI.Failure = UUIDException
self.session.call_xenapi.side_effect = UUIDException
self.assertEqual(volume_utils.find_sr_by_uuid(self.session,
'sr_uuid'),
None)
class ISCSIParametersTestCase(stubs.XenAPITestBaseNoDB):
def test_target_host(self):
self.assertEqual(volume_utils._get_target_host('host:port'),
'host')
self.assertEqual(volume_utils._get_target_host('host'),
'host')
# There is no default value
self.assertEqual(volume_utils._get_target_host(':port'),
None)
self.assertEqual(volume_utils._get_target_host(None),
None)
def test_target_port(self):
self.assertEqual(volume_utils._get_target_port('host:port'),
'port')
self.assertEqual(volume_utils._get_target_port('host'),
'3260')
class IntroduceTestCase(stubs.XenAPITestBaseNoDB):
@mock.patch.object(volume_utils, '_get_vdi_ref')
@mock.patch.object(greenthread, 'sleep')
def test_introduce_vdi_retry(self, mock_sleep, mock_get_vdi_ref):
def fake_get_vdi_ref(session, sr_ref, vdi_uuid, target_lun):
fake_get_vdi_ref.call_count += 1
if fake_get_vdi_ref.call_count == 2:
return 'vdi_ref'
def fake_call_xenapi(method, *args):
if method == 'SR.scan':
return
elif method == 'VDI.get_record':
return {'managed': 'true'}
session = mock.Mock()
session.call_xenapi.side_effect = fake_call_xenapi
mock_get_vdi_ref.side_effect = fake_get_vdi_ref
fake_get_vdi_ref.call_count = 0
self.assertEqual(volume_utils.introduce_vdi(session, 'sr_ref'),
'vdi_ref')
mock_sleep.assert_called_once_with(20)
@mock.patch.object(volume_utils, '_get_vdi_ref')
@mock.patch.object(greenthread, 'sleep')
def test_introduce_vdi_exception(self, mock_sleep, mock_get_vdi_ref):
def fake_call_xenapi(method, *args):
if method == 'SR.scan':
return
elif method == 'VDI.get_record':
return {'managed': 'true'}
session = mock.Mock()
session.call_xenapi.side_effect = fake_call_xenapi
mock_get_vdi_ref.return_value = None
self.assertRaises(volume_utils.StorageError,
volume_utils.introduce_vdi, session, 'sr_ref')
mock_sleep.assert_called_once_with(20)
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import print_function
# Let's import what we need
import os
import sys
import tld
import time
import random
import warnings
import argparse
import threading
from math import log
from re import search, findall
from requests import get, post
try:
import concurrent.futures
from urllib.parse import urlparse # for python3
python2, python3 = False, True
except ImportError:
from urlparse import urlparse # for python2
python2, python3 = True, False
from core.config import intels, badTypes
try:
input = raw_input
except NameError:
pass
colors = True # Output should be colored
machine = sys.platform # Detecting the os of current system
if machine.lower().startswith(('os', 'win', 'darwin', 'ios')):
colors = False # Colors shouldn't be displayed in mac & windows
if not colors:
end = red = white = green = yellow = run = bad = good = info = que = ''
else:
end = '\033[1;m'
red = '\033[91m'
white = '\033[1;97m'
green = '\033[1;32m'
yellow = '\033[1;33m'
run = '\033[1;97m[~]\033[1;m'
bad = '\033[1;31m[-]\033[1;m'
good = '\033[1;32m[+]\033[1;m'
info = '\033[1;33m[!]\033[1;m'
que = '\033[1;34m[?]\033[1;m'
# Just a fancy banner
print('''%s ____ __ __
/ %s__%s \/ /_ ____ / /_____ ____
/ %s/_/%s / __ \/ %s__%s \/ __/ %s__%s \/ __ \\
/ ____/ / / / %s/_/%s / /_/ %s/_/%s / / / /
/_/ /_/ /_/\____/\__/\____/_/ /_/ %sv1.1.1%s\n''' %
(red, white, red, white, red, white, red, white, red, white, red, white, red, white, end))
warnings.filterwarnings('ignore') # Disable SSL related warnings
# Processing command line arguments
parser = argparse.ArgumentParser()
# Options
parser.add_argument('-u', '--url', help='root url', dest='root')
parser.add_argument('-c', '--cookie', help='cookie', dest='cook')
parser.add_argument('-r', '--regex', help='regex pattern', dest='regex')
parser.add_argument('-e', '--export', help='export format', dest='export')
parser.add_argument('-o', '--output', help='output directory', dest='output')
parser.add_argument('-l', '--level', help='levels to crawl', dest='level', type=int)
parser.add_argument('-t', '--threads', help='number of threads', dest='threads', type=int)
parser.add_argument('-d', '--delay', help='delay between requests', dest='delay', type=float)
parser.add_argument('-s', '--seeds', help='additional seed urls', dest='seeds', nargs="+", default=[])
parser.add_argument('--stdout', help='send variables to stdout', dest='std')
parser.add_argument('--user-agent', help='custom user agent(s)', dest='user_agent')
parser.add_argument('--exclude', help='exclude urls matching this regex', dest='exclude')
parser.add_argument('--timeout', help='http request timeout', dest='timeout', type=float)
# Switches
parser.add_argument('--dns', help='enumerate subdomains & dns data', dest='dns', action='store_true')
parser.add_argument('--ninja', help='ninja mode', dest='ninja', action='store_true')
parser.add_argument('--keys', help='find secret keys', dest='api', action='store_true')
parser.add_argument('--update', help='update photon', dest='update', action='store_true')
parser.add_argument('--only-urls', help='only extract urls', dest='only_urls', action='store_true')
parser.add_argument('--wayback', help='fetch urls from archive.org as seeds', dest='archive', action='store_true')
args = parser.parse_args()
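# Illustrative invocations (values are placeholders; only flags defined above are used):
#   python photon.py -u "https://example.com" -l 2 -t 10
#   python photon.py -u "https://example.com" --keys --wayback -o example_output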
####
# This function git clones the latest version and merges it with the current directory
####
def update():
print('%s Checking for updates' % run)
    changes = '''bug fixes;minor refactor;added --stdout option''' # Changes must be separated by ;
latest_commit = get('https://raw.githubusercontent.com/s0md3v/Photon/master/photon.py').text
if changes not in latest_commit: # just a hack to see if a new version is available
changelog = search(r"changes = '''(.*?)'''", latest_commit)
changelog = changelog.group(1).split(';') # splitting the changes to form a list
print('%s A new version of Photon is available.' % good)
print('%s Changes:' % info)
for change in changelog: # print changes
print('%s>%s %s' % (green, end, change))
        current_path = os.getcwd().split('/') # split the current working directory into path components
folder = current_path[-1] # current directory name
path = '/'.join(current_path) # current directory path
choice = input('%s Would you like to update? [Y/n] ' % que).lower()
if choice != 'n':
print('%s Updating Photon' % run)
os.system('git clone --quiet https://github.com/s0md3v/Photon %s' % (folder))
os.system('cp -r %s/%s/* %s && rm -r %s/%s/ 2>/dev/null' % (path, folder, path, path, folder))
print('%s Update successful!' % good)
else:
print('%s Photon is up to date!' % good)
if args.update: # if the user has supplied --update argument
update()
quit() # quitting because files have been changed
if args.root: # if the user has supplied a url
main_inp = args.root
if main_inp.endswith('/'): # if the url ends with '/'
main_inp = main_inp[:-1] # we will remove it as it can cause problems later in the code
else: # if the user hasn't supplied a url
print('\n' + parser.format_help().lower())
quit()
delay = args.delay or 0 # Delay between requests
timeout = args.timeout or 6 # HTTP request timeout
cook = args.cook or None # Cookie
api = bool(args.api) # extract high entropy strings i.e. API keys and stuff
ninja = bool(args.ninja) # Ninja mode toggle
crawl_level = args.level or 2 # Crawling level
thread_count = args.threads or 2 # Number of threads
only_urls = bool(args.only_urls) # only urls mode is off by default
# Variables we are gonna use later to store stuff
keys = set() # high entropy strings, probably secret keys
files = set() # pdf, css, png etc.
intel = set() # emails, website accounts, aws buckets etc.
robots = set() # entries of robots.txt
custom = set() # string extracted by custom regex pattern
failed = set() # urls that photon failed to crawl
scripts = set() # javascript files
external = set() # urls that don't belong to the target i.e. out-of-scope
fuzzable = set() # urls that have get params in them e.g. example.com/page.php?id=2
endpoints = set() # urls found from javascript files
processed = set() # urls that have been crawled
internal = set([s for s in args.seeds]) # urls that belong to the target i.e. in-scope
everything = []
bad_intel = set() # unclean intel urls
bad_scripts = set() # unclean javascript file urls
# If the user hasn't supplied the root url with http(s), we will handle it
if main_inp.startswith('http'):
main_url = main_inp
else:
try:
get('https://' + main_inp)
main_url = 'https://' + main_inp
except:
main_url = 'http://' + main_inp
schema = main_url.split('//')[0] # https: or http:?
internal.add(main_url) # adding the root url to internal for crawling
host = urlparse(main_url).netloc # Extracts host out of the url
output_dir = args.output or host
####
# This function extracts top level domain from a url
####
def topLevel(url):
    try:
        toplevel = tld.get_fld(url, fix_protocol=True)
    except tld.exceptions.TldDomainNotFound:
        toplevel = urlparse(url).netloc
    return toplevel
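# For reference, tld.get_fld reduces a url to its registrable domain, e.g. an input of
# 'https://blog.example.co.uk/post/1' (hypothetical) yields 'example.co.uk'; if the suffix
# is unknown, the plain netloc is returned instead.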
domain = topLevel(main_url)
####
# This function makes requests to webpage and returns response body
####
if args.user_agent:
user_agents = args.user_agent.split(',')
else:
with open(sys.path[0] + '/core/user-agents.txt', 'r') as uas:
user_agents = [agent.strip('\n') for agent in uas]
def requester(url):
processed.add(url) # mark the url as crawled
time.sleep(delay) # pause/sleep the program for specified time
def normal(url):
headers = {
            'Host' : host, # host header of the target
'User-Agent' : random.choice(user_agents), # selecting a random user-agent
'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language' : 'en-US,en;q=0.5',
'Accept-Encoding' : 'gzip',
'DNT' : '1',
'Connection' : 'close'}
# make request and return response
response = get(url, cookies=cook, headers=headers, verify=False, timeout=timeout, stream=True)
if 'text/html' in response.headers['content-type']:
            if response.status_code != 404:
return response.text
else:
response.close()
failed.add(url)
return 'dummy'
else:
response.close()
return 'dummy'
# developer.facebook.com API
def facebook(url):
return get('https://developers.facebook.com/tools/debug/echo/?q=' + url, verify=False).text
# pixlr.com API
def pixlr(url):
if url == main_url:
url = main_url + '/' # because pixlr throws error if http://example.com is used
# make request and return response
return get('https://pixlr.com/proxy/?url=' + url, headers={'Accept-Encoding' : 'gzip'}, verify=False).text
# codebeautify.org API
def code_beautify(url):
headers = {
'User-Agent' : 'Mozilla/5.0 (X11; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0',
'Accept' : 'text/plain, */*; q=0.01',
'Accept-Encoding' : 'gzip',
'Content-Type' : 'application/x-www-form-urlencoded; charset=UTF-8',
'Origin' : 'https://codebeautify.org',
'Connection' : 'close'
}
# make request and return response
return post('https://codebeautify.com/URLService', headers=headers, data='path=' + url, verify=False).text
# www.photopea.com API
def photopea(url):
# make request and return response
return get('https://www.photopea.com/mirror.php?url=' + url, verify=False).text
if ninja: # if the ninja mode is enabled
# select a random request function i.e. random API
response = random.choice([photopea, normal, facebook, pixlr, code_beautify])(url)
return response or 'dummy'
else:
return normal(url)
####
# This function extracts links from .xml files
####
def xmlParser(response):
return findall(r'<loc>(.*?)</loc>', response) # regex for extracting urls
####
# This function extracts links from robots.txt and sitemap.xml
####
def zap(url):
if args.archive:
from plugins.wayback import timeMachine
print ('%s Fetching URLs from archive.org' % run)
        # only the host-level wayback lookup is used; the domain-level variant
        # (timeMachine(domain, 'domain')) stays disabled
        archived_urls = timeMachine(host, 'host')
print ('%s Retrieved %i URLs from archive.org' % (good, len(archived_urls) - 1))
for url in archived_urls:
internal.add(url)
response = get(url + '/robots.txt', verify=False).text # makes request to robots.txt
if '<body' not in response: # making sure robots.txt isn't some fancy 404 page
        matches = findall(r'Allow: (.*)|Disallow: (.*)', response) # extract the Allow and Disallow entries
if matches:
for match in matches: # iterating over the matches, match is a tuple here
match = ''.join(match) # one item in match will always be empty so will combine both items
if '*' not in match: # if the url doesn't use a wildcard
                    # use a separate name so the sitemap.xml request below still uses the original url
                    robots_url = main_url + match
                    internal.add(robots_url) # add the url to internal list for crawling
                    robots.add(robots_url) # add the url to robots list
print('%s URLs retrieved from robots.txt: %s' % (good, len(robots)))
response = get(url + '/sitemap.xml', verify=False).text # makes request to sitemap.xml
    if '<body' not in response: # making sure sitemap.xml isn't some fancy 404 page
matches = xmlParser(response)
if matches: # if there are any matches
print('%s URLs retrieved from sitemap.xml: %s' % (good, len(matches)))
for match in matches:
                internal.add(match) # add it to the internal list for crawling
####
# This functions checks whether a url matches a regular expression
####
def remove_regex(urls, regex):
"""
Parses a list for non-matches to a regex
Args:
urls: iterable of urls
custom_regex: string regex to be parsed for
Returns:
list of strings not matching regex
"""
if not regex:
return urls
# to avoid iterating over the characters of a string
if not isinstance(urls, (list, set, tuple)):
urls = [urls]
try:
non_matching_urls = [url for url in urls if not search(regex, url)]
except TypeError:
return []
return non_matching_urls
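# Quick illustration with hypothetical urls: remove_regex(['http://example.com/a',
# 'http://example.com/logout'], 'logout') keeps only 'http://example.com/a',
# because every url matching the supplied pattern is dropped.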
####
# This functions checks whether a url should be crawled or not
####
def is_link(url):
# file extension that don't need to be crawled and are files
    conclusion = False # whether the url should be crawled or not
if url not in processed: # if the url hasn't been crawled already
if url.split('.')[-1].lower() in badTypes:
files.add(url)
else:
return True # url can be crawled
return conclusion # return the conclusion :D
####
# This function extracts string based on regex pattern supplied by user
####
supress_regex = False
def regxy(pattern, response):
    global supress_regex # the failure flag must update the module-level variable
    try:
matches = findall(r'%s' % pattern, response)
for match in matches:
custom.add(match)
except:
supress_regex = True
####
# This function extracts intel from the response body
####
def intel_extractor(response):
matches = findall(r'''([\w\.-]+s[\w\.-]+\.amazonaws\.com)|([\w\.-]+@[\w\.-]+\.[\.\w]+)''', response)
if matches:
for match in matches: # iterate over the matches
bad_intel.add(match) # add it to intel list
####
# This function extracts js files from the response body
####
def js_extractor(response):
matches = findall(r'src=[\'"](.*?\.js)["\']', response) # extract .js files
for match in matches: # iterate over the matches
bad_scripts.add(match)
####
# This function calculates the entropy of a string
####
def entropy(payload):
entropy = 0
    for number in range(256):
        # count characters on the string itself; bytes.count() rejects str needles on Python 3
        result = float(payload.count(chr(number))) / len(payload)
if result != 0:
entropy = entropy - result * log(result, 2)
return entropy
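# Worked example: 'aaaa' has entropy 0 (a single repeated character), while a
# 32-character token with all-distinct characters scores log2(32) = 5 bits per character;
# matches scoring >= 4 are later reported as candidate secret keys.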
####
# This function extracts stuff from the response body
####
def extractor(url):
response = requester(url) # make request to the url
matches = findall(r'<[aA].*href=["\']{0,1}(.*?)["\']', response)
for link in matches: # iterate over the matches
link = link.split('#')[0] # remove everything after a "#" to deal with in-page anchors
if is_link(link): # checks if the urls should be crawled
if link[:4] == 'http':
if link.startswith(main_url):
internal.add(link)
else:
external.add(link)
elif link[:2] == '//':
if link.split('/')[2].startswith(host):
internal.add(schema + link)
else:
external.add(link)
elif link[:1] == '/':
internal.add(main_url + link)
else:
internal.add(main_url + '/' + link)
if not only_urls:
intel_extractor(response)
js_extractor(response)
if args.regex and not supress_regex:
regxy(args.regex, response)
if api:
matches = findall(r'[\w-]{16,45}', response)
for match in matches:
if entropy(match) >= 4:
keys.add(url + ': ' + match)
####
# This function extracts endpoints from JavaScript Code
####
def jscanner(url):
response = requester(url) # make request to the url
matches = findall(r'[\'"](/.*?)[\'"]|[\'"](http.*?)[\'"]', response) # extract urls/endpoints
for match in matches: # iterate over the matches, match is a tuple
match = match[0] + match[1] # combining the items because one of them is always empty
if not search(r'[}{><"\']', match) and not match == '/': # making sure it's not some js code
endpoints.add(match) # add it to the endpoints list
####
# This function starts multiple threads for a function
####
def threader(function, *urls):
threads = [] # list of threads
urls = urls[0] # because urls is a tuple
for url in urls: # iterating over urls
task = threading.Thread(target=function, args=(url,))
threads.append(task)
# start threads
for thread in threads:
thread.start()
# wait for all threads to complete their work
for thread in threads:
thread.join()
# delete threads
del threads[:]
####
# This function processes the urls and uses a threadpool to execute a function
####
def flash(function, links): # process the links with a thread pool (chunked threads on Python < 3.2)
links = list(links) # convert links (set) to list
if sys.version_info < (3, 2):
for begin in range(0, len(links), thread_count): # range with step
end = begin + thread_count
splitted = links[begin:end]
threader(function, splitted)
progress = end
if progress > len(links): # fix if overflow
progress = len(links)
print('\r%s Progress: %i/%i' % (info, progress, len(links)), end='\r')
else:
threadpool = concurrent.futures.ThreadPoolExecutor(max_workers=thread_count)
futures = (threadpool.submit(function, link) for link in links)
for i, _ in enumerate(concurrent.futures.as_completed(futures)):
if i + 1 == len(links) or (i + 1) % thread_count == 0:
print('%s Progress: %i/%i' % (info, i + 1, len(links)), end='\r')
print('')
then = time.time() # records the time at which crawling started
# Step 1. Extract urls from robots.txt & sitemap.xml
zap(main_url)
# filter the seed urls with the exclude regex before crawling starts
internal = set(remove_regex(internal, args.exclude))
# Step 2. Crawl recursively to the limit specified in "crawl_level"
for level in range(crawl_level):
links = remove_regex(internal - processed, args.exclude) # links to crawl = (all links - already crawled links) - links not to crawl
if not links: # if links to crawl are 0 i.e. all links have been crawled
break
    elif len(internal) <= len(processed): # everything discovered so far has already been crawled
        if len(internal) > 2 + len(args.seeds): # and more than just the initial seed urls were found
break
print('%s Level %i: %i URLs' % (run, level + 1, len(links)))
try:
flash(extractor, links)
except KeyboardInterrupt:
print('')
break
if not only_urls:
for match in bad_scripts:
if match.startswith(main_url):
scripts.add(match)
elif match.startswith('/') and not match.startswith('//'):
scripts.add(main_url + match)
elif not match.startswith('http') and not match.startswith('//'):
scripts.add(main_url + '/' + match)
    # Step 3. Scan the JavaScript files for endpoints
print('%s Crawling %i JavaScript files' % (run, len(scripts)))
flash(jscanner, scripts)
for url in internal:
if '=' in url:
fuzzable.add(url)
for match in bad_intel:
for x in match: # because "match" is a tuple
if x != '': # if the value isn't empty
intel.add(x)
for url in external:
try:
if tld.get_fld(url, fix_protocol=True) in intels:
intel.add(url)
except:
pass
now = time.time() # records the time at which crawling stopped
diff = (now - then) # finds total time taken
def timer(diff):
minutes, seconds = divmod(diff, 60) # Changes seconds into minutes and seconds
try:
time_per_request = diff / float(len(processed)) # Finds average time taken by requests
except ZeroDivisionError:
time_per_request = 0
return minutes, seconds, time_per_request
minutes, seconds, time_per_request = timer(diff)
# Step 4. Save the results
if not os.path.exists(output_dir): # if the directory doesn't exist
os.mkdir(output_dir) # create a new directory
datasets = [files, intel, robots, custom, failed, internal, scripts, external, fuzzable, endpoints, keys]
dataset_names = ['files', 'intel', 'robots', 'custom', 'failed', 'internal', 'scripts', 'external', 'fuzzable', 'endpoints', 'keys']
def writer(datasets, dataset_names, output_dir):
for dataset, dataset_name in zip(datasets, dataset_names):
if dataset:
filepath = output_dir + '/' + dataset_name + '.txt'
if python3:
with open(filepath, 'w+', encoding='utf8') as f:
f.write(str('\n'.join(dataset)))
f.write('\n')
else:
with open(filepath, 'w+') as f:
joined = '\n'.join(dataset)
f.write(str(joined.encode('utf-8')))
f.write('\n')
writer(datasets, dataset_names, output_dir)
# Printing out results
print (('%s-%s' % (red, end)) * 50)
for dataset, dataset_name in zip(datasets, dataset_names):
if dataset:
print ('%s %s: %s' % (good, dataset_name.capitalize(), len(dataset)))
print (('%s-%s' % (red, end)) * 50)
print('%s Total requests made: %i' % (info, len(processed)))
print('%s Total time taken: %i minutes %i seconds' % (info, minutes, seconds))
print('%s Requests per second: %i' % (info, int(len(processed)/diff)))
datasets = {
'files': list(files), 'intel': list(intel), 'robots': list(robots), 'custom': list(custom), 'failed': list(failed), 'internal': list(internal),
'scripts': list(scripts), 'external': list(external), 'fuzzable': list(fuzzable), 'endpoints': list(endpoints), 'keys' : list(keys)
}
if args.dns:
print ('%s Enumerating subdomains' % run)
from plugins.findSubdomains import findSubdomains
subdomains = findSubdomains(domain)
print ('%s %i subdomains found' % (info, len(subdomains)))
writer([subdomains], ['subdomains'], output_dir)
datasets['subdomains'] = subdomains
from plugins.dnsdumpster import dnsdumpster
print ('%s Generating DNS map' % run)
dnsdumpster(domain, output_dir)
if args.export:
from plugins.exporter import exporter
# exporter(directory, format, datasets)
exporter(output_dir, args.export, datasets)
print('%s Results saved in %s%s%s directory' % (good, green, output_dir, end))
if args.std:
for string in datasets[args.std]:
sys.stdout.write(string + '\n')
|
python
|
from flask import Flask
from flask_restful import Api
from status import Status
from runner import Runner
app = Flask(__name__)
api = Api(app)
# Routing
api.add_resource(Status, '/')
api.add_resource(Runner, '/analyze')
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True, port=3000)
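# Once the server is running, the routes registered above can be exercised, e.g.
# (the HTTP methods depend on what Status/Runner implement in their own modules):
#   curl http://localhost:3000/          -> Status resource
#   curl http://localhost:3000/analyze   -> Runner resource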
|
python
|
"""
Main lesscss parse library. Contains lexer and parser, along with
utility classes
"""
|
python
|
# coding=utf-8
# Marcelo Ambrosio de Goes
# [email protected]
# 2022-03-07
# 100 Days of Code: The Complete Python Pro Bootcamp for 2022
# Day 20/21 - Snake Game
from turtle import Turtle
import random
class Food(Turtle):
def __init__(self, input_width, input_height):
super().__init__()
self.shape("circle")
self.color("blue")
self.penup()
self.shapesize(stretch_len=0.5, stretch_wid=0.5)
self.speed("fastest")
self.refresh(input_width, input_height)
def refresh(self, input_width, input_height):
x_coord = random.randint(round(-input_width/2)+10,round(input_width/2)-10)
y_coord = random.randint(round(-input_height/2)+10,round(input_height/2)-10)
self.goto(x_coord, y_coord)
|
python
|
from tests.nlg.test_nlg import BaseTestTemplateNLG, BaseTestCamrest
from convlab2.nlg.template.camrest.nlg import TemplateNLG
class TestTemplateCamrest(BaseTestTemplateNLG, BaseTestCamrest):
@classmethod
def setup_class(cls):
BaseTestTemplateNLG.setup_class()
BaseTestCamrest.setup_class()
def test_nlg(self):
self._test_nlg(TemplateNLG)
|
python
|
from threading import Thread, Timer
from time import time
import logging
import random
import numpy as np
class Brandbox_temperature_humidity(Thread):
"""This class is reads out continiously the temperature and humidity from the Brandbox
This class inherits all function from the threading class an therefore can be startet as thread."""
def __init__(self, main, framework, update_interval=5000):
"""This starts the background and continuous tasks like humidity and temperature control"""
Thread.__init__(self)
self.main = main
self.framework = framework
self.stop_measurement_loop = self.main.stop_measurement_loop
self.resource = framework["Devices"]["temphum_controller"]
self.update_interval = float(update_interval)
self.queue_to_main = framework["Message_to_main"]
self.vcw = framework["VCW"]
self.log = logging.getLogger(__name__)
self.testmode = False
self.running = False
# First try if visa_resource is valid
self.success = False
try:
first_try = self.vcw.query(self.resource, self.resource["get_environment"])
self.framework["Configs"]["config"]["settings"]["light"] = True # Dummy
if first_try:
self.success = True
except Exception as e:
self.log.error(
"The temperature and humidity controller seems not to be responding. Error:"
+ str(e)
)
def run(self):
"""This is the update function for temp hum query"""
if self.success and not self.running:
self.log.info("Humidity and temp control started...")
self.running = True
elif self.testmode and not self.running:
self.log.critical("Humidity and temp TEST MODE started!!!")
self.running = True
elif not self.running:
self.log.info("Humidity and temp control NOT started...")
return
if not self.stop_measurement_loop and self.success:
try:
                # Query the environment values from the Brandbox
envvalues = self.vcw.query(
self.resource, self.resource["get_environment"]
)
envvalues = envvalues.split(",")
# Get dewpoint
boxvalues = self.vcw.query(
self.resource, self.resource["get_box_environment"]
)
boxvalues = boxvalues.split(",")
# get light
luxvalues = self.vcw.query(self.resource, self.resource["get_lux"])
luxvalues = luxvalues.split(",")[0]
if float(luxvalues) >= 0.5:
self.framework["Configs"]["config"]["settings"]["lights"] = True
else:
self.framework["Configs"]["config"]["settings"]["lights"] = False
# get door
# doorvalues = self.vcw.query(self.resource, self.resource["get_door"])
# doorvalues = doorvalues.split(",")[0]
# if doorvalues == "1":
# self.framework["Configs"]["config"]["settings"]["door"] = False
# else:
# self.framework["Configs"]["config"]["settings"]["door"] = True
                # get vacuum
vacuumvalues = self.vcw.query(
self.resource, self.resource["get_vacuum"]
)
vacuumvalues = vacuumvalues.split(",")[0]
if vacuumvalues == "1":
self.framework["Configs"]["config"]["settings"]["vacuum"] = True
else:
self.framework["Configs"]["config"]["settings"]["vacuum"] = False
# Here a list
self.main.humidity_history = np.append(
self.main.humidity_history, float(envvalues[1])
) # todo: memory leak since no values will be deleted
self.main.temperatur_history = np.append(
                    self.main.temperatur_history, float(envvalues[3])
)
                # Write the pt100 and light status and environment in the box to the global variables
self.framework["Configs"]["config"]["settings"][
"chuck_temperature"
] = float(envvalues[3])
self.framework["Configs"]["config"]["settings"][
"air_temperature"
] = float(envvalues[0])
self.framework["Configs"]["config"]["settings"]["dew_point"] = float(
boxvalues[2]
)
# Send data to main
self.queue_to_main.put(
{
"temperature_air": [float(time()), float(envvalues[0])],
"temperature_chuck": [float(time()), float(envvalues[3])],
"dew_point": [float(time()), float(boxvalues[2])],
"humidity": [float(time()), float(envvalues[1])],
}
)
except Exception as err:
self.log.error(
"The temperature and humidity controller seems not to be responding. Error: {!s}".format(
err
),
exc_info=True,
)
elif self.testmode:
self.log.critical("Testmode sends message to main!")
self.queue_to_main.put(
{
"temperature": [float(time()), float(random.randint(1, 10))],
"humidity": [float(time()), float(random.randint(1, 10))],
}
)
if not self.main.stop_measurement_loop:
self.start_timer(self.run)
else:
self.log.info(
"Shutting down environment control due to stop of measurement loop"
)
    def start_timer(self, func):
        Timer(
            self.update_interval / 1000.0, func
        ).start()  # This ensures the function will be called again
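# Minimal usage sketch (hypothetical wiring; "main" and "framework" come from the
# surrounding control software and must expose the attributes accessed above):
#   monitor = Brandbox_temperature_humidity(main, framework, update_interval=5000)
#   monitor.start()  # run() re-arms itself via start_timer() every 5 seconds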
|
python
|
#!/usr/bin/env python3
#
# laaso/hydrator.py
#
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
#
'''
Main program to hydrate the Lustre namespace from blob.
'''
import datetime
import multiprocessing
import os
import pickle
import queue
import stat
import sys
import syslog
import logging
from logging.handlers import SysLogHandler
import threading
import time
import traceback
import laaso.appmanager
import laaso.azure_tool
from laaso.blobcache import (BlobAttrsBatch,
BlobAttributes,
BlobCache,
BlobCacheTermPill,
Ftypes)
import laaso.common
from laaso.exceptions import ApplicationExit
from laaso.hsmimport import (HSMImport,
HSMDefaults)
from laaso.hydratorstats import (HydratorStats,
PeriodicStatsPrinter)
import laaso.identity
from laaso.lustre_ctypes import (CLustreFid,
LibLustreApi)
from laaso.output import output_redact
import laaso.util
HYDRATOR_DESCRIPTION = """
Hydrate a Lustre namespace from an Azure storage account.
"""
HYDRATOR_STANDALONE = False
class LemurResults():
'''
Lemur results.
'''
def __init__(self):
'''
Init.
'''
self.setxattr_cnt = 0
self.setxattr_retry = 0
self.err_msg = None
self.err_tb = None
class FileImportResults():
'''
Results data sent to worker callback after a file is imported.
'''
def __init__(self,
uid=0, # effective uid, gid, mode that were used
gid=0,
mode=0,
rc=0, # rc from lustre hsm_import call
fid_out=None, # CLustreFid result from lustre hsm_import call
err_msg=None, # these two are only used if there was an error
err_tb=None):
'''
Init.
'''
self.uid = uid
self.gid = gid
self.mode = mode
self.rc = rc
self.fid_out = fid_out if fid_out else CLustreFid()
self.err_msg = err_msg
self.err_tb = err_tb
class FileImportWork():
'''
A container for the work description and all callback data for a worker process.
'''
def __init__(self, lustre_path, blob_attrs, archive_id):
'''
Init.
lustre_path, blob_attrs: parameters for the file import.
'''
self.lustre_path = lustre_path
self.blob_attrs = blob_attrs
self.archive_id = archive_id
self.import_results = None # set to a FileImportResults() by the worker process
        self.lemur_results = None # set to LemurResults() if lemur setxattrs were completed
class LemurParams():
'''
Container for the lemur params when doing a file import.
This only gets used when lemur compatibility is enabled.
'''
def __init__(self, container):
self.container = container
class FileImportWorkersBatch():
'''
Batch of work to send to worker processes for importing files.
'''
def __init__(self):
'''
Init. Call append to add a work item.
'''
self.work = list()
self.start = time.monotonic()
self.lemur_params = None
def append(self, more_work):
'''
Add an item to the batch.
'''
self.work.append(more_work)
class Context():
'''
Maintain some context while switching between directories in the tree.
The goal of tracking the current context is to reduce the overall number of
ops that we perform on directories. For example, we only want to set attributes
    on a directory once. Also, if we created a directory (vs. it was pre-existing) we know
that it should not have any children so we can make some performance-enhancing assumptions.
'''
def __init__(self, path, created=False, is_root=False):
'''
Init.
'''
self.path = path # path to the cwd
self.created = created # True if the hydrator created this dir (vs. pre-existing)
self.is_root = is_root # True if this is the root of the tree
class Hydrator(laaso.appmanager.ApplicationWithManager):
'''
Hydrate a Lustre namespace from an Azure storage account.
'''
STATS_FREQ_SEC = 30 # Print stats every this many seconds
# format for log messages
LOG_FMT = laaso.common.Application.LOG_FORMAT_TS_LEVEL
# File name for error reporting (goes in the root of the dest_path by default)
ERR_FILE_NAME = "azure_hydration_errors.log"
# header that we put on the error-reporting file (customer sees this)
ERR_FILE_HEADER = "# This file contains a list of errors encountered by the service while hydrating the Lustre namespace."
# Max errors before the hydrator gives up
MAX_ERRORS = 1000
# The following params are likely the key knobs influencing performance.
# In the future, it might be good to write code that adjusts these on the fly.
# - MAX_BLOBCACHE_PREFETCH is the number of blobs that are pre-fetched by the blob listing api.
# The blob list fetches blobs in pages, which are stored in memory. The main latency comes in to play
# when it runs out of blobs in the page (in memory) and must make a round trip to the storage account
# to gather more. Appropriate values for this setting will tend to be at least 2 or 3 times the
# page size plus some buffer so we can hide the latency of these round trips to the storage account,
# effectively staying out ahead of it. If this value is too low, you'll notice that 'qsize' occasionally
# reaches zero in the stats and 'blobwait' times start to drastically increase, indicating that
# our thread which reads from the interprocess queue is blocked waiting for work.
    # - MAX_BLOBCACHE_QSIZE is how large the multiprocessing.Queue can grow. For efficiency, we put many
# blobs in a batch when placing them on the interprocess queue because fewer large messages are
# more efficient than many small messages, likely to decrease locking overhead. There is probably
# no need to adjust this.
    # - The number of IMPORT_WORKERS represents how many in-flight hsm_import requests we make
# in parallel. Base this on the latency of hsm_import calls. If an hsm_import takes 1ms, and
    # we can send one every 100us, then we can likely keep 10 of them busy at once. Having a few extra
    # threads sitting idle is OK, but testing has revealed that too many extras add overhead in
# interprocess communication and has a net effect of slowing things down.
# - IMPORT_WORKERS_QUEUE_MAX represents how many hsm_import requests we allow to sit idle when
# all workers are busy. We want to maintain a healthy backlog of work, while at the same time
# avoid hogging too many resources when worker processes get slow due to networking or lustre issues.
# At a minimum, you want this to be several multiples of the IMPORT_WORKERS size as a buffer
# so we don't leave idle worker processes.
# - IMPORT_WORKERS_BATCH_SIZE controls how many files are sent to each worker in a batch.
# The queue mechanism used by a multiprocessing Pool in python gains some efficiency if you send
    # fewer, larger items rather than many small items over the queue.
MAX_BLOBCACHE_PREFETCH = 12000
MAX_BLOBCACHE_QSIZE = max(1, int(MAX_BLOBCACHE_PREFETCH / BlobAttrsBatch.MAX_BATCH_SIZE))
IMPORT_WORKERS = 50
IMPORT_WORKERS_QUEUE_MAX = int(IMPORT_WORKERS * 4)
IMPORT_WORKERS_BATCH_SIZE = 20
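    # Rough sizing arithmetic behind these defaults, using the illustrative numbers from the
    # comment block above: with ~1 ms per hsm_import and a dispatch roughly every 100 us,
    # concurrency ~= 1000 / 100 = 10 busy workers, so IMPORT_WORKERS = 50 leaves headroom;
    # the idle backlog is capped at 4 * IMPORT_WORKERS = 200 requests, and the blobcache
    # queue holds MAX_BLOBCACHE_PREFETCH / MAX_BATCH_SIZE batches of blobs.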
def __init__(self,
storage_acct,
container,
credential,
keyvault_name=None,
managed_identity_client_id=None,
keyvault_secret=False,
prefix="",
dest_path=".",
archive_id=HSMDefaults.ARCHIVEID,
err_file_name=ERR_FILE_NAME,
lemur=False,
resume_file_name="",
geneva_enable=False,
**kwargs):
'''
Constructor.
'''
kwargs.setdefault('log_fmt', self.LOG_FMT)
super().__init__(**kwargs)
self.storage_acct = storage_acct
self.container = container
self.credential = credential
self.keyvault_secret = keyvault_secret
self.keyvault_name = keyvault_name
self.managed_identity_client_id_pre = managed_identity_client_id or laaso.scfg.msi_client_id_default
self.managed_identity_client_id_post = None
self.lemur = lemur
self.prefix = prefix
self.dest_path = dest_path
self.archive_id = archive_id
self.last_stats = 0
self.geneva_enable = geneva_enable
output_redact('kv_cred_or_item_id', self.credential)
self.credential_val = None
self.contexts = []
self.stats_printer = None
self.stats = HydratorStats()
self.manager = None
# A pointer to our blobcache subprocess and a queue that we will communicate over
# Note: Also tried a pipe, but it can't hold a large enough backlog
self.blobcache = None
self.blobcache_queue = multiprocessing.Queue(maxsize=self.MAX_BLOBCACHE_QSIZE)
# For managing our worker processes which call hsm_import
self.import_workers = None
self.import_workers_lock = threading.Lock()
self.import_workers_cond = threading.Condition(lock=self.import_workers_lock)
self.import_workers_reqs = 0
self.import_workers_batch = FileImportWorkersBatch() # Initial (empty) batch
# File in Lustre where errors are logged
self.err_file = None
self.err_file_name = os.path.join(dest_path, err_file_name)
# This data helps us to resume where we left off in case of a failure after a partial hydration.
# The resume_timeline is an ordered dict of batch start times.
# Whenever we complete the batch at the head of the ordered dict (oldest active batch), we save the
# last blob name from the batch as a resume point.
self.resume_file_name = resume_file_name
self.resume_timeline = dict()
# Save umask so we can revert it later
self.old_umask = None
self.myuid = os.getuid()
self.mygid = os.getgid()
# setup sylog handler -- /dev/log sends logs to local syslogd
self.syslog_handler = SysLogHandler(facility=SysLogHandler.LOG_DAEMON, address='/dev/log')
self.logger.addHandler(self.syslog_handler)
log_format = '[%(levelname)s] %(filename)s \"%(message)s\"'
self.syslog_handler.setFormatter(logging.Formatter(fmt=log_format))
@classmethod
def main_add_parser_args(cls, ap_parser):
'''
Inherited from Application class. Add parser args.
'''
super().main_add_parser_args(ap_parser)
ap_parser.description = HYDRATOR_DESCRIPTION
ap_parser.add_argument('storage_acct', type=str, help='storage account name')
ap_parser.add_argument('container', type=str, help='container name within the storage account')
ap_parser.add_argument('credential', type=str, help='storage key or SAS token (surround by quotes)')
lemur_desc = '''
Set this flag if you intend to use a copytool that is based on the Lustre open source lemur project.
The flag causes hydrator.py to set extended attributes required by lemur-based copytools.
This reduces performance of hydrator.py, but is necessary for the copytool.
'''
ap_parser.add_argument("-l", "--lemur", action="store_true", help=lemur_desc)
ap_parser.add_argument("-p", "--prefix", type=str, default='', help='Prefix filter for the ingest.')
ap_parser.add_argument("-a", "--dest_path", type=str, default='.', help='Import to this lustre directory, example: /mnt/lustre. Default is cwd.')
ap_parser.add_argument("-d", "--archive_id", type=int, default=HSMDefaults.ARCHIVEID, help='lustre hsm archive id to use with importing files, default=1')
ap_parser.add_argument("-e", "--err_file_name", type=str, default=cls.ERR_FILE_NAME, help='Name of file to write errors in dest_path dir')
ap_parser.add_argument("-r", "--resume_file_name", type=str, default='', help='File that can be used to write status so the hydrator can pick up where it left off in case the node suffers failures during the hydration. By default this feature is turned off.')
ap_parser.add_argument("-g", "--geneva_enable", action="store_true", help='push hydrator stats to geneva')
@classmethod
def main_handle_parser_args(cls, ap_args):
'''
see laaso.common.Application.main_handle_parser_args()
'''
super().main_handle_parser_args(ap_args)
if ap_args.credential:
output_redact("%s.credential" % cls.__name__, ap_args.credential)
def clear_umask(self):
'''
Clears the umask so the hydrator can properly set permissions.
'''
self.old_umask = os.umask(0)
def restore_umask(self):
'''
Restore the umask to the value that it was before we called clear_umask.
'''
if self.old_umask is not None:
os.umask(self.old_umask)
self.old_umask = None
def main_execute(self):
'''
Main entry point.
'''
        if sys.version_info < (3, 7):
raise ApplicationExit("Python version 3.7 or higher is required to run this program.")
self.go()
raise ApplicationExit('Errors occurred during hydration.' if self.stats.general['errors'].get() else 0)
def blobname2path(self, name):
'''
Prepend the lustre mount to the path.
'''
return os.path.join(self.dest_path, name)
def do_chown(self, dbg, lustre_path, blob_attrs, default_uid, default_gid, created=False, force=False):
'''
Chown a file if the blob_attrs contain a valid uid or gid, using supplied defaults iff necessary.
dbg - a debugging string to track the caller
created - set to True if we created this file before calling the function (allows for perf optimization)
'''
if not blob_attrs:
return
if not blob_attrs.st_uid_valid and not blob_attrs.st_gid_valid:
return
uid = default_uid
if blob_attrs.st_uid_valid:
uid = blob_attrs.st_uid
gid = default_gid
if blob_attrs.st_gid_valid:
gid = blob_attrs.st_gid
if created and uid == self.myuid and gid == self.mygid:
return # no need to chown if we created the file and the desired uid and gid match our own
if uid == default_uid and gid == default_gid and not force:
return
self.stats.extended['chown'].inc()
self.logger.debug("CHOWN(%s): uid=%d gid=%d '%s'", dbg, uid, gid, lustre_path)
try:
os.chown(lustre_path, uid, gid, follow_symlinks=False)
except FileNotFoundError:
self.print_error(lustre_path, "unable to chown (file not found)")
except OSError as exc:
self.print_error(lustre_path, "unknown exception occurred during chown(uid=%d, gid=%d): %r", uid, gid, exc)
def do_chmod(self, dbg, lustre_path, blob_attrs, check_mode=None):
'''
Chmod a file if the blob attrs contain valid mode bits and they don't match check_mode.
dbg - a debugging string to track the caller
check_mode - only do the chmod if the existing mode bits don't match these
'''
if not blob_attrs or not blob_attrs.st_mode_valid:
return
if check_mode and blob_attrs.st_mode == check_mode:
return
if blob_attrs.st_type == Ftypes.SYMLINK:
return # No chmod on symlnks (not supported)
self.stats.extended['chmod'].inc()
self.logger.debug("CHMOD(%s): mode=%s '%s'", dbg, oct(blob_attrs.st_mode), lustre_path)
try:
os.chmod(lustre_path, blob_attrs.st_mode)
except FileNotFoundError:
self.print_error(lustre_path, "unable to chmod (file not found)")
except OSError as exc:
self.print_error(lustre_path, "unknown exception occurred during chmod(mode=%s)': %r",
oct(blob_attrs.st_mode), exc)
@staticmethod
def do_lemur_xattrs(lustre_path, blob_attrs, container,
test=False, test_retry=False, test_fail=False):
'''
Set the xattrs for lemur if we are configured to do so.
The xattrs are applied in parallel using the python threading interface.
This function is typically called from the worker subprocess, but may also be
called from the main process when comparing blobs to an existing file system.
lustre_path - the full path of the file in the filesystem.
blob_attrs - the attributes received from the blob read.
container - the container that we are hydrating from
test* - testing mode params to aid code coverage
returns: LemurResults()
'''
if blob_attrs.st_type != Ftypes.FILE:
return None # No xattr on symlinks, dirs, unknown (not supported)
# Important - the lemur copytool requires the UUID xattr to be set for it to work.
lemur_results = LemurResults()
threads = list()
for key_str, key_bytes, value_bytes in blob_attrs.get_lemur_xattrs(container):
name = "{path} : {key}".format(path=lustre_path, key=key_str)
th = threading.Thread(target=Hydrator.do_setxattr,
args=(lustre_path, key_str, key_bytes,
value_bytes, lemur_results, True,
test, test_retry, test_fail,),
name=name)
th.start()
threads.append(th)
for th in threads:
while True:
th.join(timeout=60.0)
if th.is_alive():
# Note: We are likely running in a subprocess. While printing to syslog isn't optimal,
# it's better than nothing.
syslog.syslog("hydrator: setxattr taking a long time: %s" % th.getName())
else:
break
return lemur_results
@staticmethod
def do_setxattr(lustre_path, key_str, key_bytes, value_bytes, lemur_results, create=True,
test=False, test_retry=False, test_fail=False):
'''
Utility function to set an xattr.
This function is typically called from the worker subprocess, but may also be
called from the main process when comparing blobs to an existing file system.
lustre_path - the full path of the file in the filesystem.
key_str - debugging string to help identify the xattr key being set
key_bytes - the bytes to set for the xattr key
value_bytes - the bytes to set for the xattr value
lemur_results - LemurResults() containing status of the operation
create - if true, attempt to create the xattr - otherwise replace.
test* - testing mode params to aid code coverage
'''
try:
flag = os.XATTR_CREATE if create else os.XATTR_REPLACE
if not test:
# This is the normal path, so putting it first for readability
os.setxattr(lustre_path, key_bytes, value_bytes, flag, follow_symlinks=False)
else:
# This path is used by pytest to help with code coverage.
if test_retry or test_fail:
raise OSError("Exception raised by error injection, retry(%r), fail(%r)" % (test_retry, test_fail))
print("LEMUR_TEST: setxattr %r:%r:%r, path %r" % (key_bytes, value_bytes, flag, lustre_path))
lemur_results.setxattr_cnt += 1
except OSError as exc:
# The docs give one set of exception cases: https://docs.python.org/3.3/library/os.html#os.XATTR_CREATE
# This ticket is new, and may or may not change anything: https://bugs.python.org/issue41277
# We're covering both.
if create:
# We failed to create, let's try to replace.
lemur_results.setxattr_retry += 1
Hydrator.do_setxattr(lustre_path, key_str, key_bytes, value_bytes, lemur_results, create=False,
test=test, test_retry=False, test_fail=test_fail)
else:
lemur_results.err_msg = "xattr(%s) %r" % (key_str, exc)
lemur_results.err_tb = traceback.format_exc()
except Exception as exc:
# Generic exception handler so we can properly report unhandled/unexpected errors.
# This function runs in a separate thread, so we don't want to miss anything.
lemur_results.err_msg = "xattr(%s) %r" % (key_str, exc)
lemur_results.err_tb = traceback.format_exc()
def lemur_setxattr_results_handler(self, lustre_path, lemur_results):
'''
Common results handler for lemur setxattr to properly log errors and register stats.
'''
if not lemur_results:
return
        self.stats.extended['xattr'].add(lemur_results.setxattr_cnt)
        self.stats.extended['xattr_retry'].add(lemur_results.setxattr_retry)
        if lemur_results.err_msg:
            self.print_error(lustre_path,
                             "File contents may not hydrate properly from the archive, error setting xattrs: %r, traceback:\n%s",
                             lemur_results.err_msg, lemur_results.err_tb)
        else:
            self.logger.debug("SETXATTR: xattr_cnt(%d) retries(%d): %r",
                              lemur_results.setxattr_cnt, lemur_results.setxattr_retry, lustre_path)
@classmethod
def do_stat(cls, lustre_path, blob_attrs):
'''
Common function for calling os.stat. Returns the results of os.stat() if the file exists.
Callers should be sure to catch exceptions, such as OSError, that a typical stat call may generate.
'''
try:
if blob_attrs and blob_attrs.st_type == Ftypes.SYMLINK:
stat_res = os.lstat(lustre_path)
else:
stat_res = os.stat(lustre_path)
return stat_res
except FileNotFoundError:
pass
except PermissionError as exc:
raise ApplicationExit("Permission error performing stat operation, are you running as root? %r" % lustre_path) from exc
return None
def validate_existing(self, lustre_path, blob_attrs):
'''
        Verify that a file exists in Lustre and make sure that its attributes in Lustre match those in the blob.
lustre_path - path to the file or dir
blob_attrs - the attributes that we want the directory to have
Returns True if the file exists at the Lustre path. False if it does not.
'''
stat_res = self.do_stat(lustre_path, blob_attrs)
if not stat_res:
return False # File does not exist. Return False.
if not blob_attrs:
return True # No attrs passed, so nothing else to check
assert stat_res
if not Ftypes.is_matching(blob_attrs.st_type, stat_res):
# If we are trying to import a dir and there is an existing
# file in Lustre, we will remove the file and proceed to import the dir.
# If there is an existing dir in Lustre and we are trying to import a file, then we will
# flag it as an error instead of attempting to delete the whole tree.
self.stats.extended['wrong_ftype'].inc()
if stat.S_ISDIR(stat_res.st_mode):
self.print_error(lustre_path,
"The path references a directory in Lustre, but a file is specified in the Azure storage account.")
return True
try:
os.remove(lustre_path)
self.print_error(lustre_path,
"Removed existing file in Lustre and replacing it with a directory from the Azure storage account.")
except OSError as exc:
raise ApplicationExit("Error removing a conflicting file when a directory should be present at %r" % lustre_path) from exc
return False
if not blob_attrs.st_mode_valid and not blob_attrs.st_uid_valid and not blob_attrs.st_gid_valid:
# No attrs that we care about were passed
return True
dbg = "ex"
if blob_attrs.st_type == Ftypes.DIR:
dbg += "d"
self.stats.extended['stat_existing_dir'].inc()
elif blob_attrs.st_type == Ftypes.FILE:
dbg += "f"
self.stats.extended['stat_existing_file'].inc()
else:
assert blob_attrs.st_type == Ftypes.SYMLINK
dbg += "l"
self.stats.extended['stat_existing_symlink'].inc()
self.do_chmod(dbg, lustre_path, blob_attrs, stat_res.st_mode & BlobAttributes.ALL_MODE_BITS)
self.do_chown(dbg, lustre_path, blob_attrs, stat_res.st_uid, stat_res.st_gid)
if self.lemur:
lemur_results = self.do_lemur_xattrs(lustre_path, blob_attrs, self.container)
self.lemur_setxattr_results_handler(lustre_path, lemur_results)
return True
def try_validate_existing(self, lustre_path, blob_attrs):
'''
Wrapper validate_existing with a try-except to catch some known errors and
gracefully return an error status.
Returns a tuple of bools: (success, exists)
'''
exists = False
try:
exists = self.validate_existing(lustre_path, blob_attrs)
except OSError as exc:
self.print_error(lustre_path, "Error validating file or directory name: %r", exc)
return False, False
return True, exists
def import_a_directory(self, lustre_path, blob_attrs):
'''
Create a new directory using the supplied attributes.
We only set mode bits and uid/gid on directories.
'''
created = False
self.stats.general['dirs'].inc()
mode = BlobAttributes.DEFAULT_MODE_DIRS
if blob_attrs and blob_attrs.st_mode_valid:
mode = blob_attrs.st_mode
self.logger.debug("MKDIR: mode=%s '%s'", oct(mode), lustre_path)
dbg = "mkdir"
try:
os.makedirs(lustre_path, mode=mode)
created = True
except FileExistsError as exc:
if os.path.isdir(lustre_path):
                # This can happen if the children were created first using a makedirs and this
# directory was created incidentally.
self.stats.extended['mkdir_exists'].inc()
else:
raise ApplicationExit("Expected a directory, but found an existing file in Lustre at %r" % lustre_path) from exc
self.do_chmod(dbg, lustre_path, blob_attrs)
except OSError as exc:
# Generic handler for unexpected cases. Permissions problem?
# It seems like we should exit if we can't create an entire portion of the tree.
raise ApplicationExit("Unexpected error while creating directory '%s'" % lustre_path) from exc
self.do_chown(dbg, lustre_path, blob_attrs, BlobAttributes.DEFAULT_UID, BlobAttributes.DEFAULT_GID, created=created, force=True)
return created
def import_a_symlink(self, lustre_path, blob_attrs):
'''
Import a symbolic link into the namespace.
'''
assert blob_attrs.st_type == Ftypes.SYMLINK
self.stats.general['symlinks'].inc()
created = False
if blob_attrs.contents:
self.logger.debug("CREATE(lnk): '%s'", lustre_path)
try:
os.symlink(blob_attrs.contents, lustre_path)
created = True
except FileExistsError:
self.stats.extended['eexist_symlink'].inc()
self.print_error(lustre_path, "symlink unexpectedly exists")
# Future: What to do if the existing file is not a symlink or the link dest does not match?
# fallthrough: symlink already exists, see if we need to chown it
except FileNotFoundError:
self.print_error(lustre_path, "could not import symlink (path not found)")
return
except OSError as exc:
# Generic handler for other exceptions (what else can we expect here?)
self.print_error(lustre_path, "exception while importing symlink with dest '%s': %r",
blob_attrs.contents, exc)
return
else:
self.print_error(lustre_path, "Could not import symbolic link with no contents")
return
dbg = "lnk"
# No chmod for symlinks. Python doesn't support it and symlink mode bits are ignored anyway.
self.do_chown(dbg, lustre_path, blob_attrs, BlobAttributes.DEFAULT_UID, BlobAttributes.DEFAULT_GID, created=created, force=True)
def print_blob_warnings(self, blob_attrs):
'''
Print warnings that occurred while listing and reading the metadata from blob.
'''
for warning in blob_attrs.warnings:
self.print_error(blob_attrs.name, "blob processing error: %s", warning)
def write_to_hydration_errors_file(self, msg):
'''
Write an error message to the hydration errors file.
'''
try:
if not self.err_file:
self.err_file = open(self.err_file_name, 'a')
if os.path.getsize(self.err_file_name) == 0:
self.err_file.write(self.ERR_FILE_HEADER + '\n')
self.err_file.write(msg + '\n')
except Exception as exc:
raise ApplicationExit("Terminating due to exception while logging errors to the hydration errors file %r: %r" % (self.err_file_name, exc)) from exc
def print_error(self, path, msg, *args):
'''
Log an error that occurred during the import process.
'''
msg_formatted = msg % args
err_msg = "\"{path}\": {msg}".format(path=path, msg=msg_formatted)
self.logger.error(err_msg)
self.write_to_hydration_errors_file(err_msg)
self.stats.general['errors'].inc()
def context_switch(self, lustre_path, blob_attrs=None):
'''
Handle a directory [context] change.
lustre_path - next directory that we intend to operate within
blob_attrs - make sure the directory attributes match these attributes
'''
while True:
context = self.contexts[-1]
if context.path == lustre_path:
# Switching back to a leaf dir that we previously created
self.logger.debug("PATHUPD(1): '%s'", lustre_path)
return
if lustre_path.startswith(context.path):
# Context is a parent
break
# Pop to parent dir
if context.is_root:
self.logger.warning("blob path '%s' is outside of root '%s'", lustre_path, context.path)
assert not context.is_root # Everything must be a subdir of root
self.logger.debug("PATHPOP: '%s'", context.path)
self.contexts.pop()
context = self.contexts[-1]
# Check if we need to create a new directory, or if it already exists.
# If we created the parent directory, then we can skip the exists check.
created = False
exists = self.validate_existing(lustre_path, blob_attrs)
if not exists:
created = self.import_a_directory(lustre_path, blob_attrs)
if created:
self.logger.debug("PATHNEW: '%s'", lustre_path)
else:
# Switching back to an internal dir that we previously created
self.logger.debug("PATHUPD(2): '%s'", lustre_path)
self.contexts.append(Context(lustre_path, created))
self.stats.progress['last_dir'].set(lustre_path)
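    # Illustrative behaviour of the context stack (hypothetical paths, assuming
    # directory blobs arrive in lexicographic order): after switching to
    # '/lustre/a' and then '/lustre/a/b' the stack holds [root, a, a/b]; a later
    # switch to '/lustre/a/c' pops 'a/b' (not a prefix of the new path) and
    # pushes 'a/c', so self.contexts always mirrors the current directory chain.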
def try_context_switch(self, lustre_path, blob_attrs=None):
'''
Wrapper to context_switch() which catches specific exceptions
and returns an error status.
'''
try:
self.context_switch(lustre_path, blob_attrs)
except OSError as exc:
self.print_error(lustre_path, "Error importing a directory: %r, traceback:\n%s", exc, laaso.util.indent_exc())
return False
return True
@staticmethod
def get_attrs_to_import(blob_attrs):
'''
Return the mode, uid, gid that should be imported for the file based on the blob attributes.
'''
if not blob_attrs:
return BlobAttributes.DEFAULT_MODE_FILES, BlobAttributes.DEFAULT_UID, BlobAttributes.DEFAULT_GID
mode = blob_attrs.st_mode if blob_attrs.st_mode_valid else BlobAttributes.DEFAULT_MODE_FILES
uid = blob_attrs.st_uid if blob_attrs.st_uid_valid else BlobAttributes.DEFAULT_UID
gid = blob_attrs.st_gid if blob_attrs.st_gid_valid else BlobAttributes.DEFAULT_GID
return mode, uid, gid
@staticmethod
def import_workers_batch_process(workers_batch):
'''
Main driver for importing files in the worker subprocess.
Iterate over the batch, calling hsm_import on each file.
This is called from inside of the worker subprocess.
'''
try:
lemur_params = workers_batch.lemur_params
for work in workers_batch.work:
work.import_results = Hydrator.import_a_file(work.lustre_path, work.blob_attrs, work.archive_id)
if lemur_params:
work.lemur_results = Hydrator.do_lemur_xattrs(work.lustre_path, work.blob_attrs,
lemur_params.container)
except Exception as exc:
# Fill in results for any work that we could not complete
for work in workers_batch.work:
if not work.import_results:
work.import_results = FileImportResults(err_msg=repr(exc), err_tb=traceback.format_exc())
return workers_batch
@staticmethod
def import_a_file(lustre_path, blob_attrs, archive_id):
'''
Import a file into Lustre by calling hsm_import.
This is called from inside of the worker subprocess.
'''
try:
mode, uid, gid = Hydrator.get_attrs_to_import(blob_attrs)
hsmimport = HSMImport(abspath=lustre_path, mode=mode, uid=uid, gid=gid,
size=blob_attrs.st_size,
mtime=blob_attrs.st_mtim,
archiveid=archive_id)
fid_out = CLustreFid()
rc = hsmimport.do_it(fid_out)
return FileImportResults(uid=uid, gid=gid, mode=mode, rc=rc, fid_out=fid_out)
except Exception as exc:
return FileImportResults(err_msg=repr(exc), err_tb=traceback.format_exc())
def import_workers_batch_cb(self, workers_batch):
'''
Callback executed after an entire batch of files is imported.
Execute the per-file callback function to process the results for each file.
'''
try:
for work in workers_batch.work:
self.import_a_file_cb(work)
self.lemur_setxattr_cb(work)
except Exception as exc:
self.print_error("Internal", "Exception occurred while processing import results: %r\n%s",
exc, traceback.format_exc())
finally:
self.dec_worker_req_count()
self.remove_from_resume_timeline(workers_batch)
self.stats.threading['batch_count'].inc()
self.stats.timing['batch_latency'].add(time.monotonic() - workers_batch.start)
def import_a_file_cb(self, work):
'''
Callback executed for each file in the batch imported by the workers so we can handle status.
'''
res = work.import_results
if res.rc > 0:
self.print_error(work.lustre_path, "Lustre hsm_import error mode=%s, uid=%d, gid=%d, size=%d rc=%d",
oct(res.mode), res.uid, res.gid, work.blob_attrs.st_size, res.rc)
elif res.err_msg:
self.print_error(work.lustre_path, "Exception %r occurred while importing file %r:\n%s",
res.err_msg, work.lustre_path, res.err_tb)
else:
self.stats.progress['last_file'].set(work.lustre_path)
self.stats.general['size'].add(work.blob_attrs.st_size)
self.stats.general['files'].inc()
self.logger.debug("IMPORT(file): mode=%s uid=%d gid=%d fid=[0x%x:0x%x:0x%x] '%s'",
oct(res.mode), res.uid, res.gid,
res.fid_out.f_seq, res.fid_out.f_oid, res.fid_out.f_ver,
work.lustre_path)
def lemur_setxattr_cb(self, work):
'''
Callback executed for each file in the batch to handle lemur xattr results.
'''
self.lemur_setxattr_results_handler(work.lustre_path, work.lemur_results)
def dec_worker_req_count(self):
'''
Decrement the number of outstanding workers.
'''
with self.import_workers_cond:
assert self.import_workers_reqs
self.import_workers_reqs -= 1
if self.import_workers_reqs <= self.IMPORT_WORKERS_QUEUE_MAX:
self.import_workers_cond.notify()
self.stats.threading['active'].set(self.import_workers_reqs)
def inc_worker_req_count(self):
'''
Increment the request count. May block if we're above the global limit.
'''
with self.import_workers_cond:
self.import_workers_reqs += 1
self.stats.threading['active'].set(self.import_workers_reqs)
while self.import_workers_reqs > self.IMPORT_WORKERS_QUEUE_MAX:
self.stats.threading['throttled'].inc()
self.import_workers_cond.wait(timeout=5.0)
def init_blobcache(self):
'''
Initialize the blobcache.
'''
self.blobcache = BlobCache(self.blobcache_queue,
self.manager.subscription_id,
self.storage_acct,
self.container,
self.credential_val,
self.manager,
prefix=self.prefix)
blobop = self.blobcache.blobop_container_bundle_generate()
try:
hns_enabled = blobop.hns_enabled
except Exception as exc:
self.logger.error("%s: cannot access %r: %r", self.mth(), blobop.name, exc)
# Drop blobcache ref to avoid getting stuck in blobcache.join()
self.blobcache = None
raise
self.logger.info("%s client_id=%s hns_enabled=%s", self.mth(), self.managed_identity_client_id_post, hns_enabled)
def manager_kwargs(self, **kwargs):
'''
See laaso.Application.manager_kwargs()
'''
ret = super().manager_kwargs()
if self.managed_identity_client_id_post:
ret['managed_identity_client_id'] = self.managed_identity_client_id_post
return ret
@classmethod
def init_liblustreapi(cls):
'''
Initialize our interface to lustre.
'''
LibLustreApi.init_once()
def init_azure(self):
'''
Initialize interactions with Azure
'''
bootstrap_mgr = self.MANAGER_CLASS(**self.manager_kwargs())
self.managed_identity_client_id_post = laaso.identity.client_id_from_uami_str(self.managed_identity_client_id_pre, bootstrap_mgr)
if not self.managed_identity_client_id_post:
raise ApplicationExit(f"cannot resolve managed_identity_client_id {self.managed_identity_client_id_pre!r}")
self.manager = self.MANAGER_CLASS(**self.manager_kwargs())
def init_creds(self):
'''
Initialize our credentials from the keyvault if necessary.
'''
if self.keyvault_secret:
try:
client_id = self.managed_identity_client_id_post
self.credential_val = self.manager.keyvault_secret_get(keyvault_name=self.keyvault_name, secret_name=self.credential, client_id=client_id).value
self.logger.debug("Acquired Managed Identity for client_id %r", self.managed_identity_client_id_post)
except Exception as exc:
raise ApplicationExit("Could not fetch secret %r from keyvault %s: %r" % (self.credential, self.keyvault_name, exc)) from exc
else:
self.credential_val = self.credential
def init_stats(self):
'''
Initialize stats from our resume point, if necessary.
'''
self.stats.get_resume_point(self.resume_file_name, self.storage_acct, self.container, self.prefix)
def get_next_blob_batch(self):
'''
Read the next blob from the blobcache queue.
'''
obj = None
while True:
try:
start = time.monotonic()
msg = self.blobcache_queue.get(timeout=3.0)
end = time.monotonic()
self.stats.timing['blobcache_latency'].add(end-start)
obj = pickle.loads(msg)
break
except queue.Empty:
self.stats.threading['blobcache_qempty'].inc()
if not self.blobcache.is_alive():
# This is unexpected.
# The blobcache should have at least sent its term pill
self.print_error("Internal", "BlobCache terminated unexpectedly (no traceback available)")
break
self.stats.threading['blobcache_qsize'].set(self.blobcache_queue.qsize())
if isinstance(obj, BlobCacheTermPill):
# BlobCache hit an exception and terminated
pill = obj
self.print_error("Internal", "BlobCache terminated unexpectedly with exception '%s', traceback:\n%s",
pill.error_msg, pill.error_tb)
return None
assert isinstance(obj, BlobAttrsBatch)
return obj
def add_to_resume_timeline(self, workers_batch):
'''
Add a batch to our resume timeline.
'''
self.resume_timeline[workers_batch.start] = workers_batch
def remove_from_resume_timeline(self, workers_batch):
'''
Remove the batch from the timeline of outstanding batches.
Call this when we're done processing a batch.
If this is the oldest batch, it will be at the head of the dict ordering, and we update
our resume point using this blob name.
Note: This feature relies on dict() keys maintaining insertion order when you
        iterate over them. This only works with Python 3.7 and later.
Previously, an OrderedDict was required.
'''
assert self.resume_timeline.get(workers_batch.start)
# next(iter(mydict)) seems to be the fastest way to get the first key
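        # Illustrative note (not from the original code): with insertion-ordered
        # dicts (CPython 3.7+), next(iter(d)) yields the first key *inserted*,
        # not the smallest key, e.g.:
        #   d = {}; d[10.5] = batch_a; d[3.2] = batch_b
        #   next(iter(d))  # -> 10.5, so the oldest outstanding batch is found first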
if next(iter(self.resume_timeline)) == workers_batch.start:
# This is the head/oldest batch. Use it to record our resume point.
if workers_batch.work[-1].blob_attrs:
self.stats.progress['resume_blob'].set(workers_batch.work[-1].blob_attrs.name)
del self.resume_timeline[workers_batch.start]
def send_to_workers(self, lustre_path, blob_attrs):
'''
Add more work to the current workers_batch so it can be deployed to the workers.
If the current workers_batch is full, then flush it.
'''
self.import_workers_batch.append(FileImportWork(lustre_path, blob_attrs, self.archive_id))
if len(self.import_workers_batch.work) >= self.IMPORT_WORKERS_BATCH_SIZE:
self.flush_workers_batch()
def flush_workers_batch(self):
'''
Send the current workers_batch to the workers.
Reinitialize to prepare for the next workers_batch.
'''
workers_batch = self.import_workers_batch
if not workers_batch.work:
return # nothing to flush
self.inc_worker_req_count() # blocks if there is too much work outstanding
if self.lemur:
workers_batch.lemur_params = LemurParams(self.container)
self.add_to_resume_timeline(workers_batch)
self.import_workers.apply_async(self.import_workers_batch_process,
(workers_batch,),
callback=self.import_workers_batch_cb)
self.import_workers_batch = FileImportWorkersBatch() # New empty batch
def go(self):
'''
Initialize the hydration process.
This is a wrapper around the main routine to make sure we handle initializing
and shutdown steps properly in case there are exceptions.
'''
complete = False
try:
self.init_liblustreapi()
self.init_azure()
self.init_creds()
self.init_stats()
self.clear_umask()
self.init_blobcache()
self.stats_printer = PeriodicStatsPrinter(self.stats, self.logger, self.STATS_FREQ_SEC, self.resume_file_name,
self.storage_acct, self.container, self.prefix, self.geneva_enable)
self.stats_printer.start()
self.stats.timing['start'].set(time.time())
self.stats.timing['start_mono'].set(time.monotonic())
if self.stats.progress['resume_blob'].get():
self.logger.info("Hydrator resuming from blob %r error count %d at %s",
self.stats.progress['resume_blob'].get(),
self.stats.general['errors'].get(),
datetime.datetime.fromtimestamp(self.stats.timing['start'].get()))
else:
self.logger.info("Hydrator starting at %s",
datetime.datetime.fromtimestamp(self.stats.timing['start'].get()))
self.logger.info("Account(%s) Container(%s) Prefix(%s) LustrePath(%s)",
self.storage_acct, self.container, self.prefix, self.dest_path)
self.import_workers = multiprocessing.Pool(self.IMPORT_WORKERS)
self.blobcache.start()
self.go_internal() # Start main routine
complete = True
finally:
# Shutdown steps. Try to unwind them in the opposite order as above.
if self.blobcache:
if self.blobcache.is_alive():
self.blobcache.terminate()
self.blobcache.join()
if self.import_workers:
self.import_workers.close()
self.import_workers.join() # Note: No timeout param available, if workers are stuck this could get stuck here
# Our resume timeline should be flushed/empty if we completed our walk through the blob list
if complete:
assert not self.resume_timeline
self.stats.timing['end'].set(time.time())
if self.stats_printer:
self.stats_printer.stop()
self.stats_printer.print_now()
now = time.monotonic()
self.logger.info("Hydration complete at %s, elapsed: %.2fs, %d errors.",
datetime.datetime.fromtimestamp(self.stats.timing['end'].get()),
now - self.stats.timing['start_mono'].get(),
self.stats.general['errors'].get())
if self.resume_file_name and os.path.exists(self.resume_file_name):
try:
os.remove(self.resume_file_name)
except OSError as exc:
self.logger.error("Hydration complete, but there was an error deleting resume file %r: %r",
self.resume_file_name, exc)
if self.err_file:
self.err_file.close()
self.err_file = None
self.restore_umask()
def go_internal(self):
'''
Main routine for running the import process.
Loop over blobs, create corresponding files and dirs in Lustre.
'''
# Create and setup the root directory context
context = Context(self.dest_path)
context.is_root = True
if self.dest_path:
exists = os.path.isdir(self.dest_path)
if not exists:
context.created = True
try:
os.makedirs(self.dest_path, mode=BlobAttributes.DEFAULT_MODE_DIRS)
except OSError as exc:
raise ApplicationExit("Error creating destination path %r: %r" % (self.dest_path, exc)) from exc
self.contexts.append(context)
# Main loop for looping over the blobs and importing them
done = False
while not done:
context = self.contexts[-1]
# Get the next blob, deal with any warnings
blob_batch = self.get_next_blob_batch()
if not blob_batch:
break # done, blobcache hit an error
for blob_attrs in blob_batch.contents:
if not blob_attrs:
done = True
break # processed all blobs
self.stats.general['blobs'].inc()
lustre_path = self.blobname2path(blob_attrs.name)
if len(lustre_path) > BlobAttributes.PATH_MAX:
self.print_error(lustre_path, "cannot import blob since it will exceed PATH_MAX(%d)", BlobAttributes.PATH_MAX)
continue
dirpath = os.path.dirname(lustre_path)
if blob_attrs.warnings:
self.print_blob_warnings(blob_attrs)
# Handle directory, file, or symlink
if blob_attrs.st_type == Ftypes.DIR:
self.try_context_switch(lustre_path, blob_attrs)
continue
assert blob_attrs.st_type in (Ftypes.FILE, Ftypes.SYMLINK)
if dirpath != context.path:
if not self.try_context_switch(dirpath):
continue
context = self.contexts[-1]
if not context.created:
success, exists = self.try_validate_existing(lustre_path, blob_attrs)
if not success:
continue
if exists:
continue
if blob_attrs.st_type == Ftypes.SYMLINK:
self.import_a_symlink(lustre_path, blob_attrs)
else:
assert blob_attrs.st_type == Ftypes.FILE
self.send_to_workers(lustre_path, blob_attrs)
# Check if this batch sent us over the max errors
if self.stats.general['errors'].get() > self.MAX_ERRORS:
raise ApplicationExit("Hydration terminating due to too many errors(%d), see '%s' for more details." %
(self.MAX_ERRORS, self.err_file_name))
self.flush_workers_batch() # flush any outstanding work
Hydrator.main(__name__)
|
python
|
from admin_tabs.tests.metaadminpageconfig import *
|
python
|
#!/usr/bin/env python3
import sys
import operator
import unittest
import hypothesis
from .util import lattice_dimensions_with_lower_bound
from synth.search import Simple
from synth.search import MinimizedSplit
from synth.search import BinaryPartition
from synth.search import Saddleback
class DummyFunction:
def __init__(self, lower_bounds, upper_bounds):
self._lower_bounds = lower_bounds
self._upper_bounds = upper_bounds
self.function = self
def naive_lattice_bounds(self): return self._upper_bounds
def lower_bound(self): return operator.mul(*self._lower_bounds)
def upper_bound(self): return operator.mul(*self._upper_bounds)
class DummySynthesizer:
def __init__(self, dimensions): self._dimensions = dimensions
def __call__(self, _function, m, n): (self.m, self.n) = (m, n); return self
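    # _get(m, n) reports whether a solution exists at lattice point (m, n); indices
    # beyond the recorded grid fall back to the nearest recorded cell by shrinking
    # the larger coordinate first (n on ties).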
def _get(self, m, n):
try: return self._dimensions[m - 1][n - 1]
except IndexError:
if m > n: return self._get(m - 1, n)
else: return self._get(m, n - 1)
def synth(self, *args):
if self._get(self.m, self.n):
return {"solution": True, "solution_height": self.m,
"solution_width": self.n}
return dict()
class SearchBase:
@hypothesis.given(lattice_dimensions_with_lower_bound())
def test_search(self, dimensions_and_lower_bound):
(lower_bound, dimensions) = dimensions_and_lower_bound
upper_bound = (len(dimensions), len(dimensions[0]))
minimal_dim = min(((m, n) for (m, row) in enumerate(dimensions, 1)
for (n, value) in enumerate(row, 1)
if value), key=lambda x: x[0]*x[1])
function = DummyFunction(lower_bound, upper_bound)
synthesizer = DummySynthesizer(dimensions)
result = self.SEARCH(function, synthesizer).synth()
self.assertIsNotNone(result.get("solution"))
result_dim = (result.get("solution_height"), result.get("solution_width"))
self.assertEqual(operator.mul(*minimal_dim), operator.mul(*result_dim))
thismodule = sys.modules[__name__]
# MinimizedSplit is broken
for search in (Saddleback, BinaryPartition):
class_name = "Test{}Search".format(search.__name__)
clazz = type(class_name, (SearchBase, unittest.TestCase), {"SEARCH": search})
setattr(thismodule, class_name, clazz)
if __name__ == '__main__':
unittest.main()
|
python
|
from vumi.transports.twitter.twitter import (
ConfigTwitterEndpoints, TwitterTransport)
__all__ = ['ConfigTwitterEndpoints', 'TwitterTransport']
|
python
|
#!/usr/bin/env python
"""A mercurial external differ for notebooks.
Uses nbdime to create diffs for notebooks instead of plain text diffs of JSON.
See the documentation for how to correctly configure mercurial to use this.
Use with:
hg extdiff -p hg-nbdiff [<commit> [<commit>]]
"""
from __future__ import print_function
import os
import sys
from nbdime import nbdiffapp
from nbdime.args import (
add_diff_args, add_filename_args, add_diff_cli_args, add_prettyprint_args,
ConfigBackedParser,
)
from nbdime.diffing.directorydiff import diff_directories
def main(args=None):
if args is None:
args = sys.argv[1:]
import argparse
parser = ConfigBackedParser('hg-nbdiff', description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
add_diff_args(parser)
add_diff_cli_args(parser)
add_prettyprint_args(parser)
add_filename_args(parser, ('base', 'remote'))
opts = parser.parse_args(args)
# TODO: Filter base/remote: If directories, find all modified notebooks
# If files that are not notebooks, ensure a decent error is printed.
if not os.path.isfile(opts.base) or not os.path.isfile(opts.remote):
base, remote = opts.base, opts.remote
for a, b in diff_directories(base, remote):
opts.base, opts.remote = a, b
ret = nbdiffapp.main_diff(opts)
if ret != 0:
return ret
return ret
else:
return nbdiffapp.main_diff(opts)
if __name__ == "__main__":
sys.exit(main())
|
python
|
# imports
import matplotlib.pyplot as plt
import scipy.stats as stats
import numpy as np
import json
import pickle
from urllib.request import urlopen
# helper functions
def bisection(array, value):
'''Given an ``array`` , and given a ``value`` , returns an index j such that ``value`` is between array[j]
and array[j+1]. ``array`` must be monotonic increasing. j=-1 or j=len(array) is returned
to indicate that ``value`` is out of range below and above respectively.'''
n = len(array)
if (value < array[0]):
return -1
elif (value > array[n - 1]):
return n
jl = 0 # Initialize lower
ju = n - 1 # and upper limits.
while (ju - jl > 1): # If we are not yet done,
jm = (ju + jl) >> 1 # compute a midpoint with a bit shift
if (value >= array[jm]):
jl = jm # and replace either the lower limit
else:
ju = jm # or the upper limit, as appropriate.
# Repeat until the test condition is satisfied.
if (value == array[0]): # edge cases at bottom
return 0
elif (value == array[n - 1]): # and top
return n - 1
else:
return jl
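# A quick illustration of bisection() (the array and values below are made up):
#   bisection([1, 3, 5, 7], 4)  # -> 1  (4 lies between array[1]=3 and array[2]=5)
#   bisection([1, 3, 5, 7], 0)  # -> -1 (below the range)
#   bisection([1, 3, 5, 7], 9)  # -> 4  (above the range, i.e. len(array))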
def if_habitlab_on(timestamp, log):
'''
    given a timestamp and a log of the user's goal enable/disable events, determine
    whether HabitLab was on for that goal at that timestamp
'''
timestamp_list = [x["timestamp"] for x in log]
    index = bisection(timestamp_list, timestamp)  # index of the log entry the timestamp falls at or just after
    # print(str(index) + " " + str(timestamp_list[index]) + " " + str(timestamp))
    # before the first log entry, HabitLab cannot have been enabled yet
if index == -1:
return False
if index == len(log):
        # if the value is above the largest value in the list
index -= 1
if log[index]["type"] == "goal_enabled":
return True
elif log[index]["type"] == "goal_disabled":
return False
return
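# Illustrative behaviour of if_habitlab_on() (log entries below are made up):
#   log = [{"timestamp": 100, "type": "goal_enabled"},
#          {"timestamp": 200, "type": "goal_disabled"}]
#   if_habitlab_on(150, log)  # -> True  (last event at or before 150 enabled the goal)
#   if_habitlab_on(250, log)  # -> False (the goal was disabled at 200)
#   if_habitlab_on(50, log)   # -> False (before any HabitLab activity)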
def get_time_stamp(item):
return item["timestamp"]
with open("get_user_to_all_install_times.json") as lc:
installtime = json.load(lc)
installtime = {k: min(installtime[k]) for k in installtime}
# read a unique user list
with open(r"log_data\all_users_in_experiment_by_name") as lc:
userIDs = set(json.load(lc))
success = 0
fail = 0
we = ["facebook", "reddit", "twitter", "youtube", "gmail"]
result_string = ""
for w in we:
test_data = []
print("processing " + w)
    # traverse through all users to determine whether average time spent on websites per day / per session is reduced after enabling the goals
idx = 0
for userid in userIDs:
if idx % 100 == 0 : print(str(idx) + "/" + str(len(userIDs)))
idx += 1
num_month = 0
try:
install = installtime[userid]
except KeyError:
continue
# user log
# http://localhost:5000/printcollection?collection=e98febf6f84d010a469e9d0f_logs:goals
link = "http://localhost:5000/printcollection?collection=" + userid + "_logs:goals"
# print(link)
# print("retrieving log for userid = " + userid)
f = urlopen(link).read()
parsed_raw = json.loads(f.decode('utf-8'))
#f.close()
'''
raw = ""
with open("data.txt",encoding='utf-8', mode = 'r') as f:
raw = f.read()
parsed_raw = json.loads(raw)
'''
# filter out those without goal_name
parsed_raw = [i for i in parsed_raw if "goal_name" in i]
# secs on a website per session
# http://localhost:5000/printcollection?collection=683c1e28dcad53573b3f2c83_synced:seconds_on_domain_per_day
link = "http://localhost:5000/printcollection?collection=" + userid + "_synced:visits_to_domain_per_day"
# print(link)
# print("retrieving seconds_on_domain_per_day for userid = " + userid)
f2 = urlopen(link).read()
seconds_on_domain_per_session = json.loads(f2.decode('utf-8'))
#f2.close()
'''
seconds_on_domain_per_session = ""
with open("seconds_on_domain_per_day.txt",encoding='utf-8', mode = 'r') as f:
seconds_on_domain_per_session = f.read()
seconds_on_domain_per_session = json.loads(seconds_on_domain_per_session)
'''
# sort to websites
websites = set()
for line in parsed_raw:
websites.add(line["goal_name"].split("/")[0])
website_vs_raw = {w: [] for w in websites}
for line in parsed_raw:
website_vs_raw[line["goal_name"].split("/")[0]].append(line)
website_vs_sec_on_domain = {w: [] for w in websites}
for web in websites:
for line in seconds_on_domain_per_session:
# print(line)
# print()
try:
if web in line["key"]:
website_vs_sec_on_domain[web].append(line)
except KeyError:
pass
# print(line)
if w not in website_vs_sec_on_domain:
continue
sec_on_domain_per_for_w = website_vs_sec_on_domain[w]
raw_for_w = website_vs_raw[w]
# notuniquekeys = set([line["key2"] for line in sec_on_domain_per_for_w])
# get the largest value on the same day
largest = dict()
pop_list = []
for i, line in enumerate(sec_on_domain_per_for_w):
try:
if largest[line["key2"]][1] > line["val"]:
pop_list.append(i)
else:
pop_list.append(largest[line["key2"]][0])
largest[line["key2"]] = (i, line["val"])
except KeyError:
largest[line["key2"]] = (i, line["val"])
# pop all
pop_list = sorted(pop_list, reverse= True)
for p in pop_list:
sec_on_domain_per_for_w.pop(p)
# test if unique
# uniquekeys = [line["key2"] for line in sec_on_domain_per_for_w]
'''
if len(uniquekeys) > len(set(uniquekeys)):
print("not unique!!")
        # check if popped too much
if len(uniquekeys) < len(notuniquekeys):
print("popped too much!!")
'''
#sort by timestamp
raw_for_w = sorted(raw_for_w, key=get_time_stamp)
sec_on_domain_per_for_w = sorted(sec_on_domain_per_for_w, key=get_time_stamp)
disabled_sec = 0
abled_sec = 0
disabled_num_visits = 0
abled_num_visits = 0
for line in sec_on_domain_per_for_w:
if if_habitlab_on(line["timestamp"], raw_for_w):
abled_sec += line["val"]
abled_num_visits += 1
else:
disabled_sec += line["val"]
disabled_num_visits += 1
if disabled_num_visits == 0 or abled_num_visits == 0:
continue
else:
avg_disabled_sec = disabled_sec / disabled_num_visits
avg_abled_sec = abled_sec / abled_num_visits
test_data.append({"userid": userid, "disabled_sec": disabled_sec, "abled_sec": abled_sec,
"disabled_num_visits": disabled_num_visits, "abled_num_visits": abled_num_visits,
"avg_disabled_sec": avg_disabled_sec, "avg_abled_sec": avg_abled_sec})
# print("userid = " + userid)
# print(disabled_sec)
# print(abled_sec)
# print(disabled_num_visits)
# print(abled_num_visits)
# print(disabled_sec / disabled_num_visits)
# print(abled_sec / abled_num_visits)
# if (avg_abled_sec < avg_disabled_sec):
# success += 1
# else:
# fail += 1
# print(success)
# print(fail)
dd = test_data
    disabled = [i['avg_disabled_sec'] for i in dd]
    abled = [i['avg_abled_sec'] for i in dd]
    result_string += (w + '\n')
    result_string += (str(np.average(disabled)) + '\n')
    result_string += (str(np.average(abled)) + '\n')
    result_string += (str(stats.ttest_rel(disabled, abled)) + '\n')
    # print(w)
    # print(np.average(disabled))
    # print(np.average(abled))
    # print(stats.ttest_rel(disabled, abled))
print(result_string)
|
python
|
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
from __future__ import absolute_import
import mock
from oauth2client.client import AccessTokenCredentials
import unittest
import datalab.context
import datalab.stackdriver.monitoring as gcm
class TestCases(unittest.TestCase):
def test_make_client(self):
project_id = 'project_id'
context = self._create_context()
client = gcm._utils.make_client(project_id, context)
self.assertEqual(client.project, project_id)
self.assertEqual(client.connection.credentials, context.credentials)
self.assertEqual(client._connection_class.USER_AGENT, 'pydatalab/v0')
@mock.patch('datalab.context._context.Context.default')
def test_make_client_w_defaults(self, mock_context_default):
default_context = self._create_context()
mock_context_default.return_value = default_context
client = gcm._utils.make_client()
self.assertEqual(client.project, default_context.project_id)
self.assertEqual(
client.connection.credentials, default_context.credentials)
self.assertEqual(client._connection_class.USER_AGENT, 'pydatalab/v0')
@staticmethod
def _create_context(project_id='test'):
creds = AccessTokenCredentials('test_token', 'test_ua')
return datalab.context.Context(project_id, creds)
|
python
|
#!/usr/bin/python3
"""
Consumer
--------
Write a generator that consumes lines of a text and prints them to standard
output. Use this generator and a `flatten` function from the previous task to
print contents of two different files to a screen pseudo-simultaneously.
"""
from course.lesson07.task05.flatten import flatten
def read_file(name):
"""
Creates generator that returns lines from file
Args:
name(str): path to file
Return:
generator(str) : returns each line in file
"""
with open(name, mode='r', encoding='utf-8') as fd:
for line in fd:
yield line.strip()
if __name__ == '__main__':
for line in flatten(read_file('../../alice.txt'), read_file('../../alice.txt')):
print(line)
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 6 13:55:09 2019
@author: Mrinmoy Bhattacharjee, PhD Scholar, EEE Dept., IIT Guwahati
"""
import os
import numpy as np
import pickle
from sklearn import preprocessing
import json
from sklearn.metrics import confusion_matrix, precision_recall_fscore_support
from scipy.spatial import distance
import csv
def save_obj(obj, folder, name):
with open(folder+'/'+ name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(folder, name):
with open(folder+'/' + name + '.pkl', 'rb') as f:
return pickle.load(f)
def print_configuration(PARAMS):
opFile = PARAMS['opDir'] + '/Configuration.csv'
fid = open(opFile, 'a+', encoding = 'utf-8')
PARAM_keys = [key for key in PARAMS.keys()]
for i in range(len(PARAM_keys)):
if PARAM_keys[i]=='GPU_session':
continue
# print('PARAMS key: ', PARAM_keys[i])
try:
fid.write(PARAM_keys[i] + '\t')
fid.write(json.dumps(PARAMS[PARAM_keys[i]]))
fid.write('\n')
        except Exception:
fid.write(PARAM_keys[i] + '\tERROR\n')
fid.close()
def getPerformance(PtdLabels, GroundTruths, labels):
ConfMat = confusion_matrix(y_true=GroundTruths, y_pred=PtdLabels)
precision, recall, fscore, support = precision_recall_fscore_support(y_true=GroundTruths, y_pred=PtdLabels, beta=1.0, average=None, labels=labels)
precision = np.round(precision,4)
recall = np.round(recall,4)
fscore = np.round(fscore,4)
fscore = np.append(fscore, np.mean(fscore))
return ConfMat, precision, recall, fscore
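# Illustrative call (made-up labels, not from any experiment):
#   ConfMat, precision, recall, fscore = getPerformance(
#       PtdLabels=[0, 1, 1, 0], GroundTruths=[0, 1, 0, 0], labels=[0, 1])
# ConfMat is the 2x2 confusion matrix, and fscore holds one F1 value per label
# with their mean appended as the last element.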
def print_results(PARAMS, suffix, **kwargs):
opFile = PARAMS['opDir'] + '/Performance' + suffix + '.csv'
linecount = 0
if os.path.exists(opFile):
with open(opFile, 'r', encoding='utf8') as fid:
for line in fid:
linecount += 1
fid = open(opFile, 'a+', encoding = 'utf-8')
heading = 'fold'
values = str(PARAMS['fold'])
for i in range(len(kwargs)):
heading = heading + '\t' + np.squeeze(kwargs[str(i)]).tolist().split(':')[0]
values = values + '\t' + np.squeeze(kwargs[str(i)]).tolist().split(':')[1]
if linecount==0:
fid.write(heading + '\n' + values + '\n')
else:
fid.write(values + '\n')
fid.close()
def print_model_summary(arch_file, model):
stringlist = ['']
model.summary(print_fn=lambda x: stringlist.append(x))
short_model_summary = "\n".join(stringlist)
with open(arch_file, 'w+', encoding='utf8') as f:
f.write(short_model_summary)
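# get_annotations() below indexes each CSV row as: row[0] = item id, row[1] =
# '|'-separated genre string, row[2] = movie title; the first row is skipped as a
# header. A hypothetical row (layout inferred from the indexing, not taken from
# the original data file):
#   tt0000001,Comedy|Drama,Some Movie Title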
def get_annotations(path, possible_genres):
annotations = {}
genre_list = {}
genre_id = 0
with open(path) as annot_file:
annotreader = csv.reader(annot_file, delimiter=',', quotechar='|')
row_count = 0
for row in annotreader:
if row==[]:
continue
if row_count==0:
row_count += 1
continue
annotations[row[0]] = {'movie_title':row[2], 'genre':row[1].split('|')}
valid_labels = []
for genre in row[1].split('|'):
G = genre.replace(' ', '')
if (not G in genre_list.keys()) and (G in possible_genres):
genre_list[G] = genre_id
genre_id += 1
if G in possible_genres:
valid_labels.append(genre)
annotations[row[0]]['genre'] = valid_labels
row_count += 1
return annotations, genre_list
|
python
|
"""
Incident Updates API Endpoint
"""
# Third Party Library
from django.views import View
from django.urls import reverse
from pyvalitron.form import Form
from django.http import JsonResponse
from django.forms.fields import DateTimeField
from django.utils.translation import gettext as _
# Local Library
from app.modules.util.helpers import Helpers
from app.modules.core.request import Request
from app.modules.core.response import Response
from app.modules.core.task import Task as Task_Module
from app.modules.validation.extension import ExtraRules
from app.modules.core.decorators import allow_if_authenticated
from app.modules.core.incident import Incident as IncidentModule
from app.modules.core.subscriber import Subscriber as SubscriberModule
from app.modules.core.notification import Notification as NotificationModule
from app.modules.core.incident_update import IncidentUpdate as IncidentUpdateModule
from app.modules.core.incident_update_component import IncidentUpdateComponent as IncidentUpdateComponentModule
from app.modules.core.incident_update_notification import IncidentUpdateNotification as IncidentUpdateNotificationModule
class IncidentUpdates(View):
__request = None
__response = None
__helpers = None
__form = None
__logger = None
__user_id = None
__incident = None
__incident_update = None
__task = None
__notification = None
__subscriber = None
__incident_update_notification = None
__correlation_id = None
def __init__(self):
self.__request = Request()
self.__response = Response()
self.__helpers = Helpers()
self.__form = Form()
self.__incident = IncidentModule()
self.__incident_update = IncidentUpdateModule()
self.__task = Task_Module()
self.__notification = NotificationModule()
self.__subscriber = SubscriberModule()
self.__incident_update_notification = IncidentUpdateNotificationModule()
self.__logger = self.__helpers.get_logger(__name__)
self.__form.add_validator(ExtraRules())
@allow_if_authenticated
def post(self, request, incident_id):
self.__correlation_id = request.META["X-Correlation-ID"] if "X-Correlation-ID" in request.META else ""
self.__user_id = request.user.id
self.__request.set_request(request)
request_data = self.__request.get_request_data("post", {
"status": "",
"notify_subscribers": "",
"message": "",
"datetime": "",
})
self.__form.add_inputs({
'message': {
'value': request_data["message"],
'sanitize': {
'strip': {}
},
'validate': {}
},
'datetime': {
'value': request_data["datetime"],
'sanitize': {
'strip': {}
},
'validate': {}
},
'status': {
'value': request_data["status"],
'validate': {
'any_of': {
'param': [["investigating", "identified", "monitoring", "update", "resolved"]],
'error': _('Error! Status is invalid.')
}
}
},
'notify_subscribers': {
'value': request_data["notify_subscribers"],
'validate': {
'any_of': {
'param': [["on", "off"]],
'error': _('Error! Notify subscribers is invalid.')
}
}
}
})
self.__form.process()
if not self.__form.is_passed():
return JsonResponse(self.__response.send_errors_failure(self.__form.get_errors(), {}, self.__correlation_id))
result = self.__incident_update.insert_one({
"notify_subscribers": self.__form.get_sinput("notify_subscribers"),
"datetime": DateTimeField().clean(self.__form.get_sinput("datetime")),
"total_suscribers": self.__subscriber.count_by_status(SubscriberModule.VERIFIED),
"message": self.__form.get_sinput("message"),
"status": self.__form.get_sinput("status"),
"incident_id": incident_id
})
if self.__form.get_sinput("status") == "resolved":
self.__incident.update_one_by_id(incident_id, {
"status": "closed"
})
else:
self.__incident.update_one_by_id(incident_id, {
"status": "open"
})
if result:
return JsonResponse(self.__response.send_private_success([{
"type": "success",
"message": _("Incident update created successfully.")
}], {}, self.__correlation_id))
else:
            return JsonResponse(self.__response.send_private_failure([{
                "type": "error",
                "message": _("Error! Something went wrong while creating the update.")
}], {}, self.__correlation_id))
@allow_if_authenticated
def get(self, request, incident_id):
self.__correlation_id = request.META["X-Correlation-ID"] if "X-Correlation-ID" in request.META else ""
self.__request.set_request(request)
request_data = self.__request.get_request_data("get", {
"offset": "",
"limit": ""
})
try:
offset = int(request_data["offset"])
limit = int(request_data["limit"])
except Exception:
offset = 0
limit = 0
return JsonResponse(self.__response.send_private_success([], {
'updates': self.__format_incident_updates(self.__incident_update.get_all(incident_id, offset, limit), incident_id),
'metadata': {
'offset': offset,
'limit': limit,
'count': self.__incident_update.count_all(incident_id)
}
}, self.__correlation_id))
def __format_incident_updates(self, updates, incident_id):
updates_list = []
for update in updates:
notified_subscribers = self.__incident_update_notification.count_by_update_status(
update.id,
IncidentUpdateNotificationModule.SUCCESS
)
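            # Percentage of the subscribers counted at send time that have already
            # been notified successfully (capped at 100 below).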
            progress = int(notified_subscribers / update.total_suscribers * 100) if update.total_suscribers > 0 else 0
updates_list.append({
"id": update.id,
"status": update.status.title(),
"notify_subscribers": update.notify_subscribers.title(),
"datetime": update.datetime.strftime("%b %d %Y %H:%M:%S"),
"progress": progress if progress <= 100 else 100,
"created_at": update.created_at.strftime("%b %d %Y %H:%M:%S"),
"view_url": reverse("app.web.admin.incident_update.view", kwargs={'incident_id': incident_id, "update_id": update.id}),
"edit_url": reverse("app.web.admin.incident_update.edit", kwargs={'incident_id': incident_id, "update_id": update.id}),
"delete_url": reverse("app.api.private.v1.admin.incident_update.endpoint", kwargs={'incident_id': incident_id, "update_id": update.id})
})
return updates_list
class IncidentUpdate(View):
__request = None
__response = None
__helpers = None
__form = None
__logger = None
__user_id = None
__incident_update = None
__correlation_id = None
def __init__(self):
self.__request = Request()
self.__response = Response()
self.__helpers = Helpers()
self.__form = Form()
self.__incident_update = IncidentUpdateModule()
self.__logger = self.__helpers.get_logger(__name__)
self.__form.add_validator(ExtraRules())
@allow_if_authenticated
def post(self, request, incident_id, update_id):
self.__correlation_id = request.META["X-Correlation-ID"] if "X-Correlation-ID" in request.META else ""
self.__request.set_request(request)
request_data = self.__request.get_request_data("post", {
"status": "",
"notify_subscribers": "",
"message": "",
"datetime": "",
})
self.__form.add_inputs({
'message': {
'value': request_data["message"],
'sanitize': {
'strip': {}
},
'validate': {}
},
'datetime': {
'value': request_data["datetime"],
'sanitize': {
'strip': {}
},
'validate': {}
},
'status': {
'value': request_data["status"],
'validate': {
'any_of': {
'param': [["investigating", "identified", "monitoring", "update", "resolved"]],
'error': _('Error! Status is invalid.')
}
}
},
'notify_subscribers': {
'value': request_data["notify_subscribers"],
'validate': {
'any_of': {
'param': [["on", "off"]],
'error': _('Error! Notify subscribers is invalid.')
}
}
}
})
self.__form.process()
if not self.__form.is_passed():
return JsonResponse(self.__response.send_errors_failure(self.__form.get_errors(), {}, self.__correlation_id))
result = self.__incident_update.update_one_by_id(update_id, {
"notify_subscribers": self.__form.get_sinput("notify_subscribers"),
"datetime": DateTimeField().clean(self.__form.get_sinput("datetime")),
"message": self.__form.get_sinput("message"),
"status": self.__form.get_sinput("status")
})
if result:
return JsonResponse(self.__response.send_private_success([{
"type": "success",
"message": _("Incident update updated successfully.")
}], {}, self.__correlation_id))
else:
            return JsonResponse(self.__response.send_private_failure([{
                "type": "error",
                "message": _("Error! Something went wrong while updating the incident update.")
}], {}, self.__correlation_id))
@allow_if_authenticated
def delete(self, request, incident_id, update_id):
self.__correlation_id = request.META["X-Correlation-ID"] if "X-Correlation-ID" in request.META else ""
self.__user_id = request.user.id
if self.__incident_update.delete_one_by_id(update_id):
return JsonResponse(self.__response.send_private_success([{
"type": "success",
"message": _("Incident update deleted successfully.")
}], {}, self.__correlation_id))
else:
            return JsonResponse(self.__response.send_private_failure([{
                "type": "error",
                "message": _("Error! Something went wrong while deleting the incident update.")
}], {}, self.__correlation_id))
class IncidentUpdatesNotify(View):
__request = None
__response = None
__helpers = None
__form = None
__logger = None
__user_id = None
__incident_update = None
__task = None
__notification = None
__subscriber = None
__correlation_id = None
def __init__(self):
self.__request = Request()
self.__response = Response()
self.__helpers = Helpers()
self.__form = Form()
self.__incident_update = IncidentUpdateModule()
self.__task = Task_Module()
self.__notification = NotificationModule()
self.__subscriber = SubscriberModule()
self.__logger = self.__helpers.get_logger(__name__)
self.__form.add_validator(ExtraRules())
@allow_if_authenticated
def post(self, request, incident_id, update_id):
self.__correlation_id = request.META["X-Correlation-ID"] if "X-Correlation-ID" in request.META else ""
self.__user_id = request.user.id
task = self.__task.delay("incident_update", {
"incident_update_id": update_id,
"user_id": self.__user_id
}, self.__user_id)
result = False
if task:
result = self.__notification.create_notification({
"highlight": "Incident Update",
"notification": "notifying subscribers with the incident update",
"url": "#",
"type": NotificationModule.PENDING,
"delivered": False,
"user_id": self.__user_id,
"task_id": task.id
})
if task and result:
return JsonResponse(self.__response.send_private_success([{
"type": "success",
"message": _("Notification delivery started successfully.")
}], {}, self.__correlation_id))
else:
            return JsonResponse(self.__response.send_private_failure([{
                "type": "error",
                "message": _("Error! Something went wrong while starting the delivery.")
}], {}, self.__correlation_id))
class IncidentUpdatesComponents(View):
__request = None
__response = None
__helpers = None
__form = None
__logger = None
__user_id = None
__incident_update = None
__task = None
__notification = None
__subscriber = None
__incident_update_component = None
__correlation_id = None
def __init__(self):
self.__request = Request()
self.__response = Response()
self.__helpers = Helpers()
self.__form = Form()
self.__incident_update = IncidentUpdateModule()
self.__task = Task_Module()
self.__notification = NotificationModule()
self.__subscriber = SubscriberModule()
self.__logger = self.__helpers.get_logger(__name__)
self.__incident_update_component = IncidentUpdateComponentModule()
self.__form.add_validator(ExtraRules())
@allow_if_authenticated
def post(self, request, incident_id, update_id):
self.__correlation_id = request.META["X-Correlation-ID"] if "X-Correlation-ID" in request.META else ""
self.__user_id = request.user.id
self.__request.set_request(request)
request_data = self.__request.get_request_data("post", {
"type": "",
"component_id": ""
})
self.__form.add_inputs({
'component_id': {
'value': request_data["component_id"],
'validate': {
'sv_numeric': {
'error': _('Error! Component is required.')
}
}
},
'type': {
'value': request_data["type"],
'validate': {
'any_of': {
'param': [["operational", "degraded_performance", "partial_outage", "major_outage", "maintenance"]],
'error': _('Error! Type is required.')
}
}
}
})
self.__form.process()
if not self.__form.is_passed():
return JsonResponse(self.__response.send_errors_failure(self.__form.get_errors(), {}, self.__correlation_id))
result = self.__incident_update_component.insert_one({
"component_id": int(self.__form.get_sinput("component_id")),
"type": self.__form.get_sinput("type"),
"incident_update_id": int(update_id)
})
if result:
return JsonResponse(self.__response.send_private_success([{
"type": "success",
"message": _("Affected component created successfully.")
}], {}, self.__correlation_id))
else:
            return JsonResponse(self.__response.send_private_failure([{
                "type": "error",
                "message": _("Error! Something went wrong while creating the affected component.")
}], {}, self.__correlation_id))
class IncidentUpdatesComponent(View):
__request = None
__response = None
__helpers = None
__form = None
__logger = None
__user_id = None
__incident_update_component = None
__correlation_id = None
def __init__(self):
self.__request = Request()
self.__response = Response()
self.__helpers = Helpers()
self.__form = Form()
self.__incident_update_component = IncidentUpdateComponentModule()
self.__logger = self.__helpers.get_logger(__name__)
self.__form.add_validator(ExtraRules())
@allow_if_authenticated
def delete(self, request, incident_id, update_id, item_id):
self.__correlation_id = request.META["X-Correlation-ID"] if "X-Correlation-ID" in request.META else ""
self.__user_id = request.user.id
if self.__incident_update_component.delete_one_by_id(item_id):
return JsonResponse(self.__response.send_private_success([{
"type": "success",
"message": _("Affected component deleted successfully.")
}], {}, self.__correlation_id))
else:
            return JsonResponse(self.__response.send_private_failure([{
                "type": "error",
                "message": _("Error! Something went wrong while deleting the affected component.")
}], {}, self.__correlation_id))
|
python
|
class TreeNode:
def __init__(self, val, left, right):
self.val = val
self.right = right
self.left = left
class Solution:
def mergeTrees(self, t1, t2):
"""
:type t1: TreeNode
:type t2: TreeNode
:rtype: TreeNode
Given two binary trees and imagine that when you put one of them to cover the other, some nodes of the two trees are overlapped while the others are not.
You need to merge them into a new binary tree. The merge rule is that if two nodes overlap, then sum node values up as the new value of the merged node. Otherwise, the NOT null node will be used as the node of new tree.
"""
if t1 is None:
return t2
if t2 is None:
return t1
t1.val += t2.val
t1.left = self.mergeTrees(t1.left, t2.left)
t1.right = self.mergeTrees(t1.right, t2.right)
return t1
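# Minimal usage sketch (not part of the original solution file); the node values
# below are the classic LeetCode 617 example and are assumptions for illustration.
if __name__ == "__main__":
    t1 = TreeNode(1, TreeNode(3, TreeNode(5, None, None), None), TreeNode(2, None, None))
    t2 = TreeNode(2, TreeNode(1, None, TreeNode(4, None, None)), TreeNode(3, None, TreeNode(7, None, None)))
    merged = Solution().mergeTrees(t1, t2)
    print(merged.val, merged.left.val, merged.right.val)  # -> 3 4 5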
|
python
|
from owslib.etree import etree
from owslib.util import Authentication, openURL
from urllib.parse import urlencode, parse_qsl
class WFSCapabilitiesReader(object):
"""Read and parse capabilities document into a lxml.etree infoset
"""
def __init__(self, version="1.0", username=None, password=None, headers=None, auth=None):
"""Initialize"""
self.headers = headers
if auth:
if username:
auth.username = username
if password:
auth.password = password
self.auth = auth or Authentication(username, password)
self.version = version
self._infoset = None
def capabilities_url(self, service_url):
"""Return a capabilities url
"""
qs = []
if service_url.find("?") != -1:
qs = parse_qsl(service_url.split("?")[1])
params = [x[0] for x in qs]
if "service" not in params:
qs.append(("service", "WFS"))
if "request" not in params:
qs.append(("request", "GetCapabilities"))
if "version" not in params:
qs.append(("version", self.version))
urlqs = urlencode(tuple(qs))
return service_url.split("?")[0] + "?" + urlqs
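    # For example (hypothetical service URL), with the default version "1.0":
    #   capabilities_url("http://example.com/wfs")
    #   -> "http://example.com/wfs?service=WFS&request=GetCapabilities&version=1.0"
    # Existing query parameters are preserved; only the missing ones are appended.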
def read(self, url, timeout=30):
"""Get and parse a WFS capabilities document, returning an
instance of WFSCapabilitiesInfoset
Parameters
----------
url : string
The URL to the WFS capabilities document.
timeout : number
A timeout value (in seconds) for the request.
"""
request = self.capabilities_url(url)
u = openURL(request, timeout=timeout, headers=self.headers, auth=self.auth)
return etree.fromstring(u.read())
def readString(self, st):
"""Parse a WFS capabilities document, returning an
instance of WFSCapabilitiesInfoset
string should be an XML capabilities document
"""
if not isinstance(st, str) and not isinstance(st, bytes):
raise ValueError(
"String must be of type string or bytes, not %s" % type(st)
)
return etree.fromstring(st)
class AbstractContentMetadata(object):
def __init__(self, headers=None, auth=None):
self.auth = auth or Authentication()
self.headers = headers
def get_metadata(self):
return [
m["metadata"]
for m in self.metadataUrls
if m.get("metadata", None) is not None
]
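# Hedged usage sketch (not from the original module): capabilities_url() only
# appends the service/request/version parameters that are missing from the
# supplied URL. The endpoint below is a placeholder, not a real server.
if __name__ == "__main__":
    reader = WFSCapabilitiesReader(version="1.1.0")
    print(reader.capabilities_url("https://example.com/wfs?map=demo"))
    # -> https://example.com/wfs?map=demo&service=WFS&request=GetCapabilities&version=1.1.0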
|
python
|
import shutil
from django.core.checks import Error, register
@register
def check_binaries(app_configs, **kwargs):
errors = []
if not shutil.which("convert", path="/usr/local/bin:/usr/bin:/bin"):
errors.append(
Error(
'The "convert" binary could not be found',
hint="Try installing ImageMagick.",
id="feincms3_downloads.E001",
)
)
if not shutil.which("pdftocairo", path="/usr/local/bin:/usr/bin:/bin"):
errors.append(
Error(
'The "pdftocairo" binary could not be found',
hint="Try installing poppler-utils.",
id="feincms3_downloads.E001",
)
)
return errors
|
python
|
import pytest
from ..traittypes import _DelayedImportError
def test_delayed_access_raises():
dummy = _DelayedImportError('mypackage')
with pytest.raises(RuntimeError):
dummy.asarray([1, 2, 3])
|
python
|
import os
import time
import pytest
from huntsman.drp.services.plotter import PlotterService
@pytest.fixture(scope="function")
def plotter_service(config, exposure_collection_real_data):
ps = PlotterService(config=config)
yield ps
# Make sure the service is stopped
ps.stop()
# Cleanup the image dir ready for other tests
for p in ps.plotters:
for fname in os.listdir(p.image_dir):
if fname.endswith(".png"):
os.remove(os.path.join(p.image_dir, fname))
def test_plotter_service(plotter_service):
assert len(plotter_service.plotters) == 2
# Start the service
plotter_service.start()
# Wait for plots to be created
time.sleep(10)
# Check that the files exist
for p in plotter_service.plotters:
assert os.path.isdir(p.image_dir)
n_images = sum([len(_) for _ in p._plot_configs.values()])
n_actual = len([_ for _ in os.listdir(p.image_dir) if _.endswith(".png")])
# Strictly, this should be an exact equality. However we don't necessarily know how many
# cameras / filters there are, so this will do for now.
assert n_actual >= n_images
# Check that the service is still running
assert plotter_service.is_running
# Check we can stop the service
plotter_service.stop()
assert not plotter_service.is_running
# Check that calling stop again does not raise an error
plotter_service.stop()
|
python
|
from nn_recipe.NN import Network
import numpy as np
from nn_recipe.utility import OneHotEncoder
from nn_recipe.DataLoader import MNISTDataLoader
# Load data from the MNIST data set
mnist = MNISTDataLoader(rootPath="C:\\Users\\mgtmP\\Desktop\\NNRecipe\\mnist", download=False)
mnist.load()
X_test = mnist.get_test_data().reshape((-1, 28 * 28))
Y_test = mnist.get_test_labels().reshape((-1, 1))
X_test = X_test / 255
# Creating Label encoder
encoder = OneHotEncoder(
types=[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0],
active_state=1,
inactive_state=0
)
# Load the previously trained MNIST dense-layer model
net = Network.load("C:\\Users\\mgtmP\\Desktop\\mnist_network_model_3_38.net")
# Check model accuracy on the test dataset
out = net.evaluate(X_test)
yhat = encoder.decode(out)
yhat = np.array(yhat).reshape((-1, 1))
print("Total Accuracy is :", 1 - np.count_nonzero(yhat - Y_test) / Y_test.shape[0])
|
python
|
import os
import unittest
from conans.model.env_info import DepsEnvInfo
from conans.model.profile import Profile
from conans.model.settings import Settings
from conans.paths import CONANFILE
from conans.test.utils.tools import TestBufferConanOutput, TestClient
from conans.util.files import save
class MockSetting(str):
@property
def value(self):
return self
class MockCompiler(object):
def __init__(self, name, libcxx, version):
self.name = name
self.libcxx = libcxx
self.version = MockSetting(version)
def __repr__(self, *args, **kwargs): # @UnusedVariable
return self.name
class MockSettings(Settings):
def __init__(self, build_type="Release", os=None, arch=None,
compiler_name=None, libcxx=None, version=None):
self._build_type = build_type
self._libcxx = libcxx or "libstdc++"
self._os = os or "Linux"
self._arch = arch or "x86"
self._compiler = MockCompiler(compiler_name or "gcc", self._libcxx, version or "4.8")
@property
def build_type(self):
return self._build_type
@property
def libcxx(self):
return self._libcxx
@property
def os(self):
return MockSetting(self._os)
@property
def arch(self):
return MockSetting(self._arch)
@property
def compiler(self):
return self._compiler
class MockAndroidSettings(Settings):
@property
def os(self):
return "Android"
class BuildInfoMock(object):
@property
def lib_paths(self):
return ["path/to/lib1", "path/to/lib2"]
@property
def exelinkflags(self):
return ["-framework thing"]
@property
def sharedlinkflags(self):
return ["-framework thing2"]
@property
def include_paths(self):
return ["path/to/includes/lib1", "path/to/includes/lib2"]
@property
def defines(self):
return ["MYDEF1", "MYDEF2"]
@property
def libs(self):
return ["lib1", "lib2"]
@property
def cflags(self):
return ["cflag1"]
@property
def cxxflags(self):
return ["cxxflag1"]
class MockConanfile(object):
def __init__(self, settings):
self.settings = settings
self.output = TestBufferConanOutput()
@property
def deps_cpp_info(self):
return BuildInfoMock()
@property
def deps_env_info(self):
return DepsEnvInfo()
@property
def env_values_dicts(self):
return {}, {}
conanfile_scope_env = """
from conans import ConanFile
class AConan(ConanFile):
settings = "os"
requires = "Hello/0.1@lasote/testing"
def build(self):
self.run("SET" if self.settings.os=="Windows" else "env")
"""
conanfile_dep = """
from conans import ConanFile
class AConan(ConanFile):
name = "Hello"
version = "0.1"
def package_info(self):
self.env_info.PATH=["/path/to/my/folder"]
"""
class ProfilesEnvironmentTest(unittest.TestCase):
def setUp(self):
self.client = TestClient()
def build_with_profile_test(self):
self._create_profile("scopes_env", {},
{"CXX": "/path/tomy/g++_build", "CC": "/path/tomy/gcc_build"})
self.client.save({CONANFILE: conanfile_dep})
self.client.run("export . lasote/testing")
self.client.save({CONANFILE: conanfile_scope_env}, clean_first=True)
self.client.run("install . --build=missing --pr scopes_env")
self.client.run("build .")
self.assertRegexpMatches(str(self.client.user_io.out), "PATH=['\"]*/path/to/my/folder")
self._assert_env_variable_printed("CC", "/path/tomy/gcc_build")
self._assert_env_variable_printed("CXX", "/path/tomy/g++_build")
def _assert_env_variable_printed(self, name, value):
self.assertIn("%s=%s" % (name, value), self.client.user_io.out)
def _create_profile(self, name, settings, env=None):
env = env or {}
profile = Profile()
profile._settings = settings or {}
for varname, value in env.items():
profile.env_values.add(varname, value)
save(os.path.join(self.client.cache.profiles_path, name), "include(default)\n" + profile.dumps())
|
python
|
from abc import ABCMeta, abstractmethod
from argparse import ArgumentParser
from distutils import dir_util
import logging
from pathlib import Path
import platform
import sys
from typing import NoReturn
from hephaistos import backups, config, hashes, helpers, interactive, lua_mod, patchers, sjson_data
from hephaistos.config import LOGGER
from hephaistos.helpers import HadesNotFound, HUD, Scaling
class ParserBase(ArgumentParser):
"""Base parser for hosting shared behavior.
- Print help when user supplies invalid arguments.
- Shared arguments (verbosity, etc.).
`ParserBase` serves as the base class for both the main CLI and the actual subcommand parsers,
even if not defined as such (`BaseSubcommand` and children inherit from `ArgumentParser`).
Counter-intuitively, the defined subcommand parsers must NOT directly inherit from `ParserBase`.
This is due to how subparsers and parenting work in `argparse`:
- When initializing subparsers via `add_subparsers`:
- `parser_class` is provided as the base class to use for subcommand parsers.
- When adding subparsers via `add_parser`:
- A new instance of `parser_class` is instantiated.
- If `parents` are provided, the parents' arguments are copied into the `parser_class` instance.
- This new `parser_class` instance is the actual parser used for the subcommand.
This means the actual type of the subparser is ignored, and must NOT be the same as
`parser_class` to avoid argument conflicts while copying. This explains why only the main
Hephaistos CLI is declared as deriving from `ParserBase`, even though at runtime all parsers
(including `BaseSubcommand`) will inherit from `ParserBase`.
"""
VERBOSE_TO_LOG_LEVEL = {
0: logging.WARNING,
1: logging.INFO,
2: logging.DEBUG,
}
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
self.add_argument('-v', '--verbose', action='count', default=0,
help="verbosity level (none: errors only, '-v': info, '-vv': debug)")
self.add_argument('--hades-dir', default='.',
help="path to Hades directory (default: '.', i.e. current directory)")
self.add_argument('--no-modimporter', action='store_false', default=True, dest='modimporter',
help="do not use modimporter for registering / unregistering Hephaistos (default: use modimporter if available)")
def error(self, message) -> NoReturn:
"""Print help when user supplies invalid arguments."""
sys.stderr.write(f"error: {message}\n\n")
self.print_help()
sys.exit(2)
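def _subparser_parenting_sketch() -> None:
    """Illustrative sketch only, never called by Hephaistos: a minimal argparse
    example of the subparser/parenting behavior described in the ParserBase
    docstring. Shared options live on a separate parent parser, and add_parser()
    copies them into a fresh parser_class instance for each subcommand."""
    shared = ArgumentParser(add_help=False)
    shared.add_argument('-v', '--verbose', action='count', default=0)
    cli = ArgumentParser(prog='demo')
    subparsers = cli.add_subparsers(dest='subcommand', parser_class=ArgumentParser)
    patch = subparsers.add_parser('patch', parents=[shared])
    patch.add_argument('width', type=int)
    args = cli.parse_args(['patch', '-v', '3840'])
    assert args.verbose == 1 and args.width == 3840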
CONTENT_DIR_PATH_WINDOWS_LINUX = 'Content'
CONTENT_DIR_PATH_MACOS = 'Game.macOS.app/Contents/Resources/Content'
CONTENT_DIR_PATH = CONTENT_DIR_PATH_MACOS if platform.system() == 'Darwin' else CONTENT_DIR_PATH_WINDOWS_LINUX
class Hephaistos(ParserBase):
"""Hephaistos entry point. Main parser for hosting the individual subcommands."""
def __init__(self, **kwargs) -> None:
super().__init__(prog=config.HEPHAISTOS_NAME, description="Hephaistos CLI", **kwargs)
subparsers = self.add_subparsers(parser_class=ParserBase,
help="one of:", metavar='subcommand', dest='subcommand')
self.subcommands: dict[str, BaseSubcommand] = {
'patch': PatchSubcommand(),
'restore': RestoreSubcommand(),
'status': StatusSubcommand(),
'version': VersionSubcommand(),
}
for name, subcommand in self.subcommands.items():
subparsers.add_parser(name, parents=[subcommand],
description=subcommand.description, help=subcommand.description)
self.__start()
def __start(self) -> None:
raw_args = sys.argv[1:]
# if no argument is provided, enter interactive mode
if len(raw_args) == 0:
self.__interactive(raw_args)
args = self.parse_args(raw_args)
# handle global args
self.__handle_global_args(args)
try:
# handle subcommand args via SubcommandBase.dispatch handler
args.dispatch(**vars(args))
except Exception as e:
LOGGER.exception(e) # log any unhandled exception
# if in interactive mode, loop until user manually closes
self.__restart() if config.interactive_mode else self.__end()
def __interactive(self, raw_args: list[str]) -> None:
config.interactive_mode = True
interactive.clear()
self.__configure_hades_dir('.')
try:
msg = f"""Hi! This interactive wizard will help you to set up Hephaistos.
Note: while Hephaistos can be used in interactive mode for basic usage, you will need to switch to non-interactive mode for any advanced usage. See the README for more details.
{helpers.check_version()}
"""
print(msg)
available_subcommands = {
subcommand: helpers.capitalize(self.subcommands[subcommand].description)
for subcommand in ['patch', 'restore', 'status']
}
subcommand = interactive.pick(
add_option=interactive.EXIT_OPTION,
**available_subcommands,
)
raw_args.append(subcommand)
if subcommand == 'patch':
choice = interactive.pick(
common219="Select from common 21:9 resolutions",
common329="Select from common 32:9 resolutions",
common489="Select from common 48:9 / triple screen resolutions",
manual="Input resolution manually",
)
if choice == 'common219':
(width, height) = interactive.pick(
prompt="Select resolution:",
options=[
'2560 x 1080',
'3440 x 1440',
'3840 x 1600',
'5120 x 2160',
],
).split(' x ')
elif choice == 'common329':
(width, height) = interactive.pick(
prompt="Select resolution:",
options=[
'3840 x 1080',
'5120 x 1440',
],
).split(' x ')
elif choice == 'common489':
(width, height) = interactive.pick(
prompt="Select resolution:",
options=[
'5760 x 1080',
'7680 x 1440',
],
).split(' x ')
else:
width = interactive.input_number("Width: ")
height = interactive.input_number("Height: ")
print()
raw_args.append(width)
raw_args.append(height)
choice = interactive.pick(
prompt="Select HUD preference (for 32:9, try out both options and see what you prefer!):",
expand="Expand HUD horizontally (recommended for 21:9)",
center="Keep HUD in the center (recommended for 48:9 / triple screen)",
)
raw_args.append('--hud')
raw_args.append(choice)
raw_args.append('-v') # auto-enable verbose mode
except interactive.InteractiveCancel:
self.__restart(prompt_user=False)
except interactive.InteractiveExit:
self.__end()
def __handle_global_args(self, args: list[str]) -> None:
# logging verbosity level
level = ParserBase.VERBOSE_TO_LOG_LEVEL[min(args.verbose, 2)]
LOGGER.setLevel(level)
# hades_dir
self.__configure_hades_dir(args.hades_dir)
# modimporter
if args.modimporter:
config.modimporter = helpers.try_get_modimporter()
else:
LOGGER.info("Using '--no-modimporter': will not run 'modimporter', even if available")
def __configure_hades_dir(self, hades_dir_arg: str) -> None:
# if we are on MacOS and running PyInstaller executable and defaulting
# to current directory, force working directory to be the one containing
# the executable
# this is a kludge around MacOS calling executables from the user home
# rather than the current directory when double-clicked on from Finder
if platform.system() == 'Darwin' and getattr(sys, 'frozen', False) and hasattr(sys, '_MEIPASS') and hades_dir_arg == '.':
hades_dir_arg = Path(sys.argv[0]).parent
LOGGER.debug(f"Running MacOS executable from Finder: forced working directory to {hades_dir_arg}")
config.hades_dir = Path(hades_dir_arg)
try:
helpers.is_valid_hades_dir(config.hades_dir)
config.content_dir = config.hades_dir.joinpath(CONTENT_DIR_PATH)
except HadesNotFound as e:
LOGGER.error(e)
hades_dirs = helpers.try_detect_hades_dirs()
if len(hades_dirs) > 0:
advice = '\n'.join(f" - {hades_dir}" for hades_dir in hades_dirs)
else:
advice = " - Could not auto-detect any Hades directory."
msg = f"""Hephaistos does not seem to be located in the Hades directory:
{advice}
Please move Hephaistos directly to the Hades directory.
If you know what you're doing, you can also re-run with '--hades-dir' to manually specify Hades directory while storing Hephaistos elsewhere."""
LOGGER.error(msg)
self.__end(1, prompt_user=config.interactive_mode)
def __restart(self, prompt_user=True) -> None:
if prompt_user:
interactive.any_key("\nPress any key to continue...")
interactive.clear()
self.__start()
def __end(self, exit_code=None, prompt_user=False) -> None:
if prompt_user:
interactive.any_key("\nPress any key to exit...")
sys.exit(exit_code)
class BaseSubcommand(ArgumentParser, metaclass=ABCMeta):
def __init__(self, description: str, **kwargs) -> None:
super().__init__(add_help=False, **kwargs)
self.description = description
self.set_defaults(dispatch=self.handler)
@abstractmethod
def handler(self, **kwargs) -> None:
raise NotImplementedError("Subclasses must implement a handler method.")
class PatchSubcommand(BaseSubcommand):
def __init__(self, **kwargs) -> None:
super().__init__(description="patch Hades using Hephaistos", **kwargs)
self.add_argument('width', type=int, help="display resolution width")
self.add_argument('height', type=int, help="display resolution height")
self.add_argument('--scaling', default=Scaling.HOR_PLUS,
choices=[Scaling.HOR_PLUS.value, Scaling.PIXEL_BASED.value],
help="scaling type (default: 'hor+')")
self.add_argument('--hud', default=HUD.EXPAND,
choices=[HUD.EXPAND.value, HUD.CENTER.value],
help="HUD mode (default: 'expand')")
self.add_argument('--no-custom-resolution', action='store_false', default=True, dest='custom_resolution',
help="do not use custom resolution (default: use custom resolution, bypassing monitor resolution detection)")
self.add_argument('-f', '--force', action='store_true',
help="force patching, bypassing hash check and removing previous backups (useful after game update)")
def handler(self, width: int, height: int, scaling: Scaling, hud: HUD, custom_resolution: bool, force: bool, **kwargs) -> None:
"""Compute viewport depending on arguments, then patch all needed files and install Lua mod.
If using '--force', discard backups, hashes and SJSON data, and uninstall Lua mod."""
helpers.configure_screen_variables(width, height, scaling)
LOGGER.info(f"Using resolution: {config.resolution.width, config.resolution.height}")
LOGGER.info(f"Using '--scaling={scaling}': computed patch viewport {config.new_screen.width, config.new_screen.height}")
config.center_hud = True if hud == HUD.CENTER else False
msg = f"Using '--hud={hud}': HUD will be kept in the center of the screen" if config.center_hud else f"Using '--hud={hud}': HUD will be expanded horizontally"
LOGGER.info(msg)
if not custom_resolution:
LOGGER.info("Using '--no-custom-resolution': will not bypass monitor resolution detection")
config.custom_resolution = custom_resolution
if force:
LOGGER.info("Using '--force': will repatch on top of existing files in case of hash mismatch and store new backups / hashes")
config.force = True
# run 'modimporter --clean' (if available) to restore everything before patching
if config.modimporter:
LOGGER.info(f"Running 'modimporter --clean' to restore original state before patching")
helpers.run_modimporter(config.modimporter, clean_only=True)
try:
patchers.patch_engines()
patchers.patch_sjsons()
patchers.patch_profile_sjsons()
lua_mod.install()
except hashes.HashMismatch as e:
LOGGER.error(e)
if config.interactive_mode:
LOGGER.error("It looks like the game was updated. Do you wish to discard previous backups and re-patch Hades from its current state?")
choice = interactive.pick(options=['Yes', 'No',], add_option=None)
if choice == 'Yes':
self.handler(width, height, scaling, hud, custom_resolution, force=True)
else:
LOGGER.error("Was the game updated? Re-run with '--force' to discard previous backups and re-patch Hades from its current state.")
except (LookupError, FileExistsError) as e:
LOGGER.error(e)
class RestoreSubcommand(BaseSubcommand):
def __init__(self, **kwargs) -> None:
super().__init__(description="restore Hades to its pre-Hephaistos state", **kwargs)
def handler(self, **kwargs) -> None:
"""Restore backups, discard hashes and SJSON data, uninstall Lua mod."""
# run 'modimporter --clean' (if available) to unregister Hephaistos
if config.modimporter:
LOGGER.info(f"Running 'modimporter --clean' to unregister Hephaistos")
helpers.run_modimporter(config.modimporter, clean_only=True)
backups.restore()
hashes.discard()
sjson_data.discard()
lua_mod.uninstall()
# clean up Hephaistos data dir if empty (using standalone executable)
if not any(config.HEPHAISTOS_DATA_DIR.iterdir()):
dir_util.remove_tree(str(config.HEPHAISTOS_DATA_DIR))
LOGGER.info(f"Cleaned up empty directory '{config.HEPHAISTOS_DATA_DIR}'")
# re-run modimporter (if available) to re-register other mods
if config.modimporter:
LOGGER.info(f"Running 'modimporter' to re-register other mods")
helpers.run_modimporter(config.modimporter)
class StatusSubcommand(BaseSubcommand):
def __init__(self, **kwargs) -> None:
super().__init__(description="check current Hades / Hephaistos status", **kwargs)
def handler(self, **kwargs) -> None:
"""Check Hades and Hephaistos files and report back on probable current status."""
hephaistos_data_checks = [
backups.status(),
hashes.status(),
sjson_data.status(),
]
hades_engine_checks = [
patchers.patch_engines_status(),
]
hades_lua_checks = [
lua_mod.status(),
]
        if all(hephaistos_data_checks) and all(hades_engine_checks) and all(hades_lua_checks):
print(f"Hades is correctly patched with Hephaistos.")
elif all(hephaistos_data_checks) and all(hades_engine_checks) and config.modimporter:
print(f"Hades was patched with Hephaistos, but Lua hook not found in Hades files. Was there an error while running 'modimporter'? Try to re-run 'modimporter' or re-patch Hephaistos.")
elif all(hephaistos_data_checks):
print(f"Hades was patched with Hephaistos, but Hades files were modified. Was the game updated?")
elif all(hades_engine_checks):
print(f"Hades was patched with Hephaistos, but Hephaistos data files were lost. Was 'hephaistos-data' (or part of it) deleted?")
else:
print(f"Hades is not patched with Hephaistos.")
class VersionSubcommand(BaseSubcommand):
def __init__(self, **kwargs) -> None:
super().__init__(description="check Hephaistos version and if an update is available", **kwargs)
def handler(self, **kwargs) -> None:
"""Check Hephaistos version and if an update is available."""
print(helpers.check_version())
|
python
|
############################################################
# -*- coding: utf-8 -*-
#
# # # # # # #
# ## ## # ## # #
# # # # # # # # # # #
# # ## # ## ## ######
# # # # # # #
#
# Python-based Tool for interaction with the 10micron mounts
# GUI with PyQT5 for python
#
# written in python3, (c) 2019-2021 by mworion
#
# Licence APL2.0
#
###########################################################
# standard libraries
from queue import Queue
# external packages
from PyQt5.QtCore import QObject, pyqtSignal, QThreadPool
from skyfield.api import wgs84, load, Loader, Angle
import numpy as np
# local import
class Power:
class PowerSignals(QObject):
version = pyqtSignal()
signals = PowerSignals()
data = {}
@staticmethod
def sendDew(port=None, value=None):
return True
@staticmethod
def togglePowerPort(port=None):
return True
@staticmethod
def togglePowerPortBoot(port=None):
return True
@staticmethod
def toggleHubUSB():
return True
@staticmethod
def togglePortUSB(port=None):
return True
@staticmethod
def toggleAutoDew():
return True
@staticmethod
def reboot():
return True
@staticmethod
def sendAdjustableOutput(value=None):
return True
class Mount(QObject):
class MountSatellite:
class Name:
name = ''
jdStart = 1
jdEnd = 1
flip = False
message = ''
altitude = None
azimuth = None
tleParams = Name()
@staticmethod
def setTLE(line0='', line1='', line2=''):
return
@staticmethod
def slewTLE(julD=0, duration=0):
return
@staticmethod
def calcTLE():
return
@staticmethod
def setTrackingOffsets(Time=None,
RA=None,
DEC=None,
DECcorr=None):
return
@staticmethod
def clearTrackingOffsets():
return
class MountFirmware:
product = 'test'
hardware = 'test'
vString = '12345'
date = 'test'
time = 'test'
@staticmethod
def checkNewer(a):
return True
class MountGeometry:
offNorth = 0
offEast = 0
offVert = 0
domeRadius = 0
offGemPlate = 0
class MountSetting:
meridianLimitSlew = 0
meridianLimitTrack = 0
horizonLimitHigh = 0
horizonLimitLow = 0
timeToFlip = 0
statusUnattendedFlip = False
statusDualAxisTracking = False
statusRefraction = False
refractionTemp = 0
refractionPress = 0
wakeOnLan = False
typeConnection = 1
slewRateMin = 0
slewRateMax = 1
webInterfaceStat = True
UTCExpire = None
gpsSynced = True
@staticmethod
def timeToMeridian():
return 0
@staticmethod
def setMeridianLimitSlew():
return True
@staticmethod
def setMeridianLimitTrack():
return True
@staticmethod
def setHorizonLimitLow():
return True
@staticmethod
def setHorizonLimitHigh():
return True
@staticmethod
def setSlewRate():
return True
@staticmethod
def setUnattendedFlip():
return True
@staticmethod
def setDualAxisTracking():
return True
@staticmethod
def setRefraction():
return True
@staticmethod
def setWOL():
return True
@staticmethod
def setSlewSpeedMax():
return True
@staticmethod
def setSlewSpeedHigh():
return True
@staticmethod
def setSlewSpeedMed():
return True
@staticmethod
def setSlewSpeedLow():
return True
@staticmethod
def checkRateSidereal():
return False
@staticmethod
def checkRateLunar():
return False
@staticmethod
def checkRateSolar():
return False
@staticmethod
def setLunarTracking():
return True
@staticmethod
def setSolarTracking():
return True
@staticmethod
def setSiderealTracking():
return True
class MountSignals(QObject):
locationDone = pyqtSignal()
settingDone = pyqtSignal()
pointDone = pyqtSignal()
firmwareDone = pyqtSignal()
calcTLEdone = pyqtSignal()
getTLEdone = pyqtSignal()
alert = pyqtSignal()
slewFinished = pyqtSignal()
calcTrajectoryDone = pyqtSignal()
trajectoryProgress = pyqtSignal()
class MountObsSite:
class Location:
latitude = None
longitude = None
elevation = None
Alt = None
Az = None
haJNowTarget = None
decJNowTarget = None
piersideTarget = None
angularPosRA = None
angularPosDEC = None
raJNow = None
decJNow = None
haJNow = None
AzTarget = None
AltTarget = None
pierside = None
timeSidereal = Angle(hours=12)
location = wgs84.latlon(latitude_degrees=0, longitude_degrees=0, elevation_m=0)
ts = load.timescale(builtin=True)
timeJD = ts.now()
timeDiff = 0
loader = Loader('tests/workDir', verbose=False)
status = 0
statusSat = 'E'
UTC2TT = 69.184
@staticmethod
def setLongitude(a):
return True
@staticmethod
def setLatitude(a):
return True
@staticmethod
def setElevation(a):
return True
@staticmethod
def startTracking():
return True
@staticmethod
def stopTracking():
return True
@staticmethod
def park():
return True
@staticmethod
def unpark():
return True
@staticmethod
def flip():
return True
@staticmethod
def stop():
return True
@staticmethod
def stopMoveAll():
return True
@staticmethod
def moveNorth():
return True
@staticmethod
def moveEast():
return True
@staticmethod
def moveWest():
return True
@staticmethod
def moveSouth():
return True
@staticmethod
def startSlewing():
return True
@staticmethod
def adjustClock(a):
return True
@staticmethod
def setTargetAltAz(alt_degrees=0,
az_degrees=0):
return True
@staticmethod
def setTargetRaDec(ra_hours=0,
dec_degrees=0):
return True
@staticmethod
def setLocation(loc):
return True
signals = MountSignals()
obsSite = MountObsSite()
geometry = MountGeometry()
firmware = MountFirmware()
setting = MountSetting()
satellite = MountSatellite()
bootMount = None
shutdown = None
host = None
@staticmethod
def getLocation():
return True
@staticmethod
def calcTLE():
return
@staticmethod
def getTLE():
return
@staticmethod
def progTrajectory():
return
@staticmethod
def startClockTimer():
return
@staticmethod
def stopClockTimer():
return
class Automation:
installPath = None
@staticmethod
def uploadTLEData():
return
@staticmethod
def uploadMPCData(comets=False):
return
@staticmethod
def uploadEarthRotationData():
return
class Dome:
class DomeSignals(QObject):
slewFinished = pyqtSignal()
domeShutterWidth = 0.6
offGEM = 0
offLAT = 0
offNorth = 0
offEast = 0
domeRadius = 1.0
data = {}
signals = DomeSignals()
@staticmethod
def abortSlew():
return
@staticmethod
def openShutter():
return
@staticmethod
def closeShutter():
return
@staticmethod
def slewDome(azimuth=0,
altitude=0,
follow=False):
return
@staticmethod
def followDome(azimuth=0,
altitude=0):
return
@staticmethod
def avoidFirstOvershoot():
return
class Relay:
class RelaySignals(QObject):
statusReady = pyqtSignal()
signals = RelaySignals()
class Camera:
class CameraSignals(QObject):
saved = pyqtSignal()
signals = CameraSignals()
class Astrometry:
class AstrometrySignals(QObject):
done = pyqtSignal()
signals = AstrometrySignals()
class OnlineWeather:
class OnlineWeatherSignals(QObject):
done = pyqtSignal()
signals = OnlineWeatherSignals()
class Data:
    # default horizon point list so isAboveHorizon() can run before any points are loaded
    horizonP = []
@staticmethod
def loadHorizonP(fileName=''):
return
@staticmethod
def saveHorizonP(fileName=''):
return
def isAboveHorizon(self, point):
"""
isAboveHorizon calculates for a given point the relationship to the actual horizon
and determines if this point is above the horizon line. for that there will be a
linear interpolation for the horizon line points.
:param point:
:return:
"""
if point[1] > 360:
point = (point[0], 360)
if point[1] < 0:
point = (point[0], 0)
x = range(0, 361)
if self.horizonP:
xRef = [i[1] for i in self.horizonP]
yRef = [i[0] for i in self.horizonP]
else:
xRef = [0]
yRef = [0]
y = np.interp(x, xRef, yRef)
if point[0] > y[int(point[1])]:
return True
else:
return False
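# Illustrative note on the interpolation above (sketch, not part of the mocks):
# with horizon points [(10, 0), (30, 180), (10, 360)], the interpolated horizon
# altitude at az=90 is np.interp(90, [0, 180, 360], [10, 30, 10]) == 20.0, so a
# point at (alt=25, az=90) would count as above the horizon line.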
class App(QObject):
config = {'mainW': {}}
deviceStat = {}
update10s = pyqtSignal()
update1s = pyqtSignal()
update3s = pyqtSignal()
update30s = pyqtSignal()
update10m = pyqtSignal()
update30m = pyqtSignal()
update1h = pyqtSignal()
start1s = pyqtSignal()
start3s = pyqtSignal()
start5s = pyqtSignal()
start10s = pyqtSignal()
sendSatelliteData = pyqtSignal()
updateDomeSettings = pyqtSignal()
drawHorizonPoints = pyqtSignal()
redrawHemisphere = pyqtSignal()
message = pyqtSignal(str, int)
messageQueue = Queue()
mount = Mount()
power = Power()
dome = Dome()
relay = Relay()
data = Data()
camera = Camera()
automation = Automation()
astrometry = Astrometry()
onlineWeather = OnlineWeather()
ephemeris = load('tests/testData/de421_23.bsp')
mwGlob = {'modelDir': 'tests/workDir/model',
'imageDir': 'tests/workDir/image',
'dataDir': 'tests/workDir/data',
'configDir': 'tests/workDir/config',
}
uiWindows = {}
threadPool = QThreadPool()
|
python
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# ARNEGSIS
from openerp import fields,models, api
import cx_Oracle
# -------------------------------------- order log (bitacora)
class analisis_bitacora_pedidos(models.Model):
_name = 'analisis.bitacora.pedidos'
##_order = 'codigo_ped , indice_det'
##_rec_name = 'pedido'
clase = fields.Char(string='Clase')
codigo = fields.Char(string='Articulo')
descripcion = fields.Char(string='Descripcion')
dia = fields.Date(string='Fecha')
pedido = fields.Char(string='Pedido')
vendedor= fields.Char(string='Vendedor')
cli_des = fields.Char(string='Cliente')
cerrado = fields.Char(string='Cerrado')
cantidad = fields.Float(string='Cant Ini')
estado = fields.Char(string='Estado Ini')
bodega = fields.Char(string='Bodega')
total = fields.Float(string='Total')
disponible_cia = fields.Float(string='Disp')
reservado_cia = fields.Float(string='Res')
disponible_bod = fields.Float(string='Dis Bod')
reservado_bod = fields.Float(string='Res Bod')
    pedidos_id = fields.Many2one('analisis.pedidos',string="Pedidos",required=True,ondelete="cascade")
cia = fields.Char(string='Compañia')
periodo = fields.Char(string='Periodo')
vendedor_cod = fields.Char(string='Cod Vend')
cantidadpedida = fields.Float(string='Can Pedida')
cantidadfacturada = fields.Float(string='Can Facturada')
estadofinal = fields.Char(string='Estado Final')
es_venta_perdida = fields.Char(string='Es venta perdida')
inicio = fields.Datetime(string='Inicio')
fin = fields.Datetime(string='Fin')
horas = fields.Float(string='Horas')
calificacion = fields.Char(string='Calificacion')
user_id = fields.Many2one(string='usuario')
motivo_cierre= fields.Char(string='Motivo Cierre')
@api.multi
def _busca_usuario(self,cod):
query = "select id from res_users where codigo = '"+str(cod)+"' "
self._cr.execute(query)
res = self._cr.dictfetchall()
if res != []:
iden = res[0]['id']
else:
iden = False
return iden
@api.multi
def _busca_pedido_cab(self,cod):
query = "select id from analisis_pedidos where pedido = '"+str(cod)+"' "
self._cr.execute(query)
res = self._cr.dictfetchall()
if res != []:
iden = res[0]['id']
else:
iden = False
return iden
@api.multi
def fecha(self,dt):
if dt != None:
FCH = dt[6:10]+'-'+dt[3:5]+'-'+dt[0:2]+' '+dt[11:19]
return FCH
        return dt  # dt is None here; keep returning None as before
@api.multi
def fecha2(self,dt):
if dt != None:
FCH = dt[6:10]+'-'+dt[3:5]+'-'+dt[0:2]
return FCH
        return dt  # dt is None here; keep returning None as before
@api.multi
def import_data_bit(self,):
self._cr.execute("TRUNCATE TABLE analisis_bitacora_pedidos cascade")#
self._cr.commit()
dct = {}
conn_str='openside/[email protected]:1521/proqimsa'
db_conn = cx_Oracle.connect(conn_str)
cursor = db_conn.cursor()
cursor.execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS' NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF'")
cursor.execute('select CLASE,CODIGO,DECRIPCION,DIA,PEDIDO,VENDEDOR,CLI_DES,CERRADO,CANTIDAD,ESTADO,BODEGA,TOTAL,DISPONIBLE_CIA,RESERVADO_CIA,DISPONIBLE_BOD,RESERVADO_BOD,CIA,PERIODO,VENDEDOR_COD,CANTIDADPEDIDA,CANTIDADFACTURADA,ESTADOFINAL,ES_VENTA_PERDIDA,INICIO,FIN,HORAS,CALIFICACION,MOTIVO_CIERRE from WEB_SIDE_BIT_PED t')
registros = cursor.fetchall()
for r in registros:
mi_lista = list(r)
dct = {'clase':mi_lista[0],
'codigo':mi_lista[1],
'descripcion':mi_lista[2],
'dia':self.fecha2(mi_lista[3]),
'pedido':mi_lista[4],
'vendedor':mi_lista[5],
'cli_des':mi_lista[6],
'cerrado':mi_lista[7],
'cantidad':mi_lista[8],
'estado':mi_lista[9],
'bodega':mi_lista[10],
'total':mi_lista[11],
'disponible_cia':mi_lista[12],
'reservado_cia':mi_lista[13],
'disponible_bod':mi_lista[14],
'reservado_bod':mi_lista[15],
'pedidos_id':int(self._busca_pedido_cab(mi_lista[4])),
'cia':mi_lista[16],
'periodo':mi_lista[17],
'vendedor_cod':mi_lista[18],
'cantidadpedida':mi_lista[19],
'cantidadfacturada':mi_lista[20],
'estadofinal':mi_lista[21],
'es_venta_perdida':mi_lista[22],
'inicio':self.fecha(mi_lista[23]),
'fin':self.fecha(mi_lista[24]),
'horas':mi_lista[25],
'calificacion':mi_lista[26],
'user_id': int(self._busca_usuario(mi_lista[18])) or False,
'motivo_cierre':mi_lista[27],
}
self.create(dct)
return True
analisis_bitacora_pedidos()
#-----------------------------------------------
#-----------------------------------
#- *  ORDER ANALYSIS REPORT  * -
#-----------------------------------
class analisis_pedidos_det(models.Model):
_name = 'analisis.pedidos.det'
##_order = 'codigo_ped , indice_det'
##_rec_name = 'pedido'
codigo_ped = fields.Char(string='CODIGO_PED')
estado_linea = fields.Char(string='Est')
articulo = fields.Char(string='Articulo')
descripcion = fields.Char(string='Descripcion')
bodega = fields.Char(string='Bod')
unidad = fields.Char(string='Un')
cantidad = fields.Float(string='Cant')
precio = fields.Float(string='Precio')
total_art = fields.Float(string='Total')
existencia_actual = fields.Float(string='Existencia')
indice_det = fields.Float(string='Indice')
    pedidos_id = fields.Many2one('analisis.pedidos',string="Pedidos",required=True,ondelete="cascade")
@api.multi
def _busca_pedido_cab(self,cod):
query = "select id from analisis_pedidos where pedido = '"+str(cod)+"' "
self._cr.execute(query)
res = self._cr.dictfetchall()
if res != []:
iden = res[0]['id']
else:
iden = False
return iden
@api.multi
def import_data_det(self,):
self._cr.execute("TRUNCATE TABLE analisis_pedidos_det cascade")#
self._cr.commit()
dct = {}
conn_str='openside/[email protected]:1521/proqimsa'
db_conn = cx_Oracle.connect(conn_str)
cursor = db_conn.cursor()
cursor.execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS' NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF'")
cursor.execute('SELECT CODIGO_PED,ESTADO_LINEA,ARTICULO,DESCRIPCION,BODEGA,UNIDAD,CANTIDAD,PRECIO,TOTAL_ART,EXISTENCIA_ACTUAL,INDICE_DET FROM openside.WEB_REP_ATENCION_PEDIDOS_DET')
registros = cursor.fetchall()
for r in registros:
mi_lista = list(r)
dct = {'codigo_ped':mi_lista[0],
'estado_linea':mi_lista[1],
'articulo':mi_lista[2],
'descripcion':mi_lista[3],
'bodega':mi_lista[4],
'unidad':mi_lista[5],
'cantidad':mi_lista[6],
'precio':mi_lista[7],
'total_art':mi_lista[8],
'existencia_actual':mi_lista[9],
'indice_det':mi_lista[10],
'pedidos_id':int(self._busca_pedido_cab(mi_lista[0]))
}
self.create(dct)
return True
analisis_pedidos_det()
class analisis_pedidos(models.Model):
_name = 'analisis.pedidos'
_order = 'fecha_digita_pedido asc'
_rec_name = 'pedido'
compania = fields.Char(string='COMPAÑIA')
    pedido = fields.Char(string='NUMERO PEDIDO')  # order number/code
fecha_digita_pedido = fields.Datetime(string='FECHA INGRESO PEDIDO')
mes = fields.Char(string='MES')
anio = fields.Char(string='AÑO')
cliente_codigo = fields.Char(string='CLIENTE CODIGO')
cliente_descripcion = fields.Char(string='CLIENTE DESCRIPCION')
estado_pedido = fields.Char(string='ESTADO PEDIDO')
vendedor = fields.Char(string='VENDEDOR')
cod_vendedor = fields.Char(string='CODIGO VENDEDOR')
localidad = fields.Char(string='LOCALIDAD')
#fecha_pedido = fields.Date(string='FECHA PEDIDO')
dias_limite = fields.Char(string='DIAS LIMITE')
eficiencia = fields.Float(string='A tiempo',group_operator="avg")
no_eficiencia = fields.Float(string='Fuera de tiempo',group_operator="avg")
fecha_ultima_factura = fields.Datetime(string='ULTIMA FACTURA')
fecha_ultima_guia = fields.Date(string='ULTIMA GUIA')
fecha_ultima_planificacion = fields.Date(string='ULTIMA PLANIFICACION')
diferencia = fields.Char(string='DIFERENCIA')
atencion = fields.Char(string='ATENCION')
user_id = fields.Many2one(string='usuario')
refacturado = fields.Char(string='REFACTURADO')
oficina = fields.Char(string='OFICINA')
line_id = fields.One2many('analisis.pedidos.det','pedidos_id',string="Detalle")
line_id_bit = fields.One2many('analisis.bitacora.pedidos','pedidos_id',string="Bitacora")
subtotal = fields.Float(string='Subtotal')
iva = fields.Float(string='Iva')
total = fields.Float(string='Total')
motivo_cierre = fields.Char(string='Motivo Cierre')
estado_cerrado = fields.Char(string='Cerrado')
@api.multi
def _busca_usuario(self,cod):
query = "select id from res_users where codigo = '"+str(cod)+"' "
self._cr.execute(query)
res = self._cr.dictfetchall()
if res != []:
iden = res[0]['id']
else:
iden = False
return iden
@api.multi
def fecha(self,dt):
if dt != None:
FCH = dt[6:10]+'-'+dt[3:5]+'-'+dt[0:2]+' '+dt[11:19]
return FCH
        return ''  # dt is None here; keep returning an empty string as before
@api.multi
def mes_cnv(self,psc):
dct=['Enero','Febrero','Marzo','Abril','Mayo','Junio','Julio','Agosto','Septiembre','Octubre','Noviembre','Diciembre']
return dct[int(psc)-1]
@api.multi
def import_data(self,):
print 'Importar pedidos'
self._cr.execute("TRUNCATE TABLE analisis_pedidos cascade")
self._cr.commit()
dct = {}
conn_str='openside/[email protected]:1521/proqimsa'
db_conn = cx_Oracle.connect(conn_str)
cursor = db_conn.cursor()
cursor.execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS' NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF'")
cursor.execute('SELECT compania,pedido,fecha_digitacion_pedido,mes,anio,cliente_codigo,cliente_descripcion,estado_pedido,estado_cerrado,vendedor,cod_vendedor, localidad,dias_limite,eficiencia,no_eficiencia,fecha_ultimafactura, fecha_ultimaguia,fecha_ultimaplanificacion,diferencia,a_tiempo,refacturado,oficina,subtotal,iva,total,motivo_cierre FROM openside.WEB_REP_ATENCION_PEDIDOS_2')
registros = cursor.fetchall()
for r in registros:
mi_lista = list(r)
dct = {'compania':mi_lista[0],
'pedido':mi_lista[1],
'fecha_digita_pedido':self.fecha(mi_lista[2]),
'mes':self.mes_cnv(mi_lista[3]),
'anio':mi_lista[4],
'cliente_codigo':mi_lista[5],
'cliente_descripcion':mi_lista[6],
'estado_pedido':mi_lista[7],
'estado_cerrado':mi_lista[8],
'vendedor':mi_lista[9],
'cod_vendedor':mi_lista[10],
'localidad':mi_lista[11],
# 'fecha_pedido':mi_lista[12],
'dias_limite':mi_lista[12],
'eficiencia':mi_lista[13],
'no_eficiencia':mi_lista[14],
'fecha_ultima_factura':self.fecha(mi_lista[15]) or False,
'fecha_ultima_guia':self.fecha(mi_lista[16]) or False,
'fecha_ultima_planificacion':self.fecha(mi_lista[17]) or False,
'diferencia':mi_lista[18],
'atencion':mi_lista[19],
'user_id': int(self._busca_usuario(mi_lista[10])) or False,
'refacturado':mi_lista[20],
'oficina':mi_lista[21],
'subtotal':mi_lista[22],
'iva':mi_lista[23],
'total':mi_lista[24],
'motivo_cierre':mi_lista[25],
}
self.create(dct)
obj = self.env['analisis.pedidos.det']
obj.import_data_det()
obj2 = self.env['analisis.bitacora.pedidos']
obj2.import_data_bit()
return True
analisis_pedidos()
|
python
|
import requests
import os
if 'TPU_DRIVER_MODE' not in globals():
url = 'http://' + os.environ['COLAB_TPU_ADDR'].split(':')[0] + ':8475/requestversion/tpu_driver0.1-dev20191206'
resp = requests.post(url)
TPU_DRIVER_MODE = 1
# The following is required to use TPU Driver as JAX's backend.
from jax.config import config
config.FLAGS.jax_xla_backend = "tpu_driver"
config.FLAGS.jax_backend_target = "grpc://" + os.environ['COLAB_TPU_ADDR']
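# Optional sanity check (sketch, assuming a Colab TPU runtime): after pointing
# JAX at the TPU driver, the reported devices should be TPU cores, not CPU.
import jax
print(jax.devices())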
|
python
|
"""usersエンティティ用モジュール"""
from dataclasses import dataclass
from decimal import Decimal
from typing import Optional
from aws.dynamodb.base import Base
from aws.exceptions import DynamoDBError
ALL = 0
WEST_JR = 1
HANKYU = 2
HANSHIN = 3
@dataclass
class DelayInfoMessages(Base):
"""鉄道遅延情報メッセージ群クラス"""
west_jr: str
hankyu: str
hanshin: str
all: str
def extract_message(self, company_type: int) -> str:
"""鉄道遅延情報メッセージ群から対象の鉄道遅延情報メッセージを抽出する
Args:
company_type: 運営会社種類
Raises:
DynamoDBError: 運営会社種類が正しく設定されていない
Returns:
対象の鉄道遅延情報メッセージ
"""
if company_type == WEST_JR:
extracted_message = self.west_jr
elif company_type == HANKYU:
extracted_message = self.hankyu
elif company_type == HANSHIN:
extracted_message = self.hanshin
elif company_type == ALL:
extracted_message = self.all
else:
            raise DynamoDBError(f"The operating company type is not set correctly. company_type: {company_type}")
return extracted_message
@classmethod
def from_dict(cls, delay_info_messages: dict):
"""dict型のデータからDelayInfoMessagesインスタンスを生成する
Args:
delay_info_messages: dict型データ
Returns:
DelayInfoMessagesインスタンス
"""
return cls(delay_info_messages["west_jr"],
delay_info_messages["hankyu"],
delay_info_messages["hanshin"],
delay_info_messages["all"])
@dataclass
class User(Base):
"""ユーザクラス"""
user_id: str
created_time: Decimal
updated_time: Decimal
delay_info_messages: Optional[DelayInfoMessages]
@classmethod
def from_dict(cls, user: dict):
"""dict型のデータからUserインスタンスを生成する
Args:
user: dict型データ
Returns:
Userインスタンス
"""
delay_info_messages = user.get("delay_info_messages")
type_delay_info_messages = DelayInfoMessages.from_dict(
delay_info_messages) if delay_info_messages else None
return cls(user["user_id"],
user.get("created_time"),
user.get("updated_time"),
type_delay_info_messages)
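# Hedged usage sketch (not part of the module): build a User from the kind of
# dict DynamoDB would return and pick one operator's delay message. The literal
# values below are made up for illustration.
if __name__ == "__main__":
    user = User.from_dict({
        "user_id": "U001",
        "created_time": Decimal("1700000000"),
        "updated_time": Decimal("1700000100"),
        "delay_info_messages": {
            "west_jr": "JR West: delays on some lines",
            "hankyu": "Hankyu: on schedule",
            "hanshin": "Hanshin: on schedule",
            "all": "See individual lines",
        },
    })
    print(user.delay_info_messages.extract_message(WEST_JR))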
|
python
|
from typing import Optional, Iterable, Any
from platypush.message.event import Event
class DbusSignalEvent(Event):
"""
Event triggered when a signal is received on the D-Bus.
"""
def __init__(
self, bus: str, interface: str, sender: str, path: str, signal: str,
params: Optional[Iterable[Any]] = None, **kwargs
):
"""
:param bus: Bus type (``session`` or ``system``).
:param interface: Name of the interface associated to the signal.
:param sender: D-Bus name of the sender of the signal.
:param path: Path of the object associated to the signal.
:param signal: Signal name.
:param params: Signal payload.
"""
super().__init__(bus=bus, interface=interface, sender=sender,
path=path, signal=signal, params=params, **kwargs)
|
python
|
"""
https://github.com/invl/retry
"""
import random
import time
from functools import partial, wraps
from typing import Callable, Optional, ParamSpecArgs, ParamSpecKwargs, Tuple, Type, List, Any
# roughly sys.maxint / 2 from the original recipe; Python 3 has no sys.maxint, so use a fixed constant
from pydantic import Field, PositiveFloat, PositiveInt, validate_arguments
from sel4.utils.typeutils import AnyCallable, OptionalFloat, DictStrAny
_MAX_WAIT = 1_073_741_823
__all__ = ["retry", "retry_call"]
def __retry_internal(
func: Callable,
exceptions: Type[Exception] | Tuple[Type[Exception]],
tries=-1,
delay=0.0,
timeout_ms=_MAX_WAIT,
max_delay: OptionalFloat = None,
backoff=1.0,
jitter=0.0,
):
"""
Executes a function and retries it if it failed.
:param func: the function to execute.
:param exceptions: an exception or a tuple of exceptions to catch. default: Exception.
:param tries: the maximum number of attempts. default: -1 (infinite).
:param delay: initial delay between attempts. default: 0.
:param timeout_ms: max retries delay. default: _MAX_WAIT.
:param max_delay: the maximum value of delay. default: None (no limit).
:param backoff: multiplier applied to delay between attempts. default: 1 (no backoff).
:param jitter: extra seconds added to delay between attempts. default: 0.
fixed if a number, random if a range tuple (min, max)
:returns: the result of the func function.
"""
_tries, _delay = tries, delay
attempt_number = 1
start_time = int(round(time.time() * 1000))
while _tries:
try:
return func()
except exceptions as e:
_tries -= 1
if not _tries:
raise e
delay_since_first_attempt_ms = int(round(time.time() * 1000)) - start_time
if delay_since_first_attempt_ms > timeout_ms:
raise e
# LOGGER.warning('%s, retrying in %s seconds... (attempt #%d)', e, _delay, attempt_number)
time.sleep(_delay)
_delay *= backoff
if isinstance(jitter, tuple):
_delay += random.uniform(*jitter)
else:
_delay += jitter
if max_delay is not None:
_delay = min(_delay, max_delay)
attempt_number += 1
@validate_arguments
def retry(
    exceptions: Type[Exception] | Tuple[Type[Exception]] = Field(default=Exception),
tries: int = Field(default=-1),
delay: float = Field(default=0, ge=0),
max_delay: OptionalFloat = Field(default=None, ge=0.0),
timeout_ms: PositiveInt = Field(default=_MAX_WAIT),
backoff: PositiveFloat = Field(default=1.0),
jitter: PositiveFloat = Field(default=0.0),
):
"""Returns a retry decorator.
:param exceptions: an exception or a tuple of exceptions to catch. default: Exception.
:param tries: the maximum number of attempts. default: -1 (infinite).
:param delay: initial delay between attempts. default: 0.
:param max_delay: the maximum value of delay. default: None (no limit).
:param timeout_ms: max retries delay. default: _MAX_WAIT.
:param backoff: multiplier applied to delay between attempts. default: 1 (no backoff).
:param jitter: extra seconds added to delay between attempts. default: 0.
fixed if a number, random if a range tuple (min, max)
:returns: a retry decorator.
"""
@decorator
def retry_decorator(func, *f_args: ParamSpecArgs, **f_kwargs: ParamSpecKwargs):
args = f_args if f_args else list()
kwargs = f_kwargs if f_kwargs else dict()
return __retry_internal(
partial(func, *args, **kwargs),
exceptions=exceptions,
tries=tries,
delay=delay,
max_delay=max_delay,
timeout_ms=timeout_ms,
backoff=backoff,
jitter=jitter,
)
return retry_decorator
def decorator(caller):
"""Turns caller into a decorator.
Unlike decorator module, function signature is not preserved.
:param caller: caller(f, *args, **kwargs)
"""
def decor(f):
@wraps(f)
def wrapper(*args, **kwargs):
return caller(f, *args, **kwargs)
return wrapper
return decor
# class RetryError(Exception):
# """
# A RetryError encapsulates the last Attempt instance right before giving up.
# """
#
# def __init__(self, last_attempt):
# self.last_attempt = last_attempt
#
# def __str__(self):
# return f"RetryError[{self.last_attempt}]"
@validate_arguments(config=dict(arbitrary_types_allowed=True))
def retry_call(
func: AnyCallable,
f_args: Optional[List[Any]] = None,
f_kwargs: Optional[DictStrAny] = None,
exceptions: Type[Exception] | Tuple[Type[Exception]] = Exception,
tries: int = -1,
delay: float = 0,
max_delay: OptionalFloat = None,
backoff=1.0,
jitter=0.0,
):
"""
Calls a function and re-executes it if it failed.
:param func: the function to execute.
:param f_args: the positional arguments of the function to execute.
:param f_kwargs: the named arguments of the function to execute.
:param exceptions: an exception or a tuple of exceptions to catch. default: Exception.
:param tries: the maximum number of attempts. default: -1 (infinite).
:param delay: initial delay between attempts. default: 0.
:param max_delay: the maximum value of delay. default: None (no limit).
:param backoff: multiplier applied to delay between attempts. default: 1 (no backoff).
:param jitter: extra seconds added to delay between attempts. default: 0.
fixed if a number, random if a range tuple (min, max)
:returns: the result of the f function.
"""
args = f_args if f_args else list()
kwargs = f_kwargs if f_kwargs else dict()
return __retry_internal(
partial(func, *args, **kwargs),
exceptions,
tries,
delay,
_MAX_WAIT,
max_delay,
backoff,
jitter,
)
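# Hedged usage sketch (not part of the module): retry a flaky callable with the
# decorator and with retry_call. The flaky() helper is made up for illustration.
if __name__ == "__main__":
    attempts = {"n": 0}
    @retry(exceptions=ValueError, tries=3, delay=0.01, backoff=2.0)
    def flaky() -> str:
        attempts["n"] += 1
        if attempts["n"] < 3:
            raise ValueError("not yet")
        return "ok"
    print(flaky(), "after", attempts["n"], "attempts")
    print(retry_call(lambda: "direct call", tries=2))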
|
python
|
# Distributed under the terms of the BSD 3-Clause License.
# The full license is in the file LICENSE, distributed with this software.
#
# Author: Philipp Schmidt <[email protected]>
# Copyright (c) 2020, European X-Ray Free-Electron Laser Facility GmbH.
# All rights reserved.
from collections.abc import Sequence
import numpy as np
class MapTarget:
"""Target for map operation.
This object wraps the value processed in a map operation to control
its distribution into individual work units and then iterating over
the entries in each work unit. The simplest target type is
SequenceTarget, which distributes a set of indices to each worker
and then iterates over the assigned indices in its body.
If a custom type extends this class and implements the wrap
classmethod, then it take advantage of automatic wrapping of values
passed to a map call.
"""
_target_types = []
@classmethod
def __init_subclass__(cls):
cls._target_types.append(cls)
@classmethod
def get_default_target(cls, value):
"""Get default map target.
Args:
value (Any): Value to wrap for map operation.
Returns:
(MapTarget) Target object wrapping the given value.
Raises:
ValueError: If no or more than one default target types can
be found.
"""
if isinstance(value, cls):
return value
target = None
for target_type in cls._target_types:
cur_target = target_type.wrap(value)
if cur_target is not None:
if target is not None:
raise ValueError('ambiguous default target requires an '
'explicit map target')
target = cur_target
if target is None:
raise ValueError(f'no default target for {type(value)}')
return target
@classmethod
    def wrap(cls, value):
"""Wrap value in this target type, if possible.
Args:
value (Any): Value to wrap for map operation.
Returns:
(MapTarget) Target object if wrapping is possible or None.
"""
return
def split(self, num_workers):
"""Split this target into work units.
The values contained in the returned Iterable are passed to this
target's iterate method later on. It may consist of any value
suitable to describe each work unit, e.g. an iterable of indices
of a sequence.
Args:
num_workers (int): Number of workers processing this target.
Returns:
(Iterable) Iterable of elements for each work unit.
"""
raise NotImplementedError('split')
def iterate(self, share):
"""Iterate over a share of this target.
Args:
share (Any): Element of the Iterable returned by
:method:split to iterate over.
Returns:
None
"""
raise NotImplementedError('iterate')
class SequenceTarget(MapTarget):
"""Map target for a sequence.
This target wraps any indexable collection, e.g. list, tuples, numpy
ndarrays or any other type implementing __getitem__. The kernel is
passed the current index and sequence value at that index.
Note that only ndarray and types implementing the Sequence interface
are currently automatically detected to use this target type. Other
types, e.g. xarray's DataArray, need to be wrapped manually.
"""
def __init__(self, sequence):
"""Initialize this sequence target.
Args:
sequence (Sequence): Sequence to process.
"""
self.sequence = sequence
@classmethod
def wrap(cls, value):
if isinstance(value, (Sequence, np.ndarray)):
# Note that ndarray does NOT implement Sequence itself!
return cls(value)
def split(self, num_workers):
return np.array_split(np.arange(len(self.sequence)), num_workers)
def iterate(self, indices):
for index in indices:
yield index, self.sequence[index]
# Ideas for targets: xarray.DataArray/Dataset, pandas
class ExtraDataTarget(MapTarget):
"""Map target for EXtra-data DataCollection.
This target wraps an EXtra-data DataCollection and performs the map
operation over its trains. The kernel is passed the current train's
index in the collection, the train ID and the data mapping.
"""
def __init__(self, dc):
self.dc = dc
import extra_data as xd
ExtraDataTarget.xd = xd
@classmethod
def wrap(cls, value):
if value.__class__.__name__ != 'DataCollection':
# Avoid importing EXtra-data if not even the name matches.
return
try:
import extra_data as xd
except ImportError:
return
if isinstance(value, xd.DataCollection):
return cls(value)
def split(self, num_workers):
return np.array_split(np.arange(len(self.dc.train_ids)), num_workers)
def iterate(self, indices):
subdc = self.dc.select_trains(ExtraDataTarget.xd.by_index[indices])
# Close all file handles inherited from the parent collection
# to force re-opening them in each worker process.
for f in subdc.files:
f.close()
for index, (train_id, data) in zip(indices, subdc.trains()):
yield index, train_id, data
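# Hedged usage sketch (not part of the module): wrap a plain list through the
# default target resolution and walk the work units the way a worker pool would.
if __name__ == "__main__":
    target = MapTarget.get_default_target(["a", "b", "c", "d", "e"])
    for worker_id, share in enumerate(target.split(num_workers=2)):
        print(worker_id, list(target.iterate(share)))
    # worker 0 receives indices 0-2, worker 1 receives indices 3-4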
|
python
|
# coding: utf-8
"""
SnapshotSnapshotsApi.py
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class SnapshotSnapshotsApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def create_snapshot_lock(self, snapshot_lock, sid, **kwargs):
"""
Create a new lock on this snapshot.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_snapshot_lock(snapshot_lock, sid, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param SnapshotLockCreateParams snapshot_lock: (required)
:param str sid: (required)
:return: CreateSnapshotLockResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['snapshot_lock', 'sid']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_snapshot_lock" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'snapshot_lock' is set
if ('snapshot_lock' not in params) or (params['snapshot_lock'] is None):
raise ValueError("Missing the required parameter `snapshot_lock` when calling `create_snapshot_lock`")
# verify the required parameter 'sid' is set
if ('sid' not in params) or (params['sid'] is None):
raise ValueError("Missing the required parameter `sid` when calling `create_snapshot_lock`")
resource_path = '/platform/1/snapshot/snapshots/{Sid}/locks'.replace('{format}', 'json')
path_params = {}
if 'sid' in params:
path_params['Sid'] = params['sid']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'snapshot_lock' in params:
body_params = params['snapshot_lock']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CreateSnapshotLockResponse',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def delete_snapshot_lock(self, snapshot_lock_id, sid, **kwargs):
"""
Delete the snapshot lock.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_snapshot_lock(snapshot_lock_id, sid, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str snapshot_lock_id: Delete the snapshot lock. (required)
:param str sid: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['snapshot_lock_id', 'sid']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_snapshot_lock" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'snapshot_lock_id' is set
if ('snapshot_lock_id' not in params) or (params['snapshot_lock_id'] is None):
raise ValueError("Missing the required parameter `snapshot_lock_id` when calling `delete_snapshot_lock`")
# verify the required parameter 'sid' is set
if ('sid' not in params) or (params['sid'] is None):
raise ValueError("Missing the required parameter `sid` when calling `delete_snapshot_lock`")
resource_path = '/platform/1/snapshot/snapshots/{Sid}/locks/{SnapshotLockId}'.replace('{format}', 'json')
path_params = {}
if 'snapshot_lock_id' in params:
path_params['SnapshotLockId'] = params['snapshot_lock_id']
if 'sid' in params:
path_params['Sid'] = params['sid']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def delete_snapshot_locks(self, sid, **kwargs):
"""
Delete all locks. Will try to drain count of recursively held locks so that the snapshot can be deleted.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_snapshot_locks(sid, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str sid: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['sid']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_snapshot_locks" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'sid' is set
if ('sid' not in params) or (params['sid'] is None):
raise ValueError("Missing the required parameter `sid` when calling `delete_snapshot_locks`")
resource_path = '/platform/1/snapshot/snapshots/{Sid}/locks'.replace('{format}', 'json')
path_params = {}
if 'sid' in params:
path_params['Sid'] = params['sid']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_snapshot_lock(self, snapshot_lock_id, sid, **kwargs):
"""
Retrieve lock information.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_snapshot_lock(snapshot_lock_id, sid, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str snapshot_lock_id: Retrieve lock information. (required)
:param str sid: (required)
:return: SnapshotLocks
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['snapshot_lock_id', 'sid']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_snapshot_lock" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'snapshot_lock_id' is set
if ('snapshot_lock_id' not in params) or (params['snapshot_lock_id'] is None):
raise ValueError("Missing the required parameter `snapshot_lock_id` when calling `get_snapshot_lock`")
# verify the required parameter 'sid' is set
if ('sid' not in params) or (params['sid'] is None):
raise ValueError("Missing the required parameter `sid` when calling `get_snapshot_lock`")
resource_path = '/platform/1/snapshot/snapshots/{Sid}/locks/{SnapshotLockId}'.replace('{format}', 'json')
path_params = {}
if 'snapshot_lock_id' in params:
path_params['SnapshotLockId'] = params['snapshot_lock_id']
if 'sid' in params:
path_params['Sid'] = params['sid']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SnapshotLocks',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def list_snapshot_locks(self, sid, **kwargs):
"""
List all locks.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_snapshot_locks(sid, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str sid: (required)
:param str sort: The field that will be used for sorting. Choices are id, expires, and comment. Default is id.
:param int limit: Return no more than this many results at once (see resume).
:param str dir: The direction of the sort.
:param str resume: Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options).
:return: SnapshotLocksExtended
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['sid', 'sort', 'limit', 'dir', 'resume']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_snapshot_locks" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'sid' is set
if ('sid' not in params) or (params['sid'] is None):
raise ValueError("Missing the required parameter `sid` when calling `list_snapshot_locks`")
if 'limit' in params and params['limit'] < 1.0:
raise ValueError("Invalid value for parameter `limit` when calling `list_snapshot_locks`, must be a value greater than or equal to `1.0`")
resource_path = '/platform/1/snapshot/snapshots/{Sid}/locks'.replace('{format}', 'json')
path_params = {}
if 'sid' in params:
path_params['Sid'] = params['sid']
query_params = {}
if 'sort' in params:
query_params['sort'] = params['sort']
if 'limit' in params:
query_params['limit'] = params['limit']
if 'dir' in params:
query_params['dir'] = params['dir']
if 'resume' in params:
query_params['resume'] = params['resume']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SnapshotLocksExtended',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def update_snapshot_lock(self, snapshot_lock, snapshot_lock_id, sid, **kwargs):
"""
Modify lock. All input fields are optional, but one or more must be supplied.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_snapshot_lock(snapshot_lock, snapshot_lock_id, sid, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param SnapshotLock snapshot_lock: (required)
:param str snapshot_lock_id: Modify lock. All input fields are optional, but one or more must be supplied. (required)
:param str sid: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['snapshot_lock', 'snapshot_lock_id', 'sid']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_snapshot_lock" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'snapshot_lock' is set
if ('snapshot_lock' not in params) or (params['snapshot_lock'] is None):
raise ValueError("Missing the required parameter `snapshot_lock` when calling `update_snapshot_lock`")
# verify the required parameter 'snapshot_lock_id' is set
if ('snapshot_lock_id' not in params) or (params['snapshot_lock_id'] is None):
raise ValueError("Missing the required parameter `snapshot_lock_id` when calling `update_snapshot_lock`")
# verify the required parameter 'sid' is set
if ('sid' not in params) or (params['sid'] is None):
raise ValueError("Missing the required parameter `sid` when calling `update_snapshot_lock`")
resource_path = '/platform/1/snapshot/snapshots/{Sid}/locks/{SnapshotLockId}'.replace('{format}', 'json')
path_params = {}
if 'snapshot_lock_id' in params:
path_params['SnapshotLockId'] = params['snapshot_lock_id']
if 'sid' in params:
path_params['Sid'] = params['sid']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'snapshot_lock' in params:
body_params = params['snapshot_lock']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
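# Hedged usage sketch (assumption: standard swagger-codegen client wiring; the
# endpoint, credentials and dict body below are placeholders -- the documented
# body type is SnapshotLockCreateParams):
#
#   config = Configuration()
#   config.host = "https://cluster:8080"
#   config.username, config.password = "user", "secret"
#   api = SnapshotSnapshotsApi()
#   created = api.create_snapshot_lock({"comment": "hold for backup"}, sid="42")
#   locks = api.list_snapshot_locks(sid="42", sort="expires", dir="ASC")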
|
python
|
#
# [To-Do] 1. Loop-Unrolling
#          2. Boundary Checks
#          3. Instruction Reordering
#
def tc_code_kernel_Store_Results(f, opt_gen_full, l_t3_mapping_tb_2D, l_t3_mapping_reg, size_reg_x, size_reg_y, idx_kernel, opt_accumulated):
#
#
#
f.write("\n")
f.write("\t// Store Results (Registers) to Global Memory\n");
f.write("\t// Part: Generalized Threads\n")
f.write("\t// Part: Generalized Register-Tiling\n")
#
    # Option #1: Non-Full-Tile (Partial Tile)
#
if opt_gen_full == 1:
#
f.write("\tif (")
#
axis_count = 0
for axis_idx in l_t3_mapping_tb_2D:
#
if axis_count != 0:
f.write(" && ")
# Per Each-Axis
idx_count = 0
for each_idx in axis_idx:
if idx_count == 0:
f.write("idx_" + each_idx + " < rng_" + each_idx)
else:
f.write(" && idx_" + each_idx + " < rng_" + each_idx)
#
idx_count += 1
axis_count += 1
#
f.write(")\n")
#
    # Option #2: Full-Tile
#
else:
f.write("\t#pragma unroll " + str(size_reg_y) + "\n")
#
#
#
f.write("\tfor (int i = 0; i < ")
f.write(str(size_reg_y))
f.write("; i++)\n")
f.write("\t{\n")
f.write("\t\tfor (int j = 0; j < ")
f.write(str(size_reg_x))
f.write("; j++)\n")
f.write("\t\t{\n")
#
# if
#
if opt_gen_full == 1:
f.write("\t\t\tif(i < rng_" + l_t3_mapping_reg[1] + " && j < rng_" + l_t3_mapping_reg[0] + ")\n")
f.write("\t\t\t{\n")
#. Output:
if idx_kernel == 1:
if opt_accumulated == 1:
#
# [To-Do] Instruction Reordering
#
f.write("\t\t\t\tdev_t3[t3_base_thread + (i * stride_reg_y) + (j * stride_reg_x)] += reg_tile[i][j];\n")
#f.write("\t\t\t\tint t3_addr = t3_base_thread + (i * stride_reg_y) + (j * stride_reg_x);\n")
#f.write("\t\t\t\tdouble tmp_dev_t3 = dev_t3[t3_addr];\n")
#f.write("\t\t\t\tdev_t3[t3_addr] = tmp_dev_t3 + reg_tile[i][j];\n")
else:
f.write("\t\t\tdev_t3[t3_base_thread + (i * stride_reg_y) + (j * stride_reg_x)] = reg_tile[i][j];\n")
else:
f.write("\t\t\tdev_t3[t3_base_thread + (i * stride_reg_y) + (j * stride_reg_x)] += reg_tile[i][j];\n")
#
# if-end
#
if opt_gen_full == 1:
f.write("\t\t\t}\n")
f.write("\t\t}\n")
f.write("\t}\n")
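# Hedged usage sketch (assumption: the index names and tile sizes below are
# placeholders; opt_gen_full=1 exercises the partial-tile path with boundary
# checks, and idx_kernel=1 with opt_accumulated=1 emits the accumulating store).
if __name__ == "__main__":
    with open("store_results.cu", "w") as f:
        tc_code_kernel_Store_Results(
            f,
            opt_gen_full=1,
            l_t3_mapping_tb_2D=[["h3", "h2"], ["p6", "p5"]],  # per-axis thread indices used in the boundary check
            l_t3_mapping_reg=["p4", "h1"],                    # register-tile index names (x, y)
            size_reg_x=4, size_reg_y=4,
            idx_kernel=1, opt_accumulated=1)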
|
python
|
# -*- coding: utf-8 -*-
from openprocurement.auctions.core.utils import opresource
from openprocurement.auctions.core.views.mixins import AuctionAuctionResource
@opresource(name='belowThreshold:Auction Auction',
collection_path='/auctions/{auction_id}/auction',
path='/auctions/{auction_id}/auction/{auction_lot_id}',
auctionsprocurementMethodType="belowThreshold",
description="Auction auction data")
class AuctionAuctionResource(AuctionAuctionResource):
pass
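# Routing sketch derived from the decorator above: requests to
#   /auctions/{auction_id}/auction                      (collection_path)
#   /auctions/{auction_id}/auction/{auction_lot_id}     (path)
# on auctions whose procurementMethodType is "belowThreshold" are dispatched to
# the inherited AuctionAuctionResource mixin; this subclass adds no behaviour.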
|
python
|
from django.db import models
# ePrice (比價王)
# Create your models here.
class Post(models.Model):
    ### id
    post_id = models.CharField(max_length=64) # post ID
    # e.g. for https://www.eprice.com.tw/life/talk/9/5242470/1/ the post ID is 5242470
    ### user
    pu_id = models.CharField(max_length=255) # poster ID 'a.nickname > small'
    pu_name = models.CharField(max_length=255) # poster nickname 'a.nickname'["title"]
    pu_identity = models.CharField(max_length=255) # poster role 'span.badge-mark.badge-warning'
    ### info
    post_topic = models.CharField(max_length=64) # topic
    post_url = models.URLField(max_length=512) # post URL
    post_time = models.DateTimeField(max_length=64) # posting time
    post_title = models.TextField() # title
    post_info = models.TextField(blank=True, null=True, default=None) # body content
    ### statistics
    pn_like = models.IntegerField(default=0) # number of likes (hearts)
    ### system
    lastupdate_datetime = models.DateTimeField(null=True, default=None)
    entrance_id = models.PositiveIntegerField(default=0) # board entrance id
class Reply(models.Model):
    ### id
    reply_id = models.CharField(max_length=64, blank=True, null=True) # reply ID
    ### user
    ru_id = models.CharField(max_length=255) # commenter/replier ID 'a.nickname > small.account'
    ru_name = models.CharField(max_length=128) # commenter/replier nickname 'a.nickname'["title"]
    ru_identity = models.CharField(max_length=255, null=True, default=None) # commenter role 'span.badge-mark.badge-default'
    ### info
    reply_time = models.DateTimeField(max_length=64) # reply time
    reply_info = models.TextField(blank=True, null=True, default=None) # reply content
    reply_url = models.URLField(max_length=512, blank=True, null=True) # URL of the reply itself
    ### system
    parent = models.ForeignKey('Reply', on_delete=models.CASCADE, blank=True, null=True) # parent reply (reply to a comment)
    post = models.ForeignKey('Post', on_delete=models.CASCADE) # associated post
'''
Test item 1: automatically fetch 3 posts from each board list
External board lists
https://www.eprice.com.tw/mobile/talk/0/0/1/
https://www.eprice.com.tw/telecom/talk/0/0/1/
https://www.eprice.com.tw/smartos/talk/0/0/1/
https://www.eprice.com.tw/tech/talk/0/0/1/
https://www.eprice.com.tw/life/talk/0/0/1/
https://www.eprice.com.tw/funky/talk/0/0/1/
https://www.eprice.com.tw/pad/talk/0/0/1/
Test item 2: fetch replies
Test item 3: must be able to detect whether there is a next page of replies and automatically page through while crawling
Designated test post
https://www.eprice.com.tw/life/talk/9/5242470/1/
'''
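# Hedged usage sketch (assumption: run inside a configured Django project with
# this app installed and migrations applied; all field values are placeholders):
#
#   from django.utils import timezone
#   post = Post.objects.create(
#       post_id="5242470", pu_id="user123", pu_name="Nickname", pu_identity="member",
#       post_topic="life", post_url="https://www.eprice.com.tw/life/talk/9/5242470/1/",
#       post_time=timezone.now(), post_title="a title", entrance_id=9)
#   Reply.objects.create(post=post, ru_id="u2", ru_name="Replier",
#                        reply_time=timezone.now(), reply_info="reply body")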
|
python
|
import os
import copy
import datetime
import pickle as pk
import pprint
import numpy as np
import scipy.integrate
import matplotlib.pyplot as plt
import lmfit
from time import time
ppr = pprint  # alias used by the pretty-printing diagnostics in prparams()
# Note: make_model, fullmodels, covid_ts, covid_owid_ts and population_owid are
# expected to be provided as module/notebook-level globals before ModelFit is used.
class ModelFit:
""" We collect all information related to a fit between a pygom model and a set of data in this class
It has access to the model structure and defines all required parameters and details of fit """
def dumpparams(self,run_id=''): # Have to add self since this will become a method
"""stores params in a file './params/Model_Name.pk'
This stuff needs modules os, sys, pickle as pk.
If run_id is nonempty, it is used to construct the filename, and self.run_id is set to its value."""
mname = self.modelname
country = self.dbparams['country']
rname = self.run_id
dirnm = os.getcwd()
if run_id != '': # if run_id, turn it into self.run_id and use it for output filename
if run_id != rname:
print("warning: changing run_id from ",rname,'to',run_id)
self.run_id = run_id
else:
run_id = self.run_id # should always be something from __init__
pfile = dirnm+'/params/'+run_id+'.pk'
self.paramfile = pfile
try:
all_params = {'params':self.params,
'sbparams':self.sbparams,
'fbparams':self.fbparams,
'cbparams':self.cbparams,
'dbparams':self.dbparams,
'initial_values':self.initial_values
}
with open(pfile,'wb') as fp:
pk.dump(all_params,fp)
#print('dumped params to',pfile)
except:
print('problem dumping params to ',pfile)
def loadparams(self,run_id=''):
"""loads params from same file. returns None if any problem finding the file.
This stuff needs modules os, sys, pickle as pk.
If run_id is nonempty, it is used to construct the filename, and self.run_id is set to its value."""
if run_id == '':
run_id = self.run_id
elif self.run_id != run_id:
print("warning: changing run_id from ",self.run_id,'to',run_id)
self.run_id = run_id
dirnm = os.getcwd()
pfile = dirnm+'/params/'+run_id+'.pk'
self.paramfile = pfile
try:
with open(pfile,'rb') as fp:
all_params = pk.load(fp)
print('loaded params from ',pfile,':')
except:
print("For this run_id, a fresh file: ",pfile)
return None
#print('------- params from file:')
#ppr.pprint(all_params)
# check to see that all params being loaded match params of model, if not: fail.
for pp in ['params','sbparams','fbparams','cbparams','dbparams']:
try:
                ppp = getattr(self, pp) # fails the first time, before the ModelFit has params
selfkk = [kk for kk in ppp]
newkk = [k for k in all_params[pp]]
if newkk != selfkk:
print("params don't match when loading the params from ",pfile)
print('old keys:',selfkk)
print('new keys:',newkk)
return None
except:
pass # ok to fail 1st time
try:
self.params = all_params['params']
self.model.parameters = self.params
self.sbparams = all_params['sbparams']
self.fbparams = all_params['fbparams']
self.cbparams = all_params['cbparams']
self.dbparams = all_params['dbparams']
self.initial_values = all_params['initial_values'] # will get copied properly?
except:
print('problem loading the params from ',pfile)
return None
return True
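    # Sketch of the persistence round trip (the run_id string is a placeholder):
    #   mf.dumpparams('SEIR_Germany_try1')   # writes ./params/SEIR_Germany_try1.pk
    #   mf.loadparams('SEIR_Germany_try1')   # True on success, None if anything is missing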
def set_param(self,param,value):
plist = [p.name for p in self.model.param_list]
        if param not in plist:
            print('Error: param name',param,'is not a parameter for this',self.modelname,'model.')
            return
self.params[param] = value
tmp = {param:value}
self.model.parameters = tmp # pygom magic sets the right parameter in the model.parameters dictionary.
def set_initial_values(self,ival,t0=None):
# consistency check:
if len(self.initial_values[0]) != len(self.model.initial_values[0]):
print('warning: inconsistent initial values in model.')
        if len(ival) != len(self.model.initial_values[0]):
            print('error: initial value must be of length', len(self.model.initial_values[0]))
            return
self.model.initial_values[0] = [x for x in ival]
self.initial_values[0] = [x for x in ival]
if t0 is not None:
self.model.initial_values[1] = t0
self.initial_values[1] = t0
def set_I0(self,logI_0):
I0 = 10**logI_0
self.model.initial_values[0][0] = 1.0 - I0
self.model.initial_values[0][2] = I0
self.initial_values[0][0] = 1.0 - I0
self.initial_values[0][2] = I0
def difference(self,datain):
dataout = np.zeros(np.shape(datain))
for i in range(1,len(datain)):
dataout[i,...] = datain[i,...]-datain[i-1,...]
return dataout
def rolling_average(self,datain,period):
(tmax,n) = np.shape(datain)
dataout = np.zeros((tmax,n),dtype=float)
moving_av = np.zeros(n,dtype=float)
for k in range(len(datain)):
if k-period >= 0:
                moving_av[:] = moving_av[:] - datain[k-period,...]  # drop the sample leaving the window (was hard-coded to 7)
moving_av[:] = moving_av[:] + datain[k,...]
dataout[k] = moving_av/min(float(period),float(k+1))
return dataout
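    # Worked numeric sketch (not part of the original class): difference() turns a
    # cumulative series into daily increments and rolling_average() then applies a
    # trailing mean, e.g. for a single-column series with a 3-day window:
    #   cumulative = np.array([[0.], [1.], [3.], [6.], [10.]])
    #   daily      = self.difference(cumulative)      # [[0.], [1.], [2.], [3.], [4.]]
    #   smoothed   = self.rolling_average(daily, 3)   # trailing 3-day means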
def plotdata(self,dtypes=['confirmed','deaths']):
if type(dtypes)==str:
dtypes = [dtypes]
xx = np.array(range(len(self.tdata)-1))
print(len(xx))
print([(x,len(self.data[x])) for x in dtypes])
for dt in dtypes:
try:
yy = self.data[dt]
except:
print("data type '"+dt+"' not found.")
try:
plt.plot(xx,yy)
except:
print("couldn't plot xx,yy",xx,yy)
plt.show()
def get_fitdata(self,species=['deaths'],datasets=['new_deaths_corrected_smoothed']):
if not isinstance(species,list):
lspecies = [species]
ldatasets =[datasets]
else:
lspecies = species
ldatasets =datasets
        if len(ldatasets) != len(lspecies):
print('Error in input to get_fitdata: species and datasets parameters not same length')
#
tvec = self.tsim
tvec1 = tvec[1:]
fitdata = {}
        if self.data:
for i,ls in enumerate(lspecies):
ds = ldatasets[i]
if ls == 'confirmed': # John corrected this Oct 1st, was 'deaths'
datmp = self.data[ds] # confirmed cases data, corrected by FracConfirmedDet
fitdata[ls] = [x/self.fbparams['FracConfirmedDet']/self.population for x in datmp]
elif ls == 'deaths':
datmp = self.data[ds] # deaths cases data, corrected by FracDeathsDet
fitdata[ls] = [x/self.fbparams['FracDeathsDet']/self.population for x in datmp]
else:
fitdata[ls] = np.array(self.data[ds])
else:
print('missing fit data')
for ls in lspecies:
fitdata[ls] = None
return fitdata
def solvefit(self,species = ['deaths'],datasets=['deaths_corrected_smoothed']):
fitdata = self.get_fitdata(species,datasets)
lspecies = [x for x in fitdata]
tmaxf = len(fitdata[lspecies[0]])
tvec = self.tsim
tvecf=np.arange(0,tmaxf,1)
tvecf1 = tvecf[1:]
self.soln = scipy.integrate.odeint(self.model.ode, self.model.initial_values[0], tvec)
rtn = {}
slices = {}
for ls in lspecies:
if ls == 'deaths':
slices['deaths'] = self.model.deaths
if ls == 'confirmed':
slices['confirmed'] = self.model.confirmed
for ls in lspecies:
rtn[ls] = {}
rtn[ls]['data'] = np.array(fitdata[ls])
rtn[ls]['soln'] = self.soln[:,slices[ls]][:,0]
rtn[ls]['resid'] = rtn[ls]['soln']-rtn[ls]['data']
return rtn
def solvefitlog(self,species = ['deaths'],datasets=['deaths_corrected_smoothed']):
"""
like solvefit() but take log of data and soln before computing residual.
"""
fitdata = self.get_fitdata(species,datasets)
lspecies = [x for x in fitdata]
tmaxf = len(fitdata[lspecies[0]])
tvec = self.tsim
tvecf=np.arange(0,tmaxf,1)
tvecf1 = tvecf[1:]
self.soln = scipy.integrate.odeint(self.model.ode, self.model.initial_values[0], tvec)
rtn = {}
slices = {}
for ls in lspecies:
if ls == 'deaths':
slices['deaths'] = self.model.deaths
if ls == 'confirmed':
slices['confirmed'] = self.model.confirmed
for ls in lspecies:
rtn[ls] = {}
rtn[ls]['data'] = np.array(fitdata[ls])
rtn[ls]['soln'] = self.soln[:,slices[ls]][:,0]
mn = min([x for x in fitdata[ls] if x>0])
fdat = [x if x > 0 else mn for x in fitdata[ls]]
lfdat = np.array([np.log(x) for x in fdat])
sdata = rtn[ls]['soln']
mn = min([x for x in sdata if x>0])
sdat = [x if x > 0 else mn for x in sdata]
lsdat = np.array([np.log(x) for x in sdat])
rtn[ls]['resid'] = lsdat - lfdat
self.logresid = [sdat,lsdat,fdat,lfdat,lsdat-lfdat]
return rtn
def solveplot(self, species=['confirmed'],summing='daily',averaging='weekly',mag = {'deaths':10},axis=None,
scale='linear',plottitle= '',label='',newplot = True, gbrcolors=False, figsize = None, outfile = None,datasets=['confirmed_corrected_smoothed']):
"""
solve ODEs and plot for fitmodel indicated
species : alternatives 'all', 'EI', 'confirmed', 'deaths', ...
tmax : max time for simulation
summing: type of summing smoothing options : 'daily', ...
averaging : None, 'daily', 'weekly'
fitdata : data to fit
axes : previous axes to plot on [None]
scale : alternative 'linear' or 'log'
plottitle : title for plot
label : label for curve when called as part of multicurve plot
newplot : whether to open new plot True/False
gbrcolors : color types to use
figsize : size of fig in inches (binary tuple)
"""
# tmax = self.tsim[-1]
# tvec=np.arange(0,tmax,1)
if not isinstance(species,list):
lspecies = [species]
ldatasets = [datasets]
else:
lspecies = species
ldatasets = datasets
dspecies = [dt if dt != 'caution_fraction' else 'stringency' for dt in lspecies]
mags = [mag[dt] if dt in mag.keys() else 1 for dt in dspecies]
tvec = self.tsim
tvec1 = tvec[1:]
        if self.data:
fitdata = np.transpose(np.array([self.data[dt] for dt in datasets]))
else:
fitdata = None
if not fitdata is None:
tmaxf = len(fitdata)
if fitdata.ndim != 2:
print("error in number of dimensions of array")
tvecf=np.arange(0,tmaxf,1)
tvecf1 = tvecf[1:]
if newplot:
axis = None
if (figsize == None):
figsize=(8,6)
plt.figure(figsize=figsize)
# fig, axeslist = plt.subplots(1, nmodels, figsize=(nmodels*8,6))
smodel = self.modelname
model = self.model
self.soln = scipy.integrate.odeint(model.ode, model.initial_values[0], tvec[1::])
#Plot
# ax = axeslist[nm]
if axis == None:
ax = axis = plt.subplot(1,1,1)
else:
ax = axis
if scale == 'log': #Plot on log scale
ax.semilogy()
ax.set_ylim([0.00000001,1.0])
if summing == 'daily':
ssoln = self.difference(self.soln)
if not fitdata is None:
sfit = self.difference(fitdata)
else:
ssoln = self.soln
if not fitdata is None:
sfit = fitdata
if averaging == 'weekly':
srsoln = self.rolling_average(ssoln,7)
if not fitdata is None:
srfit = self.rolling_average(sfit,7)
else:
srsoln = ssoln
if not fitdata is None:
srfit = sfit
for ns,species in enumerate(lspecies):
if species == 'confirmed':
suma = np.sum(srsoln[:,model.confirmed],axis=1)*mags[ns]
if not fitdata is None:
ax.plot(tvec1,suma,label=label,color='green')
fita = srfit[1::,ns]*mags[ns]/self.fbparams['FracConfirmedDet']/self.population # confirmed cases data, corrected by FracConfirmedDet
ax.plot(tvecf1,fita,'o',label=label,color='green')
else:
ax.plot(tvec1,suma,label=label)
if species == 'recovered':
suma = np.sum(srsoln[:,model.recovered],axis=1)*mags[ns]
if not fitdata is None:
ax.plot(tvec1,suma,label=label,color='blue')
fita = srfit[1::,ns]*mags[ns]/self.fbparams['FracRecoveredDet']/self.population # recovered cases data, corrected by FracRecoveredDet
ax.plot(tvecf1,fita,'o',label=label,color='blue')
else:
ax.plot(tvec1,suma,label=label)
elif species == 'deaths':
suma = np.sum(srsoln[:,model.deaths],axis=1)*mags[ns]
if not fitdata is None:
ax.plot(tvec1,suma,label=label,color='darkred')
fita = srfit[1::,ns]*mags[ns]/self.fbparams['FracDeathsDet']/self.population # deaths cases data, corrected by FracDeathsDet
ax.plot(tvecf1,fita,'o',label=label,color='red',alpha=0.2)
else:
ax.plot(tvec1,suma,label=label)
elif species == 'EI':
ax.plot(tvec1,self.soln[:,model.ei],label=label)
# ax.plot(tvec1,self.soln[:,model.ei],label="%s" % count)
if 'I3' in model.modelname:
plt.legend(("E","I1","I2","I3"))
elif 'E' in model.modelname:
plt.legend(("E","I"))
else:
plt.legend(("I"))
elif species == 'caution_fraction':
#print('model name',model.modelname)
susc = self.soln[:,model.S_c]
suma = np.sum(self.soln[:,model.all_susceptibles],axis=1)
old_settings = np.seterr(divide='ignore') #
suma = np.divide(susc,suma)
np.seterr(**old_settings) # reset to default
if not fitdata is None:
ax.plot(tvec1,suma,label=label,color='green')
                    fita = srfit[1::,ns]*mags[ns] # caution fraction from data (stringency) with correction to unit scale via mags
ax.plot(tvecf1,fita,'o',label=label,color='green')
else:
ax.plot(tvec1,suma,label=label)
elif species == 'all':
ax.plot(tvec1,self.soln,label=label)
if 'I3' in model.modelname:
if 'C3'in model.modelname:
pspecies=("S","E","I1","I2","I3","R","D","Ic","Sc","Ec")
elif 'C' in model.modelname:
pspecies=("S","E","I1","I2","I3","R","D","Sc")
else:
pspecies=("S","E","I1","I2","I3","R","D")
elif 'E' in model.modelname:
if 'C3'in model.modelname:
pspecies=("S","E","I","R","D","Ic","Sc","Ec")
else:
pspecies=("S","E","I","R","D","Sc")
else:
if 'C2'in model.modelname:
pspecies=("S","I","R","D","Ic","Sc")
else:
pspecies=("S","I","R","D","Sc")
plt.legend(pspecies)
plt.xlabel("Time (days)")
plt.ylabel("Fraction of population")
plt.title(model.modelname +' '+plottitle)
if outfile:
plt.savefig(outfile,bbox_inches='tight')
self.dumpparams() # dump every plot; could be changed by sliders
return
def prparams(self,outfile = ''):
"""
pretty print all params.
If outfile is not '', params are printed to it, in the form of a dictionary that can be read back in.
"""
if outfile != '':
with open(outfile,'w') as out:
pp = pprint.PrettyPrinter(stream=out)
pp.pprint({'params':self.params,
'sbparams':self.sbparams,
'fbparams':self.fbparams,
'cbparams':self.cbparams,
'dbparams':self.dbparams,
'initial_values':self.initial_values})
else:
print('params:')
ppr.pprint(self.params)
print('sbparams:')
ppr.pprint(self.sbparams)
            print('fbparams:')
ppr.pprint(self.fbparams)
print('cbparams:')
ppr.pprint(self.cbparams)
print('dbparams:')
ppr.pprint(self.dbparams)
print('initial_values:')
ppr.pprint(self.initial_values)
def getparams(self):
rtn = {}
for pp in ['params','sbparams','fbparams','cbparams','dbparams']:
            ppp = getattr(self, pp)
rtn[pp] = ppp
return rtn
def fit(self,params_init_min_max,fit_method='leastsq',fit_target='deaths',fit_data='deaths_corrected_smoothed',diag=True):
        if fit_target not in ['deaths','confirmed']:
            print('can only fit deaths or confirmed for now')
            return
for pp in params_init_min_max:
            if pp != 'logI_0': # add any other special ad hoc params here...
if pp not in list(self.model.param_list):
print(pp,': bad param for',self.model.modelname,'model.')
return
for pp in params_init_min_max:
if len(params_init_min_max[pp]) != 3:
print('params_init_min_max has incorrect form.')
print('should be dictionary with each entry as tuple (initial_value,min,max).')
return
self.params_lmf = lmfit.Parameters()
for pp in params_init_min_max:
self.params_lmf.add(pp,params_init_min_max[pp][0],
min=params_init_min_max[pp][1],
max=params_init_min_max[pp][2])
## set initial params for fit
for x in self.params_lmf:
if x in self.params:
self.set_param(x, self.params_lmf[x].value)
if x == 'logI_0': # set other ad hoc params like this
self.set_I0(self.params_lmf['logI_0'].value)
## modify resid here for other optimizations
def resid(params_lmf):
for x in params_lmf:
if x in self.params:
self.set_param(x, params_lmf[x].value)
if 'logI_0' in params_lmf:
self.set_I0(params_lmf['logI_0'].value)
fittry = self.solvefit(fit_target,fit_data) # use solvefitlog to get residuals as log(soln)-log(data)
#res2 = np.array([x*x for x in fittry['deaths']['resid']])
#sumres2 = np.sqrt(np.sum(res2))
#print('resid: ',sumres2)
return fittry[fit_target]['resid']
## do the fit
try:
if diag:
start = time()
self.residall = []
self.paramall = []
def per_iteration(pars, iteration, resd, *args, **kws):
res2 = np.array([x*x for x in resd])
sumres2 = np.sqrt(np.sum(res2))
self.residall.append(sumres2)
self.paramall.append(pars.copy())
outfit = lmfit.minimize(resid, self.params_lmf, method=fit_method,iter_cb=per_iteration)
print('elapsed time = ',time()-start)
lmfit.report_fit(outfit)
else:
outfit = lmfit.minimize(resid, self.params_lmf, method=fit_method)
except Exception as e:
print('Problem with fit...')
print(e)
## set model params to fitted values, dump to file
if 'outfit' in locals():
for x in outfit.params:
if x in self.params:
self.set_param(x, outfit.params[x].value)
self.set_I0(outfit.params['logI_0'].value)
## dump new fitted values.
self.dumpparams()
else:
print('Problem with fit, model params not changed')
def __init__(self,modelname,model=None,country='Germany',run_id='',datatypes='all',data_src='owid',startdate=None,stopdate=None,simdays=None,new=False):
"""
if run_id is '', self.run_id takes a default value of default_run_id = modelname+'_'+country
if run_id is not '', it is used as self.run_id, used in turn for param filename.
except that if run_id starts with character '_', it is appended to the default run_id,
i.e. if run_id[0]=='_': self.run_id = default_run_id+run_id
"""
global make_model,covid_ts,covid_owid_ts
dirnm = os.getcwd()
# construct default name for file / run_id
if country != '':
defnm = modelname+'_'+country
else:
defnm = modelname
if run_id == '': # use default name
self.run_id = defnm
elif run_id[0]=='_': # use run_id as addon to default
self.run_id = defnm+run_id
else:
self.run_id = run_id # use specified name
#print('=============',self.run_id)
pfile = dirnm+'/params/'+self.run_id+'.pk'
######################################
# set up model
self.modelname = modelname
if model:
self.model = model
if self.model.modelname != modelname:
print("warning: changing model from",modelname,'to',self.model.modelname)
self.modelname = modelname
else:
#model_d = make_model(modelname) # I still prefer this I think, but
model_d = copy.deepcopy(fullmodels[modelname]) # should avoid modifying fullmodels at all from fits, otherwise never clear what parameters are
self.model = model_d['model']
if new:
#print('using default set of parameters for model type',modelname)
self.params = model_d['params']
self.cbparams = model_d['cbparams']
self.sbparams = model_d['sbparams']
self.fbparams = model_d['fbparams']
self.dbparams = model_d['dbparams']
self.initial_values = model_d['initial_values']
else:
if not self.loadparams(self.run_id):
#print('Problem loading paramfile for',run_id,'... using default set of parameters for model type',modelname)
self.params = model_d['params']
self.cbparams = model_d['cbparams']
self.sbparams = model_d['sbparams']
self.fbparams = model_d['fbparams']
self.dbparams = model_d['dbparams']
self.initial_values = model_d['initial_values']
# set up data and times for simulation
if data_src == 'jhu':
ts = covid_ts
elif data_src == 'owid':
ts = covid_owid_ts
else:
print('data_src',data_src,'not yet hooked up: OWID data used instead')
ts = covid_owid_ts
self.country = country
self.population = population_owid[country][-2] # -2 seems to get all countries population (no zeros)
fmt_jhu = '%m/%d/%y'
dates_t = [datetime.datetime.strptime(dd,fmt_jhu) for dd in ts['confirmed']['dates'] ] # ts dates stored in string format of jhu fmt_jhu = '%m/%d/%y'
firstdate_t = dates_t[0]
lastdate_t = dates_t[-1]
if startdate:
startdate_t = datetime.datetime.strptime(startdate,fmt_jhu)
else:
startdate_t = firstdate_t
if stopdate:
stopdate_t = datetime.datetime.strptime(stopdate,fmt_jhu)
print('stopdate',stopdate)
else:
stopdate_t = lastdate_t
if (startdate_t - firstdate_t).days < 0:
print('start date out of data range, setting to data first date',ts['confirmed']['dates'][0])
startdate_t = firstdate_t
daystart = 0
else:
daystart = (startdate_t- firstdate_t).days
if (stopdate_t - startdate_t).days > (lastdate_t - startdate_t).days:
print('stop date out of data range, setting to data last date',ts['confirmed']['dates'][-1])
stopdate_t = lastdate_t
datadays = (stopdate_t-startdate_t).days + 1
if simdays: # simdays allowed greater than datadays to enable predictions
if simdays < datadays:
stopdate_t = startdate_t + datetime.timedelta(days=simdays-1) # if simulation for shorter time than data, restrict data to this
datadays = (stopdate_t-startdate_t).days + 1
else:
simdays = datadays
self.dates = [date.strftime(fmt_jhu) for date in dates_t if date>=startdate_t and date <= lastdate_t]
self.tsim = np.linspace(0, simdays -1, simdays)
self.tdata = np.linspace(0, datadays -1, datadays)
if datatypes == 'all' or not datatypes:
if data_src == 'owid':
datatypes = ['confirmed','deaths','tests', 'stringency','deaths_corrected_smoothed','confirmed_corrected_smoothed','new_deaths_corrected_smoothed','new_confirmed_corrected_smoothed']
else:
datatypes = ['confirmed','deaths','recovered','deaths_corrected_smoothed','confirmed_corrected_smoothed','recovered_corrected_smoothed','new_deaths_corrected_smoothed','new_confirmed_corrected_smoothed','new_recovered_corrected_smoothed']
self.data = {}
for dt in datatypes:
self.data.update({dt:ts[dt][country][daystart:datadays]})
self.startdate = startdate_t.strftime(fmt_jhu)
self.stopdate = stopdate_t.strftime(fmt_jhu)
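# Hedged usage sketch (assumptions: the notebook-level globals referenced above
# -- fullmodels, covid_owid_ts, population_owid, ... -- are already populated;
# the model name and fitted parameter names below are placeholders):
#
#   mf = ModelFit('SEIR', country='Germany', data_src='owid', new=True)
#   mf.fit({'beta': (0.3, 0.0, 1.0), 'logI_0': (-6.0, -10.0, -2.0)},
#          fit_method='leastsq', fit_target='deaths',
#          fit_data='new_deaths_corrected_smoothed')
#   mf.solveplot(species=['deaths'], summing='daily', averaging='weekly',
#                datasets=['new_deaths_corrected_smoothed'], scale='linear')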
|