max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
cruft/__main__.py | tdhopper/cruft | 293 | 98356 |
<filename>cruft/__main__.py
from cruft import _cli
_cli.app(prog_name="cruft")
|
packages/pytea/pylib/torch/backends/cudnn/__init__.py | Sehun0819/pytea | 241 | 98362 |
enabled = True
deterministic = False
benchmark = False
|
src/opendr/perception/compressive_learning/multilinear_compressive_learning/algorithm/backbones/model_utils.py | ad-daniel/opendr | 217 | 98388 |
<reponame>ad-daniel/opendr<gh_stars>100-1000
# Copyright 2020-2022 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
from urllib.request import urlretrieve
def get_cifar_pretrained_weights(model_name):
if model_name == '':
return
home_dir = os.path.expanduser('~')
model_dir = os.path.join(home_dir,
'.cache',
'opendr',
'checkpoints',
'perception',
'compressive_learning',
'backbone')
if not os.path.exists(model_dir):
os.makedirs(model_dir, exist_ok=True)
model_file = os.path.join(model_dir, '{}.pickle'.format(model_name))
if not os.path.exists(model_file):
server_url = 'ftp://opendrdata.csd.auth.gr/perception/compressive_learning/backbone/'
model_url = os.path.join(server_url, '{}.pickle'.format(model_name))
urlretrieve(model_url, model_file)
print('Pretrained backbone model downloaded')
fid = open(model_file, 'rb')
state_dict = pickle.load(fid)['state_dict']
fid.close()
return state_dict
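
# Hedged usage sketch (added for illustration; not part of the original OpenDR module).
# It assumes the caller already has a torch.nn.Module whose parameter names match the
# pickled state_dict, and that 'model_name' is a backbone identifier the OpenDR file
# server recognises -- both are assumptions, not facts from this file.
def _example_load_pretrained(model, model_name):
    """Load the downloaded CIFAR backbone weights into an existing model in place."""
    state_dict = get_cifar_pretrained_weights(model_name)
    if state_dict is not None:
        # strict=False tolerates benign key mismatches between backbone variants
        model.load_state_dict(state_dict, strict=False)
    return model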
|
myia/debug/traceback.py | strint/myia | 222 | 98389 |
<reponame>strint/myia
"""Tools to print a traceback for an error in Myia."""
import ast
import sys
import warnings
import prettyprinter as pp
from colorama import Fore, Style
from ..abstract import Reference, data, format_abstract, pretty_struct
from ..ir import ANFNode, Graph
from ..parser import Location, MyiaDisconnectedCodeWarning, MyiaSyntaxError
from ..utils import InferenceError
from .label import label
def skip_node(node):
"""Whether to skip a step in the traceback based on ast node type."""
return isinstance(node, (ast.If, ast.While, ast.For))
def _get_call(ref):
ctx = ref.context
if not hasattr(ctx, "graph"):
return "<unknown>", ()
g = ctx.graph or ref.node.graph
while g and g.has_flags("auxiliary") and ctx.parent and ctx.parent.graph:
ctx = ctx.parent
g = ctx.graph
return g, ctx.argkey
def _get_loc(node):
if node.is_constant_graph():
node = node.value
loc = node.debug.find("location")
genfn = None
if loc is None:
tr = node.debug.find("trace", skip={"copy", "equiv"})
if tr:
idx = len(tr) - 3
while idx >= 0:
fr = tr[idx]
if "myia/myia/ir" in fr.filename:
idx -= 1
continue
loc = Location(fr.filename, fr.lineno, 0, fr.lineno, 0, None)
genfn = fr.name
break
return loc, genfn
def _get_info(x):
skip = False
if isinstance(x, Reference):
g, args = _get_call(x)
loctype = "direct"
loc, genfn = _get_loc(x.node)
elif isinstance(x, ANFNode):
g = x.graph
args = None
loctype = "direct"
loc, genfn = _get_loc(x)
else:
g, args = x
loctype = None
loc = None
genfn = None
if loc and skip_node(loc.node):
skip = True
return (g, args, loctype, loc, genfn, skip)
class _PBlock:
def __init__(self, title, separator, args, kwargs):
self.title = title
self.separator = separator
self.args = args
self.kwargs = kwargs
@pp.register_pretty(_PBlock)
def _pretty_pblock(pb, ctx):
return pretty_struct(ctx, pb.title, pb.args, pb.kwargs)
@pp.register_pretty(data.PrimitiveFunction)
def _pretty_primfunc(x, ctx):
return label(x.prim)
@pp.register_pretty(data.GraphFunction)
def _pretty_graphfunc(x, ctx):
return label(x.graph)
def _format_call(fn, args):
if args is None:
return label(fn)
if isinstance(fn, Graph):
kwargs = {label(p): arg for p, arg in zip(fn.parameters, args)}
args = []
else:
kwargs = {}
return format_abstract(_PBlock(label(fn), " :: ", args, kwargs))
def _show_location(loc, label, mode=None, color="RED", file=sys.stderr):
with open(loc.filename, "r") as contents:
lines = contents.read().split("\n")
_print_lines(
lines,
loc.line,
loc.column,
loc.line_end,
loc.column_end,
label,
mode,
color,
file=file,
)
def _print_lines(
lines, l1, c1, l2, c2, label="", mode=None, color="RED", file=sys.stderr
):
if mode is None:
if file.isatty():
mode = "color"
for ln in range(l1, l2 + 1):
line = lines[ln - 1]
if ln == l1:
trimmed = line.lstrip()
to_trim = len(line) - len(trimmed)
start = c1 - to_trim
else:
trimmed = line[to_trim:]
start = 0
if ln == l2:
end = c2 - to_trim
else:
end = len(trimmed)
if mode == "color":
prefix = trimmed[:start]
hl = trimmed[start:end]
rest = trimmed[end:]
print(
f"{ln}: {prefix}{getattr(Fore, color)}{Style.BRIGHT}"
f"{hl}{Style.RESET_ALL}{rest}",
file=file,
)
else:
print(f"{ln}: {trimmed}", file=file)
prefix = " " * (start + 2 + len(str(ln)))
print(prefix + "^" * (end - start) + label, file=file)
def skip_ref(ref):
"""Return whether display for a ref should be skipped."""
fn, args, loctype, loc, genfn, skip = _get_info(ref)
return skip
def print_ref(ref, file=sys.stderr):
"""Print a ref's location."""
fn, args, loctype, loc, genfn, skip = _get_info(ref)
if loc is not None:
print(f"{loc.filename}:{loc.line}", file=file)
gen = f"via code generated in {genfn}:" if genfn else ""
print("in", _format_call(fn, args), gen, file=file)
if loc is not None:
_show_location(loc, "", file=file)
def print_inference_error(error, file=sys.stderr):
"""Print an InferenceError's traceback."""
refs = [*error.traceback_refs.values()] + error.refs
for ref in refs:
if not skip_ref(ref):
print("=" * 80, file=file)
print_ref(ref, file=file)
print("~" * 80, file=file)
if error.pytb:
print(error.pytb, file=file)
else:
print(f"{type(error).__name__}: {error.message}", file=file)
def print_myia_syntax_error(error, file=sys.stderr):
"""Print MyiaSyntaxError's location."""
loc = error.loc
print("=" * 80, file=file)
if loc is not None:
print(f"{loc.filename}:{loc.line}", file=file)
if loc is not None:
_show_location(loc, "", file=file)
print("~" * 80, file=file)
print(f"{type(error).__name__}: {error}", file=file)
_previous_excepthook = sys.excepthook
def myia_excepthook(exc_type, exc_value, tb):
"""Print out InferenceError and MyiaSyntaxError specially."""
if isinstance(exc_value, InferenceError):
print_inference_error(exc_value, file=sys.stderr)
elif isinstance(exc_value, MyiaSyntaxError):
print_myia_syntax_error(exc_value, file=sys.stderr)
else:
_previous_excepthook(exc_type, exc_value, tb)
sys.excepthook = myia_excepthook
def print_myia_warning(warning, file=sys.stderr):
"""Print Myia Warning's location."""
msg = warning.args[0]
loc = warning.loc
print("=" * 80, file=file)
if loc is not None:
print(f"{loc.filename}:{loc.line}", file=file)
if loc is not None:
_show_location(loc, "", None, "MAGENTA", file=file)
print("~" * 80, file=file)
print(f"{warning.__class__.__name__}: {msg}", file=file)
_previous_warning = warnings.showwarning
def myia_warning(message, category, filename, lineno, file, line):
"""Print out MyiaDisconnectedCodeWarning specially."""
if category is MyiaDisconnectedCodeWarning:
# message is actually a MyiaDisconnectedCodeWarning object,
# even though this parameter of myia_warning is called message
        # (in order to match parameter names of the overridden showwarning)
print_myia_warning(message, file=sys.stderr)
else:
_previous_warning(message, category, filename, lineno, file, line)
warnings.showwarning = myia_warning
warnings.filterwarnings("always", category=MyiaDisconnectedCodeWarning)
__all__ = [
"myia_excepthook",
"myia_warning",
"print_inference_error",
"print_myia_syntax_error",
"print_myia_warning",
"print_ref",
"skip_node",
"skip_ref",
]
|
scripts/crimson_rewriter.py | upenderadepu/crimson | 108 | 98395 |
# coding: utf-8
#
#
### CREATED BY KARMAZ
#
### FUNCTIONS
#
# 1. CHECK IF X-Original-Url AND X-Rewrite-Url IS HANDLED BY THE SERVER
#
###
import sys, getopt, requests, urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
from tqdm import tqdm
### OPTIONS ---
argument_list = sys.argv[1:]
short_options = "w:c:H:o:h"
long_options = ["wordlist", "cookies", "header", "output", "help"]
try:
arguments, values = getopt.getopt(argument_list, short_options, long_options)
except getopt.error as err:
print(err)
sys.exit(2)
### --- (They will be iterated at the bottom of the screen ---
def helper():
'''Print usage in case of wrong options or arguments being used'''
print("""\033[0;31m
βββββββ βββββββββββ ββββββββββ βββββββββββββββββββββββββββ
βββββββββββββββββββ βββββββββββββββββββββββββββββββββββββββ
ββββββββββββββ βββ ββ ββββββββββββββ βββ ββββββ ββββββββ
ββββββββββββββ βββββββββββββββββββββ βββ ββββββ ββββββββ
βββ ββββββββββββββββββββββββ ββββββ βββ βββββββββββ βββ
βββ βββββββββββ ββββββββ βββ ββββββ βββ βββββββββββ βββ\033[0m""")
print("\nUSAGE: python crimson_rewriter.py -w [wordlist_with_urls] -H [headers] -c [Cookie: a=1;] -h [show_help]")
def load_wordlist(wordlist_filepath):
'''Importing wordlist line by line into an array'''
with open(wordlist_filepath) as f:
        new_wordlist = [line.strip() for line in f]
return new_wordlist
def import_cookies(cookie):
    '''Import cookies from a header, e.g. "Cookie: auth1=qwe; auth2=asd;" '''
cookies = {}
#cookie_header = cookie.split(":")[0]
cookie_values = cookie.split(":")[1].split(";")[:-1]
for q in cookie_values:
cookies.update(dict([q.lstrip().split("=")]))
return cookies
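
# Worked example (added for clarity, not part of the original script): the header is
# split after the first ':' and then on ';', so a trailing ';' is required or the last
# name=value pair is dropped by the [:-1] slice above.
assert import_cookies('Cookie: auth1=qwe; auth2=asd;') == {'auth1': 'qwe', 'auth2': 'asd'}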
def rewriter_check(urls,headers,cookies):
output_list = []
print("\033[0;31m [+]\033[0m REWRITER PROGRESS")
for url in tqdm(urls):
try:
r1 = requests.get(url.rstrip(), verify=False)
r2 = requests.get(url.rstrip(), verify=False, headers={'X-Original-Url':'/doesnotextist123'})
r3 = requests.get(url.rstrip(), verify=False, headers={'X-Rewrite-Url':'/doesnotexist321'})
if r1.status_code != r2.status_code:
output_list.append("[+] ORIGINAL HEADER FOUND: " + r2.url)
elif r1.status_code != r3.status_code:
output_list.append("[+] REWRITE HEADER FOUND: " + r1.url)
except KeyboardInterrupt:
sys.exit(0)
except:
pass
return output_list
def logs_saver(logs_list, logs_name):
with open(logs_name, 'w') as f:
for log in logs_list:
            print(log, file=f)
### OPTIONS ---
headers = {}
cookies ={}
show_help = False
try: logs_name
except NameError: logs_name = None
for current_argument, current_value in arguments:
if current_argument in ("-w", "--wordlist"):
list_of_urls = current_value
elif current_argument in ("-c", "--cookies"):
cookies = current_value
elif current_argument in ("-H", "--header"):
headers.update([current_value.split("=")])
elif current_argument in ("-o", "--output"):
output = current_value
elif current_argument in ("-h", "--help"):
show_help = True
### MAIN
if __name__ == '__main__':
if show_help:
helper()
else:
urls = load_wordlist(list_of_urls)
if cookies:
cookies = import_cookies(cookies)
output_list = rewriter_check(urls, headers, cookies)
if logs_name is not None:
logs_saver(output_list, logs_name)
else:
for element in output_list:
print(element.rstrip())
|
examples/volumetric/streamlines2.py | hadivafaii/vedo | 836 | 98396 |
"""Load an existing vtkStructuredGrid and draw
the streamlines of the velocity field"""
from vedo import *
######################## vtk
import vtk
# Read the data and specify which scalars and vectors to read.
pl3d = vtk.vtkMultiBlockPLOT3DReader()
fpath = download(dataurl+"combxyz.bin")
pl3d.SetXYZFileName(fpath)
fpath = download(dataurl+"combq.bin")
pl3d.SetQFileName(fpath)
pl3d.SetScalarFunctionNumber(100)
pl3d.SetVectorFunctionNumber(202)
pl3d.Update()
# this vtkStructuredData already has a vector field:
domain = pl3d.GetOutput().GetBlock(0)
######################## vedo
probe= Grid(pos=[9,0,30], normal=[1,0,0], sx=5, sy=5, resx=6, resy=6)
stream = streamLines(domain, probe, direction='backwards')
box = Mesh(domain).alpha(0.1)
show(stream, probe, box, __doc__, axes=7, bg='bb').close()
|
api/conftest.py | SolidStateGroup/Bullet-Train-API | 126 | 98430 |
import pytest
from django.core.cache import cache
from rest_framework.test import APIClient
from environments.identities.models import Identity
from environments.identities.traits.models import Trait
from environments.models import Environment
from features.feature_types import MULTIVARIATE
from features.models import Feature
from features.multivariate.models import MultivariateFeatureOption
from features.value_types import STRING
from organisations.models import Organisation, OrganisationRole
from projects.models import Project
from segments.models import EQUAL, Condition, Segment, SegmentRule
from users.models import FFAdminUser
trait_key = "key1"
trait_value = "value1"
@pytest.fixture()
def admin_client(admin_user):
client = APIClient()
client.force_authenticate(user=admin_user)
return client
@pytest.fixture()
def organisation(db, admin_user):
org = Organisation.objects.create(name="Test Org")
admin_user.add_organisation(org, role=OrganisationRole.ADMIN)
return org
@pytest.fixture()
def project(organisation):
return Project.objects.create(name="Test Project", organisation=organisation)
@pytest.fixture()
def environment(project):
return Environment.objects.create(name="Test Environment", project=project)
@pytest.fixture()
def identity(environment):
return Identity.objects.create(identifier="test_identity", environment=environment)
@pytest.fixture()
def trait(identity):
return Trait.objects.create(
identity=identity, trait_key=trait_key, string_value=trait_value
)
@pytest.fixture()
def multivariate_feature(project):
feature = Feature.objects.create(
name="feature", project=project, type=MULTIVARIATE, initial_value="control"
)
for percentage_allocation in (30, 30, 40):
string_value = f"multivariate option for {percentage_allocation}% of users."
MultivariateFeatureOption.objects.create(
feature=feature,
default_percentage_allocation=percentage_allocation,
type=STRING,
string_value=string_value,
)
return feature
@pytest.fixture()
def identity_matching_segment(project, trait):
segment = Segment.objects.create(name="Matching segment", project=project)
matching_rule = SegmentRule.objects.create(
segment=segment, type=SegmentRule.ALL_RULE
)
Condition.objects.create(
rule=matching_rule,
property=trait.trait_key,
operator=EQUAL,
value=trait.trait_value,
)
return segment
@pytest.fixture()
def api_client():
return APIClient()
@pytest.fixture()
def feature(project, environment):
return Feature.objects.create(name="Test Feature1", project=project)
@pytest.fixture()
def user_password():
return FFAdminUser.objects.make_random_password()
@pytest.fixture()
def reset_cache():
# https://groups.google.com/g/django-developers/c/zlaPsP13dUY
# TL;DR: Use this if your test interacts with cache since django
# does not clear cache after every test
cache.clear()
yield
cache.clear()
|
tests/trac/trac-0196/check.py | eLBati/pyxb | 123 | 98435 |
# -*- coding: utf-8 -*-
from __future__ import print_function
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import unittest
import qq0196 as qq
import qu0196 as qu
import uq0196 as uq
import uu0196 as uu
import mix
import pyxb
from pyxb.utils.domutils import BindingDOMSupport
from pyxb.utils import six
BindingDOMSupport.DeclareNamespace(qq.Namespace, 'qq')
BindingDOMSupport.DeclareNamespace(qu.Namespace, 'qu')
BindingDOMSupport.DeclareNamespace(uq.Namespace, 'uq')
BindingDOMSupport.DeclareNamespace(uu.Namespace, 'uu')
BindingDOMSupport.DeclareNamespace(mix.Namespace, 'mix')
qq_bds = BindingDOMSupport(default_namespace=qq.Namespace)
elt_kw = {
'te' : 'te',
'teq' : 'teq',
'teu' : 'teu',
'e' : 'e',
'eq' : 'eq',
'eu' : 'eu',
'a' : 'a',
'aq' : 'aq',
'au' : 'au',
'ta' : 'ta',
'taq' : 'taq',
'tau' : 'tau' }
qq_i = qq.elt(**elt_kw)
qu_i = qu.elt(**elt_kw)
uq_i = uq.elt(**elt_kw)
uu_i = uu.elt(**elt_kw)
i = mix.elt(qq_i, qu_i, uq_i, uu_i)
try:
print(i.toDOM().toprettyxml())
except pyxb.ValidationError as e:
print(e.details())
raise
i = mix.uue(a='a')
print(i.toxml('utf-8'))
class TestTrac0196 (unittest.TestCase):
module_map = { qq : ( qq.Namespace, qq.Namespace ),
qu : ( qu.Namespace, None ),
uq : ( None, uq.Namespace ),
uu : ( None, None ) }
global_a = ( 'a', 'aq', 'au' )
global_e = ('e', 'eq', 'eu' )
local_a = ( 'ta', 'taq', 'tau' )
local_e = ('te', 'teq', 'teu' )
def testQualified (self):
# Top-level declarations are qualified regardless of presence/absence of form attribute.
# Internal declarations follow form attribute or schema default
for (m, ( efd, afd )) in six.iteritems(self.module_map):
for (n, d) in six.iteritems(m.t._AttributeMap):
if n.localName() in ('a', 'au', 'aq'):
self.assertEqual(n.namespace(), m.Namespace)
elif 'taq' == n.localName():
self.assertEqual(n.namespace(), m.Namespace)
elif 'tau' == n.localName():
self.assertEqual(n.namespace(), None)
elif 'ta' == n.localName():
self.assertEqual(n.namespace(), afd)
else:
                    self.fail()
for (n, d) in six.iteritems(m.t._ElementMap):
if n.localName() in ('e', 'eu', 'eq'):
self.assertEqual(n.namespace(), m.Namespace)
elif 'teq' == n.localName():
self.assertEqual(n.namespace(), m.Namespace)
elif 'teu' == n.localName():
self.assertEqual(n.namespace(), None)
elif 'te' == n.localName():
self.assertEqual(n.namespace(), efd)
else:
                    self.fail()
if __name__ == '__main__':
unittest.main()
|
homeassistant/components/radio_browser/__init__.py | MrDelik/core | 30,023 | 98437 |
<reponame>MrDelik/core<filename>homeassistant/components/radio_browser/__init__.py
"""The Radio Browser integration."""
from __future__ import annotations
from radios import RadioBrowser, RadioBrowserError
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import __version__
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import DOMAIN
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Radio Browser from a config entry.
    This integration doesn't set up any entities, as it provides a media source
only.
"""
session = async_get_clientsession(hass)
radios = RadioBrowser(session=session, user_agent=f"HomeAssistant/{__version__}")
try:
await radios.stats()
except RadioBrowserError as err:
raise ConfigEntryNotReady("Could not connect to Radio Browser API") from err
hass.data[DOMAIN] = radios
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
del hass.data[DOMAIN]
return True
|
preprocess/crop_video_sequences.py | ashish-roopan/fsgan | 599 | 98503 |
import os
import sys
import pickle
from tqdm import tqdm
import numpy as np
import cv2
from fsgan.utils.bbox_utils import scale_bbox, crop_img
from fsgan.utils.video_utils import Sequence
def main(input_path, output_dir=None, cache_path=None, seq_postfix='_dsfd_seq.pkl', resolution=256, crop_scale=2.0,
select='all', disable_tqdm=False, encoder_codec='avc1'):
cache_path = os.path.splitext(input_path)[0] + seq_postfix if cache_path is None else cache_path
if output_dir is None:
output_dir = os.path.splitext(input_path)[0]
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
# Verification
if not os.path.isfile(input_path):
raise RuntimeError('Input video does not exist: ' + input_path)
if not os.path.isfile(cache_path):
raise RuntimeError('Cache file does not exist: ' + cache_path)
if not os.path.isdir(output_dir):
raise RuntimeError('Output directory does not exist: ' + output_dir)
print('=> Cropping video sequences from video: "%s"...' % os.path.basename(input_path))
# Load sequences from file
with open(cache_path, "rb") as fp: # Unpickling
seq_list = pickle.load(fp)
# Select sequences
if select == 'longest':
selected_seq_index = np.argmax([len(s) for s in seq_list])
seq = seq_list[selected_seq_index]
seq.id = 0
seq_list = [seq]
# Open input video file
cap = cv2.VideoCapture(input_path)
if not cap.isOpened():
raise RuntimeError('Failed to read video: ' + input_path)
total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
fps = cap.get(cv2.CAP_PROP_FPS)
input_vid_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
input_vid_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
# For each sequence initialize output video file
out_vids = []
fourcc = cv2.VideoWriter_fourcc(*encoder_codec)
for seq in seq_list:
curr_vid_name = os.path.splitext(os.path.basename(input_path))[0] + '_seq%02d.mp4' % seq.id
curr_vid_path = os.path.join(output_dir, curr_vid_name)
out_vids.append(cv2.VideoWriter(curr_vid_path, fourcc, fps, (resolution, resolution)))
# For each frame in the target video
cropped_detections = [[] for seq in seq_list]
cropped_landmarks = [[] for seq in seq_list]
pbar = range(total_frames) if disable_tqdm else tqdm(range(total_frames), file=sys.stdout)
for i in pbar:
ret, frame = cap.read()
if frame is None:
continue
# For each sequence
for s, seq in enumerate(seq_list):
if i < seq.start_index or (seq.start_index + len(seq) - 1) < i:
continue
det = seq[i - seq.start_index]
# Crop frame
bbox = np.concatenate((det[:2], det[2:] - det[:2]))
bbox = scale_bbox(bbox, crop_scale)
frame_cropped = crop_img(frame, bbox)
frame_cropped = cv2.resize(frame_cropped, (resolution, resolution), interpolation=cv2.INTER_CUBIC)
# Write cropped frame to output video
out_vids[s].write(frame_cropped)
# Add cropped detection to list
orig_size = bbox[2:]
axes_scale = np.array([resolution, resolution]) / orig_size
det[:2] -= bbox[:2]
det[2:] -= bbox[:2]
det[:2] *= axes_scale
det[2:] *= axes_scale
cropped_detections[s].append(det)
# Add cropped landmarks to list
if hasattr(seq, 'landmarks'):
curr_landmarks = seq.landmarks[i - seq.start_index]
curr_landmarks[:, :2] -= bbox[:2]
# 3D landmarks case
if curr_landmarks.shape[1] == 3:
axes_scale = np.append(axes_scale, axes_scale.mean())
curr_landmarks *= axes_scale
cropped_landmarks[s].append(curr_landmarks)
# For each sequence write cropped sequence to file
for s, seq in enumerate(seq_list):
# seq.detections = np.array(cropped_detections[s])
# if hasattr(seq, 'landmarks'):
# seq.landmarks = np.array(cropped_landmarks[s])
# seq.start_index = 0
# TODO: this is a hack to change class type (remove this later)
out_seq = Sequence(0)
out_seq.detections = np.array(cropped_detections[s])
if hasattr(seq, 'landmarks'):
out_seq.landmarks = np.array(cropped_landmarks[s])
out_seq.id, out_seq.obj_id, out_seq.size_avg = seq.id, seq.obj_id, seq.size_avg
# Write to file
curr_out_name = os.path.splitext(os.path.basename(input_path))[0] + '_seq%02d%s' % (out_seq.id, seq_postfix)
curr_out_path = os.path.join(output_dir, curr_out_name)
with open(curr_out_path, "wb") as fp: # Pickling
pickle.dump([out_seq], fp)
if __name__ == "__main__":
# Parse program arguments
import argparse
parser = argparse.ArgumentParser('crop_video_sequences')
parser.add_argument('input', metavar='VIDEO',
help='path to input video')
parser.add_argument('-o', '--output', metavar='DIR',
help='output directory')
parser.add_argument('-c', '--cache', metavar='PATH',
help='path to sequence cache file')
parser.add_argument('-sp', '--seq_postfix', default='_dsfd_seq.pkl', metavar='POSTFIX',
help='input sequence file postfix')
parser.add_argument('-r', '--resolution', default=256, type=int, metavar='N',
help='output video resolution (default: 256)')
parser.add_argument('-cs', '--crop_scale', default=2.0, type=float, metavar='F',
help='crop scale relative to bounding box (default: 2.0)')
parser.add_argument('-s', '--select', default='all', metavar='STR',
help='selection method [all|longest]')
parser.add_argument('-dt', '--disable_tqdm', dest='disable_tqdm', action='store_true',
help='if specified disables tqdm progress bar')
parser.add_argument('-ec', '--encoder_codec', default='avc1', metavar='STR',
help='encoder codec code')
args = parser.parse_args()
main(args.input, args.output, args.cache, args.seq_postfix, args.resolution, args.crop_scale, args.select,
args.disable_tqdm, args.encoder_codec)
|
Day3/Python/hamming.py | Grace0Hud/dailycodebase | 249 | 98507 |
"""
@author : udisinghania
@date : 24/12/2018
"""
string1 = input()
string2 = input()
a = 0
if len(string1)!=len(string2):
print("Strings are of different length")
else:
for i in range (len(string1)):
if string1[i]!=string2[i]:
a+=1
print(a)
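
# Note (added for clarity): the loop above computes the Hamming distance, i.e. the
# number of positions at which the two equal-length strings differ. An equivalent
# one-liner would be: sum(c1 != c2 for c1, c2 in zip(string1, string2))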
|
networkx/algorithms/centrality/tests/test_second_order_centrality.py | rakschahsa/networkx | 445 | 98524 |
"""
Tests for second order centrality.
"""
import networkx as nx
from nose import SkipTest
from nose.tools import raises, assert_almost_equal
class TestSecondOrderCentrality(object):
numpy = 1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
@classmethod
def setupClass(cls):
global np
try:
import numpy as np
import scipy
except ImportError:
raise SkipTest('NumPy not available.')
@raises(nx.NetworkXException)
def test_empty(self):
G = nx.empty_graph()
nx.second_order_centrality(G)
@raises(nx.NetworkXException)
def test_non_connected(self):
G = nx.Graph()
G.add_node(0)
G.add_node(1)
nx.second_order_centrality(G)
@raises(nx.NetworkXException)
def test_non_negative_edge_weights(self):
G = nx.path_graph(2)
G.add_edge(0, 1, weight=-1)
nx.second_order_centrality(G)
def test_one_node_graph(self):
"""Second order centrality: single node"""
G = nx.Graph()
G.add_node(0)
G.add_edge(0, 0)
assert nx.second_order_centrality(G)[0] == 0
def test_P3(self):
"""Second order centrality: line graph, as defined in paper"""
G = nx.path_graph(3)
b_answer = {0: 3.741, 1: 1.414, 2: 3.741}
b = nx.second_order_centrality(G)
for n in sorted(G):
assert_almost_equal(b[n], b_answer[n], places=2)
def test_K3(self):
"""Second order centrality: complete graph, as defined in paper"""
G = nx.complete_graph(3)
b_answer = {0: 1.414, 1: 1.414, 2: 1.414}
b = nx.second_order_centrality(G)
for n in sorted(G):
assert_almost_equal(b[n], b_answer[n], places=2)
def test_ring_graph(self):
"""Second order centrality: ring graph, as defined in paper"""
G = nx.cycle_graph(5)
b_answer = {0: 4.472, 1: 4.472, 2: 4.472,
3: 4.472, 4: 4.472}
b = nx.second_order_centrality(G)
for n in sorted(G):
assert_almost_equal(b[n], b_answer[n], places=2)
|
mindspore_hub/_utils/download.py | mindspore-ai/hub | 153 | 98541 |
<filename>mindspore_hub/_utils/download.py
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Download or extract file."""
import os
import shutil
import zipfile
import tarfile
import re
import subprocess
import hashlib
import errno
import stat
import urllib
from urllib.request import urlretrieve, HTTPError, URLError
from tempfile import TemporaryDirectory
from mindspore_hub.manage import get_hub_dir
REPO_INFO_LEN = 5
REAL_PATH = os.path.split(os.path.realpath(__file__))[0]
SPARSE_SHELL_PATH = os.path.join(REAL_PATH, "sparse_download.sh")
FULL_SHELL_PATH = os.path.join(REAL_PATH, "full_download.sh")
MAX_FILE_SIZE = 5 * 1024 * 1024 * 1024 # 5GB
SUFFIX_LIST = ['.ckpt', '.air', '.geir', '.meta', '.onnx', '.md']
def handle_remove_read_only(func, path, exc):
exc_value = exc[1]
if func in (os.rmdir, os.remove, os.unlink) and exc_value.errno == errno.EACCES:
os.chmod(path, stat.S_IRWXU| stat.S_IRWXG| stat.S_IRWXO) # 0777
func(path)
def url_exist(url):
"""
    Whether the url exists.
"""
opener = urllib.request.build_opener()
opener.addheaders = [('User-Agent', 'Mozilla/5.0')]
try:
opener.open(url)
return True
except HTTPError as e:
print(e.code)
except URLError as e:
print(e.reason)
return False
def _unpacking_targz(input_filename, save_path):
"""
    Unpack the input tar file into the given directory.
"""
try:
t = tarfile.open(input_filename)
t.extractall(path=save_path)
except Exception as e:
raise OSError("Cannot untar file {} for - {}".format(input_filename, e))
def _remove_path_if_exists(path):
if os.path.exists(path):
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path, ignore_errors=False, onerror=handle_remove_read_only)
def _create_path_if_not_exists(path):
if not os.path.exists(path):
if os.path.isfile(path):
os.remove(path)
else:
os.mkdir(path)
def get_repo_info_from_url(git_url):
"""
Get repo information from url.
"""
if git_url is None:
return None
git_url = git_url.strip('<>')
git_url = git_url.rstrip('/')
webs_name = re.findall(".*http[s]?://(.*).com.*", git_url)
if len(webs_name) != 1:
raise ValueError("invalid repo link: {}".format(git_url))
web_name = webs_name[0]
prefix = re.findall(r'http[s]?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+', git_url)
if len(prefix) != 1:
raise ValueError("invalid repo link: {}".format(git_url))
suffix = git_url[len(prefix[0]):].lstrip('/')
repo_info = suffix.split("/", REPO_INFO_LEN-1)
git_info = dict()
git_info["is_repo"] = True
if len(repo_info) == 2:
uid = ':'.join([web_name, repo_info[1], "master"])
git_info["branch"] = "master"
git_info["dst_dir"] = repo_info[1]
elif len(repo_info) == REPO_INFO_LEN - 1:
if repo_info[2] != 'tree' and repo_info[2] != 'blob':
raise ValueError("invalid repo link: {}".format(git_url))
uid = ':'.join([web_name, repo_info[1], repo_info[3]])
git_info["branch"] = repo_info[3]
git_info["dst_dir"] = repo_info[1]
elif len(repo_info) == REPO_INFO_LEN:
if repo_info[2] != 'tree' and repo_info[2] != 'blob':
raise ValueError("invalid repo link: {}".format(git_url))
uid = ':'.join([web_name, repo_info[4], repo_info[3]])
git_info["branch"] = repo_info[3]
git_info["dst_dir"] = repo_info[4]
git_info["is_repo"] = False
else:
raise ValueError("invalid repo link: {}".format(git_url))
git_info["web"] = prefix[0]
git_info["group"] = repo_info[0]
git_info["repo"] = repo_info[1]
git_ssh = '/'.join([git_info["web"], git_info["group"], git_info["repo"]])
git_ssh = ''.join([git_ssh, ".git"])
git_info["git_ssh"] = git_ssh
git_info["uid"] = uid
return git_info
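
# Worked example (added for illustration; the URL is only a sample):
#   get_repo_info_from_url('https://gitee.com/mindspore/hub/tree/master')
# returns
#   {'is_repo': True, 'branch': 'master', 'dst_dir': 'hub',
#    'web': 'https://gitee.com', 'group': 'mindspore', 'repo': 'hub',
#    'git_ssh': 'https://gitee.com/mindspore/hub.git', 'uid': 'gitee:hub:master'},
# i.e. a "web/group/repo[/tree/<branch>]" link is reduced to the pieces the download
# shell scripts need.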
def _download_repo_from_url(url, path=get_hub_dir()):
"""
    Download file from url.
Args:
url (str): A url to download file.
path (str): A path to store download file.
Returns:
        bool, whether the file was downloaded successfully.
"""
_create_path_if_not_exists(path)
repo_infos = get_repo_info_from_url(url)
arg = dict()
arg["bash"] = "bash"
arg["git_ssh"] = repo_infos["git_ssh"]
arg["path"] = path
arg["model_path"] = repo_infos["dst_dir"]
arg["branch"] = repo_infos["branch"]
is_repo = repo_infos["is_repo"]
with TemporaryDirectory() as git_dir:
arg["git_dir"] = git_dir
# is repo or dir of repo
if is_repo:
arg["shell_path"] = FULL_SHELL_PATH
else:
arg["shell_path"] = SPARSE_SHELL_PATH
cmd = [arg["bash"], arg["shell_path"], arg["git_dir"], arg["path"],
arg["model_path"], arg["git_ssh"], arg["branch"]]
out = subprocess.check_output(cmd, shell=False)
ret = "succeed" in out.decode('utf-8')
return ret
def extract_file(file_path, dst):
"""
    Extract file to the specified path.
Args:
file_path (str): The path of compressed file.
dst (str): The target path.
"""
name = None
if zipfile.is_zipfile(file_path):
with zipfile.ZipFile(file_path) as cached_zipfile:
cached_zipfile.extractall(dst)
name = cached_zipfile.infolist()[0].filename
if tarfile.is_tarfile(file_path):
        with tarfile.open(file_path) as cached_tarfile:
            cached_tarfile.extractall(dst)
            name = cached_tarfile.getnames()[0]
if isinstance(name, str):
path = os.path.join(dst, name)
if os.path.isdir(path):
files = os.listdir(path)
for item in files:
                shutil.move(os.path.join(path, item), dst)
shutil.rmtree(path)
def _download_file_from_url(url, hash_sha256=None, save_path=get_hub_dir()):
"""
    Download checkpoint weights from the given url.
Args:
url(string): checkpoint url path.
hash_sha256(string): checkpoint file sha256.
save_path(string): checkpoint download save path.
Returns:
        str, the path of the downloaded file.
"""
def reporthook(a, b, c):
percent = a * b * 100.0 / c
percent = 100 if percent > 100 else percent
if c > 0:
print("\rDownloading...%5.1f%%" % percent, end="")
def sha256sum(file_name, hash_sha256):
fp = open(file_name, 'rb')
content = fp.read()
fp.close()
m = hashlib.sha256()
m.update(content)
download_sha256 = m.hexdigest()
return download_sha256 == hash_sha256
_create_path_if_not_exists(os.path.realpath(save_path))
ckpt_name = os.path.basename(url.split("/")[-1])
# identify file exist or not
file_path = os.path.join(save_path, ckpt_name)
if os.path.isfile(file_path):
if hash_sha256 and sha256sum(file_path, hash_sha256):
print('File already exists!')
return file_path
print('File already exists, but sha256 checking failed. Will download again')
_remove_path_if_exists(file_path)
# download the checkpoint file
print('Downloading data from url {}'.format(url))
try:
opener = urllib.request.build_opener()
opener.addheaders = [('User-Agent', 'Mozilla/5.0')]
urllib.request.install_opener(opener)
urlretrieve(url, file_path, reporthook=reporthook)
except HTTPError as e:
raise Exception(e.code, e.msg, url)
except URLError as e:
raise Exception(e.errno, e.reason, url)
print('\nDownload finished!')
# Check file integrity
if hash_sha256:
result = sha256sum(file_path, hash_sha256)
if not result:
raise Exception('INTEGRITY ERROR: File: {} is not integral'.format(file_path))
# Check file size
# Get file size and turn the file size to Mb format
file_size = os.path.getsize(file_path)
print('File size = %.2f Mb' % (file_size / 1024 / 1024))
# Start check
if file_size > MAX_FILE_SIZE:
os.remove(file_path)
        raise Exception('SIZE ERROR: Download file is too large, '
                        'the max size is {}Mb'.format(MAX_FILE_SIZE / 1024 / 1024))
# Check file type
suffix = os.path.splitext(file_path)[1]
if suffix not in SUFFIX_LIST:
os.remove(file_path)
raise Exception('SUFFIX ERROR: File: {} with Suffix: {} '
'can not be recognized'.format(file_path, suffix))
return file_path
|
collection/models.py | mihaivavram/twitterbots | 141 | 98548 |
from sqlalchemy import (create_engine, Column, Integer, String, DateTime,
Boolean, BigInteger)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, scoped_session
from datetime import datetime
import os
"""A cache for storing account details as they are fetched to help deduplicate
results.
"""
engine = create_engine(os.environ.get('DB_PATH', 'sqlite:///twitter.db'))
Base = declarative_base()
session_factory = sessionmaker(bind=engine)
Session = scoped_session(session_factory)
class Account(Base):
"""A minimal representation of a Twitter account.
    This model is used to store account IDs as they are found to help make
sure we don't request details for the same account twice.
"""
__tablename__ = 'accounts'
id = Column(BigInteger, primary_key=True)
id_str = Column(String(255))
screen_name = Column(String(255))
created_date = Column(DateTime)
found_date = Column(DateTime, default=datetime.now)
fetched_tweets_date = Column(DateTime)
fetched_tweets = Column(Boolean, default=False)
protected = Column(Boolean)
tweet_count = Column(Integer)
source = Column(String(1024))
language = Column(String(32))
@classmethod
def from_dict(cls, account):
"""Loads an account from a valid JSON dict returned from the Twitter API
        Arguments:
            account {dict} -- The JSON formatted User object from the Twitter
                API, with the profile source (e.g. "tweets", "enum", etc.)
                attached under the '_tbsource' key
Returns:
cache.Account -- The Account instance representing this user
"""
return Account(
id=account.get('id'),
id_str=account.get('id_str'),
screen_name=account.get('screen_name'),
created_date=datetime.strptime(
account.get('created_at'), '%a %b %d %H:%M:%S %z %Y'),
protected=account.get('protected'),
tweet_count=account.get('statuses_count'),
language=account.get('lang'),
source=account.get('_tbsource'))
@classmethod
def from_tweepy(cls, account):
return Account(
id=account.id,
id_str=account.id_str,
screen_name=account.screen_name,
            created_date=account.created_at,
protected=account.protected,
tweet_count=account.statuses_count,
language=account.lang,
source=account._tbsource)
@classmethod
def exists(cls, id):
return Session.query(Account).get(id) is not None
def summary_dict(self):
return {
'id': self.id,
'id_str': self.id_str,
'screen_name': self.screen_name
}
def save(self, commit=True):
"""Saves an account to the database
Keyword Arguments:
            commit {bool} -- Whether or not to commit the session after adding (default: {True})
        Returns:
            Account -- The saved account instance (self)
"""
Session.add(self)
if commit:
Session.commit()
return self
Base.metadata.create_all(engine)
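
# Hedged usage sketch (added for illustration; the dict fields below are assumptions).
# Intended flow: skip accounts already cached, otherwise build an Account from the
# Twitter API dict (which must carry the extra '_tbsource' key) and persist it.
def _example_cache_account(raw_user):
    if not Account.exists(raw_user['id']):
        Account.from_dict(raw_user).save()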
|
media/tools/constrained_network_server/traffic_control_test.py | zealoussnow/chromium | 14,668 | 98554 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""End-to-end tests for traffic control library."""
import os
import re
import sys
import unittest
import traffic_control
class TrafficControlTests(unittest.TestCase):
"""System tests for traffic_control functions.
These tests require root access.
"""
# A dummy interface name to use instead of real interface.
_INTERFACE = 'myeth'
def setUp(self):
"""Setup a dummy interface."""
# If we update to python version 2.7 or newer we can use setUpClass() or
# unittest.skipIf().
if os.getuid() != 0:
sys.exit('You need root access to run these tests.')
command = ['ip', 'link', 'add', 'name', self._INTERFACE, 'type', 'dummy']
traffic_control._Exec(command, 'Error creating dummy interface %s.' %
self._INTERFACE)
def tearDown(self):
"""Teardown the dummy interface and any network constraints on it."""
# Deleting the dummy interface deletes all associated constraints.
command = ['ip', 'link', 'del', self._INTERFACE]
traffic_control._Exec(command)
def testExecOutput(self):
output = traffic_control._Exec(['echo', ' Test '])
self.assertEqual(output, 'Test')
def testExecException(self):
self.assertRaises(traffic_control.TrafficControlError,
traffic_control._Exec, command=['ls', '!doesntExist!'])
def testExecErrorCustomMsg(self):
try:
traffic_control._Exec(['ls', '!doesntExist!'], msg='test_msg')
self.fail('No exception raised for invalid command.')
except traffic_control.TrafficControlError as e:
self.assertEqual(e.msg, 'test_msg')
def testAddRootQdisc(self):
"""Checks adding a root qdisc is successful."""
config = {'interface': self._INTERFACE}
root_detail = 'qdisc htb 1: root'
# Assert no htb root at startup.
command = ['tc', 'qdisc', 'ls', 'dev', config['interface']]
output = traffic_control._Exec(command)
self.assertFalse(root_detail in output)
traffic_control._AddRootQdisc(config['interface'])
output = traffic_control._Exec(command)
# Assert htb root is added.
self.assertTrue(root_detail in output)
def testConfigureClassAdd(self):
"""Checks adding and deleting a class to the root qdisc."""
config = {
'interface': self._INTERFACE,
'port': 12345,
'server_port': 33333,
'bandwidth': 2000
}
class_detail = ('class htb 1:%x root prio 0 rate %dKbit ceil %dKbit' %
(config['port'], config['bandwidth'], config['bandwidth']))
# Add root qdisc.
traffic_control._AddRootQdisc(config['interface'])
# Assert class does not exist prior to adding it.
command = ['tc', 'class', 'ls', 'dev', config['interface']]
output = traffic_control._Exec(command)
self.assertFalse(class_detail in output)
# Add class to root.
traffic_control._ConfigureClass('add', config)
# Assert class is added.
command = ['tc', 'class', 'ls', 'dev', config['interface']]
output = traffic_control._Exec(command)
self.assertTrue(class_detail in output)
# Delete class.
traffic_control._ConfigureClass('del', config)
# Assert class is deleted.
command = ['tc', 'class', 'ls', 'dev', config['interface']]
output = traffic_control._Exec(command)
self.assertFalse(class_detail in output)
def testAddSubQdisc(self):
"""Checks adding a sub qdisc to existing class."""
config = {
'interface': self._INTERFACE,
'port': 12345,
'server_port': 33333,
'bandwidth': 2000,
'latency': 250,
'loss': 5
}
qdisc_re_detail = ('qdisc netem %x: parent 1:%x .* delay %d.0ms loss %d%%' %
(config['port'], config['port'], config['latency'],
config['loss']))
# Add root qdisc.
traffic_control._AddRootQdisc(config['interface'])
# Add class to root.
traffic_control._ConfigureClass('add', config)
# Assert qdisc does not exist prior to adding it.
command = ['tc', 'qdisc', 'ls', 'dev', config['interface']]
output = traffic_control._Exec(command)
handle_id_re = re.search(qdisc_re_detail, output)
self.assertEqual(handle_id_re, None)
# Add qdisc to class.
traffic_control._AddSubQdisc(config)
# Assert qdisc is added.
command = ['tc', 'qdisc', 'ls', 'dev', config['interface']]
output = traffic_control._Exec(command)
handle_id_re = re.search(qdisc_re_detail, output)
self.assertNotEqual(handle_id_re, None)
def testAddDeleteFilter(self):
config = {
'interface': self._INTERFACE,
'port': 12345,
'bandwidth': 2000
}
# Assert no filter exists.
command = ['tc', 'filter', 'list', 'dev', config['interface'], 'parent',
'1:0']
output = traffic_control._Exec(command)
self.assertEqual(output, '')
# Create the root and class to which the filter will be attached.
# Add root qdisc.
traffic_control._AddRootQdisc(config['interface'])
# Add class to root.
traffic_control._ConfigureClass('add', config)
# Add the filter.
traffic_control._AddFilter(config['interface'], config['port'])
handle_id = traffic_control._GetFilterHandleId(config['interface'],
config['port'])
self.assertNotEqual(handle_id, None)
# Delete the filter.
# The output of tc filter list is not None because tc adds default filters.
traffic_control._DeleteFilter(config['interface'], config['port'])
self.assertRaises(traffic_control.TrafficControlError,
traffic_control._GetFilterHandleId, config['interface'],
config['port'])
if __name__ == '__main__':
unittest.main()
|
artemis/plotting/fast.py | peteroconnor-bc/artemis | 235 | 98561 |
<reponame>peteroconnor-bc/artemis
try:
from scipy import weave
except ImportError:
print("Cannot Import scipy weave. That's ok for now, you won't be able to use the fastplot function.")
__author__ = 'peter'
import numpy as np
import matplotlib.pyplot as plt
"""
Functions to speed up pylab plotting, which can sometimes be unnecessarily slow.
"""
def fastplot(line_data, xscale = 'linear', yscale = 'linear', resolution = 2000, min_points = 20000):
"""
Fast plot for those times when you have a lot of data points and it becomes too slow for pylab to handle.
The plot produced here should look the same (unless you zoom) but display much faster.
:param line_data: A vector of data
:param xscale: {'linear', 'log'}
:param yscale: {'linear', 'log'}
    :param resolution: The number of intervals to bin points into
:param min_points: The minimum number of points required to bother with this approach.
:return: A plot handle
"""
assert line_data.ndim == 1
if len(line_data) < min_points:
h= plt.plot(line_data)
else:
if xscale == 'linear':
intervals = np.linspace(0, len(line_data), resolution)
elif xscale == 'log':
intervals = np.logspace(0, np.log10(len(line_data)), resolution)
else:
raise Exception("Can't yet deal with scale %s" % xscale)
extreme_indices = find_interval_extremes(line_data, edges = intervals[1:])
h=plt.plot(extreme_indices, line_data[extreme_indices])
plt.gca().set_xscale(xscale)
plt.gca().set_yscale(yscale)
return h
def fastloglog(line_data, **kwargs):
return fastplot(line_data, xscale='log', yscale = 'symlog', **kwargs)
def find_interval_extremes(array, edges):
"""
    Find the indices of extreme points within each interval, and on the outsides of the two end edges.
:param array: A vector
:param edges: A vector of edges defining the intervals. It's assumed that -Inf, Inf form the true outer edges.
    :return: A vector of ints indicating the indices of extreme points. If a distinct min and max extreme are found
within every interval, this vector will have length 2*(len(edges)+1). Otherwise, it will be shorter.
"""
indices = np.zeros(len(array), dtype = int)-1
how_many = np.array([0])
code = """
float min = INFINITY;
float max = -INFINITY;
int argmin;
int argmax;
bool foundpoint = false;
int in_counter = 0;
int out_counter = 0;
int edge_counter = 0;
while(in_counter<Narray[0]){
float next_edge;
if (edge_counter == Nedges[0])
next_edge = INFINITY;
else
next_edge = edges[edge_counter];
if (array[in_counter] < min){
min = array[in_counter];
argmin = in_counter;
foundpoint = true;
}
if (array[in_counter] > max){
max = array[in_counter];
argmax = in_counter;
foundpoint = true;
}
in_counter++;
if (in_counter > next_edge){
if (foundpoint){
if (argmin < argmax){
indices[out_counter] = argmin;
indices[out_counter+1] = argmax;
out_counter+=2;
}
else if (argmax < argmin){
indices[out_counter] = argmax;
indices[out_counter+1] = argmin;
out_counter+=2;
}
else {
indices[out_counter] = argmax;
out_counter++;
}
min = INFINITY;
max = -INFINITY;
foundpoint = false;
}
edge_counter++;
}
}
how_many[0] = out_counter;
"""
weave.inline(code, ['array', 'edges', 'indices', 'how_many'], compiler = 'gcc')
result = indices[:how_many[0]]
return result
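
# Hedged alternative (added note): scipy.weave is Python 2 only and has been removed
# from SciPy, so the inline C above will not run on modern installs. The function below
# is a pure-NumPy sketch of the same idea -- keep only the per-interval extreme points --
# offered as a stand-in, not as the original implementation.
def find_interval_extremes_numpy(array, edges):
    """Return sorted indices of the per-interval minima and maxima of `array`."""
    array = np.asarray(array)
    # Assign every sample index to an interval; `edges` is assumed sorted ascending.
    bins = np.searchsorted(np.asarray(edges), np.arange(len(array)), side='right')
    keep = []
    for b in np.unique(bins):
        members = np.flatnonzero(bins == b)
        lo = members[np.argmin(array[members])]
        hi = members[np.argmax(array[members])]
        keep.append(lo)
        if hi != lo:
            keep.append(hi)
    return np.sort(np.array(keep, dtype=int))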
|
tests/test_cli.py | pgjones/quart | 1,085 | 98606 |
<gh_stars>1000+
from __future__ import annotations
import code
import os
from unittest.mock import Mock
import pytest
from _pytest.monkeypatch import MonkeyPatch
from click.testing import CliRunner
import quart.cli
from quart.app import Quart
from quart.cli import __version__, AppGroup, cli, NoAppException, ScriptInfo
@pytest.fixture(scope="module")
def reset_env() -> None:
os.environ.pop("QUART_ENV", None)
os.environ.pop("QUART_DEBUG", None)
@pytest.fixture(name="app")
def loadable_app(monkeypatch: MonkeyPatch) -> Mock:
app = Mock(spec=Quart)
app.cli = AppGroup()
module = Mock()
module.app = app
monkeypatch.setattr(quart.cli, "import_module", lambda _: module)
return app
@pytest.fixture(name="dev_app")
def loadable_dev_app(app: Mock) -> Mock:
    app.env = "development"
app.debug = True
return app
@pytest.fixture(name="dev_env")
def dev_env_patch(monkeypatch: MonkeyPatch) -> None:
monkeypatch.setenv("QUART_ENV", "development")
@pytest.fixture(name="debug_env")
def debug_env_patch(monkeypatch: MonkeyPatch) -> None:
monkeypatch.setenv("QUART_DEBUG", "true")
@pytest.fixture(name="no_debug_env")
def no_debug_env_patch(monkeypatch: MonkeyPatch) -> None:
monkeypatch.setenv("QUART_DEBUG", "false")
def test_script_info_load_app(app: Mock) -> None:
info = ScriptInfo("module:app")
assert info.load_app() == app
def test_script_info_load_app_no_app(app: Mock) -> None:
info = ScriptInfo(None)
os.environ.pop("QUART_APP", None)
with pytest.raises(NoAppException):
info.load_app()
def test_version_command() -> None:
runner = CliRunner()
result = runner.invoke(cli, ["--version"])
assert str(__version__) in result.output
def test_shell_command(app: Mock, monkeypatch: MonkeyPatch) -> None:
runner = CliRunner()
interact = Mock()
monkeypatch.setattr(code, "interact", interact)
app.make_shell_context.return_value = {}
app.import_name = "test"
os.environ["QUART_APP"] = "module:app"
runner.invoke(cli, ["shell"])
app.make_shell_context.assert_called_once()
interact.assert_called_once()
def test_run_command(app: Mock) -> None:
runner = CliRunner()
os.environ["QUART_APP"] = "module:app"
runner.invoke(cli, ["run"])
app.run.assert_called_once_with(
debug=False, host="127.0.0.1", port=5000, certfile=None, keyfile=None, use_reloader=True
)
def test_run_command_development(dev_app: Mock, dev_env: None) -> None:
runner = CliRunner()
os.environ["QUART_APP"] = "module:app"
runner.invoke(cli, ["run"])
dev_app.run.assert_called_once_with(
debug=True, host="127.0.0.1", port=5000, certfile=None, keyfile=None, use_reloader=True
)
def test_run_command_development_debug_disabled(
dev_app: Mock, dev_env: None, no_debug_env: None
) -> None:
runner = CliRunner()
os.environ["QUART_APP"] = "module:app"
runner.invoke(cli, ["run"])
dev_app.run.assert_called_once_with(
debug=False, host="127.0.0.1", port=5000, certfile=None, keyfile=None, use_reloader=True
)
|
tests/api/endpoints/admin/test_abuse_reports.py | weimens/seahub | 420 | 98621 |
# -*- coding: utf-8 -*-
import json
from mock import patch, MagicMock
from django.urls import reverse
from seahub.test_utils import BaseTestCase
from seahub.abuse_reports.models import AbuseReport
class AdminAbuseReportsTest(BaseTestCase):
def setUp(self):
self.login_as(self.admin)
self.url = reverse('api-v2.1-admin-abuse-reports')
@patch('seahub.api2.endpoints.admin.abuse_reports.ENABLE_SHARE_LINK_REPORT_ABUSE', MagicMock(return_value=True))
def test_can_get(self):
resp = self.client.get(self.url)
self.assertEqual(200, resp.status_code)
@patch('seahub.api2.endpoints.admin.abuse_reports.ENABLE_SHARE_LINK_REPORT_ABUSE', MagicMock(return_value=True))
def test_no_permission(self):
self.logout()
self.login_as(self.admin_no_other_permission)
resp = self.client.get(self.url)
self.assertEqual(403, resp.status_code)
class AdminAbuseReportTest(BaseTestCase):
def setUp(self):
self.login_as(self.admin)
self.repo = self.repo
self.file_path = self.file
self.url = reverse('api-v2.1-admin-abuse-reports')
def _add_abuse_report(self):
reporter = ''
repo_id = self.repo.id
repo_name = self.repo.name
file_path = self.file_path
abuse_type = 'copyright'
description = ''
report = AbuseReport.objects.add_abuse_report(
reporter, repo_id, repo_name, file_path, abuse_type, description)
return report
def _remove_abuse_report(self, report_id):
report = AbuseReport.objects.get(id=report_id)
report.delete()
@patch('seahub.api2.endpoints.admin.abuse_reports.ENABLE_SHARE_LINK_REPORT_ABUSE', MagicMock(return_value=True))
def test_no_permission(self):
self.logout()
self.login_as(self.admin_no_other_permission)
report = self._add_abuse_report()
data = 'handled=' + str(not report.handled).lower()
resp = self.client.put(self.url + str(report.id) + '/', data, 'application/x-www-form-urlencoded')
self.assertEqual(403, resp.status_code)
@patch('seahub.api2.endpoints.admin.abuse_reports.ENABLE_SHARE_LINK_REPORT_ABUSE', MagicMock(return_value=True))
def test_can_put(self):
report = self._add_abuse_report()
data = 'handled=' + str(not report.handled).lower()
resp = self.client.put(self.url + str(report.id) + '/', data, 'application/x-www-form-urlencoded')
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert json_resp['file_name'] is not None
assert json_resp['time'] is not None
assert json_resp['handled'] == (not report.handled)
assert json_resp['abuse_type'] == report.abuse_type
assert json_resp['description'] == report.description
assert json_resp['id'] == report.id
assert json_resp['reporter'] == report.reporter
assert json_resp['repo_id'] == report.repo_id
assert json_resp['repo_name'] == report.repo_name
assert json_resp['file_path'] == report.file_path
self._remove_abuse_report(report.id)
|
openmdao/test_suite/matrices/dl_matrix.py | friedenhe/OpenMDAO | 451 | 98645 |
<gh_stars>100-1000
"""
Download a Matlab matrix file from sparse.tamu.edu and save it locally.
"""
import sys
import scipy.io
import urllib.request
def download_matfile(group, name, outfile="matrix.out"):
"""
Downloads a matrix file (matlab format) from sparse.tamu.edu and returns the matrix.
"""
with open(outfile, "wb") as f:
url = 'https://sparse.tamu.edu/mat/%s/%s.mat' % (group, name)
print("Downloading", url)
f.write(urllib.request.urlopen(url).read()) # nosec: https, content vetted
dct = scipy.io.loadmat(outfile)
return dct
if __name__ == '__main__':
mat = download_matfile(sys.argv[1], sys.argv[2])
print(mat['Problem'][0][0][0])
|
tests/api/conftest.py | corvust/strawberryfields | 646 | 98651 |
# Copyright 2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test fixtures and shared functions for strawberryfields.api tests
"""
import pytest
from strawberryfields import Program, ops
from strawberryfields.api import Connection
# pylint: disable=expression-not-assigned
@pytest.fixture
def prog():
"""Program fixture."""
program = Program(8)
with program.context as q:
ops.Rgate(0.5) | q[0]
ops.Rgate(0.5) | q[4]
ops.MeasureFock() | q
return program
@pytest.fixture
def connection():
"""A mock connection object."""
return Connection(token="token", host="host", port=123, use_ssl=True)
def mock_return(return_value):
"""A helper function for defining a mock function that returns the given value for
any arguments.
"""
return lambda *args, **kwargs: return_value
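
# Hedged usage sketch (added example; not collected by pytest since this is a conftest):
def _example_mock_return_usage():
    stub = mock_return(42)
    assert stub() == 42                     # returns the fixed value with no args
    assert stub(1, 2, key="value") == 42    # and with arbitrary args/kwargs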
|
configs/mmpose/pose-detection_static.py | xizi/mmdeploy | 746 | 98665 |
_base_ = ['../_base_/onnx_config.py']
codebase_config = dict(type='mmpose', task='PoseDetection')
|
flatdata-generator/flatdata/generator/tree/nodes/explicit_reference.py | gferon/flatdata | 140 | 98730 |
from flatdata.generator.tree.nodes.node import Node
from flatdata.generator.tree.nodes.references import ResourceReference, FieldReference, StructureReference
class ExplicitReference(Node):
def __init__(self, name, properties=None):
super().__init__(name=name, properties=properties)
@staticmethod
def create(properties):
destination = properties.destination
field = Node.jointwo(properties.source_type, properties.source_field)
result = ExplicitReference(
name="er_{field}_{destination}".format(field=field.replace(Node.PATH_SEPARATOR, '_'),
destination=destination.replace(
Node.PATH_SEPARATOR, '_')),
properties=properties)
result.insert(ResourceReference(name=destination))
result.insert(FieldReference(name=field))
result.insert(StructureReference(name=properties.source_type))
return result
@property
def destination(self):
result = self.children_like(ResourceReference)
assert len(result) == 1
return result[0]
@property
def field(self):
result = self.children_like(FieldReference)
assert len(result) == 1
return result[0]
@property
def structure(self):
result = self.children_like(StructureReference)
assert len(result) == 1
return result[0]
|
torchdiffeq/_impl/fixed_adams.py | MaricelaM/torchdiffeq | 4,088 | 98735 |
<reponame>MaricelaM/torchdiffeq
import collections
import sys
import torch
import warnings
from .solvers import FixedGridODESolver
from .misc import _compute_error_ratio, _linf_norm
from .misc import Perturb
from .rk_common import rk4_alt_step_func
_BASHFORTH_COEFFICIENTS = [
[], # order 0
[11],
[3, -1],
[23, -16, 5],
[55, -59, 37, -9],
[1901, -2774, 2616, -1274, 251],
[4277, -7923, 9982, -7298, 2877, -475],
[198721, -447288, 705549, -688256, 407139, -134472, 19087],
[434241, -1152169, 2183877, -2664477, 2102243, -1041723, 295767, -36799],
[14097247, -43125206, 95476786, -139855262, 137968480, -91172642, 38833486, -9664106, 1070017],
[30277247, -104995189, 265932680, -454661776, 538363838, -444772162, 252618224, -94307320, 20884811, -2082753],
[
2132509567, -8271795124, 23591063805, -46113029016, 63716378958, -63176201472, 44857168434, -22329634920,
7417904451, -1479574348, 134211265
],
[
4527766399, -19433810163, 61633227185, -135579356757, 214139355366, -247741639374, 211103573298, -131365867290,
58189107627, -17410248271, 3158642445, -262747265
],
[
13064406523627, -61497552797274, 214696591002612, -524924579905150, 932884546055895, -1233589244941764,
1226443086129408, -915883387152444, 507140369728425, -202322913738370, 55060974662412, -9160551085734,
703604254357
],
[
27511554976875, -140970750679621, 537247052515662, -1445313351681906, 2854429571790805, -4246767353305755,
4825671323488452, -4204551925534524, 2793869602879077, -1393306307155755, 505586141196430, -126174972681906,
19382853593787, -1382741929621
],
[
173233498598849, -960122866404112, 3966421670215481, -11643637530577472, 25298910337081429, -41825269932507728,
53471026659940509, -53246738660646912, 41280216336284259, -24704503655607728, 11205849753515179,
-3728807256577472, 859236476684231, -122594813904112, 8164168737599
],
[
362555126427073, -2161567671248849, 9622096909515337, -30607373860520569, 72558117072259733,
-131963191940828581, 187463140112902893, -210020588912321949, 186087544263596643, -129930094104237331,
70724351582843483, -29417910911251819, 9038571752734087, -1934443196892599, 257650275915823, -16088129229375
],
[
192996103681340479, -1231887339593444974, 5878428128276811750, -20141834622844109630, 51733880057282977010,
-102651404730855807942, 160414858999474733422, -199694296833704562550, 199061418623907202560,
-158848144481581407370, 100878076849144434322, -50353311405771659322, 19338911944324897550,
-5518639984393844930, 1102560345141059610, -137692773163513234, 8092989203533249
],
[
401972381695456831, -2735437642844079789, 13930159965811142228, -51150187791975812900, 141500575026572531760,
-304188128232928718008, 518600355541383671092, -710171024091234303204, 786600875277595877750,
-706174326992944287370, 512538584122114046748, -298477260353977522892, 137563142659866897224,
-49070094880794267600, 13071639236569712860, -2448689255584545196, 287848942064256339, -15980174332775873
],
[
333374427829017307697, -2409687649238345289684, 13044139139831833251471, -51099831122607588046344,
151474888613495715415020, -350702929608291455167896, 647758157491921902292692, -967713746544629658690408,
1179078743786280451953222, -1176161829956768365219840, 960377035444205950813626, -639182123082298748001432,
343690461612471516746028, -147118738993288163742312, 48988597853073465932820, -12236035290567356418552,
2157574942881818312049, -239560589366324764716, 12600467236042756559
],
[
691668239157222107697, -5292843584961252933125, 30349492858024727686755, -126346544855927856134295,
399537307669842150996468, -991168450545135070835076, 1971629028083798845750380, -3191065388846318679544380,
4241614331208149947151790, -4654326468801478894406214, 4222756879776354065593786, -3161821089800186539248210,
1943018818982002395655620, -970350191086531368649620, 387739787034699092364924, -121059601023985433003532,
28462032496476316665705, -4740335757093710713245, 498669220956647866875, -24919383499187492303
],
]
_MOULTON_COEFFICIENTS = [
[], # order 0
[1],
[1, 1],
[5, 8, -1],
[9, 19, -5, 1],
[251, 646, -264, 106, -19],
[475, 1427, -798, 482, -173, 27],
[19087, 65112, -46461, 37504, -20211, 6312, -863],
[36799, 139849, -121797, 123133, -88547, 41499, -11351, 1375],
[1070017, 4467094, -4604594, 5595358, -5033120, 3146338, -1291214, 312874, -33953],
[2082753, 9449717, -11271304, 16002320, -17283646, 13510082, -7394032, 2687864, -583435, 57281],
[
134211265, 656185652, -890175549, 1446205080, -1823311566, 1710774528, -1170597042, 567450984, -184776195,
36284876, -3250433
],
[
262747265, 1374799219, -2092490673, 3828828885, -5519460582, 6043521486, -4963166514, 3007739418, -1305971115,
384709327, -68928781, 5675265
],
[
703604254357, 3917551216986, -6616420957428, 13465774256510, -21847538039895, 27345870698436, -26204344465152,
19058185652796, -10344711794985, 4063327863170, -1092096992268, 179842822566, -13695779093
],
[
1382741929621, 8153167962181, -15141235084110, 33928990133618, -61188680131285, 86180228689563, -94393338653892,
80101021029180, -52177910882661, 25620259777835, -9181635605134, 2268078814386, -345457086395, 24466579093
],
[
8164168737599, 50770967534864, -102885148956217, 251724894607936, -499547203754837, 781911618071632,
-963605400824733, 934600833490944, -710312834197347, 418551804601264, -187504936597931, 61759426692544,
-14110480969927, 1998759236336, -132282840127
],
[
16088129229375, 105145058757073, -230992163723849, 612744541065337, -1326978663058069, 2285168598349733,
-3129453071993581, 3414941728852893, -2966365730265699, 2039345879546643, -1096355235402331, 451403108933483,
-137515713789319, 29219384284087, -3867689367599, 240208245823
],
[
8092989203533249, 55415287221275246, -131240807912923110, 375195469874202430, -880520318434977010,
1654462865819232198, -2492570347928318318, 3022404969160106870, -2953729295811279360, 2320851086013919370,
-1455690451266780818, 719242466216944698, -273894214307914510, 77597639915764930, -15407325991235610,
1913813460537746, -111956703448001
],
[
15980174332775873, 114329243705491117, -290470969929371220, 890337710266029860, -2250854333681641520,
4582441343348851896, -7532171919277411636, 10047287575124288740, -10910555637627652470, 9644799218032932490,
-6913858539337636636, 3985516155854664396, -1821304040326216520, 645008976643217360, -170761422500096220,
31816981024600492, -3722582669836627, 205804074290625
],
[
12600467236042756559, 93965550344204933076, -255007751875033918095, 834286388106402145800,
-2260420115705863623660, 4956655592790542146968, -8827052559979384209108, 12845814402199484797800,
-15345231910046032448070, 15072781455122686545920, -12155867625610599812538, 8008520809622324571288,
-4269779992576330506540, 1814584564159445787240, -600505972582990474260, 149186846171741510136,
-26182538841925312881, 2895045518506940460, -151711881512390095
],
[
24919383499187492303, 193280569173472261637, -558160720115629395555, 1941395668950986461335,
-5612131802364455926260, 13187185898439270330756, -25293146116627869170796, 39878419226784442421820,
-51970649453670274135470, 56154678684618739939910, -50320851025594566473146, 37297227252822858381906,
-22726350407538133839300, 11268210124987992327060, -4474886658024166985340, 1389665263296211699212,
-325187970422032795497, 53935307402575440285, -5652892248087175675, 281550972898020815
],
]
_DIVISOR = [
None, 11, 2, 12, 24, 720, 1440, 60480, 120960, 3628800, 7257600, 479001600, 958003200, 2615348736000, 5230697472000,
31384184832000, 62768369664000, 32011868528640000, 64023737057280000, 51090942171709440000, 102181884343418880000
]
_BASHFORTH_DIVISOR = [torch.tensor([b / divisor for b in bashforth], dtype=torch.float64)
for bashforth, divisor in zip(_BASHFORTH_COEFFICIENTS, _DIVISOR)]
_MOULTON_DIVISOR = [torch.tensor([m / divisor for m in moulton], dtype=torch.float64)
for moulton, divisor in zip(_MOULTON_COEFFICIENTS, _DIVISOR)]
_MIN_ORDER = 4
_MAX_ORDER = 12
_MAX_ITERS = 4
# TODO: replace this with PyTorch operations (a little hard because y is a deque being used as a circular buffer)
def _dot_product(x, y):
return sum(xi * yi for xi, yi in zip(x, y))
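# Explanatory note (an illustrative sketch added for clarity, not taken from the
# upstream source): with step size dt and stored derivatives f_n, f_{n-1}, ...,
# the order-k Adams-Bashforth predictor computed below is
#     dy = dt * sum_j (_BASHFORTH_COEFFICIENTS[k][j] / _DIVISOR[k]) * f_{n-j}
# and the implicit Adams-Moulton corrector refines dy using f evaluated at the
# predicted point, iterating until _has_converged reports success.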
class AdamsBashforthMoulton(FixedGridODESolver):
order = 4
def __init__(self, func, y0, rtol=1e-3, atol=1e-4, implicit=True, max_iters=_MAX_ITERS, max_order=_MAX_ORDER,
**kwargs):
        super(AdamsBashforthMoulton, self).__init__(func, y0, rtol=rtol, atol=atol, **kwargs)
assert max_order <= _MAX_ORDER, "max_order must be at most {}".format(_MAX_ORDER)
if max_order < _MIN_ORDER:
warnings.warn("max_order is below {}, so the solver reduces to `rk4`.".format(_MIN_ORDER))
self.rtol = torch.as_tensor(rtol, dtype=y0.dtype, device=y0.device)
self.atol = torch.as_tensor(atol, dtype=y0.dtype, device=y0.device)
self.implicit = implicit
self.max_iters = max_iters
self.max_order = int(max_order)
self.prev_f = collections.deque(maxlen=self.max_order - 1)
self.prev_t = None
self.bashforth = [x.to(y0.device) for x in _BASHFORTH_DIVISOR]
self.moulton = [x.to(y0.device) for x in _MOULTON_DIVISOR]
def _update_history(self, t, f):
if self.prev_t is None or self.prev_t != t:
self.prev_f.appendleft(f)
self.prev_t = t
def _has_converged(self, y0, y1):
"""Checks that each element is within the error tolerance."""
error_ratio = _compute_error_ratio(torch.abs(y0 - y1), self.rtol, self.atol, y0, y1, _linf_norm)
return error_ratio < 1
def _step_func(self, func, t0, dt, t1, y0):
f0 = func(t0, y0, perturb=Perturb.NEXT if self.perturb else Perturb.NONE)
self._update_history(t0, f0)
order = min(len(self.prev_f), self.max_order - 1)
if order < _MIN_ORDER - 1:
# Compute using RK4.
return rk4_alt_step_func(func, t0, dt, t1, y0, f0=self.prev_f[0], perturb=self.perturb), f0
else:
# Adams-Bashforth predictor.
bashforth_coeffs = self.bashforth[order]
dy = _dot_product(dt * bashforth_coeffs, self.prev_f).type_as(y0) # bashforth is float64 so cast back
# Adams-Moulton corrector.
if self.implicit:
moulton_coeffs = self.moulton[order + 1]
delta = dt * _dot_product(moulton_coeffs[1:], self.prev_f).type_as(y0) # moulton is float64 so cast back
converged = False
for _ in range(self.max_iters):
dy_old = dy
f = func(t1, y0 + dy, perturb=Perturb.PREV if self.perturb else Perturb.NONE)
dy = (dt * (moulton_coeffs[0]) * f).type_as(y0) + delta # moulton is float64 so cast back
converged = self._has_converged(dy_old, dy)
if converged:
break
if not converged:
warnings.warn('Functional iteration did not converge. Solution may be incorrect.')
self.prev_f.pop()
self._update_history(t0, f)
return dy, f0
class AdamsBashforth(AdamsBashforthMoulton):
def __init__(self, func, y0, **kwargs):
super(AdamsBashforth, self).__init__(func, y0, implicit=False, **kwargs)
|
test/test_group_model.py
|
zuarbase/server-client-python
| 470 |
98780
|
<reponame>zuarbase/server-client-python
import unittest
import tableauserverclient as TSC
class GroupModelTests(unittest.TestCase):
def test_invalid_name(self):
self.assertRaises(ValueError, TSC.GroupItem, None)
self.assertRaises(ValueError, TSC.GroupItem, "")
group = TSC.GroupItem("grp")
with self.assertRaises(ValueError):
group.name = None
with self.assertRaises(ValueError):
group.name = ""
def test_invalid_minimum_site_role(self):
group = TSC.GroupItem("grp")
with self.assertRaises(ValueError):
group.minimum_site_role = "Captain"
def test_invalid_license_mode(self):
group = TSC.GroupItem("grp")
with self.assertRaises(ValueError):
group.license_mode = "off"
|
run.py
|
liushengzhong1023/multihead-siamese-nets
| 175 |
98788
|
<filename>run.py
import time
from argparse import ArgumentParser
import tensorflow as tf
from tqdm import tqdm
from data import dataset_type
from data.dataset import Dataset
from models import model_type
from models.model_type import MODELS
from utils.batch_helper import BatchHelper
from utils.config_helpers import MainConfig
from utils.data_utils import DatasetVectorizer
from utils.log_saver import LogSaver
from utils.model_evaluator import ModelEvaluator
from utils.model_saver import ModelSaver
from utils.other_utils import timer, set_visible_gpu, init_config
log = tf.logging.info
def create_experiment_name(model_name, main_config, model_config):
experiment_name = '{}_{}'.format(model_name, main_config['PARAMS']['embedding_size'])
if model_name == model_type.ModelType.rnn.name:
experiment_name += ("_" + model_config['PARAMS']['cell_type'])
experiment_name += ("_" + main_config['PARAMS']['loss_function'])
return experiment_name
def train(
main_config,
model_config,
model_name,
experiment_name,
dataset_name,
):
main_cfg = MainConfig(main_config)
model = MODELS[model_name]
dataset = dataset_type.get_dataset(dataset_name)
train_data = dataset.train_set_pairs()
vectorizer = DatasetVectorizer(
model_dir=main_cfg.model_dir,
char_embeddings=main_cfg.char_embeddings,
raw_sentence_pairs=train_data,
)
dataset_helper = Dataset(vectorizer, dataset, main_cfg.batch_size)
max_sentence_len = vectorizer.max_sentence_len
vocabulary_size = vectorizer.vocabulary_size
train_mini_sen1, train_mini_sen2, train_mini_labels = dataset_helper.pick_train_mini_batch()
train_mini_labels = train_mini_labels.reshape(-1, 1)
test_sentence1, test_sentence2 = dataset_helper.test_instances()
test_labels = dataset_helper.test_labels()
test_labels = test_labels.reshape(-1, 1)
num_batches = dataset_helper.num_batches
model = model(
max_sentence_len,
vocabulary_size,
main_config,
model_config,
)
model_saver = ModelSaver(
model_dir=main_cfg.model_dir,
model_name=experiment_name,
checkpoints_to_keep=main_cfg.checkpoints_to_keep,
)
config = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=main_cfg.log_device_placement,
)
with tf.Session(config=config) as session:
global_step = 0
init = tf.global_variables_initializer()
session.run(init)
log_saver = LogSaver(
main_cfg.logs_path,
experiment_name,
dataset_name,
session.graph,
)
model_evaluator = ModelEvaluator(model, session)
metrics = {'acc': 0.0}
time_per_epoch = []
log('Training model for {} epochs'.format(main_cfg.num_epochs))
for epoch in tqdm(range(main_cfg.num_epochs), desc='Epochs'):
start_time = time.time()
train_sentence1, train_sentence2 = dataset_helper.train_instances(shuffle=True)
train_labels = dataset_helper.train_labels()
train_batch_helper = BatchHelper(
train_sentence1,
train_sentence2,
train_labels,
main_cfg.batch_size,
)
# small eval set for measuring dev accuracy
dev_sentence1, dev_sentence2, dev_labels = dataset_helper.dev_instances()
dev_labels = dev_labels.reshape(-1, 1)
tqdm_iter = tqdm(range(num_batches), total=num_batches, desc="Batches", leave=False,
postfix=metrics)
for batch in tqdm_iter:
global_step += 1
sentence1_batch, sentence2_batch, labels_batch = train_batch_helper.next(batch)
feed_dict_train = {
model.x1: sentence1_batch,
model.x2: sentence2_batch,
model.is_training: True,
model.labels: labels_batch,
}
loss, _ = session.run([model.loss, model.opt], feed_dict=feed_dict_train)
if batch % main_cfg.eval_every == 0:
feed_dict_train = {
model.x1: train_mini_sen1,
model.x2: train_mini_sen2,
model.is_training: False,
model.labels: train_mini_labels,
}
train_accuracy, train_summary = session.run(
[model.accuracy, model.summary_op],
feed_dict=feed_dict_train,
)
log_saver.log_train(train_summary, global_step)
feed_dict_dev = {
model.x1: dev_sentence1,
model.x2: dev_sentence2,
model.is_training: False,
model.labels: dev_labels
}
dev_accuracy, dev_summary = session.run(
[model.accuracy, model.summary_op],
feed_dict=feed_dict_dev,
)
log_saver.log_dev(dev_summary, global_step)
tqdm_iter.set_postfix(
dev_acc='{:.2f}'.format(float(dev_accuracy)),
train_acc='{:.2f}'.format(float(train_accuracy)),
loss='{:.2f}'.format(float(loss)),
epoch=epoch
)
if global_step % main_cfg.save_every == 0:
model_saver.save(session, global_step=global_step)
model_evaluator.evaluate_dev(
x1=dev_sentence1,
x2=dev_sentence2,
labels=dev_labels,
)
end_time = time.time()
total_time = timer(start_time, end_time)
time_per_epoch.append(total_time)
model_saver.save(session, global_step=global_step)
model_evaluator.evaluate_test(test_sentence1, test_sentence2, test_labels)
model_evaluator.save_evaluation(
model_path='{}/{}'.format(
main_cfg.model_dir,
experiment_name,
),
epoch_time=time_per_epoch[-1],
dataset=dataset,
)
def predict(
main_config,
model_config,
model,
experiment_name,
):
model = MODELS[model]
main_cfg = MainConfig(main_config)
# model_dir = str(main_config['DATA']['model_dir'])
vectorizer = DatasetVectorizer(
model_dir=main_cfg.model_dir,
char_embeddings=main_cfg.char_embeddings,
)
max_doc_len = vectorizer.max_sentence_len
vocabulary_size = vectorizer.vocabulary_size
model = model(max_doc_len, vocabulary_size, main_config, model_config)
with tf.Session() as session:
saver = tf.train.Saver()
last_checkpoint = tf.train.latest_checkpoint(
'{}/{}'.format(
main_cfg.model_dir,
experiment_name,
)
)
saver.restore(session, last_checkpoint)
while True:
x1 = input('First sentence:')
x2 = input('Second sentence:')
x1_sen = vectorizer.vectorize(x1)
x2_sen = vectorizer.vectorize(x2)
feed_dict = {model.x1: x1_sen, model.x2: x2_sen, model.is_training: False}
prediction = session.run([model.temp_sim], feed_dict=feed_dict)
print(prediction)
def main():
parser = ArgumentParser()
parser.add_argument(
'mode',
choices=['train', 'predict'],
help='pipeline mode',
)
parser.add_argument(
'model',
choices=['rnn', 'cnn', 'multihead'],
help='model to be used',
)
parser.add_argument(
'dataset',
choices=['QQP', 'SNLI', 'ANLI'],
nargs='?',
help='dataset to be used',
)
parser.add_argument(
'--experiment_name',
required=False,
help='the name of run experiment',
)
parser.add_argument(
'--gpu',
default='0',
        help='index of GPU to be used (default: %(default)s)',
)
args = parser.parse_args()
if 'train' in args.mode:
if args.dataset is None:
parser.error('Positional argument [dataset] is mandatory')
set_visible_gpu(args.gpu)
main_config = init_config()
model_config = init_config(args.model)
mode = args.mode
experiment_name = args.experiment_name
if experiment_name is None:
experiment_name = create_experiment_name(args.model, main_config, model_config)
if 'train' in mode:
train(main_config, model_config, args.model, experiment_name, args.dataset)
else:
predict(main_config, model_config, args.model, experiment_name)
if __name__ == '__main__':
main()
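# Example invocations (an illustrative sketch based on the argparse definitions in
# main(); the experiment name shown is hypothetical):
#   python run.py train multihead SNLI --gpu 0
#   python run.py predict multihead --experiment_name multihead_64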
|
polaris_common/topology.py
|
gribnut/polaris-gslb
| 225 |
98869
|
# -*- coding: utf-8 -*-
import ipaddress
__all__ = [
'config_to_map',
'get_region'
]
def config_to_map(topology_config):
"""
args:
topology_config: dict
{
'region1': [
'10.1.1.0/24',
'10.1.10.0/24',
'172.16.1.0/24'
],
'region2': [
'192.168.1.0/24',
'10.2.0.0/16',
]
}
Region cannot be "_default"
returns:
topology_map: dict
{
ip_network('10.1.1.0/24'): 'region1',
ip_network('10.1.10.0/24'): 'region1',
ip_network('172.16.1.0/24'): 'region1',
ip_network('192.168.1.0/24'): 'region2',
ip_network('10.2.0.0/16'): 'region2',
}
raises:
ValueError: if a region value is "_default"
"""
topology_map = {}
for region in topology_config:
# "_default" cannot be used as a region name
if region == '_default':
raise ValueError('cannot use "_default" as a region name')
for net_str in topology_config[region]:
net = ipaddress.ip_network(net_str)
topology_map[net] = region
return topology_map
def get_region(ip_str, topology_map):
"""Return name of a region from the topology map for
    the given IP address. If multiple networks contain the IP, the
    region of the most specific (longest prefix length) match is
    returned; if several matches have the same prefix length, which
    entry is returned is undefined.
args:
ip_str: string representing an IP address
returns:
string: region name
None: if no region has been found
raises:
ValueError: raised by ipaddress if ip_str isn't a valid IP address
"""
ip = ipaddress.ip_address(ip_str)
# find all the matching networks
matches = []
for net in topology_map:
if ip in net:
matches.append(net)
# if only a single match is found return it
if len(matches) == 1:
return topology_map[matches[0]]
# if more than 1 match is found, sort the matches
# by prefixlen, return the longest prefixlen entry
elif len(matches) > 1:
matches.sort(key=lambda net: net.prefixlen)
return topology_map[matches[-1]]
# no matches found
return None
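# Illustrative usage (a sketch based on the docstrings above; the region names and
# networks are invented for the example):
#   topology_map = config_to_map({
#       'region1': ['10.1.0.0/16', '10.1.1.0/24'],
#       'region2': ['192.168.1.0/24'],
#   })
#   get_region('10.1.1.7', topology_map)     # -> 'region1' (longest prefix wins)
#   get_region('192.168.1.9', topology_map)  # -> 'region2'
#   get_region('8.8.8.8', topology_map)      # -> None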
|
58.ε·¦ζ转ε符串/58.ε·¦ζ转ε符串.py
|
shenweichen/coding_interviews
| 483 |
98878
|
# -*- coding:utf-8 -*-
class Solution:
def LeftRotateString(self, s, n):
# write code here
if len(s)==0:
return s
s = list(s)
def flip(s,start,end):
for i in range(start,(start+end)//2 + 1):
s[i],s[end-i+start] = s[end - i+start],s[i]
return s
n %= len(s)
s = flip(s,0,n-1)
s = flip(s,n,len(s)-1)
s = flip(s,0,len(s)-1)
return "".join(s)
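# Worked example (illustrative, not part of the original solution): for
# s = "abcXYZ" and n = 3 the three reversals give
#   flip(s, 0, 2) -> "cbaXYZ"
#   flip(s, 3, 5) -> "cbaZYX"
#   flip(s, 0, 5) -> "XYZabc"
# which is the input rotated left by 3 characters.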
|
examples/wav2vec/unsupervised/scripts/mean_pool.py
|
Shiguang-Guo/fairseq
| 16,259 |
98880
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import os.path as osp
import math
import numpy as np
import tqdm
import torch
import torch.nn.functional as F
from shutil import copyfile
from npy_append_array import NpyAppendArray
def get_parser():
parser = argparse.ArgumentParser(
description="mean pools representations by compressing uniform splits of the data"
)
# fmt: off
parser.add_argument('source', help='directory with features')
parser.add_argument('--split', help='which split to read', required=True)
parser.add_argument('--save-dir', help='where to save the output', required=True)
parser.add_argument('--subsample-rate', type=float, default=0.5, help='size to subsample data to')
parser.add_argument('--remove-extra', action='store_true', help='if true, removes extra states that cant be pooled, otherwise pads with 0s')
# fmt: on
return parser
def main():
parser = get_parser()
args = parser.parse_args()
source_path = osp.join(args.source, args.split)
print(f"data path: {source_path}")
features = np.load(source_path + ".npy", mmap_mode="r")
os.makedirs(args.save_dir, exist_ok=True)
save_path = osp.join(args.save_dir, args.split)
copyfile(source_path + ".tsv", save_path + ".tsv")
if os.path.exists(source_path + ".phn"):
copyfile(source_path + ".phn", save_path + ".phn")
if os.path.exists(source_path + ".wrd"):
copyfile(source_path + ".wrd", save_path + ".wrd")
if os.path.exists(osp.join(args.source, "dict.phn.txt")):
copyfile(
osp.join(args.source, "dict.phn.txt"),
osp.join(args.save_dir, "dict.phn.txt"),
)
if osp.exists(save_path + ".npy"):
os.remove(save_path + ".npy")
npaa = NpyAppendArray(save_path + ".npy")
with open(source_path + ".lengths", "r") as lf:
lengths = lf.readlines()
fsz = features.shape[-1]
start = 0
with torch.no_grad():
with open(save_path + ".lengths", "w") as lengths_out:
for length in tqdm.tqdm(lengths):
length = int(length)
end = start + length
feats = features[start:end]
start += length
x = torch.from_numpy(feats).cuda()
target_num = math.ceil(length * args.subsample_rate)
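                # Worked example (illustrative note, not from the original source):
                # with length = 9 and subsample_rate = 0.5, target_num = ceil(4.5) = 5
                # and rem = 9 % 5 = 4; --remove-extra drops 1 frame and pools the
                # remaining 8 frames into 4 vectors, otherwise 1 copy of the last
                # frame is appended and the (10, fsz) tensor is viewed as (5, 2, fsz)
                # and mean-pooled over the middle dimension.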
rem = length % target_num
if rem > 0:
if args.remove_extra:
to_rem = target_num - rem
target_num -= 1
x = x[:-to_rem]
else:
to_add = target_num - rem
x = F.pad(x, [0, 0, 0, to_add])
x[-to_add:] = x[-to_add - 1]
x = x.view(target_num, -1, fsz)
x = x.mean(dim=-2)
print(target_num, file=lengths_out)
npaa.append(x.cpu().numpy())
if __name__ == "__main__":
main()
|
openbook_moderation/migrations/0014_auto_20191205_1704.py
|
TamaraAbells/okuna-api
| 164 |
98947
|
# Generated by Django 2.2.5 on 2019-12-05 16:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('openbook_moderation', '0013_auto_20190909_1236'),
]
operations = [
migrations.AlterField(
model_name='moderatedobject',
name='object_type',
field=models.CharField(choices=[('P', 'Post'), ('PC', 'Post Comment'), ('C', 'Community'), ('U', 'User'), ('H', 'Hashtag')], max_length=5),
),
]
|
examples/04_krige_geometric.py
|
ehxan139/PyKrige
| 280 |
98955
|
# -*- coding: utf-8 -*-
"""
Geometric example
=================
A small example script showing the usage of the 'geographic' coordinates type
for ordinary kriging on a sphere.
"""
from pykrige.ok import OrdinaryKriging
import numpy as np
from matplotlib import pyplot as plt
# Make this example reproducible:
np.random.seed(89239413)
# Generate random data following a uniform spatial distribution
# of nodes and a uniform distribution of values in the interval
# [2.0, 5.5]:
N = 7
lon = 360.0 * np.random.random(N)
lat = 180.0 / np.pi * np.arcsin(2 * np.random.random(N) - 1)
z = 3.5 * np.random.rand(N) + 2.0
# Generate a regular grid with 60Β° longitude and 30Β° latitude steps:
grid_lon = np.linspace(0.0, 360.0, 7)
grid_lat = np.linspace(-90.0, 90.0, 7)
# Create ordinary kriging object:
OK = OrdinaryKriging(
lon,
lat,
z,
variogram_model="linear",
verbose=False,
enable_plotting=False,
coordinates_type="geographic",
)
# Execute on grid:
z1, ss1 = OK.execute("grid", grid_lon, grid_lat)
# Create ordinary kriging object ignoring curvature:
OK = OrdinaryKriging(
lon, lat, z, variogram_model="linear", verbose=False, enable_plotting=False
)
# Execute on grid:
z2, ss2 = OK.execute("grid", grid_lon, grid_lat)
###############################################################################
# Print data at equator (last longitude index will show periodicity):
print("Original data:")
print("Longitude:", lon.astype(int))
print("Latitude: ", lat.astype(int))
print("z: ", np.array_str(z, precision=2))
print("\nKrige at 60Β° latitude:\n======================")
print("Longitude:", grid_lon)
print("Value: ", np.array_str(z1[5, :], precision=2))
print("SigmaΒ²: ", np.array_str(ss1[5, :], precision=2))
print("\nIgnoring curvature:\n=====================")
print("Value: ", np.array_str(z2[5, :], precision=2))
print("SigmaΒ²: ", np.array_str(ss2[5, :], precision=2))
###############################################################################
# We can see that the data point at longitude 122, latitude 50 correctly
# dominates the kriged results, since it is the closest node in the spherical
# distance metric, as longitude differences scale with cos(latitude).
# When kriging with longitude/latitude treated as linear coordinates, grid
# points that are further away in longitude are incorrectly weighted as if
# longitude differences counted the same as latitude differences.
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(z1, extent=[0, 360, -90, 90], origin="lower")
ax1.set_title("geo-coordinates")
ax2.imshow(z2, extent=[0, 360, -90, 90], origin="lower")
ax2.set_title("non geo-coordinates")
plt.show()
|
functorch/_src/cse.py
|
pytorch/functorch
| 423 |
98981
|
import torch
import torch.fx as fx
from torch.utils._pytree import tree_flatten
aten = torch.ops.aten
rand_ops = [aten.dropout, aten._fused_dropout, aten._standard_gamma,
aten.bernoulli, aten.multinomial, aten.native_dropout,
aten.normal, aten.poisson, aten.binomial, aten.rrelu,
aten.rand_like, aten.rand, aten.randint, aten.randn, aten.randperm]
# return a new copy of torch.fx.graph.Graph with CSE applied to the input graph
def fx_graph_cse(fx_g: torch.fx.graph.Graph):
new_graph = fx.Graph()
env = {} # map from node in the old graph to node in the new graph
hash_env = {} # map from hash to a node in the new graph
token_map = {} # map from hash to token
for n in fx_g.nodes:
        # The placeholder, output, and get_attr nodes are copied to the new graph without change
# do not CSE away random operations
if n.op == 'placeholder' or n.op == 'output' or n.op == 'get_attr' or n.target in rand_ops:
new_node = new_graph.node_copy(n, lambda x: env[x])
env[n] = new_node
else: # n.op == 'call_function', should never see n.op == 'call_module' or 'call_method'
            # substitute args and kwargs members with their mapping in env if one exists
# specs can be used to reconstruct nested list/dictionaries
def substitute(arg_list):
arg_list, spec = tree_flatten(arg_list)
for i in range(len(arg_list)):
v = arg_list[i]
if isinstance(v, torch.fx.node.Node) and v in env:
arg_list[i] = env[v]
return tuple(arg_list), spec
args, args_spec = substitute(n.args)
kwargs, kwargs_spec = substitute(n.kwargs)
# each token corresponds to a unique node
# nodes with the same token can be substituted
token = {"target": n.target, "args": args, "args_spec": args_spec,
"kwargs": kwargs, "kwargs_spec": kwargs_spec}
# hash substituted args to a number, do not hash specs because specs are not hashable
hash_arg = hash((args, kwargs))
hash_val = (n.target, hash_arg)
# check if a node has a substitute and can be eliminated
hash_val_in_hash_env = hash_val in hash_env
if hash_val_in_hash_env and token_map[hash_val] == token:
env[n] = hash_env[hash_val]
continue
new_node = new_graph.node_copy(n, lambda x: env[x])
env[n] = new_node
if not hash_val_in_hash_env:
hash_env[hash_val] = new_node
token_map[hash_val] = token
return new_graph
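# Illustrative usage (a sketch, not part of the original module; `my_fn` stands in
# for any function traceable by torch.fx):
#   traced = fx.symbolic_trace(my_fn)
#   deduped_graph = fx_graph_cse(traced.graph)
#   deduped_module = fx.GraphModule(traced, deduped_graph)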
|
moshmosh/rewrite_helper.py
|
Aloxaf/moshmosh
| 114 |
99020
|
<gh_stars>100-1000
import ast
def ast_to_literal(node):
"""
Convert an AST to a python literal.
We avoid the use of comprehension expressions here
to get more human-friendly call stacks.
"""
if isinstance(node, ast.AST):
field_names = node._fields
res = {'constructor': node.__class__.__name__}
for field_name in field_names:
field = getattr(node, field_name, None)
field = ast_to_literal(field)
res[field_name] = field
if hasattr(node, 'lineno'):
res['lineno'] = node.lineno
if hasattr(node, 'col_offset'):
res['col_offset'] = node.col_offset
return res
if isinstance(node, list):
res = []
for each in node:
res.append(ast_to_literal(each))
return res
return node
def ast_to_literal_without_locations(node):
if isinstance(node, ast.AST):
field_names = node._fields
res = {'constructor': node.__class__.__name__}
for field_name in field_names:
field = getattr(node, field_name, None)
field = ast_to_literal_without_locations(field)
res[field_name] = field
return res
if isinstance(node, list):
res = []
for each in node:
res.append(ast_to_literal_without_locations(each))
return res
return node
def literal_to_ast(literal):
"""
Convert a python literal to an AST.
"""
if isinstance(literal, dict):
ctor = literal.pop('constructor')
ctor = getattr(ast, ctor)
return ctor(**{k: literal_to_ast(v) for k, v in literal.items()})
if isinstance(literal, list):
return list(map(literal_to_ast, literal))
return literal
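# Illustrative round trip (a sketch, not part of the original module):
#   tree = ast.parse("x + 1")
#   literal = ast_to_literal(tree)     # nested dicts/lists of plain values
#   rebuilt = literal_to_ast(literal)  # an equivalent ast.Module
# Note that literal_to_ast pops the 'constructor' keys, so it consumes the
# literal passed to it.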
|
rpython/jit/backend/ppc/test/test_ztranslation_call_assembler.py
|
nanjekyejoannah/pypy
| 381 |
99031
|
from rpython.jit.backend.llsupport.test.ztranslation_test import TranslationTestCallAssembler
class TestTranslationCallAssemblerPPC(TranslationTestCallAssembler):
pass
|
cockpit/instruments/grad_norm_gauge.py
|
wx-b/cockpit
| 367 |
99036
|
<reponame>wx-b/cockpit
"""Gradient Norm Gauge."""
import warnings
from cockpit.instruments.utils_instruments import check_data, create_basic_plot
from cockpit.quantities.utils_quantities import _root_sum_of_squares
def grad_norm_gauge(self, fig, gridspec):
"""Showing the gradient norm versus iteration.
    If the training gets stuck due to a small
    :class:`~cockpit.quantities.UpdateSize`, it can be the result of either a badly
    chosen learning rate or a flat plateau in the loss landscape.
    This instrument shows the gradient norm at each iteration, overlaid with an
exponentially weighted average, and can thus distinguish these two cases.
**Preview**
.. image:: ../../_static/instrument_previews/GradientNorm.png
:alt: Preview GradientNorm Gauge
**Requires**
The gradient norm instrument requires data from the
:class:`~cockpit.quantities.GradNorm` quantity class.
Args:
self (CockpitPlotter): The cockpit plotter requesting this instrument.
fig (matplotlib.figure.Figure): Figure of the Cockpit.
gridspec (matplotlib.gridspec.GridSpec): GridSpec where the instrument should be
placed
"""
# Plot Trace vs iteration
title = "Gradient Norm"
# Check if the required data is available, else skip this instrument
requires = ["GradNorm"]
plot_possible = check_data(self.tracking_data, requires)
if not plot_possible:
if self.debug:
warnings.warn(
"Couldn't get the required data for the " + title + " instrument",
stacklevel=1,
)
return
# Compute
self.tracking_data["GradNorm_all"] = self.tracking_data.GradNorm.map(
lambda x: _root_sum_of_squares(x) if type(x) == list else x
)
plot_args = {
"x": "iteration",
"y": "GradNorm_all",
"data": self.tracking_data,
"x_scale": "symlog" if self.show_log_iter else "linear",
"y_scale": "linear",
"cmap": self.cmap,
"EMA": "y",
"EMA_alpha": self.EMA_alpha,
"EMA_cmap": self.cmap2,
"title": title,
"xlim": "tight",
"ylim": None,
"fontweight": "bold",
"facecolor": self.bg_color_instruments,
}
ax = fig.add_subplot(gridspec)
create_basic_plot(**plot_args, ax=ax)
|
tests/r/test_macdonell_df.py
|
hajime9652/observations
| 199 |
99058
|
<gh_stars>100-1000
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.macdonell_df import macdonell_df
def test_macdonell_df():
"""Test module macdonell_df.py by downloading
macdonell_df.csv and testing shape of
extracted data has 3000 rows and 2 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = macdonell_df(test_path)
try:
assert x_train.shape == (3000, 2)
except:
shutil.rmtree(test_path)
    raise
|
xnu-4903.241.1/tools/lldbmacros/ntstat.py
|
DogeCoding/iOSCompiledRuntime
| 672 |
99080
|
<filename>xnu-4903.241.1/tools/lldbmacros/ntstat.py
""" Please make sure you read the README COMPLETELY BEFORE reading anything below.
It is very critical that you read coding guidelines in Section E in README file.
"""
from xnu import *
from utils import *
from string import *
from socket import *
import xnudefines
from netdefines import *
from routedefines import *
def ShowNstatTUShadow(inshadow):
""" Display summary for an nstat_tu_shadow struct
params:
inshadow : cvalue object which points to 'struct nstat_tu_shadow *'
"""
shad = Cast(inshadow, 'struct nstat_tu_shadow *')
procdetails = shad.shad_procdetails
out_string = ""
if shad :
format_string = "nstat_tu_shadow {0: <s}: next={1: <s} prev={2: <s} context (necp_client *)={3: <s} live={4: <d}"
out_string += format_string.format(hex(shad), hex(shad.shad_link.tqe_next), hex(shad.shad_link.tqe_prev), hex(shad.shad_provider_context),shad.shad_live)
magic = unsigned(shad.shad_magic)
if (magic != 0xfeedf00d) :
format_string = " INVALID shad magic {0: <s}"
out_string += format_string.format(hex(magic))
if (procdetails) :
format_string = " --> procdetails {0: <s}: pid={1: <d} name={2: <s} numflows={3: <d}"
out_string += format_string.format(hex(procdetails), procdetails.pdet_pid, procdetails.pdet_procname, procdetails.pdet_numflows)
procmagic = unsigned(procdetails.pdet_magic)
if (procmagic != 0xfeedc001) :
format_string = " INVALID proc magic {0: <s}"
out_string += format_string.format(hex(procmagic))
print out_string
def GetNstatProcdetailsBrief(procdetails):
""" Display a brief summary for an nstat_procdetails struct
params:
procdetails : cvalue object which points to 'struct nstat_procdetails *'
returns:
str : A string describing various information for the nstat_procdetails structure
"""
procdetails = Cast(procdetails, 'struct nstat_procdetails *')
out_string = ""
if (procdetails) :
format_string = " --> pid={0: <d} name={1: <s} numflows={2: <d}"
out_string += format_string.format(procdetails.pdet_pid, procdetails.pdet_procname, procdetails.pdet_numflows)
procmagic = unsigned(procdetails.pdet_magic)
if (procmagic != 0xfeedc001) :
format_string = " INVALID proc magic {0: <s}"
out_string += format_string.format(hex(procmagic))
return out_string
def ShowNstatProcdetails(procdetails):
""" Display a summary for an nstat_procdetails struct
params:
procdetails : cvalue object which points to 'struct nstat_procdetails *'
"""
procdetails = Cast(procdetails, 'struct nstat_procdetails *')
out_string = ""
if (procdetails) :
format_string = "nstat_procdetails: {0: <s} next={1: <s} prev={2: <s} "
out_string += format_string.format(hex(procdetails), hex(procdetails.pdet_link.tqe_next), hex(procdetails.pdet_link.tqe_prev))
out_string += GetNstatProcdetailsBrief(procdetails)
print out_string
def GetNstatTUShadowBrief(shadow):
""" Display a summary for an nstat_tu_shadow struct
params:
shadow : cvalue object which points to 'struct nstat_tu_shadow *'
returns:
str : A string describing various information for the nstat_tu_shadow structure
"""
out_string = ""
shad = Cast(shadow, 'struct nstat_tu_shadow *')
procdetails = shad.shad_procdetails
procdetails = Cast(procdetails, 'struct nstat_procdetails *')
out_string = ""
if shad :
format_string = " shadow {0: <s}: necp_client ={1: <s} live={2: <d}"
out_string += format_string.format(hex(shad),hex(shad.shad_provider_context),shad.shad_live)
magic = unsigned(shad.shad_magic)
if (magic != 0xfeedf00d) :
format_string = " INVALID shad magic {0: <s}"
out_string += format_string.format(hex(magic))
elif (procdetails) :
out_string += GetNstatProcdetailsBrief(procdetails)
return out_string
def ShowNstatSrc(insrc):
""" Display summary for an nstat_src struct
params:
insrc : cvalue object which points to 'struct nstat_src *'
"""
src = Cast(insrc, 'nstat_src *')
prov = src.provider
prov = Cast(prov, 'nstat_provider *')
prov_string = "?"
if (prov.nstat_provider_id == 2):
prov_string = "TCP k"
elif (prov.nstat_provider_id == 3):
prov_string = "TCP u"
elif (prov.nstat_provider_id == 4):
prov_string = "UDP k"
elif (prov.nstat_provider_id == 5):
prov_string = "UDP u"
elif (prov.nstat_provider_id == 1):
prov_string = "Route"
elif (prov.nstat_provider_id == 6):
prov_string = "ifnet"
elif (prov.nstat_provider_id == 7):
prov_string = "sysinfo"
else:
prov_string = "unknown-provider"
out_string = ""
if src :
format_string = " nstat_src {0: <s}: prov={1: <s} next={2: <s} prev={3: <s} ref={4: <d}"
out_string += format_string.format(hex(src), prov_string, hex(src.ns_control_link.tqe_next), hex(src.ns_control_link.tqe_prev), src.srcref)
if (prov.nstat_provider_id == 3):
out_string += GetNstatTUShadowBrief(src.cookie);
print out_string
def ShowNstatCtrl(inctrl):
""" Display an nstat_control_state struct
params:
ctrl : value object representing an nstat_control_state in the kernel
"""
ctrl = Cast(inctrl, 'nstat_control_state *')
out_string = ""
if ctrl :
format_string = "nstat_control_state {0: <s}: next={1: <s} src head={2: <s} tail={3: <s}"
out_string += format_string.format(hex(ctrl), hex(ctrl.ncs_next), hex(ctrl.ncs_src_queue.tqh_first), hex(ctrl.ncs_src_queue.tqh_last))
print out_string
for src in IterateTAILQ_HEAD(ctrl.ncs_src_queue, 'ns_control_link'):
ShowNstatSrc(src)
# Macro: showallntstat
@lldb_command('showallntstat')
def ShowAllNtstat(cmd_args=None) :
""" Show the contents of various ntstat (network statistics) data structures
"""
print "nstat_controls list:\n"
ctrl = kern.globals.nstat_controls
ctrl = cast(ctrl, 'nstat_control_state *')
while ctrl != 0:
ShowNstatCtrl(ctrl)
ctrl = cast(ctrl.ncs_next, 'nstat_control_state *')
print "\nnstat_userprot_shad list:\n"
shadows = kern.globals.nstat_userprot_shad_head
for shad in IterateTAILQ_HEAD(shadows, 'shad_link'):
ShowNstatTUShadow(shad)
print "\nnstat_procdetails list:\n"
procdetails_head = kern.globals.nstat_procdetails_head
for procdetails in IterateTAILQ_HEAD(procdetails_head, 'pdet_link'):
ShowNstatProcdetails(procdetails)
# EndMacro: showallntstat
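# Illustrative usage (not part of the original macro file): once these macros are
# loaded into an lldb session attached to an xnu kernel, running `showallntstat`
# at the (lldb) prompt prints the control, shadow and procdetails lists above.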
|
tests/cloudcraft_graph_test.py
|
tjunnone/modules.tf-lambda
| 312 |
99087
|
<filename>tests/cloudcraft_graph_test.py<gh_stars>100-1000
# #!/usr/bin/env pytest
#
# from json import loads
# from os import environ
#
# import pytest
# from modulestf.cloudcraft.graph import populate_graph
#
# events = (
# (
# {
# "AlarmType": "Unsupported alarm type",
# "AWSAccountId": "000000000000",
# "NewStateValue": "ALARM",
# }
# ),
# (
# {
# "Records": [
# {
# "EventSource": "aws:sns",
# "EventVersion": "1.0",
# "EventSubscriptionArn": "arn:aws:sns:OMITTED:OMITTED:slack-notification:15405ea9-77dc-40d4-ba55-3f87997e5abb",
# "Sns": {
# "Type": "Notification",
# "MessageId": "7c9f6458-2b6c-55f4-aee9-3222e6745e5a",
# "TopicArn": "arn:aws:sns:OMITTED:OMITTED:slack-notification",
# "Subject": "RDS Notification Message",
# "Message": "{\"Event Source\":\"db-snapshot\",\"Event Time\":\"2019-12-23 14:10:24.176\",\"IdentifierLink\":\"https://console.aws.amazon.com/rds/home?region=OMITTED#snapshot:id=MY-TEST\"}",
# "Timestamp": "2019-12-23T14:10:32.199Z",
# "SignatureVersion": "1",
# "Signature": "kPGKHJ9rWTgK0Lw/UJow59z4B6cjoPfbnYlwDCbO/Wk/IlPmqjQMib94+GqozIPw4F9QEwwzb7RyaQ4IC3/iBoPYM5shVXkxdl2I8a7fyYqer4QgJWCUijZ60HhYZ7m2WeO7NJei5/8ahtLtyIPoD+8rHNiGJ9JV2RXokdsgWzbXIhbsQ6xGmcbwNe5FkpiqTcw7/52uJUWyUUcRz1E/BZEC5kFAw///u8JlioRmIC95e0isq724+5hf3BEryab3HC+5+BlWMPGug4FQ8kS8rquiXLKTl/e4ubFqz1GEUjiNoNXHqOqm9Bq+WNcBrmKMGNGhzr6In8Kh4srr56oGfQ==",
# "SigningCertUrl": "https://sns.OMITTED.amazonaws.com/SimpleNotificationService-6aad65c2f9911b05cd53efda11f913f9.pem",
# "UnsubscribeUrl": "https://sns.OMITTED.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:OMITTED:OMITTED:slack-notification:15405ea9-77dc-40d4-ba55-3f87997e5abb",
# "MessageAttributes": {}
# }
# }
# ]
# }
# )
# )
#
#
# @pytest.fixture(scope='module', autouse=True)
# def check_environment_variables():
# required_environment_variables = ("AA")
# missing_environment_variables = []
# for k in required_environment_variables:
# if k not in environ:
# missing_environment_variables.append(k)
#
# if len(missing_environment_variables) > 0:
# pytest.exit('Missing environment variables: {}'.format(", ".join(missing_environment_variables)))
#
#
# @pytest.mark.parametrize("event", events)
# def test_cloudcraft_graph(event):
# # if 'Records' in event:
# # response = notify_slack.lambda_handler(event, 'self-context')
# #
# # else:
# file = open(event, 'r')
# data = json.load(file)
#
# graph = populate_graph(data)
#
# # response = loads(response)
# assert graph is not False
|
source/speech/priorities.py
|
marlon-sousa/nvda
| 1,592 |
99097
|
<filename>source/speech/priorities.py
# -*- coding: UTF-8 -*-
# A part of NonVisual Desktop Access (NVDA)
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
# Copyright (C) 2017-2019 NV Access Limited, Babbage B.V.
"""Speech priority enumeration."""
from enum import IntEnum
class SpeechPriority(IntEnum):
#: Indicates that a speech sequence should have normal priority.
NORMAL = 0
#: Indicates that a speech sequence should be spoken after the next utterance of lower priority is complete.
NEXT = 1
#: Indicates that a speech sequence is very important and should be spoken right now,
#: interrupting low priority speech.
#: After it is spoken, interrupted speech will resume.
#: Note that this does not interrupt previously queued speech at the same priority.
NOW = 2
#: Easy shorthand for the SpeechPriority class
Spri = SpeechPriority
#: The speech priorities ordered from highest to lowest.
SPEECH_PRIORITIES = tuple(reversed(SpeechPriority))
|
scripts/obtain_video_id.py
|
shirayu/jtubespeech
| 108 |
99175
|
<filename>scripts/obtain_video_id.py
import time
import requests
import argparse
import re
import sys
from pathlib import Path
from util import make_query_url
from tqdm import tqdm
def parse_args():
parser = argparse.ArgumentParser(
description="Obtaining video IDs from search words",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("lang", type=str, help="language code (ja, en, ...)")
parser.add_argument("wordlist", type=str, help="filename of word list")
parser.add_argument("--outdir", type=str, default="videoid", help="dirname to save video IDs")
return parser.parse_args(sys.argv[1:])
def obtain_video_id(lang, fn_word, outdir="videoid", wait_sec=0.2):
fn_videoid = Path(outdir) / lang / f"{Path(fn_word).stem}.txt"
fn_videoid.parent.mkdir(parents=True, exist_ok=True)
with open(fn_videoid, "w") as f:
for word in tqdm(list(open(fn_word, "r").readlines())):
try:
# download search results
url = make_query_url(word)
html = requests.get(url).content
# find video IDs
videoids_found = [x.split(":")[1].strip("\"").strip(" ") for x in re.findall(r"\"videoId\":\"[\w\_\-]+?\"", str(html))]
videoids_found = list(set(videoids_found))
# write
f.writelines([v + "\n" for v in videoids_found])
f.flush()
except:
print(f"No video found for {word}.")
# wait
if wait_sec > 0.01:
time.sleep(wait_sec)
return fn_videoid
if __name__ == "__main__":
args = parse_args()
filename = obtain_video_id(args.lang, args.wordlist, args.outdir)
print(f"save {args.lang.upper()} video IDs to {filename}.")
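# Example invocation (an illustrative sketch based on parse_args above; the word
# list path is hypothetical):
#   python scripts/obtain_video_id.py ja words/ja.txt --outdir videoid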
|
build/build/void/makepkg/astroid/src/astroid/site_scons/site_tools/notmuch_test_db.py
|
scobiehague/dotfiles
| 117 |
99185
|
import SCons.Builder
import SCons.Errors
import SCons.Warnings
import os
import shutil
from subprocess import Popen
def nmAction(target, source, env):
'''
set up notmuch test db in target directory
'''
config = os.path.abspath(os.path.join (os.path.curdir, 'test/mail/test_config'))
env['ENV']['NOTMUCH_CONFIG'] = config
# run notmuch
myenv = os.environ.copy()
myenv['NOTMUCH_CONFIG'] = config
# remove old db
print "Remove test/mail/.notmuch.."
shutil.rmtree ('test/mail/test_mail/.notmuch', ignore_errors = True)
t = open ('test/mail/test_config.template', 'r')
o = open ('test/mail/test_config', 'w')
for l in t.readlines ():
if l == 'path=\n':
o.write ("path=" + os.path.abspath (os.path.join (os.path.curdir, 'test/mail/test_mail')) + "\n")
else:
o.write (l)
t.close ()
o.flush ()
o.close ()
p = Popen ("notmuch new", env = myenv, shell = True)
p.wait ()
open(str(target[0]),'w').write("SETUP\n")
def nmActionString(target, source, env):
'''
Return output string which will be seen when setting up test db
'''
return 'Setting up test database in ' + str(source[0])
def generate (env):
env['BUILDERS']['NotmuchTestDb'] = env.Builder(
action = env.Action(nmAction, nmActionString),
suffix='.setup')
class NotmuchNotFound (SCons.Warnings.Warning):
pass
def _detect (env):
""" Try to detect notmuch """
# from http://www.scons.org/wiki/ToolsForFools
try:
return env['notmuch']
except KeyError:
pass
nm = env.WhereIs('notmuch')
if nm:
return nm
raise SCons.Errors.StopError(
NotmuchNotFound,
"Could not find notmuch binary")
return None
def exists (env):
return _detect (env)
|
earth_enterprise/src/server/wsgi/search/common/exceptions.py
|
ezeeyahoo/earthenterprise
| 2,661 |
99205
|
<reponame>ezeeyahoo/earthenterprise
#!/usr/bin/env python2.7
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for all exception's which search services may raise."""
from search.common import utils
class Error(Exception):
"""Generic error."""
def ToString(self, error_prefix):
"""Builds error message string escaping it for HTML.
Args:
error_prefix: an error prefix.
Returns:
HTML escaped error message.
"""
if error_prefix:
return utils.HtmlEscape(
"{0}: {1}".format(error_prefix, str("\n".join(self.args))))
else:
return utils.HtmlEscape("Error: {0}".format(str("\n".join(self.args))))
def __str__(self):
return self.ToString("Error")
class BadQueryException(Error):
"""BadQueryException error."""
def __str__(self):
return self.ToString("BadQueryException")
# Places search service pool exception.
class PoolConnectionException(Error):
"""PoolConnectionException error."""
def __str__(self):
return self.ToString("PoolConnectionException")
def main():
pass
if __name__ == "__main__":
main()
|
examples/three_link_conical_pendulum/derive.py
|
JanMV/pydy
| 298 |
99207
|
<reponame>JanMV/pydy<gh_stars>100-1000
#!/usr/bin/env python
from sympy import symbols
import sympy.physics.mechanics as me
print("Defining the problem.")
# The conical pendulum will have three links and three bobs.
n = 3
# Each link's orientation is described by two spaced fixed angles: alpha and
# beta.
# Generalized coordinates
alpha = me.dynamicsymbols('alpha:{}'.format(n))
beta = me.dynamicsymbols('beta:{}'.format(n))
# Generalized speeds
omega = me.dynamicsymbols('omega:{}'.format(n))
delta = me.dynamicsymbols('delta:{}'.format(n))
# At each joint there are point masses (i.e. the bobs).
m_bob = symbols('m:{}'.format(n))
# Each link is modeled as a cylinder so it will have a length, mass, and a
# symmetric inertia tensor.
l = symbols('l:{}'.format(n))
m_link = symbols('M:{}'.format(n))
Ixx = symbols('Ixx:{}'.format(n))
Iyy = symbols('Iyy:{}'.format(n))
Izz = symbols('Izz:{}'.format(n))
# Acceleration due to gravity will be used when prescribing the forces
# acting on the links and bobs.
g = symbols('g')
# Now defining an inertial reference frame for the system to live in. The Y
# axis of the frame will be aligned with, but opposite to, the gravity
# vector.
I = me.ReferenceFrame('I')
# Three reference frames will track the orientation of the three links.
A = me.ReferenceFrame('A')
A.orient(I, 'Space', [alpha[0], beta[0], 0], 'ZXY')
B = me.ReferenceFrame('B')
B.orient(A, 'Space', [alpha[1], beta[1], 0], 'ZXY')
C = me.ReferenceFrame('C')
C.orient(B, 'Space', [alpha[2], beta[2], 0], 'ZXY')
# Define the kinematical differential equations such that the generalized
# speeds equal the time derivative of the generalized coordinates.
kinematic_differentials = []
for i in range(n):
kinematic_differentials.append(omega[i] - alpha[i].diff())
kinematic_differentials.append(delta[i] - beta[i].diff())
# The angular velocities of the three frames can then be set.
A.set_ang_vel(I, omega[0] * I.z + delta[0] * I.x)
B.set_ang_vel(I, omega[1] * I.z + delta[1] * I.x)
C.set_ang_vel(I, omega[2] * I.z + delta[2] * I.x)
# The base of the pendulum will be located at a point O which is stationary
# in the inertial reference frame.
O = me.Point('O')
O.set_vel(I, 0)
# The locations of the bobs (at the joints between the links) are created by
# specifying the vectors between the points.
P1 = O.locatenew('P1', -l[0] * A.y)
P2 = P1.locatenew('P2', -l[1] * B.y)
P3 = P2.locatenew('P3', -l[2] * C.y)
# The velocities of the points can be computed by taking advantage of the fact
# that pairs of points are fixed on the reference frames.
P1.v2pt_theory(O, I, A)
P2.v2pt_theory(P1, I, B)
P3.v2pt_theory(P2, I, C)
points = [P1, P2, P3]
# Now create a particle to represent each bob.
Pa1 = me.Particle('Pa1', points[0], m_bob[0])
Pa2 = me.Particle('Pa2', points[1], m_bob[1])
Pa3 = me.Particle('Pa3', points[2], m_bob[2])
particles = [Pa1, Pa2, Pa3]
# The mass center of each link needs to be specified and, assuming a
# constant-density cylinder, it is equidistant from each joint.
P_link1 = O.locatenew('P_link1', -l[0] / 2 * A.y)
P_link2 = P1.locatenew('P_link2', -l[1] / 2 * B.y)
P_link3 = P2.locatenew('P_link3', -l[2] / 2 * C.y)
# The linear velocities can be specified the same way as the bob points.
P_link1.v2pt_theory(O, I, A)
P_link2.v2pt_theory(P1, I, B)
P_link3.v2pt_theory(P2, I, C)
points_rigid_body = [P_link1, P_link2, P_link3]
# The inertia tensors for the links are defined with respect to the mass
# center of the link and the link's reference frame.
inertia_link1 = (me.inertia(A, Ixx[0], Iyy[0], Izz[0]), P_link1)
inertia_link2 = (me.inertia(B, Ixx[1], Iyy[1], Izz[1]), P_link2)
inertia_link3 = (me.inertia(C, Ixx[2], Iyy[2], Izz[2]), P_link3)
# Now rigid bodies can be created for each link.
link1 = me.RigidBody('link1', P_link1, A, m_link[0], inertia_link1)
link2 = me.RigidBody('link2', P_link2, B, m_link[1], inertia_link2)
link3 = me.RigidBody('link3', P_link3, C, m_link[2], inertia_link3)
links = [link1, link2, link3]
# The only contributing forces to the system is the force due to gravity
# acting on each particle and body.
forces = []
for particle in particles:
mass = particle.mass
point = particle.point
forces.append((point, -mass * g * I.y))
for link in links:
mass = link.mass
point = link.masscenter
forces.append((point, -mass * g * I.y))
# Make a list of all the particles and bodies in the system.
total_system = links + particles
# Lists of all generalized coordinates and speeds.
q = alpha + beta
u = omega + delta
# Now the equations of motion of the system can be formed.
print("Generating equations of motion.")
kane = me.KanesMethod(I, q_ind=q, u_ind=u, kd_eqs=kinematic_differentials)
fr, frstar = kane.kanes_equations(forces, total_system)
print("Derivation complete.")
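# Illustrative note (not part of the original script): kanes_equations returns the
# generalized active forces Fr and the generalized inertia forces Fr*, and the
# equations of motion are Fr + Fr* = 0; kane.mass_matrix and kane.forcing can then
# be used to put them into first-order form for numerical integration.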
|
Coach.py
|
morozig/muzero
| 111 |
99225
|
"""
Define the base self-play/data-gathering class. This class should work with any MCTS-based neural network learning
algorithm like AlphaZero or MuZero. Self-play, model fitting, and pitting are performed sequentially on a single thread
in this default implementation.
Notes:
- Code adapted from https://github.com/suragnair/alpha-zero-general
- Base implementation done.
- Base implementation sufficiently abstracted to accommodate both AlphaZero and MuZero.
- Documentation 15/11/2020
"""
import os
import sys
import typing
from pickle import Pickler, Unpickler, HIGHEST_PROTOCOL
from collections import deque
from abc import ABC, abstractmethod
import numpy as np
from tqdm import trange
from Experimenter import Arena
from utils import DotDict
from utils.selfplay_utils import GameHistory, ParameterScheduler
from utils import debugging
class Coach(ABC):
"""
This class controls the self-play and learning loop. Subclass this abstract class to define implementation
specific procedures for sampling data for the learning algorithm. See MuZero/MuNeuralNet.py or
AlphaZero/AlphaNeuralNet.py for examples.
"""
def __init__(self, game, neural_net, args: DotDict, search_engine, player) -> None:
"""
Initialize the self-play class with an environment, an agent to train, requisite hyperparameters, a MCTS search
engine, and an agent-interface.
:param game: Game Implementation of Game class for environment logic.
:param neural_net: Some implementation of a neural network class to be trained.
:param args: DotDict Data structure containing parameters for self-play.
:param search_engine: Class containing the logic for performing MCTS using the neural_net.
:param player: Class containing the logic for agent-environment interaction.
"""
self.game = game
self.args = args
# Initialize replay buffer and helper variable
self.trainExamplesHistory = deque(maxlen=self.args.selfplay_buffer_window)
self.update_on_checkpoint = False # Can be overridden in loadTrainExamples()
# Initialize network and search engine
self.neural_net = neural_net
self.mcts = search_engine(self.game, self.neural_net, self.args)
self.arena_player = player(self.game, None)
self.arena_player.set_variables(self.neural_net, self.mcts, 'p1')
# Initialize adversary if specified.
if self.args.pitting:
self.opponent_net = self.neural_net.__class__(self.game, neural_net.net_args, neural_net.architecture)
self.opponent_mcts = search_engine(self.game, self.opponent_net, self.args)
self.arena_opponent = player(self.game, None)
self.arena_opponent.set_variables(self.opponent_net, self.opponent_mcts, 'p2')
# Initialize MCTS visit count exponentiation factor schedule.
self.temp_schedule = ParameterScheduler(self.args.temperature_schedule)
self.update_temperature = self.temp_schedule.build()
@staticmethod
def getCheckpointFile(iteration: int) -> str:
""" Helper function to format model checkpoint filenames """
return f'checkpoint_{iteration}.pth.tar'
@abstractmethod
def sampleBatch(self, histories: typing.List[GameHistory]) -> typing.List:
"""
Sample a batch of data from the current replay buffer (with or without prioritization).
This method is left abstract as different algorithm instances may require different data-targets.
:param histories: List of GameHistory objects. Contains all game-trajectories in the replay-buffer.
:return: List of training examples.
"""
def executeEpisode(self) -> GameHistory:
"""
Perform one episode of self-play for gathering data to train neural networks on.
The implementation details of the neural networks/ agents, temperature schedule, data storage
is kept highly transparent on this side of the algorithm. Hence for implementation details
see the specific implementations of the function calls.
At every step we record a snapshot of the state into a GameHistory object, this includes the observation,
MCTS search statistics, performed action, and observed rewards. After the end of the episode, we close the
GameHistory object and compute internal target values.
:return: GameHistory Data structure containing all observed states and statistics required for network training.
"""
history = GameHistory()
state = self.game.getInitialState() # Always from perspective of player 1 for boardgames.
step = 0
while not state.done and step < self.args.max_episode_moves:
if debugging.RENDER: # Display visualization of the environment if specified.
self.game.render(state)
# Update MCTS visit count temperature according to an episode or weight update schedule.
temp = self.update_temperature(self.neural_net.steps if self.temp_schedule.args.by_weight_update else step)
# Compute the move probability vector and state value using MCTS for the current state of the environment.
pi, v = self.mcts.runMCTS(state, history, temp=temp)
# Take a step in the environment and observe the transition and store necessary statistics.
state.action = np.random.choice(len(pi), p=pi)
next_state, r = self.game.getNextState(state, state.action)
history.capture(state, pi, r, v)
# Update state of control
state = next_state
step += 1
# Cleanup environment and GameHistory
self.game.close(state)
history.terminate()
history.compute_returns(gamma=self.args.gamma, n=(self.args.n_steps if self.game.n_players == 1 else None))
return history
def learn(self) -> None:
"""
Control the data gathering and weight optimization loop. Perform 'num_selfplay_iterations' iterations
of self-play to gather data, each of 'num_episodes' episodes. After every self-play iteration, train the
neural network with the accumulated data. If specified, the previous neural network weights are evaluated
        against the newly fitted neural network weights; the new weights are then accepted only if they achieve the
        specified win/lose ratio. Neural network weights and the replay buffer are stored after every iteration.
        Note that for highly granular vision-based environments, the replay buffer may grow to a very large size.
"""
for i in range(1, self.args.num_selfplay_iterations + 1):
print(f'------ITER {i}------')
if not self.update_on_checkpoint or i > 1: # else: go directly to backpropagation
# Self-play/ Gather training data.
iteration_train_examples = list()
for _ in trange(self.args.num_episodes, desc="Self Play", file=sys.stdout):
self.mcts.clear_tree()
iteration_train_examples.append(self.executeEpisode())
if sum(map(len, iteration_train_examples)) > self.args.max_buffer_size:
iteration_train_examples.pop(0)
# Store data from previous self-play iterations into the history.
self.trainExamplesHistory.append(iteration_train_examples)
# Print out statistics about the replay buffer, and back-up the data history to a file (can be slow).
GameHistory.print_statistics(self.trainExamplesHistory)
self.saveTrainExamples(i - 1)
# Flatten examples over self-play episodes and sample a training batch.
complete_history = GameHistory.flatten(self.trainExamplesHistory)
# Training new network, keeping a copy of the old one
self.neural_net.save_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')
# Backpropagation
for _ in trange(self.args.num_gradient_steps, desc="Backpropagation", file=sys.stdout):
batch = self.sampleBatch(complete_history)
self.neural_net.train(batch)
self.neural_net.monitor.log_batch(batch)
# Pitting
accept = True
if self.args.pitting:
# Load in the old network.
self.opponent_net.load_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')
# Perform trials with the new network against the old network.
arena = Arena(self.game, self.arena_player, self.arena_opponent, self.args.max_trial_moves)
accept = arena.pitting(self.args, self.neural_net.monitor)
if accept:
print('ACCEPTING NEW MODEL')
self.neural_net.save_checkpoint(folder=self.args.checkpoint, filename=self.getCheckpointFile(i))
self.neural_net.save_checkpoint(folder=self.args.checkpoint, filename=self.args.load_folder_file[-1])
else:
print('REJECTING NEW MODEL')
self.neural_net.load_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')
def saveTrainExamples(self, iteration: int) -> None:
"""
        Store the current accumulated data to a compressed file using pickle. Note that for high-dimensional
        environments the stored files may be considerably large, and storing/loading the data may
        introduce a significant bottleneck to the runtime of the algorithm.
:param iteration: int Current iteration of the self-play. Used as indexing value for the data filename.
"""
folder = self.args.checkpoint
if not os.path.exists(folder):
os.makedirs(folder)
filename = os.path.join(folder, self.getCheckpointFile(iteration) + ".examples")
with open(filename, "wb+") as f:
Pickler(f, protocol=HIGHEST_PROTOCOL).dump(self.trainExamplesHistory)
# Don't hog up storage space and clean up old (never to be used again) data.
old_checkpoint = os.path.join(folder, self.getCheckpointFile(iteration - 1) + '.examples')
if os.path.isfile(old_checkpoint):
os.remove(old_checkpoint)
def loadTrainExamples(self) -> None:
"""
Load in a previously generated replay buffer from the path specified in the .json arguments.
"""
model_file = os.path.join(self.args.load_folder_file[0], self.args.load_folder_file[1])
examples_file = model_file + ".examples"
if not os.path.isfile(examples_file):
r = input(f"Data file {examples_file} could not be found. Continue with a fresh buffer? [y|n]")
if r != "y":
sys.exit()
else:
print(f"Data file {examples_file} found. Read it.")
with open(examples_file, "rb") as f:
self.trainExamplesHistory = Unpickler(f).load()
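

# --- Illustrative sketch (not part of the original class) --------------------
# A minimal example of what a concrete sampleBatch() could look like when
# sampling uniformly without prioritization, as described in the abstract
# method's docstring. The attribute names on the flattened GameHistory
# (observations, probabilities, observed_returns) and the (observation,
# targets) tuple layout are assumptions made only for illustration; concrete
# subclasses define their own data-targets.
def _uniform_sample_batch_sketch(flat_history, batch_size: int) -> typing.List:
    n = len(flat_history.observations)
    indices = np.random.choice(n, size=min(batch_size, n), replace=False)
    return [(flat_history.observations[i],
             (flat_history.probabilities[i], flat_history.observed_returns[i]))
            for i in indices]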
|
scripts/pyxtal_test.py
|
ubikpt/PyXtal
| 127 |
99228
|
<filename>scripts/pyxtal_test.py
#!/usr/bin/env python
# encoding: utf-8
# Test script for pyXtal v-0.1.4. Tests core functions for all modules.
import sys
import numpy as np
import warnings
from time import time
from copy import deepcopy
from spglib import get_symmetry_dataset
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pyxtal.symmetry import (
Group,
get_wyckoffs,
get_layer,
get_rod,
get_point,
)
from pyxtal import pyxtal
from pyxtal.operations import distance, filtered_coords
_summary_text_ = ""
def fprint(text):
"""Custom print function for output to file
"""
global _summary_text_
print(text)
if _summary_text_ != "":
_summary_text_ += "\n"
_summary_text_ += text
sys.settrace(None)
outstructs = []
outstrings = []
class Logger(object):
def __init__(self):
self.terminal = sys.stdout
self.log = open("test_summary.txt", "w")
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
# this flush method is needed for python 3 compatibility.
# this handles the flush command by doing nothing.
# you might want to specify some extra behavior here.
pass
sys.stdout = Logger()
# Check if module and classes work correctly
def passed():
global failed_module
global failed
if failed_module is False and failed is False:
return True
else:
return False
# Reset flags for module and class
def reset():
global failed_module
global failed
failed_module = False
failed = False
# Set flags for package, module, class if error occurs
def fail(*argv):
if argv != ():
e = argv[0]
else:
e = "Unknown error"
global failed_package
global failed_module
global failed
failed_package = True
failed_module = True
failed = True
try:
fprint("~~~ Error:")
import pdb, traceback
extype, value, tb = sys.exc_info()
traceback.print_exc()
except:
fprint("~~~ Error: ", e)
# Print whether module passed or failed
def check():
if passed():
pass # fprint("Success!")
else:
fprint("~~~ Failed module ~~~")
# Call at end of script, or if module fails
def end(condition=1):
fprint("===")
if failed_package is False:
fprint("All modules passed!")
if condition == 1:
sys.exit(0)
elif condition == 2:
pass
else:
fprint("One or more modules failed. Try reinstalling the package.")
sys.exit(0)
def compare_wyckoffs(num1, num2, dim=3):
"""Given 2 groups, return whether the second point
group has equal or greater symmetry than the first group."""
if num1 == "???":
fprint("Error: invalid value for num1 passed to compare_wyckoffs")
return
if num2 == "???":
return False
# Get general positions for both groups
if dim == 3:
g1 = get_wyckoffs(num1)[0]
g2 = get_wyckoffs(num2)[0]
elif dim == 2:
g1 = get_layer(num1)[0]
g2 = get_layer(num2)[0]
elif dim == 1:
g1 = get_rod(num1)[0]
g2 = get_rod(num2)[0]
elif dim == 0:
g1 = get_point(num1)[0]
g2 = get_point(num2)[0]
# If group 2 has higher symmetry
if len(g2) > len(g1):
return True
# Compare point group operations
for i, op2 in enumerate(g2):
op1 = g1[i]
m1 = op1.rotation_matrix
m2 = op2.rotation_matrix
if not np.allclose(m1, m2):
return False
return True
def check_struct_group(crystal, group, dim=3, tol=1e-2):
# Supress pymatgen/numpy complex casting warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
"""Given a pymatgen structure, group number, and dimension, return
whether or not the structure matches the group number."""
if isinstance(crystal, pyxtal):
pmg_struc = crystal.to_pymatgen()
if dim > 0:
lattice = pmg_struc.lattice.matrix
else:
lattice = crystal.lattice.matrix
if dim != 0:
old_coords = deepcopy(pmg_struc.frac_coords)
old_species = deepcopy(pmg_struc.atomic_numbers)
elif dim == 0:
old_coords = deepcopy(pmg_struc.cart_coords)
old_species = deepcopy(pmg_struc.species)
else:
lattice = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
old_coords = np.array(crystal)
old_species = ["C"] * len(old_coords)
PBC = [1, 1, 1]
# Obtain the generators for the group
if dim == 3:
generators = get_wyckoffs(group)[0]
elif dim == 2:
generators = get_layer(group)[0]
PBC = [1, 1, 0]
elif dim == 1:
generators = get_rod(group)[0]
PBC = [0, 0, 1]
elif dim == 0:
generators = Group(group, dim=0)[0]
PBC = [0, 0, 0]
# TODO: Add check for lattice symmetry
# Apply SymmOps to generate new points
# old_coords = filtered_coords(struct.frac_coords,PBC=PBC)
new_coords = []
new_species = []
for i, point in enumerate(old_coords):
for j, op in enumerate(generators):
if j != 0:
new_coords.append(op.operate(point))
new_species.append(old_species[i])
# new_coords = filtered_coords(new_coords,PBC=PBC)
# Check that all points in new list are still in old
failed = False
i_list = list(range(len(new_coords)))
for i, point1 in enumerate(new_coords):
found = False
for j, point2 in enumerate(old_coords):
if new_species[i] == old_species[j]:
difference = filtered_coords(point2 - point1, PBC=PBC)
if distance(difference, lattice, PBC=PBC) <= tol:
found = True
break
if found is False:
failed = True
break
if failed is False:
return True
else:
return False
def test_atomic():
global outstructs
global outstrings
fprint("=== Testing generation of atomic 3D crystals. This may take some time. ===")
my_crystal1 = pyxtal()
my_crystal1.from_random(3, 99, ['Ba','Ti','O'], [1,1,3], 1.0, sites=[["1b"], ["1b"], ["2c", "1b"]])
print(my_crystal1)
#my_crystal1.to_file("1.cif")
my_crystal2 = pyxtal()
my_crystal2.from_random(3, 225, ['C'], [12], 1.0, sites=[["4a", "8c"]])
print(my_crystal2)
#my_crystal2.to_file("2.cif")
my_crystal3 = pyxtal()
my_crystal3.from_random(3, 225, ['C','Si'], [12, 4], 1.0, sites=[["4a", "8c"], None])
print(my_crystal3)
#my_crystal3.to_file("3.cif")
slow = []
failed = []
fprint(" Spacegroup # |Generated (SPG)|Generated (PMG)| Time Elapsed")
skip = []
# skip = (
# [124, 139, 166, 167, 196, 202, 203, 204, 207, 209, 210, 216, 217,
# 219, 220, 221, 223, 225, 226, 227, 228, 229, 230] #slow to generate
# )
for sg in range(1, 231):
if sg not in skip:
multiplicity = len(get_wyckoffs(sg)[0])
start = time()
rand_crystal = pyxtal()
rand_crystal.from_random(3, sg, ["C"], [multiplicity], 1.0)
end = time()
timespent = np.around((end - start), decimals=2)
t = str(timespent)
if len(t) == 3:
t += "0"
t += " s"
if timespent >= 1.0:
t += " ~"
if timespent >= 3.0:
t += "~"
if timespent >= 10.0:
t += "~"
if timespent >= 60.0:
t += "~"
slow.append(sg)
if rand_crystal.valid:
check = False
#ans1 = get_symmetry_dataset(rand_crystal.spg_struct, symprec=1e-1)
ans1 = get_symmetry_dataset(rand_crystal.to_ase(), symprec=1e-1)
if ans1 is None:
ans1 = "???"
else:
ans1 = ans1["number"]
sga = SpacegroupAnalyzer(rand_crystal.to_pymatgen())
ans2 = "???"
if sga is not None:
try:
ans2 = sga.get_space_group_number()
except:
ans2 = "???"
if ans2 is None:
ans2 = "???"
# Compare expected and detected groups
if ans1 == "???" and ans2 == "???":
check = True
elif ans1 == "???":
if int(ans2) > sg:
pass
elif ans2 == "???":
if int(ans1) > sg:
pass
else:
if ans1 < sg and ans2 < sg:
if compare_wyckoffs(sg, ans1) or compare_wyckoffs(sg, ans2):
pass
else:
check = True
# output cif files for incorrect space groups
if check is True:
if check_struct_group(rand_crystal, sg, dim=3):
pass
else:
t += " xxxxx"
                        outstructs.append(rand_crystal.to_pymatgen())
outstrings.append(str("3D_Atomic_" + str(sg) + ".vasp"))
fprint("\t{}\t|\t{}\t|\t{}\t|\t{}".format(sg, ans1, ans2, t))
else:
fprint(
"~~~~ Error: Could not generate space group {} after {}".format(sg, t)
)
failed.append(sg)
if slow != []:
fprint("~~~~ The following space groups took more than 60 seconds to generate:")
for i in slow:
fprint(" " + str(i))
if failed != []:
fprint("~~~~ The following space groups failed to generate:")
for i in failed:
fprint(" " + str(i))
def test_molecular():
global outstructs
global outstrings
fprint(
"=== Testing generation of molecular 3D crystals. This may take some time. ==="
)
slow = []
failed = []
fprint(" Spacegroup # |Generated (SPG)|Generated (PMG)| Time Elapsed")
skip = [
225,
226,
227,
228,
] # [24, 183, 202, 203, 209, 210, 216, 219, 225, 226, 227, 228, 229, 230] #slow
for sg in range(1, 231):
if sg not in skip:
multiplicity = len(get_wyckoffs(sg)[0])
start = time()
rand_crystal = pyxtal(molecular=True)
rand_crystal.from_random(3, sg, ["H2O"], [multiplicity], 2.5)
end = time()
timespent = np.around((end - start), decimals=2)
t = str(timespent)
if len(t) == 3:
t += "0"
t += " s"
if timespent >= 1.0:
t += " ~"
if timespent >= 3.0:
t += "~"
if timespent >= 10.0:
t += "~"
if timespent >= 60.0:
t += "~"
slow.append(sg)
if rand_crystal.valid:
check = False
ans1 = get_symmetry_dataset(rand_crystal.to_ase(), symprec=1e-1)
if ans1 is None:
ans1 = "???"
else:
ans1 = ans1["number"]
sga = SpacegroupAnalyzer(rand_crystal.to_pymatgen())
ans2 = "???"
if sga is not None:
try:
ans2 = sga.get_space_group_number()
except:
ans2 = "???"
if ans2 is None:
ans2 = "???"
# Compare expected and detected groups
if ans1 == "???" and ans2 == "???":
check = True
elif ans1 == "???":
if int(ans2) > sg:
pass
elif ans2 == "???":
if int(ans1) > sg:
pass
else:
if ans1 < sg and ans2 < sg:
if compare_wyckoffs(sg, ans1) or compare_wyckoffs(sg, ans2):
pass
else:
check = True
# output cif files for incorrect space groups
if check is True:
if check_struct_group(rand_crystal, sg, dim=3):
pass
else:
t += " xxxxx"
# rand_crystal.to_file("poscar", "1.vasp")
# import sys
# sys.exit()
outstructs.append(rand_crystal.to_pymatgen())
outstrings.append(str("3D_Molecular_" + str(sg) + ".vasp"))
fprint("\t{}\t|\t{}\t|\t{}\t|\t{}".format(sg, ans1, ans2, t))
else:
fprint(
"~~~~ Error: Could not generate space group {} after {}".format(sg, t)
)
failed.append(sg)
if slow != []:
fprint("~~~~ The following space groups took more than 60 seconds to generate:")
for i in slow:
fprint(" " + str(i))
if failed != []:
fprint("~~~~ The following space groups failed to generate:")
for i in failed:
fprint(" " + str(i))
def test_atomic_2D():
global outstructs
global outstrings
fprint("=== Testing generation of atomic 2D crystals. This may take some time. ===")
slow = []
failed = []
fprint(" Layer group # | Symbol | Time Elapsed")
skip = []
for sg in range(1, 81):
if sg not in skip:
g = Group(sg, dim=2)
multiplicity = len(g[0]) # multiplicity of the general position
start = time()
rand_crystal = pyxtal()
rand_crystal.from_random(2, sg, ["C"], [multiplicity], 4.0)
end = time()
timespent = np.around((end - start), decimals=2)
t = str(timespent)
if len(t) == 3:
t += "0"
t += " s"
if timespent >= 1.0:
t += " ~"
if timespent >= 3.0:
t += "~"
if timespent >= 10.0:
t += "~"
if timespent >= 60.0:
t += "~"
slow.append(sg)
if rand_crystal.valid:
if check_struct_group(rand_crystal, sg, dim=2):
pass
else:
t += " xxxxx"
outstructs.append(rand_crystal.to_pymatgen())
outstrings.append(str("atomic_2D_" + str(sg) + ".vasp"))
symbol = g.symbol
fprint("\t{}\t|\t{}\t|\t{}".format(sg, symbol, t))
else:
fprint(
"~~~~ Error: Could not generate layer group {} after {}".format(sg, t)
)
failed.append(sg)
if slow != []:
fprint("~~~~ The following layer groups took more than 60 seconds to generate:")
for i in slow:
fprint(" " + str(i))
if failed != []:
fprint("~~~~ The following layer groups failed to generate:")
for i in failed:
fprint(" " + str(i))
def test_molecular_2D():
global outstructs
global outstrings
fprint(
"=== Testing generation of molecular 2D crystals. This may take some time. ==="
)
slow = []
failed = []
fprint(" Layer group # | Symbol | Time Elapsed")
skip = []
for sg in range(1, 81):
if sg not in skip:
g = Group(sg, dim=2)
multiplicity = len(g[0]) # multiplicity of the general position
start = time()
rand_crystal = pyxtal(molecular=True)
rand_crystal.from_random(2, sg, ["H2O"], [multiplicity], 4.0)
end = time()
timespent = np.around((end - start), decimals=2)
t = str(timespent)
if len(t) == 3:
t += "0"
t += " s"
if timespent >= 1.0:
t += " ~"
if timespent >= 3.0:
t += "~"
if timespent >= 10.0:
t += "~"
if timespent >= 60.0:
t += "~"
slow.append(sg)
if rand_crystal.valid:
if check_struct_group(rand_crystal, sg, dim=2):
pass
else:
t += " xxxxx"
outstructs.append(rand_crystal.to_pymatgen())
outstrings.append(str("molecular_2D_" + str(sg) + ".vasp"))
symbol = g.symbol
fprint("\t{}\t|\t{}\t|\t{}".format(sg, symbol, t))
else:
fprint(
"~~~~ Error: Could not generate layer group {} after {}".format(sg, t)
)
failed.append(sg)
if slow != []:
fprint("~~~~ The following layer groups took more than 60 seconds to generate:")
for i in slow:
fprint(" " + str(i))
if failed != []:
fprint("~~~~ The following layer groups failed to generate:")
for i in failed:
fprint(" " + str(i))
def test_atomic_1D():
global outstructs
global outstrings
fprint("=== Testing generation of atomic 1D crystals. This may take some time. ===")
slow = []
failed = []
fprint(" Rod group | Gen sg. (SPG) | Gen. sg (PMG) |Time Elapsed")
skip = [] # slow to generate
for num in range(1, 76):
if num not in skip:
multiplicity = len(get_rod(num)[0]) # multiplicity of the general position
start = time()
rand_crystal = pyxtal()
rand_crystal.from_random(1, num, ["H"], [multiplicity], 4.0)
end = time()
timespent = np.around((end - start), decimals=2)
t = str(timespent)
if len(t) == 3:
t += "0"
t += " s"
if timespent >= 1.0:
t += " ~"
if timespent >= 3.0:
t += "~"
if timespent >= 10.0:
t += "~"
if timespent >= 60.0:
t += "~"
slow.append(num)
if rand_crystal.valid:
try:
ans1 = get_symmetry_dataset(rand_crystal.to_ase(), symprec=1e-1)
except:
ans1 = "???"
if ans1 is None or ans1 == "???":
ans1 = "???"
else:
ans1 = ans1["number"]
sga = SpacegroupAnalyzer(rand_crystal.to_pymatgen())
try:
ans2 = sga.get_space_group_number()
except:
ans2 = "???"
if ans2 is None:
ans2 = "???"
check = True
# output cif files for incorrect space groups
if check is True:
if check_struct_group(rand_crystal, num, dim=1):
pass
else:
t += " xxxxx"
                        outstructs.append(rand_crystal.to_pymatgen())
outstrings.append(str("1D_Atomic_" + str(num) + ".vasp"))
fprint("\t{}\t|\t{}\t|\t{}\t|\t{}".format(num, ans1, ans2, t))
else:
fprint(
"~~~~ Error: Could not generate layer group {} after {}".format(
num, t
)
)
failed.append(num)
if slow != []:
fprint("~~~~ The following layer groups took more than 60 seconds to generate:")
for i in slow:
fprint(" " + str(i))
if failed != []:
fprint("~~~~ The following layer groups failed to generate:")
for i in failed:
fprint(" " + str(i))
def test_molecular_1D():
global outstructs
global outstrings
fprint(
"=== Testing generation of molecular 1D crystals. This may take some time. ==="
)
slow = []
failed = []
fprint(" Rod group | Gen sg. (SPG) | Gen. sg (PMG) |Time Elapsed")
skip = [] # slow to generate
for num in range(1, 76):
if num not in skip:
multiplicity = len(get_rod(num)[0]) # multiplicity of the general position
start = time()
rand_crystal = pyxtal(molecular=True)
rand_crystal.from_random(1, num, ["H2O"], [multiplicity], 4.0)
end = time()
timespent = np.around((end - start), decimals=2)
t = str(timespent)
if len(t) == 3:
t += "0"
t += " s"
if timespent >= 1.0:
t += " ~"
if timespent >= 3.0:
t += "~"
if timespent >= 10.0:
t += "~"
if timespent >= 60.0:
t += "~"
slow.append(num)
if rand_crystal.valid:
try:
ans1 = get_symmetry_dataset(rand_crystal.to_ase(), symprec=1e-1)
except:
ans1 = "???"
if ans1 is None or ans1 == "???":
ans1 = "???"
else:
ans1 = ans1["number"]
sga = SpacegroupAnalyzer(rand_crystal.to_pymatgen())
try:
ans2 = sga.get_space_group_number()
except:
ans2 = "???"
if ans2 is None:
ans2 = "???"
check = True
# output cif files for incorrect space groups
if check is True:
if check_struct_group(rand_crystal, num, dim=1):
pass
else:
t += " xxxxx"
outstructs.append(rand_crystal.to_pymatgen())
outstrings.append(str("1D_Molecular_" + str(num) + ".vasp"))
fprint("\t{}\t|\t{}\t|\t{}\t|\t{}".format(num, ans1, ans2, t))
else:
fprint(
"~~~~ Error: Could not generate layer group {} after {}".format(
num, t
)
)
failed.append(num)
if slow != []:
fprint("~~~~ The following layer groups took more than 60 seconds to generate:")
for i in slow:
fprint(" " + str(i))
if failed != []:
fprint("~~~~ The following layer groups failed to generate:")
for i in failed:
fprint(" " + str(i))
def test_cluster():
global outstructs
global outstrings
fprint("=== Testing generation of point group clusters. This may take some time. ===")
slow = []
failed = []
fprint(" Point group # | Symbol | Time Elapsed")
skip = [56] # [32,55,56]#[28,29,30,31,32,55,56]
for sg in range(1, 57):
if sg not in skip:
multiplicity = len(
Group(sg, dim=0)[0]
) # multiplicity of the general position
start = time()
rand_crystal = pyxtal()
rand_crystal.from_random(0, sg, ["C"], [multiplicity], 1.0)
end = time()
timespent = np.around((end - start), decimals=2)
t = str(timespent)
if len(t) == 3:
t += "0"
t += " s"
if timespent >= 1.0:
t += " ~"
if timespent >= 3.0:
t += "~"
if timespent >= 10.0:
t += "~"
if timespent >= 60.0:
t += "~"
slow.append(sg)
if rand_crystal.valid:
if check_struct_group(rand_crystal, sg, dim=0):
pass
else:
t += " xxxxx"
outstructs.append(rand_crystal.to_pymatgen())
outstrings.append(str("Cluster_" + str(sg) + ".vasp"))
pgsymbol = Group(sg, dim=0).symbol
fprint("\t{}\t|\t{}\t|\t{}".format(sg, pgsymbol, t))
else:
fprint(
"~~~~ Error: Could not generate space group {} after {}".format(sg, t)
)
failed.append(sg)
if slow != []:
fprint("~~~~ The following space groups took more than 60 seconds to generate:")
for i in slow:
fprint(" " + str(i))
if failed != []:
fprint("~~~~ The following space groups failed to generate:")
for i in failed:
fprint(" " + str(i))
def test_modules():
fprint("====== Testing functionality for pyXtal version 0.1dev ======")
global failed_package
failed_package = False # Record if errors occur at any level
reset()
fprint("Importing sys...")
try:
import sys
fprint("Success!")
except Exception as e:
fail(e)
sys.exit(0)
fprint("Importing numpy...")
try:
import numpy as np
fprint("Success!")
except Exception as e:
fail(e)
sys.exit(0)
fprint("Importing pymatgen...")
try:
import pymatgen
fprint("Success!")
except Exception as e:
fail(e)
sys.exit(0)
try:
from pymatgen.core.operations import SymmOp
except Exception as e:
fail(e)
sys.exit(0)
fprint("Importing pandas...")
try:
import pandas
fprint("Success!")
except Exception as e:
fail(e)
sys.exit(0)
fprint("Importing spglib...")
try:
import spglib
fprint("Success!")
except Exception as e:
fail(e)
sys.exit(0)
fprint("Importing ase...")
try:
import ase
fprint("Success!")
except:
fprint("Error: could not import openbabel. Try reinstalling the package.")
fprint("=== Testing modules ===")
# =====database.element=====
fprint("pyxtal.database.element")
reset()
try:
import pyxtal.database.element
except Exception as e:
fail(e)
fprint(" class Element")
try:
from pyxtal.database.element import Element
except Exception as e:
fail(e)
if passed():
for i in range(1, 95):
if passed():
try:
ele = Element(i)
except:
fail("Could not access Element # " + str(i))
try:
y = ele.sf
y = ele.z
y = ele.short_name
y = ele.long_name
y = ele.valence
y = ele.valence_electrons
y = ele.covalent_radius
y = ele.vdw_radius
y = ele.get_all(0)
except:
fail("Could not access attribute for element # " + str(i))
try:
ele.all_z()
ele.all_short_names()
ele.all_long_names()
ele.all_valences()
ele.all_valence_electrons()
ele.all_covalent_radii()
ele.all_vdw_radii()
except:
fail("Could not access class methods")
check()
# =====database.hall=====
fprint("pyxtal.database.hall")
reset()
try:
import pyxtal.database.hall
except Exception as e:
fail(e)
fprint(" hall_from_hm")
try:
from pyxtal.database.hall import hall_from_hm
except Exception as e:
fail(e)
if passed():
for i in range(1, 230):
if passed():
try:
hall_from_hm(i)
except:
fail("Could not access hm # " + str(i))
check()
# =====database.collection=====
fprint("pyxtal.database.collection")
reset()
try:
import pyxtal.database.collection
except Exception as e:
fail(e)
fprint(" Collection")
try:
from pyxtal.database.collection import Collection
except Exception as e:
fail(e)
if passed():
for i in range(1, 230):
if passed():
try:
molecule_collection = Collection("molecules")
except:
fail("Could not access hm # " + str(i))
check()
# =====operations=====
fprint("pyxtal.operations")
reset()
try:
import pyxtal.operations
except Exception as e:
fail(e)
from pyxtal.lattice import random_shear_matrix, random_vector
fprint(" angle")
try:
from pyxtal.operations import angle
except Exception as e:
fail(e)
if passed():
try:
for i in range(10):
v1 = random_vector()
v2 = random_vector()
angle(v1, v2)
except Exception as e:
fail(e)
check()
fprint(" is_orthogonal")
try:
from pyxtal.operations import is_orthogonal
except Exception as e:
fail(e)
if passed():
try:
a = is_orthogonal([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
b = is_orthogonal([[0, 0, 1], [1, 0, 0], [1, 0, 0]])
if a is True and b is False:
pass
else:
fail()
except Exception as e:
fail(e)
check()
fprint(" rotate_vector")
try:
from pyxtal.operations import rotate_vector
except Exception as e:
fail(e)
if passed():
try:
for i in range(10):
v1 = random_vector()
v2 = random_vector()
rotate_vector(v1, v2)
except Exception as e:
fail(e)
check()
fprint(" are_equal")
try:
from pyxtal.operations import are_equal
except Exception as e:
fail(e)
if passed():
try:
op1 = SymmOp.from_xyz_string("x,y,z")
op2 = SymmOp.from_xyz_string("x,y,z+1")
a = are_equal(op1, op2, PBC=[0, 0, 1])
b = are_equal(op1, op2, PBC=[1, 0, 0])
if a is True and b is False:
pass
else:
fail()
except Exception as e:
fail(e)
check()
fprint(" class OperationAnalyzer")
try:
from pyxtal.operations import OperationAnalyzer
except Exception as e:
fail(e)
if passed():
try:
m = np.eye(3)
t = random_vector()
op1 = SymmOp.from_rotation_and_translation(m, t)
OperationAnalyzer(op1)
except Exception as e:
fail(e)
check()
# =====symmetry=====
fprint("pyxtal.symmetry")
reset()
try:
import pyxtal.symmetry
except Exception as e:
fail(e)
fprint(" get_wyckoffs (may take a moment)")
try:
from pyxtal.symmetry import get_wyckoffs
except Exception as e:
fail(e)
if passed():
try:
for i in [1, 2, 229, 230]:
get_wyckoffs(i)
get_wyckoffs(i, organized=True)
except:
fail(" Could not access Wyckoff positions for space group # " + str(i))
check()
fprint(" get_wyckoff_symmetry (may take a moment)")
try:
from pyxtal.symmetry import get_wyckoff_symmetry
except Exception as e:
fail(e)
if passed():
try:
for i in [1, 2, 229, 230]:
get_wyckoff_symmetry(i)
get_wyckoff_symmetry(i, molecular=True)
except:
fail("Could not access Wyckoff symmetry for space group # " + str(i))
check()
fprint(" get_wyckoffs_generators (may take a moment)")
try:
from pyxtal.symmetry import get_wyckoff_generators
except Exception as e:
fail(e)
if passed():
try:
for i in [1, 2, 229, 230]:
get_wyckoff_generators(i)
except:
fail("Could not access Wyckoff generators for space group # " + str(i))
check()
fprint(" letter_from_index")
try:
from pyxtal.symmetry import letter_from_index
except Exception as e:
fail(e)
if passed():
try:
if letter_from_index(0, get_wyckoffs(47)) == "A":
pass
else:
fail()
except Exception as e:
fail(e)
check()
fprint(" index_from_letter")
try:
from pyxtal.symmetry import index_from_letter
except Exception as e:
fail(e)
if passed():
try:
if index_from_letter("A", get_wyckoffs(47)) == 0:
pass
else:
fail()
except Exception as e:
fail(e)
check()
fprint(" jk_from_i")
try:
from pyxtal.symmetry import jk_from_i
except Exception as e:
fail(e)
if passed():
try:
w = get_wyckoffs(2, organized=True)
j, k = jk_from_i(1, w)
if j == 1 and k == 0:
pass
else:
fprint(j, k)
fail()
except Exception as e:
fail(e)
check()
fprint(" i_from_jk")
try:
from pyxtal.symmetry import i_from_jk
except Exception as e:
fail(e)
if passed():
try:
w = get_wyckoffs(2, organized=True)
j, k = jk_from_i(1, w)
i = i_from_jk(j, k, w)
if i == 1:
pass
else:
fprint(j, k)
fail()
except Exception as e:
fail(e)
check()
fprint(" ss_string_from_ops")
try:
from pyxtal.symmetry import ss_string_from_ops
except Exception as e:
fail(e)
if passed():
try:
strings = ["1", "4 . .", "2 3 ."]
for i, sg in enumerate([1, 75, 195]):
ops = get_wyckoffs(sg)[0]
ss_string_from_ops(ops, sg, dim=3)
except Exception as e:
fail(e)
check()
fprint(" Wyckoff_position")
try:
from pyxtal.symmetry import Wyckoff_position
except Exception as e:
fail(e)
if passed():
try:
wp = Wyckoff_position.from_group_and_index(20, 1)
except Exception as e:
fail(e)
check()
fprint(" Group")
try:
from pyxtal.symmetry import Group
except Exception as e:
fail(e)
if passed():
try:
g3 = Group(230)
g2 = Group(80, dim=2)
g1 = Group(75, dim=1)
except Exception as e:
fail(e)
check()
# =====molecule=====
fprint("pyxtal.molecule")
reset()
try:
from pyxtal.molecule import pyxtal_molecule
except Exception as e:
fail(e)
if passed():
try:
h2o = pyxtal_molecule("H2O").mol
ch4 = pyxtal_molecule("CH4").mol
except Exception as e:
fail(e)
check()
fprint(" reoriented_molecule")
try:
from pyxtal.molecule import reoriented_molecule
except Exception as e:
fail(e)
if passed():
try:
reoriented_molecule(h2o)
reoriented_molecule(ch4)
except Exception as e:
fail(e)
check()
fprint(" orientation_in_wyckoff_position")
try:
from pyxtal.molecule import orientation_in_wyckoff_position
except Exception as e:
fail(e)
if passed():
try:
w = get_wyckoffs(20)
ws = get_wyckoff_symmetry(20, molecular=True)
wp = Wyckoff_position.from_group_and_index(20, 1)
orientation_in_wyckoff_position(h2o, wp)
orientation_in_wyckoff_position(ch4, wp)
except Exception as e:
fail(e)
check()
end(condition=2)
if __name__ == "__main__":
from pyxtal import print_logo
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument(
"-m",
"--module",
dest="module",
metavar="module",
default="all",
type=str,
help="modules options: 'all', 'atomic', 'molecular', \
'atomic_2D', 'molecular_2D', 'atomic_1D',\
'molecular_1D', 'cluster' ",
)
options = parser.parse_args()
print_logo()
modules_lib = {
"atomic": "test_atomic()",
"molecular": "test_molecular()",
"atomic_2D": "test_atomic_2D()",
"molecular_2D": "test_molecular_2D()",
"atomic_1D": "test_atomic_1D()",
"molecular_1D": "test_molecular_1D()",
"cluster": "test_cluster()",
}
if options.module == "all":
modules = modules_lib
else:
if options.module in modules_lib.keys():
modules = [options.module]
else:
fprint("please choose the modules from the followings:")
for module in modules_lib.keys():
fprint(module)
masterstart = time()
test_modules()
for module in modules:
eval(modules_lib[module])
masterend = time()
mastertime = masterend - masterstart
fprint("TEST COMPLETE")
fprint("\nTotal time elapsed: {:.2f}s".format(mastertime))
|
recipes/Python/578828_Indexing_text_files_with_Python/recipe-578828.py
|
tdiprima/code
| 2,023 |
99249
|
<reponame>tdiprima/code
"""
text_file_indexer.py
A program to index a text file.
Author: <NAME> - www.dancingbison.com
Copyright 2014 <NAME>
Given a text file somefile.txt, the program will read it completely,
and while doing so, record the occurrences of each unique word,
and the line numbers on which they occur. This information is
then written to an index file somefile.idx, which is also a text
file.
"""
import sys
import os
import string
from debug1 import debug1
def index_text_file(txt_filename, idx_filename,
delimiter_chars=",.;:!?"):
"""
Function to read txt_file name and create an index of the
occurrences of words in it. The index is written to idx_filename.
There is one index entry per line in the index file. An index entry
is of the form: word line_num line_num line_num ...
where "word" is a word occurring in the text file, and the instances
of "line_num" are the line numbers on which that word occurs in the
text file. The lines in the index file are sorted by the leading word
on the line. The line numbers in an index entry are sorted in
    ascending order. The argument delimiter_chars is a string of one or
    more characters that may adjoin words in the input and are not
    to be considered part of the word. The function will remove
those delimiter characters from the edges of the words before the rest
of the processing.
"""
try:
txt_fil = open(txt_filename, "r")
"""
Dictionary to hold words and the line numbers on which
they occur. Each key in the dictionary is a word and the
value corresponding to that key is a list of line numbers
on which that word occurs in txt_filename.
"""
word_occurrences = {}
line_num = 0
for lin in txt_fil:
line_num += 1
debug1("line_num", line_num)
# Split the line into words delimited by whitespace.
words = lin.split()
debug1("words", words)
# Remove unwanted delimiter characters adjoining words.
words2 = [ word.strip(delimiter_chars) for word in words ]
debug1("words2", words2)
# Find and save the occurrences of each word in the line.
for word in words2:
if word_occurrences.has_key(word):
word_occurrences[word].append(line_num)
else:
word_occurrences[word] = [ line_num ]
debug1("Processed {} lines".format(line_num))
if line_num < 1:
print "No lines found in text file, no index file created."
txt_fil.close()
sys.exit(0)
# Display results.
word_keys = word_occurrences.keys()
print "{} unique words found.".format(len(word_keys))
debug1("Word_occurrences", word_occurrences)
word_keys = word_occurrences.keys()
debug1("word_keys", word_keys)
# Sort the words in the word_keys list.
word_keys.sort()
debug1("after sort, word_keys", word_keys)
# Create the index file.
idx_fil = open(idx_filename, "w")
# Write the words and their line numbers to the index file.
# Since we read the text file sequentially, there is no need
# to sort the line numbers associated with each word; they are
# already in sorted order.
for word in word_keys:
line_nums = word_occurrences[word]
idx_fil.write(word + " ")
for line_num in line_nums:
idx_fil.write(str(line_num) + " ")
idx_fil.write("\n")
txt_fil.close()
idx_fil.close()
except IOError as ioe:
sys.stderr.write("Caught IOError: " + repr(ioe) + "\n")
sys.exit(1)
except Exception as e:
sys.stderr.write("Caught Exception: " + repr(e) + "\n")
sys.exit(1)
def usage(sys_argv):
sys.stderr.write("Usage: {} text_file.txt index_file.txt\n".format(
sys_argv[0]))
def main():
if len(sys.argv) != 3:
usage(sys.argv)
sys.exit(1)
index_text_file(sys.argv[1], sys.argv[2])
if __name__ == "__main__":
main()
# EOF
|
imapfw/conf/__init__.py
|
paralax/imapfw
| 492 |
99250
|
<reponame>paralax/imapfw
from .conf import ImapfwConfig
from .clioptions import Parser
|
src/sage/categories/commutative_algebras.py
|
bopopescu/sage
| 1,742 |
99295
|
r"""
Commutative algebras
"""
#*****************************************************************************
# Copyright (C) 2005 <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# 2008-2009 <NAME> <nthiery at users.sf.net>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#******************************************************************************
from sage.categories.category_with_axiom import CategoryWithAxiom_over_base_ring
from sage.categories.algebras import Algebras
class CommutativeAlgebras(CategoryWithAxiom_over_base_ring):
"""
The category of commutative algebras with unit over a given base ring.
EXAMPLES::
sage: M = CommutativeAlgebras(GF(19))
sage: M
Category of commutative algebras over Finite Field of size 19
sage: CommutativeAlgebras(QQ).super_categories()
[Category of algebras over Rational Field, Category of commutative rings]
This is just a shortcut for::
sage: Algebras(QQ).Commutative()
Category of commutative algebras over Rational Field
TESTS::
sage: Algebras(QQ).Commutative() is CommutativeAlgebras(QQ)
True
sage: TestSuite(CommutativeAlgebras(ZZ)).run()
Todo:
- product ( = Cartesian product)
- coproduct ( = tensor product over base ring)
"""
def __contains__(self, A):
"""
EXAMPLES::
sage: QQ['a'] in CommutativeAlgebras(QQ)
True
sage: QQ['a,b'] in CommutativeAlgebras(QQ)
True
sage: FreeAlgebra(QQ,2,'a,b') in CommutativeAlgebras(QQ)
False
TODO: get rid of this method once all commutative algebras in
Sage declare themselves in this category
"""
return super(CommutativeAlgebras, self).__contains__(A) or \
(A in Algebras(self.base_ring()) and hasattr(A, "is_commutative") and A.is_commutative())
|
test/integration/expected_out_single_line/percent_dict.py
|
Inveracity/flynt
| 487 |
99344
|
<reponame>Inveracity/flynt<gh_stars>100-1000
a = 2
b = "wuga"
print(f'{a:f} {b}')
|
parsifal/apps/reviews/tests/test_new_review_view.py
|
ShivamPytho/parsifal
| 342 |
99352
|
<filename>parsifal/apps/reviews/tests/test_new_review_view.py<gh_stars>100-1000
from django.test.testcases import TestCase
from django.urls import reverse
from parsifal.apps.authentication.tests.factories import UserFactory
from parsifal.apps.reviews.tests.factories import ReviewFactory
from parsifal.utils.test import login_redirect_url
class TestNewReviewView(TestCase):
@classmethod
def setUpTestData(cls):
cls.user = UserFactory()
cls.url = reverse("reviews:new")
def test_login_required(self):
response = self.client.get(self.url)
self.assertRedirects(response, login_redirect_url(self.url))
def test_get_success(self):
self.client.force_login(self.user)
response = self.client.get(self.url)
with self.subTest(msg="Test get status code"):
self.assertEqual(200, response.status_code)
parts = ("csrfmiddlewaretoken", "title", "description")
for part in parts:
with self.subTest(msg="Test response body", part=part):
self.assertContains(response, part)
def test_post_success(self):
data = {"title": "Test SLR", "description": "This is a test SLR"}
self.client.force_login(self.user)
response = self.client.post(self.url, data, follow=True)
with self.subTest(msg="Test post status code"):
self.assertEqual(302, response.redirect_chain[0][1])
with self.subTest(msg="Test post redirect status code"):
self.assertEqual(200, response.status_code)
with self.subTest(msg="Test success message"):
self.assertContains(response, "Review created successfully.")
review = self.user.review_set.first()
with self.subTest(msg="Test generated slug"):
self.assertEqual("test-slr", review.name)
def test_post_fail(self):
data = {"title": "", "description": ""}
self.client.force_login(self.user)
response = self.client.post(self.url, data, follow=True)
with self.subTest(msg="Test post status code"):
self.assertEqual(200, response.status_code)
with self.subTest(msg="Test error message"):
self.assertContains(response, "This field is required.")
with self.subTest(msg="No review created"):
self.assertFalse(self.user.review_set.exists())
def test_post_conflicting_slug(self):
ReviewFactory(author=self.user, name="test-slr")
data = {"title": "Test SLR", "description": "This is a test SLR"}
self.client.force_login(self.user)
response = self.client.post(self.url, data, follow=True)
with self.subTest(msg="Test post redirect status code"):
self.assertEqual(200, response.status_code)
with self.subTest(msg="Test review created"):
self.assertEqual(2, self.user.review_set.count())
review = self.user.review_set.order_by("-id").first()
with self.subTest(msg="Test generated slug"):
self.assertEqual("test-slr-1", review.name)
def test_post_invalid_slug(self):
data = {"title": ")", "description": "This is a test SLR"}
self.client.force_login(self.user)
response = self.client.post(self.url, data, follow=True)
with self.subTest(msg="Test post redirect status code"):
self.assertEqual(200, response.status_code)
review = self.user.review_set.first()
with self.subTest(msg="Test review created"):
self.assertIsNotNone(review)
with self.subTest(msg="Test generated slug"):
self.assertEqual("literature-review", review.name)
|
Unit 9 Dry-Gas Reservoirs/functions/drygas_equivalence.py
|
datasolver/reservoir-engineering
| 139 |
99354
|
def condensate_to_gas_equivalence(api, stb):
"Derivation from real gas equation"
Tsc = 519.57 # standard temp in Rankine
psc = 14.7 # standard pressure in psi
R = 10.732
rho_w = 350.16 # water density in lbm/STB
so = 141.5 / (api + 131.5) # so: specific gravity of oil (dimensionless)
Mo = 5854 / (api - 8.811) # molecular weight of oil
n = (rho_w * so) / Mo
V1stb = ((n * R * Tsc) / psc)
V = V1stb * stb
return(V)
def general_equivalence(gamma, M):
"Calculate equivalence of 1 STB of water/condensate to scf of gas"
# gamma: specific gravity of condensate/water. oil specific gravity use formula: so=141.5/(api+131.5). water specific gravity = 1
# M: molecular weight of condensate/water. oil: Mo = 5854 / (api - 8.811). water: Mw = 18
V1stb = 132849 * (gamma / M)
return(V1stb)
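

# Illustrative usage sketch (the inputs are example values, not part of the
# original module): convert 1,000 STB of a 45-degree-API condensate to its gas
# equivalent, and compute the per-STB equivalence for oil and for water
# (water: gamma = 1, M = 18).
if __name__ == "__main__":
    api, stb = 45.0, 1000.0
    print(condensate_to_gas_equivalence(api, stb))   # roughly 6.6e5 scf in total
    so = 141.5 / (api + 131.5)                        # oil specific gravity
    Mo = 5854 / (api - 8.811)                         # oil molecular weight
    print(general_equivalence(so, Mo))                # roughly 660 scf per STB
    print(general_equivalence(1.0, 18.0))             # roughly 7.4e3 scf per STB of water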
|
solutions/problem_008.py
|
ksvr444/daily-coding-problem
| 1,921 |
99376
|
class Node:
def __init__(self, data):
self.data = data
self.left = None
self.right = None
def __repr__(self):
return str(self.data)
def count_unival_trees(root):
if not root:
return 0
elif not root.left and not root.right:
return 1
elif not root.left and root.data == root.right.data:
return 1 + count_unival_trees(root.right)
elif not root.right and root.data == root.left.data:
return 1 + count_unival_trees(root.left)
child_counts = count_unival_trees(root.left) + count_unival_trees(root.right)
current_node_count = 0
    if root.data == root.left.data and root.data == root.right.data:
current_node_count = 1
return current_node_count + child_counts
node_a = Node('0')
node_b = Node('1')
node_c = Node('0')
node_d = Node('1')
node_e = Node('0')
node_f = Node('1')
node_g = Node('1')
node_a.left = node_b
node_a.right = node_c
node_c.left = node_d
node_c.right = node_e
node_d.left = node_f
node_d.right = node_g
assert count_unival_trees(None) == 0
assert count_unival_trees(node_a) == 5
assert count_unival_trees(node_c) == 4
assert count_unival_trees(node_g) == 1
assert count_unival_trees(node_d) == 3
|
plugins/logging/syslog/__init__.py
|
madflojo/automon
| 414 |
99382
|
''' Syslog logging handler '''
import logging
import logging.handlers
import sys
from core.logs import BaseLogging
class Logger(BaseLogging):
''' Handler class for Syslog Logging '''
def setup(self):
''' Setup class for handler '''
lh = logging.handlers.SysLogHandler(
facility=self.config['logging']['plugins']['syslog']['facility'])
lh.setLevel(logging.DEBUG)
logfmt = logging.Formatter("%(name)s[%(process)d] - %(levelname)s - %(message)s")
lh.setFormatter(logfmt)
return lh
|
driver_55x4.py
|
mpi3d/goodix-fp-dump
| 136 |
99392
|
from hashlib import sha256
from hmac import new as hmac
from random import randint
from re import fullmatch
from socket import socket
from struct import pack as encode
from subprocess import PIPE, STDOUT, Popen
from crcmod.predefined import mkCrcFun
from goodix import FLAGS_TRANSPORT_LAYER_SECURITY_DATA, Device
from protocol import USBProtocol
from tool import connect_device, decode_image, warning, write_pgm
TARGET_FIRMWARE: str = "GF3268_RTSEC_APP_10041"
IAP_FIRMWARE: str = "MILAN_RTSEC_IAP_10027"
VALID_FIRMWARE: str = "GF32[0-9]{2}_RTSEC_APP_100[0-9]{2}"
PSK: bytes = bytes.fromhex(
"0000000000000000000000000000000000000000000000000000000000000000")
PSK_WHITE_BOX: bytes = bytes.fromhex(
"ec35ae3abb45ed3f12c4751f1e5c2cc05b3c5452e9104d9f2a3118644f37a04b"
"6fd66b1d97cf80f1345f76c84f03ff30bb51bf308f2a9875c41e6592cd2a2f9e"
"60809b17b5316037b69bb2fa5d4c8ac31edb3394046ec06bbdacc57da6a756c5")
PMK_HASH: bytes = bytes.fromhex(
"81b8ff490612022a121a9449ee3aad2792f32b9f3141182cd01019945ee50361")
DEVICE_CONFIG: bytes = bytes.fromhex(
"6011607124952cc114d510e500e514f9030402000008001111ba000180ca0007"
"008400c0b38600bbc48800baba8a00b2b28c00aaaa8e00c1c19000bbbb9200b1"
"b1940000a8960000b6980000bf9a0000ba50000105d000000070000000720078"
"56740034122600001220001040120003042a0102002200012024003200800001"
"005c008000560008205800010032002c028200800cba000180ca0007002a0182"
"03200010402200012024001400800005005c0000015600082058000300820080"
"142a0108005c0080006200090364001800220000202a0108005c000001520008"
"0054000001000000000000000000000000000000000000000000000000009a69")
SENSOR_WIDTH = 88
SENSOR_HEIGHT = 108
def init_device(product: int) -> Device:
device = Device(product, USBProtocol)
device.nop()
return device
def check_psk(device: Device) -> bool:
reply = device.preset_psk_read(0xbb020007)
if not reply[0]:
raise ValueError("Failed to read PSK")
if reply[1] != 0xbb020007:
raise ValueError("Invalid flags")
return reply[2] == PMK_HASH
def write_psk(device: Device) -> bool:
if not device.preset_psk_write(0xbb010003, PSK_WHITE_BOX):
return False
if not check_psk(device):
return False
return True
def erase_firmware(device: Device) -> None:
device.mcu_erase_app(50, False)
device.disconnect()
def update_firmware(device: Device) -> None:
firmware_file = open(f"firmware/55x4/{TARGET_FIRMWARE}.bin", "rb")
firmware = firmware_file.read()
firmware_file.close()
mod = b""
for i in range(1, 65):
mod += encode("<B", i)
raw_pmk = (encode(">H", len(PSK)) + PSK) * 2
pmk = sha256(raw_pmk).digest()
pmk_hmac = hmac(pmk, mod, sha256).digest()
firmware_hmac = hmac(pmk_hmac, firmware, sha256).digest()
try:
length = len(firmware)
for i in range(0, length, 256):
if not device.write_firmware(i, firmware[i:i + 256]):
raise ValueError("Failed to write firmware")
if not device.check_firmware(0, length,
mkCrcFun("crc-32-mpeg")(firmware),
firmware_hmac):
raise ValueError("Failed to check firmware")
except Exception as error:
print(
warning(f"The program went into serious problems while trying to "
f"update the firmware: {error}"))
erase_firmware(device)
raise error
device.reset(False, True, 100)
device.disconnect()
def run_driver(device: Device):
tls_server = Popen([
"openssl", "s_server", "-nocert", "-psk",
PSK.hex(), "-port", "4433", "-quiet"
],
stdout=PIPE,
stderr=STDOUT)
try:
if not device.reset(True, False, 20)[0]:
raise ValueError("Reset failed")
device.read_sensor_register(0x0000, 4) # Read chip ID (0x00a1)
device.read_otp()
# OTP: 0867860a12cc02faa65d2b4b0204e20cc20c9664087bf80706000000c02d431d
tls_client = socket()
tls_client.connect(("localhost", 4433))
try:
connect_device(device, tls_client)
if not device.upload_config_mcu(DEVICE_CONFIG):
raise ValueError("Failed to upload config")
device.mcu_switch_to_fdt_mode(
b"\x0d\x01\x80\x12\x80\x12\x80\x98"
b"\x80\x82\x80\x12\x80\xa0\x80\x99"
b"\x80\x7f\x80\x12\x80\x9f\x80\x93"
b"\x80\x7e", True)
tls_client.sendall(
device.mcu_get_image(b"\x01\x00",
FLAGS_TRANSPORT_LAYER_SECURITY_DATA)[9:])
write_pgm(decode_image(tls_server.stdout.read(14260)[:-4]),
SENSOR_WIDTH, SENSOR_HEIGHT, "clear-0.pgm")
device.mcu_switch_to_fdt_mode(
b"\x0d\x01\x80\x12\x80\x12\x80\x98"
b"\x80\x82\x80\x12\x80\xa0\x80\x99"
b"\x80\x7f\x80\x12\x80\x9f\x80\x93"
b"\x80\x7e", True)
device.mcu_switch_to_idle_mode(20)
device.read_sensor_register(0x0082, 2)
tls_client.sendall(
device.mcu_get_image(b"\x01\x00",
FLAGS_TRANSPORT_LAYER_SECURITY_DATA)[9:])
write_pgm(decode_image(tls_server.stdout.read(14260)[:-4]),
SENSOR_WIDTH, SENSOR_HEIGHT, "clear-1.pgm")
device.mcu_switch_to_fdt_mode(
b"\x0d\x01\x80\x12\x80\x12\x80\x98"
b"\x80\x82\x80\x12\x80\xa0\x80\x99"
b"\x80\x7f\x80\x12\x80\x9f\x80\x93"
b"\x80\x7e", True)
if not device.switch_to_sleep_mode(0x6c):
raise ValueError("Failed to switch to sleep mode")
print("Waiting for finger...")
device.mcu_switch_to_fdt_down(
b"\x0c\x01\x80\xb0\x80\xc4\x80\xba"
b"\x80\xa6\x80\xb7\x80\xc7\x80\xc0"
b"\x80\xaa\x80\xb4\x80\xc4\x80\xba"
b"\x80\xa6", True)
tls_client.sendall(
device.mcu_get_image(b"\x01\x00",
FLAGS_TRANSPORT_LAYER_SECURITY_DATA)[9:])
write_pgm(decode_image(tls_server.stdout.read(14260)[:-4]),
SENSOR_WIDTH, SENSOR_HEIGHT, "fingerprint.pgm")
finally:
tls_client.close()
finally:
tls_server.terminate()
def main(product: int) -> None:
print(
warning("This program might break your device.\n"
"Consider that it may flash the device firmware.\n"
"Continue at your own risk.\n"
"But don't hold us responsible if your device is broken!\n"
"Don't run this program as part of a regular process."))
code = randint(0, 9999)
if input(f"Type {code} to continue and confirm that you are not a bot: "
) != str(code):
print("Abort")
return
previous_firmware = None
device = init_device(product)
while True:
firmware = device.firmware_version()
print(f"Firmware: {firmware}")
valid_psk = check_psk(device)
print(f"Valid PSK: {valid_psk}")
if firmware == IAP_FIRMWARE:
iap = IAP_FIRMWARE
else:
iap = device.get_iap_version(25)
print(f"IAP: {iap}")
if iap != IAP_FIRMWARE:
raise ValueError(
"Invalid IAP\n" +
warning("Please consider that removing this security "
"is a very bad idea!"))
if firmware == previous_firmware:
raise ValueError("Unchanged firmware")
previous_firmware = firmware
if fullmatch(TARGET_FIRMWARE, firmware):
if not valid_psk:
if not write_psk(device):
raise ValueError("Failed to write PSK")
run_driver(device)
return
if fullmatch(VALID_FIRMWARE, firmware):
erase_firmware(device)
device = init_device(product)
continue
if fullmatch(IAP_FIRMWARE, firmware):
if not valid_psk:
if not write_psk(device):
raise ValueError("Failed to write PSK")
update_firmware(device)
device = init_device(product)
continue
raise ValueError("Invalid firmware\n" +
warning("Please consider that removing this security "
"is a very bad idea!"))
|
updater/reports/ReportGitVersionsNew.py
|
eisenhowerj/hubble
| 146 |
99406
|
from .ReportDaily import *
# Lists which git version was used by how many users yesterday
class ReportGitVersionsNew(ReportDaily):
def name(self):
return "git-versions-new"
def updateDailyData(self):
newHeader, newData = self.parseData(
self.executeScript(self.scriptPath("git-versions.sh")))
self.header = ["date"] + newHeader
newData = [[str(self.yesterday())] + row for row in newData]
self.data.extend(newData)
self.truncateData(self.timeRangeTotal())
self.sortDataByDate()
|
meshpy/meshpy/lighting.py
|
peter0749/PointNetGPD
| 193 |
99427
|
"""
Classes for lighting in renderer
Author: <NAME>
"""
import numpy as np
from autolab_core import RigidTransform
class Color(object):
WHITE = np.array([255, 255, 255])
BLACK = np.array([0, 0, 0])
RED = np.array([255, 0, 0])
GREEN = np.array([0, 255, 0])
BLUE = np.array([0, 0, 255])
class MaterialProperties(object):
""" Struct to encapsulate material properties for
OpenGL rendering.
Attributes
----------
color : :obj:`numpy.ndarray`
3-array of integers between 0 and 255
"""
def __init__(self, color=Color.WHITE,
ambient=0.2,
diffuse=0.8,
specular=0,
shininess=0):
# set params
self.color = np.array(color).astype(np.uint8)
self.ambient = ambient
self.diffuse = diffuse
self.specular = specular
self.shininess = shininess
def __str__(self):
s = ''
s += 'Color: %s\n' %(str(self.color))
s += 'Ambient: %f\n' %(self.ambient)
s += 'Diffuse: %f\n' %(self.diffuse)
s += 'Specular: %f\n' %(self.specular)
s += 'Shininess: %f\n' %(self.shininess)
return s
@property
def arr(self):
""" Returns the material properties as a contiguous numpy array. """
return np.r_[self.color,
self.ambient * np.ones(3), 1,
self.diffuse * np.ones(3), 1,
self.specular * np.ones(3), 1,
self.shininess].astype(np.float64)
class LightingProperties(object):
""" Struct to encapsulate lighting properties for
OpenGL rendering.
"""
def __init__(self, ambient=0,
diffuse=1,
specular=1,
T_light_camera=RigidTransform(rotation=np.eye(3),
translation=np.zeros(3),
from_frame='light',
to_frame='camera'),
cutoff=180.0):
self.ambient = ambient
self.diffuse = diffuse
self.specular = specular
self.T_light_camera = T_light_camera
self.cutoff = cutoff
self.T_light_obj = None
def __str__(self):
s = ''
s += 'Ambient: %f\n' %(self.ambient)
s += 'Diffuse: %f\n' %(self.diffuse)
s += 'Specular: %f\n' %(self.specular)
s += 'T_light_camera: %s\n' %(str(self.T_light_camera))
s += 'Cutoff: %f\n' %(self.cutoff)
return s
def set_pose(self, T_obj_camera):
self.T_light_obj = T_obj_camera.inverse() * self.T_light_camera.as_frames('light', T_obj_camera.to_frame)
@property
def arr(self):
""" Returns the lighting properties as a contiguous numpy array. """
if self.T_light_obj is None:
raise ValueError('Need to set pose relative to object!')
return np.r_[self.ambient * np.ones(3), 1,
self.diffuse * np.ones(3), 1,
self.specular * np.ones(3), 1,
self.T_light_obj.translation,
self.T_light_obj.z_axis,
self.cutoff].astype(np.float64)
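

# Illustrative usage sketch (the property values are arbitrary examples, not
# renderer defaults): build a MaterialProperties struct and inspect its
# flattened array form.
if __name__ == "__main__":
    props = MaterialProperties(color=Color.RED, ambient=0.3, diffuse=0.7,
                               specular=0.1, shininess=10)
    print(props)
    print(props.arr)  # 16 values: color(3) + ambient/diffuse/specular(4 each) + shininess(1)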
|
CalibMuon/DTCalibration/python/dtVDriftSegmentCalibration_cfi.py
|
ckamtsikis/cmssw
| 852 |
99453
|
import FWCore.ParameterSet.Config as cms
from CalibMuon.DTCalibration.dtSegmentSelection_cfi import dtSegmentSelection
dtVDriftSegmentCalibration = cms.EDAnalyzer("DTVDriftSegmentCalibration",
# Segment selection
dtSegmentSelection,
recHits4DLabel = cms.InputTag('dt4DSegments'),
rootFileName = cms.untracked.string('DTVDriftHistos.root'),
# Choose the chamber you want to calibrate (default = "All"), specify the chosen chamber
# in the format "wheel station sector" (i.e. "-1 3 10")
calibChamber = cms.untracked.string('All')
)
|
ichnaea/models/api.py
|
mikiec84/ichnaea
| 348 |
99469
|
from sqlalchemy import Boolean, Column, String
from sqlalchemy.dialects.mysql import INTEGER as Integer, TINYINT as TinyInteger
from ichnaea.models.base import _Model
class ApiKey(_Model):
"""
ApiKey model.
The allow_fallback and fallback columns determine if and what
fallback location provider should be used.
The url specifies the external endpoint supporting the
:ref:`api_geolocate_latest` API.
Requests to the fallback service can optionally be rate limited.
Two settings control the rate limit:
``ratelimit`` specifies how many requests are allowed to be made.
``ratelimit_interval`` specifies the interval in seconds for which
the ``ratelimit`` number applies, so for example one could
configure 60 requests per 60 seconds, or 86400 requests per
86400 seconds (one day). Both would on average allow one request
per second.
Finally the fallback service might allow caching of results inside
the projects own Redis cache. ``cache_expire`` specifies the number
of seconds for which entries are allowed to be cached.
"""
__tablename__ = "api_key"
valid_key = Column(String(40), primary_key=True) # UUID API key.
maxreq = Column(Integer) # Maximum number of requests per day.
allow_fallback = Column(Boolean) # Use the fallback source?
allow_locate = Column(Boolean) # Allow locate queries?
allow_region = Column(Boolean) # Allow region queries?
fallback_name = Column(String(40)) # Fallback metric name.
fallback_schema = Column(String(64)) # Fallback API schema.
fallback_url = Column(String(256)) # URL of the fallback provider.
fallback_ratelimit = Column(Integer) # Fallback rate limit count.
fallback_ratelimit_interval = Column(Integer) # Interval in seconds.
fallback_cache_expire = Column(Integer) # Cache expiry in seconds.
store_sample_locate = Column(TinyInteger) # Sample rate 0-100.
store_sample_submit = Column(TinyInteger) # Sample rate 0-100.
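

# Illustrative sketch only (not part of ichnaea): one way the two rate-limit
# columns described in the docstring above could be enforced with a simple
# fixed-window counter. The Redis client, key naming, and window handling are
# assumptions made for the example; the real service implements its own limiter.
def _fallback_allowed_sketch(redis_client, api_key):
    if not api_key.fallback_ratelimit:
        return True  # no limit configured
    window = api_key.fallback_ratelimit_interval or 1
    key = "demo:fallback_ratelimit:%s" % api_key.valid_key
    count = redis_client.incr(key)
    if count == 1:
        # first request in this window: start the expiry clock
        redis_client.expire(key, window)
    return count <= api_key.fallback_ratelimit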
|
profiles/views/billing_group_views.py
|
Sispheor/squest
| 112 |
99509
|
<filename>profiles/views/billing_group_views.py
from django.contrib.auth.decorators import user_passes_test
from django.contrib.auth.models import User
from django.shortcuts import render, redirect, get_object_or_404
from django.urls import reverse
from django.utils.safestring import mark_safe
from profiles.forms import AddUserForm
from profiles.forms.billing_group_forms import BillingGroupForm
from profiles.models import BillingGroup
@user_passes_test(lambda u: u.is_superuser)
def billing_group_edit(request, billing_group_id):
group = get_object_or_404(BillingGroup, id=billing_group_id)
form = BillingGroupForm(request.POST or None, instance=group)
if form.is_valid():
form.save()
return redirect("profiles:billing_group_list")
breadcrumbs = [
{'text': 'Billing groups', 'url': reverse('profiles:billing_group_list')},
{'text': group.name, 'url': ""},
]
context = {'form': form, 'group': group, 'object_name': "billing_group", 'breadcrumbs': breadcrumbs}
return render(request, 'profiles/group/group-edit.html', context)
@user_passes_test(lambda u: u.is_superuser)
def billing_group_create(request):
if request.method == 'POST':
form = BillingGroupForm(request.POST)
if form.is_valid():
form.save()
return redirect("profiles:billing_group_list")
else:
form = BillingGroupForm()
breadcrumbs = [
{'text': 'Billing groups', 'url': reverse('profiles:billing_group_list')},
{'text': 'Create a new billing group', 'url': ""},
]
context = {'form': form, 'object_name': "billing_group", 'breadcrumbs': breadcrumbs}
return render(request, 'profiles/group/group-create.html', context)
@user_passes_test(lambda u: u.is_superuser)
def billing_group_delete(request, billing_group_id):
group = get_object_or_404(BillingGroup, id=billing_group_id)
if request.method == 'POST':
group.delete()
return redirect("profiles:billing_group_list")
args = {
"billing_group_id": billing_group_id,
}
breadcrumbs = [
{'text': 'Billing groups', 'url': reverse('profiles:billing_group_list')},
{'text': group.name, 'url': ""}
]
context = {
'breadcrumbs': breadcrumbs,
'confirm_text': mark_safe(f"Confirm deletion of <strong>{group.name}</strong>?"),
'action_url': reverse('profiles:billing_group_delete', kwargs=args),
'button_text': 'Delete',
'details': {'warning_sentence': 'Warning: some users are still present in this group:',
'details_list': [user.username for user in group.user_set.all()]
} if group.user_set.all() else None
}
return render(request, 'generics/confirm-delete-template.html', context=context)
@user_passes_test(lambda u: u.is_superuser)
def user_in_billing_group_update(request, billing_group_id):
group = get_object_or_404(BillingGroup, id=billing_group_id)
form = AddUserForm(request.POST or None, current_users=group.user_set.all())
if request.method == 'POST':
if form.is_valid():
users_id = form.cleaned_data.get('users')
current_users = group.user_set.all()
selected_users = [User.objects.get(id=user_id) for user_id in users_id]
to_remove = list(set(current_users) - set(selected_users))
to_add = list(set(selected_users) - set(current_users))
for user in to_remove:
group.user_set.remove(user)
for user in to_add:
group.user_set.add(user)
return redirect("profiles:user_by_billing_group_list", billing_group_id=billing_group_id)
breadcrumbs = [
{'text': 'Billing groups', 'url': reverse('profiles:billing_group_list')},
{'text': group.name, 'url': reverse('profiles:user_by_billing_group_list', args=[billing_group_id])},
{'text': "Users", 'url': ""}
]
context = {'form': form, 'group': group, 'object_name': "billing_group", 'breadcrumbs': breadcrumbs}
return render(request, 'profiles/group/user-in-group-update.html', context)
@user_passes_test(lambda u: u.is_superuser)
def user_in_billing_group_remove(request, billing_group_id, user_id):
group = get_object_or_404(BillingGroup, id=billing_group_id)
user = User.objects.get(id=user_id)
if request.method == 'POST':
group.user_set.remove(user)
return redirect('profiles:user_by_billing_group_list', billing_group_id=billing_group_id)
args = {
"billing_group_id": billing_group_id,
"user_id": user_id
}
breadcrumbs = [
{'text': 'Billing groups', 'url': reverse('profiles:billing_group_list')},
{'text': group.name, 'url': reverse('profiles:user_by_billing_group_list', args=[billing_group_id])},
{'text': "Users", 'url': ""}
]
context = {
'breadcrumbs': breadcrumbs,
'confirm_text': mark_safe(f"Confirm to remove the user <strong>{ user.username }</strong> from { group }?"),
'action_url': reverse('profiles:user_in_billing_group_remove', kwargs=args),
'button_text': 'Remove'
}
return render(request, 'generics/confirm-delete-template.html', context=context)
|
keras/downstream_tasks/BraTS/DataSet.py
|
joeranbosma/ModelsGenesis
| 574 |
99510
|
#!/usr/bin/env python
"""
File: DataSet
Date: 5/1/18
Author: <NAME> (<EMAIL>)
This file provides loading of the BraTS datasets
for ease of use in TensorFlow models.
"""
import os
import pandas as pd
import numpy as np
import nibabel as nib
from tqdm import tqdm
from BraTS.Patient import *
from BraTS.structure import *
from BraTS.modalities import *
from BraTS.load_utils import *
survival_df_cache = {} # Prevents loading CSVs more than once
class DataSubSet:
def __init__(self, directory_map, survival_csv, data_set_type=None):
self.directory_map = directory_map
self._patient_ids = sorted(list(directory_map.keys()))
self._survival_csv = survival_csv
self._num_patients = len(self._patient_ids)
self.type = data_set_type
# Data caches
self._mris = None
self._segs = None
self._patients = {}
self._survival_df_cached = None
self._patients_fully_loaded = False
self._id_indexer = {patient_id: i for i, patient_id in enumerate(self._patient_ids)}
def subset(self, patient_ids):
"""
Split this data subset into a small subset by patient ID
        :param patient_ids: The patient IDs to include in the smaller subset
        :return: A new DataSubSet containing only the specified patients
"""
dir_map = {id: self.directory_map[id] for id in patient_ids}
return DataSubSet(dir_map, self._survival_csv)
@property
def ids(self):
"""
List of all patient IDs in this dataset
Will copy the ids... so modify them all you want
:return: Copy of the patient IDs
"""
return list(self._patient_ids)
@property
def mris(self):
if self._mris is not None:
return self._mris
self._load_images()
return self._mris
@property
def segs(self):
if self._segs is None:
self._load_images()
return self._segs
def _load_images(self):
mris_shape = (self._num_patients,) + mri_shape
segs_shape = (self._num_patients,) + image_shape
self._mris = np.empty(shape=mris_shape)
self._segs = np.empty(shape=segs_shape)
if self._patients_fully_loaded:
# All the patients were already loaded
for i, patient in enumerate(tqdm(self._patients.values())):
self._mris[i] = patient.mri_data
self._segs[i] = patient.seg
else:
# Load it from scratch
for i, patient_id in enumerate(self._patient_ids):
patient_dir = self.directory_map[patient_id]
load_patient_data_inplace(patient_dir, self._mris, self._segs, i)
@property
def patients(self):
"""
        Lazily loads every patient from disk into Patient objects
        :return: A generator yielding each Patient in this subset
"""
for patient_id in self.ids:
yield self.patient(patient_id)
self._patients_fully_loaded = True
def patient(self, patient_id):
"""
Loads only a single patient from disk
:param patient_id: The patient ID
:return: A Patient object loaded from disk
"""
if patient_id not in self._patient_ids:
raise ValueError("Patient id \"%s\" not present." % patient_id)
# Return cached value if present
if patient_id in self._patients:
return self._patients[patient_id]
# Load patient data into memory
patient = Patient(patient_id)
patient_dir = self.directory_map[patient_id]
df = self._survival_df
if patient_id in df.id.values:
patient.age = float(df.loc[df.id == patient_id].age)
patient.survival = int(df.loc[df.id == patient_id].survival)
if self._mris is not None and self._segs is not None:
# Load from _mris and _segs if possible
index = self._id_indexer[patient_id]
patient.mri = self._mris[index]
patient.seg = self._segs[index]
else:
# Load the mri and segmentation data from disk
patient.mri, patient.seg = load_patient_data(patient_dir)
self._patients[patient_id] = patient # cache the value for later
return patient
def drop_cache(self):
self._patients.clear()
self._mris = None
self._segs = None
@property
def _survival_df(self):
if self._survival_csv in survival_df_cache:
return survival_df_cache[self._survival_csv]
df = load_survival(self._survival_csv)
survival_df_cache[self._survival_csv] = df
return df
class DataSet(object):
def __init__(self, data_set_dir=None, brats_root=None, year=None):
if data_set_dir is not None:
# The data-set directory was specified explicitly
assert isinstance(data_set_dir, str)
self._data_set_dir = data_set_dir
elif brats_root is not None and isinstance(year, int):
# Find the directory by specifying the year
assert isinstance(brats_root, str)
year_dir = find_file_containing(brats_root, str(year % 100))
self._data_set_dir = os.path.join(brats_root, year_dir)
self._brats_root = brats_root
self._year = year
else:
            # BraTS data-set location was not properly specified
raise Exception("Specify BraTS location with \"data_set_dir\" or with \"brats_root\" and \"year\"")
self._validation = None
self._train = None
self._hgg = None
self._lgg = None
self._dir_map_cache = None
self._val_dir = None
self._train_dir_cached = None
self._hgg_dir = os.path.join(self._train_dir, "HGG")
self._lgg_dir = os.path.join(self._train_dir, "LGG")
self._train_survival_csv_cached = None
self._validation_survival_csv_cached = None
self._train_ids = None
self._hgg_ids_cached = None
self._lgg_ids_cached = None
self._train_dir_map_cache = None
self._validation_dir_map_cache = None
self._hgg_dir_map_cache = None
self._lgg_dir_map_cache = None
def set(self, data_set_type):
"""
Get a data subset by type
:param data_set_type: The DataSubsetType to get
:return: The data sub-set of interest
"""
assert isinstance(data_set_type, DataSubsetType)
if data_set_type == DataSubsetType.train:
return self.train
if data_set_type == DataSubsetType.hgg:
return self.hgg
if data_set_type == DataSubsetType.lgg:
return self.lgg
if data_set_type == DataSubsetType.validation:
return self.validation
@property
def train(self):
"""
Training data
Loads the training data from disk, utilizing caching
        :return: A DataSubSet containing the training data
"""
if self._train is None:
try:
self._train = DataSubSet(self._train_dir_map,
self._train_survival_csv,
data_set_type=DataSubsetType.train)
except FileNotFoundError:
return None
return self._train
@property
def validation(self):
"""
Validation data
:return: Validation data
"""
if self._validation is None:
try:
self._validation = DataSubSet(self._validation_dir_map,
self._validation_survival_csv,
data_set_type=DataSubsetType.validation)
except FileNotFoundError:
return None
return self._validation
@property
def hgg(self):
if self._hgg is None:
try:
self._hgg = DataSubSet(self._hgg_dir_map,
self._train_survival_csv,
data_set_type=DataSubsetType.hgg)
except FileNotFoundError:
return None
return self._hgg
@property
def lgg(self):
if self._lgg is None:
try:
self._lgg = DataSubSet(self._lgg_dir_map,
self._train_survival_csv,
data_set_type=DataSubsetType.lgg)
except FileNotFoundError:
return None
return self._lgg
def drop_cache(self):
"""
Drops the cached values in the object
:return: None
"""
self._validation = None
self._train = None
self._hgg = None
self._lgg = None
self._dir_map_cache = None
self._val_dir = None
self._train_dir_cached = None
self._train_survival_csv_cached = None
self._validation_survival_csv_cached = None
self._train_ids = None
self._hgg_ids_cached = None
self._lgg_ids_cached = None
self._train_dir_map_cache = None
self._validation_dir_map_cache = None
self._hgg_dir_map_cache = None
self._lgg_dir_map_cache = None
@property
def _train_survival_csv(self):
if self._train_survival_csv_cached is None:
self._train_survival_csv_cached = find_file_containing(self._train_dir, "survival")
if self._train_survival_csv_cached is None:
raise FileNotFoundError("Could not find survival CSV in %s" % self._train_dir)
return self._train_survival_csv_cached
@property
def _validation_survival_csv(self):
if self._validation_survival_csv_cached is None:
self._validation_survival_csv_cached = find_file_containing(self._validation_dir, "survival")
if self._validation_survival_csv_cached is None:
raise FileNotFoundError("Could not find survival CSV in %s" % self._validation_dir)
return self._validation_survival_csv_cached
@property
def _train_dir(self):
if self._train_dir_cached is not None:
return self._train_dir_cached
self._train_dir_cached = find_file_containing(self._data_set_dir, "training")
if self._train_dir_cached is None:
raise FileNotFoundError("Could not find training directory in %s" % self._data_set_dir)
return self._train_dir_cached
@property
def _validation_dir(self):
if self._val_dir is not None:
return self._val_dir
self._val_dir = find_file_containing(self._data_set_dir, "validation")
if self._val_dir is None:
raise FileNotFoundError("Could not find validation directory in %s" % self._data_set_dir)
return self._val_dir
@property
def _train_dir_map(self):
if self._train_dir_map_cache is None:
self._train_dir_map_cache = dict(self._hgg_dir_map)
self._train_dir_map_cache.update(self._lgg_dir_map)
return self._train_dir_map_cache
@property
def _validation_dir_map(self):
if self._validation_dir_map_cache is None:
self._validation_dir_map_cache = self._directory_map(self._validation_dir)
return self._validation_dir_map_cache
@property
def _hgg_dir_map(self):
if self._hgg_dir_map_cache is None:
self._hgg_dir_map_cache = self._directory_map(self._hgg_dir)
return self._hgg_dir_map_cache
@property
def _lgg_dir_map(self):
if self._lgg_dir_map_cache is None:
self._lgg_dir_map_cache = self._directory_map(self._lgg_dir)
return self._lgg_dir_map_cache
@property
def _hgg_ids(self):
if self._hgg_ids_cached is None:
self._hgg_ids_cached = os.listdir(self._hgg_dir)
return self._hgg_ids_cached
@property
def _lgg_ids(self):
if self._lgg_ids_cached is None:
self._lgg_ids_cached = os.listdir(self._lgg_dir)
return self._lgg_ids_cached
@classmethod
def _directory_map(cls, dir):
return {file: os.path.join(dir, file)
for file in os.listdir(dir)
if os.path.isdir(os.path.join(dir, file))}
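# A minimal usage sketch for the DataSet / DataSubSet API above, assuming a local
# copy of the BraTS data. The root path, year and the 5-patient split are
# illustrative assumptions, not part of the original module.
if __name__ == "__main__":
    brats = DataSet(brats_root="/data/BraTS", year=2018)  # hypothetical location
    train = brats.train
    if train is not None:
        print("training patients:", len(train.ids))
        patient = train.patient(train.ids[0])  # loads a single Patient from disk
        print("loaded patient:", train.ids[0])
        small = train.subset(train.ids[:5])    # smaller DataSubSet by patient ID
        print("subset size:", len(small.ids))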
|
components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_submit_job.py
|
TheDutchDevil/pipelines
| 102 |
99581
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import time
from ._client import DataprocClient
from kfp_component.core import KfpExecutionContext, display
from .. import common as gcp_common
def submit_job(project_id, region, cluster_name, job, wait_interval=30,
job_id_output_path='/tmp/kfp/output/dataproc/job_id.txt',
job_object_output_path='/tmp/kfp/output/dataproc/job.json',
):
"""Submits a Cloud Dataproc job.
Args:
project_id (str): Required. The ID of the Google Cloud Platform project
that the cluster belongs to.
region (str): Required. The Cloud Dataproc region in which to handle the
request.
cluster_name (str): Required. The cluster to run the job.
        job (dict): Required. The full payload of a [Dataproc job](
https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs).
wait_interval (int): The wait seconds between polling the operation.
Defaults to 30s.
job_id_output_path (str): Path for the ID of the created job
job_object_output_path (str): Path for the created job object
Returns:
The created job payload.
"""
if 'reference' not in job:
job['reference'] = {}
job['reference']['projectId'] = project_id
if 'placement' not in job:
job['placement'] = {}
job['placement']['clusterName'] = cluster_name
client = DataprocClient()
job_id = None
with KfpExecutionContext(
on_cancel=lambda: client.cancel_job(
project_id, region, job_id)) as ctx:
submitted_job = client.submit_job(project_id, region, job,
request_id=ctx.context_id())
job_id = submitted_job['reference']['jobId']
_dump_metadata(submitted_job, region)
submitted_job = _wait_for_job_done(client, project_id, region,
job_id, wait_interval)
gcp_common.dump_file(job_object_output_path, json.dumps(submitted_job))
gcp_common.dump_file(job_id_output_path, submitted_job.get('reference').get('jobId'))
return submitted_job
def _wait_for_job_done(client, project_id, region, job_id, wait_interval):
while True:
job = client.get_job(project_id, region, job_id)
state = job['status']['state']
if state == 'DONE':
return job
if state == 'ERROR':
raise RuntimeError(job['status']['details'])
time.sleep(wait_interval)
def _dump_metadata(job, region):
display.display(display.Link(
'https://console.cloud.google.com/dataproc/jobs/{}?project={}®ion={}'.format(
job.get('reference').get('jobId'),
job.get('reference').get('projectId'),
region),
'Job Details'
))
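# A minimal sketch of calling submit_job() with a PySpark job payload. The
# project, region, cluster and GCS path below are illustrative assumptions; only
# the keyword structure follows the Dataproc job schema referenced in the docstring.
if __name__ == '__main__':
    example_job = {
        'pysparkJob': {
            'mainPythonFileUri': 'gs://my-bucket/jobs/train.py',  # hypothetical script
        },
    }
    submitted = submit_job(
        project_id='my-project',      # hypothetical project ID
        region='us-central1',
        cluster_name='my-cluster',    # hypothetical cluster
        job=example_job,
        wait_interval=30,
    )
    print(submitted['reference']['jobId'])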
|
samples/advanced/authentication.py
|
amerkel2/azure-storage-python
| 348 |
99582
|
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from azure.storage.blob import BlockBlobService
from azure.storage.common import CloudStorageAccount
class AuthenticationSamples():
def __init__(self):
pass
def run_all_samples(self):
self.key_auth()
self.sas_auth()
self.emulator()
self.public()
self.connection_string()
def key_auth(self):
# With account
account = CloudStorageAccount(account_name='<account_name>', account_key='<account_key>')
client = account.create_block_blob_service()
# Directly
client = BlockBlobService(account_name='<account_name>', account_key='<account_key>')
def sas_auth(self):
# With account
account = CloudStorageAccount(account_name='<account_name>', sas_token='<sas_token>')
client = account.create_block_blob_service()
# Directly
client = BlockBlobService(account_name='<account_name>', sas_token='<sas_token>')
def emulator(self):
# With account
account = CloudStorageAccount(is_emulated=True)
client = account.create_block_blob_service()
# Directly
client = BlockBlobService(is_emulated=True)
# The emulator does not at the time of writing support append blobs or
# the file service.
def public(self):
# This applies to the blob services only
# Public access must be enabled on the container or requests will fail
# With account
account = CloudStorageAccount(account_name='<account_name>')
client = account.create_block_blob_service()
# Directly
client = BlockBlobService(account_name='<account_name>')
def connection_string(self):
# Connection strings may be retrieved from the Portal or constructed manually
connection_string = 'AccountName=<account_name>;AccountKey=<account_key>;'
client = BlockBlobService(connection_string=connection_string)
|
examples/mhsa.py
|
Siyuan89/self-attention-cv
| 759 |
99597
|
<gh_stars>100-1000
import torch
from self_attention_cv import MultiHeadSelfAttention
model = MultiHeadSelfAttention(dim=64)
x = torch.rand(16, 10, 64) # [batch, tokens, dim]
mask = torch.zeros(10, 10) # tokens X tokens
mask[5:8, 5:8] = 1
y = model(x, mask)
assert y.shape == x.shape
print("MultiHeadSelfAttentionAISummer OK")
|
pyocd/utility/server.py
|
claymation/pyOCD
| 276 |
99639
|
<filename>pyocd/utility/server.py
# pyOCD debugger
# Copyright (c) 2015-2019 Arm Limited
# Copyright (c) 2021 <NAME>
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
import socket
from .sockets import ListenerSocket
from .compatibility import to_bytes_safe
LOG = logging.getLogger(__name__)
class StreamServer(threading.Thread):
"""! @brief File-like object that serves data over a TCP socket.
The user can connect to the socket with telnet or netcat.
The server thread will automatically be started by the constructor. To shut down the
server and its thread, call the stop() method.
"""
def __init__(self, port, serve_local_only=True, name=None, is_read_only=True, extra_info=None):
"""! @brief Constructor.
Starts the server immediately.
@param self
@param port The TCP/IP port number on which the server should listen. If 0 is passed,
then an arbitrary unused port is selected by the OS. In this case, the `port` property
can be used to get the actual port number.
@param serve_local_only Whether to allow connections from remote clients.
@param name Optional server name.
@param is_read_only If the server is read-only, from the perspective of the client,
then any incoming data sent by the client is discarded. Otherwise it is buffered so
it can be read with the read() methods.
@param extra_info Optional string with extra information about the server, e.g. "core 0".
"""
super(StreamServer, self).__init__()
self.name = name
self._name = name
self._extra_info = extra_info
self._formatted_name = (name + " ") if (name is not None) else ""
self._is_read_only = is_read_only
self._abstract_socket = None
self._abstract_socket = ListenerSocket(port, 4096)
if not serve_local_only:
# We really should be binding to explicit interfaces, not all available.
self._abstract_socket.host = ''
self._abstract_socket.init()
self._port = self._abstract_socket.port
self._buffer = bytearray()
self._buffer_lock = threading.Lock()
self.connected = None
self._shutdown_event = threading.Event()
self.daemon = True
self.start()
@property
def port(self):
return self._port
def stop(self):
self._shutdown_event.set()
self.join()
def run(self):
LOG.info("%sserver started on port %d%s", self._formatted_name, self._port,
(" (%s)" % self._extra_info) if self._extra_info else "")
self.connected = None
try:
while not self._shutdown_event.is_set():
# Wait for a client to connect.
# TODO support multiple client connections
while not self._shutdown_event.is_set():
self.connected = self._abstract_socket.connect()
if self.connected is not None:
LOG.debug("%sclient connected", self._formatted_name)
break
if self._shutdown_event.is_set():
break
# Set timeout on new connection.
self._abstract_socket.set_timeout(0.1)
# Keep reading from the client until we either get a shutdown event, or
# the client disconnects. The incoming data is appended to our read buffer.
while not self._shutdown_event.is_set():
try:
data = self._abstract_socket.read()
if len(data) == 0:
# Client disconnected.
self._abstract_socket.close()
self.connected = None
break
if not self._is_read_only:
self._buffer_lock.acquire()
self._buffer += bytearray(data)
self._buffer_lock.release()
except socket.timeout:
pass
finally:
self._abstract_socket.cleanup()
LOG.info("%sserver stopped", self._formatted_name)
def write(self, data):
"""! @brief Write bytes into the connection."""
# If nobody is connected, act like all data was written anyway.
if self.connected is None:
return 0
data = to_bytes_safe(data)
size = len(data)
remaining = size
while remaining:
count = self._abstract_socket.write(data)
remaining -= count
if remaining:
data = data[count:]
return size
def _get_input(self, length=-1):
"""! @brief Extract requested amount of data from the read buffer."""
self._buffer_lock.acquire()
try:
if length == -1:
actualLength = len(self._buffer)
else:
actualLength = min(length, len(self._buffer))
if actualLength:
data = self._buffer[:actualLength]
self._buffer = self._buffer[actualLength:]
else:
data = bytearray()
return data
finally:
self._buffer_lock.release()
def read(self, size=-1):
"""! @brief Return bytes read from the connection."""
if self.connected is None:
return None
# Extract requested amount of data from the read buffer.
data = self._get_input(size)
return data
def readinto(self, b):
"""! @brief Read bytes into a mutable buffer."""
if self.connected is None:
return None
# Extract requested amount of data from the read buffer.
b[:] = self._get_input()
if len(b):
return len(b)
else:
return None
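# A minimal usage sketch, assuming pyOCD's socket utilities are importable: start
# a StreamServer on an OS-assigned port, periodically write to any connected
# client (e.g. `nc localhost <port>`), and echo back whatever the client sends.
if __name__ == "__main__":
    import time

    server = StreamServer(0, serve_local_only=True, name="demo", is_read_only=False)
    print("listening on port", server.port)
    try:
        while True:
            server.write("hello from StreamServer\r\n")
            data = server.read()
            if data:
                print("received:", bytes(data))
            time.sleep(1.0)
    except KeyboardInterrupt:
        pass
    finally:
        server.stop()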
|
epg.py
|
jerocobo/LegalStream
| 365 |
99656
|
<reponame>jerocobo/LegalStream<filename>epg.py
import datetime
import string
import math
def ToDay():
global year
year = datetime.datetime.now().year
global month
month = '%02d' % datetime.datetime.now().month
global day
day = '%02d' % datetime.datetime.today().day
global hour
hour = '%02d' % datetime.datetime.now().hour
global minute
minute = '%02d' % datetime.datetime.now().minute
global second
second = '%02d' % datetime.datetime.now().second
global numbers
    numbers = str(year) + str(month) + str(day) + str(hour) + str(second) + "00"
ToDay()
StartYear = int(year)
StartMonth = int(month)
StartDay = int(day)
StartHour = int(hour)
StartMinute = int(minute)
StartSecond = int(second)
ToDay()
EndYear = int(year)
EndMonth = int(month)
EndDay = int(day)
EndHour = int(hour)
EndMinute = int(minute)
EndSecond = int(second)
MinuteLength = EndMinute - StartMinute
SecondLength = EndSecond - StartSecond
def DoubleDigit(Integer):
return "%02d"%Integer
def PlusOneDay():
global year
year = int(year)
global month
month = int(month)
global day
day = int(day)
global hour
hour = int(hour)
global minute
minute = int(minute)
global second
second = int(second)
global numbers
    if day == 30:
day = DoubleDigit(0)
else:
day = DoubleDigit(day + 1)
month = DoubleDigit(month)
    numbers = str(year) + str(month) + str(day) + str(hour) + str(second) + "00"
def RetPlusOneDay():
global year
year = int(year)
global month
month = int(month)
global day
day = int(day)
global hour
hour = int(hour)
global minute
minute = int(minute)
global second
second = int(second)
global numbers
    if day == 30:
day = DoubleDigit(0)
else:
day = DoubleDigit(day + 1)
month = DoubleDigit(month)
    return str(year) + str(month) + str(day) + str(hour) + str(second) + "00"
def RetPlusOneHour():
global year
year = int(year)
global month
month = int(month)
global day
day = int(day)
global hour
hour = int(hour)
    if hour == 23:
hour = DoubleDigit(0)
else:
hour = hour + 1
hour = DoubleDigit(hour)
global minute
minute = int(minute)
global second
second = int(second)
global numbers
    if day == 30:
day = DoubleDigit(0)
else:
day = DoubleDigit(day + 1)
    if month == 11:
month = DoubleDigit(0)
year = year + 1
else:
month = DoubleDigit(month + 1)
    return str(year) + str(month) + str(day) + str(hour) + str(second) + "00"
print RetPlusOneHour()
DoubleDigit(8)
prompt = raw_input("Are you sure you want to run this program? Avg. run time: 1m 25s.")
# Proceed only if the answer contains one of the affirmative tokens.
if not any(token in prompt for token in ("yes", "Yes", "y", "Y", "yeah", "Yeah", "ok", "OK", "okay", "Okay")):
    exit()
def ABC1():
ToDay()
global year
year = int(year)
global month
month = int(month)
global day
day = int(day)
global hour
hour = int(hour)
global minute
minute = int(minute)
global second
second = int(second)
i = 0
Program = []
for i in range(0, 365):
        # 24 hourly "Now on ABC News" entries per outer-loop iteration;
        # RetPlusOneHour() advances the shared date/time globals between entries.
        for _ in range(24):
            Program.append('<programme start="' + str(year) + str(month) + str(day) + str(hour) + str(second) + '00 -0400" stop="' + RetPlusOneHour() + '00 -0400" channel="ABCN1"><title lang="en">Now on ABC News</title><category lang="en">News</category></programme>')
i = i + 1
print str(round(float(i)/365*100, 1)) + "% of ABC News Digital 1 Schedule Complete."
return Program
ABC1 = ABC1()
def ABC2():
ToDay()
global year
year = int(year)
global month
month = int(month)
global day
day = int(day)
global hour
hour = int(hour)
global minute
minute = int(minute)
global second
second = int(second)
i = 0
Program = []
for i in range(0, 365):
        Program.append('<programme start="' + str(year) + str(month) + str(day) + str(hour) + str(second) + '00 -0400" stop="' + RetPlusOneDay() + '00 -0400" channel="ABCN2"><title lang="en">Regularly Scheduled Programming</title><category lang="en">News</category></programme>')
i = i + 1
print str(round(float(i)/365*100, 1)) + "% of ABC News Digital 2 Schedule Complete."
return Program
ABC2 = ABC2()
def ABC3():
ToDay()
global year
year = int(year)
global month
month = int(month)
global day
day = int(day)
global hour
hour = int(hour)
global minute
minute = int(minute)
global second
second = int(second)
i = 0
Program = []
for i in range(0, 365):
Program.append('<programme start="' + str(year) + str(month) + str(day) + str(hour) + str(second) + '00 -0400" stop="' + RetPlusOneDay() + '00 -0400" channel="ABCN3"><title lang="en">Regularly Scheduled Programming</title><category lang="en">News</category></programme>')
i = i + 1
print str(round(float(i)/365*100, 1)) + "% of ABC News Digital 3 Schedule Complete."
return Program
ABC3 = ABC3()
def ABC4():
ToDay()
global year
year = int(year)
global month
month = int(month)
global day
day = int(day)
global hour
hour = int(hour)
global minute
minute = int(minute)
global second
second = int(second)
i = 0
Program = []
for i in range(0, 365):
Program.append('<programme start="' + str(year) + str(month) + str(day) + str(hour) + str(second) + '00 -0400" stop="' + RetPlusOneDay() + '00 -0400" channel="ABCN4"><title lang="en">Regularly Scheduled Programming</title><category lang="en">News</category></programme>')
i = i + 1
print str(round(float(i)/365*100, 1)) + "% of ABC News Digital 4 Schedule Complete."
return Program
ABC4 = ABC4()
def ABC5():
ToDay()
global year
year = int(year)
global month
month = int(month)
global day
day = int(day)
global hour
hour = int(hour)
global minute
minute = int(minute)
global second
second = int(second)
i = 0
Program = []
for i in range(0, 365):
Program.append('<programme start="' + str(year) + str(month) + str(day) + str(hour) + str(second) + '00 -0400" stop="' + RetPlusOneDay() + '00 -0400" channel="ABCN5"><title lang="en">Regularly Scheduled Programming</title><category lang="en">News</category></programme>')
i = i + 1
print str(round(float(i)/365*100, 1)) + "% of ABC News Digital 5 Schedule Complete."
return Program
#File = open('workfile', 'w')
Filee = '<?xml version="1.0" encoding="utf-8" ?><!DOCTYPE tv SYSTEM "http://www.teleguide.info/download/xmltv.dtd"><tv generator-info-name="LegalStream Python EPG Generator" generator-info-url="https://github.com/notanewbie/LegalStream/blob/master/epg.py"><channel id="300093"><display-name lang="en">France 24</display-name></channel><channel id="ABCN1"><display-name lang="en">ABC News Digital 1</display-name></channel><channel id="ABCN2"><display-name lang="en">ABC News Digital 2</display-name></channel><channel id="ABCN3"><display-name lang="en">ABC News Digital 3</display-name></channel><channel id="ABCN4"><display-name lang="en">ABC News Digital 4</display-name></channel><channel id="ABCN5"><display-name lang="en">ABC News Digital 5</display-name></channel>'
i = 0
for object in ABC1:
Filee = Filee + ABC1[i]
i = i + 1
i = 0
for object in ABC2:
Filee = Filee + ABC2[i]
i = i + 1
i = 0
for object in ABC3:
Filee = Filee + ABC3[i]
i = i + 1
i = 0
for object in ABC4:
Filee = Filee + ABC4[i]
i = i + 1
file_ = open('output.xml', 'w')
file_.write(Filee + "</tv>")
file_.close()
ToDay()
EndYear = int(year)
EndMonth = int(month)
EndDay = int(day)
EndHour = int(hour)
EndMinute = int(minute)
EndSecond = int(second)
MinuteLength = EndMinute - StartMinute
SecondLength = EndSecond - StartSecond
print "Generating EPG data took " + str(MinuteLength) + "m and " + str(SecondLength) + "s."
|
survae/tests/transforms/bijections/coupling/coupling_linear.py
|
alisiahkoohi/survae_flows
| 262 |
99673
|
import numpy as np
import torch
import torch.nn as nn
import torchtestcase
import unittest
from survae.transforms.bijections.coupling import *
from survae.nn.layers import ElementwiseParams, ElementwiseParams2d
from survae.tests.transforms.bijections import BijectionTest
class AdditiveCouplingBijectionTest(BijectionTest):
def test_bijection_is_well_behaved(self):
batch_size = 10
self.eps = 1e-6
for shape in [(6,),
(6,8,8)]:
for num_condition in [None, 1]:
with self.subTest(shape=shape, num_condition=num_condition):
x = torch.randn(batch_size, *shape)
if num_condition is None:
if len(shape) == 1: net = nn.Linear(3,3)
if len(shape) == 3: net = nn.Conv2d(3,3, kernel_size=3, padding=1)
else:
if len(shape) == 1: net = nn.Linear(1,5)
if len(shape) == 3: net = nn.Conv2d(1,5, kernel_size=3, padding=1)
bijection = AdditiveCouplingBijection(net, num_condition=num_condition)
self.assert_bijection_is_well_behaved(bijection, x, z_shape=(batch_size, *shape))
z, _ = bijection.forward(x)
if num_condition is None:
self.assertEqual(x[:,:3], z[:,:3])
else:
self.assertEqual(x[:,:1], z[:,:1])
class AffineCouplingBijectionTest(BijectionTest):
def test_bijection_is_well_behaved(self):
batch_size = 10
self.eps = 5e-6
for shape in [(6,),
(6,8,8)]:
for num_condition in [None, 1]:
with self.subTest(shape=shape, num_condition=num_condition):
x = torch.randn(batch_size, *shape)
if num_condition is None:
if len(shape) == 1: net = nn.Sequential(nn.Linear(3,3*2), ElementwiseParams(2))
if len(shape) == 3: net = nn.Sequential(nn.Conv2d(3,3*2, kernel_size=3, padding=1), ElementwiseParams2d(2))
else:
if len(shape) == 1: net = nn.Sequential(nn.Linear(1,5*2), ElementwiseParams(2))
if len(shape) == 3: net = nn.Sequential(nn.Conv2d(1,5*2, kernel_size=3, padding=1), ElementwiseParams2d(2))
bijection = AffineCouplingBijection(net, num_condition=num_condition)
self.assert_bijection_is_well_behaved(bijection, x, z_shape=(batch_size, *shape))
z, _ = bijection.forward(x)
if num_condition is None:
self.assertEqual(x[:,:3], z[:,:3])
else:
self.assertEqual(x[:,:1], z[:,:1])
if __name__ == '__main__':
unittest.main()
|
examples/reverb.py
|
Bakkom/micropython
| 594 |
99689
|
<reponame>Bakkom/micropython
import audio
def from_file(file, frame):
ln = -1
while ln:
ln = file.readinto(frame)
yield frame
def reverb_gen(src, buckets, reflect, fadeout):
bucket_count = len(buckets)
bucket = 0
for frame in src:
echo = buckets[bucket]
echo *= reflect
echo += frame
yield echo
buckets[bucket] = echo
bucket += 1
if bucket == bucket_count:
bucket = 0
while fadeout:
fadeout -= 1
echo = buckets[bucket]
echo *= reflect
yield echo
buckets[bucket] = echo
bucket += 1
if bucket == bucket_count:
bucket = 0
def reverb(src, delay, reflect):
#Do all allocation up front, so we don't need to do any in the generator.
bucket_count = delay>>2
buckets = [ None ] * bucket_count
for i in range(bucket_count):
buckets[i] = audio.AudioFrame()
vol = 1.0
fadeout = 0
while vol > 0.05:
fadeout += bucket_count
vol *= reflect
return reverb_gen(src, buckets, reflect, fadeout)
def play_file(name, delay=80, reflect=0.5):
#Do allocation here, as we can't do it in an interrupt.
frame = audio.AudioFrame()
with open(name) as file:
gen = from_file(file, frame)
r = reverb(gen, delay, reflect)
audio.play(r)
|
WebMirror/management/rss_parser_funcs/feed_parse_extractThehlifestyleCom.py
|
fake-name/ReadableWebProxy
| 193 |
99731
|
def extractThehlifestyleCom(item):
'''
Parser for 'thehlifestyle.com'
'''
tstr = str(item['tags']).lower()
if 'review' in tstr:
return None
if 'actors' in tstr:
return None
if 'game' in tstr:
return None
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('The Beloved Imperial Consort translation', 'The Beloved Imperial Consort', 'translated'),
('Good Morning, Miss Undercover Translation', 'Good Morning, Miss Undercover', 'translated'),
('Hilarous Pampered Consort Translation', 'Hilarous Pampered Consort', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
alipay/aop/api/domain/AlipayMarketingCampaignRuleCrowdCreateModel.py
|
snowxmas/alipay-sdk-python-all
| 213 |
99733
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayMarketingCampaignRuleCrowdCreateModel(object):
def __init__(self):
self._mdatacrowdsource = None
self._mpid = None
self._ruledesc = None
@property
def mdatacrowdsource(self):
return self._mdatacrowdsource
@mdatacrowdsource.setter
def mdatacrowdsource(self, value):
self._mdatacrowdsource = value
@property
def mpid(self):
return self._mpid
@mpid.setter
def mpid(self, value):
self._mpid = value
@property
def ruledesc(self):
return self._ruledesc
@ruledesc.setter
def ruledesc(self, value):
self._ruledesc = value
def to_alipay_dict(self):
params = dict()
if self.mdatacrowdsource:
if hasattr(self.mdatacrowdsource, 'to_alipay_dict'):
params['mdatacrowdsource'] = self.mdatacrowdsource.to_alipay_dict()
else:
params['mdatacrowdsource'] = self.mdatacrowdsource
if self.mpid:
if hasattr(self.mpid, 'to_alipay_dict'):
params['mpid'] = self.mpid.to_alipay_dict()
else:
params['mpid'] = self.mpid
if self.ruledesc:
if hasattr(self.ruledesc, 'to_alipay_dict'):
params['ruledesc'] = self.ruledesc.to_alipay_dict()
else:
params['ruledesc'] = self.ruledesc
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayMarketingCampaignRuleCrowdCreateModel()
if 'mdatacrowdsource' in d:
o.mdatacrowdsource = d['mdatacrowdsource']
if 'mpid' in d:
o.mpid = d['mpid']
if 'ruledesc' in d:
o.ruledesc = d['ruledesc']
return o
|
corehq/apps/userreports/tests/test_report_data.py
|
dimagilg/commcare-hq
| 471 |
99755
|
<gh_stars>100-1000
import uuid
from collections import namedtuple
from django.test import TestCase
from corehq.apps.userreports.models import (
DataSourceConfiguration,
ReportConfiguration,
)
from corehq.apps.userreports.reports.data_source import (
ConfigurableReportDataSource,
)
from corehq.apps.userreports.tests.utils import doc_to_change
from corehq.apps.userreports.util import get_indicator_adapter
from corehq.pillows.case import get_case_pillow
ReportDataTestRow = namedtuple('ReportDataTestRow', ['name', 'number', 'sort_key'])
class ReportDataTest(TestCase):
def setUp(self):
super(ReportDataTest, self).setUp()
# Create report
self.domain = 'test-ucr-report-data'
self.data_source = DataSourceConfiguration(
domain=self.domain,
referenced_doc_type='CommCareCase',
table_id=uuid.uuid4().hex,
configured_filter={},
configured_indicators=[
{
"type": "expression",
"expression": {
"type": "property_name",
"property_name": 'name'
},
"column_id": 'name',
"display_name": 'name',
"datatype": "string"
},
{
"type": "expression",
"expression": {
"type": "property_name",
"property_name": 'number'
},
"column_id": 'number',
"display_name": 'number',
"datatype": "integer"
},
{
"type": "expression",
"expression": {
"type": "property_name",
"property_name": 'number'
},
"column_id": 'string-number',
"display_name": 'string-number',
"datatype": "string"
},
{
"type": "expression",
"expression": {
"type": "property_name",
"property_name": 'just_for_sorting'
},
"column_id": 'just_for_sorting',
"display_name": 'just_for_sorting',
"datatype": "string"
}
],
)
self.data_source.validate()
self.data_source.save()
self.adapter = get_indicator_adapter(self.data_source)
self.adapter.rebuild_table()
self.addCleanup(self.data_source.delete)
# initialize a report on the data
self.report_config = ReportConfiguration(
domain=self.domain,
config_id=self.data_source._id,
aggregation_columns=['doc_id'],
columns=[
{
"type": "field",
"field": "name",
"column_id": "name",
"display": "Name",
"aggregation": "simple",
},
{
"type": "field",
"field": "number",
"column_id": "number",
"display": "Number",
"aggregation": "simple",
"calculate_total": True,
},
{
"type": "expression",
"column_id": "ten",
"display": "The Number Ten",
"expression": {
'type': 'constant',
'constant': 10,
}
},
{
"type": "expression",
"column_id": "by_tens",
"display": "Counting by tens",
"expression": {
"type": "evaluator",
"statement": "a * b",
"context_variables": {
"a": {
"type": "property_name",
"property_name": "number",
},
"b": {
"type": "property_name",
"property_name": "ten",
}
}
}
},
{
"type": "field",
"field": 'string-number',
"display": 'Display Number',
"aggregation": "simple",
"transform": {
"type": "translation",
"translations": {
"0": "zero",
"1": {"en": "one", "es": "uno"},
"2": {"en": "two", "es": "dos"}
},
},
}
],
filters=[],
configured_charts=[],
sort_expression=[{'field': 'just_for_sorting', 'order': 'DESC'}]
)
self.report_config.save()
self.addCleanup(self.report_config.delete)
def _add_some_rows(self, count):
rows = [ReportDataTestRow(uuid.uuid4().hex, i, i) for i in range(count)]
self._add_rows(rows)
return rows
def _add_rows(self, rows):
pillow = get_case_pillow(ucr_configs=[self.data_source])
def _get_case(row):
return {
'_id': uuid.uuid4().hex,
'domain': self.domain,
'doc_type': 'CommCareCase',
'type': 'city',
'name': row.name,
'number': row.number,
'just_for_sorting': row.sort_key,
}
for row in rows:
pillow.process_change(doc_to_change(_get_case(row)))
def test_basic_query(self):
# add a few rows to the data source
rows = self._add_some_rows(3)
# check the returned data from the report looks right
report_data_source = ConfigurableReportDataSource.from_spec(self.report_config)
report_data = report_data_source.get_data()
self.assertEqual(len(rows), len(report_data))
rows_by_name = {r.name: r for r in rows}
for row in report_data:
self.assertTrue(row['name'] in rows_by_name)
self.assertEqual(rows_by_name[row['name']].number, row['number'])
self.assertEqual(10, row['ten'])
self.assertEqual(10 * row['number'], row['by_tens'])
def test_limit(self):
count = 5
self._add_some_rows(count)
report_data_source = ConfigurableReportDataSource.from_spec(self.report_config)
original_data = report_data_source.get_data()
self.assertEqual(count, len(original_data))
limited_data = report_data_source.get_data(limit=3)
self.assertEqual(3, len(limited_data))
self.assertEqual(original_data[:3], limited_data)
def test_skip(self):
count = 5
self._add_some_rows(count)
report_data_source = ConfigurableReportDataSource.from_spec(self.report_config)
original_data = report_data_source.get_data()
self.assertEqual(count, len(original_data))
skipped = report_data_source.get_data(start=3)
self.assertEqual(count - 3, len(skipped))
self.assertEqual(original_data[3:], skipped)
def test_total_row(self):
rows = self._add_some_rows(3)
report_data_source = ConfigurableReportDataSource.from_spec(self.report_config)
total_number = sum(row.number for row in rows)
self.assertEqual(report_data_source.get_total_row(), ['Total', total_number, '', '', ''])
def test_transform(self):
count = 5
self._add_some_rows(count)
report_data_source = ConfigurableReportDataSource.from_spec(self.report_config)
original_data = report_data_source.get_data()
self.assertEqual(count, len(original_data))
rows_by_number = {int(row['number']): row for row in original_data}
# Make sure the translations happened
self.assertEqual(rows_by_number[0]['string-number'], "zero")
self.assertEqual(rows_by_number[1]['string-number'], "one")
self.assertEqual(rows_by_number[2]['string-number'], "two")
# These last two are untranslated
self.assertEqual(rows_by_number[3]['string-number'], "3")
self.assertEqual(rows_by_number[4]['string-number'], "4")
|
janitor/functions/to_datetime.py
|
thatlittleboy/pyjanitor
| 225 |
99832
|
from typing import Hashable
import pandas_flavor as pf
import pandas as pd
from janitor.utils import deprecated_alias
@pf.register_dataframe_method
@deprecated_alias(column="column_name")
def to_datetime(
df: pd.DataFrame, column_name: Hashable, **kwargs
) -> pd.DataFrame:
"""Convert column to a datetime type, in-place.
Intended to be the method-chaining equivalent of:
df[column_name] = pd.to_datetime(df[column_name], **kwargs)
This method mutates the original DataFrame.
Example: Converting a string column to datetime type with custom format.
>>> import pandas as pd
>>> import janitor
>>> df = pd.DataFrame({'date': ['20200101', '20200202', '20200303']})
>>> df
date
0 20200101
1 20200202
2 20200303
>>> df.to_datetime('date', format='%Y%m%d')
date
0 2020-01-01
1 2020-02-02
2 2020-03-03
Read the pandas documentation for [`to_datetime`][pd_docs] for more information.
[pd_docs]: https://pandas.pydata.org/docs/reference/api/pandas.to_datetime.html
:param df: A pandas DataFrame.
:param column_name: Column name.
:param kwargs: Provide any kwargs that `pd.to_datetime` can take.
:returns: A pandas DataFrame with updated datetime data.
""" # noqa: E501
df[column_name] = pd.to_datetime(df[column_name], **kwargs)
return df
|
babi/cached_property.py
|
ClasherKasten/babi
| 223 |
99875
|
<reponame>ClasherKasten/babi
from __future__ import annotations
import sys
if sys.version_info >= (3, 8): # pragma: >=3.8 cover
from functools import cached_property
else: # pragma: <3.8 cover
from typing import Callable
from typing import Generic
from typing import TypeVar
TSelf = TypeVar('TSelf')
TRet = TypeVar('TRet')
class cached_property(Generic[TSelf, TRet]):
def __init__(self, func: Callable[[TSelf], TRet]) -> None:
self._func = func
def __get__(
self,
instance: TSelf | None,
owner: type[TSelf] | None = None,
) -> TRet:
assert instance is not None
ret = instance.__dict__[self._func.__name__] = self._func(instance)
return ret
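# A small usage sketch (an illustrative assumption, not part of babi itself):
# the wrapped method runs once per instance, after which the computed value is
# served from the instance __dict__ without re-running the function.
if __name__ == '__main__':
    class Circle:
        def __init__(self, radius: float) -> None:
            self.radius = radius

        @cached_property
        def area(self) -> float:
            print('computing area')  # printed only on first access
            return 3.141592653589793 * self.radius ** 2

    c = Circle(2.0)
    print(c.area)  # triggers the computation
    print(c.area)  # served from the cache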
|
cookbook/mesher_tesseroidmesh.py
|
XuesongDing/fatiando
| 179 |
99876
|
"""
Meshing: Make and plot a tesseroid mesh
"""
from fatiando import mesher
from fatiando.vis import myv
mesh = mesher.TesseroidMesh((-60, 60, -30, 30, 100000, -500000), (10, 10, 10))
myv.figure(zdown=False)
myv.tesseroids(mesh)
myv.earth(opacity=0.3)
myv.continents()
myv.meridians(range(-180, 180, 30))
myv.parallels(range(-90, 90, 30))
myv.show()
|
alipay/aop/api/domain/HealthServiceSku.py
|
antopen/alipay-sdk-python-all
| 213 |
99913
|
<filename>alipay/aop/api/domain/HealthServiceSku.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class HealthServiceSku(object):
def __init__(self):
self._merchant_item_sku_bar_code = None
self._sku_id = None
@property
def merchant_item_sku_bar_code(self):
return self._merchant_item_sku_bar_code
@merchant_item_sku_bar_code.setter
def merchant_item_sku_bar_code(self, value):
self._merchant_item_sku_bar_code = value
@property
def sku_id(self):
return self._sku_id
@sku_id.setter
def sku_id(self, value):
self._sku_id = value
def to_alipay_dict(self):
params = dict()
if self.merchant_item_sku_bar_code:
if hasattr(self.merchant_item_sku_bar_code, 'to_alipay_dict'):
params['merchant_item_sku_bar_code'] = self.merchant_item_sku_bar_code.to_alipay_dict()
else:
params['merchant_item_sku_bar_code'] = self.merchant_item_sku_bar_code
if self.sku_id:
if hasattr(self.sku_id, 'to_alipay_dict'):
params['sku_id'] = self.sku_id.to_alipay_dict()
else:
params['sku_id'] = self.sku_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = HealthServiceSku()
if 'merchant_item_sku_bar_code' in d:
o.merchant_item_sku_bar_code = d['merchant_item_sku_bar_code']
if 'sku_id' in d:
o.sku_id = d['sku_id']
return o
|
examples/pytorch/diffpool/model/dgl_layers/__init__.py
|
ketyi/dgl
| 9,516 |
99930
|
<reponame>ketyi/dgl
from .gnn import GraphSage, GraphSageLayer, DiffPoolBatchedGraphLayer
|
kmip/tests/unit/core/messages/payloads/test_modify_attribute.py
|
ondrap/PyKMIP
| 179 |
99960
|
<gh_stars>100-1000
# Copyright (c) 2019 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from kmip.core import enums
from kmip.core import exceptions
from kmip.core import objects
from kmip.core import primitives
from kmip.core import utils
from kmip.core.messages import payloads
class TestModifyAttributeRequestPayload(testtools.TestCase):
"""
A unit test suite for the ModifyAttribute request payload.
"""
def setUp(self):
super(TestModifyAttributeRequestPayload, self).setUp()
# This encoding was taken from test case 3.1.4-6 from the KMIP 1.1
# test suite.
#
# This encoding matches the following set of values.
# Request Payload
# Unique Identifier - b4faee10-aa2a-4446-8ad4-0881f3422959
# Attribute
# Attribute Name - x-attribute1
# Attribute Value - ModifiedValue1
self.full_encoding = utils.BytearrayStream(
b'\x42\x00\x79\x01\x00\x00\x00\x68'
b'\x42\x00\x94\x07\x00\x00\x00\x24'
b'\x62\x34\x66\x61\x65\x65\x31\x30\x2D\x61\x61\x32\x61\x2D\x34\x34'
b'\x34\x36\x2D\x38\x61\x64\x34\x2D\x30\x38\x38\x31\x66\x33\x34\x32'
b'\x32\x39\x35\x39\x00\x00\x00\x00'
b'\x42\x00\x08\x01\x00\x00\x00\x30'
b'\x42\x00\x0A\x07\x00\x00\x00\x0C'
b'\x78\x2D\x61\x74\x74\x72\x69\x62\x75\x74\x65\x31\x00\x00\x00\x00'
b'\x42\x00\x0B\x07\x00\x00\x00\x0E'
b'\x4D\x6F\x64\x69\x66\x69\x65\x64\x56\x61\x6C\x75\x65\x31\x00\x00'
)
# This encoding was adapted from test case 3.1.4-6 from the KMIP 1.1
# test suite. It was modified to reflect the ModifyAttribute operation
# changes in KMIP 2.0. The attribute encoding was removed and the
# current and new attribute encodings were manually added.
#
# This encoding matches the following set of values.
# Request Payload
# Unique Identifier - b4faee10-aa2a-4446-8ad4-0881f3422959
# Current Attribute
# Cryptographic Algorithm - AES
# New Attribute
# Cryptographic Algorithm - RSA
self.full_encoding_kmip_2_0 = utils.BytearrayStream(
b'\x42\x00\x79\x01\x00\x00\x00\x60'
b'\x42\x00\x94\x07\x00\x00\x00\x24'
b'\x62\x34\x66\x61\x65\x65\x31\x30\x2D\x61\x61\x32\x61\x2D\x34\x34'
b'\x34\x36\x2D\x38\x61\x64\x34\x2D\x30\x38\x38\x31\x66\x33\x34\x32'
b'\x32\x39\x35\x39\x00\x00\x00\x00'
b'\x42\x01\x3C\x01\x00\x00\x00\x10'
b'\x42\x00\x28\x05\x00\x00\x00\x04\x00\x00\x00\x03\x00\x00\x00\x00'
b'\x42\x01\x3D\x01\x00\x00\x00\x10'
b'\x42\x00\x28\x05\x00\x00\x00\x04\x00\x00\x00\x04\x00\x00\x00\x00'
)
self.empty_encoding = utils.BytearrayStream(
b'\x42\x00\x79\x01\x00\x00\x00\x00'
)
def tearDown(self):
super(TestModifyAttributeRequestPayload, self).tearDown()
def test_invalid_unique_identifier(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the unique identifier of a ModifyAttribute request payload.
"""
kwargs = {"unique_identifier": 0}
self.assertRaisesRegex(
TypeError,
"The unique identifier must be a string.",
payloads.ModifyAttributeRequestPayload,
**kwargs
)
args = (
payloads.ModifyAttributeRequestPayload(),
"unique_identifier",
0
)
self.assertRaisesRegex(
TypeError,
"The unique identifier must be a string.",
setattr,
*args
)
def test_invalid_attribute(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the attribute of a ModifyAttribute request payload.
"""
kwargs = {"attribute": "invalid"}
self.assertRaisesRegex(
TypeError,
"The attribute must be an Attribute object.",
payloads.ModifyAttributeRequestPayload,
**kwargs
)
args = (
payloads.ModifyAttributeRequestPayload(),
"attribute",
"invalid"
)
self.assertRaisesRegex(
TypeError,
"The attribute must be an Attribute object.",
setattr,
*args
)
def test_invalid_current_attribute(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the current attribute of a ModifyAttribute request payload.
"""
kwargs = {"current_attribute": "invalid"}
self.assertRaisesRegex(
TypeError,
"The current attribute must be a CurrentAttribute object.",
payloads.ModifyAttributeRequestPayload,
**kwargs
)
args = (
payloads.ModifyAttributeRequestPayload(),
"current_attribute",
"invalid"
)
self.assertRaisesRegex(
TypeError,
"The current attribute must be a CurrentAttribute object.",
setattr,
*args
)
def test_invalid_new_attribute(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the new attribute of a ModifyAttribute request payload.
"""
kwargs = {"new_attribute": "invalid"}
self.assertRaisesRegex(
TypeError,
"The new attribute must be a NewAttribute object.",
payloads.ModifyAttributeRequestPayload,
**kwargs
)
args = (
payloads.ModifyAttributeRequestPayload(),
"new_attribute",
"invalid"
)
self.assertRaisesRegex(
TypeError,
"The new attribute must be a NewAttribute object.",
setattr,
*args
)
def test_read(self):
"""
Test that a ModifyAttribute request payload can be read from a buffer.
"""
payload = payloads.ModifyAttributeRequestPayload()
self.assertIsNone(payload.unique_identifier)
self.assertIsNone(payload.attribute)
self.assertIsNone(payload.current_attribute)
self.assertIsNone(payload.new_attribute)
payload.read(self.full_encoding)
self.assertEqual(
"b4faee10-aa2a-4446-8ad4-0881f3422959",
payload.unique_identifier
)
self.assertEqual(
objects.Attribute(
attribute_name=objects.Attribute.AttributeName("x-attribute1"),
attribute_value=primitives.TextString(
value="ModifiedValue1",
tag=enums.Tags.ATTRIBUTE_VALUE
)
),
payload.attribute
)
self.assertIsNone(payload.current_attribute)
self.assertIsNone(payload.new_attribute)
def test_read_kmip_2_0(self):
"""
Test that a ModifyAttribute request payload can be read from a buffer
with KMIP 2.0 fields.
"""
payload = payloads.ModifyAttributeRequestPayload()
self.assertIsNone(payload.unique_identifier)
self.assertIsNone(payload.attribute)
self.assertIsNone(payload.current_attribute)
self.assertIsNone(payload.new_attribute)
payload.read(
self.full_encoding_kmip_2_0,
kmip_version=enums.KMIPVersion.KMIP_2_0
)
self.assertEqual(
"b4faee10-aa2a-4446-8ad4-0881f3422959",
payload.unique_identifier
)
self.assertIsNone(payload.attribute)
self.assertEqual(
objects.CurrentAttribute(
attribute=primitives.Enumeration(
enums.CryptographicAlgorithm,
enums.CryptographicAlgorithm.AES,
enums.Tags.CRYPTOGRAPHIC_ALGORITHM
)
),
payload.current_attribute
)
self.assertEqual(
objects.NewAttribute(
attribute=primitives.Enumeration(
enums.CryptographicAlgorithm,
enums.CryptographicAlgorithm.RSA,
enums.Tags.CRYPTOGRAPHIC_ALGORITHM
)
),
payload.new_attribute
)
def test_read_no_attribute(self):
"""
Test that an InvalidKmipEncoding error is raised when an invalid
encoding containing no encoded attribute is used to decode a
ModifyAttribute request payload.
"""
payload = payloads.ModifyAttributeRequestPayload()
args = (self.empty_encoding, )
self.assertRaisesRegex(
exceptions.InvalidKmipEncoding,
"The ModifyAttribute request payload encoding is missing the "
"attribute field.",
payload.read,
*args
)
def test_read_no_new_attribute(self):
"""
Test that an InvalidKmipEncoding error is raised when an invalid
encoding containing no encoded new attribute is used to decode a
ModifyAttribute request payload.
"""
payload = payloads.ModifyAttributeRequestPayload()
args = (self.empty_encoding, )
kwargs = {"kmip_version": enums.KMIPVersion.KMIP_2_0}
self.assertRaisesRegex(
exceptions.InvalidKmipEncoding,
"The ModifyAttribute request payload encoding is missing the "
"new attribute field.",
payload.read,
*args,
**kwargs
)
def test_write(self):
"""
Test that a ModifyAttribute request payload can be written to a buffer.
"""
payload = payloads.ModifyAttributeRequestPayload(
unique_identifier="b4faee10-aa2a-4446-8ad4-0881f3422959",
attribute=objects.Attribute(
attribute_name=objects.Attribute.AttributeName("x-attribute1"),
attribute_value=primitives.TextString(
value="ModifiedValue1",
tag=enums.Tags.ATTRIBUTE_VALUE
)
)
)
buffer = utils.BytearrayStream()
payload.write(buffer)
self.assertEqual(len(self.full_encoding), len(buffer))
self.assertEqual(str(self.full_encoding), str(buffer))
def test_write_kmip_2_0(self):
"""
Test that a ModifyAttribute request payload can be written to a buffer
with KMIP 2.0 fields.
"""
payload = payloads.ModifyAttributeRequestPayload(
unique_identifier="b4faee10-aa2a-4446-8ad4-0881f3422959",
current_attribute=objects.CurrentAttribute(
attribute=primitives.Enumeration(
enums.CryptographicAlgorithm,
enums.CryptographicAlgorithm.AES,
enums.Tags.CRYPTOGRAPHIC_ALGORITHM
)
),
new_attribute=objects.NewAttribute(
attribute=primitives.Enumeration(
enums.CryptographicAlgorithm,
enums.CryptographicAlgorithm.RSA,
enums.Tags.CRYPTOGRAPHIC_ALGORITHM
)
)
)
buffer = utils.BytearrayStream()
payload.write(buffer, kmip_version=enums.KMIPVersion.KMIP_2_0)
self.assertEqual(len(self.full_encoding_kmip_2_0), len(buffer))
self.assertEqual(str(self.full_encoding_kmip_2_0), str(buffer))
def test_write_no_attribute(self):
"""
Test that an InvalidField error is raised when attempting to write a
ModifyAttribute request payload to a buffer with no attribute field
specified.
"""
payload = payloads.ModifyAttributeRequestPayload()
args = (utils.BytearrayStream(), )
self.assertRaisesRegex(
exceptions.InvalidField,
"The ModifyAttribute request payload is missing the attribute "
"field.",
payload.write,
*args
)
def test_write_no_new_attribute(self):
"""
Test that an InvalidField error is raised when attempting to write a
ModifyAttribute request payload to a buffer with no new attribute
field specified.
"""
payload = payloads.ModifyAttributeRequestPayload()
args = (utils.BytearrayStream(), )
kwargs = {"kmip_version": enums.KMIPVersion.KMIP_2_0}
self.assertRaisesRegex(
exceptions.InvalidField,
"The ModifyAttribute request payload is missing the new attribute "
"field.",
payload.write,
*args,
**kwargs
)
def test_repr(self):
"""
Test that repr can be applied to a ModifyAttribute request payload.
"""
payload = payloads.ModifyAttributeRequestPayload(
unique_identifier="b4faee10-aa2a-4446-8ad4-0881f3422959"
)
args = [
"unique_identifier='b4faee10-aa2a-4446-8ad4-0881f3422959'",
"attribute=None",
"current_attribute=None",
"new_attribute=None"
]
self.assertEqual(
"ModifyAttributeRequestPayload({})".format(", ".join(args)),
repr(payload)
)
def test_str(self):
"""
Test that str can be applied to a ModifyAttribute request payload.
"""
payload = payloads.ModifyAttributeRequestPayload(
unique_identifier="b4faee10-aa2a-4446-8ad4-0881f3422959"
)
s = str(
{
"unique_identifier": "b4faee10-aa2a-4446-8ad4-0881f3422959",
"attribute": None,
"current_attribute": None,
"new_attribute": None
}
)
self.assertEqual(s, str(payload))
def test_comparison(self):
"""
Test that the equality/inequality operators return True/False when
comparing two ModifyAttribute request payloads with the same data.
"""
a = payloads.ModifyAttributeRequestPayload()
b = payloads.ModifyAttributeRequestPayload()
self.assertTrue(a == b)
self.assertTrue(b == a)
self.assertFalse(a != b)
self.assertFalse(b != a)
a = payloads.ModifyAttributeRequestPayload(
unique_identifier="b4faee10-aa2a-4446-8ad4-0881f3422959",
attribute=objects.Attribute(
attribute_name=objects.Attribute.AttributeName("x-attribute1"),
attribute_value=primitives.TextString(
value="ModifiedValue1",
tag=enums.Tags.ATTRIBUTE_VALUE
)
),
current_attribute=objects.CurrentAttribute(
attribute=primitives.Enumeration(
enums.CryptographicAlgorithm,
enums.CryptographicAlgorithm.AES,
enums.Tags.CRYPTOGRAPHIC_ALGORITHM
)
),
new_attribute=objects.NewAttribute(
attribute=primitives.Enumeration(
enums.CryptographicAlgorithm,
enums.CryptographicAlgorithm.RSA,
enums.Tags.CRYPTOGRAPHIC_ALGORITHM
)
)
)
b = payloads.ModifyAttributeRequestPayload(
unique_identifier="b4faee10-aa2a-4446-8ad4-0881f3422959",
attribute=objects.Attribute(
attribute_name=objects.Attribute.AttributeName("x-attribute1"),
attribute_value=primitives.TextString(
value="ModifiedValue1",
tag=enums.Tags.ATTRIBUTE_VALUE
)
),
current_attribute=objects.CurrentAttribute(
attribute=primitives.Enumeration(
enums.CryptographicAlgorithm,
enums.CryptographicAlgorithm.AES,
enums.Tags.CRYPTOGRAPHIC_ALGORITHM
)
),
new_attribute=objects.NewAttribute(
attribute=primitives.Enumeration(
enums.CryptographicAlgorithm,
enums.CryptographicAlgorithm.RSA,
enums.Tags.CRYPTOGRAPHIC_ALGORITHM
)
)
)
self.assertTrue(a == b)
self.assertTrue(b == a)
self.assertFalse(a != b)
self.assertFalse(b != a)
def test_comparison_on_different_unique_identifiers(self):
"""
Test that the equality/inequality operators return False/True when
comparing two ModifyAttribute request payloads with different unique
identifiers.
"""
a = payloads.ModifyAttributeRequestPayload(unique_identifier="1")
b = payloads.ModifyAttributeRequestPayload(unique_identifier="2")
self.assertFalse(a == b)
self.assertFalse(b == a)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_comparison_on_different_attributes(self):
"""
Test that the equality/inequality operators return False/True when
comparing two ModifyAttribute request payloads with different
attributes.
"""
a = payloads.ModifyAttributeRequestPayload(
attribute=objects.Attribute(
attribute_name=objects.Attribute.AttributeName("x-attribute1"),
attribute_value=primitives.TextString(
value="ModifiedValue1",
tag=enums.Tags.ATTRIBUTE_VALUE
)
)
)
b = payloads.ModifyAttributeRequestPayload(
attribute=objects.Attribute(
attribute_name=objects.Attribute.AttributeName("x-attribute2"),
attribute_value=primitives.TextString(
value="ModifiedValue2",
tag=enums.Tags.ATTRIBUTE_VALUE
)
)
)
self.assertFalse(a == b)
self.assertFalse(b == a)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_comparison_on_different_current_attributes(self):
"""
Test that the equality/inequality operators return False/True when
comparing two ModifyAttribute request payloads with different current
attributes.
"""
a = payloads.ModifyAttributeRequestPayload(
current_attribute=objects.CurrentAttribute(
attribute=primitives.Enumeration(
enums.CryptographicAlgorithm,
enums.CryptographicAlgorithm.AES,
enums.Tags.CRYPTOGRAPHIC_ALGORITHM
)
)
)
b = payloads.ModifyAttributeRequestPayload(
current_attribute=objects.CurrentAttribute(
attribute=primitives.Integer(
128,
enums.Tags.CRYPTOGRAPHIC_LENGTH
)
)
)
self.assertFalse(a == b)
self.assertFalse(b == a)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_comparison_on_different_new_attributes(self):
"""
Test that the equality/inequality operators return False/True when
comparing two ModifyAttribute request payloads with different new
attributes.
"""
a = payloads.ModifyAttributeRequestPayload(
new_attribute=objects.NewAttribute(
attribute=primitives.Enumeration(
enums.CryptographicAlgorithm,
enums.CryptographicAlgorithm.AES,
enums.Tags.CRYPTOGRAPHIC_ALGORITHM
)
)
)
b = payloads.ModifyAttributeRequestPayload(
new_attribute=objects.NewAttribute(
attribute=primitives.Integer(
128,
enums.Tags.CRYPTOGRAPHIC_LENGTH
)
)
)
self.assertFalse(a == b)
self.assertFalse(b == a)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_comparison_on_type_mismatch(self):
"""
Test that the equality/inequality operators return False/True when
        comparing a ModifyAttribute request payload against a different type.
"""
a = payloads.ModifyAttributeRequestPayload()
b = "invalid"
self.assertFalse(a == b)
self.assertFalse(b == a)
self.assertTrue(a != b)
self.assertTrue(b != a)
class TestModifyAttributeResponsePayload(testtools.TestCase):
"""
A unit test suite for the ModifyAttribute response payload.
"""
def setUp(self):
super(TestModifyAttributeResponsePayload, self).setUp()
# This encoding was taken from test case 3.1.4-6 from the KMIP 1.1
# test suite.
#
# This encoding matches the following set of values:
# Response Payload
# Unique Identifier - b4faee10-aa2a-4446-8ad4-0881f3422959
# Attribute
# Attribute Name - x-attribute1
# Attribute Value - ModifiedValue1
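        # For reference (added annotation following the standard KMIP TTLV
        # layout, not part of the original test comment): each item below is
        # tag (3 bytes) + type (1 byte) + length (4 bytes) + padded value.
        # 0x42007C / 0x01 / 0x68 is the Response Payload structure of 104
        # bytes, and 0x420094 / 0x07 / 0x24 is the 36-character Unique
        # Identifier text string, padded to an 8-byte boundary.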
self.full_encoding = utils.BytearrayStream(
b'\x42\x00\x7C\x01\x00\x00\x00\x68'
b'\x42\x00\x94\x07\x00\x00\x00\x24'
b'\x62\x34\x66\x61\x65\x65\x31\x30\x2D\x61\x61\x32\x61\x2D\x34\x34'
b'\x34\x36\x2D\x38\x61\x64\x34\x2D\x30\x38\x38\x31\x66\x33\x34\x32'
b'\x32\x39\x35\x39\x00\x00\x00\x00'
b'\x42\x00\x08\x01\x00\x00\x00\x30'
b'\x42\x00\x0A\x07\x00\x00\x00\x0C'
b'\x78\x2D\x61\x74\x74\x72\x69\x62\x75\x74\x65\x31\x00\x00\x00\x00'
b'\x42\x00\x0B\x07\x00\x00\x00\x0E'
b'\x4D\x6F\x64\x69\x66\x69\x65\x64\x56\x61\x6C\x75\x65\x31\x00\x00'
)
# This encoding was adapted from test case 3.1.4-6 from the KMIP 1.1
# test suite. The attribute encoding was removed.
#
# This encoding matches the following set of values:
# Response Payload
# Unique Identifier - b4faee10-aa2a-4446-8ad4-0881f3422959
self.full_encoding_kmip_2_0 = utils.BytearrayStream(
b'\x42\x00\x7C\x01\x00\x00\x00\x30'
b'\x42\x00\x94\x07\x00\x00\x00\x24'
b'\x62\x34\x66\x61\x65\x65\x31\x30\x2D\x61\x61\x32\x61\x2D\x34\x34'
b'\x34\x36\x2D\x38\x61\x64\x34\x2D\x30\x38\x38\x31\x66\x33\x34\x32'
b'\x32\x39\x35\x39\x00\x00\x00\x00'
)
self.empty_encoding = utils.BytearrayStream(
b'\x42\x00\x7C\x01\x00\x00\x00\x00'
)
def tearDown(self):
super(TestModifyAttributeResponsePayload, self).tearDown()
def test_invalid_unique_identifier(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the unique identifier of a ModifyAttribute response payload.
"""
kwargs = {"unique_identifier": 0}
self.assertRaisesRegex(
TypeError,
"The unique identifier must be a string.",
payloads.ModifyAttributeResponsePayload,
**kwargs
)
args = (
payloads.ModifyAttributeResponsePayload(),
"unique_identifier",
0
)
self.assertRaisesRegex(
TypeError,
"The unique identifier must be a string.",
setattr,
*args
)
def test_invalid_attribute(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the attribute of a ModifyAttribute response payload.
"""
kwargs = {"attribute": "invalid"}
self.assertRaisesRegex(
TypeError,
"The attribute must be an Attribute object.",
payloads.ModifyAttributeResponsePayload,
**kwargs
)
args = (
payloads.ModifyAttributeResponsePayload(),
"attribute",
"invalid"
)
self.assertRaisesRegex(
TypeError,
"The attribute must be an Attribute object.",
setattr,
*args
)
def test_read(self):
"""
Test that a ModifyAttribute response payload can be read from a buffer.
"""
payload = payloads.ModifyAttributeResponsePayload()
self.assertIsNone(payload.unique_identifier)
self.assertIsNone(payload.attribute)
payload.read(self.full_encoding)
self.assertEqual(
"b4faee10-aa2a-4446-8ad4-0881f3422959",
payload.unique_identifier
)
self.assertEqual(
objects.Attribute(
attribute_name=objects.Attribute.AttributeName("x-attribute1"),
attribute_value=primitives.TextString(
value="ModifiedValue1",
tag=enums.Tags.ATTRIBUTE_VALUE
)
),
payload.attribute
)
def test_read_kmip_2_0(self):
"""
        Test that a ModifyAttribute response payload can be read from a buffer
        with KMIP 2.0 fields.
"""
payload = payloads.ModifyAttributeResponsePayload()
self.assertIsNone(payload.unique_identifier)
self.assertIsNone(payload.attribute)
payload.read(
self.full_encoding_kmip_2_0,
kmip_version=enums.KMIPVersion.KMIP_2_0
)
self.assertEqual(
"b4faee10-aa2a-4446-8ad4-0881f3422959",
payload.unique_identifier
)
self.assertIsNone(payload.attribute)
def test_read_no_unique_identifier(self):
"""
Test that an InvalidKmipEncoding error is raised when an invalid
encoding containing no encoded unique identifier is used to decode
a ModifyAttribute response payload.
"""
payload = payloads.ModifyAttributeResponsePayload()
args = (self.empty_encoding, )
self.assertRaisesRegex(
exceptions.InvalidKmipEncoding,
"The ModifyAttribute response payload encoding is missing the "
"unique identifier field.",
payload.read,
*args
)
def test_read_no_attribute(self):
"""
Test that an InvalidKmipEncoding error is raised when an invalid
encoding containing no encoded attribute is used to decode a
ModifyAttribute response payload.
"""
payload = payloads.ModifyAttributeResponsePayload()
args = (self.full_encoding_kmip_2_0, )
self.assertRaisesRegex(
exceptions.InvalidKmipEncoding,
"The ModifyAttribute response payload encoding is missing the "
"attribute field.",
payload.read,
*args
)
def test_write(self):
"""
Test that a ModifyAttribute response payload can be written to a
buffer.
"""
payload = payloads.ModifyAttributeResponsePayload(
unique_identifier="b4faee10-aa2a-4446-8ad4-0881f3422959",
attribute=objects.Attribute(
attribute_name=objects.Attribute.AttributeName("x-attribute1"),
attribute_value=primitives.TextString(
value="ModifiedValue1",
tag=enums.Tags.ATTRIBUTE_VALUE
)
)
)
buffer = utils.BytearrayStream()
payload.write(buffer)
self.assertEqual(len(self.full_encoding), len(buffer))
self.assertEqual(str(self.full_encoding), str(buffer))
def test_write_kmip_2_0(self):
"""
Test that a ModifyAttribute response payload can be written to a
buffer with KMIP 2.0 fields.
"""
payload = payloads.ModifyAttributeResponsePayload(
unique_identifier="b4faee10-aa2a-4446-8ad4-0881f3422959"
)
buffer = utils.BytearrayStream()
payload.write(buffer, kmip_version=enums.KMIPVersion.KMIP_2_0)
self.assertEqual(len(self.full_encoding_kmip_2_0), len(buffer))
self.assertEqual(str(self.full_encoding_kmip_2_0), str(buffer))
def test_write_no_unique_identifier(self):
"""
Test that an InvalidField error is raised when attempting to write
a ModifyAttribute response payload to a buffer with no unique
identifier field specified.
"""
payload = payloads.ModifyAttributeResponsePayload()
args = (utils.BytearrayStream(), )
self.assertRaisesRegex(
exceptions.InvalidField,
"The ModifyAttribute response payload is missing the unique "
"identifier field.",
payload.write,
*args
)
def test_write_no_attribute(self):
"""
Test that an InvalidField error is raised when attempting to write
        a ModifyAttribute response payload to a buffer with no attribute
        field specified.
"""
payload = payloads.ModifyAttributeResponsePayload(
unique_identifier="b4faee10-aa2a-4446-8ad4-0881f3422959"
)
args = (utils.BytearrayStream(), )
self.assertRaisesRegex(
exceptions.InvalidField,
"The ModifyAttribute response payload is missing the attribute "
"field.",
payload.write,
*args
)
def test_repr(self):
"""
Test that repr can be applied to a ModifyAttribute response payload.
"""
payload = payloads.ModifyAttributeResponsePayload(
unique_identifier="b4faee10-aa2a-4446-8ad4-0881f3422959"
)
args = [
"unique_identifier='b4faee10-aa2a-4446-8ad4-0881f3422959'",
"attribute=None"
]
self.assertEqual(
"ModifyAttributeResponsePayload({})".format(", ".join(args)),
repr(payload)
)
def test_str(self):
"""
Test that str can be applied to a ModifyAttribute response payload.
"""
payload = payloads.ModifyAttributeResponsePayload(
unique_identifier="b4faee10-aa2a-4446-8ad4-0881f3422959"
)
s = str(
{
"unique_identifier": "b4faee10-aa2a-4446-8ad4-0881f3422959",
"attribute": None
}
)
self.assertEqual(s, str(payload))
def test_comparison(self):
"""
Test that the equality/inequality operators return True/False when
comparing two ModifyAttribute response payloads with the same data.
"""
a = payloads.ModifyAttributeResponsePayload()
b = payloads.ModifyAttributeResponsePayload()
self.assertTrue(a == b)
self.assertTrue(b == a)
self.assertFalse(a != b)
self.assertFalse(b != a)
a = payloads.ModifyAttributeResponsePayload(
unique_identifier="b4faee10-aa2a-4446-8ad4-0881f3422959",
attribute=objects.Attribute(
attribute_name=objects.Attribute.AttributeName("x-attribute1"),
attribute_value=primitives.TextString(
value="ModifiedValue1",
tag=enums.Tags.ATTRIBUTE_VALUE
)
)
)
b = payloads.ModifyAttributeResponsePayload(
unique_identifier="b4faee10-aa2a-4446-8ad4-0881f3422959",
attribute=objects.Attribute(
attribute_name=objects.Attribute.AttributeName("x-attribute1"),
attribute_value=primitives.TextString(
value="ModifiedValue1",
tag=enums.Tags.ATTRIBUTE_VALUE
)
)
)
self.assertTrue(a == b)
self.assertTrue(b == a)
self.assertFalse(a != b)
self.assertFalse(b != a)
def test_comparison_on_different_unique_identifiers(self):
"""
Test that the equality/inequality operators return False/True when
comparing two ModifyAttribute response payloads with different unique
identifiers.
"""
a = payloads.ModifyAttributeResponsePayload(unique_identifier="1")
b = payloads.ModifyAttributeResponsePayload(unique_identifier="2")
self.assertFalse(a == b)
self.assertFalse(b == a)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_comparison_on_different_attributes(self):
"""
Test that the equality/inequality operators return False/True when
comparing two ModifyAttribute response payloads with different
attributes.
"""
a = payloads.ModifyAttributeResponsePayload(
attribute=objects.Attribute(
attribute_name=objects.Attribute.AttributeName("x-attribute1"),
attribute_value=primitives.TextString(
value="ModifiedValue1",
tag=enums.Tags.ATTRIBUTE_VALUE
)
))
b = payloads.ModifyAttributeResponsePayload(
attribute=objects.Attribute(
attribute_name=objects.Attribute.AttributeName("x-attribute2"),
attribute_value=primitives.TextString(
value="ModifiedValue2",
tag=enums.Tags.ATTRIBUTE_VALUE
)
))
self.assertFalse(a == b)
self.assertFalse(b == a)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_comparison_on_type_mismatch(self):
"""
Test that the equality/inequality operators return False/True when
        comparing a ModifyAttribute response payload against a different
type.
"""
a = payloads.ModifyAttributeResponsePayload()
b = "invalid"
self.assertFalse(a == b)
self.assertFalse(b == a)
self.assertTrue(a != b)
self.assertTrue(b != a)
|
devices/filters.py
|
maznu/peering-manager
| 127 |
99978
|
import django_filters
from django.db.models import Q
from devices.enums import PasswordAlgorithm
from devices.models import Configuration, Platform
from utils.filters import (
BaseFilterSet,
CreatedUpdatedFilterSet,
NameSlugSearchFilterSet,
TagFilter,
)
class ConfigurationFilterSet(BaseFilterSet, CreatedUpdatedFilterSet):
q = django_filters.CharFilter(method="search", label="Search")
tag = TagFilter()
class Meta:
model = Configuration
fields = ["id", "jinja2_trim", "jinja2_lstrip"]
def search(self, queryset, name, value):
if not value.strip():
return queryset
return queryset.filter(Q(name__icontains=value) | Q(template__icontains=value))
class PlatformFilterSet(
BaseFilterSet, CreatedUpdatedFilterSet, NameSlugSearchFilterSet
):
password_algorithm = django_filters.MultipleChoiceFilter(
choices=PasswordAlgorithm.choices, null_value=None
)
class Meta:
model = Platform
fields = ["id", "name", "slug", "napalm_driver", "description"]
|
tests/helper.py
|
rahulbahal7/restricted-python
| 236 |
99990
|
from RestrictedPython import compile_restricted_eval
from RestrictedPython import compile_restricted_exec
import RestrictedPython.Guards
def _compile(compile_func, source):
"""Compile some source with a compile func."""
result = compile_func(source)
assert result.errors == (), result.errors
assert result.code is not None
return result.code
def _execute(code, glb, exc_func):
"""Execute compiled code using `exc_func`.
glb ... globals, gets injected with safe_builtins
"""
if glb is None:
glb = {}
if '__builtins__' not in glb:
glb['__builtins__'] = RestrictedPython.Guards.safe_builtins.copy()
if exc_func == 'eval':
return eval(code, glb)
else:
exec(code, glb)
return glb
def restricted_eval(source, glb=None):
"""Call compile_restricted_eval and actually eval it."""
code = _compile(compile_restricted_eval, source)
return _execute(code, glb, 'eval')
def restricted_exec(source, glb=None):
"""Call compile_restricted_eval and actually exec it."""
code = _compile(compile_restricted_exec, source)
return _execute(code, glb, 'exec')
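

# Doctest-style sketch of the helpers above (illustrative addition, not part
# of the original test helper):
#
#     >>> restricted_eval("1 + 1")
#     2
#     >>> glb = restricted_exec("x = 1 + 1")
#     >>> glb["x"]
#     2
#
# In both cases the globals dict receives a copy of RestrictedPython's
# safe_builtins before the compiled code runs.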
|
src/sensing/drivers/radar/umrr_driver/src/smartmicro/Services/basicCanServices/canService.py
|
P4nos/Aslan
| 227 |
100017
|
import queue
import struct
class CanIDService:
"""
Can ID service interface class.
This interface class has to be used to implement any can id based service that shall have be able to be
registered with the communication module :class:`test_framework.communication.communication`
"""
# ---------------------------------------------------------------------------------------------------------------- #
# function: initialization #
# ---------------------------------------------------------------------------------------------------------------- #
def __init__(self):
"""
The function initializes the basic values of the class.
"""
self.recvQueue = None
# ---------------------------------------------------------------------------------------------------------------- #
# function: getRXQueue #
# ---------------------------------------------------------------------------------------------------------------- #
def getRXQueue(self):
"""
The function returns the memory information of the queue.
Returns
-------
queue
"""
return self.recvQueue
# ---------------------------------------------------------------------------------------------------------------- #
# function: clearQueue #
# ---------------------------------------------------------------------------------------------------------------- #
def clearQueue(self):
"""
Flushes the recvQueue.
Returns
-------
None
"""
        while self.isEmpty() is False:
            # getRXQueue() only hands back the queue object, so consume one
            # item per iteration to actually drain the queue
            self.recvQueue.get()
# ---------------------------------------------------------------------------------------------------------------- #
# function: isEmpty #
# ---------------------------------------------------------------------------------------------------------------- #
def isEmpty(self):
"""
Checks if the queue is empty or not.
Returns
-------
retValue : bool
True queue is empty. False queue is not empty.
"""
retValue = self.recvQueue.empty()
return retValue
# ---------------------------------------------------------------------------------------------------------------- #
# function: decode #
# ---------------------------------------------------------------------------------------------------------------- #
@classmethod
def decode(cls, msg, extractRule):
"""
The function provides the basic rule to decode messages.
Parameters
----------
msg : binary
the message is a binary format of interested information
extractRule : dict
the dictionary is a set of information to decode the information from the message
Returns
-------
retValue : value
the value represents the extracted information
"""
data = cls._extractBytes(msg, extractRule)
if 'resolution' in extractRule:
resolution = extractRule['resolution']
else:
resolution = 1
if 'offset' in extractRule:
offset = extractRule['offset']
else:
offset = 0
retValue = (data - offset) * resolution
return retValue
# ---------------------------------------------------------------------------------------------------------------- #
# function: _extractBytes #
# ---------------------------------------------------------------------------------------------------------------- #
@classmethod
def _extractBytes(cls, msg, extractRule):
"""
The function extracts the requested bytes from the array.
Parameters
----------
msg : bytearray
msg with the necessary data
extractRule : dictionary
dictionary with the extraction rule
Returns
-------
data : integer
extracted data
"""
# get message length
msgLength = len(msg)
# append data to message
if msgLength < 8:
msg += bytearray(8 - msgLength)
convertedData = struct.unpack("<Q", msg)[0]
        # bit position of the field's least significant bit in the 64-bit word
        startBitShift = 8 * extractRule['start_byte'] + extractRule['start_bit']
        # number of bits above the field's most significant bit
        endBitShift = 8 * (8 - extractRule['end_byte']) - (extractRule['end_bit'] + 1)
        referenceShiftHigh = 64 - endBitShift
        # mask off everything above the field, then shift the field down to bit 0
        deactivationMaskTemp = convertedData >> referenceShiftHigh
        deactivationMaskHigh = deactivationMaskTemp << referenceShiftHigh
        data = (convertedData & ~deactivationMaskHigh) >> startBitShift
return data
# ---------------------------------------------------------------------------------------------------------------- #
    # function: encode                                                                                                 #
# ---------------------------------------------------------------------------------------------------------------- #
@staticmethod
def encode(msg, packingRule, value):
"""
The function provides the basic rule to encode data to a messages.
Parameters
----------
msg : byte
currently used message
packingRule : dictionary
rule to encode the given data
value : int or float
value to be packed by the function
Returns
-------
msg : byte
currently used message with added data
"""
# decode already packed data
packedData = struct.unpack("<Q", msg)[0]
# calculate number of bits
endBit = packingRule['end_byte'] * 8 + packingRule['end_bit']
startBit = packingRule['start_byte'] * 8 + packingRule['start_bit']
lengthOfData = endBit - startBit + 1
codingMask = 0
# set coding mask
        for _ in range(lengthOfData):
codingMask <<= 1
codingMask |= 1
# calculate number to pack
        packingValue = int(value / packingRule['resolution']) + packingRule['offset']
# ensure that the value is not bigger than the coding mask
packingValue &= codingMask
# configure pre data mask
preDataMask = 0
# set coding mask
        for _ in range(startBit):
preDataMask <<= 1
preDataMask |= 1
# configure pre data mask
postDataMask = 0
# set coding mask
        for _ in range(64 - endBit):
postDataMask <<= 1
postDataMask |= 1
postDataMask <<= endBit
# save existing data
preDataSet = packedData & preDataMask
currDataSet = packingValue << startBit
postDataSet = packedData & postDataMask
packedData = postDataSet | currDataSet | preDataSet
return packedData.to_bytes(8, byteorder='little')
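

# Illustrative usage sketch (added for clarity; the extraction rule below is
# an assumption based on the decode()/_extractBytes() docstrings, not part of
# the original driver):
#
#     rule = {"start_byte": 0, "start_bit": 0, "end_byte": 1, "end_bit": 7,
#             "resolution": 1, "offset": 0}
#     CanIDService.decode(bytearray(b"\x34\x12"), rule)   # -> 4660 (0x1234)
#     CanIDService.encode(bytes(8), rule, 4660)           # -> b"\x34\x12" + six zero bytes
#
# A non-unit "resolution" or non-zero "offset" rescales the raw field:
# decode() returns (raw - offset) * resolution and encode() packs
# int(value / resolution) + offset.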
|
spconv/pytorch/__init__.py
|
xiaobaishu0097/spconv
| 909 |
100053
|
import platform
from pathlib import Path
import numpy as np
import torch
from spconv.pytorch import ops
from spconv.pytorch.conv import (SparseConv2d, SparseConv3d, SparseConvTranspose2d,
SparseConvTranspose3d, SparseInverseConv2d,
SparseInverseConv3d, SubMConv2d, SubMConv3d)
from spconv.pytorch.core import SparseConvTensor
from spconv.pytorch.identity import Identity
from spconv.pytorch.modules import SparseModule, SparseSequential
from spconv.pytorch.ops import ConvAlgo
from spconv.pytorch.pool import SparseMaxPool2d, SparseMaxPool3d
from spconv.pytorch.tables import AddTable, ConcatTable, JoinTable
class ToDense(SparseModule):
"""convert SparseConvTensor to NCHW dense tensor.
"""
def forward(self, x: SparseConvTensor):
return x.dense()
class RemoveGrid(SparseModule):
"""remove pre-allocated grid buffer.
"""
def forward(self, x: SparseConvTensor):
x.grid = None
return x
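

# Usage sketch (illustrative; the layer arguments are assumptions, not part of
# this module): ToDense is typically the last stage of a SparseSequential so
# that downstream dense layers receive a regular NCHW/NCDHW tensor, e.g.
#
#     net = SparseSequential(
#         SubMConv3d(16, 32, 3, indice_key="subm0"),
#         ToDense(),
#     )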
|
peregrinearb/__init__.py
|
kecheon/peregrine
| 954 |
100056
|
from .async_find_opportunities import *
from .async_build_markets import *
from .bellman_multi_graph import bellman_ford_multi, NegativeWeightFinderMulti
from .bellmannx import bellman_ford, calculate_profit_ratio_for_path, NegativeWeightFinder, NegativeWeightDepthFinder, \
find_opportunities_on_exchange, get_starting_volume
from .utils import *
from .fetch_exchange_tickers import *
from .settings import *
from .multi_graph_builder import *
|
libs/fuel/fuel/iterator.py
|
dendisuhubdy/attention-lvcsr
| 767 |
100058
|
import six
class DataIterator(six.Iterator):
"""An iterator over data, representing a single epoch.
Parameters
----------
data_stream : :class:`DataStream` or :class:`Transformer`
The data stream over which to iterate.
request_iterator : iterator
An iterator which returns the request to pass to the data stream
for each step.
as_dict : bool, optional
If `True`, return dictionaries mapping source names to data
from each source. If `False` (default), return tuples in the
same order as `data_stream.sources`.
"""
def __init__(self, data_stream, request_iterator=None, as_dict=False):
self.data_stream = data_stream
self.request_iterator = request_iterator
self.as_dict = as_dict
def __iter__(self):
return self
def __next__(self):
if self.request_iterator is not None:
data = self.data_stream.get_data(next(self.request_iterator))
else:
data = self.data_stream.get_data()
if self.as_dict:
return dict(zip(self.data_stream.sources, data))
else:
return data
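

# Usage sketch (illustrative; obtaining the iterator through
# `data_stream.get_epoch_iterator(...)` is the usual Fuel idiom, but the
# stream construction itself is assumed here and not shown):
#
#     for batch in data_stream.get_epoch_iterator(as_dict=True):
#         # batch maps source names (e.g. "features", "targets") to arrays
#         ...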
|
test/test_trace.py
|
chellvs/PyPCAPKit
| 131 |
100073
|
# -*- coding: utf-8 -*-
import pprint
import pcapkit
trace = pcapkit.extract(fin='../sample/http.pcap', nofile=True, verbose=True,
trace=True, trace_format='json', trace_fout='../sample/trace')
pprint.pprint(trace.trace)
|
pylightgbm/models.py
|
ArdalanM/pyLightGBM
| 390 |
100084
|
# -*- coding: utf-8 -*-
"""
@author: <NAME> <<EMAIL>>
@brief:
"""
from __future__ import print_function
import os
import re
import sys
import shutil
import tempfile
import subprocess
import numpy as np
import scipy.sparse as sps
from pylightgbm.utils import io_utils
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin
class GenericGMB(BaseEstimator):
def __init__(self, exec_path="LighGBM/lightgbm",
config="",
application="regression",
num_iterations=10,
learning_rate=0.1,
num_leaves=127,
tree_learner="serial",
num_threads=1,
min_data_in_leaf=100,
metric='l2,',
is_training_metric=False,
feature_fraction=1.,
feature_fraction_seed=2,
bagging_fraction=1.,
bagging_freq=0,
bagging_seed=3,
metric_freq=1,
early_stopping_round=0,
max_bin=255,
is_unbalance=False,
num_class=1,
boosting_type='gbdt',
min_sum_hessian_in_leaf=10,
drop_rate=0.01,
drop_seed=4,
max_depth=-1,
lambda_l1=0.,
lambda_l2=0.,
min_gain_to_split=0.,
verbose=True,
model=None):
# '~/path/to/lightgbm' becomes 'absolute/path/to/lightgbm'
try:
self.exec_path = os.environ['LIGHTGBM_EXEC']
except KeyError:
            print("pyLightGBM is looking for the 'LIGHTGBM_EXEC' environment variable, which cannot be found.")
            print("exec_path will be deprecated in favor of the environment variable")
self.exec_path = os.path.expanduser(exec_path)
self.config = config
self.model = model
self.verbose = verbose
self.param = {
'application': application,
'num_iterations': num_iterations,
'learning_rate': learning_rate,
'num_leaves': num_leaves,
'tree_learner': tree_learner,
'num_threads': num_threads,
'min_data_in_leaf': min_data_in_leaf,
'metric': metric,
'is_training_metric': is_training_metric,
'feature_fraction': feature_fraction,
'feature_fraction_seed': feature_fraction_seed,
'bagging_fraction': bagging_fraction,
'bagging_freq': bagging_freq,
'bagging_seed': bagging_seed,
'metric_freq': metric_freq,
'early_stopping_round': early_stopping_round,
'max_bin': max_bin,
'is_unbalance': is_unbalance,
'num_class': num_class,
'boosting_type': boosting_type,
'min_sum_hessian_in_leaf': min_sum_hessian_in_leaf,
'drop_rate': drop_rate,
'drop_seed': drop_seed,
'max_depth': max_depth,
'lambda_l1': lambda_l1,
'lambda_l2': lambda_l2,
'min_gain_to_split': min_gain_to_split,
}
def fit(self, X, y, test_data=None, init_scores=[]):
# create tmp dir to hold data and model (especially the latter)
tmp_dir = tempfile.mkdtemp()
issparse = sps.issparse(X)
f_format = "svm" if issparse else "csv"
train_filepath = os.path.abspath("{}/X.{}".format(tmp_dir, f_format))
init_filepath = train_filepath + ".init"
io_utils.dump_data(X, y, train_filepath, issparse)
if len(init_scores) > 0:
assert len(init_scores) == X.shape[0]
np.savetxt(init_filepath, X=init_scores, delimiter=',', newline=os.linesep)
# else:
# if self.param['application'] in ['binary', 'multiclass']:
# np.savetxt(init_filepath, X=0.5 * np.ones(X.shape[0]),
# delimiter=',', newline=os.linesep)
if test_data:
valid = []
for i, (x_test, y_test) in enumerate(test_data):
test_filepath = os.path.abspath("{}/X{}_test.{}".format(tmp_dir, i, f_format))
valid.append(test_filepath)
io_utils.dump_data(x_test, y_test, test_filepath, issparse)
self.param['valid'] = ",".join(valid)
self.param['task'] = 'train'
self.param['data'] = train_filepath
self.param['output_model'] = os.path.join(tmp_dir, "LightGBM_model.txt")
calls = ["{}={}\n".format(k, self.param[k]) for k in self.param]
if self.config == "":
conf_filepath = os.path.join(tmp_dir, "train.conf")
with open(conf_filepath, 'w') as f:
f.writelines(calls)
process = subprocess.Popen([self.exec_path, "config={}".format(conf_filepath)],
stdout=subprocess.PIPE, bufsize=1)
else:
process = subprocess.Popen([self.exec_path, "config={}".format(self.config)],
stdout=subprocess.PIPE, bufsize=1)
with process.stdout:
for line in iter(process.stdout.readline, b''):
print(line.strip().decode('utf-8')) if self.verbose else None
# wait for the subprocess to exit
process.wait()
with open(self.param['output_model'], mode='r') as file:
self.model = file.read()
shutil.rmtree(tmp_dir)
if test_data and self.param['early_stopping_round'] > 0:
            self.best_round = max(map(int, re.findall(r"Tree=(\d+)", self.model))) + 1
def predict(self, X):
tmp_dir = tempfile.mkdtemp()
issparse = sps.issparse(X)
f_format = "svm" if issparse else "csv"
predict_filepath = os.path.abspath(os.path.join(tmp_dir, "X_to_pred.{}".format(f_format)))
output_model = os.path.abspath(os.path.join(tmp_dir, "model"))
output_results = os.path.abspath(os.path.join(tmp_dir, "LightGBM_predict_result.txt"))
conf_filepath = os.path.join(tmp_dir, "predict.conf")
with open(output_model, mode="w") as file:
file.write(self.model)
io_utils.dump_data(X, np.zeros(X.shape[0]), predict_filepath, issparse)
calls = ["task = predict\n",
"data = {}\n".format(predict_filepath),
"input_model = {}\n".format(output_model),
"output_result={}\n".format(output_results)]
with open(conf_filepath, 'w') as f:
f.writelines(calls)
process = subprocess.Popen([self.exec_path, "config={}".format(conf_filepath)],
stdout=subprocess.PIPE, bufsize=1)
with process.stdout:
for line in iter(process.stdout.readline, b''):
print(line.strip().decode('utf-8')) if self.verbose else None
# wait for the subprocess to exit
process.wait()
y_pred = np.loadtxt(output_results, dtype=float)
shutil.rmtree(tmp_dir)
return y_pred
def get_params(self, deep=True):
params = dict(self.param)
params['exec_path'] = self.exec_path
params['config'] = self.config
params['model'] = self.model
params['verbose'] = self.verbose
if 'output_model' in params:
del params['output_model']
return params
def set_params(self, **kwargs):
params = self.get_params()
params.update(kwargs)
self.__init__(**params)
return self
def feature_importance(self, feature_names=[], importance_type='weight'):
"""Get feature importance of each feature.
Importance type can be defined as:
'weight' - the number of times a feature is used to split the data across all trees.
'gain' - the average gain of the feature when it is used in trees
'cover' - the average coverage of the feature when it is used in trees
Parameters
----------
feature_names: list (optional)
List of feature names.
importance_type: string
The type of feature importance
"""
assert importance_type in ['weight'], 'For now, only weighted feature importance is implemented'
        match = re.findall(r"Column_(\d+)=(\d+)", self.model)
if importance_type == 'weight':
if len(match) > 0:
dic_fi = {int(k): int(value) for k, value in match}
if len(feature_names) > 0:
dic_fi = {feature_names[key]: dic_fi[key] for key in dic_fi}
else:
dic_fi = {}
return dic_fi
class GBMClassifier(GenericGMB, ClassifierMixin):
def __init__(self, exec_path="LighGBM/lightgbm",
config="",
application='binary',
num_iterations=10,
learning_rate=0.1,
num_leaves=127,
tree_learner="serial",
num_threads=1,
min_data_in_leaf=100,
metric='binary_logloss,',
is_training_metric='False',
feature_fraction=1.,
feature_fraction_seed=2,
bagging_fraction=1.,
bagging_freq=0,
bagging_seed=3,
metric_freq=1,
early_stopping_round=0,
max_bin=255,
is_unbalance=False,
num_class=1,
boosting_type='gbdt',
min_sum_hessian_in_leaf=10,
drop_rate=0.01,
drop_seed=4,
max_depth=-1,
lambda_l1=0.,
lambda_l2=0.,
min_gain_to_split=0.,
verbose=True,
model=None):
super(GBMClassifier, self).__init__(exec_path=exec_path,
config=config,
application=application,
num_iterations=num_iterations,
learning_rate=learning_rate,
num_leaves=num_leaves,
tree_learner=tree_learner,
num_threads=num_threads,
min_data_in_leaf=min_data_in_leaf,
metric=metric,
is_training_metric=is_training_metric,
feature_fraction=feature_fraction,
feature_fraction_seed=feature_fraction_seed,
bagging_fraction=bagging_fraction,
bagging_freq=bagging_freq,
bagging_seed=bagging_seed,
metric_freq=metric_freq,
early_stopping_round=early_stopping_round,
max_bin=max_bin,
is_unbalance=is_unbalance,
num_class=num_class,
boosting_type=boosting_type,
min_sum_hessian_in_leaf=min_sum_hessian_in_leaf,
drop_rate=drop_rate,
drop_seed=drop_seed,
max_depth=max_depth,
lambda_l1=lambda_l1,
lambda_l2=lambda_l2,
min_gain_to_split=min_gain_to_split,
verbose=verbose,
model=model)
def predict_proba(self, X):
tmp_dir = tempfile.mkdtemp()
issparse = sps.issparse(X)
f_format = "svm" if issparse else "csv"
predict_filepath = os.path.abspath(os.path.join(tmp_dir, "X_to_pred.{}".format(f_format)))
output_model = os.path.abspath(os.path.join(tmp_dir, "model"))
conf_filepath = os.path.join(tmp_dir, "predict.conf")
output_results = os.path.abspath(os.path.join(tmp_dir, "LightGBM_predict_result.txt"))
with open(output_model, mode="w") as file:
file.write(self.model)
io_utils.dump_data(X, np.zeros(X.shape[0]), predict_filepath, issparse)
calls = [
"task = predict\n",
"data = {}\n".format(predict_filepath),
"input_model = {}\n".format(output_model),
"output_result={}\n".format(output_results)
]
with open(conf_filepath, 'w') as f:
f.writelines(calls)
process = subprocess.Popen([self.exec_path, "config={}".format(conf_filepath)],
stdout=subprocess.PIPE, bufsize=1)
with process.stdout:
for line in iter(process.stdout.readline, b''):
print(line.strip().decode('utf-8')) if self.verbose else None
# wait for the subprocess to exit
process.wait()
raw_probabilities = np.loadtxt(output_results, dtype=float)
if self.param['application'] == 'multiclass':
y_prob = raw_probabilities
elif self.param['application'] == 'binary':
probability_of_one = raw_probabilities
probability_of_zero = 1 - probability_of_one
y_prob = np.transpose(np.vstack((probability_of_zero, probability_of_one)))
else:
            raise ValueError(
                "Unsupported application for predict_proba: {}".format(self.param['application'])
            )
shutil.rmtree(tmp_dir)
return y_prob
def predict(self, X):
y_prob = self.predict_proba(X)
return y_prob.argmax(-1)
class GBMRegressor(GenericGMB, RegressorMixin):
def __init__(self, exec_path="LighGBM/lightgbm",
config="",
application='regression',
num_iterations=10,
learning_rate=0.1,
num_leaves=127,
tree_learner="serial",
num_threads=1,
min_data_in_leaf=100,
metric='l2,',
is_training_metric=False,
feature_fraction=1.,
feature_fraction_seed=2,
bagging_fraction=1.,
bagging_freq=0,
bagging_seed=3,
metric_freq=1,
early_stopping_round=0,
max_bin=255,
is_unbalance=False,
num_class=1,
boosting_type='gbdt',
min_sum_hessian_in_leaf=10,
drop_rate=0.01,
drop_seed=4,
max_depth=-1,
lambda_l1=0.,
lambda_l2=0.,
min_gain_to_split=0.,
verbose=True,
model=None):
super(GBMRegressor, self).__init__(exec_path=exec_path,
config=config,
application=application,
num_iterations=num_iterations,
learning_rate=learning_rate,
num_leaves=num_leaves,
tree_learner=tree_learner,
num_threads=num_threads,
min_data_in_leaf=min_data_in_leaf,
metric=metric,
is_training_metric=is_training_metric,
feature_fraction=feature_fraction,
feature_fraction_seed=feature_fraction_seed,
bagging_fraction=bagging_fraction,
bagging_freq=bagging_freq,
bagging_seed=bagging_seed,
metric_freq=metric_freq,
early_stopping_round=early_stopping_round,
max_bin=max_bin,
is_unbalance=is_unbalance,
num_class=num_class,
boosting_type=boosting_type,
min_sum_hessian_in_leaf=min_sum_hessian_in_leaf,
drop_rate=drop_rate,
drop_seed=drop_seed,
max_depth=max_depth,
lambda_l1=lambda_l1,
lambda_l2=lambda_l2,
min_gain_to_split=min_gain_to_split,
verbose=verbose,
model=model)
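

# Usage sketch (illustrative; the executable path and data variables are
# assumptions, not part of the library):
#
#     est = GBMRegressor(exec_path="~/LightGBM/lightgbm", num_iterations=100,
#                        early_stopping_round=10, verbose=False)
#     est.fit(X_train, y_train, test_data=[(X_valid, y_valid)])
#     y_pred = est.predict(X_test)
#
# fit() dumps the data into a temporary directory, writes a LightGBM config
# file, shells out to the LightGBM binary, and keeps the resulting model text
# on the estimator for later predict() calls.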
|
slack_bolt/workflows/step/utilities/async_update.py
|
hirosassa/bolt-python
| 504 |
100085
|
from slack_sdk.web.async_client import AsyncWebClient
class AsyncUpdate:
"""`update()` utility to tell Slack the processing results of a `save` listener.
async def save(ack, view, update):
await ack()
values = view["state"]["values"]
task_name = values["task_name_input"]["name"]
task_description = values["task_description_input"]["description"]
inputs = {
"task_name": {"value": task_name["value"]},
"task_description": {"value": task_description["value"]}
}
outputs = [
{
"type": "text",
"name": "task_name",
"label": "Task name",
},
{
"type": "text",
"name": "task_description",
"label": "Task description",
}
]
await update(inputs=inputs, outputs=outputs)
ws = AsyncWorkflowStep(
callback_id="add_task",
edit=edit,
save=save,
execute=execute,
)
app.step(ws)
    This utility is a thin wrapper of the workflows.updateStep API method.
Refer to https://api.slack.com/methods/workflows.updateStep for details.
"""
def __init__(self, *, client: AsyncWebClient, body: dict):
self.client = client
self.body = body
async def __call__(self, **kwargs) -> None:
await self.client.workflows_updateStep(
workflow_step_edit_id=self.body["workflow_step"]["workflow_step_edit_id"],
**kwargs,
)
|
servicecatalog_factory/workflow/portfolios/associate_product_with_portfolio_task.py
|
RobBrazier/aws-service-catalog-factory
| 116 |
100100
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import json
import luigi
from servicecatalog_factory import aws
from servicecatalog_factory.workflow.portfolios.create_portfolio_task import (
CreatePortfolioTask,
)
from servicecatalog_factory.workflow.portfolios.create_product_task import (
CreateProductTask,
)
from servicecatalog_factory.workflow.tasks import FactoryTask, logger
class AssociateProductWithPortfolioTask(FactoryTask):
region = luigi.Parameter()
portfolio_args = luigi.DictParameter()
product_args = luigi.DictParameter()
def params_for_results_display(self):
return {
"region": self.region,
"portfolio": f"{self.portfolio_args.get('portfolio_group_name')}-{self.portfolio_args.get('display_name')}",
"product": self.product_args.get("name"),
}
def output(self):
return luigi.LocalTarget(
f"output/AssociateProductWithPortfolioTask/"
f"{self.region}"
f"{self.product_args.get('name')}"
f"_{self.portfolio_args.get('portfolio_group_name')}"
f"_{self.portfolio_args.get('display_name')}.json"
)
def requires(self):
return {
"create_portfolio_task": CreatePortfolioTask(**self.portfolio_args),
"create_product_task": CreateProductTask(**self.product_args),
}
def run(self):
logger_prefix = f"{self.region}-{self.portfolio_args.get('portfolio_group_name')}-{self.portfolio_args.get('display_name')}"
portfolio = json.loads(
self.input().get("create_portfolio_task").open("r").read()
)
portfolio_id = portfolio.get("Id")
product = json.loads(self.input().get("create_product_task").open("r").read())
product_id = product.get("ProductId")
with self.regional_client("servicecatalog") as service_catalog:
logger.info(f"{logger_prefix}: Searching for existing association")
aws.ensure_portfolio_association_for_product(
portfolio_id, product_id, service_catalog
)
with self.output().open("w") as f:
logger.info(f"{logger_prefix}: about to write!")
f.write("{}")
|
autotest/gdrivers/heif.py
|
jpapadakis/gdal
| 3,100 |
100116
|
#!/usr/bin/env pytest
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test HEIF driver
# Author: <NAME>, <even dot rouault at spatialys.com>
#
###############################################################################
# Copyright (c) 2020, <NAME> <even dot rouault at spatialys.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import pytest
import gdaltest
from osgeo import gdal
pytestmark = pytest.mark.require_driver('HEIF')
def get_version():
return [int(x) for x in gdal.GetDriverByName('HEIF').GetMetadataItem('LIBHEIF_VERSION').split('.')]
@pytest.mark.parametrize('endianness', ['big_endian', 'little_endian'])
def test_heif_exif_endian(endianness):
filename = 'data/heif/byte_exif_%s.heic' % endianness
gdal.ErrorReset()
ds = gdal.Open(filename)
assert gdal.GetLastErrorMsg() == ''
assert ds
assert ds.RasterXSize == 64
assert ds.RasterYSize == 64
assert ds.RasterCount == 3
stats = ds.GetRasterBand(1).ComputeStatistics(False)
assert stats[0] == pytest.approx(89, abs=2)
assert stats[1] == pytest.approx(243, abs=2)
assert stats[2] == pytest.approx(126.7, abs=0.5)
assert stats[3] == pytest.approx(18.8, abs=0.5)
assert ds.GetRasterBand(1).GetColorInterpretation() == gdal.GCI_RedBand
assert ds.GetRasterBand(2).GetColorInterpretation() == gdal.GCI_GreenBand
assert ds.GetRasterBand(3).GetColorInterpretation() == gdal.GCI_BlueBand
assert ds.GetRasterBand(1).GetOverviewCount() == 0
if get_version() >= [1, 4, 0]:
assert 'EXIF' in ds.GetMetadataDomainList()
assert 'xml:XMP' in ds.GetMetadataDomainList()
assert len(ds.GetMetadata('EXIF')) > 0
assert 'xpacket' in ds.GetMetadata('xml:XMP')[0]
ds = None
gdal.Unlink(filename + '.aux.xml')
def test_heif_thumbnail():
ds = gdal.Open('data/heif/byte_thumbnail.heic')
assert ds
assert ds.RasterXSize == 128
assert ds.RasterYSize == 128
assert ds.RasterCount == 3
assert ds.GetRasterBand(1).GetOverviewCount() == 1
assert ds.GetRasterBand(1).GetOverview(-1) is None
assert ds.GetRasterBand(1).GetOverview(1) is None
ovrband = ds.GetRasterBand(1).GetOverview(0)
assert ovrband is not None
assert ovrband.XSize == 64
assert ovrband.YSize == 64
assert ovrband.Checksum() != 0
def test_heif_rgb_16bit():
if get_version() < [1, 4, 0]:
pytest.skip()
ds = gdal.Open('data/heif/small_world_16.heic')
assert ds
assert ds.RasterXSize == 400
assert ds.RasterYSize == 200
assert ds.RasterCount == 3
assert ds.GetRasterBand(1).DataType == gdal.GDT_UInt16
assert ds.GetRasterBand(1).GetMetadataItem('NBITS', 'IMAGE_STRUCTURE') == '10'
assert ds.GetRasterBand(1).ComputeRasterMinMax() == pytest.approx((0,1023), abs=2)
def test_heif_rgba():
ds = gdal.Open('data/heif/stefan_full_rgba.heic')
assert ds
assert ds.RasterCount == 4
assert ds.RasterXSize == 162
assert ds.RasterYSize == 150
assert ds.GetRasterBand(1).GetOverviewCount() == 1
ovrband = ds.GetRasterBand(1).GetOverview(0)
assert ovrband is not None
assert ovrband.XSize == 96
assert ovrband.YSize == 88
assert ovrband.Checksum() != 0
def test_heif_rgba_16bit():
if get_version() < [1, 4, 0]:
pytest.skip()
ds = gdal.Open('data/heif/stefan_full_rgba_16.heic')
assert ds
assert ds.RasterCount == 4
assert ds.GetRasterBand(1).DataType == gdal.GDT_UInt16
def test_heif_subdatasets():
ds = gdal.Open('data/heif/subdatasets.heic')
assert ds
assert len(ds.GetSubDatasets()) == 2
subds1_name = ds.GetSubDatasets()[0][0]
subds2_name = ds.GetSubDatasets()[1][0]
ds = gdal.Open(subds1_name)
assert ds
assert ds.RasterXSize == 64
ds = gdal.Open(subds2_name)
assert ds
assert ds.RasterXSize == 162
with gdaltest.error_handler():
assert gdal.Open('HEIF:0:data/heif/subdatasets.heic') is None
assert gdal.Open('HEIF:3:data/heif/subdatasets.heic') is None
assert gdal.Open('HEIF:1:non_existing.heic') is None
assert gdal.Open('HEIF:') is None
assert gdal.Open('HEIF:1') is None
assert gdal.Open('HEIF:1:') is None
|
sfaira/versions/topologies/mouse/embedding/__init__.py
|
theislab/sfaira
| 110 |
100125
|
from sfaira.versions.topologies.mouse.embedding.ae import AE_TOPOLOGIES
from sfaira.versions.topologies.mouse.embedding.linear import LINEAR_TOPOLOGIES
from sfaira.versions.topologies.mouse.embedding.nmf import NMF_TOPOLOGIES
from sfaira.versions.topologies.mouse.embedding.vae import VAE_TOPOLOGIES
from sfaira.versions.topologies.mouse.embedding.vaeiaf import VAEIAF_TOPOLOGIES
from sfaira.versions.topologies.mouse.embedding.vaevamp import VAEVAMP_TOPOLOGIES
|
env/Lib/site-packages/prompt_toolkit/cursor_shapes.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
| 4,028 |
100155
|
from abc import ABC, abstractmethod
from enum import Enum
from typing import TYPE_CHECKING, Any, Callable, Union
from prompt_toolkit.enums import EditingMode
from prompt_toolkit.key_binding.vi_state import InputMode
if TYPE_CHECKING:
from .application import Application
__all__ = [
"CursorShape",
"CursorShapeConfig",
"SimpleCursorShapeConfig",
"ModalCursorShapeConfig",
"DynamicCursorShapeConfig",
"to_cursor_shape_config",
]
class CursorShape(Enum):
# Default value that should tell the output implementation to never send
# cursor shape escape sequences. This is the default right now, because
# before this `CursorShape` functionality was introduced into
# prompt_toolkit itself, people had workarounds to send cursor shapes
# escapes into the terminal, by monkey patching some of prompt_toolkit's
# internals. We don't want the default prompt_toolkit implemetation to
# interefere with that. E.g., IPython patches the `ViState.input_mode`
# property. See: https://github.com/ipython/ipython/pull/13501/files
_NEVER_CHANGE = "_NEVER_CHANGE"
BLOCK = "BLOCK"
BEAM = "BEAM"
UNDERLINE = "UNDERLINE"
BLINKING_BLOCK = "BLINKING_BLOCK"
BLINKING_BEAM = "BLINKING_BEAM"
BLINKING_UNDERLINE = "BLINKING_UNDERLINE"
class CursorShapeConfig(ABC):
@abstractmethod
def get_cursor_shape(self, application: "Application[Any]") -> CursorShape:
"""
Return the cursor shape to be used in the current state.
"""
AnyCursorShapeConfig = Union[CursorShape, CursorShapeConfig, None]
class SimpleCursorShapeConfig(CursorShapeConfig):
"""
Always show the given cursor shape.
"""
def __init__(self, cursor_shape: CursorShape = CursorShape._NEVER_CHANGE) -> None:
self.cursor_shape = cursor_shape
def get_cursor_shape(self, application: "Application[Any]") -> CursorShape:
return self.cursor_shape
class ModalCursorShapeConfig(CursorShapeConfig):
"""
Show cursor shape according to the current input mode.
"""
def get_cursor_shape(self, application: "Application[Any]") -> CursorShape:
if application.editing_mode == EditingMode.VI:
if application.vi_state.input_mode == InputMode.INSERT:
return CursorShape.BEAM
if application.vi_state.input_mode == InputMode.REPLACE:
return CursorShape.UNDERLINE
# Default
return CursorShape.BLOCK
class DynamicCursorShapeConfig(CursorShapeConfig):
def __init__(
self, get_cursor_shape_config: Callable[[], AnyCursorShapeConfig]
) -> None:
self.get_cursor_shape_config = get_cursor_shape_config
def get_cursor_shape(self, application: "Application[Any]") -> CursorShape:
return to_cursor_shape_config(self.get_cursor_shape_config()).get_cursor_shape(
application
)
def to_cursor_shape_config(value: AnyCursorShapeConfig) -> CursorShapeConfig:
"""
Take a `CursorShape` instance or `CursorShapeConfig` and turn it into a
`CursorShapeConfig`.
"""
if value is None:
return SimpleCursorShapeConfig()
if isinstance(value, CursorShape):
return SimpleCursorShapeConfig(value)
return value
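

# Usage sketch (illustrative): to_cursor_shape_config(None) yields a config
# that never changes the shape, to_cursor_shape_config(CursorShape.BLINKING_BEAM)
# wraps the value in a SimpleCursorShapeConfig, and ModalCursorShapeConfig()
# picks BEAM, UNDERLINE or BLOCK from the current Vi input mode. Recent
# prompt_toolkit versions are assumed to accept any of these forms through a
# `cursor=` argument on Application/PromptSession.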
|
psonic/psonic.py
|
m-roberts/python-sonic
| 263 |
100164
|
import random
from .samples import Sample
from .synthesizers import SAW
from .notes import C5
from .internals.chords import _CHORD_QUALITY
from .internals.scales import _SCALE_MODE
from .synth_server import (
SonicPi,
use_synth,
)
__debug = False
def synth(name, note=None, attack=None, decay=None,
sustain_level=None, sustain=None, release=None,
cutoff=None, cutoff_attack=None, amp=None, pan=None):
arguments = locals()
arguments.pop('name')
parameters = ['{0}: {1}'.format(k, v) for k, v in arguments.items() if v is not None]
parameter = ''
if len(parameters) > 0: parameter = ',' + ','.join(parameters)
command = 'synth :{0}{1}'.format(name.name, parameter)
_debug('synth command={}'.format(command))
synth_server.synth(command)
def play(note, attack=None, decay=None,
sustain_level=None, sustain=None, release=None,
cutoff=None, cutoff_attack=None, amp=None, pan=None):
arguments = locals()
arguments.pop('note')
parameters = ['{0}: {1}'.format(k, v) for k, v in arguments.items() if v is not None]
parameter = ''
if len(parameters) > 0: parameter = ',' + ','.join(parameters)
command = 'play {0}{1}'.format(note, parameter)
_debug('play command={}'.format(command))
synth_server.play(command)
def play_pattern_timed(notes, times, release=None):
"""play notes
:param notes:
:param times:
:return:
"""
if not type(notes) is list: notes = [notes]
if not type(times) is list: times = [times]
for t in times:
for i in notes:
play(i, release=release)
sleep(t)
def play_pattern(notes):
""":param notes:
:return:
"""
play_pattern_timed(notes, 1)
def sample(sample, rate=None, attack=None, sustain=None,
release=None, beat_stretch=None, start=None,
finish=None, amp=None, pan=None):
arguments = locals()
arguments.pop('sample')
parameters = ['{0}: {1}'.format(k, v) for k, v in arguments.items() if v is not None]
parameter = ''
command = ''
if len(parameters) > 0: parameter = ',' + ','.join(parameters)
if type(sample) == Sample:
command = 'sample :{0}{1}'.format(sample.name, parameter)
else:
command = 'sample "{0}"{1}'.format(sample, parameter)
_debug('sample command={}'.format(command))
synth_server.sample(command)
def sleep(duration):
"""the same as time.sleep
:param duration:
:return:
"""
synth_server.sleep(duration)
_debug('sleep', duration)
def sample_duration(sample):
"""Returns the duration of the sample (in seconds)
:param sample:
:return: number
"""
return sample.duration
def one_in(max):
"""random function returns True in one of max cases
:param max:
:return: boolean
"""
return random.randint(1, max) == 1
def invert_pitches(pitches, inversion):
"""Inverts a list of pitches, wrapping the top pitches an octave below the root
:param pitches: list
:param inversion: int
:return: list
"""
for i in range(1, (inversion % (len(pitches)))+1):
pitches[-i] = pitches[-i] - 12
pitches.sort()
return pitches
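
# Worked example for invert_pitches (illustrative addition): a first inversion
# of a C major triad wraps the top pitch an octave down and re-sorts the list.
#
#     >>> invert_pitches([60, 64, 67], 1)
#     [55, 60, 64]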
def chord(root_note, chord_quality, inversion=None):
"""Generates a list of notes of a chord
:param root_note:
:param chord_quality:
:param inversion:
:return: list
"""
result = []
n = root_note
half_tone_steps = _CHORD_QUALITY[chord_quality]
for i in half_tone_steps:
q = n + i
result.append(q)
if inversion:
result = invert_pitches(result, inversion)
return result
def scale(root_note, scale_mode, num_octaves=1):
"""Genarates a liste of notes of scale
:param root_note:
:param scale_mode:
:param num_octaves:
:return: list
"""
result = []
n = root_note
half_tone_steps = _SCALE_MODE[scale_mode]
for o in range(num_octaves):
n = root_note + o * 12
result.append(n)
for i in half_tone_steps:
n = n + i
result.append(n)
return result
def run(command):
synth_server.run(command)
def stop():
synth_server.stop()
def send_message(message, *parameter):
synth_server.send_message(message, *parameter)
def start_recording():
synth_server.start_recording()
def stop_recording():
synth_server.stop_recording()
def save_recording(name):
synth_server.save_recording(name)
synth_server = SonicPi()
def set_server_parameter(udp_ip="", udp_port=-1, udp_port_osc_message=-1):
synth_server.set_parameter(udp_ip, udp_port, udp_port_osc_message)
def _debug(*args):
if __debug: print(args)
if __name__ == '__main__':
use_synth(SAW)
play(C5, amp=2, pan=-1)
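    # Illustrative extension of the demo above. 'major' is assumed to be a valid
    # key of both _CHORD_QUALITY and _SCALE_MODE (the real key names live in the
    # internals modules): play a C major chord, then walk up the major scale.
    sleep(1)
    for n in chord(C5, 'major'):
        play(n, amp=1)
    sleep(1)
    play_pattern_timed(scale(C5, 'major'), 0.25)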
|
tests/common/checks_infra/test_registry.py
|
jamesholland-uk/checkov
| 4,013 |
100211
|
<filename>tests/common/checks_infra/test_registry.py
import os
import unittest
from checkov.common.checks_infra.registry import Registry
from checkov.common.checks_infra.checks_parser import NXGraphCheckParser
class TestRegistry(unittest.TestCase):
def test_invalid_check_yaml_does_not_throw_exception(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
test_files_dir = current_dir + "/test-registry-data/invalid-yaml"
r = Registry(checks_dir=test_files_dir)
r.load_checks()
def test_valid_yaml_but_invalid_check_does_not_throw_exception(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
test_files_dir = current_dir + "/test-registry-data/valid-yaml-invalid-check"
r = Registry(checks_dir=test_files_dir, parser=NXGraphCheckParser())
r.load_checks()
|
dsgcn/datasets/cluster_processor.py
|
LLLjun/learn-to-cluster
| 620 |
100231
|
import numpy as np
class ClusterProcessor(object):
def __init__(self, dataset):
self.dataset = dataset
self.dtype = np.float32
def __len__(self):
return self.dataset.size
def build_adj(self, node, edge):
node = list(node)
abs2rel = {}
rel2abs = {}
for i, n in enumerate(node):
abs2rel[n] = i
rel2abs[i] = n
size = len(node)
adj = np.eye(size)
for e in edge:
w = 1.
if len(e) == 2:
e1, e2 = e
elif len(e) == 3:
e1, e2, dist = e
if not self.dataset.wo_weight:
w = 1. - dist
else:
raise ValueError('Unknown length of e: {}'.format(e))
v1 = abs2rel[e1]
v2 = abs2rel[e2]
adj[v1][v2] = w
adj[v2][v1] = w
if self.dataset.is_norm_adj:
adj /= adj.sum(axis=1, keepdims=True)
return adj, abs2rel, rel2abs
def build_features(self, node):
if self.dataset.featureless:
features = np.ones(len(node)).reshape(-1, 1)
else:
features = self.dataset.features[node, :]
return features
def __getitem__(self, idx):
raise NotImplementedError
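# Minimal usage sketch (not part of the original module): exercise build_adj and
# build_features with a dummy dataset object. The attribute names mirror the
# fields referenced above; the node ids, edges and flags are assumptions.
if __name__ == '__main__':
    class _DummyDataset(object):
        size = 3
        wo_weight = False
        is_norm_adj = True
        featureless = True
        features = None
    proc = ClusterProcessor(_DummyDataset())
    # one weighted edge (node, node, distance) and one unweighted edge
    adj, abs2rel, rel2abs = proc.build_adj(node=[10, 11, 12],
                                           edge=[(10, 11, 0.2), (11, 12)])
    print(adj)                                # row-normalized 3x3 adjacency
    print(proc.build_features([10, 11, 12]))  # all-ones column (featureless)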
|
flyingsquid/_observables.py
|
oxu2/flyingsquid
| 269 |
100236
|
from pgmpy.models import MarkovModel
from pgmpy.factors.discrete import JointProbabilityDistribution, DiscreteFactor
from itertools import combinations
from flyingsquid.helpers import *
import numpy as np
import math
from tqdm import tqdm
import sys
import random
class Mixin:
'''
Functions to compute observable properties.
'''
def _compute_class_balance(self, class_balance=None, Y_dev=None):
# generate class balance of Ys
Ys_ordered = [ 'Y_{}'.format(i) for i in range(self.v) ]
cardinalities = [ 2 for i in range(self.v) ]
if class_balance is not None:
class_balance = class_balance / sum(class_balance)
cb = JointProbabilityDistribution(
Ys_ordered, cardinalities, class_balance
)
elif Y_dev is not None:
Ys_ordered = [ 'Y_{}'.format(i) for i in range(self.v) ]
vals = { Y: (-1, 1) for Y in Ys_ordered }
Y_vecs = sorted([
[ vec_dict[Y] for Y in Ys_ordered ]
for vec_dict in dict_product(vals)
])
counts = {
tuple(Y_vec): 0
for Y_vec in Y_vecs
}
for data_point in Y_dev:
counts[tuple(data_point)] += 1
cb = JointProbabilityDistribution(
Ys_ordered, cardinalities,
[
float(counts[tuple(Y_vec)]) / len(Y_dev)
for Y_vec in Y_vecs
])
else:
num_combinations = 2 ** self.v
cb = JointProbabilityDistribution(
Ys_ordered, cardinalities, [
1. / num_combinations for i in range(num_combinations)
])
return cb
def _compute_Y_marginals(self, Y_marginals):
for marginal in Y_marginals:
nodes = [ 'Y_{}'.format(idx) for idx in marginal ]
Y_marginals[marginal] = self.cb.marginal_distribution(
nodes,
inplace=False
)
return Y_marginals
def _compute_Y_equals_one(self, Y_equals_one):
# compute from class balance
for factor in Y_equals_one:
nodes = [ 'Y_{}'.format(idx) for idx in factor ]
Y_marginal = self.cb.marginal_distribution(
nodes,
inplace=False
)
vals = { Y: (-1, 1) for Y in nodes }
Y_vecs = sorted([
[ vec_dict[Y] for Y in nodes ]
for vec_dict in dict_product(vals)
])
# add up the probabilities of all the vectors whose values multiply to +1
total_prob = 0
for Y_vec in Y_vecs:
if np.prod(Y_vec) == 1:
vector_prob = Y_marginal.reduce(
[
(Y_i, Y_val if Y_val == 1 else 0)
for Y_i, Y_val in zip(nodes, Y_vec)
],
inplace=False
).values
total_prob += vector_prob
Y_equals_one[factor] = total_prob
return Y_equals_one
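    # Worked example of the computation above (illustrative): for a factor over
    # two independent, balanced Ys, the vectors whose entries multiply to +1 are
    # (-1, -1) and (+1, +1), each with probability 0.25, so the stored value is
    # 0.25 + 0.25 = 0.5.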
|
ue4docker/dockerfiles/ue4-minimal/windows/fix-targets.py
|
meetakshay99/ue4-docker
| 579 |
100266
|
<filename>ue4docker/dockerfiles/ue4-minimal/windows/fix-targets.py
#!/usr/bin/env python3
import os, re, sys
def readFile(filename):
with open(filename, "rb") as f:
return f.read().decode("utf-8")
def writeFile(filename, data):
with open(filename, "wb") as f:
f.write(data.encode("utf-8"))
# Ensure the `PlatformType` field is set correctly for Client and Server targets in BaseEngine.ini
iniFile = sys.argv[1]
config = readFile(iniFile)
config = re.sub(
'PlatformType="Game", RequiredFile="(.+UE4(Client|Server).*\\.target)"',
'PlatformType="\\2", RequiredFile="\\1"',
config,
)
writeFile(iniFile, config)
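# Example of the rewrite performed above (illustrative line; actual RequiredFile
# values come from BaseEngine.ini):
#   before: PlatformType="Game", RequiredFile="Binaries/Win64/UE4Server.target"
#   after:  PlatformType="Server", RequiredFile="Binaries/Win64/UE4Server.target"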
|
cupy/sparse/__init__.py
|
prkhrsrvstv1/cupy
| 6,180 |
100283
|
<gh_stars>1000+
import sys
import warnings
import cupyx.scipy.sparse
# Raise a `DeprecationWarning` for `cupy.sparse` submodule when its functions
# are called. We could raise the warning on importing the submodule, but we
# use module level `__getattr__` function here as the submodule is also
# imported in cupy/__init__.py. Unfortunately, module level `__getattr__` is
# only supported on Python 3.7 and higher, so we need to keep the explicit
# import list for older Python versions.
if (3, 7) <= sys.version_info:
def __getattr__(name):
if hasattr(cupyx.scipy.sparse, name):
msg = 'cupy.sparse is deprecated. Use cupyx.scipy.sparse instead.'
warnings.warn(msg, DeprecationWarning)
return getattr(cupyx.scipy.sparse, name)
raise AttributeError(
"module 'cupy.sparse' has no attribute {!r}".format(name))
else:
from cupyx.scipy.sparse import * # NOQA
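# Illustrative use of the shim above (assumes CuPy is installed): any attribute
# access through cupy.sparse is forwarded to cupyx.scipy.sparse and, on Python
# 3.7+, emits the DeprecationWarning defined above.
#   import warnings
#   import cupy
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter("always")
#       _ = cupy.sparse.csr_matrix
#   assert any(issubclass(w.category, DeprecationWarning) for w in caught)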
|
lib/django-1.4/django/contrib/localflavor/fr/fr_department.py
|
MiCHiLU/google_appengine_sdk
| 790 |
100295
|
# -*- coding: utf-8 -*-
# See the "Code officiel gΓ©ographique" on the INSEE website <www.insee.fr>.
DEPARTMENT_CHOICES = (
# Metropolitan departments
('01', u'01 - Ain'),
('02', u'02 - Aisne'),
('03', u'03 - Allier'),
('04', u'04 - Alpes-de-Haute-Provence'),
('05', u'05 - Hautes-Alpes'),
('06', u'06 - Alpes-Maritimes'),
('07', u'07 - Ardèche'),
('08', u'08 - Ardennes'),
('09', u'09 - Ariège'),
('10', u'10 - Aube'),
('11', u'11 - Aude'),
('12', u'12 - Aveyron'),
    ('13', u'13 - Bouches-du-Rhône'),
('14', u'14 - Calvados'),
('15', u'15 - Cantal'),
('16', u'16 - Charente'),
('17', u'17 - Charente-Maritime'),
('18', u'18 - Cher'),
('19', u'19 - Corrèze'),
('2A', u'2A - Corse-du-Sud'),
('2B', u'2B - Haute-Corse'),
    ('21', u'21 - Côte-d\'Or'),
    ('22', u'22 - Côtes-d\'Armor'),
('23', u'23 - Creuse'),
('24', u'24 - Dordogne'),
('25', u'25 - Doubs'),
    ('26', u'26 - Drôme'),
('27', u'27 - Eure'),
('28', u'28 - Eure-et-Loir'),
('29', u'29 - Finistère'),
('30', u'30 - Gard'),
('31', u'31 - Haute-Garonne'),
('32', u'32 - Gers'),
('33', u'33 - Gironde'),
    ('34', u'34 - Hérault'),
('35', u'35 - Ille-et-Vilaine'),
('36', u'36 - Indre'),
('37', u'37 - Indre-et-Loire'),
('38', u'38 - Isère'),
('39', u'39 - Jura'),
('40', u'40 - Landes'),
('41', u'41 - Loir-et-Cher'),
('42', u'42 - Loire'),
('43', u'43 - Haute-Loire'),
('44', u'44 - Loire-Atlantique'),
('45', u'45 - Loiret'),
('46', u'46 - Lot'),
('47', u'47 - Lot-et-Garonne'),
('48', u'48 - Lozère'),
('49', u'49 - Maine-et-Loire'),
('50', u'50 - Manche'),
('51', u'51 - Marne'),
('52', u'52 - Haute-Marne'),
('53', u'53 - Mayenne'),
('54', u'54 - Meurthe-et-Moselle'),
('55', u'55 - Meuse'),
('56', u'56 - Morbihan'),
('57', u'57 - Moselle'),
('58', u'58 - Nièvre'),
('59', u'59 - Nord'),
('60', u'60 - Oise'),
('61', u'61 - Orne'),
('62', u'62 - Pas-de-Calais'),
    ('63', u'63 - Puy-de-Dôme'),
    ('64', u'64 - Pyrénées-Atlantiques'),
    ('65', u'65 - Hautes-Pyrénées'),
    ('66', u'66 - Pyrénées-Orientales'),
('67', u'67 - Bas-Rhin'),
('68', u'68 - Haut-Rhin'),
    ('69', u'69 - Rhône'),
    ('70', u'70 - Haute-Saône'),
    ('71', u'71 - Saône-et-Loire'),
('72', u'72 - Sarthe'),
('73', u'73 - Savoie'),
('74', u'74 - Haute-Savoie'),
('75', u'75 - Paris'),
('76', u'76 - Seine-Maritime'),
('77', u'77 - Seine-et-Marne'),
('78', u'78 - Yvelines'),
('79', u'79 - Deux-Sèvres'),
('80', u'80 - Somme'),
('81', u'81 - Tarn'),
('82', u'82 - Tarn-et-Garonne'),
('83', u'83 - Var'),
('84', u'84 - Vaucluse'),
    ('85', u'85 - Vendée'),
('86', u'86 - Vienne'),
('87', u'87 - Haute-Vienne'),
('88', u'88 - Vosges'),
('89', u'89 - Yonne'),
('90', u'90 - Territoire de Belfort'),
('91', u'91 - Essonne'),
('92', u'92 - Hauts-de-Seine'),
('93', u'93 - Seine-Saint-Denis'),
('94', u'94 - Val-de-Marne'),
('95', u'95 - Val-d\'Oise'),
# Overseas departments, communities, and other territories
('971', u'971 - Guadeloupe'),
('972', u'972 - Martinique'),
('973', u'973 - Guyane'),
    ('974', u'974 - La Réunion'),
('975', u'975 - Saint-Pierre-et-Miquelon'),
('976', u'976 - Mayotte'),
    ('977', u'977 - Saint-Barthélemy'),
('978', u'978 - Saint-Martin'),
    ('984', u'984 - Terres australes et antarctiques françaises'),
('986', u'986 - Wallis et Futuna'),
    ('987', u'987 - Polynésie française'),
    ('988', u'988 - Nouvelle-Calédonie'),
    ('989', u'989 - Île de Clipperton'),
)
|
Python/zip-bomb/get_filesize.py
|
MartinThoma/algorithms
| 209 |
100316
|
<gh_stars>100-1000
import zipfile
zp = zipfile.ZipFile("example.zip")
size = sum([zinfo.file_size for zinfo in zp.filelist])
zip_kb = float(size) / 1000 # kB
|
qf_lib/analysis/strategy_monitoring/assets_monitoring_sheet.py
|
webclinic017/qf-lib
| 198 |
100317
|
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from typing import List, Union, Dict
import numpy as np
import matplotlib as plt
import pandas as pd
from pandas.core.dtypes.common import is_numeric_dtype
from qf_lib.analysis.strategy_monitoring.pnl_calculator import PnLCalculator
from qf_lib.backtesting.contract.contract import Contract
from qf_lib.analysis.common.abstract_document import AbstractDocument
from qf_lib.backtesting.contract.contract_to_ticker_conversion.base import ContractTickerMapper
from qf_lib.backtesting.portfolio.transaction import Transaction
from qf_lib.common.enums.frequency import Frequency
from qf_lib.common.enums.price_field import PriceField
from qf_lib.common.exceptions.future_contracts_exceptions import NoValidTickerException
from qf_lib.common.tickers.tickers import Ticker
from qf_lib.common.utils.dateutils.timer import SettableTimer
from qf_lib.common.utils.error_handling import ErrorHandling
from qf_lib.common.utils.logging.qf_parent_logger import qf_logger
from qf_lib.containers.dataframe.prices_dataframe import PricesDataFrame
from qf_lib.containers.dataframe.qf_dataframe import QFDataFrame
from qf_lib.containers.futures.future_tickers.future_ticker import FutureTicker
from qf_lib.containers.futures.futures_adjustment_method import FuturesAdjustmentMethod
from qf_lib.containers.futures.futures_chain import FuturesChain
from qf_lib.containers.series.prices_series import PricesSeries
from qf_lib.containers.series.simple_returns_series import SimpleReturnsSeries
from qf_lib.data_providers.data_provider import DataProvider
from qf_lib.documents_utils.document_exporting.element.df_table import DFTable
from qf_lib.documents_utils.document_exporting.element.heading import HeadingElement
from qf_lib.documents_utils.document_exporting.element.new_page import NewPageElement
from qf_lib.documents_utils.document_exporting.element.paragraph import ParagraphElement
from qf_lib.documents_utils.document_exporting.pdf_exporter import PDFExporter
from qf_lib.settings import Settings
@ErrorHandling.class_error_logging()
class AssetPerfAndDrawdownSheet(AbstractDocument):
"""
For each of the given tickers, provides performance and drawdown comparison of the strategy vs buy and hold.
    It also computes the performance contribution and PnL of each of the assets with the given frequency (either yearly
or monthly).
Note: It is assumed that at the beginning no positions are open in the portfolio.
Parameters
-----------
category_to_model_tickers: Dict[str, List[Ticker]]
Dictionary mapping a string, which denotes a category / sector etc, into a list of tickers. The categories are
used to provide aggregated information about performance contribution in each of them (e.g. to compute
performance contribution of different sectors, a dictionary mapping sector names into tickers objects).
contract_ticker_mapper: ContractTickerMapper
An instance of the ContractTickerMapper used to map the tickers into corresponding contracts, which are
used in the Transactions objects
transactions: Union[List[Transaction], str]
Either list of Transaction objects or a path to the Transactions file.
start_date: datetime
end_date: datetime
Dates to used as start and end date for the statistics
data_provider: DataProvider
Data provider used to download the prices and future contracts information, necessary to compute Buy and Hold
benchmark performance
settings: Settings
Necessary settings
pdf_exporter: PDFExporter
Used to export the document to PDF
title: str
Title of the document, will be a part of the filename. Do not use special characters.
initial_cash: int
Initial cash in the portfolio (used to compute the performance contribution for each asset)
frequency: Frequency
Frequency which should be used to compute the performance contribution. Currently only Yearly and Monthly
frequencies are supported.
"""
def __init__(self, category_to_model_tickers: Dict[str, List[Ticker]], contract_ticker_mapper: ContractTickerMapper,
transactions: Union[List[Transaction], str], start_date: datetime, end_date: datetime,
data_provider: DataProvider, settings: Settings, pdf_exporter: PDFExporter,
title: str = "Assets Monitoring Sheet", initial_cash: int = 10000000,
frequency: Frequency = Frequency.YEARLY):
super().__init__(settings, pdf_exporter, title=title)
self.tickers = [t for tickers_list in category_to_model_tickers.values() for t in tickers_list]
self._ticker_to_category = {ticker: c for c, tickers_list in category_to_model_tickers.items()
for ticker in tickers_list}
self._contract_ticker_mapper = contract_ticker_mapper
self._pnl_calculator = PnLCalculator(data_provider, contract_ticker_mapper)
self.transactions = self._parse_transactions_file(transactions) if isinstance(transactions, str) \
else transactions
self._start_date = start_date
self._end_date = end_date
self._data_provider = data_provider
self._initial_cash = initial_cash
if frequency not in (Frequency.MONTHLY, Frequency.YEARLY):
raise NotImplementedError("Only monthly and yearly frequencies are currently supported.")
self._frequency = frequency
self._max_columns_per_page = 7
self._logger = qf_logger.getChild(self.__class__.__name__)
def build_document(self):
self._add_header()
self.document.add_element(ParagraphElement("\n"))
ticker_to_pnl_series = self._compute_pnl()
self._add_pnl_and_performance_contribution_tables(ticker_to_pnl_series)
self._add_performance_statistics(ticker_to_pnl_series)
def _parse_transactions_file(self, path_to_transactions_file: str) -> List[Transaction]:
""" Parse the Transactions csv file created by the Monitor and generate a list of transactions objects. """
transactions_df = pd.read_csv(path_to_transactions_file)
transactions = [
Transaction(
time=pd.to_datetime(row.loc["Timestamp"]),
contract=Contract(
symbol=row.loc["Contract symbol"],
security_type=row.loc["Security type"],
exchange=row.loc["Exchange"],
contract_size=row.loc["Contract size"]
),
quantity=row.loc["Quantity"],
price=row.loc["Price"],
commission=row.loc["Commission"]
) for _, row in transactions_df.iterrows()
]
return transactions
def _compute_pnl(self) -> Dict[Ticker, PricesSeries]:
""" Computes PnL time series for each of the tickers. """
ticker_to_pnl_series = {ticker: self._pnl_calculator.compute_pnl(ticker, self.transactions, self._start_date,
self._end_date) for ticker in self.tickers}
return ticker_to_pnl_series
def _add_performance_statistics(self, ticker_to_pnl_series: Dict[Ticker, PricesSeries]):
""" Generate performance and drawdown plots, which provide the comparison between the strategy performance
and Buy and Hold performance for each of the assets.
"""
self.document.add_element(NewPageElement())
self.document.add_element(HeadingElement(level=2, text="Performance and Drawdowns - Strategy vs Buy and Hold"))
self.document.add_element(ParagraphElement("\n"))
for ticker in self.tickers:
grid = self._get_new_grid()
buy_and_hold_returns = self._generate_buy_and_hold_returns(ticker)
strategy_exposure_series = ticker_to_pnl_series[ticker].to_simple_returns().fillna(0.0)
strategy_exposure_series = strategy_exposure_series.where(strategy_exposure_series == 0.0).fillna(1.0)
strategy_returns = buy_and_hold_returns * strategy_exposure_series
strategy_returns = strategy_returns.dropna()
strategy_returns.name = "Strategy"
if len(strategy_returns) > 0:
perf_chart = self._get_perf_chart([buy_and_hold_returns, strategy_returns], False,
"Performance - {}".format(ticker.name))
underwater_chart = self._get_underwater_chart(strategy_returns.to_prices(),
title="Drawdown - {}".format(ticker.name),
benchmark_series=buy_and_hold_returns.to_prices(),
rotate_x_axis=True)
grid.add_chart(perf_chart)
grid.add_chart(underwater_chart)
self.document.add_element(grid)
else:
self._logger.warning("No data is available for {}. No plots will be generated.".format(ticker.name))
def _generate_buy_and_hold_returns(self, ticker: Ticker) -> SimpleReturnsSeries:
""" Computes series of simple returns, which would be returned by the Buy and Hold strategy. """
if isinstance(ticker, FutureTicker):
try:
ticker.initialize_data_provider(SettableTimer(self._end_date), self._data_provider)
futures_chain = FuturesChain(ticker, self._data_provider, FuturesAdjustmentMethod.BACK_ADJUSTED)
prices_series = futures_chain.get_price(PriceField.Close, self._start_date, self._end_date)
except NoValidTickerException:
prices_series = PricesSeries()
else:
prices_series = self._data_provider.get_price(ticker, PriceField.Close, self._start_date, self._end_date)
returns_tms = prices_series.to_simple_returns().replace([-np.inf, np.inf], np.nan).fillna(0.0)
returns_tms.name = "Buy and Hold"
return returns_tms
def _add_pnl_and_performance_contribution_tables(self, ticker_to_pnl: Dict[Ticker, PricesSeries]):
# For each ticker compute the PnL for each period (each year, month etc)
pnl_df = QFDataFrame.from_dict(ticker_to_pnl)
agg_performance = pnl_df.groupby(pd.Grouper(key=pnl_df.index.name, freq=self._frequency.to_pandas_freq())) \
.apply(lambda s: s.iloc[-1] - s.iloc[0])
# Format the column labels, so that they point exactly to the considered time frame
column_labels_format = {
Frequency.YEARLY: "%Y",
Frequency.MONTHLY: "%b %Y",
}
columns_format = column_labels_format[self._frequency]
performance_df = agg_performance.rename(index=lambda timestamp: timestamp.strftime(columns_format))
# Transpose the original data frame, so that performance for each period is presented in a separate column
performance_df = performance_df.transpose()
performance_df.index = performance_df.index.set_names("Asset")
performance_df = performance_df.reset_index()
performance_df["Asset"] = performance_df["Asset"].apply(lambda t: t.name)
performance_tables = self._create_performance_tables(performance_df.copy())
performance_contribution_tables = self._create_performance_contribution_tables(performance_df.copy())
# Add the text and all figures into the document
self.document.add_element(HeadingElement(level=2, text="Profit and Loss"))
self.document.add_element(ParagraphElement("The following tables provide the details on the Total profit and "
"loss for each asset (notional in currency units)."))
self.document.add_element(ParagraphElement("\n"))
for table in performance_tables:
self.document.add_element(HeadingElement(level=3, text="Performance between: {} - {}".format(
table.model.data.columns[1], table.model.data.columns[-1])))
self.document.add_element(table)
self.document.add_element(ParagraphElement("\n"))
self.document.add_element(NewPageElement())
# Add performance contribution table
self.document.add_element(HeadingElement(level=2, text="Performance contribution"))
for table in performance_contribution_tables:
self.document.add_element(HeadingElement(level=3, text="Performance contribution between {} - {}".format(
table.model.data.columns[1], table.model.data.columns[-1])))
self.document.add_element(table)
def _create_performance_tables(self, performance_df: QFDataFrame) -> List[DFTable]:
""" Create a formatted DFTable out of the performance_df data frame. """
numeric_columns = [col for col in performance_df.columns if is_numeric_dtype(performance_df[col])]
performance_df[numeric_columns] = performance_df[numeric_columns].applymap(lambda x: '{:,.0f}'.format(x))
performance_df = performance_df.set_index("Asset").sort_index()
# Divide the performance df into a number of data frames, so that each of them contains up to
        # self._max_columns_per_page columns, but keep the first column of the original df in all of them
split_dfs = np.array_split(performance_df, np.ceil(performance_df.num_of_columns / self._max_columns_per_page),
axis=1)
df_tables = [DFTable(df.reset_index(), css_classes=['table', 'shrink-font', 'right-align', 'wide-first-column'])
for df in split_dfs]
return df_tables
def _create_performance_contribution_tables(self, performance_df: QFDataFrame) -> List[DFTable]:
"""
        Create a list of DFTables with asset names in the index and different years / months in columns, which contain
details on the performance contribution for each asset.
"""
# Create a QFSeries which contains the initial amount of cash in the portfolio for each year / month
numeric_columns = [col for col in performance_df.columns if is_numeric_dtype(performance_df[col])]
portfolio_values = performance_df[numeric_columns].sum().shift(fill_value=self._initial_cash).cumsum()
performance_df[numeric_columns] = performance_df[numeric_columns] / portfolio_values[numeric_columns]
# Add category column and aggregate data accordingly
ticker_name_to_category = {t.name: category for t, category in self._ticker_to_category.items()}
performance_df["Category"] = performance_df["Asset"].apply(lambda t: ticker_name_to_category[t])
all_categories = list(set(ticker_name_to_category.values()))
performance_df = performance_df.sort_values(by=["Category", "Asset"])
performance_df = performance_df.groupby("Category").apply(
lambda d: pd.concat([PricesDataFrame({**{"Asset": [d.name], "Category": [d.name]},
**{c: [d[c].sum()] for c in numeric_columns}}), d],
ignore_index=True)).drop(columns=["Category"])
# Add the Total Performance row (divide by 2 as the df contains already aggregated data for each group)
total_sum_row = performance_df[numeric_columns].sum() / 2
total_sum_row["Asset"] = "Total Performance"
performance_df = performance_df.append(total_sum_row, ignore_index=True)
# Format the rows using the percentage formatter
performance_df[numeric_columns] = performance_df[numeric_columns].applymap(lambda x: '{:.2%}'.format(x))
# Divide the performance dataframe into a number of dataframes, so that each of them contains up to
# self._max_columns_per_page columns
split_dfs = np.array_split(performance_df.set_index("Asset"),
np.ceil((performance_df.num_of_columns - 1) / self._max_columns_per_page), axis=1)
df_tables = [DFTable(df.reset_index(), css_classes=['table', 'shrink-font', 'right-align', 'wide-first-column'])
for df in split_dfs]
# Get the indices of rows, which contain category info
category_indices = performance_df[performance_df["Asset"].isin(all_categories)].index
for df_table in df_tables:
# Add table formatting, highlight rows showing the total contribution of the given category
df_table.add_rows_styles(category_indices, {"font-weight": "bold", "font-size": "0.95em",
"background-color": "#cbd0d2"})
df_table.add_rows_styles([performance_df.index[-1]], {"font-weight": "bold", "font-size": "0.95em",
"background-color": "#b9bcbd"})
return df_tables
def save(self, report_dir: str = ""):
# Set the style for the report
plt.style.use(['tearsheet'])
filename = "%Y_%m_%d-%H%M {}.pdf".format(self.title)
filename = datetime.now().strftime(filename)
return self.pdf_exporter.generate([self.document], report_dir, filename)
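# Usage sketch (illustrative only -- the settings path, tickers, mapper, data
# provider and transactions file below are assumptions, not part of this module):
#   settings = Settings("path/to/settings.json")
#   pdf_exporter = PDFExporter(settings)
#   sheet = AssetPerfAndDrawdownSheet(
#       category_to_model_tickers={"Energy": [my_ticker]},
#       contract_ticker_mapper=my_contract_ticker_mapper,
#       transactions="path/to/transactions.csv",
#       start_date=datetime(2020, 1, 1), end_date=datetime(2021, 1, 1),
#       data_provider=my_data_provider, settings=settings,
#       pdf_exporter=pdf_exporter, frequency=Frequency.YEARLY)
#   sheet.build_document()
#   sheet.save(report_dir="reports")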
|
colors_script/calc_colormap.py
|
cemlyn007/mindboggle
| 118 |
100319
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import argparse
import numpy as np
from mindboggle.mio import colors
from mindboggle.mio.colors import distinguishable_colors, label_adjacency_matrix
if __name__ == "__main__":
    description = ('calculate colormap for a labeled image; '
                   'the calculated result is stored in output_dirname/colors.npy')
parser = argparse.ArgumentParser(description=description)
parser.add_argument('label_filename', help='path to the label image')
parser.add_argument('output_dirname', help='path to the folder storing '
'temporary files and result')
parser.add_argument('-v', '--verbose', action='store_true', default=False)
args = parser.parse_args()
if not os.path.isdir(args.output_dirname):
os.makedirs(args.output_dirname)
matrix_filename = os.path.join(args.output_dirname, 'matrix.npy')
colormap_filename = os.path.join(args.output_dirname, 'colormap.npy')
labels_filename = os.path.join(args.output_dirname, 'labels.npy')
colors_filename = os.path.join(args.output_dirname, 'colors.npy')
if args.verbose:
print('finding adjacency maps...')
if not os.path.isfile(matrix_filename) or \
not os.path.isfile(labels_filename):
labels, matrix = label_adjacency_matrix(args.label_filename,
out_dir=args.output_dirname)[:2]
matrix = matrix.as_matrix()[:, 1:]
np.save(matrix_filename, matrix)
np.save(labels_filename, labels)
else:
labels = np.load(labels_filename)
matrix = np.load(matrix_filename)
if args.verbose:
print('finding colormap...')
if not os.path.isfile(colormap_filename):
num_colors = len(labels)
colormap = distinguishable_colors(ncolors=num_colors,
plot_colormap=False,
save_csv=False,
out_dir=args.output_dirname)
np.save(colormap_filename, colormap)
else:
colormap = np.load(colormap_filename)
if args.verbose:
print('finding label colors')
if not os.path.isfile(colors_filename):
label_colors = colors.group_colors(colormap,
args.label_filename,
IDs=labels,
adjacency_matrix=matrix,
out_dir=args.output_dirname,
plot_colors=False,
plot_graphs=False)
np.save(colors_filename, label_colors)
|
staplelib/__init__.py
|
pivotman/stapler
| 186 |
100360
|
class CommandError(Exception):
"""
Exception class indicating a problem while executing a stapler command.
"""
pass
OPTIONS = None # optparse options
def main(arguments=None):
from . import stapler
stapler.main(arguments)
|
xfel/ui/components/timeit.py
|
dperl-sol/cctbx_project
| 155 |
100399
|
<filename>xfel/ui/components/timeit.py
from __future__ import absolute_import, division, print_function
import time, math
def now():
return "%02d:%02d:%02d" % (time.localtime().tm_hour, time.localtime().tm_min, time.localtime().tm_sec)
def duration(t1, t2):
diff = t2 - t1
seconds = int(math.floor(diff))
frac = diff - seconds
hh = seconds // 3600
mm = seconds // 60
if hh > 0:
mm = mm % 60
ss = seconds % 60
return "%02dh %02dm %fs" % (hh, mm, ss + frac)
|