max_stars_repo_path (string, lengths 4-245) | max_stars_repo_name (string, lengths 7-115) | max_stars_count (int64, 101-368k) | id (string, lengths 2-8) | content (string, lengths 6-1.03M) |
---|---|---|---|---|
module/M_drown.py | mehrdad-shokri/a2sv | 655 | 108968 |
<filename>module/M_drown.py<gh_stars>100-1000
import socket
from C_display import *
#Module
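# check_tls: send the pre-built TLSv1.0 ClientHello (the hex blob below) and
# return the raw bytes that follow the first TLS record carrying the ServerHello,
# i.e. the record that holds the server's Certificate message.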
def check_tls(hostname,port):
client_hello = '16030100d8010000d403037d408377c8e5204623867604ab0ee4a140043a4e383f770a1e6b66c2d45d34e820de8656a211d79fa9809e9ae6404bb7bcc372afcdd6f51882e39ac2241a8535090016c02bc02fc00ac009c013c01400330039002f0035000a0100007500000014001200000f7777772e65746973616c61742e6567ff01000100000a00080006001700180019000b00020100002300003374000000100017001502683208737064792f332e3108687474702f312e31000500050100000000000d001600140401050106010201040305030603020304020202'
s = socket.socket()
s.settimeout(5)
s.connect((hostname,int(port)))
s.send(client_hello.decode('hex'))
try:
data = s.recv(1024*1024)
except socket.timeout:
data = ''
if data:
server_hello_len = int(data[3:5].encode('hex'),16)
index = 5
index += server_hello_len
cert_msg = data[index:]
return cert_msg
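# m_drown_run: offer an SSLv2 ClientHello, parse the SSLv2 ServerHello to pull out
# the server certificate, then compare it with the certificate served over TLS
# (check_tls above), the shared-certificate condition this DROWN module reports on.
# Return codes: "0x01" SSLv2 accepted, "0x00" no SSLv2 response, "0x02" error.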
def m_drown_run(hostname,port,displayMode):
client_hello_payload = '803e0100020015001000100100800200800600400400800700c00800800500806161616161616161616161616161616161616161616161616161616161616161'
s = socket.socket()
s.settimeout(5)
s.connect((hostname,int(port)))
s.sendall(client_hello_payload.decode('hex'))
try:
server_hello = s.recv(10*1024)
except socket.timeout:
server_hello = ''
except socket.error:
showDisplay(displayMode," - [LOG] Exception")
return "0x02"
if server_hello:
try:
#parse incoming packet to extract the certificate
index = 0
length = server_hello[index:index+2].encode('hex')
index +=2
msg_type = server_hello[index].encode('hex')
index +=1
session_id = server_hello[index].encode('hex')
index +=1
cert_type = server_hello[index].encode('hex')
index +=1
ssl_version = server_hello[index:index+2]
index +=2
cert_len = int(server_hello[index:index+2].encode('hex'),16)
#showDisplay(displayMode,'cert_len',cert_len)
index +=2
cipher_spec_len = server_hello[index:index+2]
index +=2
conn_id = server_hello[index:index+2]
index +=2
cert = server_hello[index:cert_len+1]
data = check_tls(hostname,port)
if data:
showDisplay(displayMode," - [LOG] Check the TLS CERT")
showDisplay(displayMode," - [LOG] Check the SSLv2 CERT")
if cert.encode('hex') in data.encode('hex'):
showDisplay(displayMode," - [LOG] SSLv2 Enabled - Same cert")
return "0x01"
else:
showDisplay(displayMode," - [LOG] SSLv2 Enabled - Not same cert")
return "0x01"
except Exception as e:
showDisplay(displayMode,str(e))
return "0x02"
else:
showDisplay(displayMode," - [LOG] Not connected SSLv2")
return "0x00"
s.close()
|
accessify/__init__.py | dmytrostriletskyi/design-kit | 107 | 108994 |
from accessify.access import (
accessify,
private,
protected,
)
from accessify.interfaces import (
implements,
throws,
)
|
tests/test_provider_pmorillon_grid5000.py | mjuenema/python-terrascript | 507 | 108998 |
# tests/test_provider_pmorillon_grid5000.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:17:36 UTC)
def test_provider_import():
import terrascript.provider.pmorillon.grid5000
def test_resource_import():
from terrascript.resource.pmorillon.grid5000 import grid5000_ceph_pool
from terrascript.resource.pmorillon.grid5000 import grid5000_deployment
from terrascript.resource.pmorillon.grid5000 import grid5000_job
def test_datasource_import():
from terrascript.data.pmorillon.grid5000 import grid5000_ceph_auth
from terrascript.data.pmorillon.grid5000 import grid5000_node
from terrascript.data.pmorillon.grid5000 import grid5000_site
from terrascript.data.pmorillon.grid5000 import grid5000_vlan_nodelist
# TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.pmorillon.grid5000
#
# t = terrascript.provider.pmorillon.grid5000.grid5000()
# s = str(t)
#
# assert 'https://github.com/pmorillon/terraform-provider-grid5000' in s
# assert '0.0.7' in s
|
ghostwriter/commandcenter/migrations/0002_auto_20201009_1918.py | bbhunter/Ghostwriter | 601 | 109000 |
# Generated by Django 2.2.3 on 2020-10-09 19:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('commandcenter', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='cloudservicesconfiguration',
name='ignore_tag',
field=models.CharField(default='gw_ignore', help_text='Ghostwriter will ignore cloud assets with one of these tags (comma-separated list)', max_length=255, verbose_name='Ignore Tags'),
),
]
|
examples/starwars/data.py | radekwlsk/graphene-django | 4,038 | 109015 |
<filename>examples/starwars/data.py
from .models import Character, Faction, Ship
def initialize():
human = Character(name="Human")
human.save()
droid = Character(name="Droid")
droid.save()
rebels = Faction(id="1", name="Alliance to Restore the Republic", hero=human)
rebels.save()
empire = Faction(id="2", name="Galactic Empire", hero=droid)
empire.save()
xwing = Ship(id="1", name="X-Wing", faction=rebels)
xwing.save()
human.ship = xwing
human.save()
ywing = Ship(id="2", name="Y-Wing", faction=rebels)
ywing.save()
awing = Ship(id="3", name="A-Wing", faction=rebels)
awing.save()
# Yeah, technically it's Corellian. But it flew in the service of the rebels,
# so for the purposes of this demo it's a rebel ship.
falcon = Ship(id="4", name="Millenium Falcon", faction=rebels)
falcon.save()
homeOne = Ship(id="5", name="Home One", faction=rebels)
homeOne.save()
tieFighter = Ship(id="6", name="TIE Fighter", faction=empire)
tieFighter.save()
tieInterceptor = Ship(id="7", name="TIE Interceptor", faction=empire)
tieInterceptor.save()
executor = Ship(id="8", name="Executor", faction=empire)
executor.save()
def create_ship(ship_name, faction_id):
new_ship = Ship(name=ship_name, faction_id=faction_id)
new_ship.save()
return new_ship
def get_ship(_id):
return Ship.objects.get(id=_id)
def get_ships():
return Ship.objects.all()
def get_faction(_id):
return Faction.objects.get(id=_id)
def get_rebels():
return get_faction(1)
def get_empire():
return get_faction(2)
|
reckoner/meta.py | LynRodWS/reckoner | 192 | 109031 |
# -*- coding: utf-8 -*-
# pylint: skip-file
# Copyright 2019 FairwindsOps Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pkg_resources import get_distribution, DistributionNotFound
import re
__version_modifier__ = re.compile(r'^([0-9]+\.[0-9]+\.[0-9]+)\.(.*)$')
__distribution_name__ = 'reckoner'
try:
__version__ = re.sub(__version_modifier__, r'\g<1>-\g<2>', get_distribution(__distribution_name__).version)
except DistributionNotFound:
# Attempt to discover Version from pyinstaller data
from pkgutil import get_data
_raw_ver = get_data(__distribution_name__, 'version.txt').decode('UTF-8', 'ignore').rstrip("\r\n")
__version__ = re.sub(__version_modifier__, r'\g<1>-\g<2>', _raw_ver)
__author__ = 'FairwindsOps, Inc.'
|
tests/features/change_theme_on_legacy_widgets.py | dmalves/ttkbootstrap | 406 | 109041 |
import tkinter as tk
from ttkbootstrap import Style
from random import choice
root = tk.Tk()
root.minsize(500, 500)
style = Style('superhero')
def new_theme():
theme = choice(style.theme_names())
print(theme)
style.theme_use(theme)
btn = tk.Button(root, text='Primary')
btn.configure(command=new_theme)
btn.pack(padx=10, pady=10, fill=tk.BOTH, expand=tk.YES)
label = tk.Label(text="Hello world!")
label.pack(padx=10, pady=10)
text = tk.Text()
text.pack(padx=10, pady=10)
text.insert(tk.END, 'This is a demo of themes applied to regular tk widgets.')
frame = tk.Frame()
frame.pack(padx=10, pady=10, fill=tk.X)
cb1 = tk.Checkbutton(frame, text="Check 1")
cb1.pack(padx=10, pady=10, side=tk.LEFT)
cb1.invoke()
cb2 = tk.Checkbutton(frame, text="Check 2")
cb2.pack(padx=10, pady=10, side=tk.LEFT)
rb_var = tk.Variable(value=1)
rb1 = tk.Radiobutton(frame, text='Radio 1', value=1, variable=rb_var)
rb1.pack(padx=10, pady=10, side=tk.LEFT)
rb2 = tk.Radiobutton(frame, text='Radio 2', value=2, variable=rb_var)
rb2.pack(padx=10, pady=10, side=tk.LEFT)
frame2 = tk.LabelFrame(text="Items")
frame2.pack(padx=10, pady=10, fill=tk.X)
entry = tk.Entry(frame2)
entry.pack(padx=10, pady=10, side=tk.LEFT)
scale = tk.Scale(frame2, orient=tk.HORIZONTAL)
scale.set(25)
scale.pack(padx=10, pady=10, side=tk.LEFT)
sb = tk.Spinbox(frame2)
sb.pack(padx=10, pady=10, side=tk.LEFT)
lb = tk.Listbox(height=3)
lb.insert(tk.END, 'one', 'two', 'three')
lb.pack(padx=10, pady=10)
mb = tk.Menubutton(frame2, text="Hello world")
menu = tk.Menu(mb)
menu.add_checkbutton(label="Option 1")
menu.add_checkbutton(label="Option 2")
mb['menu'] = menu
mb.pack(padx=10, pady=10)
root.mainloop()
|
test/modules/redfish_auth.py | arunrordell/RackHD | 451 | 109054 |
from config.redfish1_0_config import config
from config.auth import *
from config.settings import *
from logger import Log
from json import loads, dumps
import pexpect
import pxssh
import subprocess
LOG = Log(__name__)
class Auth(object):
"""
Class to abstract python authentication functionality
"""
@staticmethod
def get_auth_token():
""" call /SessionService/Sessions to get auth_token """
resource_path = '/redfish/v1/SessionService/Sessions'
method = 'POST'
body_params = {
'UserName': 'admin',
'Password': '<PASSWORD>'
}
config.api_client.host = config.host_authed
config.api_client.call_api(resource_path, method, body=body_params)
return config.api_client.last_response.getheader('X-Auth-Token')
@staticmethod
def enable():
""" update config to enable auth """
if config.auth_enabled:
LOG.info('auth already enabled.')
config.api_client.default_headers['X-Auth-Token'] = Auth.get_auth_token()
config.api_client.host = config.host_authed + config.api_root
config.auth_enabled = True
LOG.info('Enable auth successfully.')
@staticmethod
def disable():
""" update config to disable auth """
if not config.auth_enabled:
LOG.info('auth already disabled.')
del config.api_client.default_headers['X-Auth-Token']
config.api_client.host = config.host + config.api_root
config.auth_enabled = False
LOG.info('Disable auth successfully.')
|
src/netius/test/middleware/proxy.py | timgates42/netius | 107 | 109057 |
<reponame>timgates42/netius<gh_stars>100-1000
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Netius System
# Copyright (c) 2008-2020 Hive Solutions Lda.
#
# This file is part of Hive Netius System.
#
# Hive Netius System is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Netius System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Netius System. If not, see <http://www.apache.org/licenses/>.
__author__ = "<NAME> <<EMAIL>>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2020 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import struct
import unittest
import netius.common
import netius.middleware
class ProxyMiddlewareTest(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.server = netius.Server(poll = netius.Poll)
self.server.poll.open()
def tearDown(self):
unittest.TestCase.tearDown(self)
self.server.cleanup()
def test_ipv4_v1(self):
instance = self.server.register_middleware(
netius.middleware.ProxyMiddleware
)
connection = netius.Connection(owner = self.server)
connection.open()
connection.restore(b"PROXY TCP4 192.168.1.1 192.168.1.2 32598 8080\r\n")
instance._proxy_handshake_v1(connection)
self.assertEqual(connection.address, ("192.168.1.1", 32598))
self.assertEqual(len(connection.restored), 0)
def test_ipv6_v1(self):
instance = self.server.register_middleware(
netius.middleware.ProxyMiddleware
)
connection = netius.Connection(owner = self.server)
connection.open()
connection.restore(b"PROXY TCP4 fe80::787f:f63f:3176:d61b fe80::787f:f63f:3176:d61c 32598 8080\r\n")
instance._proxy_handshake_v1(connection)
self.assertEqual(connection.address, ("fe80::787f:f63f:3176:d61b", 32598))
self.assertEqual(len(connection.restored), 0)
def test_starter_v1(self):
self.server.register_middleware(
netius.middleware.ProxyMiddleware
)
connection = netius.Connection(owner = self.server)
connection.open()
connection.restore(b"PROXY TCP4 192.168.1.1 192.168.1.2 32598 8080\r\n")
connection.run_starter()
self.assertEqual(connection.address, ("192.168.1.1", 32598))
self.assertEqual(connection.restored_s, 0)
self.assertEqual(len(connection.restored), 0)
connection = netius.Connection(owner = self.server)
connection.open()
connection.restore(b"PROXY TCP4 192.168.1.3 ")
connection.restore(b"192.168.1.4 32598 8080\r\n")
connection.run_starter()
self.assertEqual(connection.address, ("192.168.1.3", 32598))
self.assertEqual(connection.restored_s, 0)
self.assertEqual(len(connection.restored), 0)
connection = netius.Connection(owner = self.server)
connection.open()
connection.restore(b"PROXY TCP4 192.168.1.3 ")
connection.restore(b"192.168.1.4 32598 8080\r\nGET")
connection.restore(b" / HTTP/1.0\r\n\r\n")
connection.run_starter()
self.assertEqual(connection.address, ("192.168.1.3", 32598))
self.assertEqual(connection.restored_s, 18)
self.assertEqual(len(connection.restored), 2)
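# test_starter_v2 hand-packs a PROXY protocol v2 preamble: the 12-byte magic, one
# version/command byte, one address-family/protocol byte and a 2-byte body length,
# followed by the packed IPv4 source/destination addresses and ports.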
def test_starter_v2(self):
self.server.register_middleware(
netius.middleware.ProxyMiddleware, version = 2
)
connection = netius.Connection(owner = self.server)
connection.open()
body = struct.pack(
"!IIHH",
netius.common.ip4_to_addr("192.168.1.1"),
netius.common.ip4_to_addr("192.168.1.2"),
32598,
8080
)
header = struct.pack(
"!12sBBH",
netius.middleware.ProxyMiddleware.HEADER_MAGIC_V2,
(2 << 4) + (netius.middleware.ProxyMiddleware.TYPE_PROXY_V2),
(netius.middleware.ProxyMiddleware.AF_INET_v2 << 4) + (netius.middleware.ProxyMiddleware.PROTO_STREAM_v2),
len(body)
)
connection.restore(header)
connection.restore(body)
connection.run_starter()
self.assertEqual(connection.address, ("192.168.1.1", 32598))
self.assertEqual(connection.restored_s, 0)
self.assertEqual(len(connection.restored), 0)
|
python/test/resource/test_dataset_mixed.py | Fosstack/vmaf | 2,874 | 109074 |
<gh_stars>1000+
dataset_name = 'example'
quality_width = 1920
quality_height = 1080
from vmaf.config import VmafConfig
ref_videos = [
{'content_id': 0, 'path': VmafConfig.resource_path('yuv', 'checkerboard_1920_1080_10_3_0_0.yuv'), 'yuv_fmt': 'yuv420p', 'width': 1920, 'height': 1080},
{'content_id': 1, 'path': VmafConfig.resource_path('yuv', 'flat_1920_1080_0.yuv'), 'yuv_fmt': 'yuv420p', 'width': 720, 'height': 480},
]
dis_videos = [
{'content_id': 0, 'asset_id': 0, 'dmos': 100, 'path': VmafConfig.resource_path('yuv', 'checkerboard_1920_1080_10_3_0_0.yuv'), 'yuv_fmt': 'yuv420p', 'width': 1920, 'height': 1080}, # ref
{'content_id': 0, 'asset_id': 1, 'dmos': 50, 'path': VmafConfig.resource_path('yuv', 'checkerboard_1920_1080_10_3_1_0.264'), 'yuv_fmt': 'notyuv',},
{'content_id': 1, 'asset_id': 2, 'dmos': 100, 'path': VmafConfig.resource_path('yuv', 'flat_1920_1080_0.yuv'), 'yuv_fmt': 'yuv420p', 'width': 720, 'height': 480}, # ref
{'content_id': 1, 'asset_id': 3, 'dmos': 80, 'path': VmafConfig.resource_path('yuv', 'flat_1920_1080_10.264'), 'yuv_fmt': 'notyuv',},
]
|
yacv/abstractsyntaxtree.py | amitpatra/yacv | 120 | 109091 |
from yacv.grammar import Production
from yacv.constants import YACV_EPSILON
from pprint import pformat
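# An AbstractSyntaxTree node is built either empty, from a Production (root is the
# LHS, one child node per RHS symbol), or from a single grammar-symbol string.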
class AbstractSyntaxTree(object):
def __init__(self, *args):
if len(args) == 0:
self.root = None
self.desc = []
self.prod_id = None
self.node_id = None
if len(args) == 1:
if isinstance(args[0], Production):
self.root = args[0].lhs
desc = []
for symbol in args[0].rhs:
desc.append(AbstractSyntaxTree(symbol))
self.desc = desc
self.prod_id = None
elif isinstance(args[0], str):
self.root = args[0]
self.desc = []
self.prod_id = None
def __str__(self):
return '{}->{}'.format(self.root, pformat(self.desc))
def __repr__(self):
return str(self)
|
micro-benchmark/snippets/external/cls_parent/main.py | WenJinfeng/PyCG | 121 | 109096 |
from ext import parent
class A(parent):
def fn(self):
self.parent_fn()
a = A()
a.fn()
|
stereomag/rank.py | Dawars/stereo-magnification | 348 | 109100 |
<reponame>Dawars/stereo-magnification
#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Quantitative evaluation of view synthesis results.
Read in dumped json data and compute various statistics.
"""
import os
import json
import numpy as np
from scipy.stats import wilcoxon
from scipy.stats.mstats import rankdata
from pyglib import app
from pyglib import flags
FLAGS = flags.FLAGS
flags.DEFINE_string('root', 'evaluation', 'Evaluation directory')
flags.DEFINE_string(
'model_names',
'v4_1024,v4_1024_alpha,v4_1024_singleRGB,v4_1024_fgbg,v4_1024_all',
'model names')
flags.DEFINE_string('data_split', 'test', 'split of the data')
flags.DEFINE_string('stats', 'mean,rank,diff,wilcoxon',
'which stats to compute')
def load_data(root, model):
with open(root + '/json/' + model + '.json') as f:
data = json.load(f)
return data
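# merge_into accumulates the per-example ssim/psnr columns of each model's JSON
# dump into one shared table and checks that every file covers the same examples.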
def merge_into(data, d):
if data == {}:
data['models'] = []
data['examples'] = d['examples']
n = len(data['examples'])
data['ssim'] = [[]] * n
data['psnr'] = [[]] * n
for m in d['model_names']:
assert m not in data['models']
data['models'].append(str(m))
assert d['examples'] == data['examples']
assert len(data['ssim']) == len(d['ssim'])
assert len(data['psnr']) == len(d['psnr'])
data['ssim'] = [a + b for (a, b) in zip(data['ssim'], d['ssim'])]
data['psnr'] = [a + b for (a, b) in zip(data['psnr'], d['psnr'])]
def compute_mean(data):
print '\nMEAN + STD\n'
ssim = np.array(data['ssim'])
psnr = np.array(data['psnr'])
for i, m in enumerate(data['models']):
print '%30s ssim %.3f ± %.3f psnr %.2f ± %.2f' % (
m, np.mean(ssim[:, i]), np.std(ssim[:, i]), np.mean(psnr[:, i]),
np.std(psnr[:, i]))
def compute_rank(data):
print '\nRANK\n'
# rankdata assigns rank 1 to the lowest element, so
# we need to negate before ranking.
ssim_rank = rankdata(np.array(data['ssim']) * -1.0, axis=1)
psnr_rank = rankdata(np.array(data['psnr']) * -1.0, axis=1)
# Rank mean + std.
for i, m in enumerate(data['models']):
print '%30s ssim-rank %.2f ± %.2f psnr-rank %.2f ± %.2f' % (
m, np.mean(ssim_rank[:, i]), np.std(ssim_rank[:, i]),
np.mean(psnr_rank[:, i]), np.std(psnr_rank[:, i]))
# Rank frequencies
print '\n SSIM rank freqs'
print_rank_freqs(data, ssim_rank)
print '\n PSNR rank freqs'
print_rank_freqs(data, psnr_rank)
def print_rank_freqs(data, rank):
e = len(data['examples'])
m = len(data['models'])
freqs = []
for i in range(m):
one_rank = np.count_nonzero(
np.logical_and(np.less_equal(i + 1.0, rank), np.less(rank, i + 2.0)),
axis=0) * 1.0 / e
freqs.append(one_rank)
freqs = np.array(freqs)
print '%30s %s' % ('', ''.join('%4.0f ' % (x + 1) for x in range(m)))
for i, m in enumerate(data['models']):
print '%30s %s' % (m, ''.join(
'%4.0f%%' % (100 * x) for x in freqs[:, i]))
def compute_diff(data):
print '\nDIFF\n'
# We take the first model as the best!
ssim = np.array(data['ssim'])
psnr = np.array(data['psnr'])
ssim_diff = ssim - ssim[:, 0:1]
psnr_diff = psnr - psnr[:, 0:1]
for i, m in enumerate(data['models']):
print '%30s ssim-diff %.3f ± %.3f psnr-diff %.2f ± %.2f' % (
m, np.mean(ssim_diff[:, i]), np.std(ssim_diff[:, i]),
np.mean(psnr_diff[:, i]), np.std(psnr_diff[:, i]))
def compute_wilcoxon(data):
print '\nWILCOXON SIGNED-RANK TEST\n'
# We take the first model as the basis for each comparison.
ssim = np.array(data['ssim'])
psnr = np.array(data['psnr'])
for i, m in enumerate(data['models']):
if i == 0:
print ' [differences from %s]' % m
continue
ssim_v, ssim_p = wilcoxon(ssim[:, i], ssim[:, 0])
psnr_v, psnr_p = wilcoxon(psnr[:, i], psnr[:, 0])
print '%30s ssim %.3f, p %.1e psnr %.2f, p %.1e' % (m, ssim_v, ssim_p,
psnr_v, psnr_p)
def main(_):
stats = FLAGS.stats.split(',')
root = FLAGS.root
model_names = FLAGS.model_names.split(',')
data = {}
for m in model_names:
d = load_data(root, m)
merge_into(data, d)
print '\nLOADED %d models, %d examples' % (len(data['models']),
len(data['examples']))
if 'mean' in stats:
compute_mean(data)
if 'rank' in stats:
compute_rank(data)
if 'diff' in stats:
compute_diff(data)
if 'wilcoxon' in stats:
compute_wilcoxon(data)
print
if __name__ == '__main__':
app.run()
|
esphome/components/tcs34725/sensor.py | OttoWinter/esphomeyaml | 249 | 109147 |
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import i2c, sensor
from esphome.const import (
CONF_COLOR_TEMPERATURE,
CONF_GAIN,
CONF_ID,
CONF_ILLUMINANCE,
CONF_GLASS_ATTENUATION_FACTOR,
CONF_INTEGRATION_TIME,
DEVICE_CLASS_ILLUMINANCE,
ICON_LIGHTBULB,
STATE_CLASS_MEASUREMENT,
UNIT_PERCENT,
ICON_THERMOMETER,
UNIT_KELVIN,
UNIT_LUX,
)
DEPENDENCIES = ["i2c"]
CONF_RED_CHANNEL = "red_channel"
CONF_GREEN_CHANNEL = "green_channel"
CONF_BLUE_CHANNEL = "blue_channel"
CONF_CLEAR_CHANNEL = "clear_channel"
tcs34725_ns = cg.esphome_ns.namespace("tcs34725")
TCS34725Component = tcs34725_ns.class_(
"TCS34725Component", cg.PollingComponent, i2c.I2CDevice
)
TCS34725IntegrationTime = tcs34725_ns.enum("TCS34725IntegrationTime")
TCS34725_INTEGRATION_TIMES = {
"auto": TCS34725IntegrationTime.TCS34725_INTEGRATION_TIME_AUTO,
"2.4ms": TCS34725IntegrationTime.TCS34725_INTEGRATION_TIME_2_4MS,
"24ms": TCS34725IntegrationTime.TCS34725_INTEGRATION_TIME_24MS,
"50ms": TCS34725IntegrationTime.TCS34725_INTEGRATION_TIME_50MS,
"101ms": TCS34725IntegrationTime.TCS34725_INTEGRATION_TIME_101MS,
"120ms": TCS34725IntegrationTime.TCS34725_INTEGRATION_TIME_120MS,
"154ms": TCS34725IntegrationTime.TCS34725_INTEGRATION_TIME_154MS,
"180ms": TCS34725IntegrationTime.TCS34725_INTEGRATION_TIME_180MS,
"199ms": TCS34725IntegrationTime.TCS34725_INTEGRATION_TIME_199MS,
"240ms": TCS34725IntegrationTime.TCS34725_INTEGRATION_TIME_240MS,
"300ms": TCS34725IntegrationTime.TCS34725_INTEGRATION_TIME_300MS,
"360ms": TCS34725IntegrationTime.TCS34725_INTEGRATION_TIME_360MS,
"401ms": TCS34725IntegrationTime.TCS34725_INTEGRATION_TIME_401MS,
"420ms": TCS34725IntegrationTime.TCS34725_INTEGRATION_TIME_420MS,
"480ms": TCS34725IntegrationTime.TCS34725_INTEGRATION_TIME_480MS,
"499ms": TCS34725IntegrationTime.TCS34725_INTEGRATION_TIME_499MS,
"540ms": TCS34725IntegrationTime.TCS34725_INTEGRATION_TIME_540MS,
"600ms": TCS34725IntegrationTime.TCS34725_INTEGRATION_TIME_600MS,
"614ms": TCS34725IntegrationTime.TCS34725_INTEGRATION_TIME_614MS,
}
TCS34725Gain = tcs34725_ns.enum("TCS34725Gain")
TCS34725_GAINS = {
"1X": TCS34725Gain.TCS34725_GAIN_1X,
"4X": TCS34725Gain.TCS34725_GAIN_4X,
"16X": TCS34725Gain.TCS34725_GAIN_16X,
"60X": TCS34725Gain.TCS34725_GAIN_60X,
}
color_channel_schema = sensor.sensor_schema(
unit_of_measurement=UNIT_PERCENT,
icon=ICON_LIGHTBULB,
accuracy_decimals=1,
state_class=STATE_CLASS_MEASUREMENT,
)
color_temperature_schema = sensor.sensor_schema(
unit_of_measurement=UNIT_KELVIN,
icon=ICON_THERMOMETER,
accuracy_decimals=1,
state_class=STATE_CLASS_MEASUREMENT,
)
illuminance_schema = sensor.sensor_schema(
unit_of_measurement=UNIT_LUX,
accuracy_decimals=1,
device_class=DEVICE_CLASS_ILLUMINANCE,
state_class=STATE_CLASS_MEASUREMENT,
)
CONFIG_SCHEMA = (
cv.Schema(
{
cv.GenerateID(): cv.declare_id(TCS34725Component),
cv.Optional(CONF_RED_CHANNEL): color_channel_schema,
cv.Optional(CONF_GREEN_CHANNEL): color_channel_schema,
cv.Optional(CONF_BLUE_CHANNEL): color_channel_schema,
cv.Optional(CONF_CLEAR_CHANNEL): color_channel_schema,
cv.Optional(CONF_ILLUMINANCE): illuminance_schema,
cv.Optional(CONF_COLOR_TEMPERATURE): color_temperature_schema,
cv.Optional(CONF_INTEGRATION_TIME, default="auto"): cv.enum(
TCS34725_INTEGRATION_TIMES, lower=True
),
cv.Optional(CONF_GAIN, default="1X"): cv.enum(TCS34725_GAINS, upper=True),
cv.Optional(CONF_GLASS_ATTENUATION_FACTOR, default=1.0): cv.float_range(
min=1.0
),
}
)
.extend(cv.polling_component_schema("60s"))
.extend(i2c.i2c_device_schema(0x29))
)
async def to_code(config):
var = cg.new_Pvariable(config[CONF_ID])
await cg.register_component(var, config)
await i2c.register_i2c_device(var, config)
cg.add(var.set_integration_time(config[CONF_INTEGRATION_TIME]))
cg.add(var.set_gain(config[CONF_GAIN]))
cg.add(var.set_glass_attenuation_factor(config[CONF_GLASS_ATTENUATION_FACTOR]))
if CONF_RED_CHANNEL in config:
sens = await sensor.new_sensor(config[CONF_RED_CHANNEL])
cg.add(var.set_red_sensor(sens))
if CONF_GREEN_CHANNEL in config:
sens = await sensor.new_sensor(config[CONF_GREEN_CHANNEL])
cg.add(var.set_green_sensor(sens))
if CONF_BLUE_CHANNEL in config:
sens = await sensor.new_sensor(config[CONF_BLUE_CHANNEL])
cg.add(var.set_blue_sensor(sens))
if CONF_CLEAR_CHANNEL in config:
sens = await sensor.new_sensor(config[CONF_CLEAR_CHANNEL])
cg.add(var.set_clear_sensor(sens))
if CONF_ILLUMINANCE in config:
sens = await sensor.new_sensor(config[CONF_ILLUMINANCE])
cg.add(var.set_illuminance_sensor(sens))
if CONF_COLOR_TEMPERATURE in config:
sens = await sensor.new_sensor(config[CONF_COLOR_TEMPERATURE])
cg.add(var.set_color_temperature_sensor(sens))
|
vnpy/gateway/deribit/__init__.py | ChaunceyDong/vnpy | 19,529 | 109173 |
<gh_stars>1000+
from vnpy_deribit import DeribitGateway
|
deepy/tensor/__init__.py | uaca/deepy | 260 | 109184 |
<reponame>uaca/deepy<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from wrapper import deepy_tensor
from functions import concat, concatenate, reverse, ifelse, apply, repeat, var, vars, activate, is_neural_var, is_theano_var
from onehot import onehot_tensor, onehot
import theano_nnet_imports as nnet
import costs as costs
from theano_imports import *
|
test/IECore/TypedDataAsObject.py | bradleyhenke/cortex | 386 | 109226 |
##########################################################################
#
# Copyright (c) 2007, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
class TestTypedDataAsObject( unittest.TestCase ) :
def testSimpleCopy( self ) :
o = IECore.IntData( 1 )
self.assertEqual( o.value, 1 )
self.assertEqual( o, o )
o.value = 2
self.assertEqual( o.value, 2 )
oo = o.copy()
self.assertEqual( oo.value, 2 )
self.assertEqual( o, oo )
o.value = 3
self.assertEqual( o.value, 3 )
self.assertEqual( oo.value, 2 )
self.assertNotEqual( o, oo )
oo.value = 4
self.assertEqual( o.value, 3 )
self.assertEqual( oo.value, 4 )
self.assertNotEqual( o, oo )
def testCompoundCopy( self ) :
"""CompoundData must specialise the copyFrom() method
to ensure that a genuine deep copy of the data is produced.
This tests that."""
a = IECore.IntData( 2 )
self.assertEqual( a.value, 2 )
self.assertEqual( a, a )
c = IECore.CompoundData()
c["A"] = a
self.assertEqual( c["A"].value, 2 )
a.value = 3
self.assertEqual( a.value, 3 )
self.assertEqual( c["A"].value, 3 )
self.assertTrue( a.isSame( c["A"] ) )
cc = c.copy()
self.assertTrue( a.isSame( c["A"] ) )
self.assertTrue( not a.isSame( cc["A"] ) )
self.assertEqual( c, cc )
a.value = 10
self.assertEqual( a.value, 10 )
self.assertEqual( c["A"].value, 10 )
self.assertEqual( cc["A"].value, 3 )
self.assertNotEqual( c, cc )
cc["A"].value = 100
self.assertEqual( cc["A"].value, 100 )
self.assertEqual( c["A"].value, 10 )
self.assertEqual( a.value, 10 )
self.assertNotEqual( c, cc )
a.value = 100
self.assertEqual( c, cc )
if __name__ == "__main__":
unittest.main()
|
dataviva/api/hedu/services.py | joelvisroman/dataviva-site | 126 | 109265 |
from dataviva.api.hedu.models import Ybu, Ybc_hedu, Yu, Yuc, Yc_hedu, Ybuc
from dataviva.api.attrs.models import University as uni, Course_hedu, Bra
from dataviva import db
from sqlalchemy.sql.expression import func, desc, not_
class University:
def __init__(self, university_id):
self._hedu = None
self._hedu_sorted_by_enrolled = None
self._hedu_sorted_by_entrants = None
self._hedu_sorted_by_graduates = None
self.university_id = university_id
if university_id is None:
self.max_year_query = db.session.query(func.max(Yu.year))
self.hedu_query = Yu.query.filter(Yu.year == self.max_year_query)
else:
self.max_year_query = db.session.query(
func.max(Yu.year)).filter_by(university_id=university_id)
self.hedu_query = Yu.query.filter(
Yu.university_id == self.university_id,
Yu.year == self.max_year_query)
def __hedu__(self):
if not self._hedu:
hedu_data = self.hedu_query.first_or_404()
self._hedu = hedu_data
return self._hedu
def __hedu_list__(self):
if not self._hedu:
hedu_data = self.hedu_query.all()
self._hedu = hedu_data
return self._hedu
def __hedu_sorted_by_enrolled__(self):
if not self._hedu_sorted_by_enrolled:
self._hedu_sorted_by_enrolled = self.__hedu_list__()
self._hedu_sorted_by_enrolled.sort(
key=lambda hedu: hedu.enrolled, reverse=True)
return self._hedu_sorted_by_enrolled
def __hedu_sorted_by_entrants__(self):
if not self._hedu_sorted_by_entrants:
self._hedu_sorted_by_entrants = self.__hedu_list__()
self._hedu_sorted_by_entrants.sort(
key=lambda hedu: hedu.entrants, reverse=True)
return self._hedu_sorted_by_entrants
def __hedu_sorted_by_graduates__(self):
if not self._hedu_sorted_by_graduates:
self._hedu_sorted_by_graduates = self.__hedu_list__()
self._hedu_sorted_by_graduates.sort(
key=lambda hedu: hedu.graduates, reverse=True)
return self._hedu_sorted_by_graduates
def name(self):
return self.__hedu__().university.name()
def university_type(self):
return self.__hedu__().university.school_type()
def enrolled(self):
return self.__hedu__().enrolled
def entrants(self):
return self.__hedu__().entrants
def graduates(self):
return self.__hedu__().graduates
def profile(self):
return self.__hedu__().university.desc_pt
def year(self):
return self.max_year_query.first()[0]
def highest_enrolled_number(self):
hedu = self.__hedu_sorted_by_enrolled__()[0]
return hedu.enrolled
def highest_entrants_number(self):
hedu = self.__hedu_sorted_by_entrants__()[0]
return hedu.entrants
def highest_graduates_number(self):
hedu = self.__hedu_sorted_by_graduates__()[0]
return hedu.graduates
def highest_enrolled_by_university(self):
hedu_list = self.__hedu_sorted_by_enrolled__()
if len(hedu_list) != 0:
hedu = hedu_list[0]
return hedu.enrolled
else:
return None
def highest_enrolled_by_university_name(self):
hedu_list = self.__hedu_sorted_by_enrolled__()
if len(hedu_list) != 0:
hedu = hedu_list[0]
return hedu.university.name()
else:
return None
class UniversityMajors(University):
def __init__(self, university_id):
University.__init__(self, university_id)
self.max_year_query = db.session.query(func.max(Yuc.year))
self.hedu_query = Yuc.query.filter(
Yuc.university_id == self.university_id,
Yuc.year == self.max_year_query,
func.length(Yuc.course_hedu_id) == 6)
def major_with_more_enrollments(self):
hedu = self.__hedu_sorted_by_enrolled__()[0]
return hedu.course_hedu.name()
def major_with_more_entrants(self):
hedu = self.__hedu_sorted_by_entrants__()[0]
return hedu.course_hedu.name()
def major_with_more_graduates(self):
hedu = self.__hedu_sorted_by_graduates__()[0]
return hedu.course_hedu.name()
class Major:
def __init__(self, course_hedu_id, bra_id):
self._hedu = None
self._hedu_sorted_by_enrolled = None
self._hedu_sorted_by_entrants = None
self._hedu_sorted_by_graduates = None
self._hedu_major_rank = None
self.course_hedu_id = course_hedu_id
self.bra_id = bra_id
if course_hedu_id is None and bra_id is None:
self.max_year_query = db.session.query(func.max(Yc_hedu.year))
self.hedu_query = Ybc_hedu.query.filter(Ybc_hedu.year == self.max_year_query)
else:
self.max_year_query = db.session.query(
func.max(Yc_hedu.year)).filter_by(course_hedu_id=course_hedu_id)
if bra_id != '':
self.hedu_query = Ybc_hedu.query.filter(
Ybc_hedu.course_hedu_id == self.course_hedu_id,
Ybc_hedu.bra_id == self.bra_id,
Ybc_hedu.year == self.max_year_query)
else:
self.hedu_query = Yc_hedu.query.filter(
Yc_hedu.course_hedu_id == self.course_hedu_id,
Yc_hedu.year == self.max_year_query)
def __hedu__(self):
if not self._hedu:
hedu_data = self.hedu_query.first_or_404()
self._hedu = hedu_data
return self._hedu
def __hedu_list__(self):
if not self._hedu:
hedu_data = self.hedu_query.all()
self._hedu = hedu_data
return self._hedu
def __hedu_sorted_by_enrolled__(self):
if not self._hedu_sorted_by_enrolled:
self._hedu_sorted_by_enrolled = self.__hedu_list__()
self._hedu_sorted_by_enrolled.sort(
key=lambda hedu: hedu.enrolled, reverse=True)
return self._hedu_sorted_by_enrolled
def __hedu_sorted_by_entrants__(self):
if not self._hedu_sorted_by_entrants:
self._hedu_sorted_by_entrants = self.__hedu_list__()
self._hedu_sorted_by_entrants.sort(
key=lambda hedu: hedu.entrants, reverse=True)
return self._hedu_sorted_by_entrants
def __hedu_sorted_by_graduates__(self):
if not self._hedu_sorted_by_graduates:
self._hedu_sorted_by_graduates = self.__hedu_list__()
self._hedu_sorted_by_graduates.sort(
key=lambda hedu: hedu.graduates, reverse=True)
return self._hedu_sorted_by_graduates
def name(self):
return self.__hedu__().course_hedu.name()
def enrolled(self):
return self.__hedu__().enrolled
def entrants(self):
return self.__hedu__().entrants
def graduates(self):
return self.__hedu__().graduates
def profile(self):
return self.__hedu__().course_hedu.desc_pt
def year(self):
return self.__hedu__().year
def highest_enrolled_number(self):
hedu = self.__hedu_sorted_by_enrolled__()[0]
return hedu.enrolled
def highest_entrants_number(self):
hedu = self.__hedu_sorted_by_entrants__()[0]
return hedu.entrants
def highest_graduates_number(self):
hedu = self.__hedu_sorted_by_graduates__()[0]
return hedu.graduates
def location_name(self):
return Bra.query.filter(Bra.id == self.bra_id).first().name()
def highest_enrolled_by_major(self):
hedu_list = self.__hedu_sorted_by_enrolled__()
if len(hedu_list) != 0:
hedu = hedu_list[0]
return hedu.enrolled
else:
return None
def highest_enrolled_by_major_name(self):
hedu_list = self.__hedu_sorted_by_enrolled__()
if len(hedu_list) != 0:
hedu = hedu_list[0]
return hedu.course_hedu.name()
else:
return None
class MajorUniversities(Major):
def __init__(self, course_hedu_id, bra_id):
Major.__init__(self, course_hedu_id, bra_id)
self.course_hedu_id = course_hedu_id
self.max_year_query = db.session.query(
func.max(Yuc.year)).filter_by(course_hedu_id=course_hedu_id)
if bra_id == '':
self.hedu_query = Yuc.query.filter(
Yuc.course_hedu_id == self.course_hedu_id,
Yuc.year == self.max_year_query)
else:
self.hedu_query = Ybuc.query.filter(
Ybuc.course_hedu_id == self.course_hedu_id,
Ybuc.bra_id == self.bra_id,
Ybuc.year == self.max_year_query)
def university_with_more_enrolled(self):
hedu = self.__hedu_sorted_by_enrolled__()[0]
return hedu.university.name()
def university_with_more_entrants(self):
hedu = self.__hedu_sorted_by_entrants__()[0]
return hedu.university.name()
def university_with_more_graduates(self):
hedu = self.__hedu_sorted_by_graduates__()[0]
return hedu.university.name()
class MajorMunicipalities(Major):
def __init__(self, course_hedu_id, bra_id):
Major.__init__(self, course_hedu_id, bra_id)
self.course_hedu_id = course_hedu_id
self.max_year_query = db.session.query(
func.max(Ybc_hedu.year)).filter_by(course_hedu_id=course_hedu_id)
if bra_id == '':
self.hedu_query = Ybc_hedu.query.filter(
Ybc_hedu.course_hedu_id == self.course_hedu_id,
Ybc_hedu.year == self.max_year_query,
not_(Ybc_hedu.bra_id.like('0xx%')),
func.length(Ybc_hedu.bra_id) == 9)
else:
self.hedu_query = Ybc_hedu.query.filter(
Ybc_hedu.course_hedu_id == self.course_hedu_id,
Ybc_hedu.year == self.max_year_query,
Ybc_hedu.bra_id.like(self.bra_id+'%'),
not_(Ybc_hedu.bra_id.like('0xx%')),
func.length(Ybc_hedu.bra_id) == 9)
def municipality_with_more_enrolled(self):
hedu = self.__hedu_sorted_by_enrolled__()[0]
return hedu.bra.name()
def municipality_with_more_enrolled_state(self):
hedu = self.__hedu_sorted_by_enrolled__()[0]
return hedu.bra.abbreviation
def municipality_with_more_entrants(self):
hedu = self.__hedu_sorted_by_entrants__()[0]
return hedu.bra.name()
def municipality_with_more_entrants_state(self):
hedu = self.__hedu_sorted_by_entrants__()[0]
return hedu.bra.abbreviation
def municipality_with_more_graduates(self):
hedu = self.__hedu_sorted_by_graduates__()[0]
return hedu.bra.name()
def municipality_with_more_graduates_state(self):
hedu = self.__hedu_sorted_by_graduates__()[0]
return hedu.bra.abbreviation
class LocationUniversity:
def __init__(self, bra_id):
self._hedu_sorted_by_enrolled = None
self._hedu = None
self.bra_id = bra_id
self.max_year_query = db.session.query(
func.max(Ybu.year)).filter_by(bra_id=bra_id)
self.hedu_query = Ybu.query.join(uni).filter(
Ybu.bra_id == self.bra_id,
Ybu.year == self.max_year_query)
def __hedu__(self):
if not self._hedu:
hedu_data = self.hedu_query.one()
self._hedu = hedu_data
return self._hedu
def __hedu_list__(self):
if not self._hedu:
hedu_data = self.hedu_query.all()
self._hedu = hedu_data
return self._hedu
def __hedu_sorted_by_enrolled__(self):
if not self._hedu_sorted_by_enrolled:
self._hedu_sorted_by_enrolled = self.__hedu_list__()
self._hedu_sorted_by_enrolled.sort(
key=lambda hedu: hedu.enrolled, reverse=True)
return self._hedu_sorted_by_enrolled
def year(self):
return self.max_year_query.first()[0]
def highest_enrolled_by_university(self):
hedu_list = self.__hedu_sorted_by_enrolled__()
if len(hedu_list) != 0:
hedu = hedu_list[0]
return hedu.enrolled
else:
return None
def highest_enrolled_by_university_name(self):
hedu_list = self.__hedu_sorted_by_enrolled__()
if len(hedu_list) != 0:
hedu = hedu_list[0]
return hedu.university.name()
else:
return None
class LocationMajor(LocationUniversity):
def __init__(self, bra_id):
LocationUniversity.__init__(self, bra_id)
self._hedu = None
self.bra_id = bra_id
self.max_year_query = db.session.query(
func.max(Ybc_hedu.year)).filter_by(bra_id=bra_id)
self.hedu_query = Ybc_hedu.query.join(Course_hedu).filter(
Ybc_hedu.bra_id == self.bra_id,
Ybc_hedu.course_hedu_id_len == 6,
Ybc_hedu.year == self.max_year_query)
def highest_enrolled_by_major(self):
hedu_list = self.__hedu_sorted_by_enrolled__()
if len(hedu_list) != 0:
hedu = hedu_list[0]
return hedu.enrolled
else:
return None
def highest_enrolled_by_major_name(self):
hedu_list = self.__hedu_sorted_by_enrolled__()
if len(hedu_list) != 0:
hedu = hedu_list[0]
return hedu.course_hedu.name()
else:
return None
|
tt/tests/unit/definitions/test_boolean_inputs_factory.py | fkromer/tt | 233 | 109322 |
"""Tests for boolean_inputs_factory method."""
import unittest
from tt.definitions import boolean_variables_factory
class TestBooleanInputsFactory(unittest.TestCase):
def test_str_methods(self):
"""Test converting to string via __str__ and __repr__."""
factory = boolean_variables_factory(['A', 'B', 'C', 'D'])
instance = factory(A=1, B=False, C=True, D=0)
self.assertEqual(str(instance), 'A=1, B=0, C=1, D=0')
self.assertEqual(repr(instance),
'<BooleanValues [A=1, B=0, C=1, D=0]>')
def test_attr_access(self):
"""Test attribute access."""
factory = boolean_variables_factory(['op1', 'op2', 'op3'])
instance = factory(op1=1, op2=False, op3=True)
self.assertEqual(instance.op1, 1)
self.assertEqual(instance.op2, False)
self.assertEqual(instance.op3, True)
|
packages/pyright-internal/src/tests/samples/tryExcept7.py | martindemello/pyright | 4,391 | 109355 |
# This sample tests the syntax handling for Python 3.11 exception groups
# as described in PEP 654.
def func1():
try:
pass
# This should generate an error if using Python 3.10 or earlier.
except* BaseException:
pass
# This should generate an error if using Python 3.10 or earlier.
except*:
pass
|
SRT/lib/xvision/style_trans.py | yerang823/landmark-detection | 612 | 109385 |
<gh_stars>100-1000
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
#
from PIL import Image, ImageOps
import numpy as np
import random
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
class AugStyle(object):
def __init__(self):
self.contrast = iaa.ContrastNormalization((0.5, 1.5), per_channel=0.3)
self.mulaug = iaa.Multiply((0.8, 1.2), per_channel=0.3)
self.grayaug = iaa.Grayscale(alpha=(0.0, 1.0))
self.sharpen = iaa.Sharpen(alpha=(0.0, 1.0), lightness=(0.75, 2.0))
self.coloraug = iaa.Sequential([
iaa.ChangeColorspace(from_colorspace="RGB", to_colorspace="HSV"),
iaa.WithChannels(0, iaa.Add((-50, 50))),
iaa.WithChannels(1, iaa.Add((-50, 50))),
iaa.WithChannels(2, iaa.Add((-50, 50))),
iaa.ChangeColorspace(from_colorspace="HSV", to_colorspace="RGB")
])
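# augment: contrast, multiply, grayscale and sharpen are each applied independently
# with probability 0.6; sharpening is additionally forced when none of the earlier
# ones fired, so every image is altered at least once (the HSV colour shift defined
# above is currently disabled in augment).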
def augment(self, pic):
assert isinstance(pic, Image.Image)
image = np.array(pic)
aug = False
if random.random() > 0.4:
image, aug = self.contrast.augment_image(image), True
if random.random() > 0.4:
image, aug = self.mulaug.augment_image(image), True
if random.random() > 0.4:
image, aug = self.grayaug.augment_image(image), True
if random.random() > 0.4 or aug == False:
image, aug = self.sharpen.augment_image(image), True
#if random.random() > 0.4 or aug == False:
# image, aug = self.coloraug.augment_image(image), True
augpic = Image.fromarray(image)
return augpic
def __call__(self, imgs, point_meta):
point_meta = point_meta.copy()
if isinstance(imgs, list): is_list = True
else: is_list, imgs = False, [imgs]
augimages = [self.augment(x) for x in imgs]
return imgs + augimages, point_meta
|
tools/expand_model.py | nyu-dl/dl4mt-multi | 143 | 109389 |
<gh_stars>100-1000
import sys
import numpy
import logging
rng = numpy.random.RandomState(1234)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('expand_model')
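# Usage: expand_model.py <reference_model.npz> <old_model.npz>
# Parameters of the old model are copied into the (possibly larger) shapes of the
# reference model; newly created entries are filled with Gaussian noise matched to
# the old parameter's mean and std, and the result is saved next to the reference
# model with an '.expanded' suffix.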
ref_model = dict(numpy.load(sys.argv[1]))
old_model = dict(numpy.load(sys.argv[2]))
for o_key, o_val in old_model.items():
if o_key not in ref_model:
logger.info(
'parameter {} does not exist in the old model'.format(o_key))
continue
elif o_val.shape != ref_model[o_key].shape:
if len(set(o_val.shape) & set(ref_model[o_key].shape)) == 0 and \
len(o_val.shape) > 1:
logger.info(
'not initializing {} since old shape{} and new shape {}'
.format(o_key, o_val.shape, ref_model[o_key].shape))
else:
logger.info(
'expanding parameter {} from {} into {}'.format(
o_key, o_val.shape, ref_model[o_key].shape))
val = o_val.std() * rng.randn(*ref_model[o_key].shape).astype('float32') + \
o_val.mean()
if len(val.shape) == 1:
val[:o_val.shape[0]] = o_val
else:
val[:o_val.shape[0], :o_val.shape[1]] = o_val
ref_model[o_key] = val
else:
logger.info('copying parameter {}'.format(o_key))
ref_model[o_key] = o_val
logger.info('saving {}.expanded'.format(sys.argv[1]))
numpy.savez(sys.argv[1] + '.expanded', **ref_model)
|
src/sparsify/schemas/system.py | dhuangnm/sparsify | 152 | 109420 |
# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Schemas for anything related to system routes
"""
from marshmallow import Schema, fields, validate
from sparsify.schemas.helpers import INFERENCE_ENGINE_TYPES, INSTRUCTION_SETS
__all__ = ["VersionInfoSchema", "SystemInfo", "ResponseSystemInfo"]
class VersionInfoSchema(Schema):
deepsparse = fields.Str(allow_none=True)
sparseml = fields.Str(allow_none=True)
sparsezoo = fields.Str(allow_none=True)
onnx = fields.Str(allow_none=True)
onnxruntime = fields.Str(allow_none=True)
class SystemInfo(Schema):
"""
Schema for the system info the server is currently running on
"""
vendor = fields.Str(required=False, default=None, missing=None, allow_none=True)
isa = fields.Str(required=False, default=None, missing=None, allow_none=True)
vnni = fields.Bool(required=False, default=None, missing=None, allow_none=True)
num_sockets = fields.Int(
required=False, default=None, missing=None, allow_none=True
)
cores_per_socket = fields.Int(
required=False, default=None, missing=None, allow_none=True
)
threads_per_core = fields.Int(
required=False, default=None, missing=None, allow_none=True
)
l1_instruction_cache_size = fields.Int(
required=False, default=None, missing=None, allow_none=True
)
l1_data_cache_size = fields.Int(
required=False, default=None, missing=None, allow_none=True
)
l2_cache_size = fields.Int(
required=False, default=None, missing=None, allow_none=True
)
l3_cache_size = fields.Int(
required=False, default=None, missing=None, allow_none=True
)
ip_address = fields.Str(required=False, default=None, missing=None, allow_none=True)
available_engines = fields.List(
fields.Str(validate=validate.OneOf(INFERENCE_ENGINE_TYPES)),
required=False,
default=None,
missing=None,
allow_none=True,
)
available_instructions = fields.List(
fields.Str(validate=validate.OneOf(INSTRUCTION_SETS)),
required=False,
default=None,
missing=None,
allow_none=True,
)
version_info = fields.Nested(
VersionInfoSchema, allow_none=True, default=None, required=False
)
class ResponseSystemInfo(Schema):
"""
Schema for returning a response with the system info
"""
info = fields.Nested(SystemInfo, required=True)
|
tests/rtllib/test_aes.py | SwiftWinds/PyRTL | 159 | 109437 |
import unittest
import pyrtl
import pyrtl.corecircuits
from pyrtl.rtllib import aes, testingutils
class TestAESDecrypt(unittest.TestCase):
"""
Test vectors are retrieved from:
http://csrc.nist.gov/publications/fips/fips197/fips-197.pdf
"""
def setUp(self):
pyrtl.reset_working_block()
self.aes_decrypt = aes.AES()
self.in_vector = pyrtl.Input(bitwidth=128, name='in_vector')
self.out_vector = pyrtl.Output(bitwidth=128, name='out_vector')
def test_inv_shift_rows(self):
self.out_vector <<= self.aes_decrypt._inv_shift_rows(self.in_vector)
in_vals = [0x3e1c22c0b6fcbf768da85067f6170495, 0x2d6d7ef03f33e334093602dd5bfb12c7]
true_result = [0x3e175076b61c04678dfc2295f6a8bfc0, 0x2dfb02343f6d12dd09337ec75b36e3f0]
calculated_result = testingutils.sim_and_ret_out(self.out_vector,
(self.in_vector,), (in_vals,))
self.assertEqual(calculated_result, true_result)
def test_inv_sub_bytes(self):
self.out_vector <<= self.aes_decrypt._sub_bytes(self.in_vector, True)
in_vals = [0x3e175076b61c04678dfc2295f6a8bfc0, 0x2dfb02343f6d12dd09337ec75b36e3f0]
true_result = [0xd1876c0f79c4300ab45594add66ff41f, 0xfa636a2825b339c940668a3157244d17]
calculated_result = testingutils.sim_and_ret_out(self.out_vector,
(self.in_vector,), (in_vals,))
self.assertEqual(calculated_result, true_result)
def test_inv_mix_columns(self):
self.out_vector <<= self.aes_decrypt._mix_columns(self.in_vector, True)
in_vals = [0xe9f74eec023020f61bf2ccf2353c21c7, 0xbaa03de7a1f9b56ed5512cba5f414d23]
real_res = [0x54d990a16ba09ab596bbf40ea111702f, 0x3e1c22c0b6fcbf768da85067f6170495]
calculated_result = testingutils.sim_and_ret_out(self.out_vector,
(self.in_vector,), (in_vals,))
self.assertEqual(calculated_result, real_res)
@unittest.skip
def test_key_expansion(self):
# This is not at all correct. Needs to be completely rewritten
self.out_vector <<=\
pyrtl.corecircuits.concat_list(self.aes_decrypt._key_gen(self.in_vector))
in_vals = [0xd1876c0f79c4300ab45594add66ff41f, 0xfa636a2825b339c940668a3157244d17]
true_result = [0x3e175076b61c04678dfc2295f6a8bfc0, 0x2dfb02343f6d12dd09337ec75b36e3f0]
calculated_result = testingutils.sim_and_ret_out(self.out_vector,
(self.in_vector,), (in_vals,))
self.assertEqual(calculated_result, true_result)
def test_aes_full(self):
aes_key = pyrtl.Input(bitwidth=128, name='aes_key')
self.out_vector <<= self.aes_decrypt.decryption(self.in_vector, aes_key)
ciphers = [0x3ad77bb40d7a3660a89ecaf32466ef97, 0x66e94bd4ef8a2c3b884cfa59ca342b2e]
keys = [0x2b7e151628aed2a6abf7158809cf4f3c, 0x0]
plain_text = [0x6bc1bee22e409f96e93d7e117393172a, 0x0]
calculated_result = testingutils.sim_and_ret_out(self.out_vector, (self.in_vector, aes_key),
(ciphers, keys))
self.assertEqual(calculated_result, plain_text)
def test_aes_state_machine(self):
# self.longMessage = True
aes_key = pyrtl.Input(bitwidth=128, name='aes_key')
reset = pyrtl.Input(1)
ready = pyrtl.Output(1, name='ready')
decrypt_ready, decrypt_out =\
self.aes_decrypt.decryption_statem(self.in_vector, aes_key, reset)
self.out_vector <<= decrypt_out
ready <<= decrypt_ready
sim_trace = pyrtl.SimulationTrace()
sim = pyrtl.Simulation(tracer=sim_trace)
sim.step({
self.in_vector: 0x69c4e0d86a7b0430d8cdb78070b4c55a,
aes_key: 0x000102030405060708090a0b0c0d0e0f,
reset: 1
})
true_vals = [0x69c4e0d86a7b0430d8cdb78070b4c55a, 0x7ad5fda789ef4e272bca100b3d9ff59f,
0x54d990a16ba09ab596bbf40ea111702f, 0x3e1c22c0b6fcbf768da85067f6170495,
0xb458124c68b68a014b99f82e5f15554c, 0xe8dab6901477d4653ff7f5e2e747dd4f,
0x36339d50f9b539269f2c092dc4406d23, 0x2d6d7ef03f33e334093602dd5bfb12c7,
0x3bd92268fc74fb735767cbe0c0590e2d, 0xa7be1a6997ad739bd8c9ca451f618b61,
0x6353e08c0960e104cd70b751bacad0e7, 0x00112233445566778899aabbccddeeff,
0x00112233445566778899aabbccddeeff, ]
for cycle in range(1, 13): # Bogus data for while the state machine churns
sim.step({
self.in_vector: 0x0, aes_key: 0x1, reset: 0
})
circuit_out = sim_trace.trace[self.out_vector][cycle]
self.assertEqual(circuit_out, true_vals[cycle], "\nAssertion failed on cycle: "
+ str(cycle) + " Gotten value: " + hex(circuit_out))
for ready_signal in sim_trace.trace[ready][:11]:
self.assertEquals(ready_signal, 0)
for ready_signal in sim_trace.trace[ready][11:]:
self.assertEquals(ready_signal, 1)
class TestAESEncrypt(unittest.TestCase):
"""
Test vectors are retrieved from:
http://csrc.nist.gov/publications/fips/fips197/fips-197.pdf
"""
def setUp(self):
pyrtl.reset_working_block()
self.aes_encrypt = aes.AES()
self.in_vector = pyrtl.Input(bitwidth=128, name='in_vector')
self.out_vector = pyrtl.Output(bitwidth=128, name='out_vector')
def test_shift_rows(self):
self.out_vector <<= self.aes_encrypt._shift_rows(self.in_vector)
in_vals = [0x3b59cb73fcd90ee05774222dc067fb68, 0xb415f8016858552e4bb6124c5f998a4c]
true_result = [0x3bd92268fc74fb735767cbe0c0590e2d, 0xb458124c68b68a014b99f82e5f15554c]
calculated_result = testingutils.sim_and_ret_out(self.out_vector, (self.in_vector,),
(in_vals,))
self.assertEqual(calculated_result, true_result)
def test_sub_bytes(self):
self.out_vector <<= self.aes_encrypt._sub_bytes(self.in_vector)
in_vals = [0x4915598f55e5d7a0daca94fa1f0a63f7, 0xc62fe109f75eedc3cc79395d84f9cf5d]
true_result = [0x3b59cb73fcd90ee05774222dc067fb68, 0xb415f8016858552e4bb6124c5f998a4c]
calculated_result = testingutils.sim_and_ret_out(self.out_vector, (self.in_vector,),
(in_vals,))
self.assertEqual(calculated_result, true_result)
def test_mix_columns(self):
self.out_vector <<= self.aes_encrypt._mix_columns(self.in_vector)
in_vals = [0x6353e08c0960e104cd70b751bacad0e7, 0xa7be1a6997ad739bd8c9ca451f618b61]
real_res = [0x5f72641557f5bc92f7be3b291db9f91a, 0xff87968431d86a51645151fa773ad009]
calculated_result = testingutils.sim_and_ret_out(self.out_vector, (self.in_vector,),
(in_vals,))
self.assertEqual(calculated_result, real_res)
@unittest.skip
def test_key_expansion(self):
# This is not at all correct. Needs to be completely rewritten
self.out_vector <<= pyrtl.concat_list(self.aes_encrypt._key_gen(self.in_vector))
in_vals = [0x4c9c1e66f771f0762c3f868e534df256, 0xc57e1c159a9bd286f05f4be098c63439]
true_result = [0x3bd92268fc74fb735767cbe0c0590e2d, 0xb458124c68b68a014b99f82e5f15554c]
calculated_result = testingutils.sim_and_ret_out(self.out_vector, (self.in_vector,),
(in_vals,))
self.assertEqual(calculated_result, true_result)
def test_aes_full(self):
aes_key = pyrtl.Input(bitwidth=128, name='aes_key')
self.out_vector <<= self.aes_encrypt.encryption(self.in_vector, aes_key)
plain_text = [0x00112233445566778899aabbccddeeff, 0x0]
keys = [<KEY>, 0x0]
ciphers = [0x69c4e0d86a7b0430d8cdb78070b4c55a, 0x66e94bd4ef8a2c3b884cfa59ca342b2e]
calculated_result = testingutils.sim_and_ret_out(self.out_vector, (self.in_vector, aes_key),
(plain_text, keys))
self.assertEqual(calculated_result, ciphers)
def test_aes_state_machine(self):
# self.longMessage = True
aes_key = pyrtl.Input(bitwidth=128, name='aes_key')
reset = pyrtl.Input(1)
ready = pyrtl.Output(1, name='ready')
encrypt_ready, encrypt_out = self.aes_encrypt.encrypt_state_m(self.in_vector, aes_key,
reset)
self.out_vector <<= encrypt_out
ready <<= encrypt_ready
sim_trace = pyrtl.SimulationTrace()
sim = pyrtl.Simulation(tracer=sim_trace)
sim.step({
self.in_vector: 0x00112233445566778899aabbccddeeff,
aes_key: <KEY>,
reset: 1
})
true_vals = [0x00112233445566778899aabbccddeeff, 0x00102030405060708090a0b0c0d0e0f0,
0x89d810e8855ace682d1843d8cb128fe4, 0x4915598f55e5d7a0daca94fa1f0a63f7,
0xfa636a2825b339c940668a3157244d17, 0x247240236966b3fa6ed2753288425b6c,
0xc81677bc9b7ac93b25027992b0261996, 0xc62fe109f75eedc3cc79395d84f9cf5d,
0xd1876c0f79c4300ab45594add66ff41f, 0xfde3bad205e5d0d73547964ef1fe37f1,
0xbd6e7c3df2b5779e0b61216e8b10b689, 0x69c4e0d86a7b0430d8cdb78070b4c55a,
0x69c4e0d86a7b0430d8cdb78070b4c55a, ]
for cycle in range(1, 13): # Bogus data for while the state machine churns
sim.step({
self.in_vector: 0x0, aes_key: 0x1, reset: 0
})
circuit_out = sim_trace.trace[self.out_vector][cycle]
# sim_trace.render_trace(symbol_len=40)
self.assertEqual(circuit_out, true_vals[cycle], "\nAssertion failed on cycle: "
+ str(cycle) + " Gotten value: " + hex(circuit_out))
for ready_signal in sim_trace.trace[ready][:11]:
self.assertEquals(ready_signal, 0)
for ready_signal in sim_trace.trace[ready][11:]:
self.assertEquals(ready_signal, 1)
|
python/ccxt/static_dependencies/__init__.py | DavidFelsen/ccxt | 24,910 | 109456 |
<filename>python/ccxt/static_dependencies/__init__.py
__all__ = ['ecdsa', 'keccak']
|
tests/ignite/engine/__init__.py | Juddd/ignite | 4,119 | 109465 |
import torch
try:
from torch.utils.data import IterableDataset
except ImportError:
class IterableDataset:
pass
class BatchChecker:
def __init__(self, data, init_counter=0):
self.counter = init_counter
self.data = data
self.true_batch = None
def check(self, batch):
self.true_batch = self.data[self.counter % len(self.data)]
self.counter += 1
res = self.true_batch == batch
return res.all() if not isinstance(res, bool) else res
class IterationCounter:
def __init__(self, start_value=1):
self.current_iteration_count = start_value
def __call__(self, engine):
assert engine.state.iteration == self.current_iteration_count
self.current_iteration_count += 1
class EpochCounter:
def __init__(self, start_value=1):
self.current_epoch_count = start_value
def __call__(self, engine):
assert engine.state.epoch == self.current_epoch_count
self.current_epoch_count += 1
def setup_sampler(sampler_type, num_iters, batch_size):
if sampler_type is None:
return None, batch_size
if sampler_type == "weighted":
from torch.utils.data.sampler import WeightedRandomSampler
w = torch.ones(num_iters * batch_size, dtype=torch.float)
for i in range(num_iters):
w[batch_size * i : batch_size * (i + 1)] += i * 1.0
return WeightedRandomSampler(w, num_samples=num_iters * batch_size, replacement=True), batch_size
if sampler_type == "distributed":
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
num_replicas = 1
rank = 0
if dist.is_available() and dist.is_initialized():
num_replicas = dist.get_world_size()
rank = dist.get_rank()
dataset = torch.zeros(num_iters * batch_size)
return DistributedSampler(dataset, num_replicas=num_replicas, rank=rank), batch_size // num_replicas
class MyIterableDataset(IterableDataset):
def __init__(self, start, end):
        super().__init__()
        assert end > start, "this example code only works with end > start"
self.start = start
self.end = end
def __iter__(self):
return iter(range(self.start, self.end))
def get_iterable_dataset(*args, **kwargs):
return MyIterableDataset(*args, **kwargs)
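# Minimal usage sketch (an editorial assumption, not part of the original helpers):
# exercises BatchChecker and setup_sampler directly when this module is run as a script.
if __name__ == "__main__":
    _data = [torch.tensor([i]) for i in range(4)]
    _checker = BatchChecker(_data)
    assert _checker.check(_data[0]) and _checker.check(_data[1])
    _sampler, _batch_size = setup_sampler("weighted", num_iters=2, batch_size=4)
    print(type(_sampler).__name__, _batch_size)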
|
tests/test_multi_k_model.py
|
shifaoh/dna2vec
| 151 |
109501
|
<gh_stars>100-1000
import pytest
from dna2vec.multi_k_model import MultiKModel
K_LOW = 3
K_HIGH = 8
kmer_samples = ['AAA', 'ACGT', 'ACGTACGT']
cosine_testdata = [
('AAA', 'AAAA', 'CCCC'),
('ACGA', 'ACGT', 'TTTT'),
('ACGT', 'ACGTAA', 'TTT'),
]
@pytest.fixture(scope="module")
def mk_model():
filepath = 'pretrained/dna2vec-20161219-0153-k3to8-100d-10c-29320Mbp-sliding-Xat.w2v'
return MultiKModel(filepath)
@pytest.mark.parametrize('k_len', list(range(K_LOW, K_HIGH + 1)))
def test_num_of_vectors(k_len, mk_model):
assert len(mk_model.model(k_len).vocab) == 4 ** k_len
@pytest.mark.parametrize('kmer', kmer_samples)
def test_cosine_dist_to_itself(kmer, mk_model):
assert abs(mk_model.cosine_distance(kmer, kmer) - 1.0) < 1e-10
@pytest.mark.parametrize('kmer0, kmer_greater, kmer_less', cosine_testdata)
def test_cosine_dist_sanity(kmer0, kmer_greater, kmer_less, mk_model):
assert mk_model.cosine_distance(kmer0, kmer_greater) > mk_model.cosine_distance(kmer0, kmer_less)
|
pytools/modules/newyearcardgenerator/__init__.py
|
maopucheng/pytools
| 757 |
109508
|
'''Initialization'''
from .newyearcardgenerator import NewYearCardGenerator
|
examples/pxScene2d/external/libnode-v10.15.3/deps/v8/tools/lldb_commands.py
|
madanagopaltcomcast/pxCore
| 2,151 |
109520
|
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import lldb
import re
def jst(debugger, *args):
"""Print the current JavaScript stack trace"""
target = debugger.GetSelectedTarget()
process = target.GetProcess()
thread = process.GetSelectedThread()
frame = thread.GetSelectedFrame()
frame.EvaluateExpression("_v8_internal_Print_StackTrace();")
print("")
def jss(debugger, *args):
"""Skip the jitted stack on x64 to where we entered JS last"""
target = debugger.GetSelectedTarget()
process = target.GetProcess()
thread = process.GetSelectedThread()
frame = thread.GetSelectedFrame()
js_entry_sp = frame.EvaluateExpression(
"v8::internal::Isolate::Current()->thread_local_top()->js_entry_sp_;") \
.GetValue()
sizeof_void = frame.EvaluateExpression("sizeof(void*)").GetValue()
rbp = frame.FindRegister("rbp")
rsp = frame.FindRegister("rsp")
pc = frame.FindRegister("pc")
rbp = js_entry_sp
rsp = js_entry_sp + 2 *sizeof_void
pc.value = js_entry_sp + sizeof_void
def bta(debugger, *args):
"""Print stack trace with assertion scopes"""
func_name_re = re.compile("([^(<]+)(?:\(.+\))?")
assert_re = re.compile(
"^v8::internal::Per\w+AssertType::(\w+)_ASSERT, (false|true)>")
target = debugger.GetSelectedTarget()
process = target.GetProcess()
thread = process.GetSelectedThread()
frame = thread.GetSelectedFrame()
for frame in thread:
functionSignature = frame.GetDisplayFunctionName()
if functionSignature is None:
continue
functionName = func_name_re.match(functionSignature)
line = frame.GetLineEntry().GetLine()
sourceFile = frame.GetLineEntry().GetFileSpec().GetFilename()
if line:
sourceFile = sourceFile + ":" + str(line)
if sourceFile is None:
sourceFile = ""
print("[%-2s] %-60s %-40s" % (frame.GetFrameID(),
functionName.group(1),
sourceFile))
match = assert_re.match(str(functionSignature))
if match:
      if match.group(2) == "false":
prefix = "Disallow"
color = "\033[91m"
else:
prefix = "Allow"
color = "\033[92m"
print("%s -> %s %s (%s)\033[0m" % (
color, prefix, match.group(2), match.group(1)))
def __lldb_init_module (debugger, dict):
debugger.HandleCommand('command script add -f lldb_commands.jst jst')
debugger.HandleCommand('command script add -f lldb_commands.jss jss')
debugger.HandleCommand('command script add -f lldb_commands.bta bta')
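# Hypothetical usage sketch (an editorial assumption, not part of the original file):
# once this script is imported into an lldb session, the commands registered above
# become available at the lldb prompt, e.g.
#
#   (lldb) command script import /path/to/lldb_commands.py
#   (lldb) jst    # print the current JavaScript stack trace
#   (lldb) bta    # backtrace annotated with assertion scopes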
|
lucent/misc/io/collapse_channels.py
|
fuzhanrahmanian/lucent
| 449 |
109546
|
<gh_stars>100-1000
# Copyright 2018 The Lucid Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Convert an "image" with n channels into 3 RGB channels."""
from __future__ import absolute_import, division, print_function
import math
import numpy as np
def hue_to_rgb(ang, warp=True):
"""Produce an RGB unit vector corresponding to a hue of a given angle."""
ang = ang - 360*(ang//360)
colors = np.asarray([
[1, 0, 0],
[1, 1, 0],
[0, 1, 0],
[0, 1, 1],
[0, 0, 1],
[1, 0, 1],
])
colors = colors / np.linalg.norm(colors, axis=1, keepdims=True)
R = 360 / len(colors)
n = math.floor(ang / R)
D = (ang - n * R) / R
if warp:
# warping the angle away from the primary colors (RGB)
# helps make equally-spaced angles more visually distinguishable
adj = lambda x: math.sin(x * math.pi / 2)
if n % 2 == 0:
D = adj(D)
else:
D = 1 - adj(1 - D)
v = (1-D) * colors[n] + D * colors[(n+1) % len(colors)]
return v / np.linalg.norm(v)
def sparse_channels_to_rgb(array):
assert (array >= 0).all()
channels = array.shape[-1]
rgb = 0
for i in range(channels):
ang = 360 * i / channels
color = hue_to_rgb(ang)
color = color[tuple(None for _ in range(len(array.shape)-1))]
rgb += array[..., i, None] * color
rgb += np.ones(array.shape[:-1])[..., None] * (array.sum(-1) - array.max(-1))[..., None]
rgb /= 1e-4 + np.linalg.norm(rgb, axis=-1, keepdims=True)
rgb *= np.linalg.norm(array, axis=-1, keepdims=True)
return rgb
def collapse_channels(array):
if (array < 0).any():
array = np.concatenate([np.maximum(0, array), np.maximum(0, -array)], axis=-1)
return sparse_channels_to_rgb(array)
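# Minimal usage sketch (an editorial assumption, not part of the original module):
# collapse a random 6-channel "image" down to 3 RGB channels.
if __name__ == "__main__":
    dummy = np.random.uniform(-1, 1, size=(8, 8, 6))  # H x W x C, with signed values
    rgb = collapse_channels(dummy)
    print(rgb.shape)  # expected: (8, 8, 3)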
|
Python/Tests/TestData/AddImport/ImportFunctionFromExistingFromImportParens.py
|
techkey/PTVS
| 404 |
109553
|
<gh_stars>100-1000
from test_module import (module_func_2)
module_func()
|
src/olympia/devhub/cron.py
|
covariant/addons-server
| 843 |
109574
|
from datetime import datetime
from django.conf import settings
import requests
import olympia.core.logger
from olympia.devhub.models import BlogPost
log = olympia.core.logger.getLogger('z.cron')
def update_blog_posts():
"""Update the blog post cache."""
items = requests.get(settings.DEVELOPER_BLOG_URL, timeout=10).json()
if not items:
return
BlogPost.objects.all().delete()
for item in items[:5]:
BlogPost.objects.create(
title=item['title']['rendered'],
date_posted=datetime.strptime(item['date'], '%Y-%m-%dT%H:%M:%S'),
permalink=item['link'],
)
log.info(f'Adding {BlogPost.objects.count():d} blog posts.')
|
eval/evaluate.py
|
aiainui/crnn-audio-classification
| 307 |
109583
|
<filename>eval/evaluate.py
import os
import torch
from tqdm import tqdm
from net import MelspectrogramStretch
from utils import plot_heatmap, mkdir_p
class ClassificationEvaluator(object):
def __init__(self, data_loader, model):
self.data_loader = data_loader
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.model = model.to(self.device)
self.model.eval()
self.mel = MelspectrogramStretch(norm='db').to(self.device)
def evaluate(self, metrics, debug=False):
with torch.no_grad():
total_metrics = torch.zeros(len(metrics))
for batch_idx, batch in enumerate(tqdm(self.data_loader)):
batch = [b.to(self.device) for b in batch]
data, target = batch[:-1], batch[-1]
output = self.model(data)
self.model.classes
batch_size = data[0].size(0)
if debug:
self._store_batch(data, batch_size, output, target)
for i, metric in enumerate(metrics):
total_metrics[i] += metric(output, target) * batch_size
size = len(self.data_loader.dataset)
ret = {met.__name__ : "%.3f"%(total_metrics[i].item() / size) for i, met in enumerate(metrics)}
return ret
def _store_batch(self, data, batch_size, output, target):
path = 'eval_batch'
mkdir_p(path)
sig, lengths, _ = data
inds = output.argmax(1)
confs = torch.exp(output)[torch.arange(batch_size), inds]
spec, lengths = self.mel(sig.transpose(1,2).float(), lengths)
for i in range(batch_size):
if inds[i] == target[i]:
label = self.model.classes[inds[i]]
pred_txt = "%s (%.1f%%)"%(label, 100*confs[inds[i]])
out_path = os.path.join(path, '%s.png'%i)
plot_heatmap(spec[i][...,:lengths[i]].cpu().numpy(),
out_path,
pred=pred_txt)
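# Hypothetical usage sketch (an editorial assumption; `data_loader`, `model` and
# `accuracy` are placeholders, not defined in this snippet):
#
#   evaluator = ClassificationEvaluator(data_loader, model)
#   results = evaluator.evaluate(metrics=[accuracy], debug=False)
#   print(results)  # e.g. {'accuracy': '0.912'}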
|
tools/clang/bindings/python/tests/cindex/test_comment.py
|
clayne/DirectXShaderCompiler
| 1,192 |
109623
|
from clang.cindex import TranslationUnit
from tests.cindex.util import get_cursor
def test_comment():
files = [('fake.c', """
/// Aaa.
int test1;
/// Bbb.
/// x
void test2(void);
void f() {
}
""")]
# make a comment-aware TU
tu = TranslationUnit.from_source('fake.c', ['-std=c99'], unsaved_files=files,
options=TranslationUnit.PARSE_INCLUDE_BRIEF_COMMENTS_IN_CODE_COMPLETION)
test1 = get_cursor(tu, 'test1')
assert test1 is not None, "Could not find test1."
assert test1.type.is_pod()
raw = test1.raw_comment
brief = test1.brief_comment
assert raw == """/// Aaa."""
assert brief == """Aaa."""
test2 = get_cursor(tu, 'test2')
raw = test2.raw_comment
brief = test2.brief_comment
assert raw == """/// Bbb.\n/// x"""
assert brief == """Bbb. x"""
f = get_cursor(tu, 'f')
raw = f.raw_comment
brief = f.brief_comment
assert raw is None
assert brief is None
|
BRATS/data/explore.py
|
eanemo/KiU-Net-pytorch
| 236 |
109638
|
import os
import numpy as np
from numpy.lib.stride_tricks import as_strided
import nibabel as nib
def nib_load(file_name):
proxy = nib.load(file_name)
data = proxy.get_data().astype('float32')
proxy.uncache()
return data
def crop(x, ksize, stride=3):
    # Extract overlapping (ksize, ksize, ksize) patches with the given stride,
    # using a zero-copy strided view of the input volume.
    shape = (np.array(x.shape[:3]) - ksize)//stride + 1
    shape = tuple(shape) + (ksize, )*3 + (x.shape[3], )
    strides = np.array(x.strides[:3])*stride
    strides = tuple(strides) + x.strides
    x = as_strided(x, shape=shape, strides=strides)
    return x
modalities = ('flair', 't1ce', 't1', 't2')
root = '/home/thuyen/Data/brats17/Brats17TrainingData/'
file_list = root + 'file_list.txt'
subjects = open(file_list).read().splitlines()
subj = subjects[0]
name = subj.split('/')[-1]
path = os.path.join(root, subj, name + '_')
x0 = np.stack([
nib_load(path + modal + '.nii.gz') \
for modal in modalities], 3)
y0 = nib_load(path + 'seg.nii.gz')[..., None]
x0 = np.pad(x0, ((0, 0), (0, 0), (0, 1), (0, 0)), mode='constant')
y0 = np.pad(y0, ((0, 0), (0, 0), (0, 1), (0, 0)), mode='constant')
x1 = crop(x0, 9)
x2 = crop(np.pad(x0, ((8, 8), (8, 8), (8, 8), (0, 0)), mode='constant'), 25)
x3 = crop(np.pad(x0, ((24, 24), (24, 24), (24, 24), (0, 0)), mode='constant'), 57)
y1 = crop(y0, 9)
m = x1.reshape(x1.shape[:3] + (-1, )).sum(3) > 0
x1 = x1[m]
x2 = x2[m]
x3 = x3[m]
y1 = y1[m]
print(x1.shape)
print(x2.shape)
print(x3.shape)
print(y1.shape)
|
conceptnet5/db/connection.py
|
tesYolan/conceptnet5
| 2,195 |
109644
|
<filename>conceptnet5/db/connection.py
import psycopg2
from conceptnet5.db import config
_CONNECTIONS = {}
def get_db_connection(dbname=None):
"""
Get a global connection to the ConceptNet PostgreSQL database.
`dbname` specifies the name of the database in PostgreSQL.
"""
if dbname is None:
dbname = config.DB_NAME
if dbname in _CONNECTIONS:
return _CONNECTIONS[dbname]
else:
_CONNECTIONS[dbname] = _get_db_connection_inner(dbname)
return _CONNECTIONS[dbname]
def _get_db_connection_inner(dbname):
if config.DB_PASSWORD:
conn = psycopg2.connect(
dbname=dbname,
user=config.DB_USERNAME,
password=<PASSWORD>,
host=config.DB_HOSTNAME,
port=config.DB_PORT
)
else:
conn = psycopg2.connect(dbname=dbname)
conn.autocommit = True
psycopg2.paramstyle = 'named'
return conn
def check_db_connection(dbname=None):
"""
Raise an error early if we can't access the database. This is intended
to be used at the start of the build script.
The desired outcome is that we successfully make a connection (and then
throw it away). If the DB is unavailable, this will raise an uncaught
error.
"""
if dbname is None:
dbname = config.DB_NAME
_get_db_connection_inner(dbname)
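# Minimal usage sketch (an editorial assumption, not part of the original module);
# requires a reachable PostgreSQL instance configured via conceptnet5.db.config.
if __name__ == "__main__":
    check_db_connection()            # fail early if the database is unreachable
    connection = get_db_connection()
    with connection.cursor() as cursor:
        cursor.execute("SELECT 1")
        print(cursor.fetchone())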
|
Python3/598.py
|
rakhi2001/ecom7
| 854 |
109685
|
__________________________________________________________________________________________________
sample 68 ms submission
import sys
from typing import List
class Solution:
def maxCount(self, m: int, n: int, ops: List[List[int]]) -> int:
rMin, cMin = sys.maxsize, sys.maxsize
for i in ops:
rMin = min(rMin, i[0])
cMin = min(cMin, i[1])
return min(m, rMin) * min(n, cMin)
__________________________________________________________________________________________________
sample 14136 kb submission
from typing import List
class Solution:
def maxCount(self, m: int, n: int, ops: List[List[int]]) -> int:
if not ops:
return m*n
row = min(m, min([op[0] for op in ops]))
col = min(n, min([op[1] for op in ops]))
return row * col
__________________________________________________________________________________________________
|
saas/aiops/api/aiops-server/services/base_service.py
|
iuskye/SREWorks
| 407 |
109750
|
<gh_stars>100-1000
#!/usr/bin/env python
# encoding: utf-8
""" """
__author__ = 'sreworks'
from models.db_session import db
class BaseService(object):
def __init__(self):
self.db = db
self.db_session = db.session
|
swift_rpc/server/__init__.py
|
rfyiamcool/swift_rpc
| 116 |
109780
|
#coding:utf-8
from tornado import (
gen,
ioloop,
log,
web
)
from tornado.httpserver import HTTPServer
from .handlers import _AsyncBase, _Base, _ThreadPoolBase, _MessageQueueBase
from swift_rpc.log import get_logger
class RPCServer(object):
def __init__(self,config):
self.config = config
self._routes = []
self.log = log.logging.getLogger("transmit.%s" % __name__)
log.logging.config.dictConfig(config.LOGCONFIG)
#log.enable_pretty_logging(logger=self.log)
self.register_async(self._getroutes)
@gen.coroutine
def _getroutes(self):
raise gen.Return([v.__name__ for _, v in self._routes])
def _make(self, func, base):
name = func.__name__
handler = type(name, (base,), {'func': [func],'config':self.config})
self._routes.append((r'/{0}'.format(name), handler))
self.log.info('Registered {0} command {1}'.format(base.TYPE, name))
def register(self, func):
self._make(func, _Base)
def register_async(self, func):
self._make(func, _AsyncBase)
def register_pool(self, func):
self._make(func, _ThreadPoolBase)
def register_mq(self, func):
self._make(func, _MessageQueueBase)
def start(self, host, port):
self.log.info('Starting server on port {0}'.format(port))
# app = web.Application(self._routes, debug=True)
server = HTTPServer(web.Application(self._routes, debug=True),xheaders=True)
server.listen(int(port), host)
ioloop.IOLoop.current().start()
__all__ = ('RPCServer',)
if __name__ == "__main__":
pass
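    # Hypothetical usage sketch (an editorial assumption: `MyConfig`, an object with a
    # LOGCONFIG dict, and the handler functions are placeholders, not part of this package):
    #
    #   server = RPCServer(MyConfig)
    #   server.register(blocking_handler)
    #   server.register_async(coroutine_handler)
    #   server.start('0.0.0.0', 8080)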
|
Configuration/DataProcessing/python/Impl/cosmicsEra_Run2_2017.py
|
Purva-Chaudhari/cmssw
| 852 |
109784
|
<filename>Configuration/DataProcessing/python/Impl/cosmicsEra_Run2_2017.py<gh_stars>100-1000
#!/usr/bin/env python3
"""
_cosmicsEra_Run2_2017_
Scenario supporting cosmic data taking
"""
import os
import sys
from Configuration.Eras.Era_Run2_2017_cff import Run2_2017
from Configuration.DataProcessing.Impl.cosmics import cosmics
class cosmicsEra_Run2_2017(cosmics):
def __init__(self):
cosmics.__init__(self)
self.eras = Run2_2017
"""
_cosmicsEra_Run2_2017_
Implement configuration building for data processing for cosmic
data taking in Run2
"""
|
finalists/yc14600/PyTorch-Encoding/tests/unit_test/test_function.py
|
lrzpellegrini/cvpr_clvision_challenge
| 2,190 |
109806
|
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: <NAME>
## ECE Department, Rutgers University
## Email: <EMAIL>
## Copyright (c) 2017
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import numpy as np
import torch
from torch.autograd import Variable, gradcheck
import encoding
EPS = 1e-3
ATOL = 1e-3
def _assert_tensor_close(a, b, atol=ATOL, rtol=EPS):
npa, npb = a.cpu().numpy(), b.cpu().numpy()
assert np.allclose(npa, npb, rtol=rtol, atol=atol), \
'Tensor close check failed\n{}\n{}\nadiff={}, rdiff={}'.format(
a, b, np.abs(npa - npb).max(), np.abs((npa - npb) / np.fmax(npa, 1e-5)).max())
def test_aggregate():
B,N,K,D = 2,3,4,5
A = Variable(torch.cuda.DoubleTensor(B,N,K).uniform_(-0.5,0.5),
requires_grad=True)
X = Variable(torch.cuda.DoubleTensor(B,N,D).uniform_(-0.5,0.5),
requires_grad=True)
C = Variable(torch.cuda.DoubleTensor(K,D).uniform_(-0.5,0.5),
requires_grad=True)
input = (A, X, C)
test = gradcheck(encoding.functions.aggregate, input, eps=EPS, atol=ATOL)
print('Testing aggregate(): {}'.format(test))
def test_scaled_l2():
B,N,K,D = 2,3,4,5
X = Variable(torch.cuda.DoubleTensor(B,N,D).uniform_(-0.5,0.5),
requires_grad=True)
C = Variable(torch.cuda.DoubleTensor(K,D).uniform_(-0.5,0.5),
requires_grad=True)
S = Variable(torch.cuda.DoubleTensor(K).uniform_(-0.5,0.5),
requires_grad=True)
input = (X, C, S)
test = gradcheck(encoding.functions.scaled_l2, input, eps=EPS, atol=ATOL)
print('Testing scaled_l2(): {}'.format(test))
def test_moments():
B,C,H = 2,3,4
X = Variable(torch.cuda.DoubleTensor(B,C,H).uniform_(-0.5,0.5),
requires_grad=True)
input = (X,)
test = gradcheck(encoding.functions.moments, input, eps=EPS, atol=ATOL)
print('Testing moments(): {}'.format(test))
def test_non_max_suppression():
def _test_nms(cuda):
# check a small test case
boxes = torch.Tensor([
[[10.2, 23., 50., 20.],
[11.3, 23., 52., 20.1],
[23.2, 102.3, 23.3, 50.3],
[101.2, 32.4, 70.6, 70.],
[100.2, 30.9, 70.7, 69.]],
[[200.3, 234., 530., 320.],
[110.3, 223., 152., 420.1],
[243.2, 240.3, 50.3, 30.3],
[243.2, 236.4, 48.6, 30.],
[100.2, 310.9, 170.7, 691.]]])
scores = torch.Tensor([
[0.9, 0.7, 0.11, 0.23, 0.8],
[0.13, 0.89, 0.45, 0.23, 0.3]])
if cuda:
boxes = boxes.cuda()
scores = scores.cuda()
expected_output = (
torch.ByteTensor(
[[1, 1, 0, 0, 1], [1, 1, 1, 0, 1]]),
torch.LongTensor(
[[0, 4, 1, 3, 2], [1, 2, 4, 3, 0]])
)
mask, inds = encoding.functions.NonMaxSuppression(boxes, scores, 0.7)
_assert_tensor_close(mask, expected_output[0])
_assert_tensor_close(inds, expected_output[1])
_test_nms(False)
_test_nms(True)
if __name__ == '__main__':
import nose
nose.runmodule()
|
quantifiedcode/backend/api/v1/task.py
|
marcinguy/quantifiedcode
| 118 |
109829
|
# -*- coding: utf-8 -*-
"""
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
import os
from flask import request
from quantifiedcode.settings import backend, settings
from ...utils.api import ArgumentError, get_pagination_args
from ...models import Task
from ...decorators import valid_user, valid_project
from ..resource import Resource
from .forms.task import TaskLogForm
class TaskDetails(Resource):
export_map = (
{'project': ('pk', 'name')},
'pk',
'status',
'created_at',
'updated_at',
'last_ping',
'type',
)
@valid_user(anon_ok=True)
@valid_project(public_ok=True)
def get(self, project_id, task_id):
""" Get the task with the given task id for the project with the given project id.
:param project_id: id of the project
:param task_id: id of the task
:return: task information
"""
try:
task = backend.get(Task, {'project.pk': request.project.pk, 'pk': task_id},
only=self.export_fields, include=('project',), raw=True)
except Task.DoesNotExist:
return {'message': "unknown task"}, 404
return {'task': self.export(task)}, 200
class TaskLog(Resource):
@valid_user(anon_ok=True)
@valid_project(public_ok=True)
def get(self, project_id, task_id):
""" Returns the log for the specified project task. Accepts a from request parameter, which seeks into
the log file to return the rest of the file.
:param project_id: id of the project
:param task_id: id of the task
:return: log, its length, offset, and the task status
"""
form = TaskLogForm(request.args)
if not form.validate():
return {'message' : 'please correct the errors mentioned below.'}, 400
data = form.data
from_chr = data['from_chr']
try:
task = backend.get(Task, {'project.pk': request.project.pk, 'pk': task_id}, raw=True)
except Task.DoesNotExist:
return {'message': "unknown task"}, 404
log_path = os.path.join(settings.get('backend.path'),
settings.get('backend.paths.tasks'),
"{}.log".format(task['pk']))
try:
with open(log_path, "r") as task_log:
task_log.seek(from_chr)
content = task_log.read()
data = {
'task_log': content,
'len': len(content),
'from': from_chr,
'task_status': task['status'] if 'status' in task else "unknown"
}
return data, 200
except IOError:
return {'message': "no log found {}".format(log_path)}, 404
class Tasks(Resource):
DEFAULT_LIMIT = 20
DEFAULT_OFFSET = 0
@valid_user(anon_ok=True)
@valid_project(public_ok=True)
def get(self, project_id):
""" Get all tasks for the project with the given id.
:param project_id: id of the project
:return: tasks for the project
"""
try:
pagination_args = get_pagination_args(request)
except ArgumentError as e:
return {'message': e.message}, 500
limit = pagination_args['limit'] if 'limit' in pagination_args else self.DEFAULT_LIMIT
offset = pagination_args['offset'] if 'offset' in pagination_args else self.DEFAULT_OFFSET
tasks = backend.filter(Task, {'project.pk': request.project.pk},
include=('project',), only=TaskDetails.export_fields, raw=True
).sort('created_at', -1)
return {'tasks': [TaskDetails.export(task) for task in tasks[offset:offset + limit]]}, 200
|
Chapter17/PI-QTicTacToe/run.py
|
trappn/Mastering-GUI-Programming-with-Python
| 138 |
109837
|
<gh_stars>100-1000
"""Qtictactoe by <NAME>"""
from qtictactoe.__main__ import main
if __name__ == '__main__':
main()
|
deepy/utils/__init__.py
|
uaca/deepy
| 260 |
109872
|
<filename>deepy/utils/__init__.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from dim_to_var import dim_to_var
from fake_generator import FakeGenerator
from initializers import *
from map_dict import MapDict
from scanner import Scanner
from stream_pickler import StreamPickler
from timer import Timer
|
examples/processing/plot_line_noise.py
|
dieloz/fooof
| 154 |
109900
|
<reponame>dieloz/fooof
"""
Dealing with Line Noise
=======================
This example covers strategies for dealing with line noise.
"""
###################################################################################################
# sphinx_gallery_thumbnail_number = 2
# Import the spectral parameterization object and utilities
from fooof import FOOOF
from fooof.plts import plot_spectra
from fooof.utils import trim_spectrum, interpolate_spectrum
# Import simulation functions to create some example data
from fooof.sim.gen import gen_power_spectrum
# Import NeuroDSP functions for simulating & processing time series
from neurodsp.sim import sim_combined
from neurodsp.filt import filter_signal
from neurodsp.spectral import compute_spectrum
###################################################################################################
# Line Noise Peaks
# ----------------
#
# Neural recordings typically have power line artifacts, at either 50 or 60 Hz, depending on
# where the data were collected, which can impact spectral parameterization.
#
# In this example, we explore some options for dealing with line noise artifacts.
#
# Interpolating Line Noise Peaks
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# One approach is to interpolate away line noise peaks, in the frequency domain. This
# approach simply gets rid of the peaks, interpolating the data to maintain the 1/f
# character of the data, allowing for subsequent fitting.
#
# The :func:`~fooof.utils.interpolate_spectrum` function allows for doing simple
# interpolation. Given a narrow frequency region, this function interpolates the spectrum,
# such that the 'peak' of the line noise is removed.
#
###################################################################################################
# Generate an example power spectrum, with line noise
freqs1, powers1 = gen_power_spectrum([3, 75], [1, 1],
[[10, 0.75, 2], [60, 1, 0.5]])
# Visualize the generated power spectrum
plot_spectra(freqs1, powers1, log_powers=True)
###################################################################################################
#
# In the plot above, we have an example spectrum, with some power line noise.
#
# To prepare this data for fitting, we can interpolate away the line noise region.
#
###################################################################################################
# Interpolate away the line noise region
interp_range = [58, 62]
freqs_int1, powers_int1 = interpolate_spectrum(freqs1, powers1, interp_range)
###################################################################################################
# Plot the spectra for the power spectra before and after interpolation
plot_spectra(freqs1, [powers1, powers_int1], log_powers=True,
labels=['Original Spectrum', 'Interpolated Spectrum'])
###################################################################################################
#
# As we can see in the above, the interpolation removed the peak from the data.
#
# We can now go ahead and parameterize the spectrum.
#
###################################################################################################
# Initialize a power spectrum model
fm1 = FOOOF(verbose=False)
fm1.report(freqs_int1, powers_int1)
###################################################################################################
# Multiple Interpolation Regions
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Line noise artifacts often also display harmonics, such that when analyzing broader
# frequency ranges, there may be multiple peaks that need to be interpolated.
#
# This can be done by passing in multiple interpolation regions to
# :func:`~fooof.utils.interpolate_spectrum`, which we will do in the next example.
#
###################################################################################################
# Generate an example power spectrum, with line noise & harmonics
freqs2, powers2 = gen_power_spectrum([1, 150], [1, 500, 1.5],
[[10, 0.5, 2], [60, 0.75, 0.5], [120, 0.5, 0.5]])
# Interpolate away the line noise region & harmonics
interp_ranges = [[58, 62], [118, 122]]
freqs_int2, powers_int2 = interpolate_spectrum(freqs2, powers2, interp_ranges)
###################################################################################################
# Plot the power spectrum before and after interpolation
plot_spectra(freqs2, [powers2, powers_int2], log_powers=True,
labels=['Original Spectrum', 'Interpolated Spectrum'])
###################################################################################################
# Parameterize the interpolated power spectrum
fm2 = FOOOF(aperiodic_mode='knee', verbose=False)
fm2.report(freqs2, powers_int2)
###################################################################################################
# Fitting Line Noise as Peaks
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# In some cases, you may also be able to simply allow the parameterization to fit peaks to the
# line noise and harmonics. By fitting the line noise as peaks, the model can account for
# them and still accurately fit the aperiodic component.
#
# These peaks are of course not to be analyzed, but once the model has been fit, you can
# simply ignore them. There should generally be no issue with fitting and having them in
# the model, and allowing the model to account for these peaks typically helps the model
# better fit the rest of the data.
#
# Below we can see that the model does indeed work when fitting data with line noise peaks.
#
###################################################################################################
# Fit power spectrum models to original spectra
fm1.report(freqs1, powers1)
fm2.report(freqs2, powers2)
###################################################################################################
# The Problem with Bandstop Filtering
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# A common approach for getting rid of line noise activity is to use bandstop filtering to
# remove activity at the line noise frequencies. Such a filter effectively sets the power
# of these frequencies to be approximately zero.
#
# Unfortunately, this doesn't work very well with spectral parameterization, since the
# parameterization algorithm tries to fit each power value as either part of the aperiodic
# component, or as an overlying peak. Frequencies that have been filtered out are neither, and
# the model has trouble, as it has no concept of power values below the aperiodic component.
#
# In practice, this means that the "missing" power will impact the fit, and pull down the
# aperiodic component. One way to think of this is that the power spectrum model can deal with,
# and even expects, 'positive outliers' above the aperiodic (these are considered 'peaks'), but
# not 'negative outliers', or values below the aperiodic, as there is no expectation of this
# happening in the model.
#
# In the following example, we can see how bandstop filtering negatively impacts fitting.
# Because of this, for the purposes of spectral parameterization, bandstop filters are not
# recommended as a way to remove line noise.
#
# Note that if one has already applied a bandstop filter, then you can still
# apply the interpolation from above.
#
###################################################################################################
# General settings for the simulation
n_seconds = 30
fs = 1000
# Define the settings for the simulated signal
components = {'sim_powerlaw' : {'exponent' : -1.5},
'sim_oscillation' : [{'freq' : 10}, {'freq' : 60}]}
comp_vars = [0.5, 1, 1]
# Simulate a time series
sig = sim_combined(n_seconds, fs, components, comp_vars)
###################################################################################################
# Bandstop filter the signal to remove line noise frequencies
sig_filt = filter_signal(sig, fs, 'bandstop', (57, 63),
n_seconds=2, remove_edges=False)
###################################################################################################
# Compute a power spectrum of the simulated signal
freqs, powers_pre = trim_spectrum(*compute_spectrum(sig, fs), [3, 75])
freqs, powers_post = trim_spectrum(*compute_spectrum(sig_filt, fs), [3, 75])
###################################################################################################
# Plot the spectrum of the data, pre and post bandstop filtering
plot_spectra(freqs, [powers_pre, powers_post], log_powers=True,
labels=['Pre-Filter', 'Post-Filter'])
###################################################################################################
#
# In the above, we can see that the bandstop filter removes power in the filtered range,
# leaving a "dip" in the power spectrum. This dip causes issues with subsequent fitting.
#
###################################################################################################
# Initialize and fit a power spectrum model
fm = FOOOF()
fm.report(freqs, powers_post)
###################################################################################################
#
# In order to try and capture the data points in the "dip", the power spectrum model
# gets 'pulled' down, leading to an inaccurate fit of the aperiodic component. This is
# why fitting frequency ranges that include regions that have been filtered
# out is not recommended.
#
|
homeassistant/components/uptime/__init__.py
|
domwillcode/home-assistant
| 22,481 |
109901
|
<reponame>domwillcode/home-assistant<filename>homeassistant/components/uptime/__init__.py
"""The uptime component."""
|
tests/formatters/chrome_preferences.py
|
pyllyukko/plaso
| 1,253 |
109920
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Google Chrome Preferences file event formatter."""
import unittest
from plaso.formatters import chrome_preferences
from tests.formatters import test_lib
class ChromePreferencesPrimaryURLFormatterHelperTest(
test_lib.EventFormatterTestCase):
"""Tests for the Google Chrome preferences primary URL formatter helper."""
def testFormatEventValues(self):
"""Tests the FormatEventValues function."""
formatter_helper = (
chrome_preferences.ChromePreferencesPrimaryURLFormatterHelper())
event_values = {'primary_url': 'https://example.com'}
formatter_helper.FormatEventValues(event_values)
self.assertEqual(event_values['primary_url'], 'https://example.com')
event_values = {'primary_url': ''}
formatter_helper.FormatEventValues(event_values)
self.assertEqual(event_values['primary_url'], 'local file')
event_values = {'primary_url': None}
formatter_helper.FormatEventValues(event_values)
self.assertIsNone(event_values['primary_url'])
class ChromePreferencesSecondaryURLFormatterHelperTest(
test_lib.EventFormatterTestCase):
"""Tests for the Google Chrome preferences secondary URL formatter helper."""
def testFormatEventValues(self):
"""Tests the FormatEventValues function."""
formatter_helper = (
chrome_preferences.ChromePreferencesSecondaryURLFormatterHelper())
event_values = {
'primary_url': 'https://example.com',
'secondary_url': 'https://anotherexample.com'}
formatter_helper.FormatEventValues(event_values)
self.assertEqual(
event_values['secondary_url'], 'https://anotherexample.com')
event_values = {
'primary_url': 'https://example.com',
'secondary_url': 'https://example.com'}
formatter_helper.FormatEventValues(event_values)
self.assertIsNone(event_values['secondary_url'])
event_values = {
'primary_url': 'https://example.com',
'secondary_url': ''}
formatter_helper.FormatEventValues(event_values)
self.assertEqual(event_values['secondary_url'], 'local file')
event_values = {
'primary_url': 'https://example.com',
'secondary_url': None}
formatter_helper.FormatEventValues(event_values)
self.assertIsNone(event_values['secondary_url'])
if __name__ == '__main__':
unittest.main()
|
qiskit/opflow/gradients/circuit_qfis/lin_comb_full.py
|
QAMP-Spring-2022-Transpiler-Hackathon/qiskit-terra
| 1,456 |
109933
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The module for Quantum the Fisher Information."""
from typing import List, Union
import numpy as np
from qiskit.circuit import QuantumCircuit, QuantumRegister, ParameterVector, ParameterExpression
from qiskit.utils.arithmetic import triu_to_dense
from ...operator_base import OperatorBase
from ...list_ops.list_op import ListOp
from ...list_ops.summed_op import SummedOp
from ...operator_globals import I, Z, Y
from ...state_fns.state_fn import StateFn
from ...state_fns.circuit_state_fn import CircuitStateFn
from ..circuit_gradients.lin_comb import LinComb
from .circuit_qfi import CircuitQFI
class LinCombFull(CircuitQFI):
r"""Compute the full Quantum Fisher Information (QFI).
    Given a pure, parameterized quantum state, this class uses the linear combination of
    unitaries approach to compute the QFI.
    See also :class:`~qiskit.opflow.QFI`.
"""
# pylint: disable=signature-differs, arguments-differ
def __init__(
self,
aux_meas_op: OperatorBase = Z,
phase_fix: bool = True,
):
"""
Args:
aux_meas_op: The operator that the auxiliary qubit is measured with respect to.
For ``aux_meas_op = Z`` we compute 4Re[(dω⟨ψ(ω)|)O(θ)|ψ(ω)〉],
for ``aux_meas_op = -Y`` we compute 4Im[(dω⟨ψ(ω)|)O(θ)|ψ(ω)〉], and
for ``aux_meas_op = Z - 1j * Y`` we compute 4(dω⟨ψ(ω)|)O(θ)|ψ(ω)〉.
phase_fix: Whether or not to compute and add the additional phase fix term
Re[(dω⟨<ψ(ω)|)|ψ(ω)><ψ(ω)|(dω|ψ(ω))>].
Raises:
ValueError: If the provided auxiliary measurement operator is not supported.
"""
super().__init__()
if aux_meas_op not in [Z, -Y, (Z - 1j * Y)]:
raise ValueError(
"This auxiliary measurement operator is currently not supported. Please choose "
"either Z, -Y, or Z - 1j * Y. "
)
self._aux_meas_op = aux_meas_op
self._phase_fix = phase_fix
def convert(
self,
operator: CircuitStateFn,
params: Union[ParameterExpression, ParameterVector, List[ParameterExpression]],
) -> ListOp:
r"""
Args:
operator: The operator corresponding to the quantum state :math:`|\psi(\omega)\rangle`
for which we compute the QFI.
params: The parameters :math:`\omega` with respect to which we are computing the QFI.
Returns:
A ``ListOp[ListOp]`` where the operator at position ``[k][l]`` corresponds to the matrix
element :math:`k, l` of the QFI.
Raises:
TypeError: If ``operator`` is an unsupported type.
"""
# QFI & phase fix observable
qfi_observable = StateFn(
4 * self._aux_meas_op ^ (I ^ operator.num_qubits), is_measurement=True
)
# Check if the given operator corresponds to a quantum state given as a circuit.
if not isinstance(operator, CircuitStateFn):
raise TypeError(
"LinCombFull is only compatible with states that are given as "
f"CircuitStateFn, not {type(operator)}"
)
# If a single parameter is given wrap it into a list.
if isinstance(params, ParameterExpression):
params = [params]
elif isinstance(params, ParameterVector):
params = params[:] # unroll to list
if self._phase_fix:
# First, the operators are computed which can compensate for a potential phase-mismatch
# between target and trained state, i.e.〈ψ|∂lψ〉
phase_fix_observable = I ^ operator.num_qubits
gradient_states = LinComb(aux_meas_op=(Z - 1j * Y))._gradient_states(
operator,
meas_op=phase_fix_observable,
target_params=params,
open_ctrl=False,
trim_after_grad_gate=True,
)
# pylint: disable=unidiomatic-typecheck
if type(gradient_states) == ListOp:
phase_fix_states = gradient_states.oplist
else:
phase_fix_states = [gradient_states]
        # Get 4 * Re[〈∂kψ|∂lψ〉]
qfi_operators = []
# Add a working qubit
qr_work = QuantumRegister(1, "work_qubit")
state_qc = QuantumCircuit(*operator.primitive.qregs, qr_work)
state_qc.h(qr_work)
# unroll separately from the H gate since we need the H gate to be the first
# operation in the data attributes of the circuit
unrolled = LinComb._transpile_to_supported_operations(
operator.primitive, LinComb.SUPPORTED_GATES
)
state_qc.compose(unrolled, inplace=True)
# Get the circuits needed to compute〈∂iψ|∂jψ〉
for i, param_i in enumerate(params): # loop over parameters
qfi_ops = []
for j, param_j in enumerate(params[i:], i):
# Get the gates of the quantum state which are parameterized by param_i
qfi_op = []
param_gates_i = state_qc._parameter_table[param_i]
for gate_i, idx_i in param_gates_i:
grad_coeffs_i, grad_gates_i = LinComb._gate_gradient_dict(gate_i)[idx_i]
# get the location of gate_i, used for trimming
location_i = None
for idx, (op, _, _) in enumerate(state_qc._data):
if op is gate_i:
location_i = idx
break
for grad_coeff_i, grad_gate_i in zip(grad_coeffs_i, grad_gates_i):
# Get the gates of the quantum state which are parameterized by param_j
param_gates_j = state_qc._parameter_table[param_j]
for gate_j, idx_j in param_gates_j:
grad_coeffs_j, grad_gates_j = LinComb._gate_gradient_dict(gate_j)[idx_j]
# get the location of gate_j, used for trimming
location_j = None
for idx, (op, _, _) in enumerate(state_qc._data):
if op is gate_j:
location_j = idx
break
for grad_coeff_j, grad_gate_j in zip(grad_coeffs_j, grad_gates_j):
grad_coeff_ij = np.conj(grad_coeff_i) * grad_coeff_j
qfi_circuit = LinComb.apply_grad_gate(
state_qc,
gate_i,
idx_i,
grad_gate_i,
grad_coeff_ij,
qr_work,
open_ctrl=True,
trim_after_grad_gate=(location_j < location_i),
)
# create a copy of the original circuit with the same registers
qfi_circuit = LinComb.apply_grad_gate(
qfi_circuit,
gate_j,
idx_j,
grad_gate_j,
1,
qr_work,
open_ctrl=False,
trim_after_grad_gate=(location_j >= location_i),
)
qfi_circuit.h(qr_work)
# Convert the quantum circuit into a CircuitStateFn and add the
# coefficients i, j and the original operator coefficient
coeff = operator.coeff
coeff *= np.sqrt(np.abs(grad_coeff_i) * np.abs(grad_coeff_j))
state = CircuitStateFn(qfi_circuit, coeff=coeff)
param_grad = 1
for gate, idx, param in zip(
[gate_i, gate_j], [idx_i, idx_j], [param_i, param_j]
):
param_expression = gate.params[idx]
param_grad *= param_expression.gradient(param)
meas = param_grad * qfi_observable
term = meas @ state
qfi_op.append(term)
# Compute −4 * Re(〈∂kψ|ψ〉〈ψ|∂lψ〉)
def phase_fix_combo_fn(x):
return -4 * np.real(x[0] * np.conjugate(x[1]))
if self._phase_fix:
phase_fix_op = ListOp(
[phase_fix_states[i], phase_fix_states[j]], combo_fn=phase_fix_combo_fn
)
# Add the phase fix quantities to the entries of the QFI
# Get 4 * Re[〈∂kψ|∂lψ〉−〈∂kψ|ψ〉〈ψ|∂lψ〉]
qfi_ops += [SummedOp(qfi_op) + phase_fix_op]
else:
qfi_ops += [SummedOp(qfi_op)]
qfi_operators.append(ListOp(qfi_ops))
# Return estimate of the full QFI -- A QFI is by definition positive semi-definite.
return ListOp(qfi_operators, combo_fn=triu_to_dense)
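# Hypothetical usage sketch (an editorial assumption, not part of the original module):
# build the QFI operator for a small parameterized circuit and evaluate it numerically.
#
#   from qiskit.circuit import Parameter
#   theta = Parameter("theta")
#   qc = QuantumCircuit(1)
#   qc.h(0)
#   qc.rz(theta, 0)
#   qfi_op = LinCombFull().convert(CircuitStateFn(qc), [theta])
#   print(qfi_op.assign_parameters({theta: 0.1}).eval())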
|
plenum/test/node_catchup/test_catchup_from_unequal_nodes_without_waiting.py
|
andkononykhin/plenum
| 148 |
109936
|
<reponame>andkononykhin/plenum
import pytest
from plenum.common.messages.node_messages import Commit
from plenum.server.catchup.node_leecher_service import NodeLeecherService
from plenum.test.delayers import delay_3pc
from plenum.test.helper import sdk_send_random_and_check, max_3pc_batch_limits, assert_eq, sdk_send_random_requests, \
sdk_get_replies, sdk_get_and_check_replies
from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
from plenum.test.stasher import delay_rules_without_processing, delay_rules
from stp_core.loop.eventually import eventually
@pytest.fixture(scope="module")
def tconf(tconf):
with max_3pc_batch_limits(tconf, size=1) as tconf:
old_cons_proof_timeout = tconf.ConsistencyProofsTimeout
# Effectively disable resending cons proof requests after timeout
tconf.ConsistencyProofsTimeout = 1000
yield tconf
tconf.ConsistencyProofsTimeout = old_cons_proof_timeout
def test_catchup_from_unequal_nodes_without_waiting(looper,
txnPoolNodeSet,
sdk_pool_handle,
sdk_wallet_client):
normal_node = txnPoolNodeSet[0]
lagging_node_1 = txnPoolNodeSet[1]
lagging_node_2 = txnPoolNodeSet[2]
stopped_node = txnPoolNodeSet[3]
# Make sure everyone have one batch
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1)
# Wait until all nodes have same data and store last 3PC number of node that's going to be "stopped"
ensure_all_nodes_have_same_data(looper, txnPoolNodeSet, custom_timeout=30)
last_3pc = stopped_node.master_last_ordered_3PC
with delay_rules_without_processing(stopped_node.nodeIbStasher, delay_3pc()):
# Create one more batch on all nodes except "stopped" node
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1)
with delay_rules(lagging_node_1.nodeIbStasher, delay_3pc(msgs=Commit)):
# Create one more batch on all nodes except "stopped" and first lagging node
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1)
with delay_rules(lagging_node_2.nodeIbStasher, delay_3pc(msgs=Commit)):
# Create one more batch on all nodes except "stopped" and both lagging nodes
# This time we can't wait for replies because there will be only one
reqs = sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)
# Wait until normal node orders txn
looper.run(eventually(lambda: assert_eq(normal_node.master_last_ordered_3PC[1],
last_3pc[1] + 3)))
# Now all nodes have different number of txns, so if we try to start a catch up
# it is guaranteed that we'll need to ask for equal consistency proofs, and
# disabled timeout ensures that node can do so without relying on timeout
stopped_node.start_catchup()
# Wait until catchup ends
looper.run(eventually(lambda: assert_eq(stopped_node.ledgerManager._node_leecher._state,
NodeLeecherService.State.Idle)))
# Ensure stopped node caught up at least one batch
assert stopped_node.master_last_ordered_3PC[1] > last_3pc[1]
# And there was no view change
assert stopped_node.master_last_ordered_3PC[0] == last_3pc[0]
# Make sure replies from last request are eventually received
sdk_get_and_check_replies(looper, reqs)
|
utils/tests/test_text_utils.py
|
nathandarnell/sal
| 215 |
109960
|
<reponame>nathandarnell/sal
"""General functional tests for the text_utils module."""
from django.test import TestCase
from utils import text_utils
class TextUtilsTest(TestCase):
"""Test the Utilities module."""
def test_safe_text_null(self):
"""Ensure that null characters are dropped."""
original = '\x00'
self.assertTrue(text_utils.safe_text(original) == '')
        self.assertTrue(text_utils.safe_text(original.encode()) == '')
def test_listify_basic(self):
"""Ensure non-collection data is only str converted."""
catalogs = 'testing'
result = text_utils.stringify(catalogs)
self.assertEqual(result, catalogs)
self.assertTrue(isinstance(result, str))
# Bool, int, float, dict
tests = (False, 5, 5.0, {'a': 'test'})
for test in tests:
self.assertEqual(text_utils.stringify(test), str(test))
def test_listify_list(self):
"""Ensure list data can be converted to strings."""
catalogs = ['testing', 'phase', 'production']
result = text_utils.stringify(catalogs)
self.assertEqual(result, ', '.join(catalogs))
def test_listify_dict(self):
"""Ensure dict data can be converted to strings."""
catalogs = ['testing', 'phase', {'key': 'value'}]
result = text_utils.stringify(catalogs)
self.assertEqual(result, "testing, phase, {'key': 'value'}")
def test_listify_non_str_types(self):
"""Ensure nested non-str types are converted."""
catalogs = [5, 5.0, {'a': 'test'}]
result = text_utils.stringify(catalogs)
self.assertEqual(result, "5, 5.0, {'a': 'test'}")
|
tt/expressions/__init__.py
|
fkromer/tt
| 233 |
109970
|
<reponame>fkromer/tt
"""Tools for working with Boolean expressions."""
from .bexpr import BooleanExpression # noqa
|
examples/window.py
|
superzazu/pylibui
| 222 |
109982
|
"""
Shows an empty window.
"""
from pylibui.core import App
from pylibui.controls import Window
class MyWindow(Window):
def onClose(self, data):
super().onClose(data)
app.stop()
app = App()
window = MyWindow('Window', 800, 600)
window.setMargined(True)
window.show()
app.start()
app.close()
|
btalib/utils.py
|
demattia/bta-lib
| 352 |
110009
|
#!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
# Copyright (C) 2020 <NAME>
# Use of this source code is governed by the MIT License
###############################################################################
__all__ = [
'SEED_AVG', 'SEED_LAST', 'SEED_SUM', 'SEED_NONE', 'SEED_ZERO',
'SEED_ZFILL',
'_INCPERIOD', '_DECPERIOD', '_MINIDX',
'_SERIES', '_MPSERIES',
'_SETVAL', '_MPSETVAL',
]
SEED_AVG = 0
SEED_LAST = 1
SEED_SUM = 2
SEED_NONE = 4
SEED_ZERO = 5
SEED_ZFILL = 6
def _INCPERIOD(x, p=1):
'''
Forces an increase `p` in the minperiod of object `x`.
Example: `ta-lib` calculates `+DM` a period of 1 too early, but calculates
the depending `+DI` from the right starting point. Increasing the period,
without changing the underlying already calculated `+DM` values, allows the
`+DI` values to be right
'''
x._minperiod += p
def _DECPERIOD(x, p=1):
'''
    Forces a decrease `p` in the minperiod of object `x`.
Example: `ta-lib` calculates `obv` already when the period is `1`,
discarding the needed "close" to "previous close" comparison. The only way
to take this into account is to decrease the delivery period of the
comparison by 1 to start the calculation before (and using a fixed
criterion as to what to do in the absence of a valid close to close
comparison)
'''
x._minperiod -= p
def _MINIDX(x, p=0):
'''
Delivers the index to an array which corresponds to `_minperiod` offset by
`p`. This allow direct manipulation of single values in arrays like in the
`obv` scenario in which a seed value is needed for the 1st delivered value
(in `ta-lib` mode) because no `close` to `previous close` comparison is
possible.
'''
return x._minperiod - 1 + p
def _SERIES(x):
'''Macro like function which makes clear that one is retrieving the actual
    underlying series and not a wrapped version'''
return x._series
def _MPSERIES(x):
'''Macro like function which makes clear that one is retrieving the actual
underlying series, sliced starting at the MINPERIOD of the series'''
return x._series[x._minperiod - 1:]
def _SETVAL(x, idx, val):
'''Macro like function which makes clear that one is setting a value in the
underlying series'''
x._series[idx] = val
def _MPSETVAL(x, idx, val):
    '''Macro like function which makes clear that one is setting a value in the
    underlying series, offset by the MINPERIOD of the series'''
x._series[x._minperiod - 1 + idx] = val
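# Minimal usage sketch (an editorial assumption, not part of the original module):
# the helpers above only rely on objects exposing `_series` and `_minperiod`.
if __name__ == "__main__":
    class _Dummy:
        def __init__(self):
            self._series = [0.0] * 5
            self._minperiod = 3
    d = _Dummy()
    _INCPERIOD(d)                  # minperiod: 3 -> 4
    _SETVAL(d, _MINIDX(d), 42.0)   # write at the current delivery index (3)
    print(d._minperiod, _MPSERIES(d))   # 4 [42.0, 0.0]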
|
ConvertShellcode.py
|
rvrsh3ll/CPLResourceRunner
| 232 |
110036
|
<reponame>rvrsh3ll/CPLResourceRunner<filename>ConvertShellcode.py
#!/usr/bin/env python
import binascii
import sys
file_name = sys.argv[1]
with open (file_name) as f:
hexdata = binascii.hexlify(f.read())
hexlist = map(''.join, zip(hexdata[::2], hexdata[1::2]))
shellcode = ''
for i in hexlist:
shellcode += "0x{},".format(i)
shellcode = shellcode[:-1]
output = open('shellcode.txt', 'w')
output.write(shellcode)
output.close()
print "Shellcode written to shellcode.txt"
|
mopp/mopp_lib.py
|
diwashsapkota/jupyter-text2code
| 2,147 |
110046
|
<reponame>diwashsapkota/jupyter-text2code
import json
from IPython import get_ipython
from IPython.core.magics.namespace import NamespaceMagics
_nms = NamespaceMagics()
_Jupyter = get_ipython()
_nms.shell = _Jupyter.kernel.shell
def dataframes_info():
values = _nms.who_ls()
info = {v: (eval(v).columns.tolist()) for v in values if type(eval(v)).__name__ == 'DataFrame'}
return json.dumps(info)
|
river/datasets/synth/friedman.py
|
fox-ds/river
| 2,184 |
110055
|
import math
import random
from typing import Tuple
from .. import base
class Friedman(base.SyntheticDataset):
"""Friedman synthetic dataset.
Each observation is composed of 10 features. Each feature value is sampled uniformly in [0, 1].
The target is defined by the following function:
$$y = 10 sin(\\pi x_0 x_1) + 20 (x_2 - 0.5)^2 + 10 x_3 + 5 x_4 + \\epsilon$$
In the last expression, $\\epsilon \\sim \\mathcal{N}(0, 1)$, is the noise. Therefore,
only the first 5 features are relevant.
Parameters
----------
seed
Random seed number used for reproducibility.
Examples
--------
>>> from river import synth
>>> dataset = synth.Friedman(seed=42)
>>> for x, y in dataset.take(5):
... print(list(x.values()), y)
[0.63, 0.02, 0.27, 0.22, 0.73, 0.67, 0.89, 0.08, 0.42, 0.02] 7.66
[0.02, 0.19, 0.64, 0.54, 0.22, 0.58, 0.80, 0.00, 0.80, 0.69] 8.33
[0.34, 0.15, 0.95, 0.33, 0.09, 0.09, 0.84, 0.60, 0.80, 0.72] 7.04
[0.37, 0.55, 0.82, 0.61, 0.86, 0.57, 0.70, 0.04, 0.22, 0.28] 18.16
[0.07, 0.23, 0.10, 0.27, 0.63, 0.36, 0.37, 0.20, 0.26, 0.93] 8.90
References
----------
[^1]: [<NAME>., 1991. Multivariate adaptive regression splines. The annals of statistics, pp.1-67.](https://projecteuclid.org/euclid.aos/1176347963)
"""
def __init__(self, seed: int = None):
super().__init__(task=base.REG, n_features=10)
self.seed = seed
def __iter__(self):
rng = random.Random(self.seed)
while True:
x = {i: rng.uniform(a=0, b=1) for i in range(10)}
y = (
10 * math.sin(math.pi * x[0] * x[1])
+ 20 * (x[2] - 0.5) ** 2
+ 10 * x[3]
+ 5 * x[4]
+ rng.gauss(mu=0, sigma=1)
)
yield x, y
class FriedmanDrift(Friedman):
"""Friedman synthetic dataset with concept drifts.
Each observation is composed of 10 features. Each feature value is sampled uniformly in [0, 1].
Only the first 5 features are relevant. The target is defined by different functions depending
on the type of the drift.
The three available modes of operation of the data generator are described in [^1].
Parameters
----------
drift_type
The variant of concept drift.</br>
- `'lea'`: Local Expanding Abrupt drift. The concept drift appears in two distinct
regions of the instance space, while the remaining regions are left unaltered.
There are three points of abrupt change in the training dataset.
At every consecutive change the regions of drift are expanded.</br>
- `'gra'`: Global Recurring Abrupt drift. The concept drift appears over the whole
instance space. There are two points of concept drift. At the second point of drift
the old concept reoccurs.</br>
- `'gsg'`: Global and Slow Gradual drift. The concept drift affects all the instance
space. However, the change is gradual and not abrupt. After each one of the two change
points covered by this variant, and during a window of length `transition_window`,
examples from both old and the new concepts are generated with equal probability.
After the transition period, only the examples from the new concept are generated.
position
The amount of monitored instances after which each concept drift occurs. A tuple with
at least two element must be passed, where each number is greater than the preceding one.
If `drift_type='lea'`, then the tuple must have three elements.
transition_window
The length of the transition window between two concepts. Only applicable when
`drift_type='gsg'`. If set to zero, the drifts will be abrupt. Anytime
`transition_window > 0`, it defines a window in which instances of the new
concept are gradually introduced among the examples from the old concept.
During this transition phase, both old and new concepts appear with equal probability.
seed
Random seed number used for reproducibility.
Examples
--------
>>> from river import synth
>>> dataset = synth.FriedmanDrift(
... drift_type='lea',
... position=(1, 2, 3),
... seed=42
... )
>>> for x, y in dataset.take(5):
... print(list(x.values()), y)
[0.63, 0.02, 0.27, 0.22, 0.73, 0.67, 0.89, 0.08, 0.42, 0.02] 7.66
[0.02, 0.19, 0.64, 0.54, 0.22, 0.58, 0.80, 0.00, 0.80, 0.69] 8.33
[0.34, 0.15, 0.95, 0.33, 0.09, 0.09, 0.84, 0.60, 0.80, 0.72] 7.04
[0.37, 0.55, 0.82, 0.61, 0.86, 0.57, 0.70, 0.04, 0.22, 0.28] 18.16
[0.07, 0.23, 0.10, 0.27, 0.63, 0.36, 0.37, 0.20, 0.26, 0.93] -2.65
>>> dataset = synth.FriedmanDrift(
... drift_type='gra',
... position=(2, 3),
... seed=42
... )
>>> for x, y in dataset.take(5):
... print(list(x.values()), y)
[0.63, 0.02, 0.27, 0.22, 0.73, 0.67, 0.89, 0.08, 0.42, 0.02] 7.66
[0.02, 0.19, 0.64, 0.54, 0.22, 0.58, 0.80, 0.00, 0.80, 0.69] 8.33
[0.34, 0.15, 0.95, 0.33, 0.09, 0.09, 0.84, 0.60, 0.80, 0.72] 8.96
[0.37, 0.55, 0.82, 0.61, 0.86, 0.57, 0.70, 0.04, 0.22, 0.28] 18.16
[0.07, 0.23, 0.10, 0.27, 0.63, 0.36, 0.37, 0.20, 0.26, 0.93] 8.90
>>> dataset = synth.FriedmanDrift(
... drift_type='gsg',
... position=(1, 4),
... transition_window=2,
... seed=42
... )
>>> for x, y in dataset.take(5):
... print(list(x.values()), y)
[0.63, 0.02, 0.27, 0.22, 0.73, 0.67, 0.89, 0.08, 0.42, 0.02] 7.66
[0.02, 0.19, 0.64, 0.54, 0.22, 0.58, 0.80, 0.00, 0.80, 0.69] 8.33
[0.34, 0.15, 0.95, 0.33, 0.09, 0.09, 0.84, 0.60, 0.80, 0.72] 8.92
[0.37, 0.55, 0.82, 0.61, 0.86, 0.57, 0.70, 0.04, 0.22, 0.28] 17.32
[0.07, 0.23, 0.10, 0.27, 0.63, 0.36, 0.37, 0.20, 0.26, 0.93] 6.05
References
----------
[^1]: <NAME>., <NAME>. and <NAME>., 2011. Learning model trees from evolving
data streams. Data mining and knowledge discovery, 23(1), pp.128-168.
"""
_LOCAL_EXPANDING_ABRUPT = "lea"
_GLOBAL_RECURRING_ABRUPT = "gra"
_GLOBAL_AND_SLOW_GRADUAL = "gsg"
_VALID_DRIFT_TYPES = [
_LOCAL_EXPANDING_ABRUPT,
_GLOBAL_RECURRING_ABRUPT,
_GLOBAL_AND_SLOW_GRADUAL,
]
def __init__(
self,
drift_type: str = "lea",
position: Tuple[int, ...] = (50_000, 100_000, 150_000),
transition_window: int = 10_000,
seed: int = None,
):
super().__init__(seed=seed)
if drift_type not in self._VALID_DRIFT_TYPES:
raise ValueError(
f'Invalid "drift_type: {drift_type}"\n'
f"Valid options are: {self._VALID_DRIFT_TYPES}"
)
self.drift_type = drift_type
if self.drift_type == self._LOCAL_EXPANDING_ABRUPT and len(position) < 3:
raise ValueError(
"Insufficient number of concept drift locations passed.\n"
'Three concept drift points should be passed when drift_type=="lea"'
)
elif self.drift_type != self._LOCAL_EXPANDING_ABRUPT and len(position) < 2:
raise ValueError(
"Insufficient number of concept drift locations passed.\n"
"Two locations must be defined."
)
elif len(position) > 3:
raise ValueError(
"Too many concept drift locations passed. Check the documentation"
"for details on the usage of this class."
)
self.position = position
if self.drift_type == self._LOCAL_EXPANDING_ABRUPT:
(
self._change_point1,
self._change_point2,
self._change_point3,
) = self.position
else:
self._change_point1, self._change_point2 = self.position
self._change_point3 = math.inf
if not self._change_point1 < self._change_point2 < self._change_point3:
raise ValueError(
"The concept drift locations must be defined in an increasing order."
)
if (
transition_window > self._change_point2 - self._change_point1
or transition_window > self._change_point3 - self._change_point2
) and self.drift_type == self._GLOBAL_AND_SLOW_GRADUAL:
raise ValueError(
f'The chosen "transition_window" value is too big: {transition_window}'
)
self.transition_window = transition_window
if self.drift_type == self._LOCAL_EXPANDING_ABRUPT:
self._y_maker = self._local_expanding_abrupt_gen
elif self.drift_type == self._GLOBAL_RECURRING_ABRUPT:
self._y_maker = self._global_recurring_abrupt_gen
else: # Global and slow gradual drifts
self._y_maker = self._global_and_slow_gradual_gen
def __lea_in_r1(self, x, index):
if index < self._change_point1:
return False
elif self._change_point1 <= index < self._change_point2:
return x[1] < 0.3 and x[2] < 0.3 and x[3] > 0.7 and x[4] < 0.3
elif self._change_point2 <= index < self._change_point3:
return x[1] < 0.3 and x[2] < 0.3 and x[3] > 0.7
else:
return x[1] < 0.3 and x[2] < 0.3
def __lea_in_r2(self, x, index):
if index < self._change_point1:
return False
elif self._change_point1 <= index < self._change_point2:
return x[1] > 0.7 and x[2] > 0.7 and x[3] < 0.3 and x[4] > 0.7
elif self._change_point2 <= index < self._change_point3:
return x[1] > 0.7 and x[2] > 0.7 and x[3] < 0.3
else:
return x[1] > 0.7 and x[2] > 0.7
def _local_expanding_abrupt_gen(
self, x, index: int, rc: random.Random = None
): # noqa
if self.__lea_in_r1(x, index):
return 10 * x[0] * x[1] + 20 * (x[2] - 0.5) + 10 * x[3] + 5 * x[4]
if self.__lea_in_r2(x, index):
return (
10 * math.cos(x[0] * x[1])
+ 20 * (x[2] - 0.5)
+ math.exp(x[3])
+ 5 * x[4] ** 2
)
# default case
return (
10 * math.sin(math.pi * x[0] * x[1])
+ 20 * (x[2] - 0.5) ** 2
+ 10 * x[3]
+ 5 * x[4]
)
def _global_recurring_abrupt_gen(
self, x, index: int, rc: random.Random = None
): # noqa
if index < self._change_point1 or index >= self._change_point2:
# The initial concept is recurring
return (
10 * math.sin(math.pi * x[0] * x[1])
+ 20 * (x[2] - 0.5) ** 2
+ 10 * x[3]
+ 5 * x[4]
)
else:
# Drift: the positions of the features are swapped
return (
10 * math.sin(math.pi * x[3] * x[5])
+ 20 * (x[1] - 0.5) ** 2
+ 10 * x[0]
+ 5 * x[2]
)
def _global_and_slow_gradual_gen(self, x, index: int, rc: random.Random):
if index < self._change_point1:
# default function
return (
10 * math.sin(math.pi * x[0] * x[1])
+ 20 * (x[2] - 0.5) ** 2
+ 10 * x[3]
+ 5 * x[4]
)
elif self._change_point1 <= index < self._change_point2:
if index < self._change_point1 + self.transition_window and bool(
rc.getrandbits(1)
):
# default function
return (
10 * math.sin(math.pi * x[0] * x[1])
+ 20 * (x[2] - 0.5) ** 2
+ 10 * x[3]
+ 5 * x[4]
)
else: # First new function
return (
10 * math.sin(math.pi * x[3] * x[4])
+ 20 * (x[1] - 0.5) ** 2
+ 10 * x[0]
+ 5 * x[2]
)
elif index >= self._change_point2:
if index < self._change_point2 + self.transition_window and bool(
rc.getrandbits(1)
):
# First new function
return (
10 * math.sin(math.pi * x[3] * x[4])
+ 20 * (x[1] - 0.5) ** 2
+ 10 * x[0]
+ 5 * x[2]
)
else: # Second new function
return (
10 * math.sin(math.pi * x[1] * x[4])
+ 20 * (x[3] - 0.5) ** 2
+ 10 * x[2]
+ 5 * x[0]
)
def __iter__(self):
rng = random.Random(self.seed)
# To produce True or False with equal probability. Only used in gradual drifts
if self.drift_type == self._GLOBAL_AND_SLOW_GRADUAL:
rc = random.Random(self.seed)
else:
rc = None
i = 0
while True:
x = {i: rng.uniform(a=0, b=1) for i in range(10)}
y = self._y_maker(x, i, rc) + rng.gauss(mu=0, sigma=1)
yield x, y
i += 1
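# --- Illustrative usage sketch (not part of the original class) ---
# A minimal, self-contained check of the generator: it prints the mean of `y`
# before and after the first drift point. The drift positions below are
# arbitrary illustrative values; the concept changes at the drift point even
# if the marginal mean of the target stays similar.
if __name__ == "__main__":  # pragma: no cover
    _gen = FriedmanDrift(drift_type="gra", position=(1_000, 2_000), seed=7)
    _before, _after = [], []
    for _i, (_x, _y) in enumerate(_gen.take(2_000)):
        (_before if _i < 1_000 else _after).append(_y)
    print(sum(_before) / len(_before), sum(_after) / len(_after))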
|
models/warp.py
|
RNubla/FeatureFlow
| 161 |
110069
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class backWarp(nn.Module):
"""
A class for creating a backwarping object.
This is used for backwarping to an image:
Given optical flow from frame I0 to I1 --> F_0_1 and frame I1,
it generates I0 <-- backwarp(F_0_1, I1).
...
Methods
-------
forward(x)
Returns output tensor after passing input `img` and `flow` to the backwarping
block.
"""
def __init__(self, H, W):
"""
Parameters
----------
W : int
width of the image.
H : int
height of the image.
"""
super(backWarp, self).__init__()
# create a grid
gridX, gridY = np.meshgrid(np.arange(W), np.arange(H))
self.W = W
self.H = H
self.gridX = torch.nn.Parameter(torch.tensor(gridX), requires_grad=False)
self.gridY = torch.nn.Parameter(torch.tensor(gridY), requires_grad=False)
def forward(self, img, flow):
"""
Returns output tensor after passing input `img` and `flow` to the backwarping
block.
I0 = backwarp(I1, F_0_1)
Parameters
----------
img : tensor
frame I1.
flow : tensor
optical flow from I0 and I1: F_0_1.
Returns
-------
tensor
frame I0.
"""
# Extract horizontal and vertical flows.
u = flow[:, 0, :, :]
v = flow[:, 1, :, :]
x = self.gridX.unsqueeze(0).expand_as(u).float() + u
y = self.gridY.unsqueeze(0).expand_as(v).float() + v
# range -1 to 1
x = 2*(x/self.W - 0.5)
y = 2*(y/self.H - 0.5)
# stacking X and Y
grid = torch.stack((x,y), dim=3)
# Sample pixels using bilinear interpolation.
imgOut = torch.nn.functional.grid_sample(img, grid, padding_mode='border')
return imgOut
# Creating an array of `t` values for the 7 intermediate frames between
# reference frames I0 and I1.
class Coeff(nn.Module):
def __init__(self):
super(Coeff, self).__init__()
self.t = torch.nn.Parameter(torch.FloatTensor(np.linspace(0.125, 0.875, 7)), requires_grad=False)
def getFlowCoeff (self, indices):
"""
Gets flow coefficients used for calculating intermediate optical
flows from optical flows between I0 and I1: F_0_1 and F_1_0.
F_t_0 = C00 x F_0_1 + C01 x F_1_0
F_t_1 = C10 x F_0_1 + C11 x F_1_0
where,
C00 = -(1 - t) x t
C01 = t x t
C10 = (1 - t) x (1 - t)
C11 = -t x (1 - t)
Parameters
----------
indices : tensor
indices corresponding to the intermediate frame positions
of all samples in the batch.
Returns
-------
tensor
coefficients C00, C01, C10, C11.
"""
# Detach the frame-position indices before using them to index the time steps
ind = indices.detach()
C11 = C00 = - (1 - (self.t[ind])) * (self.t[ind])
C01 = (self.t[ind]) * (self.t[ind])
C10 = (1 - (self.t[ind])) * (1 - (self.t[ind]))
return C00[None, None, None, :].permute(3, 0, 1, 2), C01[None, None, None, :].permute(3, 0, 1, 2), C10[None, None, None, :].permute(3, 0, 1, 2), C11[None, None, None, :].permute(3, 0, 1, 2)
def getWarpCoeff (self, indices):
"""
Gets coefficients used for calculating final intermediate
frame `It_gen` from backwarped images using flows F_t_0 and F_t_1.
It_gen = (C0 x V_t_0 x g_I_0_F_t_0 + C1 x V_t_1 x g_I_1_F_t_1) / (C0 x V_t_0 + C1 x V_t_1)
where,
C0 = 1 - t
C1 = t
V_t_0, V_t_1 --> visibility maps
g_I_0_F_t_0, g_I_1_F_t_1 --> backwarped intermediate frames
Parameters
----------
indices : tensor
indices corresponding to the intermediate frame positions
of all samples in the batch.
Returns
-------
tensor
coefficients C0 and C1.
"""
# Detach the frame-position indices before using them to index the time steps
ind = indices.detach()
C0 = 1 - self.t[ind]
C1 = self.t[ind]
return C0[None, None, None, :].permute(3, 0, 1, 2), C1[None, None, None, :].permute(3, 0, 1, 2)
def set_t(self, factor):
ti = 1 / factor
self.t = torch.nn.Parameter(torch.FloatTensor(np.linspace(ti, 1 - ti, factor - 1)), requires_grad=False)
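# --- Illustrative usage sketch (not part of the original module) ---
# A minimal shape check: warp a random batch of frames with a random flow
# field, then fetch interpolation coefficients for a batch of intermediate
# frame indices. All tensor sizes below are arbitrary.
if __name__ == '__main__':  # pragma: no cover
    H, W = 64, 96
    warp = backWarp(H, W)
    img = torch.rand(2, 3, H, W)    # batch of two RGB frames (I1)
    flow = torch.rand(2, 2, H, W)   # optical flow F_0_1
    out = warp(img, flow)           # approximation of I0
    print(out.shape)                # torch.Size([2, 3, 64, 96])

    coeff = Coeff()
    idx = torch.tensor([0, 3])      # intermediate positions for each sample
    C0, C1 = coeff.getWarpCoeff(idx)
    print(C0.shape, C1.shape)       # torch.Size([2, 1, 1, 1]) each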
|
src/intensio_obfuscator/core/obfuscation/intensio_delete.py
|
bbhunter/Intensio-Obfuscator
| 553 |
110075
|
# -*- coding: utf-8 -*-
# https://github.com/Hnfull/Intensio-Obfuscator
#---------------------------------------------------------- [Lib] -----------------------------------------------------------#
import re
import fileinput
import os
import sys
from progress.bar import Bar
try:
from intensio_obfuscator.core.utils.intensio_utils import Utils, Reg
except ModuleNotFoundError:
from core.utils.intensio_utils import Utils, Reg
#------------------------------------------------- [Function(s)/Class(es)] --------------------------------------------------#
class Delete:
def __init__(self):
self.utils = Utils()
def LinesSpaces(self, outputArg, verboseArg):
checkLinesSpace = {}
checkEmptyLineOutput = 0
checkEmptyLineInput = 0
countRecursFiles = 0
numberLine = 0
recursFiles = self.utils.CheckFileDir(
output=outputArg,
detectFiles="py",
blockDir="__pycache__",
blockFile=False,
dirOnly=False
)
for file in recursFiles:
countRecursFiles += 1
# -- Delete all empty lines -- #
with Bar("Obfuscation ", fill="=", max=countRecursFiles, suffix="%(percent)d%%") as bar:
for file in recursFiles:
with fileinput.FileInput(file, inplace=True) as inputFile:
for eachLine in inputFile:
if re.match(Reg.detectLineEmpty, eachLine):
checkEmptyLineInput += 1
pass
else:
sys.stdout.write(eachLine)
bar.next(1)
bar.finish()
with Bar("Check ", fill="=", max=countRecursFiles, suffix="%(percent)d%%") as bar:
for file in recursFiles:
numberLine = 0
with open(file, "r") as readFile:
readF = readFile.readlines()
for eachLine in readF:
numberLine += 1
if re.match(Reg.detectLineEmpty, eachLine):
checkLinesSpace[numberLine] = file
checkEmptyLineOutput += 1
bar.next(1)
bar.finish()
if checkEmptyLineOutput == 0:
return 1
else:
if verboseArg:
print("\n[!] Empty line that not been deleted... :\n")
for key, value in checkLinesSpace.items():
print("\n-> File : {}".format(value))
print("-> Line : {}".format(key))
else:
print("\n[*] Empty line that deleted : {}\n".format(checkEmptyLineInput))
return 0
def Comments(self, outputArg, verboseArg):
getIndexList = []
filesConcerned = []
eachLineListCheckIndex = []
countLineCommentOutput = 0
countLineCommentInput = 0
multipleLinesComments = 0
countRecursFiles = 0
noCommentsQuotes = 0
getIndex = 0
detectIntoSimpleQuotes = None
eachLineCheckIndex = ""
recursFiles = self.utils.CheckFileDir(
output=outputArg,
detectFiles="py",
blockDir="__pycache__",
blockFile=False,
dirOnly=False
)
for i in recursFiles:
countRecursFiles += 1
# -- Delete comments and count comments will be deleted -- #
print("\n[+] Running delete comments in {} file(s)...\n".format(countRecursFiles))
with Bar("Obfuscation ", fill="=", max=countRecursFiles, suffix="%(percent)d%%") as bar:
for file in recursFiles:
with fileinput.input(file, inplace=True) as inputFile:
for eachLine in inputFile:
if re.match(Reg.pythonFileHeader, eachLine):
sys.stdout.write(eachLine)
else:
if multipleLinesComments == 1:
if re.match(Reg.quotesCommentsEndMultipleLines, eachLine):
if self.utils.VerifyMultipleLinesComments(eachLine) == True:
if multipleLinesComments == 1:
countLineCommentInput += 1
multipleLinesComments = 0
else:
countLineCommentInput += 1
else:
countLineCommentInput += 1
elif noCommentsQuotes == 1:
if re.match(Reg.checkIfEndVarStdoutMultipleQuotes, eachLine):
sys.stdout.write(eachLine)
noCommentsQuotes = 0
else:
sys.stdout.write(eachLine)
else:
if re.match(Reg.quotesCommentsOneLine, eachLine):
countLineCommentInput += 1
else:
if re.match(Reg.quotesCommentsMultipleLines, eachLine):
if self.utils.VerifyMultipleLinesComments(eachLine) == True:
countLineCommentInput += 1
multipleLinesComments = 1
else:
sys.stdout.write(eachLine)
else:
if re.match(Reg.checkIfStdoutMultipleQuotes, eachLine) \
or re.match(Reg.checkIfVarMultipleQuotes, eachLine):
sys.stdout.write(eachLine)
noCommentsQuotes = 1
elif re.match(Reg.checkIfRegexMultipleQuotes, eachLine):
sys.stdout.write(eachLine)
else:
sys.stdout.write(eachLine)
with fileinput.input(file, inplace=True) as inputFile:
for eachLine in inputFile:
if re.match(Reg.pythonFileHeader, eachLine):
sys.stdout.write(eachLine)
else:
if re.match(Reg.hashCommentsBeginLine, eachLine):
countLineCommentInput += 1
elif re.match(Reg.hashCommentsAfterLine, eachLine):
eachLineList = list(eachLine)
getIndexList = []
for i, v in enumerate(eachLineList):
if v == "#":
getIndexList.append(i)
for i in getIndexList:
if self.utils.DetectIntoSimpleQuotes(eachLine, maxIndexLine=i) == False:
countLineCommentInput += 1
detectIntoSimpleQuotes = False
break
else:
continue
if detectIntoSimpleQuotes == False:
for i in getIndexList:
eachLineListCheckIndex = eachLineList[:i]
eachLineListCheckIndex.append("\n")
eachLineCheckIndex = "".join(eachLineListCheckIndex)
if self.utils.DetectIntoSimpleQuotes(eachLineCheckIndex, maxIndexLine=i) == False:
getIndex = i
break
else:
continue
eachLineList = eachLineList[:getIndex]
eachLineList.append("\n")
eachLine = "".join(eachLineList)
sys.stdout.write(eachLine)
detectIntoSimpleQuotes = None
countLineCommentInput += 1
else:
sys.stdout.write(eachLine)
else:
sys.stdout.write(eachLine)
bar.next(1)
bar.finish()
# -- Check if all comments are deleted -- #
with Bar("Check ", fill="=", max=countRecursFiles, suffix="%(percent)d%%") as bar:
for file in recursFiles:
with open(file, "r") as readFile:
readF = readFile.readlines()
for eachLine in readF:
if re.match(Reg.pythonFileHeader, eachLine):
continue
else:
if multipleLinesComments == 1:
if re.match(Reg.quotesCommentsEndMultipleLines, eachLine):
if self.utils.VerifyMultipleLinesComments(eachLine) == True:
if multipleLinesComments == 1:
countLineCommentOutput += 1
multipleLinesComments = 0
filesConcerned.append(file)
else:
countLineCommentOutput += 1
filesConcerned.append(file)
else:
countLineCommentOutput += 1
filesConcerned.append(file)
elif noCommentsQuotes == 1:
if re.match(Reg.checkIfEndVarStdoutMultipleQuotes, eachLine):
noCommentsQuotes = 0
else:
continue
else:
if re.match(Reg.quotesCommentsOneLine, eachLine):
countLineCommentOutput += 1
filesConcerned.append(file)
else:
if re.match(Reg.quotesCommentsMultipleLines, eachLine):
if self.utils.VerifyMultipleLinesComments(eachLine) == True:
countLineCommentOutput += 1
multipleLinesComments = 1
filesConcerned.append(file)
else:
continue
else:
if re.match(Reg.checkIfStdoutMultipleQuotes, eachLine) \
or re.match(Reg.checkIfVarMultipleQuotes, eachLine):
noCommentsQuotes = 1
elif re.match(Reg.checkIfRegexMultipleQuotes, eachLine):
continue
else:
continue
with open(file, "r") as readFile:
readF = readFile.readlines()
for eachLine in readF:
if re.match(Reg.pythonFileHeader, eachLine):
continue
else:
if re.match(Reg.hashCommentsBeginLine, eachLine):
countLineCommentOutput += 1
filesConcerned.append(file)
elif re.match(Reg.hashCommentsAfterLine, eachLine):
eachLineList = list(eachLine)
getIndexList = []
for i, v in enumerate(eachLineList):
if v == "#":
getIndexList.append(i)
for i in getIndexList:
if self.utils.DetectIntoSimpleQuotes(eachLine, maxIndexLine=i) == False:
countLineCommentOutput += 1
detectIntoSimpleQuotes = False
filesConcerned.append(file)
break
else:
continue
if detectIntoSimpleQuotes == False:
for i in getIndexList:
eachLineListCheckIndex = eachLineList[:i]
eachLineListCheckIndex.append("\n")
eachLineCheckIndex = "".join(eachLineListCheckIndex)
if self.utils.DetectIntoSimpleQuotes(eachLineCheckIndex, maxIndexLine=i) == False:
getIndex = i
break
else:
continue
eachLineList = eachLineList[:getIndex]
eachLineList.append("\n")
eachLine = "".join(eachLineList)
countLineCommentOutput += 1
detectIntoSimpleQuotes = None
else:
continue
else:
continue
bar.next(1)
bar.finish()
if countLineCommentOutput == 0:
print("\n-> {} lines of comments deleted\n".format(countLineCommentInput))
return 1
else:
if verboseArg:
filesConcerned = self.utils.RemoveDuplicatesValuesInList(filesConcerned)
print("\nFiles concerned of comments no deleted :\n")
for f in filesConcerned:
print("-> {}".format(f))
print("\n-> {} lines of comments no deleted\n".format(countLineCommentOutput))
return 0
def TrashFiles(self, outputArg, verboseArg):
countRecursFiles = 0
deleteFiles = 0
checkPycFile = []
currentPosition = os.getcwd()
recursFiles = self.utils.CheckFileDir(
output=outputArg,
detectFiles="pyc",
blockDir="__pycache__",
blockFile=False,
dirOnly=False
)
for number in recursFiles:
countRecursFiles += 1
if countRecursFiles == 0:
print("[!] No .pyc file(s) found in {}".format(outputArg))
return 1
print("\n[+] Running delete {} .pyc file(s)...\n".format(countRecursFiles))
# -- Check if .pyc file(s) exists and delete it -- #
with Bar("Setting up ", fill="=", max=countRecursFiles, suffix="%(percent)d%%") as bar:
for file in recursFiles:
if re.match(Reg.detectPycFiles, file):
deleteFiles += 1
checkPycFile.append(file)
bar.next(1)
bar.finish()
# -- Delete pyc file(s) -- #
with Bar("Correction ", fill="=", max=countRecursFiles, suffix="%(percent)d%%") as bar:
for file in recursFiles:
if re.match(Reg.detectPycFiles, file):
extractPycFiles = re.search(r".*\.pyc$", file)
moveFolder = re.sub(r".*\.pyc$", "", file)
os.chdir(moveFolder)
os.remove(extractPycFiles.group(0))
os.chdir(currentPosition)
bar.next(1)
bar.finish()
checkRecursFiles = self.utils.CheckFileDir(
output=outputArg,
detectFiles="pyc",
blockDir="__pycache__",
blockFile=False,
dirOnly=False
)
if checkRecursFiles != []:
if verboseArg:
for pycFile in checkRecursFiles:
print("-> .pyc file no deleted : {}".format(pycFile))
return 0
else:
if verboseArg:
for pycFile in checkPycFile:
print("-> .pyc file deleted : {}".format(pycFile))
print("\n-> {} .pyc file(s) deleted".format(deleteFiles))
return 1
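# --- Illustrative usage sketch (not part of the original module) ---
# Typical standalone use on an already generated output directory
# ('build/obfuscated' is a placeholder path, not a real project layout):
#
#   cleaner = Delete()
#   cleaner.LinesSpaces(outputArg='build/obfuscated', verboseArg=False)
#   cleaner.Comments(outputArg='build/obfuscated', verboseArg=False)
#   cleaner.TrashFiles(outputArg='build/obfuscated', verboseArg=False)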
|
scripts/conversionScripts/Opt_FD_name.py
|
rinelson456/raven
| 159 |
110143
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import xml.etree.ElementTree as ET
import xml.dom.minidom as pxml
import os
def convert(tree,fileName=None):
"""
Converts input files to be compatible with merge request #460:
- Removes "all" node
- Sets default variable names
@ In, tree, xml.etree.ElementTree.ElementTree object, the contents of a RAVEN input file
@ In, fileName, the name for the raven input file
@Out, tree, xml.etree.ElementTree.ElementTree object, the modified RAVEN input file
"""
simulation = tree.getroot()
opts = simulation.find('Optimizers')
if opts is not None:
for child in opts:
if child.tag == 'FiniteDifferenceGradientOptimizer':
child.tag = 'FiniteDifference'
return tree
if __name__=='__main__':
import convert_utils
import sys
convert_utils.standardMain(sys.argv,convert)
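# --- Illustrative usage sketch (not part of the original script) ---
# A minimal demonstration of convert() on an in-memory document; the XML
# fragment is made up and much smaller than a real RAVEN input file:
#
#   root = ET.fromstring(
#       '<Simulation><Optimizers>'
#       '<FiniteDifferenceGradientOptimizer name="opt"/>'
#       '</Optimizers></Simulation>')
#   convert(ET.ElementTree(root))
#   # the optimizer node is now tagged <FiniteDifference name="opt"/>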
|
torchMoji/examples/create_twitter_vocab.py
|
UmaTaru/run
| 163 |
110149
|
""" Creates a vocabulary from a tsv file.
"""
import codecs
from torchMoji.torchmoji.create_vocab import VocabBuilder
from torchMoji.torchmoji.word_generator import TweetWordGenerator
with codecs.open('../../twitterdata/tweets.2016-09-01', 'rU', 'utf-8') as stream:
wg = TweetWordGenerator(stream)
vb = VocabBuilder(wg)
vb.count_all_words()
vb.save_vocab()
|
release/stubs.min/Tekla/Structures/ModelInternal_parts/dotStringProperty_t.py
|
htlcnn/ironpython-stubs
| 182 |
110202
|
<gh_stars>100-1000
class dotStringProperty_t(object):
# no doc
aName=None
aValueString=None
FatherId=None
ValueStringIteration=None
|
functions/source/CleanupPV/lambda_function.py
|
srinivasreddych/quickstart-redhat-openshift
| 179 |
110213
|
import json
import logging
import boto3
import cfnresponse
import time
ec2_client = boto3.client('ec2')
logs_client = boto3.client('logs')
def boto_throttle_backoff(boto_method, max_retries=10, backoff_multiplier=2, **kwargs):
retry = 0
results = None
while not results:
try:
results = boto_method(**kwargs)
except Exception as e:
if 'ThrottlingException' in str(e) or 'VolumeInUse' in str(e):
retry += 1
if retry > max_retries:
print("Maximum retries of %s reached" % str(max_retries))
raise
print("hit an api throttle, or eventual consistency error, waiting for %s seconds before retrying" % str(retry * backoff_multiplier))
time.sleep(retry * backoff_multiplier)
else:
raise
return results
def handler(event, context):
print('Received event: %s' % json.dumps(event))
status = cfnresponse.SUCCESS
physical_resource_id = 'PVCleanup'
data = {}
reason = None
try:
if event['RequestType'] == 'Delete':
print('Removing any orphaned EBS volumes...')
tag_name = 'tag:kubernetes.io/cluster/%s' % event['ResourceProperties']['ClusterId']
response = boto_throttle_backoff(
ec2_client.describe_volumes,
Filters=[{'Name': tag_name, 'Values': ['owned']}]
)['Volumes']
for volume in response:
print('deleting volume %s' % volume['VolumeId'])
boto_throttle_backoff(ec2_client.delete_volume, VolumeId=volume['VolumeId'])
except Exception as e:
logging.error('Exception: %s' % e, exc_info=True)
reason = str(e)
status = cfnresponse.FAILED
finally:
if event['RequestType'] == 'Delete':
try:
wait_message = 'waiting for events for request_id %s to propagate to cloudwatch...' % context.aws_request_id
while not logs_client.filter_log_events(
logGroupName=context.log_group_name,
logStreamNames=[context.log_stream_name],
filterPattern='"%s"' % wait_message
)['events']:
print(wait_message)
time.sleep(5)
except Exception as e:
logging.error('Exception: %s' % e, exc_info=True)
time.sleep(120)
cfnresponse.send(event, context, status, data, physical_resource_id, reason)
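# --- Illustrative usage sketch (not part of the original handler) ---
# boto_throttle_backoff() retries a callable while it raises throttling or
# eventual-consistency errors. The fake method below stands in for a boto3
# call so the retry loop can be exercised locally (the module-level boto3
# client creation above still needs a configured region to import cleanly).
if __name__ == '__main__':  # pragma: no cover
    class _FlakyMethod(object):
        """Raises a throttling-style error a fixed number of times, then succeeds."""
        def __init__(self, failures):
            self.remaining = failures

        def __call__(self, **kwargs):
            if self.remaining > 0:
                self.remaining -= 1
                raise Exception('ThrottlingException: slow down')
            return {'Volumes': [], 'kwargs': kwargs}

    print(boto_throttle_backoff(_FlakyMethod(failures=2), backoff_multiplier=0, DryRun=True))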
|
plugins/webosint/who/whois.py
|
Appnet1337/OSINT-SAN
| 313 |
110242
|
import requests
def whois_more(IP):
result = requests.get('http://api.hackertarget.com/whois/?q=' + IP).text
print('\n'+ result + '\n')
|
tests/warnings/semantic/UNDEFINED_DECORATOR.py
|
dina-fouad/pyccel
| 206 |
110243
|
# pylint: disable=missing-function-docstring, missing-module-docstring/
@toto # pylint: disable=undefined-variable
def f():
pass
|
mastiff/plugins/analysis/EXE/EXE-resources.py
|
tt1379/mastiff
| 164 |
110248
|
#!/usr/bin/env python
"""
Copyright 2012-2013 The MASTIFF Project, All Rights Reserved.
This software, having been partly or wholly developed and/or
sponsored by KoreLogic, Inc., is hereby released under the terms
and conditions set forth in the project's "README.LICENSE" file.
For a list of all contributors and sponsors, please refer to the
project's "README.CREDITS" file.
"""
__doc__ = """
PE Resources Plug-in
Plugin Type: EXE
Purpose:
This plug-in obtains information on any resources contained within
the Windows EXE and extracts them.
More information on how resources are stored can be found in the
Microsoft PE and COFF Specification document.
http://msdn.microsoft.com/library/windows/hardware/gg463125
Thanks to <NAME> for creating the pefile library, whose code helped
understand how to process resources.
Output:
resources.txt - File containing a list of all resources in the EXE and any
associated information.
log_dir/resource - Directory containing any extracted resource.
Pre-requisites:
- pefile library (http://code.google.com/p/pefile/)
"""
__version__ = "$Id: 519a2014141003f89b18bb5c3de571729a952f8e $"
import logging
import os
import time
try:
import pefile
except ImportError, err:
print ("Unable to import pefile: %s" % err)
import mastiff.plugins.category.exe as exe
class EXE_Resources(exe.EXECat):
"""EXE Resources plugin code."""
def __init__(self):
"""Initialize the plugin."""
exe.EXECat.__init__(self)
self.resources = list()
self.pe = None
self.output = dict()
def analyze_dir(self, directory, prefix='', _type='', timedate=0):
""" Analyze a resource directory and obtain all of its items."""
log = logging.getLogger('Mastiff.Plugins.' + self.name + '.analyze')
# save the timedate stamp
timedate = directory.struct.TimeDateStamp
for top_item in directory.entries:
if hasattr(top_item, 'data'):
# at the language level that contains all of our information
resource = dict()
resource['Id'] = prefix
resource['Type'] = _type
# store the offset as the offset within the file, not the RVA!
try:
resource['Offset'] = self.pe.get_offset_from_rva(top_item.data.struct.OffsetToData)
resource['Size'] = top_item.data.struct.Size
resource['Lang'] = [ pefile.LANG.get(top_item.data.lang, '*unknown*'), \
pefile.get_sublang_name_for_lang( top_item.data.lang, top_item.data.sublang ) ]
resource['TimeDate'] = timedate
except pefile.PEFormatError, err:
log.error('Error grabbing resource \"%s\" info: %s' % (prefix, err))
return False
self.resources.append(resource)
log.debug('Adding resource item %s' % resource['Id'])
elif hasattr(top_item, 'directory'):
if top_item.name is not None:
# in a name level
if len(prefix) == 0:
newprefix = prefix + str(top_item.name)
else:
newprefix = ', '.join([prefix, str(top_item.name)])
else:
# if name is blank, we are in a Type level
if len(prefix) == 0:
newprefix = 'ID ' + str(top_item.id)
_type = pefile.RESOURCE_TYPE.get(top_item.id)
else:
newprefix = ', '.join([prefix, 'ID ' + str(top_item.id)])
# we aren't at the end, recurse
self.analyze_dir(top_item.directory, prefix=newprefix, _type=_type)
def extract_resources(self, log_dir, filename):
"""
Extract any resources from the file and put them in
the resources dir.
"""
log = logging.getLogger('Mastiff.Plugins.' + self.name + '.extract')
if len(self.resources) == 0:
# no resources
return False
# create the dir if it doesn't exist
log_dir = log_dir + os.sep + 'resources'
if not os.path.exists(log_dir):
try:
os.makedirs(log_dir)
except IOError, err:
log.error('Unable to create dir %s: %s' % (log_dir, err))
return False
try:
my_file = open(filename, 'rb')
except IOError, err:
log.error('Unable to open file.')
return False
file_size = os.path.getsize(filename)
# cycle through resources and extract them
for res_item in self.resources:
# check to make sure we won't go past the EOF
if (res_item['Offset'] + res_item['Size']) > file_size:
log.error('File is smaller than resource location. Could be a packed file.')
continue
my_file.seek(res_item['Offset'])
data = my_file.read(res_item['Size'])
out_name = res_item['Id'].replace('ID ', '_').replace(', ', '_').lstrip('_')
if res_item['Type'] is not None and len(res_item['Type']) > 0:
out_name += '_' + res_item['Type']
with open(log_dir + os.sep + out_name, 'wb') as out_file:
log.debug('Writing %s to %s.' % (res_item['Id'], out_name))
out_file.write(data)
out_file.close()
my_file.close()
return True
def analyze(self, config, filename):
"""Analyze the file."""
# sanity check to make sure we can run
if self.is_activated == False:
return False
log = logging.getLogger('Mastiff.Plugins.' + self.name)
log.info('Starting execution.')
try:
self.pe = pefile.PE(filename)
except pefile.PEFormatError, err:
log.error('Unable to parse PE file: %s' % err)
return False
if not hasattr(self.pe, 'DIRECTORY_ENTRY_RESOURCE'):
log.info('No resources for this file.')
return False
# parse the directory structure
self.analyze_dir(self.pe.DIRECTORY_ENTRY_RESOURCE)
self.output['metadata'] = { }
self.output['data'] = dict()
if len(self.resources) == 0:
log.info('No resources could be found.')
else:
# output data to file and extract resources
self.gen_output(config.get_var('Dir','log_dir'))
self.output_file(config.get_var('Dir','log_dir'))
self.extract_resources(config.get_var('Dir','log_dir'), filename)
return self.output
def gen_output(self, outdir):
""" Generate the output to send back. """
self.output['data']['resources'] = list()
self.output['data']['resources'].append([ 'Name/ID', 'Type', 'File Offset', 'Size', 'Language', 'Time Date Stamp'])
for item in sorted(self.resources, key=lambda mydict: mydict['Offset']):
lang = ', '.join(item['Lang']).replace('SUBLANG_', '').replace('LANG_', '')
my_time = time.asctime(time.gmtime(item['TimeDate']))
self.output['data']['resources'].append([ item['Id'], item['Type'], hex(item['Offset']), hex(item['Size']), lang, my_time ])
return True
def output_file(self, outdir):
"""Print output from analysis to a file."""
log = logging.getLogger('Mastiff.Plugins.' + self.name + '.output')
try:
outfile = open(outdir + os.sep + 'resources.txt', 'w')
outfile.write('Resource Information\n\n')
except IOError, err:
log.error('Could not open resources.txt: %s' % err)
return False
outstr = '{0:20} {1:15} {2:15} {3:8} {4:<30} {5:<25}\n'.format( \
'Name/ID',
'Type',
'File Offset',
'Size',
'Language',
'Time Date Stamp')
outfile.write(outstr)
outfile.write('-' * len(outstr) + '\n')
for item in sorted(self.resources, key=lambda mydict: mydict['Offset']):
lang = ', '.join(item['Lang']).replace('SUBLANG_', '').replace('LANG_', '')
my_time = time.asctime(time.gmtime(item['TimeDate']))
outstr = '{0:20} {1:15} {2:<15} {3:<8} {4:30} {5:<25}\n'.format(item['Id'],
item['Type'],
hex(item['Offset']),
hex(item['Size']),
lang,
my_time)
outfile.write(outstr)
return True
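# --- Illustrative usage sketch (not part of the original plug-in) ---
# The core technique above is walking pefile's DIRECTORY_ENTRY_RESOURCE tree.
# This stripped-down walk prints the size and file offset of every resource;
# 'sample.exe' is a placeholder path, not a file shipped with the project.
def _demo_walk_resources(path='sample.exe'):
    pe = pefile.PE(path)
    if not hasattr(pe, 'DIRECTORY_ENTRY_RESOURCE'):
        print('no resources')
        return
    for type_entry in pe.DIRECTORY_ENTRY_RESOURCE.entries:
        res_type = pefile.RESOURCE_TYPE.get(type_entry.id, str(type_entry.name))
        if not hasattr(type_entry, 'directory'):
            continue
        for name_entry in type_entry.directory.entries:
            if not hasattr(name_entry, 'directory'):
                continue
            for lang_entry in name_entry.directory.entries:
                if hasattr(lang_entry, 'data'):
                    print('%s : %d bytes at offset 0x%x' % (
                        res_type,
                        lang_entry.data.struct.Size,
                        pe.get_offset_from_rva(lang_entry.data.struct.OffsetToData)))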
|
baseline/onnx/apis/grpc_service_pb2_grpc.py
|
sagnik/baseline
| 241 |
110249
|
<filename>baseline/onnx/apis/grpc_service_pb2_grpc.py
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import baseline.onnx.apis.grpc_service_pb2 as grpc__service__pb2
class GRPCInferenceServiceStub(object):
"""@@
@@.. cpp:var:: service InferenceService
@@
@@ Inference Server GRPC endpoints.
@@
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.ServerLive = channel.unary_unary(
'/inference.GRPCInferenceService/ServerLive',
request_serializer=grpc__service__pb2.ServerLiveRequest.SerializeToString,
response_deserializer=grpc__service__pb2.ServerLiveResponse.FromString,
)
self.ServerReady = channel.unary_unary(
'/inference.GRPCInferenceService/ServerReady',
request_serializer=grpc__service__pb2.ServerReadyRequest.SerializeToString,
response_deserializer=grpc__service__pb2.ServerReadyResponse.FromString,
)
self.ModelReady = channel.unary_unary(
'/inference.GRPCInferenceService/ModelReady',
request_serializer=grpc__service__pb2.ModelReadyRequest.SerializeToString,
response_deserializer=grpc__service__pb2.ModelReadyResponse.FromString,
)
self.ServerMetadata = channel.unary_unary(
'/inference.GRPCInferenceService/ServerMetadata',
request_serializer=grpc__service__pb2.ServerMetadataRequest.SerializeToString,
response_deserializer=grpc__service__pb2.ServerMetadataResponse.FromString,
)
self.ModelMetadata = channel.unary_unary(
'/inference.GRPCInferenceService/ModelMetadata',
request_serializer=grpc__service__pb2.ModelMetadataRequest.SerializeToString,
response_deserializer=grpc__service__pb2.ModelMetadataResponse.FromString,
)
self.ModelInfer = channel.unary_unary(
'/inference.GRPCInferenceService/ModelInfer',
request_serializer=grpc__service__pb2.ModelInferRequest.SerializeToString,
response_deserializer=grpc__service__pb2.ModelInferResponse.FromString,
)
self.ModelStreamInfer = channel.stream_stream(
'/inference.GRPCInferenceService/ModelStreamInfer',
request_serializer=grpc__service__pb2.ModelInferRequest.SerializeToString,
response_deserializer=grpc__service__pb2.ModelStreamInferResponse.FromString,
)
self.ModelConfig = channel.unary_unary(
'/inference.GRPCInferenceService/ModelConfig',
request_serializer=grpc__service__pb2.ModelConfigRequest.SerializeToString,
response_deserializer=grpc__service__pb2.ModelConfigResponse.FromString,
)
self.ModelStatistics = channel.unary_unary(
'/inference.GRPCInferenceService/ModelStatistics',
request_serializer=grpc__service__pb2.ModelStatisticsRequest.SerializeToString,
response_deserializer=grpc__service__pb2.ModelStatisticsResponse.FromString,
)
self.RepositoryIndex = channel.unary_unary(
'/inference.GRPCInferenceService/RepositoryIndex',
request_serializer=grpc__service__pb2.RepositoryIndexRequest.SerializeToString,
response_deserializer=grpc__service__pb2.RepositoryIndexResponse.FromString,
)
self.RepositoryModelLoad = channel.unary_unary(
'/inference.GRPCInferenceService/RepositoryModelLoad',
request_serializer=grpc__service__pb2.RepositoryModelLoadRequest.SerializeToString,
response_deserializer=grpc__service__pb2.RepositoryModelLoadResponse.FromString,
)
self.RepositoryModelUnload = channel.unary_unary(
'/inference.GRPCInferenceService/RepositoryModelUnload',
request_serializer=grpc__service__pb2.RepositoryModelUnloadRequest.SerializeToString,
response_deserializer=grpc__service__pb2.RepositoryModelUnloadResponse.FromString,
)
self.SystemSharedMemoryStatus = channel.unary_unary(
'/inference.GRPCInferenceService/SystemSharedMemoryStatus',
request_serializer=grpc__service__pb2.SystemSharedMemoryStatusRequest.SerializeToString,
response_deserializer=grpc__service__pb2.SystemSharedMemoryStatusResponse.FromString,
)
self.SystemSharedMemoryRegister = channel.unary_unary(
'/inference.GRPCInferenceService/SystemSharedMemoryRegister',
request_serializer=grpc__service__pb2.SystemSharedMemoryRegisterRequest.SerializeToString,
response_deserializer=grpc__service__pb2.SystemSharedMemoryRegisterResponse.FromString,
)
self.SystemSharedMemoryUnregister = channel.unary_unary(
'/inference.GRPCInferenceService/SystemSharedMemoryUnregister',
request_serializer=grpc__service__pb2.SystemSharedMemoryUnregisterRequest.SerializeToString,
response_deserializer=grpc__service__pb2.SystemSharedMemoryUnregisterResponse.FromString,
)
self.CudaSharedMemoryStatus = channel.unary_unary(
'/inference.GRPCInferenceService/CudaSharedMemoryStatus',
request_serializer=grpc__service__pb2.CudaSharedMemoryStatusRequest.SerializeToString,
response_deserializer=grpc__service__pb2.CudaSharedMemoryStatusResponse.FromString,
)
self.CudaSharedMemoryRegister = channel.unary_unary(
'/inference.GRPCInferenceService/CudaSharedMemoryRegister',
request_serializer=grpc__service__pb2.CudaSharedMemoryRegisterRequest.SerializeToString,
response_deserializer=grpc__service__pb2.CudaSharedMemoryRegisterResponse.FromString,
)
self.CudaSharedMemoryUnregister = channel.unary_unary(
'/inference.GRPCInferenceService/CudaSharedMemoryUnregister',
request_serializer=grpc__service__pb2.CudaSharedMemoryUnregisterRequest.SerializeToString,
response_deserializer=grpc__service__pb2.CudaSharedMemoryUnregisterResponse.FromString,
)
class GRPCInferenceServiceServicer(object):
"""@@
@@.. cpp:var:: service InferenceService
@@
@@ Inference Server GRPC endpoints.
@@
"""
def ServerLive(self, request, context):
"""@@ .. cpp:var:: rpc ServerLive(ServerLiveRequest) returns
@@ (ServerLiveResponse)
@@
@@ Check liveness of the inference server.
@@
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ServerReady(self, request, context):
"""@@ .. cpp:var:: rpc ServerReady(ServerReadyRequest) returns
@@ (ServerReadyResponse)
@@
@@ Check readiness of the inference server.
@@
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ModelReady(self, request, context):
"""@@ .. cpp:var:: rpc ModelReady(ModelReadyRequest) returns
@@ (ModelReadyResponse)
@@
@@ Check readiness of a model in the inference server.
@@
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ServerMetadata(self, request, context):
"""@@ .. cpp:var:: rpc ServerMetadata(ServerMetadataRequest) returns
@@ (ServerMetadataResponse)
@@
@@ Get server metadata.
@@
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ModelMetadata(self, request, context):
"""@@ .. cpp:var:: rpc ModelMetadata(ModelMetadataRequest) returns
@@ (ModelMetadataResponse)
@@
@@ Get model metadata.
@@
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ModelInfer(self, request, context):
"""@@ .. cpp:var:: rpc ModelInfer(ModelInferRequest) returns
@@ (ModelInferResponse)
@@
@@ Perform inference using a specific model.
@@
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ModelStreamInfer(self, request_iterator, context):
"""@@ .. cpp:var:: rpc ModelStreamInfer(stream ModelInferRequest) returns
@@ (stream ModelStreamInferResponse)
@@
@@ Perform streaming inference.
@@
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ModelConfig(self, request, context):
"""@@ .. cpp:var:: rpc ModelConfig(ModelConfigRequest) returns
@@ (ModelConfigResponse)
@@
@@ Get model configuration.
@@
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ModelStatistics(self, request, context):
"""@@ .. cpp:var:: rpc ModelStatistics(
@@ ModelStatisticsRequest)
@@ returns (ModelStatisticsResponse)
@@
@@ Get the cumulative inference statistics for a model.
@@
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RepositoryIndex(self, request, context):
"""@@ .. cpp:var:: rpc RepositoryIndex(RepositoryIndexRequest) returns
@@ (RepositoryIndexResponse)
@@
@@ Get the index of model repository contents.
@@
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RepositoryModelLoad(self, request, context):
"""@@ .. cpp:var:: rpc RepositoryModelLoad(RepositoryModelLoadRequest) returns
@@ (RepositoryModelLoadResponse)
@@
@@ Load or reload a model from a repository.
@@
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RepositoryModelUnload(self, request, context):
"""@@ .. cpp:var:: rpc RepositoryModelUnload(RepositoryModelUnloadRequest)
@@ returns (RepositoryModelUnloadResponse)
@@
@@ Unload a model.
@@
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SystemSharedMemoryStatus(self, request, context):
"""@@ .. cpp:var:: rpc SystemSharedMemoryStatus(
@@ SystemSharedMemoryStatusRequest)
@@ returns (SystemSharedMemoryStatusRespose)
@@
@@ Get the status of all registered system-shared-memory regions.
@@
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SystemSharedMemoryRegister(self, request, context):
"""@@ .. cpp:var:: rpc SystemSharedMemoryRegister(
@@ SystemSharedMemoryRegisterRequest)
@@ returns (SystemSharedMemoryRegisterResponse)
@@
@@ Register a system-shared-memory region.
@@
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SystemSharedMemoryUnregister(self, request, context):
"""@@ .. cpp:var:: rpc SystemSharedMemoryUnregister(
@@ SystemSharedMemoryUnregisterRequest)
@@ returns (SystemSharedMemoryUnregisterResponse)
@@
@@ Unregister a system-shared-memory region.
@@
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CudaSharedMemoryStatus(self, request, context):
"""@@ .. cpp:var:: rpc CudaSharedMemoryStatus(
@@ CudaSharedMemoryStatusRequest)
@@ returns (CudaSharedMemoryStatusRespose)
@@
@@ Get the status of all registered CUDA-shared-memory regions.
@@
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CudaSharedMemoryRegister(self, request, context):
"""@@ .. cpp:var:: rpc CudaSharedMemoryRegister(
@@ CudaSharedMemoryRegisterRequest)
@@ returns (CudaSharedMemoryRegisterResponse)
@@
@@ Register a CUDA-shared-memory region.
@@
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CudaSharedMemoryUnregister(self, request, context):
"""@@ .. cpp:var:: rpc CudaSharedMemoryUnregister(
@@ CudaSharedMemoryUnregisterRequest)
@@ returns (CudaSharedMemoryUnregisterResponse)
@@
@@ Unregister a CUDA-shared-memory region.
@@
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_GRPCInferenceServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'ServerLive': grpc.unary_unary_rpc_method_handler(
servicer.ServerLive,
request_deserializer=grpc__service__pb2.ServerLiveRequest.FromString,
response_serializer=grpc__service__pb2.ServerLiveResponse.SerializeToString,
),
'ServerReady': grpc.unary_unary_rpc_method_handler(
servicer.ServerReady,
request_deserializer=grpc__service__pb2.ServerReadyRequest.FromString,
response_serializer=grpc__service__pb2.ServerReadyResponse.SerializeToString,
),
'ModelReady': grpc.unary_unary_rpc_method_handler(
servicer.ModelReady,
request_deserializer=grpc__service__pb2.ModelReadyRequest.FromString,
response_serializer=grpc__service__pb2.ModelReadyResponse.SerializeToString,
),
'ServerMetadata': grpc.unary_unary_rpc_method_handler(
servicer.ServerMetadata,
request_deserializer=grpc__service__pb2.ServerMetadataRequest.FromString,
response_serializer=grpc__service__pb2.ServerMetadataResponse.SerializeToString,
),
'ModelMetadata': grpc.unary_unary_rpc_method_handler(
servicer.ModelMetadata,
request_deserializer=grpc__service__pb2.ModelMetadataRequest.FromString,
response_serializer=grpc__service__pb2.ModelMetadataResponse.SerializeToString,
),
'ModelInfer': grpc.unary_unary_rpc_method_handler(
servicer.ModelInfer,
request_deserializer=grpc__service__pb2.ModelInferRequest.FromString,
response_serializer=grpc__service__pb2.ModelInferResponse.SerializeToString,
),
'ModelStreamInfer': grpc.stream_stream_rpc_method_handler(
servicer.ModelStreamInfer,
request_deserializer=grpc__service__pb2.ModelInferRequest.FromString,
response_serializer=grpc__service__pb2.ModelStreamInferResponse.SerializeToString,
),
'ModelConfig': grpc.unary_unary_rpc_method_handler(
servicer.ModelConfig,
request_deserializer=grpc__service__pb2.ModelConfigRequest.FromString,
response_serializer=grpc__service__pb2.ModelConfigResponse.SerializeToString,
),
'ModelStatistics': grpc.unary_unary_rpc_method_handler(
servicer.ModelStatistics,
request_deserializer=grpc__service__pb2.ModelStatisticsRequest.FromString,
response_serializer=grpc__service__pb2.ModelStatisticsResponse.SerializeToString,
),
'RepositoryIndex': grpc.unary_unary_rpc_method_handler(
servicer.RepositoryIndex,
request_deserializer=grpc__service__pb2.RepositoryIndexRequest.FromString,
response_serializer=grpc__service__pb2.RepositoryIndexResponse.SerializeToString,
),
'RepositoryModelLoad': grpc.unary_unary_rpc_method_handler(
servicer.RepositoryModelLoad,
request_deserializer=grpc__service__pb2.RepositoryModelLoadRequest.FromString,
response_serializer=grpc__service__pb2.RepositoryModelLoadResponse.SerializeToString,
),
'RepositoryModelUnload': grpc.unary_unary_rpc_method_handler(
servicer.RepositoryModelUnload,
request_deserializer=grpc__service__pb2.RepositoryModelUnloadRequest.FromString,
response_serializer=grpc__service__pb2.RepositoryModelUnloadResponse.SerializeToString,
),
'SystemSharedMemoryStatus': grpc.unary_unary_rpc_method_handler(
servicer.SystemSharedMemoryStatus,
request_deserializer=grpc__service__pb2.SystemSharedMemoryStatusRequest.FromString,
response_serializer=grpc__service__pb2.SystemSharedMemoryStatusResponse.SerializeToString,
),
'SystemSharedMemoryRegister': grpc.unary_unary_rpc_method_handler(
servicer.SystemSharedMemoryRegister,
request_deserializer=grpc__service__pb2.SystemSharedMemoryRegisterRequest.FromString,
response_serializer=grpc__service__pb2.SystemSharedMemoryRegisterResponse.SerializeToString,
),
'SystemSharedMemoryUnregister': grpc.unary_unary_rpc_method_handler(
servicer.SystemSharedMemoryUnregister,
request_deserializer=grpc__service__pb2.SystemSharedMemoryUnregisterRequest.FromString,
response_serializer=grpc__service__pb2.SystemSharedMemoryUnregisterResponse.SerializeToString,
),
'CudaSharedMemoryStatus': grpc.unary_unary_rpc_method_handler(
servicer.CudaSharedMemoryStatus,
request_deserializer=grpc__service__pb2.CudaSharedMemoryStatusRequest.FromString,
response_serializer=grpc__service__pb2.CudaSharedMemoryStatusResponse.SerializeToString,
),
'CudaSharedMemoryRegister': grpc.unary_unary_rpc_method_handler(
servicer.CudaSharedMemoryRegister,
request_deserializer=grpc__service__pb2.CudaSharedMemoryRegisterRequest.FromString,
response_serializer=grpc__service__pb2.CudaSharedMemoryRegisterResponse.SerializeToString,
),
'CudaSharedMemoryUnregister': grpc.unary_unary_rpc_method_handler(
servicer.CudaSharedMemoryUnregister,
request_deserializer=grpc__service__pb2.CudaSharedMemoryUnregisterRequest.FromString,
response_serializer=grpc__service__pb2.CudaSharedMemoryUnregisterResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'inference.GRPCInferenceService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class GRPCInferenceService(object):
"""@@
@@.. cpp:var:: service InferenceService
@@
@@ Inference Server GRPC endpoints.
@@
"""
@staticmethod
def ServerLive(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/inference.GRPCInferenceService/ServerLive',
grpc__service__pb2.ServerLiveRequest.SerializeToString,
grpc__service__pb2.ServerLiveResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ServerReady(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/inference.GRPCInferenceService/ServerReady',
grpc__service__pb2.ServerReadyRequest.SerializeToString,
grpc__service__pb2.ServerReadyResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ModelReady(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/inference.GRPCInferenceService/ModelReady',
grpc__service__pb2.ModelReadyRequest.SerializeToString,
grpc__service__pb2.ModelReadyResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ServerMetadata(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/inference.GRPCInferenceService/ServerMetadata',
grpc__service__pb2.ServerMetadataRequest.SerializeToString,
grpc__service__pb2.ServerMetadataResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ModelMetadata(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/inference.GRPCInferenceService/ModelMetadata',
grpc__service__pb2.ModelMetadataRequest.SerializeToString,
grpc__service__pb2.ModelMetadataResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ModelInfer(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/inference.GRPCInferenceService/ModelInfer',
grpc__service__pb2.ModelInferRequest.SerializeToString,
grpc__service__pb2.ModelInferResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ModelStreamInfer(request_iterator,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.stream_stream(request_iterator, target, '/inference.GRPCInferenceService/ModelStreamInfer',
grpc__service__pb2.ModelInferRequest.SerializeToString,
grpc__service__pb2.ModelStreamInferResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ModelConfig(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/inference.GRPCInferenceService/ModelConfig',
grpc__service__pb2.ModelConfigRequest.SerializeToString,
grpc__service__pb2.ModelConfigResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ModelStatistics(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/inference.GRPCInferenceService/ModelStatistics',
grpc__service__pb2.ModelStatisticsRequest.SerializeToString,
grpc__service__pb2.ModelStatisticsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def RepositoryIndex(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/inference.GRPCInferenceService/RepositoryIndex',
grpc__service__pb2.RepositoryIndexRequest.SerializeToString,
grpc__service__pb2.RepositoryIndexResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def RepositoryModelLoad(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/inference.GRPCInferenceService/RepositoryModelLoad',
grpc__service__pb2.RepositoryModelLoadRequest.SerializeToString,
grpc__service__pb2.RepositoryModelLoadResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def RepositoryModelUnload(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/inference.GRPCInferenceService/RepositoryModelUnload',
grpc__service__pb2.RepositoryModelUnloadRequest.SerializeToString,
grpc__service__pb2.RepositoryModelUnloadResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def SystemSharedMemoryStatus(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/inference.GRPCInferenceService/SystemSharedMemoryStatus',
grpc__service__pb2.SystemSharedMemoryStatusRequest.SerializeToString,
grpc__service__pb2.SystemSharedMemoryStatusResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def SystemSharedMemoryRegister(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/inference.GRPCInferenceService/SystemSharedMemoryRegister',
grpc__service__pb2.SystemSharedMemoryRegisterRequest.SerializeToString,
grpc__service__pb2.SystemSharedMemoryRegisterResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def SystemSharedMemoryUnregister(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/inference.GRPCInferenceService/SystemSharedMemoryUnregister',
grpc__service__pb2.SystemSharedMemoryUnregisterRequest.SerializeToString,
grpc__service__pb2.SystemSharedMemoryUnregisterResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CudaSharedMemoryStatus(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/inference.GRPCInferenceService/CudaSharedMemoryStatus',
grpc__service__pb2.CudaSharedMemoryStatusRequest.SerializeToString,
grpc__service__pb2.CudaSharedMemoryStatusResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CudaSharedMemoryRegister(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/inference.GRPCInferenceService/CudaSharedMemoryRegister',
grpc__service__pb2.CudaSharedMemoryRegisterRequest.SerializeToString,
grpc__service__pb2.CudaSharedMemoryRegisterResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CudaSharedMemoryUnregister(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/inference.GRPCInferenceService/CudaSharedMemoryUnregister',
grpc__service__pb2.CudaSharedMemoryUnregisterRequest.SerializeToString,
grpc__service__pb2.CudaSharedMemoryUnregisterResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
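# --- Illustrative usage sketch (not part of the generated code) ---
# A typical client-side liveness check with the stub above, assuming an
# inference server is listening on localhost:8001 (address is a placeholder):
#
#   channel = grpc.insecure_channel('localhost:8001')
#   stub = GRPCInferenceServiceStub(channel)
#   response = stub.ServerLive(grpc__service__pb2.ServerLiveRequest())
#   print(response)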
|
dfvfs/path/xfs_path_spec.py
|
dfjxs/dfvfs
| 176 |
110252
|
# -*- coding: utf-8 -*-
"""The XFS path specification implementation."""
from dfvfs.lib import definitions
from dfvfs.path import factory
from dfvfs.path import path_spec
class XFSPathSpec(path_spec.PathSpec):
"""XFS path specification implementation.
Attributes:
inode (int): inode.
location (str): location.
"""
TYPE_INDICATOR = definitions.TYPE_INDICATOR_XFS
def __init__(
self, inode=None, location=None, parent=None, **kwargs):
"""Initializes a path specification.
Note that an XFS path specification must have a parent.
Args:
inode (Optional[int]): inode.
location (Optional[str]): location.
parent (Optional[PathSpec]): parent path specification.
Raises:
ValueError: when parent or both inode and location are not set.
"""
if (not inode and not location) or not parent:
raise ValueError('Missing inode and location, or parent value.')
super(XFSPathSpec, self).__init__(parent=parent, **kwargs)
self.inode = inode
self.location = location
@property
def comparable(self):
"""str: comparable representation of the path specification."""
string_parts = []
if self.inode is not None:
string_parts.append('inode: {0:d}'.format(self.inode))
if self.location is not None:
string_parts.append('location: {0:s}'.format(self.location))
return self._GetComparable(sub_comparable_string=', '.join(string_parts))
factory.Factory.RegisterPathSpec(XFSPathSpec)
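# A minimal usage sketch (not part of the original module). It assumes the OS
# path specification type from dfvfs.path.os_path_spec as the parent and uses
# a hypothetical image path and file location.
if __name__ == '__main__':
  from dfvfs.path import os_path_spec
  parent = os_path_spec.OSPathSpec(location='/tmp/xfs.raw')
  xfs_spec = XFSPathSpec(location='/a_directory/a_file', parent=parent)
  print(xfs_spec.comparable)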
|
koku/api/migrations/0031_clone_schema.py
|
rubik-ai/koku
| 157 |
110277
|
# Generated by Django 3.1.2 on 2020-10-14 12:24
import os
from django.db import migrations
from koku import migration_sql_helpers as msh
def apply_clone_schema(apps, schema_editor):
path = msh.find_db_functions_dir()
msh.apply_sql_file(schema_editor, os.path.join(path, "clone_schema.sql"), literal_placeholder=True)
class Migration(migrations.Migration):
dependencies = [("api", "0030_auto_20201007_1403")]
operations = [migrations.RunPython(code=apply_clone_schema)]
|
survae/transforms/stochastic/permutation.py
|
alisiahkoohi/survae_flows
| 262 |
110281
|
import torch
from survae.transforms.stochastic import StochasticTransform
class StochasticPermutation(StochasticTransform):
'''A stochastic permutation layer.'''
def __init__(self, dim=1):
super(StochasticPermutation, self).__init__()
self.register_buffer('buffer', torch.zeros(1))
self.dim = dim
def forward(self, x):
rand = torch.rand(x.shape[0], x.shape[self.dim], device=x.device)
permutation = rand.argsort(dim=1)
for d in range(1, self.dim):
permutation = permutation.unsqueeze(1)
for d in range(self.dim+1, x.dim()):
permutation = permutation.unsqueeze(-1)
permutation = permutation.expand_as(x)
z = torch.gather(x, self.dim, permutation)
ldj = self.buffer.new_zeros(x.shape[0])
return z, ldj
def inverse(self, z):
rand = torch.rand(z.shape[0], z.shape[self.dim], device=z.device)
permutation = rand.argsort(dim=1)
for d in range(1, self.dim):
permutation = permutation.unsqueeze(1)
for d in range(self.dim+1, z.dim()):
permutation = permutation.unsqueeze(-1)
permutation = permutation.expand_as(z)
x = torch.gather(z, self.dim, permutation)
return x
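# A minimal usage sketch (not part of the original module); the 8x3x4x4 batch
# below is an arbitrary example and dim=1 permutes the channel axis.
if __name__ == '__main__':
    layer = StochasticPermutation(dim=1)
    x = torch.randn(8, 3, 4, 4)
    z, ldj = layer(x)  # ldj is zero: a permutation has unit Jacobian determinant
    x_new = layer.inverse(z)  # note: inverse samples a fresh random permutation
    print(z.shape, ldj.shape, x_new.shape)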
|
care/facility/migrations/0121_auto_20200619_2306.py
|
gigincg/care
| 189 |
110283
|
# Generated by Django 2.2.11 on 2020-06-19 17:36
from django.db import migrations, models
import django.db.models.deletion
import fernet_fields.fields
class Migration(migrations.Migration):
dependencies = [
('facility', '0120_patientsample_icmr_label'),
]
operations = [
migrations.AddField(
model_name='patientsearch',
name='facility',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='facility.Facility'),
),
migrations.AddField(
model_name='patientsearch',
name='patient_external_id',
field=fernet_fields.fields.EncryptedCharField(default='', max_length=100),
),
]
|
apps/reputation/serializers.py
|
macdaliot/exist
| 159 |
110285
|
from rest_framework import serializers
from .models import blacklist
class blSerializer(serializers.ModelSerializer):
source = serializers.CharField(source='get_source_display')
class Meta:
model = blacklist
fields = ('__all__')
class sourceSerializer(serializers.ModelSerializer):
class Meta:
model = blacklist
fields = ('SOURCES',)
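# A minimal usage sketch (not part of the original module); it assumes at
# least one blacklist row already exists in the database.
def example_serialize_first_entry():
    entry = blacklist.objects.first()
    return blSerializer(entry).data if entry is not None else None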
|
tests/modules/core/test_speedtest.py
|
spxtr/bumblebee-status
| 1,089 |
110299
|
import pytest
pytest.importorskip("speedtest")
def test_load_module():
__import__("modules.core.speedtest")
|
tests/xfdnn/test_xfdnn_compiler_caffe_deephi.py
|
yarenty/ml-suite
| 334 |
110336
|
#!/usr/bin/env python
#
# // SPDX-License-Identifier: BSD-3-CLAUSE
#
# (C) Copyright 2018, Xilinx, Inc.
#
import os,sys
from xfdnn.tools.compile.bin.xfdnn_compiler_caffe import CaffeFrontend as xfdnnCompiler
def run_compiler(dsp, mem, prototxt, caffemodel, quantcfg):
print("Testing xfdnn_compiler_caffe...")
print("Testing:\n prototxt %s\n caffemodel %s\n quantization file %s\n dsp %s\n mem %s\n" % (prototxt,caffemodel,quantcfg,dsp,mem))
compiler = xfdnnCompiler(
usedeephi=True,
cpulayermustgo=True,
pipelineconvmaxpool=True,
quant_cfgfile=quantcfg,
networkfile=prototxt,
weights=caffemodel,
dsp=dsp,
memory=mem,
generatefile="work/"+prototxt.replace('/','_')+"_"+str(dsp)+"/fpga.cmds",
anew="work/"+prototxt.replace('/','_')+"_"+str(dsp)+"/optimized_model"
)
SUCCESS = compiler.compile()
#assert(SUCCESS)
# Compiler will throw exception if it does not succeed as of 3/12/19
del compiler
def get_caffe_model_list_all():
prototxt_list = [ \
"/wrk/acceleration/models/deephi/License_Plate_Recognition_INT8_models_test_codes/license_plate_recognition_quantizations.prototxt", \
"/wrk/acceleration/models/deephi/Car_Logo_Recognition_INT8_models_test_codes/car_logo_recognition_quantizations.prototxt", \
"/wrk/acceleration/models/deephi/Car_Attributes_Recognition_INT8_models_test_codes/car_attributes_recognition_quantizations.prototxt", \
"/wrk/acceleration/models/deephi/Pedestrian_Attributes_Recognition_INT8_models_test_codes/pedestrian_attributes_recognition_quantizations.prototxt", \
"/wrk/acceleration/models/deephi/reid_model_release_20190301/deploy.prototxt", \
"/wrk/acceleration/models/deephi/Car_Logo_Detection/deploy.prototxt", \
"/wrk/acceleration/models/deephi/Plate_Detection/deploy.prototxt", \
# "/wrk/acceleration/models/deephi/Pedestrian_Detection_INT8_models_test_codes/deploy.prototxt", \
]
caffemodel_list = [ \
"/wrk/acceleration/models/deephi/License_Plate_Recognition_INT8_models_test_codes/license_plate_recognition_quantizations.caffemodel", \
"/wrk/acceleration/models/deephi/Car_Logo_Recognition_INT8_models_test_codes/car_logo_recognition_quantizations.caffemodel", \
"/wrk/acceleration/models/deephi/Car_Attributes_Recognition_INT8_models_test_codes/car_attributes_recognition_quantizations.caffemodel", \
"/wrk/acceleration/models/deephi/Pedestrian_Attributes_Recognition_INT8_models_test_codes/pedestrian_attributes_recognition_quantizations.caffemodel", \
"/wrk/acceleration/models/deephi/reid_model_release_20190301/deploy.caffemodel", \
"/wrk/acceleration/models/deephi/Car_Logo_Detection/deploy.caffemodel", \
"/wrk/acceleration/models/deephi/Plate_Detection/deploy.caffemodel", \
# "/wrk/acceleration/models/deephi/Pedestrian_Detection_INT8_models_test_codes/deploy.caffemodel", \
]
quantcfg_list = [ \
"/wrk/acceleration/models/deephi/License_Plate_Recognition_INT8_models_test_codes/fix_info.txt", \
"/wrk/acceleration/models/deephi/Car_Logo_Recognition_INT8_models_test_codes/fix_info.txt", \
"/wrk/acceleration/models/deephi/Car_Attributes_Recognition_INT8_models_test_codes/fix_info.txt", \
"/wrk/acceleration/models/deephi/Pedestrian_Attributes_Recognition_INT8_models_test_codes/fix_info.txt", \
"/wrk/acceleration/models/deephi/reid_model_release_20190301/fix_info.txt", \
"/wrk/acceleration/models/deephi/Car_Logo_Detection/fix_info.txt", \
"/wrk/acceleration/models/deephi/Plate_Detection/fix_info.txt", \
# "/wrk/acceleration/models/deephi/Pedestrian_Detection_INT8_models_test_codes/fix_info.txt", \
]
return (prototxt_list,caffemodel_list,quantcfg_list)
def get_caffe_model_list_1():
prototxt_list = [ \
"models/caffe/bvlc_googlenet_without_lrn/fp32/bvlc_googlenet_without_lrn_deploy.prototxt" \
]
caffemodel_list = [ \
"models/caffe/bvlc_googlenet_without_lrn/fp32/bvlc_googlenet_without_lrn.caffemodel" \
]
return (prototxt_list,caffemodel_list)
def get_caffe_model_list():
(p,c,q) = get_caffe_model_list_all()
return list(zip(p,c,q))
testdata = get_caffe_model_list()
import pytest
@pytest.mark.parametrize('prototxt,caffemodel,quantcfg', testdata)
def test_xfdnn_compiler_caffe_28(prototxt,caffemodel,quantcfg):
print("Testing xfdnn_compiler_caffe...")
dsp = 28
mem = 4
run_compiler(dsp,mem,prototxt,caffemodel,quantcfg)
@pytest.mark.parametrize('prototxt,caffemodel,quantcfg', testdata)
def test_xfdnn_compiler_caffe_56(prototxt,caffemodel,quantcfg):
print("Testing xfdnn_compiler_caffe...")
dsp = 56
mem = 6
run_compiler(dsp,mem,prototxt,caffemodel,quantcfg)
@pytest.mark.parametrize('prototxt,caffemodel,quantcfg', testdata)
def test_xfdnn_compiler_caffe_96(prototxt,caffemodel,quantcfg):
print("Testing xfdnn_compiler_caffe...")
dsp = 96
mem = 9
run_compiler(dsp,mem,prototxt,caffemodel,quantcfg)
if __name__ == "__main__":
    for (prototxt, caffemodel, quantcfg) in testdata:
test_xfdnn_compiler_caffe_28(prototxt,caffemodel,quantcfg)
test_xfdnn_compiler_caffe_56(prototxt,caffemodel,quantcfg)
test_xfdnn_compiler_caffe_96(prototxt,caffemodel,quantcfg)
|
yukarin/param.py
|
m95music/yukarin
| 139 |
110376
|
class AcousticParam(object):
def __init__(
self,
sampling_rate: int = 24000,
pad_second: float = 0,
threshold_db: float = None,
frame_period: int = 5,
order: int = 8,
alpha: float = 0.466,
f0_floor: float = 71,
f0_ceil: float = 800,
fft_length: int = 1024,
dtype: str = 'float32',
) -> None:
self.sampling_rate = sampling_rate
self.pad_second = pad_second
self.threshold_db = threshold_db
self.frame_period = frame_period
self.order = order
self.alpha = alpha
self.f0_floor = f0_floor
self.f0_ceil = f0_ceil
self.fft_length = fft_length
self.dtype = dtype
def _asdict(self):
return self.__dict__
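# A minimal usage sketch (not part of the original module); the overridden
# sampling rate below is an arbitrary example value.
if __name__ == '__main__':
    param = AcousticParam(sampling_rate=16000)
    print(param._asdict())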
|
poco/utils/multitouch_gesture.py
|
HBoPRC/Poco
| 1,444 |
110386
|
# coding=utf-8
import math
from poco.utils.track import MotionTrack
def make_pinching(direction, center, size, percent, dead_zone, duration):
w, h = size
half_distance = percent / 2
dead_zone_distance = dead_zone / 2
pa0 = center
pb0 = list(pa0)
pa1 = list(pa0)
pb1 = list(pa0)
if direction == 'in':
pa0[0] += w * half_distance
pa0[1] += h * half_distance
pb0[0] -= w * half_distance
pb0[1] -= h * half_distance
pa1[0] += w * dead_zone_distance
pa1[1] += h * dead_zone_distance
pb1[0] -= w * dead_zone_distance
pb1[1] -= h * dead_zone_distance
else:
pa1[0] += w * half_distance
pa1[1] += h * half_distance
pb1[0] -= w * half_distance
pb1[1] -= h * half_distance
pa0[0] += w * dead_zone_distance
pa0[1] += h * dead_zone_distance
pb0[0] -= w * dead_zone_distance
pb0[1] -= h * dead_zone_distance
speed = math.sqrt(w * h) * (percent - dead_zone) / 2 / duration
track_a = MotionTrack([pa0, pa1], speed)
track_b = MotionTrack([pb0, pb1], speed)
return track_a, track_b
def make_panning():
pass
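# A minimal usage sketch (not part of the original module). The center, size,
# percent, dead_zone and duration values below are hypothetical and assume the
# normalized-coordinate convention expected by callers of make_pinching.
if __name__ == '__main__':
    track_a, track_b = make_pinching(
        'in', [0.5, 0.5], (1.0, 1.0), percent=0.6, dead_zone=0.1, duration=2.0)
    print(track_a, track_b)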
|
teradata/tdodbc.py
|
Teradata/PyTd
| 133 |
110388
|
"""An implementation of the Python Database API Specification v2.0
using Teradata ODBC."""
# The MIT License (MIT)
#
# Copyright (c) 2015 by Teradata
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import atexit
import collections
import ctypes
import platform
import re
import sys
import threading
from . import util, datatypes
from .api import * # @UnusedWildImport # noqa
logger = logging.getLogger(__name__)
# ODBC Constants
SQL_ATTR_ODBC_VERSION, SQL_OV_ODBC2, SQL_OV_ODBC3 = 200, 2, 3
SQL_ATTR_QUERY_TIMEOUT, SQL_ATTR_AUTOCOMMIT = 0, 102
SQL_NULL_HANDLE, SQL_HANDLE_ENV, SQL_HANDLE_DBC, SQL_HANDLE_STMT = 0, 1, 2, 3
SQL_SUCCESS, SQL_SUCCESS_WITH_INFO = 0, 1,
SQL_ERROR, SQL_INVALID_HANDLE = -1, -2
SQL_NEED_DATA, SQL_NO_DATA = 99, 100
SQL_CLOSE, SQL_UNBIND, SQL_RESET_PARAMS = 0, 2, 3
SQL_PARAM_TYPE_UNKNOWN = 0
SQL_PARAM_INPUT, SQL_PARAM_INPUT_OUTPUT, SQL_PARAM_OUTPUT = 1, 2, 4
SQL_ATTR_PARAM_BIND_TYPE = 18
SQL_ATTR_ROWS_FETCHED_PTR, SQL_ATTR_ROW_STATUS_PTR = 26, 25
SQL_ATTR_ROW_ARRAY_SIZE = 27
SQL_ATTR_PARAMS_PROCESSED_PTR, SQL_ATTR_PARAM_STATUS_PTR = 21, 20
SQL_ATTR_PARAMSET_SIZE = 22
SQL_PARAM_BIND_BY_COLUMN = 0
SQL_NULL_DATA, SQL_NTS = -1, -3
SQL_IS_POINTER, SQL_IS_UINTEGER, SQL_IS_INTEGER = -4, -5, -6
SQL_FETCH_NEXT, SQL_FETCH_FIRST, SQL_FETCH_LAST = 1, 2, 4
SQL_SIGNED_OFFSET = -20
SQL_C_BINARY, SQL_BINARY, SQL_VARBINARY, SQL_LONGVARBINARY = -2, -2, -3, -4
SQL_C_WCHAR, SQL_WCHAR, SQL_WVARCHAR, SQL_WLONGVARCHAR = -8, -8, -9, -10
SQL_C_SBIGINT = -5 + SQL_SIGNED_OFFSET
SQL_FLOAT = 6
SQL_C_FLOAT = SQL_REAL = 7
SQL_C_DOUBLE = SQL_DOUBLE = 8
SQL_DESC_TYPE_NAME = 14
SQL_COMMIT, SQL_ROLLBACK = 0, 1
SQL_STATE_DATA_TRUNCATED = '01004'
SQL_STATE_CONNECTION_NOT_OPEN = '08003'
SQL_STATE_INVALID_TRANSACTION_STATE = '25000'
SQLLEN = ctypes.c_ssize_t
SQLULEN = ctypes.c_size_t
SQLUSMALLINT = ctypes.c_ushort
SQLSMALLINT = ctypes.c_short
SQLINTEGER = ctypes.c_int
SQLFLOAT = ctypes.c_float
SQLDOUBLE = ctypes.c_double
SQLBYTE = ctypes.c_ubyte
SQLCHAR = ctypes.c_char
SQLWCHAR = ctypes.c_wchar
SQLRETURN = SQLSMALLINT
SQLPOINTER = ctypes.c_void_p
SQLHANDLE = ctypes.c_void_p
ADDR = ctypes.byref
PTR = ctypes.POINTER
ERROR_BUFFER_SIZE = 2 ** 10
SMALL_BUFFER_SIZE = 2 ** 12
LARGE_BUFFER_SIZE = 2 ** 20
TRUE = 1
FALSE = 0
odbc = None
hEnv = None
drivers = None
lock = threading.Lock()
pyVer = sys.version_info[0]
osType = platform.system()
# The amount of seconds to wait when submitting non-user defined SQL (e.g.
# set query bands, etc).
QUERY_TIMEOUT = 120
if pyVer > 2:
unicode = str # @ReservedAssignment
# Define OS specific methods for handling buffers and strings.
if osType == "Darwin" or osType == "Windows" or osType.startswith('CYGWIN'):
# Mac OSx and Windows
def _createBuffer(l):
return ctypes.create_unicode_buffer(l)
def _inputStr(s, l=None):
if s is None:
return None
return ctypes.create_unicode_buffer(
(s if util.isString(s) else str(s)), l)
def _outputStr(s):
return s.value
def _convertParam(s):
if s is None:
return None
return s if util.isString(s) else str(s)
else:
# Unix/Linux
# Multiply by 3 as one UTF-16 character can require 3 UTF-8 bytes.
def _createBuffer(l):
return ctypes.create_string_buffer(l * 3)
def _inputStr(s, l=None):
if s is None:
return None
return ctypes.create_string_buffer(
(s if util.isString(s) else str(s)).encode('utf8'), l)
def _outputStr(s):
return unicode(s.raw.partition(b'\00')[0], 'utf8')
def _convertParam(s):
if s is None:
return None
return (s if util.isString(s) else str(s)).encode('utf8')
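    # The ODBC "W" entry points are driven with UTF-8 byte buffers on these
    # platforms, so SQLWCHAR is narrowed to a byte type below, before the
    # function prototypes are initialized.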
SQLWCHAR = ctypes.c_char
connections = []
def cleanupConnections():
"""Cleanup open connections."""
if connections:
logger.warn(
"%s open connections found on exit, attempting to close...",
len(connections))
for conn in list(connections):
conn.close()
def getDiagnosticInfo(handle, handleType=SQL_HANDLE_STMT):
"""Gets diagnostic information associated with ODBC calls, particularly
when errors occur."""
info = []
infoNumber = 1
while True:
sqlState = _createBuffer(6)
nativeError = SQLINTEGER()
messageBuffer = _createBuffer(ERROR_BUFFER_SIZE)
messageLength = SQLSMALLINT()
rc = odbc.SQLGetDiagRecW(handleType, handle, infoNumber, sqlState,
ADDR(nativeError), messageBuffer,
len(messageBuffer), ADDR(messageLength))
if rc == SQL_SUCCESS_WITH_INFO and \
messageLength.value > ctypes.sizeof(messageBuffer):
# Resize buffer to fit entire message.
messageBuffer = _createBuffer(messageLength.value)
continue
if rc == SQL_SUCCESS or rc == SQL_SUCCESS_WITH_INFO:
info.append(
(_outputStr(sqlState), _outputStr(messageBuffer),
abs(nativeError.value)))
infoNumber += 1
elif rc == SQL_NO_DATA:
return info
elif rc == SQL_INVALID_HANDLE:
raise InterfaceError(
'SQL_INVALID_HANDLE',
"Invalid handle passed to SQLGetDiagRecW.")
elif rc == SQL_ERROR:
if infoNumber > 1:
return info
raise InterfaceError(
"SQL_ERROR", "SQL_ERROR returned from SQLGetDiagRecW.")
else:
raise InterfaceError(
"UNKNOWN_RETURN_CODE",
"SQLGetDiagRecW returned an unknown return code: %s", rc)
def checkStatus(rc, hEnv=SQL_NULL_HANDLE, hDbc=SQL_NULL_HANDLE,
hStmt=SQL_NULL_HANDLE, method="Method", ignore=None):
""" Check return status code and log any information or error messages.
If error is returned, raise exception."""
sqlState = []
logger.trace("%s returned status code %s", method, rc)
if rc not in (SQL_SUCCESS, SQL_NO_DATA):
if hStmt != SQL_NULL_HANDLE:
info = getDiagnosticInfo(hStmt, SQL_HANDLE_STMT)
elif hDbc != SQL_NULL_HANDLE:
info = getDiagnosticInfo(hDbc, SQL_HANDLE_DBC)
else:
info = getDiagnosticInfo(hEnv, SQL_HANDLE_ENV)
for i in info:
sqlState.append(i[0])
if rc == SQL_SUCCESS_WITH_INFO:
logger.debug(
u"{} succeeded with info: [{}] {}".format(method,
i[0], i[1]))
elif not ignore or i[0] not in ignore:
logger.debug((u"{} returned non-successful error code "
u"{}: [{}] {}").format(method, rc, i[0], i[1]))
msg = ", ".join(map(lambda m: m[1], info))
if re.search(r'[^0-9\s]', msg) is None or i[0] == 'I':
msg = msg + (". Check that the ODBC driver is installed "
"and the ODBCINI or ODBCINST environment "
"variables are correctly set.")
raise DatabaseError(i[2], u"[{}] {}".format(i[0], msg), i[0])
else:
logger.debug(
u"Ignoring return of {} from {}: [{}] {}".format(rc,
method,
i[0],
i[1]))
# Breaking here because this error is ignored and info could
# contain older error messages.
# E.g. if error was SQL_STATE_CONNECTION_NOT_OPEN, the next
# error would be the original connection error.
break
if not info:
logger.info(
"No information associated with return code %s from %s",
rc, method)
return sqlState
def prototype(func, *args):
"""Setup function prototype"""
func.restype = SQLRETURN
func.argtypes = args
def initFunctionPrototypes():
"""Initialize function prototypes for ODBC calls."""
prototype(odbc.SQLAllocHandle, SQLSMALLINT, SQLHANDLE, PTR(SQLHANDLE))
prototype(odbc.SQLGetDiagRecW, SQLSMALLINT, SQLHANDLE, SQLSMALLINT,
PTR(SQLWCHAR), PTR(SQLINTEGER), PTR(SQLWCHAR), SQLSMALLINT,
PTR(SQLSMALLINT))
prototype(odbc.SQLSetEnvAttr, SQLHANDLE,
SQLINTEGER, SQLPOINTER, SQLINTEGER)
prototype(odbc.SQLDriverConnectW, SQLHANDLE, SQLHANDLE,
PTR(SQLWCHAR), SQLSMALLINT, PTR(SQLWCHAR), SQLSMALLINT,
PTR(SQLSMALLINT), SQLUSMALLINT)
prototype(odbc.SQLFreeHandle, SQLSMALLINT, SQLHANDLE)
prototype(odbc.SQLExecDirectW, SQLHANDLE, PTR(SQLWCHAR), SQLINTEGER)
prototype(odbc.SQLNumResultCols, SQLHANDLE, PTR(SQLSMALLINT))
prototype(odbc.SQLDescribeColW, SQLHANDLE, SQLUSMALLINT, PTR(SQLWCHAR),
SQLSMALLINT, PTR(SQLSMALLINT), PTR(SQLSMALLINT), PTR(SQLULEN),
PTR(SQLSMALLINT), PTR(SQLSMALLINT))
prototype(odbc.SQLColAttributeW, SQLHANDLE, SQLUSMALLINT,
SQLUSMALLINT, SQLPOINTER, SQLSMALLINT, PTR(SQLSMALLINT),
PTR(SQLLEN))
prototype(odbc.SQLFetch, SQLHANDLE)
prototype(odbc.SQLGetData, SQLHANDLE, SQLUSMALLINT,
SQLSMALLINT, SQLPOINTER, SQLLEN, PTR(SQLLEN))
prototype(odbc.SQLFreeStmt, SQLHANDLE, SQLUSMALLINT)
prototype(odbc.SQLPrepareW, SQLHANDLE, PTR(SQLWCHAR), SQLINTEGER)
prototype(odbc.SQLNumParams, SQLHANDLE, PTR(SQLSMALLINT))
prototype(odbc.SQLDescribeParam, SQLHANDLE, SQLUSMALLINT, PTR(
SQLSMALLINT), PTR(SQLULEN), PTR(SQLSMALLINT), PTR(SQLSMALLINT))
prototype(odbc.SQLBindParameter, SQLHANDLE, SQLUSMALLINT, SQLSMALLINT,
SQLSMALLINT, SQLSMALLINT, SQLULEN, SQLSMALLINT, SQLPOINTER,
SQLLEN, PTR(SQLLEN))
prototype(odbc.SQLExecute, SQLHANDLE)
prototype(odbc.SQLSetStmtAttr, SQLHANDLE,
SQLINTEGER, SQLPOINTER, SQLINTEGER)
prototype(odbc.SQLMoreResults, SQLHANDLE)
prototype(odbc.SQLDisconnect, SQLHANDLE)
prototype(odbc.SQLSetConnectAttr, SQLHANDLE,
SQLINTEGER, SQLPOINTER, SQLINTEGER)
prototype(odbc.SQLEndTran, SQLSMALLINT, SQLHANDLE, SQLSMALLINT)
prototype(odbc.SQLRowCount, SQLHANDLE, PTR(SQLLEN))
prototype(odbc.SQLBindCol, SQLHANDLE, SQLUSMALLINT, SQLSMALLINT,
SQLPOINTER, SQLLEN, PTR(SQLLEN))
prototype(odbc.SQLDrivers, SQLHANDLE, SQLUSMALLINT, PTR(SQLCHAR),
SQLSMALLINT, PTR(SQLSMALLINT), PTR(SQLCHAR), SQLSMALLINT,
PTR(SQLSMALLINT))
def initOdbcLibrary(odbcLibPath=None):
"""Initialize the ODBC Library."""
global odbc
if odbc is None:
if osType == "Windows":
odbc = ctypes.windll.odbc32
else:
if not odbcLibPath:
# If MAC OSx
if osType == "Darwin":
odbcLibPath = "libiodbc.dylib"
elif osType.startswith("CYGWIN"):
odbcLibPath = "odbc32.dll"
else:
odbcLibPath = 'libodbc.so'
logger.info("Loading ODBC Library: %s", odbcLibPath)
odbc = ctypes.cdll.LoadLibrary(odbcLibPath)
def initDriverList():
global drivers
if drivers is None:
drivers = []
description = ctypes.create_string_buffer(SMALL_BUFFER_SIZE)
descriptionLength = SQLSMALLINT()
attributesLength = SQLSMALLINT()
rc = SQL_SUCCESS
direction = SQL_FETCH_FIRST
while True:
rc = odbc.SQLDrivers(hEnv, direction, description,
len(description), ADDR(descriptionLength),
None, 0, attributesLength)
checkStatus(rc, hEnv=hEnv)
if rc == SQL_NO_DATA:
break
drivers.append(description.value.decode("utf-8"))
direction = SQL_FETCH_NEXT
logger.info("Available drivers: {}".format(", ".join(drivers)))
def initOdbcEnv():
"""Initialize ODBC environment handle."""
global hEnv
if hEnv is None:
hEnv = SQLPOINTER()
rc = odbc.SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, ADDR(hEnv))
checkStatus(rc, hEnv=hEnv)
atexit.register(cleanupOdbcEnv)
atexit.register(cleanupConnections)
# Set the ODBC environment's compatibility level to ODBC 3.0
rc = odbc.SQLSetEnvAttr(hEnv, SQL_ATTR_ODBC_VERSION, SQL_OV_ODBC3, 0)
checkStatus(rc, hEnv=hEnv)
def cleanupOdbcEnv():
"""Cleanup ODBC environment handle."""
if hEnv:
odbc.SQLFreeHandle(SQL_HANDLE_ENV, hEnv)
def init(odbcLibPath=None):
try:
lock.acquire()
initOdbcLibrary(odbcLibPath)
initFunctionPrototypes()
initOdbcEnv()
initDriverList()
finally:
lock.release()
def determineDriver(dbType, driver):
retval = driver
if driver is not None:
if driver not in drivers:
raise InterfaceError(
"DRIVER_NOT_FOUND",
"No driver found with name '{}'. "
" Available drivers: {}".format(driver, ",".join(drivers)))
else:
matches = []
for driver in drivers:
if dbType in driver:
matches.append(driver)
if not matches:
raise InterfaceError(
"DRIVER_NOT_FOUND",
"No driver found for '{}'. "
"Available drivers: {}".format(dbType, ",".join(drivers)))
else:
retval = matches[len(matches) - 1]
if len(matches) > 1:
logger.warning(
"More than one driver found "
"for '{}'. Using '{}'."
" Specify the 'driver' option to "
"select a specific driver.".format(dbType, retval))
return retval
class OdbcConnection:
"""Represents a Connection to Teradata using ODBC."""
def __init__(self, dbType="Teradata", system=None,
                 username=None, password=None, autoCommit=False,
transactionMode=None, queryBands=None, odbcLibPath=None,
dataTypeConverter=datatypes.DefaultDataTypeConverter(),
driver=None, **kwargs):
"""Creates an ODBC connection."""
self.hDbc = SQLPOINTER()
self.cursorCount = 0
self.sessionno = 0
self.cursors = []
self.dbType = dbType
self.converter = dataTypeConverter
# Initialize connection handle
init(odbcLibPath)
# Build connect string
extraParams = set(k.lower() for k in kwargs)
connectParams = collections.OrderedDict()
if "dsn" not in extraParams:
connectParams["DRIVER"] = determineDriver(dbType, driver)
if system:
connectParams["DBCNAME"] = system
if username:
connectParams["UID"] = username
if password:
connectParams["PWD"] = password
if transactionMode:
connectParams["SESSIONMODE"] = "Teradata" \
if transactionMode == "TERA" else transactionMode
connectParams.update(kwargs)
connectString = u";".join(u"{}={}".format(key, value)
for key, value in connectParams.items())
rc = odbc.SQLAllocHandle(SQL_HANDLE_DBC, hEnv, ADDR(self.hDbc))
checkStatus(rc, hEnv=hEnv, method="SQLAllocHandle")
# Create connection
logger.debug("Creating connection using ODBC ConnectString: %s",
re.sub("PWD=.*?(;|$)", "PWD=XXX;", connectString))
try:
lock.acquire()
rc = odbc.SQLDriverConnectW(self.hDbc, 0, _inputStr(connectString),
SQL_NTS, None, 0, None, 0)
finally:
lock.release()
try:
checkStatus(rc, hDbc=self.hDbc, method="SQLDriverConnectW")
except:
rc = odbc.SQLFreeHandle(SQL_HANDLE_DBC, self.hDbc)
self.hDbc = None
raise
connections.append(self)
# Setup autocommit, query bands, etc.
try:
logger.debug("Setting AUTOCOMMIT to %s",
"True" if util.booleanValue(autoCommit) else "False")
rc = odbc.SQLSetConnectAttr(
self.hDbc, SQL_ATTR_AUTOCOMMIT,
TRUE if util.booleanValue(autoCommit) else FALSE, 0)
checkStatus(
rc, hDbc=self.hDbc,
method="SQLSetConnectAttr - SQL_ATTR_AUTOCOMMIT")
if dbType == "Teradata":
with self.cursor() as c:
self.sessionno = c.execute(
"SELECT SESSION",
queryTimeout=QUERY_TIMEOUT).fetchone()[0]
logger.debug("SELECT SESSION returned %s", self.sessionno)
if queryBands:
c.execute(u"SET QUERY_BAND = '{};' FOR SESSION".format(
u";".join(u"{}={}".format(util.toUnicode(k),
util.toUnicode(v))
for k, v in queryBands.items())),
queryTimeout=QUERY_TIMEOUT)
self.commit()
logger.debug("Created session %s.", self.sessionno)
except Exception:
self.close()
raise
def close(self):
"""CLoses an ODBC Connection."""
if self.hDbc:
if self.sessionno:
logger.debug("Closing session %s...", self.sessionno)
for cursor in list(self.cursors):
cursor.close()
rc = odbc.SQLDisconnect(self.hDbc)
sqlState = checkStatus(
rc, hDbc=self.hDbc, method="SQLDisconnect",
ignore=[SQL_STATE_CONNECTION_NOT_OPEN,
SQL_STATE_INVALID_TRANSACTION_STATE])
if SQL_STATE_INVALID_TRANSACTION_STATE in sqlState:
logger.warning("Rolling back open transaction for session %s "
"so it can be closed.", self.sessionno)
rc = odbc.SQLEndTran(SQL_HANDLE_DBC, self.hDbc, SQL_ROLLBACK)
checkStatus(
rc, hDbc=self.hDbc,
method="SQLEndTran - SQL_ROLLBACK - Disconnect")
rc = odbc.SQLDisconnect(self.hDbc)
checkStatus(rc, hDbc=self.hDbc, method="SQLDisconnect")
rc = odbc.SQLFreeHandle(SQL_HANDLE_DBC, self.hDbc)
if rc != SQL_INVALID_HANDLE:
checkStatus(rc, hDbc=self.hDbc, method="SQLFreeHandle")
connections.remove(self)
self.hDbc = None
if self.sessionno:
logger.debug("Session %s closed.", self.sessionno)
def commit(self):
"""Commits a transaction."""
logger.debug("Committing transaction...")
rc = odbc.SQLEndTran(SQL_HANDLE_DBC, self.hDbc, SQL_COMMIT)
checkStatus(rc, hDbc=self.hDbc, method="SQLEndTran - SQL_COMMIT")
def rollback(self):
"""Rollsback a transaction."""
logger.debug("Rolling back transaction...")
rc = odbc.SQLEndTran(SQL_HANDLE_DBC, self.hDbc, SQL_ROLLBACK)
checkStatus(rc, hDbc=self.hDbc, method="SQLEndTran - SQL_ROLLBACK")
def cursor(self):
"""Returns a cursor."""
cursor = OdbcCursor(
self, self.dbType, self.converter, self.cursorCount)
self.cursorCount += 1
return cursor
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, t, value, traceback):
self.close()
def __repr__(self):
return "OdbcConnection(sessionno={})".format(self.sessionno)
connect = OdbcConnection
class OdbcCursor (util.Cursor):
"""Represents an ODBC Cursor."""
def __init__(self, connection, dbType, converter, num):
util.Cursor.__init__(self, connection, dbType, converter)
self.num = num
self.moreResults = None
if num > 0:
logger.debug(
"Creating cursor %s for session %s.", self.num,
self.connection.sessionno)
self.hStmt = SQLPOINTER()
rc = odbc.SQLAllocHandle(
SQL_HANDLE_STMT, connection.hDbc, ADDR(self.hStmt))
checkStatus(rc, hStmt=self.hStmt)
connection.cursors.append(self)
def callproc(self, procname, params, queryTimeout=0):
self._checkClosed()
query = "CALL {} (".format(procname)
for i in range(0, len(params)):
if i > 0:
query += ", "
query += "?"
query += ")"
logger.debug("Executing Procedure: %s", query)
self.execute(query, params, queryTimeout=queryTimeout)
return util.OutParams(params, self.dbType, self.converter)
def close(self):
if self.hStmt:
if self.num > 0:
logger.debug(
"Closing cursor %s for session %s.", self.num,
self.connection.sessionno)
rc = odbc.SQLFreeHandle(SQL_HANDLE_STMT, self.hStmt)
checkStatus(rc, hStmt=self.hStmt)
self.connection.cursors.remove(self)
self.hStmt = None
def _setQueryTimeout(self, queryTimeout):
rc = odbc.SQLSetStmtAttr(
self.hStmt, SQL_ATTR_QUERY_TIMEOUT, SQLPOINTER(queryTimeout),
SQL_IS_UINTEGER)
checkStatus(
rc, hStmt=self.hStmt,
method="SQLSetStmtStmtAttr - SQL_ATTR_QUERY_TIMEOUT")
def execute(self, query, params=None, queryTimeout=0):
self._checkClosed()
if params:
self.executemany(query, [params, ], queryTimeout)
else:
if self.connection.sessionno:
logger.debug(
"Executing query on session %s using SQLExecDirectW: %s",
self.connection.sessionno, query)
self._free()
self._setQueryTimeout(queryTimeout)
rc = odbc.SQLExecDirectW(
self.hStmt, _inputStr(_convertLineFeeds(query)), SQL_NTS)
checkStatus(rc, hStmt=self.hStmt, method="SQLExecDirectW")
self._handleResults()
return self
def executemany(self, query, params, batch=False, queryTimeout=0):
self._checkClosed()
self._free()
# Prepare the query
rc = odbc.SQLPrepareW(
self.hStmt, _inputStr(_convertLineFeeds(query)), SQL_NTS)
checkStatus(rc, hStmt=self.hStmt, method="SQLPrepare")
self._setQueryTimeout(queryTimeout)
# Get the number of parameters in the SQL statement.
numParams = SQLSMALLINT()
rc = odbc.SQLNumParams(self.hStmt, ADDR(numParams))
checkStatus(rc, hStmt=self.hStmt, method="SQLNumParams")
numParams = numParams.value
# The argument types.
dataTypes = []
for paramNum in range(0, numParams):
dataType = SQLSMALLINT()
parameterSize = SQLULEN()
decimalDigits = SQLSMALLINT()
nullable = SQLSMALLINT()
rc = odbc.SQLDescribeParam(
self.hStmt, paramNum + 1, ADDR(dataType), ADDR(parameterSize),
ADDR(decimalDigits), ADDR(nullable))
checkStatus(rc, hStmt=self.hStmt, method="SQLDescribeParams")
dataTypes.append(dataType.value)
if batch:
logger.debug(
"Executing query on session %s using batched SQLExecute: %s",
self.connection.sessionno, query)
self._executeManyBatch(params, numParams, dataTypes)
else:
logger.debug(
"Executing query on session %s using SQLExecute: %s",
self.connection.sessionno, query)
rc = odbc.SQLSetStmtAttr(self.hStmt, SQL_ATTR_PARAMSET_SIZE, 1, 0)
checkStatus(rc, hStmt=self.hStmt, method="SQLSetStmtAttr")
paramSetNum = 0
for p in params:
paramSetNum += 1
logger.trace("ParamSet %s: %s", paramSetNum, p)
if len(p) != numParams:
raise InterfaceError(
"PARAMS_MISMATCH", "The number of supplied parameters "
"({}) does not match the expected number of "
"parameters ({}).".format(len(p), numParams))
paramArray = []
lengthArray = []
for paramNum in range(0, numParams):
val = p[paramNum]
inputOutputType = _getInputOutputType(val)
valueType, paramType = _getParamValueType(
dataTypes[paramNum])
param, length, null = _getParamValue(val, valueType, False)
paramArray.append(param)
if param is not None:
if valueType == SQL_C_BINARY:
bufSize = SQLLEN(length)
lengthArray.append(SQLLEN(length))
columnSize = SQLULEN(length)
elif valueType == SQL_C_DOUBLE:
bufSize = SQLLEN(length)
lengthArray.append(SQLLEN(length))
columnSize = SQLULEN(length)
param = ADDR(param)
else:
bufSize = SQLLEN(ctypes.sizeof(param))
lengthArray.append(SQLLEN(SQL_NTS))
columnSize = SQLULEN(length)
if null:
# Handle INOUT parameter with NULL input value.
lengthArray.pop(-1)
lengthArray.append(SQLLEN(SQL_NULL_DATA))
else:
bufSize = SQLLEN(0)
columnSize = SQLULEN(0)
lengthArray.append(SQLLEN(SQL_NULL_DATA))
logger.trace("Binding parameter %s...", paramNum + 1)
rc = odbc.SQLBindParameter(
self.hStmt, paramNum + 1, inputOutputType, valueType,
paramType, columnSize, 0, param, bufSize,
ADDR(lengthArray[paramNum]))
checkStatus(
rc, hStmt=self.hStmt, method="SQLBindParameter")
logger.debug("Executing prepared statement.")
rc = odbc.SQLExecute(self.hStmt)
for paramNum in range(0, numParams):
val = p[paramNum]
if isinstance(val, OutParam):
val.size = lengthArray[paramNum].value
checkStatus(rc, hStmt=self.hStmt, method="SQLExecute")
self._handleResults()
return self
def _executeManyBatch(self, params, numParams, dataTypes):
# Get the number of parameter sets.
paramSetSize = len(params)
# Set the SQL_ATTR_PARAM_BIND_TYPE statement attribute to use
# column-wise binding.
rc = odbc.SQLSetStmtAttr(
self.hStmt, SQL_ATTR_PARAM_BIND_TYPE, SQL_PARAM_BIND_BY_COLUMN, 0)
checkStatus(rc, hStmt=self.hStmt, method="SQLSetStmtAttr")
# Specify the number of elements in each parameter array.
rc = odbc.SQLSetStmtAttr(
self.hStmt, SQL_ATTR_PARAMSET_SIZE, paramSetSize, 0)
checkStatus(rc, hStmt=self.hStmt, method="SQLSetStmtAttr")
# Specify a PTR to get the number of parameters processed.
# paramsProcessed = SQLULEN()
# rc = odbc.SQLSetStmtAttr(self.hStmt, SQL_ATTR_PARAMS_PROCESSED_PTR,
# ADDR(paramsProcessed), SQL_IS_POINTER)
# checkStatus(rc, hStmt=self.hStmt, method="SQLSetStmtAttr")
# Specify a PTR to get the status of the parameters processed.
# paramsStatus = (SQLUSMALLINT * paramSetSize)()
# rc = odbc.SQLSetStmtAttr(self.hStmt, SQL_ATTR_PARAM_STATUS_PTR,
# ADDR(paramsStatus), SQL_IS_POINTER)
# checkStatus(rc, hStmt=self.hStmt, method="SQLSetStmtAttr")
# Bind the parameters.
paramArrays = []
lengthArrays = []
paramSetSize = len(params)
paramSetNum = 0
debugEnabled = logger.isEnabledFor(logging.DEBUG)
for p in params:
paramSetNum += 1
if debugEnabled:
logger.debug("ParamSet %s: %s", paramSetNum, p)
if len(p) != numParams:
raise InterfaceError(
"PARAMS_MISMATCH", "The number of supplied parameters "
"({}) does not match the expected number of parameters "
"({}).".format(len(p), numParams))
for paramNum in range(0, numParams):
p = []
valueType, paramType = _getParamValueType(dataTypes[paramNum])
maxLen = 0
for paramSetNum in range(0, paramSetSize):
param, length, null = _getParamValue( # @UnusedVariable
params[paramSetNum][paramNum], valueType, True)
if length > maxLen:
maxLen = length
p.append(param)
if debugEnabled:
logger.debug("Max length for parameter %s is %s.",
paramNum + 1, maxLen)
if valueType == SQL_C_BINARY:
valueSize = SQLLEN(maxLen)
paramArrays.append((SQLBYTE * (paramSetSize * maxLen))())
elif valueType == SQL_C_DOUBLE:
valueSize = SQLLEN(maxLen)
paramArrays.append((SQLDOUBLE * paramSetSize)())
else:
maxLen += 1
valueSize = SQLLEN(ctypes.sizeof(SQLWCHAR) * maxLen)
paramArrays.append(_createBuffer(paramSetSize * maxLen))
lengthArrays.append((SQLLEN * paramSetSize)())
for paramSetNum in range(0, paramSetSize):
index = paramSetNum * maxLen
if p[paramSetNum] is not None:
if valueType == SQL_C_DOUBLE:
paramArrays[paramNum][paramSetNum] = p[paramSetNum]
else:
for c in p[paramSetNum]:
paramArrays[paramNum][index] = c
index += 1
if valueType == SQL_C_BINARY:
lengthArrays[paramNum][
paramSetNum] = len(p[paramSetNum])
else:
lengthArrays[paramNum][
paramSetNum] = SQLLEN(SQL_NTS)
paramArrays[paramNum][
index] = _convertParam("\x00")[0]
else:
lengthArrays[paramNum][paramSetNum] = SQLLEN(SQL_NULL_DATA)
if valueType == SQL_C_WCHAR:
paramArrays[paramNum][index] = _convertParam("\x00")[0]
if debugEnabled:
logger.debug("Binding parameter %s...", paramNum + 1)
rc = odbc.SQLBindParameter(self.hStmt, paramNum + 1,
SQL_PARAM_INPUT, valueType, paramType,
SQLULEN(maxLen), 0,
paramArrays[paramNum], valueSize,
lengthArrays[paramNum])
checkStatus(rc, hStmt=self.hStmt, method="SQLBindParameter")
# Execute the SQL statement.
if debugEnabled:
logger.debug("Executing prepared statement.")
rc = odbc.SQLExecute(self.hStmt)
checkStatus(rc, hStmt=self.hStmt, method="SQLExecute")
def _handleResults(self):
        # Reset cursor attributes.
self.description = None
self.rowcount = -1
self.rownumber = None
self.columns = {}
self.types = []
self.moreResults = None
# Get column count in result set.
columnCount = SQLSMALLINT()
rc = odbc.SQLNumResultCols(self.hStmt, ADDR(columnCount))
checkStatus(rc, hStmt=self.hStmt, method="SQLNumResultCols")
rowCount = SQLLEN()
rc = odbc.SQLRowCount(self.hStmt, ADDR(rowCount))
checkStatus(rc, hStmt=self.hStmt, method="SQLRowCount")
self.rowcount = rowCount.value
# Get column meta data and create row iterator.
if columnCount.value > 0:
self.description = []
nameBuf = _createBuffer(SMALL_BUFFER_SIZE)
nameLength = SQLSMALLINT()
dataType = SQLSMALLINT()
columnSize = SQLULEN()
decimalDigits = SQLSMALLINT()
nullable = SQLSMALLINT()
for col in range(0, columnCount.value):
rc = odbc.SQLDescribeColW(
self.hStmt, col + 1, nameBuf, len(nameBuf),
ADDR(nameLength), ADDR(dataType), ADDR(columnSize),
ADDR(decimalDigits), ADDR(nullable))
checkStatus(rc, hStmt=self.hStmt, method="SQLDescribeColW")
columnName = _outputStr(nameBuf)
odbc.SQLColAttributeW(
self.hStmt, col + 1, SQL_DESC_TYPE_NAME, ADDR(nameBuf),
len(nameBuf), None, None)
checkStatus(rc, hStmt=self.hStmt, method="SQLColAttributeW")
typeName = _outputStr(nameBuf)
typeCode = self.converter.convertType(self.dbType, typeName)
self.columns[columnName.lower()] = col
self.types.append((typeName, typeCode, dataType.value))
self.description.append((
columnName, typeCode, None, columnSize.value,
decimalDigits.value, None, nullable.value))
self.iterator = rowIterator(self)
def nextset(self):
self._checkClosed()
if self.moreResults is None:
self._checkForMoreResults()
if self.moreResults:
self._handleResults()
return True
def _checkForMoreResults(self):
rc = odbc.SQLMoreResults(self.hStmt)
checkStatus(rc, hStmt=self.hStmt, method="SQLMoreResults")
self.moreResults = rc == SQL_SUCCESS or rc == SQL_SUCCESS_WITH_INFO
return self.moreResults
def _free(self):
rc = odbc.SQLFreeStmt(self.hStmt, SQL_CLOSE)
checkStatus(rc, hStmt=self.hStmt, method="SQLFreeStmt - SQL_CLOSE")
rc = odbc.SQLFreeStmt(self.hStmt, SQL_RESET_PARAMS)
checkStatus(
rc, hStmt=self.hStmt, method="SQLFreeStmt - SQL_RESET_PARAMS")
def _checkClosed(self):
if not self.hStmt:
raise InterfaceError("CURSOR_CLOSED",
"Operations cannot be performed on a "
"closed cursor.")
def _convertLineFeeds(query):
return "\r".join(util.linesplit(query))
def _getInputOutputType(val):
inputOutputType = SQL_PARAM_INPUT
if isinstance(val, InOutParam):
inputOutputType = SQL_PARAM_INPUT_OUTPUT
elif isinstance(val, OutParam):
inputOutputType = SQL_PARAM_OUTPUT
return inputOutputType
def _getParamValueType(dataType):
valueType = SQL_C_WCHAR
paramType = SQL_WVARCHAR
if dataType in (SQL_BINARY, SQL_VARBINARY, SQL_LONGVARBINARY):
valueType = SQL_C_BINARY
paramType = dataType
elif dataType == SQL_WLONGVARCHAR:
paramType = SQL_WLONGVARCHAR
elif dataType in (SQL_FLOAT, SQL_DOUBLE, SQL_REAL):
valueType = SQL_C_DOUBLE
paramType = SQL_DOUBLE
return valueType, paramType
def _getParamBufferSize(val):
return SMALL_BUFFER_SIZE if val.size is None else val.size
def _getParamValue(val, valueType, batch):
length = 0
null = False
if val is None:
param = None
elif valueType == SQL_C_BINARY:
ba = val
if isinstance(val, InOutParam):
ba = val.inValue
if val.inValue is None:
null = True
ba = bytearray(_getParamBufferSize(val))
elif isinstance(val, OutParam):
ba = bytearray(_getParamBufferSize(val))
if ba is not None and not isinstance(ba, bytearray):
raise InterfaceError("Expected bytearray for BINARY parameter.")
length = len(ba)
if batch:
param = ba
else:
byteArr = SQLBYTE * length
param = byteArr.from_buffer(ba)
if isinstance(val, OutParam):
val.setValueFunc(lambda: ba[:val.size])
elif valueType == SQL_C_DOUBLE:
f = val
if isinstance(val, InOutParam):
f = val.inValue
if f is None:
null = True
f = float(0)
elif isinstance(val, OutParam):
f = float(0)
param = SQLDOUBLE(f if not util.isString(f) else float(f))
length = ctypes.sizeof(param)
if isinstance(val, OutParam):
val.setValueFunc(lambda: param.value)
else:
if batch:
param = _convertParam(val)
length = len(param)
elif isinstance(val, InOutParam):
length = _getParamBufferSize(val)
if val.inValue is not None:
param = _inputStr(val.inValue, length)
else:
param = _createBuffer(length)
null = True
val.setValueFunc(lambda: _outputStr(param))
elif isinstance(val, OutParam):
length = _getParamBufferSize(val)
param = _createBuffer(length)
val.setValueFunc(lambda: _outputStr(param))
else:
param = _inputStr(val)
length = len(param)
return param, length, null
def _getFetchSize(cursor):
"""Gets the fetch size associated with the cursor."""
fetchSize = cursor.fetchSize
for dataType in cursor.types:
if dataType[2] in (SQL_LONGVARBINARY, SQL_WLONGVARCHAR):
fetchSize = 1
break
return fetchSize
def _getBufSize(cursor, colIndex):
bufSize = cursor.description[colIndex - 1][3] + 1
dataType = cursor.types[colIndex - 1][0]
if dataType in datatypes.BINARY_TYPES:
pass
elif dataType in datatypes.FLOAT_TYPES:
bufSize = ctypes.sizeof(ctypes.c_double)
elif dataType in datatypes.INT_TYPES:
bufSize = 30
elif cursor.types[colIndex - 1][2] in (SQL_WCHAR, SQL_WVARCHAR,
SQL_WLONGVARCHAR):
pass
elif dataType.startswith("DATE"):
bufSize = 20
elif dataType.startswith("TIMESTAMP"):
bufSize = 40
elif dataType.startswith("TIME"):
bufSize = 30
elif dataType.startswith("INTERVAL"):
bufSize = 80
elif dataType.startswith("PERIOD"):
bufSize = 80
elif dataType.startswith("DECIMAL"):
bufSize = 42
else:
bufSize = 2 ** 16 + 1
return bufSize
def _setupColumnBuffers(cursor, buffers, bufSizes, dataTypes, indicators,
lastFetchSize):
"""Sets up the column buffers for retrieving multiple rows of a result set
at a time"""
fetchSize = _getFetchSize(cursor)
# If the fetchSize hasn't changed since the last time setupBuffers
# was called, then we can reuse the previous buffers.
if fetchSize != lastFetchSize:
logger.debug("FETCH_SIZE: %s" % fetchSize)
rc = odbc.SQLSetStmtAttr(
cursor.hStmt, SQL_ATTR_ROW_ARRAY_SIZE, fetchSize, 0)
checkStatus(rc, hStmt=cursor.hStmt,
method="SQLSetStmtAttr - SQL_ATTR_ROW_ARRAY_SIZE")
for col in range(1, len(cursor.description) + 1):
dataType = SQL_C_WCHAR
buffer = None
bufSize = _getBufSize(cursor, col)
lob = False
if cursor.types[col - 1][2] == SQL_LONGVARBINARY:
lob = True
bufSize = LARGE_BUFFER_SIZE
buffer = (ctypes.c_byte * bufSize)()
dataType = SQL_LONGVARBINARY
elif cursor.types[col - 1][2] == SQL_WLONGVARCHAR:
lob = True
buffer = _createBuffer(LARGE_BUFFER_SIZE)
bufSize = ctypes.sizeof(buffer)
dataType = SQL_WLONGVARCHAR
elif cursor.description[col - 1][1] == BINARY:
dataType = SQL_C_BINARY
buffer = (ctypes.c_byte * bufSize * fetchSize)()
elif cursor.types[col - 1][0] in datatypes.FLOAT_TYPES:
dataType = SQL_C_DOUBLE
buffer = (ctypes.c_double * fetchSize)()
else:
buffer = _createBuffer(bufSize * fetchSize)
bufSize = int(ctypes.sizeof(buffer) / fetchSize)
dataTypes.append(dataType)
buffers.append(buffer)
bufSizes.append(bufSize)
logger.debug("Buffer size for column %s: %s", col, bufSize)
indicators.append((SQLLEN * fetchSize)())
if not lob:
rc = odbc.SQLBindCol(cursor.hStmt, col, dataType, buffer,
bufSize, indicators[col - 1])
checkStatus(rc, hStmt=cursor.hStmt, method="SQLBindCol")
return fetchSize
def _getLobData(cursor, colIndex, buf, binary):
""" Get LOB Data """
length = SQLLEN()
dataType = SQL_C_WCHAR
bufSize = ctypes.sizeof(buf)
if binary:
dataType = SQL_C_BINARY
rc = odbc.SQLGetData(
cursor.hStmt, colIndex, dataType, buf, bufSize, ADDR(length))
sqlState = checkStatus(rc, hStmt=cursor.hStmt, method="SQLGetData")
val = None
if length.value != SQL_NULL_DATA:
if SQL_STATE_DATA_TRUNCATED in sqlState:
logger.debug(
"Data truncated. Calling SQLGetData to get next part "
"of data for column %s of size %s.",
colIndex, length.value)
if dataType == SQL_C_BINARY:
val = bytearray(length.value)
val[0:bufSize] = buf
newBufSize = len(val) - bufSize
newBuffer = (ctypes.c_byte * newBufSize).from_buffer(
val, bufSize)
rc = odbc.SQLGetData(
cursor.hStmt, colIndex, dataType, newBuffer,
newBufSize, ADDR(length))
checkStatus(
rc, hStmt=cursor.hStmt, method="SQLGetData2")
else:
val = [_outputStr(buf), ]
while SQL_STATE_DATA_TRUNCATED in sqlState:
rc = odbc.SQLGetData(
cursor.hStmt, colIndex, dataType, buf, bufSize,
ADDR(length))
sqlState = checkStatus(
rc, hStmt=cursor.hStmt, method="SQLGetData2")
val.append(_outputStr(buf))
val = "".join(val)
else:
if dataType == SQL_C_BINARY:
val = bytearray(
(ctypes.c_byte * length.value).from_buffer(buf))
else:
val = _outputStr(buf)
return val
def _getRow(cursor, buffers, bufSizes, dataTypes, indicators, rowIndex):
"""Reads a row of data from the fetched input buffers. If the column
type is a BLOB or CLOB, then that data is obtained via calls to
SQLGetData."""
row = []
for col in range(1, len(cursor.description) + 1):
val = None
buf = buffers[col - 1]
bufSize = bufSizes[col - 1]
dataType = dataTypes[col - 1]
length = indicators[col - 1][rowIndex]
if length != SQL_NULL_DATA:
if dataType == SQL_C_BINARY:
val = bytearray((ctypes.c_byte * length).from_buffer(
buf, bufSize * rowIndex))
elif dataType == SQL_C_DOUBLE:
val = ctypes.c_double.from_buffer(buf,
bufSize * rowIndex).value
elif dataType == SQL_WLONGVARCHAR:
val = _getLobData(cursor, col, buf, False)
elif dataType == SQL_LONGVARBINARY:
val = _getLobData(cursor, col, buf, True)
else:
                chLen = int(bufSize / ctypes.sizeof(SQLWCHAR))
chBuf = (SQLWCHAR * chLen)
val = _outputStr(chBuf.from_buffer(buf,
bufSize * rowIndex))
row.append(val)
return row
def rowIterator(cursor):
buffers = []
bufSizes = []
dataTypes = []
indicators = []
rowCount = SQLULEN()
lastFetchSize = None
rc = odbc.SQLSetStmtAttr(
cursor.hStmt, SQL_ATTR_ROWS_FETCHED_PTR, ADDR(rowCount), 0)
checkStatus(rc, hStmt=cursor.hStmt,
method="SQLSetStmtAttr - SQL_ATTR_ROWS_FETCHED_PTR")
while cursor.description is not None:
lastFetchSize = _setupColumnBuffers(cursor, buffers, bufSizes,
dataTypes, indicators,
lastFetchSize)
rc = odbc.SQLFetch(cursor.hStmt)
checkStatus(rc, hStmt=cursor.hStmt, method="SQLFetch")
if rc == SQL_NO_DATA:
break
for rowIndex in range(0, rowCount.value):
yield _getRow(cursor, buffers, bufSizes, dataTypes,
indicators, rowIndex)
if not cursor._checkForMoreResults():
cursor._free()
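# A minimal connection sketch (not part of the original module). The system,
# username and password values are placeholders; autoCommit is enabled only to
# keep the example short.
if __name__ == '__main__':
    con = connect(system='tdhost', username='dbc', password='dbc',
                  autoCommit=True)
    with con.cursor() as cursor:
        print(cursor.execute('SELECT SESSION').fetchone()[0])
    con.close()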
|
alipay/aop/api/domain/DeviceTradeInfoList.py
|
antopen/alipay-sdk-python-all
| 213 |
110464
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class DeviceTradeInfoList(object):
def __init__(self):
self._biz_tid = None
self._dau = None
self._device_face_trade_dau = None
self._device_face_trade_dau_d_value = None
self._device_name = None
self._device_sn = None
self._device_status = None
self._face_trade_cnt = None
self._face_trd_amt = None
self._face_trd_cnt_rate = None
self._face_trd_user_cnt_rate = None
self._face_trd_user_cnt_rate_d_value = None
self._gmt_active = None
self._iot_trd_up = None
self._iot_trd_user_cnt = None
self._iot_trd_user_cnt_d_value = None
self._max_dt = None
self._merchant_pid = None
self._shop_id = None
self._trade_amt = None
self._trade_cnt = None
@property
def biz_tid(self):
return self._biz_tid
@biz_tid.setter
def biz_tid(self, value):
self._biz_tid = value
@property
def dau(self):
return self._dau
@dau.setter
def dau(self, value):
self._dau = value
@property
def device_face_trade_dau(self):
return self._device_face_trade_dau
@device_face_trade_dau.setter
def device_face_trade_dau(self, value):
self._device_face_trade_dau = value
@property
def device_face_trade_dau_d_value(self):
return self._device_face_trade_dau_d_value
@device_face_trade_dau_d_value.setter
def device_face_trade_dau_d_value(self, value):
self._device_face_trade_dau_d_value = value
@property
def device_name(self):
return self._device_name
@device_name.setter
def device_name(self, value):
self._device_name = value
@property
def device_sn(self):
return self._device_sn
@device_sn.setter
def device_sn(self, value):
self._device_sn = value
@property
def device_status(self):
return self._device_status
@device_status.setter
def device_status(self, value):
self._device_status = value
@property
def face_trade_cnt(self):
return self._face_trade_cnt
@face_trade_cnt.setter
def face_trade_cnt(self, value):
self._face_trade_cnt = value
@property
def face_trd_amt(self):
return self._face_trd_amt
@face_trd_amt.setter
def face_trd_amt(self, value):
self._face_trd_amt = value
@property
def face_trd_cnt_rate(self):
return self._face_trd_cnt_rate
@face_trd_cnt_rate.setter
def face_trd_cnt_rate(self, value):
self._face_trd_cnt_rate = value
@property
def face_trd_user_cnt_rate(self):
return self._face_trd_user_cnt_rate
@face_trd_user_cnt_rate.setter
def face_trd_user_cnt_rate(self, value):
self._face_trd_user_cnt_rate = value
@property
def face_trd_user_cnt_rate_d_value(self):
return self._face_trd_user_cnt_rate_d_value
@face_trd_user_cnt_rate_d_value.setter
def face_trd_user_cnt_rate_d_value(self, value):
self._face_trd_user_cnt_rate_d_value = value
@property
def gmt_active(self):
return self._gmt_active
@gmt_active.setter
def gmt_active(self, value):
self._gmt_active = value
@property
def iot_trd_up(self):
return self._iot_trd_up
@iot_trd_up.setter
def iot_trd_up(self, value):
self._iot_trd_up = value
@property
def iot_trd_user_cnt(self):
return self._iot_trd_user_cnt
@iot_trd_user_cnt.setter
def iot_trd_user_cnt(self, value):
self._iot_trd_user_cnt = value
@property
def iot_trd_user_cnt_d_value(self):
return self._iot_trd_user_cnt_d_value
@iot_trd_user_cnt_d_value.setter
def iot_trd_user_cnt_d_value(self, value):
self._iot_trd_user_cnt_d_value = value
@property
def max_dt(self):
return self._max_dt
@max_dt.setter
def max_dt(self, value):
self._max_dt = value
@property
def merchant_pid(self):
return self._merchant_pid
@merchant_pid.setter
def merchant_pid(self, value):
self._merchant_pid = value
@property
def shop_id(self):
return self._shop_id
@shop_id.setter
def shop_id(self, value):
self._shop_id = value
@property
def trade_amt(self):
return self._trade_amt
@trade_amt.setter
def trade_amt(self, value):
self._trade_amt = value
@property
def trade_cnt(self):
return self._trade_cnt
@trade_cnt.setter
def trade_cnt(self, value):
self._trade_cnt = value
def to_alipay_dict(self):
params = dict()
if self.biz_tid:
if hasattr(self.biz_tid, 'to_alipay_dict'):
params['biz_tid'] = self.biz_tid.to_alipay_dict()
else:
params['biz_tid'] = self.biz_tid
if self.dau:
if hasattr(self.dau, 'to_alipay_dict'):
params['dau'] = self.dau.to_alipay_dict()
else:
params['dau'] = self.dau
if self.device_face_trade_dau:
if hasattr(self.device_face_trade_dau, 'to_alipay_dict'):
params['device_face_trade_dau'] = self.device_face_trade_dau.to_alipay_dict()
else:
params['device_face_trade_dau'] = self.device_face_trade_dau
if self.device_face_trade_dau_d_value:
if hasattr(self.device_face_trade_dau_d_value, 'to_alipay_dict'):
params['device_face_trade_dau_d_value'] = self.device_face_trade_dau_d_value.to_alipay_dict()
else:
params['device_face_trade_dau_d_value'] = self.device_face_trade_dau_d_value
if self.device_name:
if hasattr(self.device_name, 'to_alipay_dict'):
params['device_name'] = self.device_name.to_alipay_dict()
else:
params['device_name'] = self.device_name
if self.device_sn:
if hasattr(self.device_sn, 'to_alipay_dict'):
params['device_sn'] = self.device_sn.to_alipay_dict()
else:
params['device_sn'] = self.device_sn
if self.device_status:
if hasattr(self.device_status, 'to_alipay_dict'):
params['device_status'] = self.device_status.to_alipay_dict()
else:
params['device_status'] = self.device_status
if self.face_trade_cnt:
if hasattr(self.face_trade_cnt, 'to_alipay_dict'):
params['face_trade_cnt'] = self.face_trade_cnt.to_alipay_dict()
else:
params['face_trade_cnt'] = self.face_trade_cnt
if self.face_trd_amt:
if hasattr(self.face_trd_amt, 'to_alipay_dict'):
params['face_trd_amt'] = self.face_trd_amt.to_alipay_dict()
else:
params['face_trd_amt'] = self.face_trd_amt
if self.face_trd_cnt_rate:
if hasattr(self.face_trd_cnt_rate, 'to_alipay_dict'):
params['face_trd_cnt_rate'] = self.face_trd_cnt_rate.to_alipay_dict()
else:
params['face_trd_cnt_rate'] = self.face_trd_cnt_rate
if self.face_trd_user_cnt_rate:
if hasattr(self.face_trd_user_cnt_rate, 'to_alipay_dict'):
params['face_trd_user_cnt_rate'] = self.face_trd_user_cnt_rate.to_alipay_dict()
else:
params['face_trd_user_cnt_rate'] = self.face_trd_user_cnt_rate
if self.face_trd_user_cnt_rate_d_value:
if hasattr(self.face_trd_user_cnt_rate_d_value, 'to_alipay_dict'):
params['face_trd_user_cnt_rate_d_value'] = self.face_trd_user_cnt_rate_d_value.to_alipay_dict()
else:
params['face_trd_user_cnt_rate_d_value'] = self.face_trd_user_cnt_rate_d_value
if self.gmt_active:
if hasattr(self.gmt_active, 'to_alipay_dict'):
params['gmt_active'] = self.gmt_active.to_alipay_dict()
else:
params['gmt_active'] = self.gmt_active
if self.iot_trd_up:
if hasattr(self.iot_trd_up, 'to_alipay_dict'):
params['iot_trd_up'] = self.iot_trd_up.to_alipay_dict()
else:
params['iot_trd_up'] = self.iot_trd_up
if self.iot_trd_user_cnt:
if hasattr(self.iot_trd_user_cnt, 'to_alipay_dict'):
params['iot_trd_user_cnt'] = self.iot_trd_user_cnt.to_alipay_dict()
else:
params['iot_trd_user_cnt'] = self.iot_trd_user_cnt
if self.iot_trd_user_cnt_d_value:
if hasattr(self.iot_trd_user_cnt_d_value, 'to_alipay_dict'):
params['iot_trd_user_cnt_d_value'] = self.iot_trd_user_cnt_d_value.to_alipay_dict()
else:
params['iot_trd_user_cnt_d_value'] = self.iot_trd_user_cnt_d_value
if self.max_dt:
if hasattr(self.max_dt, 'to_alipay_dict'):
params['max_dt'] = self.max_dt.to_alipay_dict()
else:
params['max_dt'] = self.max_dt
if self.merchant_pid:
if hasattr(self.merchant_pid, 'to_alipay_dict'):
params['merchant_pid'] = self.merchant_pid.to_alipay_dict()
else:
params['merchant_pid'] = self.merchant_pid
if self.shop_id:
if hasattr(self.shop_id, 'to_alipay_dict'):
params['shop_id'] = self.shop_id.to_alipay_dict()
else:
params['shop_id'] = self.shop_id
if self.trade_amt:
if hasattr(self.trade_amt, 'to_alipay_dict'):
params['trade_amt'] = self.trade_amt.to_alipay_dict()
else:
params['trade_amt'] = self.trade_amt
if self.trade_cnt:
if hasattr(self.trade_cnt, 'to_alipay_dict'):
params['trade_cnt'] = self.trade_cnt.to_alipay_dict()
else:
params['trade_cnt'] = self.trade_cnt
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = DeviceTradeInfoList()
if 'biz_tid' in d:
o.biz_tid = d['biz_tid']
if 'dau' in d:
o.dau = d['dau']
if 'device_face_trade_dau' in d:
o.device_face_trade_dau = d['device_face_trade_dau']
if 'device_face_trade_dau_d_value' in d:
o.device_face_trade_dau_d_value = d['device_face_trade_dau_d_value']
if 'device_name' in d:
o.device_name = d['device_name']
if 'device_sn' in d:
o.device_sn = d['device_sn']
if 'device_status' in d:
o.device_status = d['device_status']
if 'face_trade_cnt' in d:
o.face_trade_cnt = d['face_trade_cnt']
if 'face_trd_amt' in d:
o.face_trd_amt = d['face_trd_amt']
if 'face_trd_cnt_rate' in d:
o.face_trd_cnt_rate = d['face_trd_cnt_rate']
if 'face_trd_user_cnt_rate' in d:
o.face_trd_user_cnt_rate = d['face_trd_user_cnt_rate']
if 'face_trd_user_cnt_rate_d_value' in d:
o.face_trd_user_cnt_rate_d_value = d['face_trd_user_cnt_rate_d_value']
if 'gmt_active' in d:
o.gmt_active = d['gmt_active']
if 'iot_trd_up' in d:
o.iot_trd_up = d['iot_trd_up']
if 'iot_trd_user_cnt' in d:
o.iot_trd_user_cnt = d['iot_trd_user_cnt']
if 'iot_trd_user_cnt_d_value' in d:
o.iot_trd_user_cnt_d_value = d['iot_trd_user_cnt_d_value']
if 'max_dt' in d:
o.max_dt = d['max_dt']
if 'merchant_pid' in d:
o.merchant_pid = d['merchant_pid']
if 'shop_id' in d:
o.shop_id = d['shop_id']
if 'trade_amt' in d:
o.trade_amt = d['trade_amt']
if 'trade_cnt' in d:
o.trade_cnt = d['trade_cnt']
return o
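# A minimal round-trip sketch (not part of the original module); the field
# values below are invented placeholders.
if __name__ == '__main__':
    info = DeviceTradeInfoList.from_alipay_dict(
        {'device_sn': 'SN-0001', 'trade_cnt': 12, 'trade_amt': '34.50'})
    print(json.dumps(info.to_alipay_dict(), ensure_ascii=False))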
|
community-content/pytorch_text_classification_using_vertex_sdk_and_gcloud/python_package/setup.py
|
gogasca/vertex-ai-samples
| 213 |
110467
|
from setuptools import find_packages
from setuptools import setup
import setuptools
from distutils.command.build import build as _build
import subprocess
REQUIRED_PACKAGES = [
'transformers',
'datasets',
'tqdm',
'cloudml-hypertune'
]
setup(
name='trainer',
version='0.1',
install_requires=REQUIRED_PACKAGES,
packages=find_packages(),
include_package_data=True,
description='Vertex AI | Training | PyTorch | Text Classification | Python Package'
)
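# A hedged packaging note (not part of the original file): for Vertex AI custom
# training the package is typically built into a source distribution, for
# example with `python setup.py sdist --formats=gztar`.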
|
codigo_das_aulas/aula_05/aula_05_01.py
|
VeirichR/curso-python-selenium
| 234 |
110472
|
from selenium.webdriver import Firefox
url = 'http://selenium.dunossauro.live/aula_05_a.html'
firefox = Firefox()
firefox.get(url)
div_py = firefox.find_element_by_id('python')
div_hk = firefox.find_element_by_id('haskell')
print(div_hk.text)
firefox.quit()
|
tensorflow_io/python/ops/archive_ops.py
|
lgeiger/io
| 558 |
110509
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Archive."""
from tensorflow_io.python.ops import core_ops
def list_archive_entries(filename, filters, **kwargs):
"""list_archive_entries"""
memory = kwargs.get("memory", "")
if not isinstance(filters, list):
filters = [filters]
return core_ops.io_list_archive_entries(filename, filters=filters, memory=memory)
def read_archive(
filename, format, entries, **kwargs
): # pylint: disable=redefined-builtin
"""read_archive"""
memory = kwargs.get("memory", "")
return core_ops.io_read_archive(filename, format, entries, memory=memory)
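# Example usage (a minimal sketch; the file name and the "gz" filter/format
# strings are illustrative assumptions, not values documented in this module):
# entries = list_archive_entries("data.csv.gz", "gz")
# data = read_archive("data.csv.gz", "gz", entries)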
|
mods/NERO/teams.py
|
LeRoi46/opennero
| 215 |
110525
|
<reponame>LeRoi46/opennero
import json
import constants
import OpenNero
import agent as agents
def factory(ai, *args):
cls = ai_map.get(ai, NeroTeam)
return cls(*args)
class TeamEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, NeroTeam):
return {
'team_ai': inv_ai_map.get(obj.__class__, 'none'),
'agents': [
{
'agent_ai': agent.ai_label(),
'args': agent.args()
}
for agent in obj.agents
]
}
return json.JSONEncoder.default(self, obj)
def as_team(team_type, dct):
if 'team_ai' in dct:
team = factory(dct['team_ai'], team_type)
for a in dct['agents'][:constants.pop_size]:
team.create_agent(a['agent_ai'], *a['args'])
return team
return dct
class NeroTeam(object):
"""
Basic NERO Team
"""
def __init__(self, team_type):
self.team_type = team_type
self.color = constants.TEAM_LABELS[team_type]
self.agents = set()
self.dead_agents = set()
def create_agents(self, ai):
for _ in range(constants.pop_size):
self.create_agent(ai)
def create_agent(self, ai, *args):
a = agents.factory(ai, self.team_type, *args)
self.add_agent(a)
return a
def add_agent(self, a):
self.agents.add(a)
def kill_agent(self, a):
self.agents.remove(a)
self.dead_agents.add(a)
def is_episode_over(self, agent):
return False
def reset(self, agent):
pass
def start_training(self):
pass
def stop_training(self):
pass
def is_destroyed(self):
return len(self.agents) == 0 and len(self.dead_agents) > 0
def reset_all(self):
self.agents |= self.dead_agents
self.dead_agents = set()
class RTNEATTeam(NeroTeam):
def __init__(self, team_type):
NeroTeam.__init__(self, team_type)
self.pop = OpenNero.Population()
self.rtneat = OpenNero.RTNEAT("data/ai/neat-params.dat",
self.pop,
constants.DEFAULT_LIFETIME_MIN,
constants.DEFAULT_EVOLVE_RATE)
self.generation = 1
def add_agent(self, a):
NeroTeam.add_agent(self, a)
self.pop.add_organism(a.org)
def start_training(self):
OpenNero.set_ai('rtneat-%s' % self.team_type, self.rtneat)
def stop_training(self):
OpenNero.set_ai('rtneat-%s' % self.team_type, None)
def is_episode_over(self, agent):
return agent.org.eliminate
def reset(self, agent):
        if agent.org.eliminate:
agent.org = self.rtneat.reproduce_one()
def reset_all(self):
NeroTeam.reset_all(self)
#TODO: Epoch can segfault without fitness differentials
if any([agent.org.fitness > 0 for agent in self.agents]):
self.generation += 1
self.pop.epoch(self.generation)
for agent, org in zip(self.agents, self.pop.organisms):
agent.org = org
ai_map = {
'rtneat': RTNEATTeam,
'none': NeroTeam
}
inv_ai_map = {v: k for k, v in ai_map.items()}
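# Example round-trip (a minimal sketch; constants.OBJECT_TYPE_TEAM_0 is assumed
# to name a valid team type in the constants module):
# team = factory('rtneat', constants.OBJECT_TYPE_TEAM_0)
# blob = json.dumps(team, cls=TeamEncoder)
# restored = json.loads(blob, object_hook=lambda d: as_team(constants.OBJECT_TYPE_TEAM_0, d))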
|
pyEX/stocks/quote.py
|
adamklaff/pyEX
| 335 |
110529
|
<gh_stars>100-1000
# *****************************************************************************
#
# Copyright (c) 2020, the pyEX authors.
#
# This file is part of the pyEX library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
from functools import wraps
import pandas as pd
from ..common import (
_get,
_quoteSymbols,
_raiseIfNotStr,
_reindex,
_toDatetime,
json_normalize,
)
def quote(symbol, token="", version="stable", filter="", format="json"):
"""Get quote for ticker
https://iexcloud.io/docs/api/#quote
4:30am-8pm ET Mon-Fri
Args:
symbol (str): Ticker to request
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Returns:
dict or DataFrame: result
"""
_raiseIfNotStr(symbol)
return _get(
"stock/{symbol}/quote".format(symbol=_quoteSymbols(symbol)),
token=token,
version=version,
filter=filter,
format=format,
)
@wraps(quote)
def quoteDF(*args, **kwargs):
q = quote(*args, **kwargs)
if q:
df = _reindex(_toDatetime(json_normalize(q)), "symbol")
else:
df = pd.DataFrame()
return df
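# Example usage (a sketch; the token below is a placeholder for a valid
# IEX Cloud API token):
# q = quote("AAPL", token="<YOUR_TOKEN>")
# df = quoteDF("AAPL", token="<YOUR_TOKEN>")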
|
pygmt/tests/test_grdtrack.py
|
weiji14/gmt-python
| 168 |
110535
|
"""
Tests for grdtrack.
"""
import os
import numpy as np
import numpy.testing as npt
import pandas as pd
import pytest
from pygmt import grdtrack
from pygmt.exceptions import GMTInvalidInput
from pygmt.helpers import GMTTempFile, data_kind
from pygmt.helpers.testing import load_static_earth_relief
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
POINTS_DATA = os.path.join(TEST_DATA_DIR, "track.txt")
@pytest.fixture(scope="module", name="dataarray")
def fixture_dataarray():
"""
Load the grid data from the sample earth_relief file.
"""
return load_static_earth_relief()
@pytest.fixture(scope="module", name="expected_array")
def fixture_numpy_array():
"""
Load a numpy array with x, y, and bathymetry data.
"""
array = [
[-51.613, -17.93, 796.59434514],
[-48.917, -22.434, 566.49184359],
[-50.444, -16.358, 571.1492788],
[-50.721, -16.628, 578.76116859],
[-51.394, -12.196, 274.43205501],
[-50.207, -18.404, 532.11444935],
[-52.56, -16.977, 670.16934401],
[-51.866, -19.794, 426.77300768],
[-48.001, -14.144, 741.35824074],
[-54.438, -19.193, 490.02716679],
]
return array
@pytest.fixture(scope="module", name="dataframe")
def fixture_dataframe():
"""
Load a pandas DataFrame with points.
"""
return pd.read_csv(
POINTS_DATA, sep=r"\s+", header=None, names=["longitude", "latitude"]
)
def test_grdtrack_input_dataframe_and_dataarray(dataarray, dataframe, expected_array):
"""
Run grdtrack by passing in a pandas.DataFrame and xarray.DataArray as
inputs.
"""
output = grdtrack(points=dataframe, grid=dataarray, newcolname="bathymetry")
assert isinstance(output, pd.DataFrame)
assert output.columns.to_list() == ["longitude", "latitude", "bathymetry"]
npt.assert_allclose(np.array(output), expected_array)
def test_grdtrack_input_csvfile_and_dataarray(dataarray, expected_array):
"""
Run grdtrack by passing in a csvfile and xarray.DataArray as inputs.
"""
with GMTTempFile() as tmpfile:
output = grdtrack(points=POINTS_DATA, grid=dataarray, outfile=tmpfile.name)
assert output is None # check that output is None since outfile is set
assert os.path.exists(path=tmpfile.name) # check that outfile exists at path
output = np.loadtxt(tmpfile.name)
npt.assert_allclose(np.array(output), expected_array)
def test_grdtrack_input_dataframe_and_ncfile(dataframe, expected_array):
"""
Run grdtrack by passing in a pandas.DataFrame and netcdf file as inputs.
"""
output = grdtrack(
points=dataframe, grid="@static_earth_relief.nc", newcolname="bathymetry"
)
assert isinstance(output, pd.DataFrame)
assert output.columns.to_list() == ["longitude", "latitude", "bathymetry"]
npt.assert_allclose(np.array(output), expected_array)
def test_grdtrack_input_csvfile_and_ncfile_to_dataframe(expected_array):
"""
Run grdtrack by passing in a csv file and netcdf file as inputs with a
pandas.DataFrame output.
"""
output = grdtrack(points=POINTS_DATA, grid="@static_earth_relief.nc")
assert isinstance(output, pd.DataFrame)
npt.assert_allclose(np.array(output), expected_array)
def test_grdtrack_wrong_kind_of_points_input(dataarray, dataframe):
"""
Run grdtrack using points input that is not a pandas.DataFrame (matrix) or
file.
"""
invalid_points = dataframe.longitude.to_xarray()
assert data_kind(invalid_points) == "grid"
with pytest.raises(GMTInvalidInput):
grdtrack(points=invalid_points, grid=dataarray, newcolname="bathymetry")
def test_grdtrack_wrong_kind_of_grid_input(dataarray, dataframe):
"""
Run grdtrack using grid input that is not as xarray.DataArray (grid) or
file.
"""
invalid_grid = dataarray.to_dataset()
assert data_kind(invalid_grid) == "matrix"
with pytest.raises(GMTInvalidInput):
grdtrack(points=dataframe, grid=invalid_grid, newcolname="bathymetry")
def test_grdtrack_without_newcolname_setting(dataarray, dataframe):
"""
Run grdtrack by not passing in newcolname parameter setting.
"""
with pytest.raises(GMTInvalidInput):
grdtrack(points=dataframe, grid=dataarray)
def test_grdtrack_without_outfile_setting(dataarray, dataframe):
"""
Run grdtrack by not passing in outfile parameter setting.
"""
with pytest.raises(GMTInvalidInput):
grdtrack(points=dataframe, grid=dataarray)
|
brew/utils/data.py
|
va26/brew
| 344 |
110562
|
<reponame>va26/brew
from sklearn import cross_validation
def split_data(X, y, t_size):
if len(X) != len(y):
return None
if hasattr(cross_validation, 'train_test_split'):
return cross_validation.train_test_split(X, y, test_size=t_size)
return None
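# Example usage (a sketch; note that sklearn.cross_validation was removed in
# scikit-learn 0.20, where sklearn.model_selection.train_test_split provides
# the equivalent functionality):
# X_train, X_test, y_train, y_test = split_data(X, y, t_size=0.25)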
|
tests/app/serializers.py
|
simiotics/djangorestframework-queryfields
| 195 |
110597
|
<reponame>simiotics/djangorestframework-queryfields
from rest_framework import serializers
from drf_queryfields import QueryFieldsMixin
from tests.app.fields import BoomField
from tests.app.models import Snippet
class QuoteSerializer(QueryFieldsMixin, serializers.Serializer):
character = serializers.CharField()
line = serializers.CharField()
sketch = serializers.CharField()
class SnippetSerializer(QueryFieldsMixin, serializers.ModelSerializer):
class Meta:
model = Snippet
exclude = ()
class ExplosiveSerializer(QueryFieldsMixin, serializers.Serializer):
safe = serializers.CharField()
boom = BoomField()
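# Usage note (a sketch based on drf_queryfields' documented behaviour): clients
# can trim the serialized output of these serializers with query parameters, e.g.
#   GET /quotes/?fields=character,line   -> only those two fields
#   GET /quotes/?fields!=sketch          -> every field except 'sketch'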
|
_build/jupyter_execute/content/c6/s2/bagging.py
|
curioushruti/mlbook
| 970 |
110600
|
# Bagging
Bagging can be used for regression or classification, though we will demonstrate a regression bagging model here. Since this model is based on decision tree regressors, we'll first import our {doc}`regression tree </content/c5/s2/regression_tree>` construction from the previous chapter. We'll also import numpy and the visualization packages.
## Import decision trees
import import_ipynb
import regression_tree as rt;
## Import numpy and visualization packages
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import datasets
We will build our bagging model on the {doc}`tips </content/appendix/data>` dataset, loaded through `seaborn`. The hidden code cell below loads that data and does a train-test split.
## Load data
tips = sns.load_dataset('tips')
X = np.array(tips.drop(columns = 'tip'))
y = np.array(tips['tip'])
## Train-test split
np.random.seed(1)
test_frac = 0.25
test_size = int(len(y)*test_frac)
test_idxs = np.random.choice(np.arange(len(y)), test_size, replace = False)
X_train = np.delete(X, test_idxs, 0)
y_train = np.delete(y, test_idxs, 0)
X_test = X[test_idxs]
y_test = y[test_idxs]
Now we can get right into the bagging class. To fit the `Bagger` object, we provide training data, the number of bootstraps (`B`), and size regulation parameters for the decision trees. The object then takes `B` bootstraps of the data, each time fitting a decision tree regressor. To form predictions with the `Bagger` object, we simply run test observations through each bootstrapped tree and average the fitted values.
class Bagger:
def fit(self, X_train, y_train, B, max_depth = 100, min_size = 2, seed = None):
self.X_train = X_train
self.N, self.D = X_train.shape
self.y_train = y_train
self.B = B
self.seed = seed
self.trees = []
np.random.seed(seed)
for b in range(self.B):
sample = np.random.choice(np.arange(self.N), size = self.N, replace = True)
X_train_b = X_train[sample]
y_train_b = y_train[sample]
tree = rt.DecisionTreeRegressor()
tree.fit(X_train_b, y_train_b, max_depth = max_depth, min_size = min_size)
self.trees.append(tree)
def predict(self, X_test):
y_test_hats = np.empty((len(self.trees), len(X_test)))
for i, tree in enumerate(self.trees):
y_test_hats[i] = tree.predict(X_test)
return y_test_hats.mean(0)
We can now fit the bagging model and display the observed versus fitted values.
## Build model
bagger = Bagger()
bagger.fit(X_train, y_train, B = 30, max_depth = 20, min_size = 5, seed = 123)
y_test_hat = bagger.predict(X_test)
## Plot
fig, ax = plt.subplots(figsize = (7, 5))
sns.scatterplot(y_test, y_test_hat)
ax.set(xlabel = r'$y$', ylabel = r'$\hat{y}$', title = r'Observed vs. Fitted Values for Bagging')
sns.despine()
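As a quick check of fit quality (an illustrative addition; no tuning is implied), we can also compute the out-of-sample mean squared error from the fitted values above.
## Test MSE (illustrative)
mse = np.mean((y_test - y_test_hat)**2)
print(f'Test MSE: {mse:.3f}')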
|
asylo/platform/system_call/type_conversions/types_parse_functions.py
|
light1021/asylo
| 890 |
110620
|
#
#
# Copyright 2019 Asylo authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
"""Functions for describing type definitions for generating macros.
Implements the functions for describing and parsing the type definitions. Allows
emitting macros which can be read directly by a C/C++ program, to evaluate the
unresolved values in such macros and then generate include directives, constant
definitions and conversion functions that allow system constants to be converted
from the enclave C library implementation used by Asylo to target host
implementation on the untrusted side (typically libc).
For each type definition (eg. define_constants, define_structs), a definition
and getter methods are provided. The definition methods accept a type definition
one at a time, while the get methods return all the type definitions under a
single macro.
Finally, a write_output() method is provided, which emits all the type
definitions recorded so far in the definitions file (types.py).
"""
from __future__ import print_function
import collections
import re
import sys
# Stores system header includes as a set. Only header file names are expected
# with or without the .h extension and without the '#include' directive
# prefixed.
# We include stdbool.h by default so that the generated output (as .inc file) is
# also readable by a C program.
_includes = {'stdbool.h'}
# Map from enum names to dictionary of enum properties and their values.
_enum_map = collections.defaultdict(dict)
# Map from struct names to dictionary of struct properties and its members.
_struct_map = collections.defaultdict(dict)
# Declare the prefix to be used for C enum declarations and conversion
# functions. This prefix should be used for direct conversions between enclave
# C library and host library, ones which do not involve an intermediate bridge.
_klinux_prefix = 'kLinux'
def set_klinux_prefix(prefix):
"""Sets the prefix used for constants definitions and conversion functions.
Args:
prefix: Name of the prefix to be applied to a kernel based constant
definition or conversion function name.
"""
global _klinux_prefix
_klinux_prefix = prefix
def define_constants(name,
values,
include_header_file,
multi_valued=False,
skip_conversions=False,
wrap_macros_with_if_defined=False,
data_type='int'):
"""Defines a collection of related constants/macros and their properties.
Args:
name: Name of the collection of constants.
values: Constant names provided as a list of strings.
include_header_file: The system header file used for resolving values to
generate the type definition. The filename here is expected to be a
system header file (included as #include <filename>). This system header
file is used twice - once for resolving values of constants on the target
host implementation at compile time, then by the generated conversion
functions for converting the constant values between enclave C library and
the target host C library at runtime.
multi_valued: Boolean indicating if the constant values can be combined
using bitwise OR operations.
skip_conversions: Boolean indicating if generation of types conversion
functions be skipped, and only constants definitions be generated. Useful
when conversion functions are complex and need to be written manually, but
the constants definitions can be generated automatically by resolving the
constants for the target host implementation.
wrap_macros_with_if_defined: Boolean indicating if each constant value in
the collection is to be wrapped inside a #if defined(value) ...#endif
while generating the conversion functions. This allows define_constants()
to safely accept constants that might not exist on a particular platform
or architecture. This parameter is intended for use only with constants
that are C/C++ macros.
data_type: String specifying the type of constants, if not int.
Raises:
ValueError: Invalid include_header_file format provided.
"""
  # Each constant here is written twice, once as a string literal, then as a
  # numerical value pointing to the actual integer value of the constant. This
  # allows the types conversions generator to directly interpret the latter as a
  # valid integer corresponding to the constant value, since casting a string to
  # an enum value is non-trivial in C++.
# An example 'values', like ['CONST_VAL1', 'CONST_VAL2'] looks like the
# following stored as a dictionary entry -
# {"CONST_VAL1", CONST_VAL1}, {"CONST_VAL2", CONST_VAL2}
_enum_map[name]['values'] = ', '.join(
'{{"{}", {}}}'.format(val, val) for val in values)
_enum_map[name]['multi_valued'] = multi_valued
_enum_map[name]['skip_conversions'] = skip_conversions
_enum_map[name]['wrap_macros_with_if_defined'] = wrap_macros_with_if_defined
_enum_map[name]['data_type'] = '"{}"'.format(data_type)
add_include_header_file(include_header_file)
def add_include_header_file(include_header_file):
"""Adds a system header file to the list of includes to be generated.
Args:
include_header_file: Name of the system header file, in the format
'filename.h'. Do not use <> or "" to wrap the filename.
"""
  if re.match(r'[<"].*?[>"]', include_header_file):
raise ValueError(
'Invalid include format for filename "%s". Please provide the include '
'file without enclosing pointy brackets <> or quotes "".' %
include_header_file)
if re.match('#include', include_header_file, re.IGNORECASE):
raise ValueError(
'Invalid include format for filename "%s". Please provide the filename '
'without the prefixing #include directive.' % include_header_file)
_includes.add(include_header_file)
def define_struct(name,
values,
include_header_file,
pack_attributes=True,
skip_conversions=False):
"""Defines a collection of structs and their properties.
Args:
name: Name of the struct. This should be the same as the struct name used in
enclave C library and the host C library for the system calls. Eg. 'stat',
'timeval'
values: List containing tuples of struct member types and struct member
names. The struct members names should match the corresponding struct
member names in the struct from enclave C library and libc. Eg.
[("int64_t", "st_dev"), ("int64_t", "st_ino")].
include_header_file: Kernel header file to include to identify |name| as a
valid kernel struct when generating conversion functions between kernel
structs and enclave structs.
pack_attributes: Boolean indicating if the compiler should be prevented from
padding the generated kernel struct members from their natural alignment.
skip_conversions: Boolean indicating if generation of types conversion
functions be skipped, and only kernel struct definitions be generated.
Useful when kernel conversion functions are complex and need to be written
manually, but the struct definitions can be generated automatically.
"""
_struct_map[name]['values'] = ', '.join(
'{{"{}", "{}"}}'.format(member_name, member_type)
for member_type, member_name in values)
_struct_map[name]['pack_attributes'] = pack_attributes
_struct_map[name]['skip_conversions'] = skip_conversions
add_include_header_file(include_header_file)
def get_klinux_prefix():
"""Gets the prefix for generated C enums and conversion functions."""
return 'const char klinux_prefix[] = "{}";\n'.format(_klinux_prefix)
def get_includes_as_include_macros():
"""Returns all the includes as line separated #include macros.
These includes are required by the types conversions generator at compile time
to infer the values of constants for a given host implementation.
"""
return ''.join(
'#include <{}>\n'.format(filename) for filename in sorted(_includes))
def get_includes_in_define_macro():
"""Returns all the includes under a #define INCLUDES macro.
The returned list can be used to generate #include directives by a consumer.
"""
quoted_includes = ['"{}"'.format(incl) for incl in sorted(_includes)]
return '#define INCLUDES {}'.format(', \\\n'.join(quoted_includes))
def get_constants():
r"""Returns a macro containing all constants' description.
  The returned macro is used by the types conversions generator to initialize an
  enum description table (enum_properties_table) mapping enum names to a struct
(EnumProperties) describing the enum properties, including the enum values. A
typical output of get_constants() looks like the following -
#define ENUMS_INIT \
{"FcntlCmd", {false, false, false, "int",
{{"F_GETFD", F_GETFD}, {"F_SETFD", F_SETFD}}}}, \
  {"FileFlags", {true, false, false, "int", {{"O_RDONLY", O_RDONLY},
  {"O_WRONLY", O_WRONLY}}}}
Each line contains an enum, and has the following pattern -
{"EnumName", {multi_valued, skip_conversions, wrap_macros_with_if_defined,
data_type, {{"const_val1", const_val1}, {"const_val2", const_val2}}}}, \
"""
enum_rows = []
for enum_name, enum_properties in sorted(_enum_map.items()):
enum_rows.append(
'{{{name}, {{{multi_valued}, {skip_conversions}, '
'{wrap_macros_with_if_defined}, {data_type}, {{{values}}}}}}}'.format(
name='"{}"'.format(enum_name),
multi_valued='true' if enum_properties['multi_valued'] else 'false',
skip_conversions='true'
if enum_properties['skip_conversions'] else 'false',
wrap_macros_with_if_defined='true'
if enum_properties['wrap_macros_with_if_defined'] else 'false',
data_type=enum_properties['data_type'],
values=enum_properties['values']))
return '#define ENUMS_INIT \\\n{}\n'.format(', \\\n'.join(enum_rows))
def get_structs():
r"""Returns a macro containing all struct descriptions.
The returned macro is used by types conversion generator to initialize a
struct description table (struct_properties_table) mapping struct names to a
struct (StructProperties) describing the struct properties, including struct
members. A typical output of get_structs looks like the following -
#define STRUCTS_INIT \
{"stat", {true, false, {{"st_dev", "int64_t"}, {"st_ino", "int64_t"}}}}, \
{"timespec", {true, false, {{"tv_sec", "int64_t"}, {"tv_nsec", "int64_t"}}}}
Each line contains a struct, and has the following pattern -
{"struct_name", {pack_attributes, skip_conversions, \
{{"member_name1", "member_type1"}, {"member_name2", "member_type2"}}}}
"""
struct_rows = []
for struct_name, struct_properties in sorted(_struct_map.items()):
struct_rows.append(
'{{{struct}, {{{pack_attributes}, {skip_conversions}, {{{values}}}}}}}'
.format(
struct='"{}"'.format(struct_name),
pack_attributes='true'
if struct_properties['pack_attributes'] else 'false',
skip_conversions='true'
if struct_properties['skip_conversions'] else 'false',
values=struct_properties['values']))
return '#define STRUCTS_INIT \\\n{}\n'.format(', \\\n'.join(struct_rows))
def write_output(stream=sys.stdout):
"""Writes the macros to a stream, default to stdout."""
print(get_includes_as_include_macros(), file=stream)
print(get_includes_in_define_macro(), file=stream)
print(get_klinux_prefix(), file=stream)
print(get_constants(), file=stream)
print(get_structs(), file=stream)
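# Example usage (a minimal sketch; the constant, struct and header-file names
# below are illustrative and mirror the docstring examples above):
# set_klinux_prefix('kLinux')
# define_constants('FileFlags', ['O_RDONLY', 'O_WRONLY'],
#                  include_header_file='fcntl.h', multi_valued=True)
# define_struct('timespec', [('int64_t', 'tv_sec'), ('int64_t', 'tv_nsec')],
#               include_header_file='time.h')
# write_output()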
|
nodes/1.x/python/Buckyball.ByOriginAndRadius.py
|
jdehotin/Clockworkfordynamo
| 147 |
110660
|
import clr
clr.AddReference('ProtoGeometry')
from Autodesk.DesignScript.Geometry import *
points = IN[0]
almostzero = IN[1]
struts = list()
# this function recursively finds all the pairs of points of the buckyball struts
def BuckyballStruts(points,struts):
firstpoint = points[0]
restofpoints = points[1:]
# measure distance between first point and rest of points
distances = [firstpoint.DistanceTo(x) for x in restofpoints]
# filter out all points that do not have a distance of 2 to the first point
strutpoints = list()
strutpointpairs = list()
i = 0
for dist in distances:
# use a little tolerance so we catch all struts
if dist > 2 - almostzero and dist < 2 + almostzero:
strutpoints.append(restofpoints[i])
strutpointpairs.append((firstpoint,restofpoints[i]))
i += 1
# add strutpointpairs to struts
if len(strutpointpairs) > 0: struts.extend(strutpointpairs)
# Continue processing the list recursively until there's only one point left. By always removing the first point from the list, we ensure that no duplicate struts are computed.
if len(restofpoints) > 1:
return BuckyballStruts(restofpoints,struts)
else: return (restofpoints,struts)
OUT = BuckyballStruts(points,struts)[1]
##### NEXT PYTHON NODE #####
import clr
clr.AddReference('ProtoGeometry')
from Autodesk.DesignScript.Geometry import *
struts = IN[0]
points = IN[1]
almostzero = IN[2]
def BuckyballFaces(struts,points,planes,almostzero,vertices):
firststrut = struts[0]
struts.pop(0)
# find the two adjacent struts
adjacent = list()
for strut in struts:
for point in strut:
if point.IsAlmostEqualTo(firststrut[0]):
adjacent.append(strut)
break
if len(adjacent) == 2:
break
# identify planes and find all vertices on planes
vlist = list()
for item in adjacent:
triangle = (firststrut[1],item[0],item[1])
pl = Plane.ByBestFitThroughPoints(triangle)
vlist = list()
for point in points:
dist = pl.DistanceTo(point)
if dist < almostzero and dist > -almostzero:
vlist.append(point)
newplane = (Plane.ByBestFitThroughPoints(vlist))
append_vertices = True
for pl in planes:
if newplane.IsAlmostEqualTo(pl):
append_vertices = False
if append_vertices:
vertices.append(vlist)
planes.append(newplane)
# let this function recursively call itself until it finds all planes
if len(planes) < 32:
return BuckyballFaces(struts,points,planes,almostzero,vertices)
else:
return (struts,points,planes,almostzero,vertices)
def OrderFaceIndices(p_ordered,p_unordered,almostzero):
i = 0;
for p in p_unordered:
dist = p_ordered[(len(p_ordered)-1)].DistanceTo(p)
if dist > 2-almostzero and dist < 2+almostzero:
p_ordered.append(p)
p_unordered.pop(i)
break
i += 1
if len(p_unordered) > 0:
return OrderFaceIndices(p_ordered,p_unordered,almostzero)
else:
return (p_ordered,p_unordered,almostzero)
vlist_unordered = BuckyballFaces(struts,points,list(),almostzero,list())[4]
vset_ordered = list()
for vset in vlist_unordered:
p_ordered = [vset[0]]
vset.pop(0)
vset_ordered.append(OrderFaceIndices(p_ordered,vset,almostzero))
vset_out = list()
for vset in vset_ordered:
vset_out.append(vset[0])
OUT = vset_out
|
build/lib/cort/analysis/plotting.py
|
leonardoboliveira/cort
| 141 |
110670
|
""" Plot error analysis statistics. """
from __future__ import division
from matplotlib import pyplot
from matplotlib import cm
import numpy
from pylab import rcParams
__author__ = 'martscsn'
def plot(data,
title,
xlabel,
ylabel,
filename=None):
""" Plot error analysis statistics.
In particular, plot a bar chart for the numbers described in ``data``.
Args:
data (list(str, list((str,int)))): The data to be plotted. The ith entry
of this list contains the name which will appear in the legend,
and a list of (category, count) pairs. These are the individual
data points which will be plotted.
title (str): Title of the plot.
xlabel (str): Label of the x axis.
ylabel (str): Label of the y axis.
filename (str, optional): If set, write plot to ``filename``.
Example::
pair_errs = errors["pair"]["recall_errors"]["all"]
tree_errs = errors["tree"]["recall_errors"]["all"]
plot(
[("pair", [(cat, len(pair_errs[cat])) for cat in pair_errs.keys()]),
("tree", [(cat, len(tree_errs[cat])) for cat in tree_errs.keys()])],
"Recall Errors",
"Type of anaphor",
"Number of Errors")
"""
rcParams['xtick.major.pad'] = '12'
rcParams['ytick.major.pad'] = '12'
fig, ax = pyplot.subplots()
systems = []
categories = []
colors = cm.Accent(numpy.linspace(0, 1, len(data)))
bars_for_legend = []
for i, system_data in enumerate(data):
system_name, categories_and_numbers = system_data
systems.append(system_name)
for j, cat_and_number in enumerate(categories_and_numbers):
category, number = cat_and_number
if category not in categories:
categories.append(category)
bar = ax.bar(2*j + i*(1/len(data)), number, color=colors[i],
width=1/len(data), label=system_name)
if j == 0:
bars_for_legend.append(bar)
xticks = [2*k + 0.5 for k in range(0, len(categories))]
pyplot.title(title, fontsize=28)
pyplot.xlabel(xlabel, fontsize=24)
pyplot.ylabel(ylabel, fontsize=24)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.set_xticklabels(categories)
ax.set_xticks(xticks)
pyplot.tick_params(axis='both', which='major', labelsize=20)
if filename:
legend = ax.legend(bars_for_legend, systems,
loc='upper right', bbox_to_anchor=(1.2, 1.2))
fig.savefig(filename, bbox_extra_artists=(legend,), bbox_inches='tight')
else:
legend = ax.legend(bars_for_legend, systems, loc='upper right')
legend.draggable()
fig.show()
|
niftynet/engine/application_initializer.py
|
tdml13/NiftyNet
| 1,403 |
110696
|
# -*- coding: utf-8 -*-
"""
Loading modules from a string representing the class name
or a short name that matches the dictionary item defined
in this module
all classes and docs are taken from
https://github.com/tensorflow/tensorflow/blob/r1.3/tensorflow/python/ops/init_ops.py
"""
import tensorflow as tf
SEED = 42
class Constant(object):
"""
initialize with a constant value
"""
@staticmethod
def get_instance(args):
"""
create an instance of the initializer
"""
value = float(args.get('value', 0.0))
return tf.constant_initializer(value)
class Zeros(object):
"""
initialize with zeros
"""
@staticmethod
def get_instance(args):
# pylint: disable=unused-argument
"""
create an instance of the initializer
"""
return tf.constant_initializer(0.0)
class Ones(object):
"""
initialize with ones
"""
@staticmethod
def get_instance(args):
# pylint: disable=unused-argument
"""
create an instance of the initializer
"""
return tf.constant_initializer(1.0)
class UniformUnitScaling(object):
"""
see also:
https://www.tensorflow.org/api_docs/python/tf/uniform_unit_scaling_initializer
"""
@staticmethod
def get_instance(args):
"""
create an instance of the initializer
"""
factor = float(args.get('factor', 1.0))
return tf.uniform_unit_scaling_initializer(factor, seed=SEED)
class Orthogonal(object):
"""
see also:
https://www.tensorflow.org/api_docs/python/tf/orthogonal_initializer
"""
@staticmethod
def get_instance(args):
"""
create an instance of the initializer
"""
gain = float(args.get('gain', 1.0))
return tf.orthogonal_initializer(gain, seed=SEED)
class VarianceScaling(object):
"""
see also:
https://www.tensorflow.org/api_docs/python/tf/variance_scaling_initializer
"""
@staticmethod
def get_instance(args):
"""
create an instance of the initializer
"""
scale = float(args.get('scale', 1.0))
mode = args.get('mode', "fan_in")
assert (mode in ["fan_in", "fan_out", "fan_avg"])
distribution = args.get('distribution', "normal")
assert (distribution in ["normal", "uniform"])
return tf.variance_scaling_initializer(scale,
mode,
distribution,
seed=SEED)
class GlorotNormal(object):
"""
see also:
https://www.tensorflow.org/api_docs/python/tf/glorot_normal_initializer
"""
@staticmethod
def get_instance(args):
# pylint: disable=unused-argument
"""
create an instance of the initializer
"""
return tf.glorot_normal_initializer(seed=SEED)
class GlorotUniform(object):
"""
see also:
https://www.tensorflow.org/api_docs/python/tf/glorot_uniform_initializer
"""
@staticmethod
def get_instance(args):
# pylint: disable=unused-argument
"""
create an instance of the initializer
"""
return tf.glorot_uniform_initializer(seed=SEED)
class HeUniform(object):
"""
He uniform variance scaling initializer.
It draws samples from a uniform distribution within [-limit, limit]
where ``limit`` is ``sqrt(6 / fan_in)``
where ``fan_in`` is the number of input units in the weight tensor.
# Arguments
seed: A Python integer. Used to seed the random generator.
# Returns
An initializer.
# References
He et al., https://arxiv.org/abs/1502.01852
"""
@staticmethod
def get_instance(args):
# pylint: disable=unused-argument
"""
create an instance of the initializer
"""
if not args:
args = {"scale": "2.", "mode": "fan_in", "distribution": "uniform"}
return VarianceScaling.get_instance(args)
class HeNormal(object):
"""
He normal initializer.
It draws samples from a truncated normal distribution centered on 0
with ``stddev = sqrt(2 / fan_in)``
where ``fan_in`` is the number of input units in the weight tensor.
# Arguments
seed: A Python integer. Used to seed the random generator.
# Returns
An initializer.
# References
He et al., https://arxiv.org/abs/1502.01852
"""
@staticmethod
def get_instance(args):
# pylint: disable=unused-argument
"""
create an instance of the initializer
"""
if not args:
args = {"scale": "2.", "mode": "fan_in", "distribution": "normal"}
return VarianceScaling.get_instance(args)
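# Example usage (a minimal sketch; the variable name and shape are illustrative,
# and assume a TensorFlow 1.x graph context):
# init = HeNormal.get_instance({})
# weights = tf.get_variable('w', shape=[3, 3, 1, 16], initializer=init)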
|
test/test_jpiarea.py
|
liujb/python-geohash
| 284 |
110698
|
<reponame>liujb/python-geohash
# coding: UTF-8
import unittest
import jpiarea
def dms(d,m,s):
return float(d) + (float(m) + float(s)/60)/60.0
class TestReference(unittest.TestCase):
# hash code examples from open iarea document
# http://www.nttdocomo.co.jp/service/imode/make/content/iarea/domestic/index.html
def test_lv1(self):
self.assertEqual("5438", jpiarea.encode(36,138)[0:4])
self.assertEqual("5637", jpiarea.encode(dms(37,20,0),137)[0:4])
def test_lv2(self):
p = jpiarea.bbox("533946")
self.assertAlmostEqual(503100000, p["s"]*3600*1000)
self.assertAlmostEqual(503550000, p["n"]*3600*1000)
self.assertAlmostEqual(128400000, p["w"]*3600*1000)
self.assertAlmostEqual(128700000, p["e"]*3600*1000)
def test_lv3(self):
p = jpiarea.bbox("5339463")
self.assertAlmostEqual(503325000, p["s"]*3600*1000)
self.assertAlmostEqual(503550000, p["n"]*3600*1000)
self.assertAlmostEqual(128550000, p["w"]*3600*1000)
self.assertAlmostEqual(128700000, p["e"]*3600*1000)
def test_lvN(self):
self.assertEqual("53394600300",jpiarea.encode(dms(35,40,41), dms(139,46,9.527)))
if __name__=='__main__':
unittest.main()
|
test/test_local_interpolation.py
|
FedericoV/diffrax
| 377 |
110702
|
<gh_stars>100-1000
import diffrax.local_interpolation
import jax
import jax.numpy as jnp
from helpers import shaped_allclose
def test_local_linear_interpolation():
t0 = 2.0
t1 = 3.3
t0_ = 2.8
t1_ = 2.9
for y0 in (2.1, jnp.array(2.1), jnp.array([2.1, 3.1])):
for y1 in (2.2, jnp.array(2.2), jnp.array([2.2, 3.2])):
interp = diffrax.local_interpolation.LocalLinearInterpolation(
t0=t0, t1=t1, y0=y0, y1=y1
)
# evaluate position
pred = interp.evaluate(t0_)
true = y0 + (y1 - y0) * (t0_ - t0) / (t1 - t0)
assert shaped_allclose(pred, true)
_, pred = jax.jvp(interp.evaluate, (t0_,), (jnp.ones_like(t0_),))
true = (y1 - y0) / (t1 - t0)
assert shaped_allclose(pred, true)
# evaluate increment
pred = interp.evaluate(t0_, t1_)
true = (y1 - y0) * (t1_ - t0_) / (t1 - t0)
assert shaped_allclose(pred, true)
_, pred = jax.jvp(
interp.evaluate, (t0_, t1_), (jnp.ones_like(t0_), jnp.ones_like(t1_))
)
assert shaped_allclose(pred, jnp.zeros_like(pred))
# evaluate over zero-length interval. Note t1=t0.
interp = diffrax.local_interpolation.LocalLinearInterpolation(
t0=t0, t1=t0, y0=y0, y1=y1
)
pred = interp.evaluate(t0)
true, _ = jnp.broadcast_arrays(y0, y1)
assert shaped_allclose(pred, true)
_, pred = jax.jvp(interp.evaluate, (t0,), (jnp.ones_like(t0),))
assert shaped_allclose(pred, jnp.zeros_like(pred))
|
libs/sqlobject/tests/test_sqlbuilder_importproxy.py
|
scambra/HTPC-Manager
| 422 |
110713
|
<reponame>scambra/HTPC-Manager
from sqlobject import *
from sqlobject.tests.dbtest import *
from sqlobject.views import *
from sqlobject.sqlbuilder import ImportProxy, Alias
def testSimple():
nyi = ImportProxy('NotYetImported')
x = nyi.q.name
class NotYetImported(SQLObject):
name = StringCol(dbName='a_name')
y = nyi.q.name
assert str(x) == 'not_yet_imported.a_name'
assert str(y) == 'not_yet_imported.a_name'
def testAddition():
nyi = ImportProxy('NotYetImported2')
x = nyi.q.name+nyi.q.name
class NotYetImported2(SQLObject):
name = StringCol(dbName='a_name')
assert str(x) == '((not_yet_imported2.a_name) + (not_yet_imported2.a_name))'
def testOnView():
nyi = ImportProxy('NotYetImportedV')
x = nyi.q.name
class NotYetImported3(SQLObject):
name = StringCol(dbName='a_name')
class NotYetImportedV(ViewSQLObject):
class sqlmeta:
idName = NotYetImported3.q.id
name = StringCol(dbName=NotYetImported3.q.name)
assert str(x) == 'not_yet_imported_v.name'
def testAlias():
nyi = ImportProxy('NotYetImported4')
y = Alias(nyi, 'y')
x = y.q.name
class NotYetImported4(SQLObject):
name = StringCol(dbName='a_name')
assert str(y) == 'not_yet_imported4 y'
assert tablesUsedSet(x, None) == set(['not_yet_imported4 y'])
assert str(x) == 'y.a_name'
|
opy/__init__.py
|
nm17/Opy
| 284 |
110718
|
<reponame>nm17/Opy<filename>opy/__init__.py
'''_opy_Copyright 2014, 2015, 2016, 2017, 2018 <NAME>, GEATEC engineering, www.geatec.com
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
'''This module makes it possible to use Opy as part of a Python application.
The API was kept utterly simple and non-intrusive.
It just mimics command line activation without starting a new interpreter.
So the normal docs about the Opy command line apply.
Just import this module and then call the 'run' function, passing as parameters
the strings that would normally have been the command line
arguments to Opy.
Function 'run' returns a tuple
(<normal_report>, <error_report>, <exit_status>),
in which
- <normal_report> is what Opy normally writes to sys.stdout
- <error_report> is what Opy normally writes to sys.stderr
- exit_status is what Opy normally returns to the operating system
Trivial example of code using this module:
import sys
from opy import api
result = api.run(sys.argv[1:])
if result[0]:
print('\nObfuscation report:\n')
print(result[0]) # stdout
if result[1]:
print('\nError report:\n')
print(result[1]) # stderr
print ('\nExit status:', result[2])
'''
import sys
import io
import traceback
from .opy import main
def run (*params):
sys.argv = [''] + list (params)
old_stdout = sys.stdout
new_stdout = io.StringIO ()
    sys.stdout = new_stdout
old_stderr = sys.stderr
new_stderr = io.StringIO ()
sys.stderr = new_stderr
try:
exit_status = 0
main ()
except SystemExit as system_exit:
exit_status = system_exit.code
except Exception as exception:
print (traceback.format_exc ())
sys.stdout = old_stdout
sys.stderr = old_stderr
return new_stdout.getvalue (), new_stderr.getvalue (), exit_status
|
nlu/components/classifiers/generic_classifier/generic_classifier.py
|
milyiyo/nlu
| 480 |
110730
|
<gh_stars>100-1000
from sparknlp_jsl.annotator import GenericClassifierModel, GenericClassifierApproach
from sparknlp_jsl.base import *
class GenericClassifier:
@staticmethod
def get_default_model():
return GenericClassifierModel.pretrained() \
.setInputCols("feature_vector") \
.setOutputCol("generic_classification") \
@staticmethod
def get_pretrained_model(name, language):
return GenericClassifierModel.pretrained(name,language) \
.setInputCols("feature_vector") \
.setOutputCol("generic_classification") \
@staticmethod
def get_default_trainable_model():
return GenericClassifierApproach() \
.setInputCols("feature_vector") \
.setOutputCol("generic_classification") \
.setLabelColumn("y") \
.setEpochsNumber(2)
|
tests/test_wd.py
|
alliance-genome/ontobio
| 101 |
110735
|
<reponame>alliance-genome/ontobio
from ontobio.ontol_factory import OntologyFactory
from ontobio.assoc_factory import AssociationSetFactory
from ontobio.assocmodel import AssociationSet
import ontobio.sparql.wikidata as wd
import logging
PTSD = 'DOID:2055'
def test_wd_sparql_ptsd():
"""
TODO
test using PTSD
"""
xrefs = wd.fetchall_xrefs('HP')
print("XRs: {}".format(list(xrefs.items())[:10]))
[doid] = wd.map_id(PTSD, 'DOID')
genes = wd.fetchall_sp(doid, 'genetic_association')
logging.info("GENES: {}".format(genes))
proteins = wd.canned_query('disease2protein', doid)
logging.info("PROTEINS: {}".format(proteins))
for p in proteins:
print(p)
ofactory = OntologyFactory()
afactory = AssociationSetFactory()
ont = ofactory.create('go')
aset = afactory.create(ontology=ont,
subject_category='protein',
object_category='function',
taxon='NCBITaxon:9606')
rs = aset.enrichment_test(proteins, threshold=1e-2, labels=True, direction='less')
for r in rs:
print("UNDER: "+str(r))
for p in proteins:
print("{} {}".format(p, aset.label(p)))
for t in aset.annotations(p):
print(" {} {}".format(t,ont.label(t)))
|
rest_registration/signers/register_email.py
|
psibean/django-rest-registration
| 329 |
110766
|
<gh_stars>100-1000
from rest_registration.settings import registration_settings
from rest_registration.utils.signers import URLParamsSigner
class RegisterEmailSigner(URLParamsSigner):
SALT_BASE = 'register-email'
USE_TIMESTAMP = True
def get_base_url(self):
return registration_settings.REGISTER_EMAIL_VERIFICATION_URL
def get_valid_period(self):
return registration_settings.REGISTER_EMAIL_VERIFICATION_PERIOD
|
ghostwriter/reporting/migrations/0004_report_delivered.py
|
bbhunter/Ghostwriter
| 601 |
110779
|
# Generated by Django 2.2.3 on 2019-08-27 18:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reporting', '0003_findingnote'),
]
operations = [
migrations.AddField(
model_name='report',
name='delivered',
field=models.BooleanField(default=False, help_text='Delivery status of the report', verbose_name='Delivered'),
),
]
|
tests/estimator/classifier/GaussianNB/GaussianNBJSTest.py
|
midhunexeter/sklearn-porter
| 1,197 |
110789
|
<reponame>midhunexeter/sklearn-porter<filename>tests/estimator/classifier/GaussianNB/GaussianNBJSTest.py<gh_stars>1000+
# -*- coding: utf-8 -*-
from unittest import TestCase
from sklearn.naive_bayes import GaussianNB
from tests.estimator.classifier.Classifier import Classifier
from tests.language.JavaScript import JavaScript
class GaussianNBJSTest(JavaScript, Classifier, TestCase):
def setUp(self):
super(GaussianNBJSTest, self).setUp()
self.estimator = GaussianNB()
def tearDown(self):
super(GaussianNBJSTest, self).tearDown()
|
evaluation_scripts/2017-evaluation-script/program/task_2_preprocess.py
|
yolochai/scisumm-corpus
| 198 |
110831
|
import os
import sys
def process(input_file, output_file):
output_text = ""
if input_file.endswith("abstract.summary.txt") or input_file.endswith("community.summary.txt") or input_file.endswith("combined.summary.txt") or input_file.endswith("human.summary.txt"):
input_text = []
with open(input_file, "r") as f:
input_text = f.readlines()
for i in range(len(input_text)):
inp = input_text[i].strip()
if len(inp) == 0:
continue
if inp.startswith("<S sid ="):
out = inp.split(">", 1)[1]
out = out.split("</S>", 1)[0]
else:
out = inp
output_text += out + " "
else:
with open(input_file, "r") as f:
output_text = f.read()
with open(output_file, "w") as f:
f.write(output_text)
def main(input_dir, output_dir):
if not os.path.exists(input_dir):
print("%s not a valid directory" % input_dir)
if not os.path.exists(output_dir):
print("%s not a valid directory" % output_dir)
for file in os.listdir(input_dir):
process(os.path.join(input_dir, file), os.path.join(output_dir, file))
if __name__ == "__main__":
input_dir = sys.argv[1]
output_dir = sys.argv[2]
main(input_dir, output_dir)
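# Example invocation (directory paths are illustrative):
#   python task_2_preprocess.py /path/to/summaries_in /path/to/summaries_out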
|