content (stringlengths 0 – 894k) | type (stringclasses 2 values) |
---|---|
import io
import math
import skbio
from scipy.spatial import distance
from scipy.spatial import ConvexHull
import pandas as pd
import numpy as np
from skbio import TreeNode
NUM_TRI = 100
VERTS_PER_TRI = 3
ELEMENTS_PER_VERT = 5
(R_INDEX, G_INDEX, B_INDEX) = (0, 1, 2)
(R_OFFSET, G_OFFSET, B_OFFSET) = (2, 3, 4)
def in_quad_1(angle):
""" Determines if the angle is between 0 and pi / 2 radians
Parameters
----------
angle : float
the angle of a vector in radians
Returns
-------
return : bool
true if angle is between 0 and pi / 2 radians
"""
    return 0 < angle < math.pi / 2
def in_quad_4(angle):
    """ Determines if the angle is between (3 * pi) / 2 and 2 * pi radians
    Parameters
    ----------
    angle : float
        the angle of a vector in radians
    Returns
    -------
    return : bool
        true if angle is between (3 * pi) / 2 and 2 * pi radians
    """
    return 3 * math.pi / 2 < angle < 2 * math.pi
def calculate_angle(v):
""" Finds the angle of the two 2-d vectors in radians
Parameters
----------
v : tuple
vector
Returns
-------
angle of vector in radians
"""
if v[0] == 0:
return math.pi / 2 if v[1] > 0 else 3 * math.pi / 2
angle = math.atan(v[1] / v[0])
if v[0] > 0:
return angle if angle >= 0 else 2 * math.pi + angle
else:
return angle + math.pi if angle >= 0 else (2 * math.pi + angle) - math.pi
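# Example values (sketch, hand-checked against the branches above):
#   calculate_angle((1, 1))   -> math.pi / 4       (quadrant 1)
#   calculate_angle((-1, 1))  -> 3 * math.pi / 4   (quadrant 2)
#   calculate_angle((0, -1))  -> 3 * math.pi / 2   (pointing straight down)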
def name_internal_nodes(tree):
""" Name internal nodes that does not have name
Parameters
----------
tree : skbio.TreeNode or empress.Tree
Input tree with labeled tips and partially unlabeled internal nodes or branch lengths.
Returns
-------
skbio.TreeNode or empress.Tree
Tree with fully labeled internal nodes and branches.
"""
# initialize tree with branch lengths and node names if they are missing
for i, n in enumerate(tree.postorder(include_self=True)):
if n.length is None:
n.length = 1
if n.name is None:
new_name = 'EmpressNode%d' % i
n.name = new_name
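# Minimal usage sketch (assumes scikit-bio's TreeNode.read; the topology is illustrative only):
#   tree = TreeNode.read(io.StringIO('((a,b),c);'))
#   name_internal_nodes(tree)
#   -> unnamed internal nodes receive names of the form 'EmpressNode<i>' and a branch length of 1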
def read_metadata(file_name, skip_row=0, seperator='\t'):
""" Reads in metadata for internal nodes
Parameters
----------
file_name : str
The name of the file to read the data from
skip_row : int
The number of rows to skip when reading in the data
seperator : str
The delimiter used in the data file
Returns
-------
    pd.DataFrame
        Metadata table whose first column has been renamed to 'Node_id'
"""
if seperator == ' ':
cols = pd.read_csv(
file_name, skiprows=skip_row, nrows=1, delim_whitespace=True).columns.tolist()
        # StringIO is used in test cases; without this the tests would fail because the
        # buffer is left at the end every time it is read
if type(file_name) is io.StringIO:
file_name.seek(0)
metadata = pd.read_table(
file_name, skiprows=skip_row, delim_whitespace=True, dtype={cols[0]: object})
metadata.rename(columns={metadata.columns[0]: "Node_id"}, inplace=True)
else:
cols = pd.read_csv(
file_name, skiprows=skip_row, nrows=1, sep=seperator).columns.tolist()
        # StringIO is used in test cases; without this the tests would fail because the
        # buffer is left at the end every time it is read
if type(file_name) is io.StringIO:
file_name.seek(0)
metadata = pd.read_table(
file_name, skiprows=skip_row, sep=seperator, dtype={cols[0]: object})
metadata.rename(columns={metadata.columns[0]: 'Node_id'}, inplace=True)
return metadata
def read(file_name, file_format='newick'):
""" Reads in contents from a file.
This will create a skbio.TreeNode object
    Currently supported formats: newick
    Future supported formats: phyloxml,
        cytoscape network,
        cytoscape layout
        - networkx
        phyloxml
        - Python has a parser for it, but it parses it into a phylogeny object.
        - We need to parse the phylogeny object into the metadata table by
          traversing?
        - What is the confidence for each clade?
Parameters
----------
file_name : str
The name of the file to read that contains the tree
file_format : str
The format of the file to read that contains the tree
TODO: Need to create parsers for each of these.
Returns
-------
tree - skbio.TreeNode
A TreeNode object of the newick file
None - null
If a non-newick file_format was passed in
"""
if file_format == 'newick':
tree = skbio.read(file_name, file_format, into=TreeNode)
return tree
return None
def total_angle(a_1, a_2, small_sector=True):
""" determines the starting angle of the sector and total theta of the sector.
Note this is only to be used if the sector is less than pi radians
Parameters
----------
a1 : float
angle (in radians) of one of the edges of the sector
a2 : float
angle (in radians of one of the edges of the sector)
Returns
-------
starting angle : float
the angle at which to start drawing the sector
theta : float
the angle of the sector
"""
    # determines the angle of the sector as well as the angle at which to start drawing it
if small_sector:
if (not (in_quad_1(a_1) and in_quad_4(a_2) or
in_quad_4(a_1) and in_quad_1(a_2))):
a_min, a_max = (min(a_1, a_2), max(a_1, a_2))
if a_max - a_min > math.pi:
a_min += 2 * math.pi
starting_angle = a_max
theta = a_min - a_max
else:
starting_angle = a_2 if a_1 > a_2 else a_1
theta = abs(a_1 - a_2)
else:
starting_angle = a_1 if a_1 > a_2 else a_2
ending_angle = a_1 if starting_angle == a_2 else a_2
theta = ending_angle + abs(starting_angle - 2 * math.pi)
else:
theta = 2 * math.pi - abs(a_1 - a_2)
return theta
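# Example (hand-worked from the branches above): with one edge in quadrant 1 and the other
# in quadrant 4 the sector straddles the positive x axis, e.g.
#   total_angle(math.pi / 4, 7 * math.pi / 4, small_sector=True) -> math.pi / 2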
def extract_color(color):
"""
A 6 digit hex string representing an (r, g, b) color
"""
HEX_BASE = 16
NUM_CHAR = 2
LARGEST_COLOR = 255
color = color.lower()
color = [color[i: i+NUM_CHAR] for i in range(0, len(color), NUM_CHAR)]
color = [int(hex_string, HEX_BASE) for hex_string in color]
color = [c / LARGEST_COLOR for c in color]
return (color[R_INDEX], color[G_INDEX], color[B_INDEX])
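# Example: 'FF8000' -> lowercase -> ('ff', '80', '00') -> (255, 128, 0) -> (1.0, 0.50196..., 0.0)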
def create_arc_sector(sector_info):
"""
Creates an arc using sector_info:
"""
sector = []
theta = sector_info['theta'] / NUM_TRI
rad = sector_info['starting_angle']
(red, green, blue) = extract_color(sector_info['color'])
c_x = sector_info['center_x']
c_y = sector_info['center_y']
longest_branch = sector_info['largest_branch']
# creating the sector
for i in range(0, NUM_TRI):
        # first vertex of triangle
sector.append(c_x)
sector.append(c_y)
sector.append(red)
sector.append(green)
sector.append(blue)
        # second vertex of triangle
sector.append(math.cos(rad) * longest_branch + c_x)
sector.append(math.sin(rad) * longest_branch + c_y)
sector.append(red)
sector.append(green)
sector.append(blue)
rad += theta
        # third vertex of triangle
sector.append(math.cos(rad) * longest_branch + c_x)
sector.append(math.sin(rad) * longest_branch + c_y)
sector.append(red)
sector.append(green)
sector.append(blue)
return sector
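# The returned list is flat vertex data: NUM_TRI * VERTS_PER_TRI * ELEMENTS_PER_VERT = 1500
# floats, i.e. (x, y, r, g, b) per vertex, three vertices per triangle. Sketch of a call
# (keys taken from the reads above; the values are illustrative only):
#   sector = create_arc_sector({'theta': math.pi / 2, 'starting_angle': 0.0, 'color': 'ff0000',
#                               'center_x': 0.0, 'center_y': 0.0, 'largest_branch': 1.0})
#   len(sector) == 1500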
def sector_info(points, sector_center, ancestor_coords):
"""
'create_sector' will find the left most branch, right most branch, deepest branch, and
shortes branch of the clade. Then, 'create_sector' will also find the angle between the
left and right most branch.
Parameter
---------
points : 2-d list
format of list [[x1,y1, x2, y2],...]
center : list
the point in points that will be used as the center of the sector. Note center
should not be in points
ancestor_coords : list
the coordinates of the direct parent of center. Note ancestor_coords should not
be in points
Return
------
sector_info : Dictionary
The keys are center_x, center_y, starting_angle, theta, largest_branch, smallest_branch
"""
# origin
center_point = np.array([[0, 0]])
center = (0, 0)
# find the length of the smallest and longest branches
distances = [distance.euclidean(tip, center) for tip in points]
longest_branch = max(distances)
smallest_branch = min(distances)
# calculate angles of the tip vectors
angles = [calculate_angle(points[x]) for x in range(0, len(points))]
angles = sorted(angles)
# calculate the angle of the vector going from clade root to its direct ancestor
ancestor_angle = calculate_angle((ancestor_coords))
# find position of the left most branch
num_angles = len(angles)
l_branch = [i for i in range(0, num_angles - 1) if angles[i] < ancestor_angle < angles[i + 1]]
l_found = len(l_branch) > 0
l_index = l_branch[0] if l_found else 0
# the left and right most branches
(a_1, a_2) = (angles[l_index], angles[l_index + 1]) if l_found else (angles[l_index], angles[-1])
    # determines the starting angle (left most branch) of the sector
if l_found:
starting_angle = a_1 if a_1 > a_2 else a_2
else:
starting_angle = a_2 if a_1 > a_2 else a_1
# calculate the angle between the left and right most branches
    small_sector = angles[-1] - angles[0] <= math.pi
theta = total_angle(a_1, a_2, small_sector)
# the sector webgl will draw
colored_clades = {
'center_x': sector_center[0], 'center_y': sector_center[1],
'starting_angle': starting_angle, 'theta': theta,
'largest_branch': longest_branch, 'smallest_branch': smallest_branch}
return colored_clades
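if __name__ == '__main__':
    # Small self-check sketch: two tips at 45 and 135 degrees whose parent points straight
    # down. The values are illustrative; the expected numbers follow from the functions above.
    demo = sector_info(points=[[1.0, 1.0], [-1.0, 1.0]],
                       sector_center=[5.0, 5.0],
                       ancestor_coords=[0.0, -1.0])
    # starting_angle == pi / 4, theta == pi / 2, largest_branch == smallest_branch == sqrt(2)
    print(demo)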
|
python
|
from kimonet.system.generators import regular_system, crystal_system
from kimonet.analysis import visualize_system, TrajectoryAnalysis
from kimonet.system.molecule import Molecule
from kimonet import system_test_info
from kimonet.core.processes.couplings import forster_coupling
from kimonet.core.processes.decays import einstein_radiative_decay
from kimonet.core.processes.types import GoldenRule, DecayRate, DirectRate
from kimonet.system.vibrations import MarcusModel, LevichJortnerModel, EmpiricalModel
from kimonet.fileio import store_trajectory_list, load_trajectory_list
from kimonet.analysis import plot_polar_plot
from kimonet import calculate_kmc, calculate_kmc_parallel, calculate_kmc_parallel_py2
from kimonet.system.state import State
from kimonet.system.state import ground_state as gs
from kimonet.core.processes.transitions import Transition
import numpy as np
# states list
s1 = State(label='s1', energy=20.0, multiplicity=1)
# transition moments
transitions = [Transition(s1, gs,
tdm=[0.1, 0.0], # a.u.
reorganization_energy=0.08)] # eV
# define system as a crystal
molecule = Molecule()
#print(molecule, molecule.state, molecule.state.get_center())
molecule2 = Molecule(site_energy=2)
print(molecule2, molecule2.state, molecule2.state.get_center())
print(molecule, molecule.state, molecule.state.get_center())
system = crystal_system(molecules=[molecule, molecule], # molecule to use as reference
scaled_site_coordinates=[[0.0, 0.0],
[0.0, 0.5]],
unitcell=[[5.0, 1.0],
[1.0, 5.0]],
dimensions=[2, 2], # supercell size
orientations=[[0.0, 0.0, np.pi/2],
[0.0, 0.0, 0.0]]) # if element is None then random, if list then Rx Ry Rz
print([m.site_energy for m in system.molecules])
print(system.get_ground_states())
# set initial exciton
system.add_excitation_index(s1, 0)
system.add_excitation_index(s1, 1)
# set additional system parameters
system.process_scheme = [GoldenRule(initial_states=(s1, gs), final_states=(gs, s1),
electronic_coupling_function=forster_coupling,
description='Forster coupling',
arguments={'ref_index': 1,
'transitions': transitions},
vibrations=MarcusModel(transitions=transitions) # eV
),
DecayRate(initial_state=s1, final_state=gs,
decay_rate_function=einstein_radiative_decay,
arguments={'transitions': transitions},
description='custom decay rate')
]
system.cutoff_radius = 8 # interaction cutoff radius in Angstrom
# some system analyze functions
system_test_info(system)
visualize_system(system)
# do the kinetic Monte Carlo simulation
trajectories = calculate_kmc(system,
num_trajectories=5, # number of trajectories that will be simulated
max_steps=100, # maximum number of steps for trajectory allowed
silent=False)
# specific trajectory plot
trajectories[0].plot_graph().show()
trajectories[0].plot_2d().show()
# resulting trajectories analysis
analysis = TrajectoryAnalysis(trajectories)
print('diffusion coefficient: {:9.5e} Angs^2/ns'.format(analysis.diffusion_coefficient()))
print('lifetime: {:9.5e} ns'.format(analysis.lifetime()))
print('diffusion length: {:9.5e} Angs'.format(analysis.diffusion_length()))
for state in analysis.get_states():
print('\nState: {}\n--------------------------------'.format(state))
print('diffusion coefficient: {:9.5e} Angs^2/ns'.format(analysis.diffusion_coefficient(state)))
print('lifetime: {:9.5e} ns'.format(analysis.lifetime(state)))
print('diffusion length: {:9.5e} Angs'.format(analysis.diffusion_length(state)))
print('diffusion tensor (angs^2/ns)')
print(analysis.diffusion_coeff_tensor(state))
print('diffusion length tensor (Angs)')
print(analysis.diffusion_length_square_tensor(state))
plot_polar_plot(analysis.diffusion_coeff_tensor(state),
title='Diffusion', plane=[0, 1])
plot_polar_plot(analysis.diffusion_length_square_tensor(state, unit_cell=[[5.0, 1.0],
[1.0, 5.0]]),
title='Diffusion length square', crystal_labels=True, plane=[0, 1])
analysis.plot_exciton_density('s1').show()
analysis.plot_2d('s1').show()
analysis.plot_distances('s1').show()
analysis.plot_histogram('s1').show()
analysis.plot_histogram('s1').savefig('histogram_s1.png')
store_trajectory_list(trajectories, 'example_simple.h5')
|
python
|
import numpy as np
import pytest
from quara.loss_function.loss_function import LossFunction, LossFunctionOption
class TestLossFunctionOption:
def test_access_mode_weight(self):
loss_option = LossFunctionOption()
assert loss_option.mode_weight == None
loss_option = LossFunctionOption(
mode_weight="mode", weights=[1, 2, 3], weight_name="name"
)
assert loss_option.mode_weight == "mode"
# Test that "mode_weight" cannot be updated
with pytest.raises(AttributeError):
loss_option.mode_weight = "mode"
def test_access_weights(self):
loss_option = LossFunctionOption()
assert loss_option.weights == None
loss_option = LossFunctionOption(
mode_weight="mode", weights=[1, 2, 3], weight_name="name"
)
assert loss_option.weights == [1, 2, 3]
# Test that "weights" cannot be updated
with pytest.raises(AttributeError):
loss_option.weights = [1, 2, 3]
def test_access_weight_name(self):
loss_option = LossFunctionOption()
assert loss_option.weight_name == None
loss_option = LossFunctionOption(
mode_weight="mode", weights=[1, 2, 3], weight_name="name"
)
assert loss_option.weight_name == "name"
# Test that "weight_name" cannot be updated
with pytest.raises(AttributeError):
loss_option.weight_name = "name"
class TestLossFunction:
def test_access_num_var(self):
loss_func = LossFunction(4)
assert loss_func.num_var == 4
# Test that "num_var" cannot be updated
with pytest.raises(AttributeError):
loss_func.num_var = 5
def test_access_option(self):
loss_func = LossFunction(4)
assert loss_func.option == None
loss_func.set_from_option(LossFunctionOption())
assert loss_func.option != None
def test_access_on_value(self):
loss_func = LossFunction(4)
assert loss_func.on_value == False
# Test that "on_value" cannot be updated
with pytest.raises(AttributeError):
loss_func.on_value = True
def test_reset_on_value(self):
loss_func = LossFunction(4)
loss_func._on_value = True
loss_func._reset_on_value()
assert loss_func.on_value == False
def test_set_on_value(self):
loss_func = LossFunction(4)
loss_func._set_on_value(True)
assert loss_func.on_value == True
loss_func._set_on_value(False)
assert loss_func.on_value == False
def test_access_on_gradient(self):
loss_func = LossFunction(4)
assert loss_func.on_gradient == False
# Test that "on_gradient" cannot be updated
with pytest.raises(AttributeError):
loss_func.on_gradient = True
def test_reset_on_gradient(self):
loss_func = LossFunction(4)
loss_func._on_gradient = True
loss_func._reset_on_gradient()
assert loss_func.on_gradient == False
def test_set_on_gradient(self):
loss_func = LossFunction(4)
loss_func._set_on_gradient(True)
assert loss_func.on_gradient == True
loss_func._set_on_gradient(False)
assert loss_func.on_gradient == False
def test_access_on_hessian(self):
loss_func = LossFunction(4)
assert loss_func.on_hessian == False
# Test that "on_hessian" cannot be updated
with pytest.raises(AttributeError):
loss_func.on_hessian = True
def test_reset_on_hessian(self):
loss_func = LossFunction(4)
loss_func._on_hessian = True
loss_func._reset_on_hessian()
assert loss_func.on_hessian == False
def test_set_on_hessian(self):
loss_func = LossFunction(4)
loss_func._set_on_hessian(True)
assert loss_func.on_hessian == True
loss_func._set_on_hessian(False)
assert loss_func.on_hessian == False
def test_validate_var_shape(self):
loss_func = LossFunction(4)
var = np.array([1, 2, 3, 4], dtype=np.float64)
loss_func._validate_var_shape(var)
var = np.array([1, 2, 3, 4, 5], dtype=np.float64)
with pytest.raises(ValueError):
loss_func._validate_var_shape(var)
|
python
|
import os
import io
from setuptools import setup
here = os.path.abspath(os.path.dirname(__file__))
def get_readme():
path = os.path.join(here, 'README.md')
with io.open(path, encoding='utf-8') as f:
return '\n' + f.read()
setup(
name='ndc_parser',
version='0.1',
description='NDC Parser',
long_description=get_readme(),
long_description_content_type='text/markdown',
url='https://github.com/ndc-dev/python-parser',
author='CALIL Inc.',
author_email='[email protected]',
license='MIT',
keywords='NDC Nippon Decimal Classification Parser',
packages=[
"ndc_parser",
],
install_requires=[],
)
|
python
|
import unittest
import torch
from fn import F
from fn.op import apply
from tensorneko.layer import Linear
from torch.nn import Linear as PtLinear, LeakyReLU, BatchNorm1d, Tanh
class TestLinear(unittest.TestCase):
@property
def batch(self):
return 4
@property
def in_neurons(self):
return 128
@property
def out_neurons(self):
return 32
def test_simple_linear_layer(self):
# build layers
neko_linear = Linear(self.in_neurons, self.out_neurons, True)
# test definition
self.assertEqual(str(neko_linear.linear), str(PtLinear(self.in_neurons, self.out_neurons, True)))
self.assertIs(neko_linear.activation, None)
self.assertIs(neko_linear.normalization, None)
# test feedforward
x = torch.rand(self.batch, self.in_neurons) # four 128-dim vectors
neko_res, pt_res = map(F(apply, args=[x]), [neko_linear, neko_linear.linear])
self.assertTrue((pt_res - neko_res).sum() < 1e-8)
def test_linear_with_activation(self):
# build layers
neko_linear = Linear(self.in_neurons, self.out_neurons, False, build_activation=LeakyReLU)
# test definition
self.assertEqual(str(neko_linear.linear), str(PtLinear(self.in_neurons, self.out_neurons, False)))
self.assertEqual(str(neko_linear.activation), str(LeakyReLU()))
self.assertIs(neko_linear.normalization, None)
# test feedforward
x = torch.rand(self.batch, self.in_neurons) # four 128-dim vectors
neko_res, pt_res = map(F(apply, args=[x]), [neko_linear, F() >> neko_linear.linear >> neko_linear.activation])
self.assertTrue((pt_res - neko_res).sum() < 1e-8)
def test_linear_with_activation_after_normalization(self):
# build layers
neko_linear = Linear(self.in_neurons, self.out_neurons, True, build_activation=Tanh,
build_normalization=F(BatchNorm1d, self.out_neurons),
normalization_after_activation=False
)
# test definition
self.assertEqual(str(neko_linear.linear), str(PtLinear(self.in_neurons, self.out_neurons, True)))
self.assertEqual(str(neko_linear.activation), str(Tanh()))
self.assertEqual(str(neko_linear.normalization), str(BatchNorm1d(self.out_neurons)))
# test feedforward
x = torch.rand(self.batch, self.in_neurons) # four 128-dim vectors
neko_res, pt_res = map(F(apply, args=[x]), [neko_linear,
F() >> neko_linear.linear >> neko_linear.normalization >> neko_linear.activation]
)
self.assertTrue((pt_res - neko_res).sum() < 1e-8)
def test_linear_with_activation_before_normalization(self):
# build layers
neko_linear = Linear(self.in_neurons, self.out_neurons, True, build_activation=Tanh,
build_normalization=F(BatchNorm1d, self.out_neurons),
normalization_after_activation=True
)
# test definition
self.assertEqual(str(neko_linear.linear), str(PtLinear(self.in_neurons, self.out_neurons, True)))
self.assertEqual(str(neko_linear.activation), str(Tanh()))
self.assertEqual(str(neko_linear.normalization), str(BatchNorm1d(self.out_neurons)))
# test feedforward
x = torch.rand(self.batch, self.in_neurons) # four 128-dim vectors
neko_res, pt_res = map(F(apply, args=[x]), [neko_linear,
F() >> neko_linear.linear >> neko_linear.activation >> neko_linear.normalization]
)
self.assertTrue((pt_res - neko_res).sum() < 1e-8)
|
python
|
# -*- coding: utf-8 -*-
# Copyright 2016, RadsiantBlue Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Flask
from flask import request
import json
import signal
import sys
import time
import mongo
import loop
import common
class HttpMessage:
statusCode = 0
data = ""
def __init__(self,statusCode=None,message=None):
if statusCode is not None:
self.statusCode=statusCode
else:
self.statusCode=200
if message is not None:
self.data = message
else:
self.data = "Hi! I'm a monitor lizard!"
def setMessage(self,message):
self.data = message
def setStatusCode(self,statusCode):
self.statusCode=statusCode
def getJSON(self):
return json.dumps(self.__dict__, indent=2)
def getHTML(self):
return '<pre style="word-wrap: break-word; white-space: pre-wrap;">'+self.getJSON()+'</pre>'
class AdminStats:
    def __init__(self, createdAt=None, online=0, degraded=0, failed=0, unknown=0):
        # evaluate time.time() at call time rather than at class-definition time
        self.createdAt = createdAt if createdAt is not None else time.time()
self.online=online
self.degraded=degraded
self.failed=failed
self.unknown=unknown
def update(self,mong):
if not common.mongoFound:
return
self.online,self.degraded,self.failed,self.unknown = 0,0,0,0
services = mong.get_services()
for service in services:
meta = mongo.ResourceMetaDataInterface(**service.resourceMetadata)
av = None
try:
av = meta.availability
            except Exception:
                pass
            if av is None:
self.unknown+=1
elif av == mongo.ONLINE:
self.online+=1
elif av == mongo.DEGRADED:
self.degraded+=1
elif av == mongo.FAILED:
self.failed+=1
else:
self.unknown+=1
def getJSON(self):
if not common.mongoFound:
return HttpMessage(200,"MongoDB could not be found.").getJSON()
return json.dumps(self.__dict__, indent=2)
app = Flask(__name__)
mong = mongo.Mongo()
mongExists=mong.env_found()
loopThread = loop.LoopingThread(interval=20,mong=mong)
adminStats = AdminStats()
@app.route("/",methods=['GET'])
def helloWorld():
return HttpMessage().getJSON()
@app.route("/admin/stats", methods=['GET'])
def adminStat():
adminStats.update(mong)
return adminStats.getJSON()
@app.route("/hello")
def hello():
return "<html>Hello world</html>"
@app.route('/test', methods=['GET','POST'])
def test():
if request.method == 'GET':
return HttpMessage(200,"GET").getJSON()
elif request.method == 'POST':
if not request.is_json:
return HttpMessage(400,"Payload is not of type json").getJSON()
jsn = request.get_json()
if jsn is None:
return HttpMessage(400,"Bad payload").getJSON()
return HttpMessage(200,jsn).getJSON()
else:
return HttpMessage(400,"Bad request").getJSON()
def signal_handler(signal, frame):
print('Shutting down...')
loopThread.stop()
sys.exit(0)
if __name__ =="__main__":
signal.signal(signal.SIGINT, signal_handler)
print('Press Ctrl+C')
loopThread.start()
app.run()
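# Example requests against a local run (Flask's default port 5000 assumed):
#   curl http://localhost:5000/             -> JSON HttpMessage with statusCode 200 and the default greeting
#   curl http://localhost:5000/admin/stats  -> JSON availability counts (online/degraded/failed/unknown)
#   curl -X POST -H 'Content-Type: application/json' -d '{"ping": 1}' http://localhost:5000/test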
|
python
|
def max_val(t):
"""
t, tuple or list
Each element of t is either an int, a tuple, or a list
No tuple or list is empty
Returns the maximum int in t or (recursively) in an element of t
"""
def find_all_int(data):
int_list = []
for item in data:
if isinstance(item, list) or isinstance(item, tuple):
int_list.extend(find_all_int(item))
elif isinstance(item, int):
int_list.append(item)
return int_list
return max(find_all_int(t))
print(max_val((5, (1,2), [[1],[9]])))
|
python
|
from __future__ import absolute_import
# noinspection PyUnresolvedReferences
from .ABuPickStockExecute import do_pick_stock_work
# noinspection PyUnresolvedReferences
from .ABuPickTimeExecute import do_symbols_with_same_factors, do_symbols_with_diff_factors
# noinspection all
from . import ABuPickTimeWorker as pick_time_worker
|
python
|
#!/usr/bin/env python2.7
# license removed for brevity
import rospy
import numpy as np
import json
import time
from std_msgs.msg import String
from std_msgs.msg import Bool
from rospy.numpy_msg import numpy_msg
from feedback_cclfd.srv import RequestFeedback
from feedback_cclfd.srv import PerformDemonstration
from feedback_cclfd.msg import ConstraintTypes
from cairo_nlp.srv import TTS, TTSResponse
""" This class is responsible for sampling constraints and
demonstrating them to the user for feedback. """
class Demonstrator():
def __init__(self):
rospy.init_node('demonstrator')
self.finished_first_demo = False
# start pub/sub
rospy.Subscriber("/planners/constraint_types",
numpy_msg(ConstraintTypes),
self.sample_demonstrations)
self.demos_pub = rospy.Publisher(
"/planners/demonstrations", String, queue_size=10)
# set up client for demonstration service
rospy.wait_for_service("feedback_demonstration")
try:
self.feedback_demonstration = rospy.ServiceProxy(
"feedback_demonstration", PerformDemonstration)
except rospy.ServiceException:
rospy.logwarn("Service setup failed (feedback_demonstration)")
# set up client for feedback service
rospy.wait_for_service("request_feedback")
try:
self.request_feedback = rospy.ServiceProxy(
"request_feedback", RequestFeedback)
except rospy.ServiceException:
rospy.logwarn("Service setup failed (request_feedback)")
# Set up client for NLP TTS service
rospy.wait_for_service("/nlp/google/tts")
try:
self.tts_server = rospy.ServiceProxy(
"/nlp/google/tts", TTS)
except rospy.ServiceException:
rospy.logerr("Service setup failed (/nlp/google/tts)")
rospy.loginfo("DEMONSTRATOR: Starting...")
def run(self):
# perform a bad demo to start
rospy.loginfo("DEMONSTRATOR: Starting first skill execution...")
self.tts_server("I am going to hand you the mug.")
finished = self.feedback_demonstration(0) # 0 = negative
if finished.response:
self.finished_first_demo = True
rospy.spin()
def sample_demonstrations(self, constraint_types):
# run until complete
while True:
# don't perform alternative demos until first is finished
if self.finished_first_demo:
num_demos = 2
rospy.loginfo("DEMONSTRATOR: Sampling demonstrations...")
cur_type = constraint_types.data
results = dict()
for i in range(0, num_demos):
# perform a single demonstration
constraint = i
self.tts_server("I am going to try the skill again.")
finished = self.feedback_demonstration(constraint)
if finished.response:
# request feedback about demonstration from user
feedback_type = constraint == 1
msg = self.request_feedback(feedback_type)
key = i
if msg.response:
rospy.loginfo(
"DEMONSTRATOR: Response was POSITIVE!")
results[key] = 1
else:
rospy.loginfo(
"DEMONSTRATOR: Response was NEGATIVE")
results[key] = 0
# save feedback results
rospy.loginfo("DEMONSTRATOR: Saving feedback...")
encoded_data_string = json.dumps(results)
self.demos_pub.publish(encoded_data_string)
# demonstrate what has been learned
rospy.loginfo("DEMONSTRATOR: Showing what has been learned...")
self.tts_server("Let me show you what I have learned.")
for key, value in results.items():
if value:
constraint = key
self.feedback_demonstration(constraint)
break
break
else:
# wait a second
rospy.loginfo(
"DEMONSTRATOR: Waiting for first demo to be finished...")
time.sleep(1)
self.tts_server("Thank you for helping me learn!")
rospy.loginfo("FINISHED!!!")
if __name__ == '__main__':
try:
obj = Demonstrator()
obj.run()
except rospy.ROSInterruptException:
pass
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2016-2017 China Telecommunication Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import ctypes
import datetime
from bprint import cp
class KLog(object):
#
# Const
#
KLOG_FATAL = ctypes.c_uint(0x00000001)
KLOG_ALERT = ctypes.c_uint(0x00000002)
KLOG_CRIT = ctypes.c_uint(0x00000004)
KLOG_ERR = ctypes.c_uint(0x00000008)
KLOG_WARNING = ctypes.c_uint(0x00000010)
KLOG_NOTICE = ctypes.c_uint(0x00000020)
KLOG_INFO = ctypes.c_uint(0x00000040)
KLOG_DEBUG = ctypes.c_uint(0x00000080)
_filepath = "/dev/stdout"
_to_stderr = False
_to_stdout = True
_to_file = False
_to_network = False
@classmethod
def to_stderr(cls, enable=True):
cls._to_stderr = enable
@classmethod
def to_stdout(cls, enable=True):
cls._to_stdout = enable
@classmethod
def to_file(
cls,
pathfmt="/tmp/klog-%N%Y%R_%S%F%M-%U-%P-%I.log",
size=0,
time=0,
when=0,
enable=True):
cls._to_file = enable
now = datetime.datetime.now()
path = pathfmt
path = path.replace("%N", "%04d" % (now.year))
path = path.replace("%Y", "%02d" % (now.month))
path = path.replace("%R", "%02d" % (now.day))
path = path.replace("%S", "%02d" % (now.hour))
path = path.replace("%F", "%02d" % (now.minute))
path = path.replace("%M", "%02d" % (now.second))
path = path.replace("%I", "0000")
path = path.replace("%U", os.environ.get("USER"))
cls._filepath = path
cls._logfile = open(cls._filepath, "a")
print(cls._logfile)
@classmethod
def to_network(cls, addr="127.0.0.1", port=7777, enable=True):
pass
def __init__(self, frame):
pass
@classmethod
def _log(cls, indi, mask, nl, *str_segs):
now = datetime.datetime.now()
frame = sys._getframe(2)
_x_ln = frame.f_lineno
_x_fn = frame.f_code.co_filename
_x_func = frame.f_code.co_name
ts = "%s.%03d" % (now.strftime("%Y/%m/%d %H:%M:%S"), now.microsecond / 1000)
fullstr = ""
for seg in str_segs:
try:
s = str(seg)
except:
try:
s = unicode(seg)
except:
s = seg.encode("utf-8")
fullstr += s
nl = "\n" if nl else ""
line = "|%s|%s|%s|%s|%s| %s%s" % (cp.r(indi), cp.y(ts),
_x_fn, cp.c(_x_func), cp.c(_x_ln), fullstr, nl)
if cls._to_stderr:
sys.stderr.write(line)
if cls._to_stdout:
sys.stdout.write(line)
sys.stdout.flush()
if cls._to_file:
cls._logfile.write(line)
cls._logfile.flush()
if cls._to_network:
pass
@classmethod
def f(cls, *str_segs):
'''fatal'''
KLog._log('F', cls.KLOG_FATAL, True, *str_segs)
@classmethod
def a(cls, *str_segs):
'''alert'''
KLog._log('A', cls.KLOG_ALERT, True, *str_segs)
@classmethod
def c(cls, *str_segs):
'''critical'''
KLog._log('C', cls.KLOG_CRIT, True, *str_segs)
@classmethod
def e(cls, *str_segs):
'''error'''
KLog._log('E', cls.KLOG_ERR, True, *str_segs)
@classmethod
def w(cls, *str_segs):
'''warning'''
KLog._log('W', cls.KLOG_WARNING, True, *str_segs)
@classmethod
def i(cls, *str_segs):
'''info'''
KLog._log('I', cls.KLOG_INFO, True, *str_segs)
@classmethod
def n(cls, *str_segs):
'''notice'''
KLog._log('N', cls.KLOG_NOTICE, True, *str_segs)
@classmethod
def d(cls, *str_segs):
'''debug'''
KLog._log('D', cls.KLOG_DEBUG, True, *str_segs)
klog = KLog
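if __name__ == "__main__":
    # Minimal usage sketch: each helper logs one line at its severity letter; the message
    # segments are concatenated, and output goes to stdout by default.
    klog.i("service started on port ", 8080)
    klog.w("disk usage at ", 91, "%")
    klog.e("failed to open config: ", "/etc/app.conf")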
|
python
|
# Test case that runs in continuous integration to ensure that PTest isn't broken.
from .base import SingleSatOnlyCase
from .utils import Enums, TestCaseFailure
import os
class CICase(SingleSatOnlyCase):
def run_case_singlesat(self):
self.sim.cycle_no = int(self.sim.flight_controller.read_state("pan.cycle_no"))
if self.sim.cycle_no != 1:
raise TestCaseFailure(f"Cycle number was incorrect: expected {1} got {self.sim.cycle_no}.")
self.sim.flight_controller.write_state("cycle.start", "true")
self.sim.cycle_no = int(self.sim.flight_controller.read_state("pan.cycle_no"))
if self.sim.cycle_no != 2:
raise TestCaseFailure(f"Cycle number was incorrect: expected {2} got {self.sim.cycle_no}.")
self.finish()
|
python
|
from __future__ import division
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import normal_init
from mmcv.cnn import constant_init
from mmdet.core import (PointGenerator, multi_apply, multiclass_rnms,
images_to_levels, unmap)
from mmdet.core import (ConvexPseudoSampler, assign_and_sample, build_assigner)
from mmdet.ops import ConvModule, DeformConv
from ..builder import build_loss
from ..registry import HEADS
from ..utils import bias_init_with_prob
from mmdet.ops.minareabbox import find_minarea_rbbox
from mmdet.ops.iou import convex_iou
INF = 100000000
eps = 1e-12
def levels_to_images(mlvl_tensor, flatten=False):
    """Convert per-level feature tensors into one concatenated tensor per image."""
    batch_size = mlvl_tensor[0].size(0)
    batch_list = [[] for _ in range(batch_size)]
if flatten:
channels = mlvl_tensor[0].size(-1)
else:
channels = mlvl_tensor[0].size(1)
for t in mlvl_tensor:
if not flatten:
t = t.permute(0, 2, 3, 1)
t = t.view(batch_size, -1, channels).contiguous()
for img in range(batch_size):
batch_list[img].append(t[img])
return [torch.cat(item, 0) for item in batch_list]
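# Shape sketch (illustrative sizes): with two feature levels for a batch of 2 images,
#   mlvl = [torch.zeros(2, 4, 8, 8), torch.zeros(2, 4, 4, 4)]
#   out = levels_to_images(mlvl)
# 'out' is a list with one tensor per image; out[0].shape == (8 * 8 + 4 * 4, 4) == (80, 4).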
@HEADS.register_module
class CFAHead(nn.Module):
def __init__(self,
num_classes,
in_channels,
feat_channels=256,
point_feat_channels=256,
stacked_convs=3,
num_points=9,
gradient_mul=0.1,
point_strides=[8, 16, 32, 64, 128],
point_base_scale=4,
conv_cfg=None,
norm_cfg=None,
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox_init=dict(
type='SmoothL1Loss', loss_weight=0.375),
loss_bbox_refine=dict(
type='SmoothL1Loss', loss_weight=0.75),
center_init=True,
transform_method='rotrect',
show_points=False,
use_cfa=False,
topk=6,
anti_factor=0.75):
super(CFAHead, self).__init__()
self.in_channels = in_channels
self.num_classes = num_classes
self.feat_channels = feat_channels
self.point_feat_channels = point_feat_channels
self.stacked_convs = stacked_convs
self.num_points = num_points
self.gradient_mul = gradient_mul
self.point_base_scale = point_base_scale
self.point_strides = point_strides
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
self.sampling = loss_cls['type'] not in ['FocalLoss']
self.loss_cls = build_loss(loss_cls)
self.loss_bbox_init = build_loss(loss_bbox_init)
self.loss_bbox_refine = build_loss(loss_bbox_refine)
self.center_init = center_init
self.transform_method = transform_method
self.show_points = show_points
self.use_cfa = use_cfa
self.topk = topk
self.anti_factor = anti_factor
if self.use_sigmoid_cls:
self.cls_out_channels = self.num_classes - 1
else:
self.cls_out_channels = self.num_classes
self.point_generators = [PointGenerator() for _ in self.point_strides]
# we use deformable conv to extract points features
self.dcn_kernel = int(np.sqrt(num_points))
self.dcn_pad = int((self.dcn_kernel - 1) / 2)
assert self.dcn_kernel * self.dcn_kernel == num_points, \
'The points number should be a square number.'
assert self.dcn_kernel % 2 == 1, \
'The points number should be an odd square number.'
dcn_base = np.arange(-self.dcn_pad,
self.dcn_pad + 1).astype(np.float64)
dcn_base_y = np.repeat(dcn_base, self.dcn_kernel)
dcn_base_x = np.tile(dcn_base, self.dcn_kernel)
dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape(
(-1))
self.dcn_base_offset = torch.tensor(dcn_base_offset).view(1, -1, 1, 1)
self._init_layers()
def _init_layers(self):
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
pts_out_dim = 2 * self.num_points
self.reppoints_cls_conv = DeformConv(self.feat_channels,
self.point_feat_channels,
self.dcn_kernel, 1, self.dcn_pad)
self.reppoints_cls_out = nn.Conv2d(self.point_feat_channels,
self.cls_out_channels, 1, 1, 0)
self.reppoints_pts_init_conv = nn.Conv2d(self.feat_channels,
self.point_feat_channels, 3,
1, 1)
self.reppoints_pts_init_out = nn.Conv2d(self.point_feat_channels,
pts_out_dim, 1, 1, 0)
self.reppoints_pts_refine_conv = DeformConv(self.feat_channels,
self.point_feat_channels,
self.dcn_kernel, 1,
self.dcn_pad)
self.reppoints_pts_refine_out = nn.Conv2d(self.point_feat_channels,
pts_out_dim, 1, 1, 0)
def init_weights(self):
for m in self.cls_convs:
normal_init(m.conv, std=0.01)
for m in self.reg_convs:
normal_init(m.conv, std=0.01)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.reppoints_cls_conv, std=0.01)
normal_init(self.reppoints_cls_out, std=0.01, bias=bias_cls)
normal_init(self.reppoints_pts_init_conv, std=0.01)
normal_init(self.reppoints_pts_init_out, std=0.01)
normal_init(self.reppoints_pts_refine_conv, std=0.01)
normal_init(self.reppoints_pts_refine_out, std=0.01)
def convex_overlaps(self, gt_rbboxes, points):
overlaps = convex_iou(points, gt_rbboxes)
overlaps = overlaps.transpose(1, 0) # [gt, ex]
return overlaps
def points2rotrect(self, pts, y_first=True):
if y_first:
pts = pts.reshape(-1, self.num_points, 2)
pts_dy = pts[:, :, 0::2]
pts_dx = pts[:, :, 1::2]
pts = torch.cat([pts_dx, pts_dy], dim=2).reshape(-1, 2 * self.num_points)
if self.transform_method == 'rotrect':
rotrect_pred = find_minarea_rbbox(pts)
return rotrect_pred
else:
raise NotImplementedError
def forward_single(self, x):
dcn_base_offset = self.dcn_base_offset.type_as(x)
# If we use center_init, the initial reppoints is from center points.
# If we use bounding bbox representation, the initial reppoints is
# from regular grid placed on a pre-defined bbox.
points_init = 0
cls_feat = x
pts_feat = x
for cls_conv in self.cls_convs:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs:
pts_feat = reg_conv(pts_feat)
# initialize reppoints
pts_out_init = self.reppoints_pts_init_out(
self.relu(self.reppoints_pts_init_conv(pts_feat)))
pts_out_init = pts_out_init + points_init
# refine and classify reppoints
pts_out_init_grad_mul = (1 - self.gradient_mul) * pts_out_init.detach() + self.gradient_mul * pts_out_init
dcn_offset = pts_out_init_grad_mul - dcn_base_offset
dcn_cls_feat = self.reppoints_cls_conv(cls_feat, dcn_offset)
cls_out = self.reppoints_cls_out(self.relu(dcn_cls_feat))
pts_out_refine = self.reppoints_pts_refine_out(self.relu(self.reppoints_pts_refine_conv(pts_feat, dcn_offset)))
pts_out_refine = pts_out_refine + pts_out_init.detach()
return cls_out, pts_out_init, pts_out_refine
def forward(self, feats):
return multi_apply(self.forward_single, feats)
def get_points(self, featmap_sizes, img_metas):
num_imgs = len(img_metas)
num_levels = len(featmap_sizes)
# since feature map sizes of all images are the same, we only compute
# points center for one time
multi_level_points = []
for i in range(num_levels):
points = self.point_generators[i].grid_points(
featmap_sizes[i], self.point_strides[i])
multi_level_points.append(points)
points_list = [[point.clone() for point in multi_level_points]
for _ in range(num_imgs)]
# for each image, we compute valid flags of multi level grids
valid_flag_list = []
for img_id, img_meta in enumerate(img_metas):
multi_level_flags = []
for i in range(num_levels):
point_stride = self.point_strides[i]
feat_h, feat_w = featmap_sizes[i]
h, w = img_meta['pad_shape'][:2]
valid_feat_h = min(int(np.ceil(h / point_stride)), feat_h)
valid_feat_w = min(int(np.ceil(w / point_stride)), feat_w)
flags = self.point_generators[i].valid_flags(
(feat_h, feat_w), (valid_feat_h, valid_feat_w))
multi_level_flags.append(flags)
valid_flag_list.append(multi_level_flags)
return points_list, valid_flag_list
def offset_to_pts(self, center_list, pred_list):
pts_list = []
for i_lvl in range(len(self.point_strides)):
pts_lvl = []
for i_img in range(len(center_list)):
pts_center = center_list[i_img][i_lvl][:, :2].repeat(
1, self.num_points)
pts_shift = pred_list[i_lvl][i_img]
yx_pts_shift = pts_shift.permute(1, 2, 0).view(-1, 2 * self.num_points)
y_pts_shift = yx_pts_shift[..., 0::2]
x_pts_shift = yx_pts_shift[..., 1::2]
xy_pts_shift = torch.stack([x_pts_shift, y_pts_shift], -1)
xy_pts_shift = xy_pts_shift.view(*yx_pts_shift.shape[:-1], -1)
pts = xy_pts_shift * self.point_strides[i_lvl] + pts_center
pts_lvl.append(pts)
pts_lvl = torch.stack(pts_lvl, 0)
pts_list.append(pts_lvl)
return pts_list
def loss_single(self, cls_score, pts_pred_init, pts_pred_refine, labels,
label_weights, rbbox_gt_init, convex_weights_init,
rbbox_gt_refine, convex_weights_refine, stride, num_total_samples_refine):
normalize_term = self.point_base_scale * stride
if self.use_cfa:
rbbox_gt_init = rbbox_gt_init.reshape(-1, 8)
convex_weights_init = convex_weights_init.reshape(-1)
            pts_pred_init = pts_pred_init.reshape(-1, 2 * self.num_points)  # [B, NUM(H * W), 2 * num_points]
pos_ind_init = (convex_weights_init > 0).nonzero().reshape(-1)
pts_pred_init_norm = pts_pred_init[pos_ind_init]
rbbox_gt_init_norm = rbbox_gt_init[pos_ind_init]
convex_weights_pos_init = convex_weights_init[pos_ind_init]
loss_pts_init = self.loss_bbox_init(
pts_pred_init_norm / normalize_term,
rbbox_gt_init_norm / normalize_term,
convex_weights_pos_init
)
return 0, loss_pts_init, 0
else:
rbbox_gt_init = rbbox_gt_init.reshape(-1, 8)
convex_weights_init = convex_weights_init.reshape(-1)
# init points loss
            pts_pred_init = pts_pred_init.reshape(-1, 2 * self.num_points)  # [B, NUM(H * W), 2 * num_points]
pos_ind_init = (convex_weights_init > 0).nonzero().reshape(-1)
pts_pred_init_norm = pts_pred_init[pos_ind_init]
rbbox_gt_init_norm = rbbox_gt_init[pos_ind_init]
convex_weights_pos_init = convex_weights_init[pos_ind_init]
loss_pts_init = self.loss_bbox_init(
pts_pred_init_norm / normalize_term,
rbbox_gt_init_norm / normalize_term,
convex_weights_pos_init
)
# refine points loss
rbbox_gt_refine = rbbox_gt_refine.reshape(-1, 8)
pts_pred_refine = pts_pred_refine.reshape(-1, 2 * self.num_points)
convex_weights_refine = convex_weights_refine.reshape(-1)
pos_ind_refine = (convex_weights_refine > 0).nonzero().reshape(-1)
pts_pred_refine_norm = pts_pred_refine[pos_ind_refine]
rbbox_gt_refine_norm = rbbox_gt_refine[pos_ind_refine]
convex_weights_pos_refine = convex_weights_refine[pos_ind_refine]
loss_pts_refine = self.loss_bbox_refine(
pts_pred_refine_norm / normalize_term,
rbbox_gt_refine_norm / normalize_term,
convex_weights_pos_refine
)
# classification loss
labels = labels.reshape(-1)
label_weights = label_weights.reshape(-1)
cls_score = cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)
loss_cls = self.loss_cls(
cls_score,
labels,
label_weights,
avg_factor=num_total_samples_refine)
return loss_cls, loss_pts_init, loss_pts_refine
def loss(self,
cls_scores,
pts_preds_init,
pts_preds_refine,
gt_rbboxes,
gt_labels,
img_metas,
cfg,
gt_rbboxes_ignore=None):
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
assert len(featmap_sizes) == len(self.point_generators)
label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
# target for initial stage
center_list, valid_flag_list = self.get_points(featmap_sizes,
img_metas)
pts_coordinate_preds_init = self.offset_to_pts(center_list,
pts_preds_init)
if self.use_cfa: # get num_proposal_each_lvl and lvl_num
num_proposals_each_level = [(featmap.size(-1) * featmap.size(-2))
for featmap in cls_scores]
num_level = len(featmap_sizes)
assert num_level == len(pts_coordinate_preds_init)
if cfg.init.assigner['type'] == 'ConvexAssigner':
candidate_list = center_list
else:
raise NotImplementedError
cls_reg_targets_init = self.point_target(
candidate_list,
valid_flag_list,
gt_rbboxes,
img_metas,
cfg.init,
gt_rbboxes_ignore_list=gt_rbboxes_ignore,
gt_labels_list=gt_labels,
label_channels=label_channels,
sampling=self.sampling)
(*_, rbbox_gt_list_init, candidate_list_init, convex_weights_list_init,
num_total_pos_init, num_total_neg_init, gt_inds_init) = cls_reg_targets_init
num_total_samples_init = (num_total_pos_init +
num_total_neg_init if self.sampling else num_total_pos_init)
# target for refinement stage
center_list, valid_flag_list = self.get_points(featmap_sizes, img_metas)
pts_coordinate_preds_refine = self.offset_to_pts(center_list, pts_preds_refine)
points_list = []
for i_img, center in enumerate(center_list):
points = []
for i_lvl in range(len(pts_preds_refine)):
points_preds_init_ = pts_preds_init[i_lvl].detach()
points_preds_init_ = points_preds_init_.view(points_preds_init_.shape[0], -1,
*points_preds_init_.shape[2:])
points_shift = points_preds_init_.permute(0, 2, 3, 1) * self.point_strides[i_lvl]
points_center = center[i_lvl][:, :2].repeat(1, self.num_points)
points.append(points_center + points_shift[i_img].reshape(-1, 2 * self.num_points))
points_list.append(points)
if self.use_cfa:
cls_reg_targets_refine = self.cfa_point_target(
points_list,
valid_flag_list,
gt_rbboxes,
img_metas,
cfg.refine,
gt_rbboxes_ignore_list=gt_rbboxes_ignore,
gt_labels_list=gt_labels,
label_channels=label_channels,
sampling=self.sampling)
(labels_list, label_weights_list, rbbox_gt_list_refine,
_, convex_weights_list_refine, pos_inds_list_refine,
pos_gt_index_list_refine) = cls_reg_targets_refine
cls_scores = levels_to_images(cls_scores)
cls_scores = [
item.reshape(-1, self.cls_out_channels) for item in cls_scores
]
pts_coordinate_preds_init_cfa = levels_to_images(
pts_coordinate_preds_init, flatten=True)
pts_coordinate_preds_init_cfa = [
item.reshape(-1, 2 * self.num_points) for item in pts_coordinate_preds_init_cfa
]
pts_coordinate_preds_refine = levels_to_images(
pts_coordinate_preds_refine, flatten=True)
pts_coordinate_preds_refine = [
item.reshape(-1, 2 * self.num_points) for item in pts_coordinate_preds_refine
]
with torch.no_grad():
pos_losses_list, = multi_apply(self.get_pos_loss, cls_scores,
pts_coordinate_preds_init_cfa, labels_list,
rbbox_gt_list_refine, label_weights_list,
convex_weights_list_refine, pos_inds_list_refine)
labels_list, label_weights_list, convex_weights_list_refine, num_pos, pos_normalize_term = multi_apply(
self.cfa_reassign,
pos_losses_list,
labels_list,
label_weights_list,
pts_coordinate_preds_init_cfa,
convex_weights_list_refine,
gt_rbboxes,
pos_inds_list_refine,
pos_gt_index_list_refine,
num_proposals_each_level=num_proposals_each_level,
num_level=num_level
)
num_pos = sum(num_pos)
# convert all tensor list to a flatten tensor
cls_scores = torch.cat(cls_scores, 0).view(-1, cls_scores[0].size(-1))
pts_preds_refine = torch.cat(pts_coordinate_preds_refine,
0).view(-1, pts_coordinate_preds_refine[0].size(-1))
labels = torch.cat(labels_list, 0).view(-1)
labels_weight = torch.cat(label_weights_list, 0).view(-1)
rbbox_gt_refine = torch.cat(rbbox_gt_list_refine,
0).view(-1, rbbox_gt_list_refine[0].size(-1))
convex_weights_refine = torch.cat(convex_weights_list_refine, 0).view(-1)
pos_normalize_term = torch.cat(pos_normalize_term, 0).reshape(-1)
pos_inds_flatten = (labels > 0).nonzero().reshape(-1)
assert len(pos_normalize_term) == len(pos_inds_flatten)
if num_pos:
losses_cls = self.loss_cls(
cls_scores, labels, labels_weight, avg_factor=num_pos)
pos_pts_pred_refine = pts_preds_refine[pos_inds_flatten]
pos_rbbox_gt_refine = rbbox_gt_refine[pos_inds_flatten]
pos_convex_weights_refine = convex_weights_refine[pos_inds_flatten]
losses_pts_refine = self.loss_bbox_refine(
pos_pts_pred_refine / pos_normalize_term.reshape(-1, 1),
pos_rbbox_gt_refine / pos_normalize_term.reshape(-1, 1),
pos_convex_weights_refine
)
else:
losses_cls = cls_scores.sum() * 0
losses_pts_refine = pts_preds_refine.sum() * 0
None_list = [None] * num_level
_, losses_pts_init, _ = multi_apply(
self.loss_single,
None_list,
pts_coordinate_preds_init,
None_list,
None_list,
None_list,
rbbox_gt_list_init,
convex_weights_list_init,
None_list,
None_list,
self.point_strides,
num_total_samples_refine=None,
)
loss_dict_all = {
'loss_cls': losses_cls,
'loss_pts_init': losses_pts_init,
'loss_pts_refine': losses_pts_refine
}
return loss_dict_all
## without cfa
else:
cls_reg_targets_refine = self.point_target(
points_list,
valid_flag_list,
gt_rbboxes,
img_metas,
cfg.refine,
gt_rbboxes_ignore_list=gt_rbboxes_ignore,
gt_labels_list=gt_labels,
label_channels=label_channels,
sampling=self.sampling,
featmap_sizes=featmap_sizes)
(labels_list, label_weights_list, rbbox_gt_list_refine,
candidate_list_refine, convex_weights_list_refine, num_total_pos_refine,
num_total_neg_refine, gt_inds_refine) = cls_reg_targets_refine
num_total_samples_refine = (
num_total_pos_refine +
num_total_neg_refine if self.sampling else num_total_pos_refine)
losses_cls, losses_pts_init, losses_pts_refine = multi_apply(
self.loss_single,
cls_scores,
pts_coordinate_preds_init,
pts_coordinate_preds_refine,
labels_list,
label_weights_list,
rbbox_gt_list_init,
convex_weights_list_init,
rbbox_gt_list_refine,
convex_weights_list_refine,
self.point_strides,
num_total_samples_refine=num_total_samples_refine
)
loss_dict_all = {
'loss_cls': losses_cls,
'loss_pts_init': losses_pts_init,
'loss_pts_refine': losses_pts_refine
}
return loss_dict_all
def get_pos_loss(self, cls_score, pts_pred, label, rbbox_gt,
label_weight, convex_weight, pos_inds):
pos_scores = cls_score[pos_inds]
pos_pts_pred = pts_pred[pos_inds]
pos_rbbox_gt = rbbox_gt[pos_inds]
pos_label = label[pos_inds]
pos_label_weight = label_weight[pos_inds]
pos_convex_weight = convex_weight[pos_inds]
loss_cls = self.loss_cls(
pos_scores,
pos_label,
pos_label_weight,
avg_factor=self.loss_cls.loss_weight,
reduction_override='none')
loss_bbox = self.loss_bbox_refine(
pos_pts_pred,
pos_rbbox_gt,
pos_convex_weight,
avg_factor=self.loss_cls.loss_weight,
reduction_override='none')
loss_cls = loss_cls.sum(-1)
pos_loss = loss_bbox + loss_cls
return pos_loss,
def cfa_reassign(self, pos_losses, label, label_weight, pts_pred_init, convex_weight, gt_rbbox,
pos_inds, pos_gt_inds, num_proposals_each_level=None, num_level=None):
if len(pos_inds) == 0:
return label, label_weight, convex_weight, 0, torch.tensor([]).type_as(convex_weight)
num_gt = pos_gt_inds.max()
num_proposals_each_level_ = num_proposals_each_level.copy()
num_proposals_each_level_.insert(0, 0)
inds_level_interval = np.cumsum(num_proposals_each_level_)
pos_level_mask = []
for i in range(num_level):
mask = (pos_inds >= inds_level_interval[i]) & (
pos_inds < inds_level_interval[i + 1])
pos_level_mask.append(mask)
overlaps_matrix = self.convex_overlaps(gt_rbbox, pts_pred_init)
pos_inds_after_cfa = []
ignore_inds_after_cfa = []
re_assign_weights_after_cfa = []
for gt_ind in range(num_gt):
pos_inds_cfa = []
pos_loss_cfa = []
pos_overlaps_init_cfa = []
gt_mask = pos_gt_inds == (gt_ind + 1)
for level in range(num_level):
level_mask = pos_level_mask[level]
level_gt_mask = level_mask & gt_mask
value, topk_inds = pos_losses[level_gt_mask].topk(
min(level_gt_mask.sum(), self.topk), largest=False)
pos_inds_cfa.append(pos_inds[level_gt_mask][topk_inds])
pos_loss_cfa.append(value)
pos_overlaps_init_cfa.append(overlaps_matrix[:, pos_inds[level_gt_mask][topk_inds]])
pos_inds_cfa = torch.cat(pos_inds_cfa)
pos_loss_cfa = torch.cat(pos_loss_cfa)
pos_overlaps_init_cfa = torch.cat(pos_overlaps_init_cfa, 1)
if len(pos_inds_cfa) < 2:
pos_inds_after_cfa.append(pos_inds_cfa)
ignore_inds_after_cfa.append(pos_inds_cfa.new_tensor([]))
re_assign_weights_after_cfa.append(pos_loss_cfa.new_ones([len(pos_inds_cfa)]))
else:
pos_loss_cfa, sort_inds = pos_loss_cfa.sort()
pos_inds_cfa = pos_inds_cfa[sort_inds]
pos_overlaps_init_cfa = pos_overlaps_init_cfa[:, sort_inds].reshape(-1, len(pos_inds_cfa))
pos_loss_cfa = pos_loss_cfa.reshape(-1)
loss_mean = pos_loss_cfa.mean()
loss_var = pos_loss_cfa.var()
gauss_prob_density = (-(pos_loss_cfa - loss_mean) ** 2 / loss_var).exp() / loss_var.sqrt()
index_inverted, _ = torch.arange(len(gauss_prob_density)).sort(descending=True)
gauss_prob_inverted = torch.cumsum(gauss_prob_density[index_inverted], 0)
gauss_prob = gauss_prob_inverted[index_inverted]
gauss_prob_norm = (gauss_prob - gauss_prob.min()) / (gauss_prob.max() - gauss_prob.min())
# splitting by gradient consistency
loss_curve = gauss_prob_norm * pos_loss_cfa
_, max_thr = loss_curve.topk(1)
reweights = gauss_prob_norm[:max_thr + 1]
# feature anti-aliasing coefficient
pos_overlaps_init_cfa = pos_overlaps_init_cfa[:, :max_thr + 1]
overlaps_level = pos_overlaps_init_cfa[gt_ind] / (pos_overlaps_init_cfa.sum(0) + 1e-6)
reweights = self.anti_factor * overlaps_level * reweights + 1e-6
re_assign_weights = reweights.reshape(-1) / reweights.sum() * torch.ones(len(reweights)).type_as(gauss_prob_norm).sum()
pos_inds_temp = pos_inds_cfa[:max_thr + 1]
ignore_inds_temp = pos_inds_cfa.new_tensor([])
pos_inds_after_cfa.append(pos_inds_temp)
ignore_inds_after_cfa.append(ignore_inds_temp)
re_assign_weights_after_cfa.append(re_assign_weights)
pos_inds_after_cfa = torch.cat(pos_inds_after_cfa)
ignore_inds_after_cfa = torch.cat(ignore_inds_after_cfa)
re_assign_weights_after_cfa = torch.cat(re_assign_weights_after_cfa)
reassign_mask = (pos_inds.unsqueeze(1) != pos_inds_after_cfa).all(1)
reassign_ids = pos_inds[reassign_mask]
label[reassign_ids] = 0
label_weight[ignore_inds_after_cfa] = 0
convex_weight[reassign_ids] = 0
num_pos = len(pos_inds_after_cfa)
re_assign_weights_mask = (pos_inds.unsqueeze(1) == pos_inds_after_cfa).any(1)
reweight_ids = pos_inds[re_assign_weights_mask]
label_weight[reweight_ids] = re_assign_weights_after_cfa
convex_weight[reweight_ids] = re_assign_weights_after_cfa
pos_level_mask_after_cfa = []
for i in range(num_level):
mask = (pos_inds_after_cfa >= inds_level_interval[i]) & (
pos_inds_after_cfa < inds_level_interval[i + 1])
pos_level_mask_after_cfa.append(mask)
pos_level_mask_after_cfa = torch.stack(pos_level_mask_after_cfa, 0).type_as(label)
pos_normalize_term = pos_level_mask_after_cfa * (
self.point_base_scale *
torch.as_tensor(self.point_strides).type_as(label)).reshape(-1, 1)
pos_normalize_term = pos_normalize_term[pos_normalize_term > 0].type_as(convex_weight)
assert len(pos_normalize_term) == len(pos_inds_after_cfa)
return label, label_weight, convex_weight, num_pos, pos_normalize_term
def point_target(self,
proposals_list,
valid_flag_list,
gt_rbboxes_list,
img_metas,
cfg,
gt_rbboxes_ignore_list=None,
gt_labels_list=None,
label_channels=1,
sampling=True,
unmap_outputs=True,
featmap_sizes=None):
num_imgs = len(img_metas)
assert len(proposals_list) == len(valid_flag_list) == num_imgs
# points number of multi levels
num_level_proposals = [points.size(0) for points in proposals_list[0]]
# concat all level points and flags to a single tensor
for i in range(num_imgs):
assert len(proposals_list[i]) == len(valid_flag_list[i])
proposals_list[i] = torch.cat(proposals_list[i])
valid_flag_list[i] = torch.cat(valid_flag_list[i])
# compute targets for each image
if gt_rbboxes_ignore_list is None:
gt_rbboxes_ignore_list = [None for _ in range(num_imgs)]
if gt_labels_list is None:
gt_labels_list = [None for _ in range(num_imgs)]
all_overlaps_rotate_list = [None] * 4
(all_labels, all_label_weights, all_rbbox_gt, all_proposals,
all_proposal_weights, pos_inds_list, neg_inds_list, all_gt_inds_list) = multi_apply(
self.point_target_single,
proposals_list,
valid_flag_list,
gt_rbboxes_list,
gt_rbboxes_ignore_list,
gt_labels_list,
all_overlaps_rotate_list,
cfg=cfg,
label_channels=label_channels,
sampling=sampling,
unmap_outputs=unmap_outputs)
# no valid points
if any([labels is None for labels in all_labels]):
return None
# sampled points of all images
num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
labels_list = images_to_levels(all_labels, num_level_proposals)
label_weights_list = images_to_levels(all_label_weights,
num_level_proposals)
rbbox_gt_list = images_to_levels(all_rbbox_gt, num_level_proposals)
proposals_list = images_to_levels(all_proposals, num_level_proposals)
proposal_weights_list = images_to_levels(all_proposal_weights,
num_level_proposals)
gt_inds_list = images_to_levels(all_gt_inds_list, num_level_proposals)
return (labels_list, label_weights_list, rbbox_gt_list, proposals_list,
proposal_weights_list, num_total_pos, num_total_neg, gt_inds_list)
def point_target_single(self,
flat_proposals,
valid_flags,
gt_rbboxes,
gt_rbboxes_ignore,
gt_labels,
overlaps,
cfg,
label_channels=1,
sampling=True,
unmap_outputs=True):
inside_flags = valid_flags
if not inside_flags.any():
return (None,) * 7
# assign gt and sample proposals
proposals = flat_proposals[inside_flags, :]
if sampling:
assign_result, sampling_result = assign_and_sample(
proposals, gt_rbboxes, gt_rbboxes_ignore, None, cfg)
else:
bbox_assigner = build_assigner(cfg.assigner)
assign_result = bbox_assigner.assign(proposals, gt_rbboxes, overlaps,
gt_rbboxes_ignore, gt_labels)
bbox_sampler = ConvexPseudoSampler()
sampling_result = bbox_sampler.sample(assign_result, proposals,
gt_rbboxes)
gt_inds = assign_result.gt_inds
num_valid_proposals = proposals.shape[0]
rbbox_gt = proposals.new_zeros([num_valid_proposals, 8])
pos_proposals = torch.zeros_like(proposals)
proposals_weights = proposals.new_zeros(num_valid_proposals)
labels = proposals.new_zeros(num_valid_proposals, dtype=torch.long)
label_weights = proposals.new_zeros(num_valid_proposals, dtype=torch.float)
pos_inds = sampling_result.pos_inds
neg_inds = sampling_result.neg_inds
if len(pos_inds) > 0:
pos_gt_rbboxes = sampling_result.pos_gt_rbboxes
rbbox_gt[pos_inds, :] = pos_gt_rbboxes
pos_proposals[pos_inds, :] = proposals[pos_inds, :]
proposals_weights[pos_inds] = 1.0
if gt_labels is None:
labels[pos_inds] = 1
else:
labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds]
if cfg.pos_weight <= 0:
label_weights[pos_inds] = 1.0
else:
label_weights[pos_inds] = cfg.pos_weight
if len(neg_inds) > 0:
label_weights[neg_inds] = 1.0
# map up to original set of proposals
if unmap_outputs:
num_total_proposals = flat_proposals.size(0)
labels = unmap(labels, num_total_proposals, inside_flags)
label_weights = unmap(label_weights, num_total_proposals, inside_flags)
rbbox_gt = unmap(rbbox_gt, num_total_proposals, inside_flags)
pos_proposals = unmap(pos_proposals, num_total_proposals, inside_flags)
proposals_weights = unmap(proposals_weights, num_total_proposals,
inside_flags)
gt_inds = unmap(gt_inds, num_total_proposals, inside_flags)
return (labels, label_weights, rbbox_gt, pos_proposals, proposals_weights,
pos_inds, neg_inds, gt_inds)
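# CFA variant of target assignment: the per-image assignment is identical, but
# results are returned per image together with the positive sample indices and
# their matched ground-truth indices, instead of being regrouped per level.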
def cfa_point_target(self,
proposals_list,
valid_flag_list,
gt_rbboxes_list,
img_metas,
cfg,
gt_rbboxes_ignore_list=None,
gt_labels_list=None,
label_channels=1,
sampling=True,
unmap_outputs=True):
num_imgs = len(img_metas)
assert len(proposals_list) == len(valid_flag_list) == num_imgs
# concat all level points and flags to a single tensor
for i in range(num_imgs):
assert len(proposals_list[i]) == len(valid_flag_list[i])
proposals_list[i] = torch.cat(proposals_list[i])
valid_flag_list[i] = torch.cat(valid_flag_list[i])
# compute targets for each image
if gt_rbboxes_ignore_list is None:
gt_rbboxes_ignore_list = [None for _ in range(num_imgs)]
if gt_labels_list is None:
gt_labels_list = [None for _ in range(num_imgs)]
all_overlaps_rotate_list = [None] * num_imgs  # one placeholder overlaps entry per image
(all_labels, all_label_weights, all_rbbox_gt, all_proposals,
all_proposal_weights, pos_inds_list, neg_inds_list, all_gt_inds) = multi_apply(
self.cfa_point_target_single,
proposals_list,
valid_flag_list,
gt_rbboxes_list,
gt_rbboxes_ignore_list,
gt_labels_list,
all_overlaps_rotate_list,
cfg=cfg,
label_channels=label_channels,
sampling=sampling,
unmap_outputs=unmap_outputs)
pos_inds = []
pos_gt_index = []
for i, single_labels in enumerate(all_labels):
pos_mask = single_labels > 0
pos_inds.append(pos_mask.nonzero().view(-1))
pos_gt_index.append(all_gt_inds[i][pos_mask.nonzero().view(-1)])
return (all_labels, all_label_weights, all_rbbox_gt, all_proposals,
all_proposal_weights, pos_inds, pos_gt_index)
def cfa_point_target_single(self,
flat_proposals,
valid_flags,
gt_rbboxes,
gt_rbboxes_ignore,
gt_labels,
overlaps,
cfg,
label_channels=1,
sampling=True,
unmap_outputs=True):
inside_flags = valid_flags
if not inside_flags.any():
return (None,) * 8  # match the number of values returned below
# assign gt and sample proposals
proposals = flat_proposals[inside_flags, :]
if sampling:
assign_result, sampling_result = assign_and_sample(
proposals, gt_rbboxes, gt_rbboxes_ignore, None, cfg)
else:
bbox_assigner = build_assigner(cfg.assigner)
assign_result = bbox_assigner.assign(proposals, gt_rbboxes, overlaps,
gt_rbboxes_ignore, gt_labels)
bbox_sampler = ConvexPseudoSampler()
sampling_result = bbox_sampler.sample(assign_result, proposals,
gt_rbboxes)
gt_inds = assign_result.gt_inds
num_valid_proposals = proposals.shape[0]
rbbox_gt = proposals.new_zeros([num_valid_proposals, 8])
pos_proposals = torch.zeros_like(proposals)
proposals_weights = proposals.new_zeros(num_valid_proposals)
labels = proposals.new_zeros(num_valid_proposals, dtype=torch.long)
label_weights = proposals.new_zeros(num_valid_proposals, dtype=torch.float)
pos_inds = sampling_result.pos_inds
neg_inds = sampling_result.neg_inds
if len(pos_inds) > 0:
pos_gt_rbboxes = sampling_result.pos_gt_rbboxes
rbbox_gt[pos_inds, :] = pos_gt_rbboxes
pos_proposals[pos_inds, :] = proposals[pos_inds, :]
proposals_weights[pos_inds] = 1.0
if gt_labels is None:
labels[pos_inds] = 1
else:
labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds]
if cfg.pos_weight <= 0:
label_weights[pos_inds] = 1.0
else:
label_weights[pos_inds] = cfg.pos_weight
if len(neg_inds) > 0:
label_weights[neg_inds] = 1.0
# map up to original set of proposals
if unmap_outputs:
num_total_proposals = flat_proposals.size(0)
labels = unmap(labels, num_total_proposals, inside_flags)
label_weights = unmap(label_weights, num_total_proposals, inside_flags)
rbbox_gt = unmap(rbbox_gt, num_total_proposals, inside_flags)
pos_proposals = unmap(pos_proposals, num_total_proposals, inside_flags)
proposals_weights = unmap(proposals_weights, num_total_proposals,
inside_flags)
gt_inds = unmap(gt_inds, num_total_proposals, inside_flags)
return (labels, label_weights, rbbox_gt, pos_proposals, proposals_weights,
pos_inds, neg_inds, gt_inds)
def get_bboxes(self,
cls_scores,
pts_preds_init,
pts_preds_refine,
img_metas,
cfg,
rescale=False,
nms=True):
assert len(cls_scores) == len(pts_preds_refine)
num_levels = len(cls_scores)
mlvl_points = [
self.point_generators[i].grid_points(cls_scores[i].size()[-2:],
self.point_strides[i])
for i in range(num_levels)
]
result_list = []
for img_id in range(len(img_metas)):
cls_score_list = [
cls_scores[i][img_id].detach() for i in range(num_levels)
]
points_pred_list = [
pts_preds_refine[i][img_id].detach()
for i in range(num_levels)
]
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
proposals = self.get_bboxes_single(cls_score_list, points_pred_list,
mlvl_points, img_shape,
scale_factor, cfg, rescale, nms)
result_list.append(proposals)
return result_list
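# Per-image decoding: for each level, scores are optionally reduced to the
# top nms_pre candidates, point offsets are converted to rotated rectangles,
# scaled by the level stride and shifted to the point centers, then all levels
# are merged and passed through rotated multiclass NMS.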
def get_bboxes_single(self,
cls_scores,
points_preds,
mlvl_points,
img_shape,
scale_factor,
cfg,
rescale=False,
nms=True):
assert len(cls_scores) == len(points_preds) == len(mlvl_points)
mlvl_bboxes = []
mlvl_scores = []
if self.show_points:
mlvl_reppoints = []
for i_lvl, (cls_score, points_pred, points) in enumerate(
zip(cls_scores, points_preds, mlvl_points)):
assert cls_score.size()[-2:] == points_pred.size()[-2:]
cls_score = cls_score.permute(1, 2,
0).reshape(-1, self.cls_out_channels)
if self.use_sigmoid_cls:
scores = cls_score.sigmoid()
else:
scores = cls_score.softmax(-1)
points_pred = points_pred.permute(1, 2, 0).reshape(-1, 2 * self.num_points)
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0 and scores.shape[0] > nms_pre:
if self.use_sigmoid_cls:
max_scores, _ = scores.max(dim=1)
else:
max_scores, _ = scores[:, 1:].max(dim=1)
_, topk_inds = max_scores.topk(nms_pre)
points = points[topk_inds, :]
points_pred = points_pred[topk_inds, :]
scores = scores[topk_inds, :]
bbox_pred = self.points2rotrect(points_pred, y_first=True)
bbox_pos_center = points[:, :2].repeat(1, 4)
bboxes = bbox_pred * self.point_strides[i_lvl] + bbox_pos_center
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
if self.show_points:
points_pred = points_pred.reshape(-1, self.num_points, 2)
points_pred_dy = points_pred[:, :, 0::2]
points_pred_dx = points_pred[:, :, 1::2]
pts = torch.cat([points_pred_dx, points_pred_dy], dim=2).reshape(-1, 2 * self.num_points)
pts_pos_center = points[:, :2].repeat(1, self.num_points)
pts = pts * self.point_strides[i_lvl] + pts_pos_center
mlvl_reppoints.append(pts)
mlvl_bboxes = torch.cat(mlvl_bboxes)
if self.show_points:
mlvl_reppoints = torch.cat(mlvl_reppoints)
if rescale:
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
if self.show_points:
    mlvl_reppoints /= mlvl_reppoints.new_tensor(scale_factor)
mlvl_scores = torch.cat(mlvl_scores)
if self.use_sigmoid_cls:
padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
mlvl_scores = torch.cat([padding, mlvl_scores], dim=1)
if nms:
det_bboxes, det_labels = multiclass_rnms(mlvl_bboxes, mlvl_scores,
cfg.score_thr, cfg.nms,
cfg.max_per_img, multi_reppoints=mlvl_reppoints if self.show_points else None)
return det_bboxes, det_labels
else:
return mlvl_bboxes, mlvl_scores
|
python
|
"""
Django settings for sentry_django_example project.
Generated by 'django-admin startproject' using Django 3.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
from uuid import uuid4
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "django-insecure-4kzfiq7vb(t0+jbl#vq)u=%06ouf)n*=l%730c8=tk(wkm9i9o"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# PostHog Setup (can be a separate app)
import posthog
# You can find this key on the /setup page in PostHog
posthog.api_key = "LXP6nQXvo-2TCqGVrWvPah8uJIyVykoMmhnEkEBi5PA" # TODO: replace with your api key
posthog.personal_api_key = ""
# Where you host PostHog, with no trailing /.
# You can remove this line if you're using posthog.com
posthog.host = "http://127.0.0.1:8000"
from posthog.sentry.posthog_integration import PostHogIntegration
PostHogIntegration.organization = "posthog" # TODO: your sentry organization
# PostHogIntegration.prefix = # TODO: your self hosted Sentry url. (default: https://sentry.io/organizations/)
# Since Sentry doesn't allow Integrations configuration (see https://github.com/getsentry/sentry-python/blob/master/sentry_sdk/integrations/__init__.py#L171-L183)
# we work around this by setting static class variables beforehand
# Sentry Setup
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
sentry_sdk.init(
dsn="https://[email protected]/5624115", # TODO: your Sentry DSN here
integrations=[DjangoIntegration(), PostHogIntegration()],
# Set traces_sample_rate to 1.0 to capture 100%
# of transactions for performance monitoring.
# We recommend adjusting this value in production.
traces_sample_rate=1.0,
# If you wish to associate users to errors (assuming you are using
# django.contrib.auth) you may enable sending PII data.
send_default_pii=True,
)
POSTHOG_DJANGO = {
"distinct_id": lambda request: str(uuid4()) # TODO: your logic for generating unique ID, given the request object
}
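# Illustrative alternative (assumption, not required by PostHog): reuse the
# authenticated user's id as the distinct_id and fall back to a random UUID
# for anonymous requests, e.g.
# "distinct_id": lambda request: (
#     str(request.user.id) if request.user.is_authenticated else str(uuid4())
# )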
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"posthog.sentry.django.PosthogDistinctIdMiddleware",
]
ROOT_URLCONF = "sentry_django_example.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "sentry_django_example.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": BASE_DIR / "db.sqlite3",
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = "/static/"
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
|
python
|
""" calc.py - simple code to show how to execute testing
"""
def sum(a: float, b: float) -> float:
"""sum adds two numbers
Args:
a (float): First number
b (float): Second number
Returns:
float: sum of a and b
>>> sum(2,3)
5
"""
return a+b
def mul(a: float, b: float) -> float:
"""mul multiples two numbers
Args:
a (float): First number
b (float): Second number
Returns:
float: product of a and b
>>> mul(2,3)
6
"""
return a*b
if __name__ == '__main__':
import doctest
doctest.testmod(verbose=True)
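# Hypothetical ways to run the doctests from a shell, assuming this file is
# saved as calc.py:
#   python calc.py                 # runs doctest.testmod(verbose=True) above
#   python -m doctest calc.py -v   # runs the same doctests without the __main__ block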
|
python
|
#!/usr/bin/env python
"""
File metadata consistency scanner.
Python requirements: Python 2.7, :mod:`psycopg2`, Enstore modules.
"""
# Python imports
from __future__ import division, print_function, unicode_literals
import atexit
import ConfigParser
import copy
import ctypes
import datetime
import errno
import fcntl
import functools
import grp
import hashlib
import inspect
import locale
import itertools
import json
import math
import multiprocessing
import optparse
import os
import pwd
import Queue
import random
import stat
import sys
import threading
import time
# Chimera and Enstore imports
import chimera
import checksum as enstore_checksum
import configuration_client as enstore_configuration_client
import e_errors as enstore_errors
import enstore_constants
import enstore_functions3
import info_client as enstore_info_client
import namespace as enstore_namespace
import volume_family as enstore_volume_family
# Other imports
import psycopg2.extras # This imports psycopg2 as well. @UnresolvedImport
# Setup global environment
locale.setlocale(locale.LC_ALL, '')
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
# Specify settings
settings = {
'cache_volume_info': False, # There is no speedup if this is True, and there is
# potentially a gradual slow-down.
# If True, ensure sys.version_info >= (2, 7).
# See MPSubDictCache.__init__
'checkpoint_max_age': 60, # (days)
'checkpoint_write_interval': 5, # (seconds)
'fs_root': '/pnfs/fs/usr',
#'fs_root': '/pnfs/fs/usr/astro/fulla', # for quick test
#'fs_root': '/pnfs/fs/usr/astro/fulla/BACKUP', # for quicker test
'num_scan_processes_per_cpu': 3,
'scriptname_root': os.path.splitext(os.path.basename(__file__))[0],
'sleep_time_at_exit': 0.01, # (seconds)
'status_interval': 600, # (seconds)
}
class Memoize(object):
"""
Cache the return value of a method.
This class is meant to be used as a decorator of methods. The return value
from a given method invocation will be cached on the instance whose method
was invoked. All arguments passed to a method decorated with memoize must
be hashable.
If a memoized method is invoked directly on its class the result will not
be cached. Instead the method will be invoked like a static method::
class Obj(object):
@memoize
def add_to(self, arg):
return self + arg
Obj.add_to(1) # not enough arguments
Obj.add_to(1, 2) # returns 3, result is not cached
.. note :: This class is derived from
http://code.activestate.com/recipes/577452/history/1/.
.. warning :: This class should not be used directly, as doing so can
prevent Sphinx from documenting the decorated method correctly. Use the
:func:`memoize` or :func:`memoize_property` decorator instead.
"""
def __init__(self, func):
self.func = func
#self.__name__ = self.func.__name__ # For Sphinx.
#self.__doc__ = self.func.__doc__ # For Sphinx.
def __get__(self, obj, objtype=None):
if obj is None:
return self.func
return functools.partial(self, obj)
def __call__(self, *args, **kw):
obj = args[0]
try:
cache = obj.__cache
except AttributeError:
cache = obj.__cache = {}
key = (self.func, args[1:], frozenset(kw.items()))
try:
res = cache[key]
except KeyError:
res = cache[key] = self.func(*args, **kw)
return res
def memoize(f):
"""
Return a memoization decorator for methods of classes.
This wraps the :class:`Memoize` class using :py:func:`functools.wraps`.
This allows the decorated method to be documented correctly by Sphinx.
.. note :: This function is derived from
http://stackoverflow.com/a/6394966/832230.
"""
memoized = Memoize(f)
@functools.wraps(f)
def helper(*args, **kws):
return memoized(*args, **kws)
return helper
def memoize_property(f):
"""
Return a memoization decorator for methods of classes, with it being usable
as a :obj:`property`.
This uses the :func:`memoize` function.
"""
return property(memoize(f))
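# Illustrative sketch (not used by the scanner): typical application of the
# `memoize` decorator to an instance method; the name `_MemoizeExample` is
# only for demonstration.
class _MemoizeExample(object):
    @memoize
    def double(self, arg):
        return 2 * arg
# _MemoizeExample().double(21) computes 42 once per instance and argument;
# repeated calls with the same argument on the same instance return the cached
# value without re-running the method body.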
def do_random_test(negexp=3):
"""
Return :obj:`True` if a random probability is not greater than a threshold,
otherwise return :obj:`False`.
:type negexp: :obj:`int` (non-negative)
:arg negexp: This is the negative exponent that is used to compute a
probability threshold. Higher values of ``negexp`` make the threshold
exponentially smaller. A value of 0 naturally makes the threshold equal
1, in which case the returned value will be :obj:`True`.
:rtype: :obj:`bool`
"""
# Compare random probability with threshold 1e-negexp
return random.random() <= 10**(-1*negexp)
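# For example, do_random_test(3) uses a threshold of 10**-3 = 0.001, so it
# returns True for roughly 0.1% of calls; do_random_test(0) always returns
# True because random.random() is always <= 1.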
class PeriodicRunner:
"Run a callable periodically."
_concurrency_classes = {'thread': threading.Thread,
'process': multiprocessing.Process,}
def __init__(self, is_active_indicator, target, interval, concurrency,
name=None):
"""
Run the ``target`` callable periodically with the specified idle
``interval`` in seconds between each run.
:type target: :obj:`callable`
:arg target: This is a callable which is to be run periodically. Only
one instance of the target is called and run at a time. Even so,
the target should be thread or process safe, depending upon the
indicated ``concurrency`` type. The target is also run once upon
program or thread termination.
:type is_active_indicator: :obj:`multiprocessing.Value` or
:obj:`multiprocessing.RawValue`
:arg is_active_indicator: This must have an attribute ``value`` which
is evaluated as a :obj:`bool`. The target is called only so long as
``is_active_indicator.value`` evaluates to :obj:`True`. If and when
this indicator evaluates to :obj:`False`, the loop is terminated,
although the target is then still run one final time.
:type interval: :obj:`int` or :obj:`float` (positive)
:arg interval: number of seconds to sleep between each run of the
target.
:type concurrency: :obj:`str`
:arg concurrency: This can be ``thread`` or ``process``, indicating
whether the target should run in a new thread or a new process.
:type name: :obj:`str` or :obj:`None`
:arg name: This is the name assigned to the target thread or process.
If :obj:`None`, it is determined automatically.
"""
# Setup variables
self._is_active = is_active_indicator
self._target = target
self._interval = interval
self._concurrency_type = concurrency
self._target_name = name
# Setup and start runner
self._setup_runner()
self._runner_instance.start()
def _setup_runner(self):
"""Setup the target runner."""
# Setup variables
self._runner_class = self._concurrency_classes[self._concurrency_type]
if self._target_name is None:
self._target_name = '{0}{1}'.format(self._target.__name__.title(),
self._concurrency_type.title())
# Setup runner
self._runner_instance = self._runner_class(target=self._target_runner,
name=self._target_name)
self._runner_instance.daemon = True
atexit.register(self._target)
def _target_runner(self):
"""Run the target periodically."""
target = self._target
interval = self._interval
try:
while self._is_active.value:
target()
time.sleep(interval)
except (KeyboardInterrupt, SystemExit):
pass
finally:
target()
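# Illustrative usage sketch for PeriodicRunner (assumption: a 30-second
# heartbeat; the function below is never called by the scanner).
def _periodic_runner_example():
    """Run a heartbeat callable in a daemon thread every 30 seconds until
    ``flag.value`` is set to False, after which it runs one final time."""
    flag = multiprocessing.RawValue(ctypes.c_bool, True)
    def heartbeat():
        sys.stdout.write('scan still active\n')
    PeriodicRunner(flag, heartbeat, 30, 'thread', name='HeartbeatThread')
    return flag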
class FileParser(ConfigParser.SafeConfigParser):
"""
Provide a file parser based on :class:`ConfigParser.SafeConfigParser`.
Stored options are case-sensitive.
"""
#_filepath_suffixes_lock = multiprocessing.Lock() # Must be a class attr.
#_filepath_suffixes_in_use = multiprocessing.Manager().list() # For Py2.7+
# Note: multiprocessing.Manager().list() hangs on import in Python 2.6.3.
# This Python bug is not expected to be present in Python 2.7+.
def __init__(self, is_active_indicator, filepath_suffix=None):
"""
Initialize the parser.
:type is_active_indicator: :obj:`multiprocessing.Value` or
:obj:`multiprocessing.RawValue`
:arg is_active_indicator: This must have an attribute ``value`` which
is evaluated as a :obj:`bool`.
:type filepath_suffix: :obj:`str` or :obj:`None`
:arg filepath_suffix: If :obj:`None`, no suffix is used, otherwise the
provided suffix string is joined to the default file path with an
underscore.
Only one instance of this class may be initialized for each unique
file path suffix.
"""
ConfigParser.SafeConfigParser.__init__(self)
# Setup variables
self._is_active = is_active_indicator
self._filepath_suffix = filepath_suffix
self._setup_vars()
#self._add_filepath_suffix() #For Py2.7+. See _filepath_suffixes_in_use.
self._makedirs()
self.read()
def _setup_vars(self):
"""Setup miscellaneous variables."""
home_dir = os.getenv('HOME')
# Note: The environment variable HOME is not available with httpd cgi.
# Given that the scan is run by the root user, the value of HOME is
# expected to be "/root".
filepath_base = os.path.join(home_dir, '.enstore',
settings['scriptname_root'])
# Note: The value of `filepath_base` is intended to be unique for each
# module.
self._filepath = filepath_base
if self._filepath_suffix:
self._filepath += '_{0}'.format(self._filepath_suffix)
self._parser_lock = multiprocessing.Lock()
def _add_filepath_suffix(self):
"""
Add the provided filepath suffix to the list of suffixes in use.
:exc:`ValueError` is raised if the suffix is already in use.
"""
with self._filepath_suffixes_lock:
if self._filepath_suffix in self._filepath_suffixes_in_use:
msg = ('File path suffix "{}" was previously initialized. A '
'suffix can be initialized only once.'
).format(self._filepath_suffix)
raise ValueError(msg)
else:
self._filepath_suffixes_in_use.append(self._filepath_suffix)
def read(self):
"""
Read the saved values from file if the file exists.
Note that the retrieved values will be read only into the process from
which this method is called.
"""
filepath = self._filepath
with self._parser_lock:
if os.path.isfile(filepath):
ConfigParser.SafeConfigParser.read(self, filepath)
def _makedirs(self):
"""As necessary, make the directories into which the file will be
written."""
path = os.path.dirname(self._filepath)
try:
os.makedirs(path)
except OSError:
# Note: "OSError: [Errno 17] File exists" exception is raised if
# the path previously exists, irrespective of the path being a file
# or a directory, etc.
if not os.path.isdir(path):
raise
def write(self):
"""
Write the set values to file.
While this method itself is process safe, the underlying set values are
not process safe - they are unique to each process.
"""
try:
with self._parser_lock:
with open(self._filepath, 'wb') as file_:
ConfigParser.SafeConfigParser.write(self, file_)
file_.flush()
file_.close()
except:
if self._is_active.value: raise
def optionxform(self, option):
return option # prevents conversion to lower case
class Checkpoint(object):
"""
Provide a checkpoint manager to get and set a checkpoint.
The :class:`FileParser` class is used internally to read and write the
checkpoint.
"""
version = 1 # Version number of checkpointing implementation.
"""Version number of checkpointing implementation."""
def __init__(self, is_active_indicator, scanner_name):
"""
Initialize the checkpoint manager with the provided scanner name.
Each unique notices output file has its own unique checkpoint.
This class must be initialized only once for a scanner.
All old or invalid checkpoints are initially deleted.
:type is_active_indicator: :obj:`multiprocessing.Value` or
:obj:`multiprocessing.RawValue`
:arg is_active_indicator: This must have an attribute ``value`` which
is evaluated as a :obj:`bool`. So long as
``is_active_indicator.value`` evaluates to :obj:`True`, the current
checkpoint is periodically written to file. If and when this
indicator evaluates to :obj:`False`, the write loop is terminated.
The checkpoint is then still written one final time.
:type scanner_name: :obj:`str`
:arg scanner_name: This represents the name of the current scanner,
e.g. ``ScannerForward``. It is expected to be the name of the class
of the current scanner.
"""
# Setup variables
self._is_active = is_active_indicator
self._file_basename = '{0}_checkpoints'.format(scanner_name)
self._setup_vars()
self._setup_writer()
def _setup_vars(self):
"""Setup miscellaneous variables."""
self._parser = FileParser(self._is_active, self._file_basename)
self._parser_lock = multiprocessing.Lock()
self._is_parser_set_enabled = multiprocessing.RawValue(ctypes.c_bool,
True)
self._section = 'Version {0}'.format(self.version)
self._option = settings['output_file']
self._value = multiprocessing.Manager().Value(unicode, u'')
def _setup_writer(self):
"""Setup and start the writer thread."""
self._add_section()
self._cleanup()
self.read()
PeriodicRunner(self._is_active, self.write,
settings['checkpoint_write_interval'], 'process',
'CheckpointLogger')
def _is_reliably_active(self):
"""
Return whether the ``self._is_active`` indicator reliably returns
:obj:`True`.
The indicator is checked twice with a time delay between the checks.
The time delay provides time for the indicator to possibly be set to
:obj:`False`, such as during an abort.
This method may be used from any thread or process. It is thread and
process safe.
:rtype: :obj:`bool`
"""
return (self._is_active.value and
(time.sleep(0.1) or self._is_active.value))
# Note: "bool(time.sleep(0.1))" is False. Because it is
# succeeded by "or", it is essentially ignored for boolean
# considerations. Its only purpose is to introduce a time
# delay.
@property
def value(self):
"""
For use as a getter, return the locally stored checkpoint.
:rtype: :obj:`str` (when *getting*)
For use as a setter, update the locally stored checkpoint with the
provided value. The checkpoint is updated into the parser
asynchronously.
:type value: :obj:`str` (when *setting*)
:arg value: the current checkpoint.
The getter or setter may be used from any thread or process. They are
thread and process safe.
"""
try: return self._value.value
except:
if self._is_reliably_active(): raise
else: return ''
@value.setter
def value(self, value):
"""
See the documentation for the getter method.
This method is not documented here because this docstring is ignored by
Sphinx.
"""
value = unicode(value)
try: self._value.value = value
except:
if self._is_reliably_active(): raise
def _add_section(self):
"""Add the pertinent checkpoint section to the parser."""
with self._parser_lock:
if not self._parser.has_section(self._section):
self._parser.add_section(self._section)
def _cleanup(self):
"""Delete old or invalid checkpoints."""
max_age = time.time() - 86400 * settings['checkpoint_max_age']
with self._parser_lock:
for filepath in self._parser.options(self._section):
if (((not os.path.isfile(filepath))
or (os.stat(filepath).st_mtime < max_age))
and (filepath != self._option)):
self._parser.remove_option(self._section, filepath)
def read(self):
"""
Read and return the checkpoint from the parser.
The checkpoint defaults to an empty string if it is unavailable in the
parser.
"""
try:
checkpoint = self._parser.get(self._section, self._option)
# Note: A parser lock is not required or useful for a get
# operation.
except ConfigParser.NoOptionError:
checkpoint = ''
self.value = checkpoint
return checkpoint
def write(self):
"""
Set and write the locally stored checkpoint, if valid, into the parser.
The action will happen only if setting it is enabled, otherwise the
command will be ignored.
This method is process safe. It is practical to generally call it in
only one process, however.
"""
with self._parser_lock:
if self._is_parser_set_enabled.value:
checkpoint = self.value
if checkpoint: # Note: checkpoint is initially an empty string.
self._parser.set(self._section, self._option, checkpoint)
self._parser.write()
def remove_permanently(self):
"""
Remove and also disable the checkpoint altogether from the parser,
effectively preventing it from being set into the parser again.
This method is process safe, but it is expected to be called only once.
"""
self._is_parser_set_enabled.value = False
with self._parser_lock:
self._parser.remove_option(self._section, self._option)
self._parser.write()
class PrintableList(list):
"""Provide a list object which has a valid English string
representation."""
def __init__(self, plist=None, separator=', ', separate_last=False):
"""
Initialize the object.
:type plist: :obj:`list` or :obj:`None`
:arg plist: This is the initial :obj:`list` with which to initialize
the object. It is optional, with a default value of :obj:`None`, in
which case an empty :obj:`list` is initialized.
:type separator: :obj:`str`
:arg separator: the string which is used to delimit consecutive items
in the list.
:type separate_last: :obj:`bool`
:arg separate_last: indicates whether to separate the last two items in
the list with the specified ``separator``.
"""
self.separator = separator
self.separate_last = separate_last
if plist is None: plist = []
list.__init__(self, plist)
def __str__(self):
"""
Return a valid English string representation.
:rtype: :obj:`str`
Example::
>>> for i in range(5):
... str(PrintableList(range(i)))
...
''
'0'
'0 and 1'
'0, 1 and 2'
'0, 1, 2 and 3'
"""
separator = self.separator
separator_last = separator if self.separate_last else ' '
separator_last = '{0}and '.format(separator_last)
s = (str(i) for i in self)
s = separator.join(s)
s = separator_last.join(s.rsplit(separator, 1))
return s
class ReversibleDict(dict):
"""
Provide a reversible :obj:`dict`.
Initialize the object with a :obj:`dict`.
"""
def reversed(self, sort_values=True):
"""
Return a reversed :obj:`dict`, with keys corresponding to non-unique
values in the original :obj:`dict` grouped into a :obj:`list` in the
returned :obj:`dict`.
:type sort_values: :obj:`bool`
:arg sort_values: sort the items in each :obj:`list` in each value of
the returned :obj:`dict`.
:rtype: :obj:`dict`
Example::
>>> d = ReversibleDict({'a':3, 'c':2, 'b':2, 'e':3, 'd':1, 'f':2})
>>> d.reversed()
{1: ['d'], 2: ['b', 'c', 'f'], 3: ['a', 'e']}
"""
revdict = {}
for k, v in self.iteritems():
revdict.setdefault(v, []).append(k)
if sort_values:
revdict = dict((k, sorted(v)) for k, v in revdict.items())
return revdict
def _reversed_tuple_revlensorted(self):
"""
Return a :obj:`tuple` created from the reversed dict's items.
The items in the :obj:`tuple` are reverse-sorted by the length of the
reversed dict's values.
:rtype: :obj:`tuple`
Example::
>>> d = ReversibleDict({'a':3, 'c':2, 'b':2, 'e':3, 'd':1, 'f':2})
>>> d._reversed_tuple_revlensorted()
((2, ['b', 'c', 'f']), (3, ['a', 'e']), (1, ['d']))
"""
revitems = self.reversed().items()
sortkey = lambda i: (len(i[1]), i[0])
revtuple = tuple(sorted(revitems, key=sortkey, reverse=True))
return revtuple
def __str__(self):
"""
Return a string representation of the reversed dict's items using the
:class:`PrintableList` class.
The items in the returned string are reverse-sorted by the length of
the reversed dict's values.
:rtype: :obj:`str`
Example::
>>> print(ReversibleDict({'a':3, 'c':2, 'b':2, 'e':3, 'd':1, 'f':2}))
b, c and f (2); a and e (3); and d (1)
>>> print(ReversibleDict({'a': 3, 'c': 2}))
a (3) and c (2)
"""
revtuple = self._reversed_tuple_revlensorted()
revstrs = ('{0} ({1})'.format(PrintableList(values), key)
for key, values in revtuple)
pl_args = ('; ', True) if (max(len(i[1]) for i in revtuple) > 1) else ()
revstrs = PrintableList(revstrs, *pl_args)
revstr = str(revstrs)
return revstr
class MPSubDictCache:
"""
Provide a memory-efficient and :mod:`multiprocessing`-safe subset of a
:obj:`dict` for use when the values in the :obj:`dict` are themselves
dicts.
Memory efficiency is derived from sharing the keys used in the level 2
dicts.
"""
def __init__(self):
"""Initialize the object."""
self._manager = multiprocessing.Manager()
# Note: If the above line is executed at import-time, it makes the
# program hang on exit in Python 2.6.3. In addition, the dict creation
# lines below, if executed at import-time, make the program hang at
# import-time in Python 2.6.3. With Python 2.7+, it may better to
# declare `_manager` as a class variable instead of an instance
# variable.
self._cache = self._manager.dict()
self._subkeys = self._manager.dict() # e.g. {'kA': 1, 'kB': 2}
self._subkeys_reverse = self._manager.dict() # e.g. {1: 'kA', 2: 'kB'}
self._index = multiprocessing.Value(ctypes.c_ushort)
self._setitem_lock = multiprocessing.Lock()
def __contains__(self, k):
"""``D.__contains__(k)`` returns :obj:`True` if ``D`` has a key ``k``,
else returns :obj:`False`."""
return (k in self._cache)
def __getitem__(self, k):
"""``x.__getitem__(y) <==> x[y]``"""
subdict = self._cache[k] # can raise KeyError
subdict = self._decompress_subkeys(subdict) # should not raise KeyError
return subdict
def __setitem__(self, k, subdict):
"""``x.__setitem__(i, y) <==> x[i]=y``"""
with self._setitem_lock:
subdict = self._compress_subkeys(subdict)
self._cache[k] = subdict
@property
def _next_index(self):
index = self._index.value
self._index.value += 1
return index
def _compress_subkeys(self, subdict):
"""
Compress the keys in the ``subdict`` using an internal index.
:type subdict: :obj:`dict`
:arg subdict:
:rtype: :obj:`dict`
"""
subdict_compressed = {}
for k,v in subdict.items():
try: k_compressed = self._subkeys[k]
except KeyError:
k_compressed = self._next_index
self._subkeys[k] = k_compressed
self._subkeys_reverse[k_compressed] = k
subdict_compressed[k_compressed] = v
return subdict_compressed
def _decompress_subkeys(self, subdict):
"""
Decompress the keys in the ``subdict`` using the internal index.
:type subdict: :obj:`dict`
:arg subdict:
:rtype: :obj:`dict`
"""
return dict((self._subkeys_reverse[k],v) for k,v in subdict.items())
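# Illustrative usage sketch for MPSubDictCache (assumption: caching per-volume
# info dicts that repeat the same keys; the function below is never called by
# the scanner).
def _mp_sub_dict_cache_example():
    """Store and retrieve a level-2 dict; the repeated keys are mapped to small
    integer indices internally to save memory."""
    cache = MPSubDictCache()
    cache['VOL001'] = {'library': 'LTO8', 'file_family': 'astro'}
    if 'VOL001' in cache:
        return cache['VOL001']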
class CommandLineOptionsParser(optparse.OptionParser):
"""
Parse program options specified on the command line.
These are made available using instance attributes ``options`` and
``args``, as described below.
``options`` is an attribute providing values for all options. For example,
if ``--file`` takes a single string argument, then ``options.file`` will be
the filename supplied by the user. Alternatively, its value will be
:obj:`None` if the user did not supply this option.
``args`` is the list of positional arguments leftover after parsing
``options``.
The :meth:`add_option` method can be used to add an option.
"""
# In newer versions of Python, i.e. 2.7 or higher, the usage of the
# optparse module should be replaced by the newer argparse module.
_options_seq = []
def __init__(self):
"""Parse options."""
usage = '%prog -t SCAN_TYPE [OPTIONS]'
optparse.OptionParser.__init__(self, usage=usage)
self._add_options()
(self.options, self.args) = self.parse_args()
self._check_option_values()
self._process_options()
def add_option(self, *args, **kwargs):
# Refer to the docstring of the overridden method.
optparse.OptionParser.add_option(self, *args, **kwargs)
if kwargs.get('dest'): self._options_seq.append(kwargs['dest'])
@staticmethod
def output_filename():
"""Return the name of the output file for notices."""
datetime_str = datetime.datetime.now().strftime('%Y%m%dT%H%M%S')
filen = '{0}_{1}.log'.format(settings['scriptname_root'], datetime_str)
pathn = os.path.abspath(filen)
return pathn
def _add_options(self):
"""Add various options."""
# Add scan type option
self.add_option('-t', '--type', dest='scan_type',
choices=('forward', 'reverse'),
help='(forward, reverse) scan type',
)
# Add scan directory option
directory = settings['fs_root']
self.add_option('-d', '--directory', dest='fs_root',
default=directory,
help=('(for forward scan only) absolute path of '
'directory to scan recursively '
'(recommended default is {0}) (not recommended '
'to be specified for large nested directories)'
).format(directory))
# Add notices output file option
filename = self.output_filename()
self.add_option('-o', '--output_file', dest='output_file',
default=filename,
help=('absolute path to output file for notices '
'(default is dynamic, e.g. {0}) (appended if '
'exists)'
).format(filename))
# Add printing option
self.add_option('-p', '--print', dest='print',
choices=('checks', 'notices'),
help=('(checks, notices) for the specified scan type, '
'print all runnable checks and their overviews, '
'or all notice templates, and exit'),
)
# Add resume option
self.add_option('-r', '--resume', dest='resume_scan', default=False,
action='store_true',
help=('for specified output file (per -o), resume scan '
'where aborted (default is to restart scan) (use '
'with same database only)'))
# Add status interval option
status_interval = settings['status_interval']
self.add_option('-s', '--status_interval', dest='status_interval',
type=float,
default=status_interval,
help=('max status output interval in seconds (default '
'approaches {0})').format(status_interval),
)
def _check_option_values(self):
"""Check whether options have been specified correctly, and exit
otherwise."""
# Check required options
if not all((self.options.scan_type, self.options.output_file)):
self.print_help()
self.exit(2)
def _process_options(self):
"""Process and convert options as relevant."""
# Process options as relevant
self.options.fs_root = os.path.abspath(self.options.fs_root)
self.options.output_file = os.path.abspath(self.options.output_file)
# Convert options from attributes to keys
self.options = dict((k, getattr(self.options, k)) for k in
self._options_seq)
class Enstore:
"""
Provide an interface to various Enstore modules.
A unique instance of this class must be created in each process in which
the interface is used.
"""
def __init__(self):
"""Initialize the interface."""
self.config_client = \
enstore_configuration_client.ConfigurationClient()
self.config_dict = self.config_client.dump_and_save()
self.library_managers = [k[:-16] for k in self.config_dict
if k.endswith('.library_manager')]
info_client_flags = (enstore_constants.NO_LOG |
enstore_constants.NO_ALARM)
self.info_client = \
enstore_info_client.infoClient(self.config_client,
flags=info_client_flags)
#self.storagefs = enstore_namespace.StorageFS()
class Chimera:
"""
Provide an interface to the Chimera database.
A unique database connection is internally used for each process and thread
combination from which the class instance is used.
"""
_connections = {}
_cursors = {}
@staticmethod
def confirm_psycopg2_version():
"""Exit the calling process with an error if the available
:mod:`psycopg2` version is less than the minimally required version."""
ver_str = psycopg2.__version__
ver = [int(i) for i in ver_str.partition(' ')[0].split('.')]
min_ver = [2, 4, 5]
min_ver_str = '.'.join(str(v) for v in min_ver)
if ver < min_ver:
msg = ('The installed psycopg2 version ({0}) is older than the'
' minimally required version ({1}). Its path is {2}.'
).format(ver_str, min_ver_str, psycopg2.__path__)
exit(msg) # Return code is 1.
@staticmethod
def confirm_fs():
"""Exit the calling process with an error if the filesystem root is not
a Chimera filesystem."""
# Note: This method helps confirm that PNFS is not being used.
fs_root = str(settings['fs_root']) # StorageFS requires str, not unicode
fs_type_reqd = chimera.ChimeraFS
#import pnfs; fs_type_reqd = pnfs.Pnfs
fs_type_seen = enstore_namespace.StorageFS(fs_root).__class__
if fs_type_reqd != fs_type_seen:
msg = ('The filesystem root ({0}) is required to be of type {1} '
'but is of type {2}.'
).format(fs_root,
fs_type_reqd.__name__, fs_type_seen.__name__)
exit(msg) # Return code is 1.
@property
def _cursor(self):
"""Return the cursor for the current process and thread combination."""
key = (multiprocessing.current_process().ident,
threading.current_thread().ident)
if key in self._cursors:
return self._cursors[key]
else:
# Create connection
conn = psycopg2.connect(database='chimera', user='enstore')
#conn.set_session(readonly=True, autocommit=True)
self._connections[key] = conn
# Create cursor
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
self._cursors[key] = cursor
return cursor
def fetchone(self, *args):
"""Fetch and return a row for the provided query arguments."""
self._cursor.execute(*args)
return self._cursor.fetchone()
class Scanner:
"""
This is the base class for derived scanner classes.
A derived class must:
- Define the :meth:`validate_scan_location` method to validate the scan
location root.
- Define the :meth:`queue_items` method which puts items into the
:attr:`items_q` queue.
- Define *check methods* as relevant, which raise the appropriate
:class:`Notice` exception.
- Define the :attr:`checks` variable which is a
:obj:`~collections.Sequence` of *check method* names, indicating the
order in which to execute the methods.
- Define the :attr:`notices` variable, the requirements for which are
specified by the :meth:`Notice.update_notices` method.
- Define the :meth:`get_num_items` method returning a non-negative integer
which is the total number of items to be processed.
To run the scan, initialize the derived class, and call the :meth:`run`
method.
"""
num_scan_processes = (multiprocessing.cpu_count() *
settings['num_scan_processes_per_cpu'])
# Estimate the max number of pending items that may remain queued
est_max_scan_speed = 120 * 5 # (per second) (Guesstimate with SSD.)
est_max_dir_list_time = 15 # Est. max seconds to walk a single large dir.
queue_max_len = est_max_scan_speed * est_max_dir_list_time
# Note: The goals here are twofold:
# - limiting memory usage
# - ensuring a process does not run out of pending items to scan
# This will need to be moved to the derived class if different scanners have
# very different scan speeds.
def __init__(self):
"""Set up the scanner."""
self._check_prereqs_prevars()
self._setup_vars()
self._check_prereqs_postvars()
self._setup_workers()
def _check_prereqs_prevars(self):
"""Check various prerequisites before setting up instance variables."""
self._check_ugid()
self._check_exclusive()
#Chimera.confirm_psycopg2_version()
Chimera.confirm_fs()
def _setup_vars(self):
"""Setup miscellaneous variables."""
settings['output_file_dict'] = '{0}.dict'.format(
settings['output_file'])
Notice.update_notices(self.notices)
self.is_active = multiprocessing.RawValue(ctypes.c_bool, True)
self.checkpoint = Checkpoint(self.is_active, self.__class__.__name__)
self.num_items_total = multiprocessing.RawValue(ctypes.c_ulonglong,
self.get_num_items())
self._start_time = time.time() # Intentionally defined now.
def _check_prereqs_postvars(self):
"""Check various prerequisites after setting up instance variables."""
self.validate_scan_location() # Implemented in derived class.
self._validate_output_files_paths()
self._validate_checkpoint()
def _setup_workers(self):
"""Setup and start worker processes and threads."""
self._setup_mp()
self._start_workers()
self._start_ScannerWorker_monitor()
def run(self):
"""
Perform scan.
This method blocks for the duration of the scan.
"""
try:
self.queue_items() # Defined by derived class # is blocking
except (KeyboardInterrupt, SystemExit) as e:
self.is_active.value = False
if isinstance(e, KeyboardInterrupt):
print() # Prints a line break after "^C"
if str(e):
print(e, file=sys.stderr)
except:
self.is_active.value = False
raise
finally:
# Note: self.is_active.value must *not* be set to False here,
# independent of whether it was previously set to False. It should
# essentially remain True if the work ends normally.
self._stop_workers()
self._do_postreqs()
if not self.is_active.value:
# Note: This condition will here be True if an abort was
# instructed, such as using KeyboardInterrupt or SystemExit.
# This can cause an unknown non-daemonic process to hang on
# exit. To get past the hang, the exit is forced. The hang is
# not observed to occur during a normal scan completion.
os._exit(1)
def _do_postreqs(self):
"""Perform post-requisites after completion of the scan."""
# Finalize checkpoint
if self.is_active.value:
self.checkpoint.remove_permanently()
else:
self.checkpoint.write()
# Log status
try: self._log_status() # Warning: Not executed if using `tee`.
except IOError: pass
def _check_ugid(self):
"""Check whether process UID and GID are 0, exiting the process with an
error otherwise."""
id_reqd = 0
if (os.getuid() != id_reqd) or (os.getgid() != id_reqd):
msg = ('Error: UID and GID must be {0}, but are {1} and {2} '
'instead.').format(id_reqd, os.getuid(), os.getgid())
exit(msg)
else:
# Set "effective" IDs in case they are not equal to "real" IDs
os.setegid(id_reqd)
os.seteuid(id_reqd)
# Note: EGID is intentionally set before EUID. This is because
# setting EUID first (to a non-root value) can result in a loss of
# power, causing the EGID to then become unchangeable.
def _check_exclusive(self):
"""
Check if the scanner is running exclusively.
Requirements:
- Root permissions.
- The ``enstore`` user and group must exist.
This check is required to be performed only once. For safety, this must
be done before the initialization of file-writing objects such as
:attr:`self.checkpoint`, etc.
This check is performed to prevent accidentally overwriting any of the
following:
- Notices output files if they have the same name.
- The common checkpoint file.
"""
# Establish lock dir settings
ld_name = '/var/run/enstore' # Parent dir is later assumed to exist.
# Note: Writing in "/var/run" requires root permissions.
ld_mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP
# = 0o660 = 432
try:
ld_uid = pwd.getpwnam('enstore').pw_uid
except KeyError: # User 'enstore' does not exist.
ld_uid = -1 # -1 keeps the value unchanged.
try:
ld_gid = grp.getgrnam('enstore').gr_gid
except KeyError: # Group 'enstore' does not exist.
ld_gid = -1 # -1 keeps the value unchanged.
# Create lock directory
umask_original = os.umask(0)
try:
os.mkdir(ld_name, ld_mode) # This assumes parent dir exists.
except OSError:
if not os.path.isdir(ld_name): raise
else:
os.chown(ld_name, ld_uid, ld_gid)
finally:
os.umask(umask_original)
# Establish lock file settings
lf_name = '{0}.lock'.format(settings['scriptname_root'])
lf_path = os.path.join(ld_name, lf_name)
lf_flags = os.O_WRONLY | os.O_CREAT
lf_mode = stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH # = 0o222 = 146
# Create lock file
umask_original = os.umask(0)
try:
lf_fd = os.open(lf_path, lf_flags, lf_mode)
finally:
os.umask(umask_original)
# Note: It is not necessary to use "os.fdopen(lf_fd, 'w')" to open a
# file handle, or to keep it open for the duration of the process.
# Try locking the file
try:
fcntl.lockf(lf_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
msg = ('Error: {0} may already be running. Only one instance of it '
'can run at a time.'
).format(settings['scriptname_root'].title())
exit(msg)
# Note: Because fcntl is used, it is not necessary for the locked file
# to be deleted at the end of the scan.
def _validate_output_files_paths(self):
"""Validate the paths of the scan notices output files."""
# Determine file properties
of_map = {'main': 'output_file', 'dict': 'output_file_dict'}
of_all = {}
for of_name, of_key in of_map.items():
of = {}
of['path'] = settings[of_key]
of['path_basename'] = os.path.basename(of['path'])
of['path_exists'] = os.path.isfile(of['path'])
of_all[of_name] = of
# Add file paths to message
ofall_paths = ['"{0}"'.format(of['path']) for of in of_all.values()]
ofall_paths = sorted(ofall_paths)
ofall_paths_str = str(PrintableList(ofall_paths))
msg = 'The scan notices output files are {0}.'.format(ofall_paths_str)
# Add an "appended" or "created" status to message
if of_all['main']['path_exists'] == of_all['dict']['path_exists']:
if of_all['main']['path_exists']:
msg += ' These files exist and will be appended.\n'
elif not settings['resume_scan']:
msg += ' These files do not exist and will be created.\n'
else:
msg += '\n'
print(msg)
else:
msg = ('Error: {0} These must be in a consistent state. Both must '
'either exist (so they can be appended), or both must not '
'exist (so they can be created). If one of these files is '
'missing, it is recommended the other be deleted.'
).format(msg)
exit(msg)
def _validate_checkpoint(self):
"""
Validate usability of checkpoint.
This is performed only when a scan resumption is requested.
"""
# Note: It is possible that this method belongs in the derived class
# instead.
if not settings['resume_scan']: return
output_file = settings['output_file']
output_file_dict = settings['output_file_dict']
checkpoint = self.checkpoint.value
# Caution: Checks specific to the current scanner mustn't be done here.
if ((not os.path.isfile(output_file)) or
(not os.path.isfile(output_file_dict))):
msg = ('Error: A request to resume the scan for the current set of '
'output files ("{0}" and "{1}") was received, but one or '
'more of these output files do not already exist. A scan '
'can be resumed only for an existing set of incomplete '
'output files.'
).format(output_file, output_file_dict)
exit(msg)
elif not checkpoint:
msg = ('Error: A request to resume the scan was received, but a '
'checkpoint is unavailable for the current primary output '
'file ({0}). If an output file was previously specified, '
'the currently specified path must match the previous path '
'exactly.'
).format(output_file)
exit(msg)
else:
msg = ('Scan will be resumed at the approximate checkpoint "{0}". '
'Items preceding this checkpoint will still be traversed by '
'the scanner but will not be rescanned. Because the '
'checkpoint is an approximate one, a few duplicate entries '
'may exist in the notices output file near the checkpoint.\n'
).format(checkpoint)
# Note: For more info on setting a checkpoint, see
# self._item_handler.
print(msg)
def _setup_mp(self):
"""Setup :mod:`multiprocessing` environment."""
# Setup queues
self.items_q = multiprocessing.Queue(self.queue_max_len)
self.noticegrps_q = multiprocessing.Queue(self.queue_max_len)
# Setup counter
self.num_ScannerWorker_alive = multiprocessing.Value(ctypes.c_ubyte)
self.num_items_processed = multiprocessing.Value(ctypes.c_ulonglong,
lock=True)
self._num_items_processed_lock = multiprocessing.Lock()
# Define processes
# Note defaults: {'num': 1, 'join': True, 'queue': None}
self._processes = ({'name': 'ScannerWorker',
'num': self.num_scan_processes,
'target': self._item_handler,
'queue': self.items_q,},
{'name': 'NotificationLogger',
'target': self._noticegrp_handler,
'queue': self.noticegrps_q,},
{'name': 'StatusLogger',
'target': self._status_logger, 'join': False,},
)
# Note that processes will be started in the order specified in the
# above sequence. They will also later be terminated in the same order.
# It is important to terminate them in the same order.
def _increment_num_items_processed(self, amount=1):
"""
Increment the number of items processed by the specified amount.
This method is process safe.
:type amount: :obj:`int` (positive)
:arg amount: number by which to increment
"""
with self._num_items_processed_lock: # required
self.num_items_processed.value += amount
def _start_workers(self):
"""Start workers."""
# Start processes in order
for pgrp in self._processes:
pgrp['processes'] = []
num_processes = pgrp.get('num', 1)
for i in range(num_processes):
name = pgrp['name']
if num_processes > 1: name += str(i)
process = multiprocessing.Process(target=pgrp['target'],
name=name)
process.daemon = True
pgrp['processes'].append(process)
process.start()
def _start_ScannerWorker_monitor(self):
"""Start a monitor to track the number of running worker processes."""
def monitor():
interval = settings['status_interval']/4
processes = [pgrp for pgrp in self._processes if
pgrp['name']=='ScannerWorker']
processes = processes[0]['processes']
try:
while self.is_active.value:
try:
num_alive = sum(p.is_alive() for p in processes)
# Note: The "is_alive" Process method used above can be
# used only in the parent process. As such, this
# function must be run in the main process and not in a
# child process.
except OSError:
if self.is_active.value: raise
else:
self.num_ScannerWorker_alive.value = num_alive
time.sleep(interval)
except (KeyboardInterrupt, SystemExit):
pass
monitor_thread = threading.Thread(target=monitor)
monitor_thread.daemon = True
monitor_thread.start()
def _stop_workers(self):
"""Stop workers cleanly."""
# Stop processes in order
for pgrp in self._processes:
pgrp_queue = pgrp.get('queue')
if pgrp_queue is not None:
pgrp_num = pgrp.get('num', 1)
for _ in range(pgrp_num):
if self.is_active.value:
pgrp_queue.put(None) # is blocking
else:
try: pgrp_queue.put_nowait(None)
except Queue.Full: pass
if pgrp.get('join', True):
for p in pgrp['processes']:
p.join()
time.sleep(settings['sleep_time_at_exit'])
def _item_handler(self):
"""
Handle queued item paths.
Multiple instances of this method are expected to run simultaneously.
"""
# Create process-specific interfaces
Item.enstore = Enstore()
# Setup local variables
local_count = 0
# Note: A separate local count (specific to the current process) and
# global count are maintained. This allows for infrequent updates to
# the global count. The local count is the number of items that have
# been processed locally in the current process, but have not yet been
# incremented in the global count.
update_time = time.time() # Most recent global state update time.
update_thresholds = {'count': self.num_scan_processes * 2,
'time': settings['status_interval'] / 2,}
# Note: The thresholds are used to reduce possible contention from
# multiple scan processes for updating the shared global values. A
# global update is executed when any of the thresholds are reached.
# Setup required external variables locally
item_get = self.items_q.get
noticegrp_put = self.noticegrps_q.put
gc_updater = self._increment_num_items_processed # For global count.
def process_item(item):
noticegrp = self._scan_item(item)
if noticegrp:
noticegrp = noticegrp.to_dict()
# Note: The object is transmitted over multiprocessing as a
# dict. This is because a dict can be easily pickled and
# unpickled, whereas the object itself is more difficult to
# pickle and unpickle.
noticegrp_put(noticegrp)
def update_state(item, local_count, update_time, force_update=False):
if (item is not None) and item.is_scanned:
local_count += 1
update_age = time.time() - update_time
update_thresholds_met = {
'count': local_count == update_thresholds['count'],
'time': update_age > update_thresholds['time'],}
update_threshold_met = any(update_thresholds_met.values())
if (local_count > 0) and (update_threshold_met or force_update):
gc_updater(local_count)
local_count = 0
update_time = time.time()
if (item is not None) and item.is_scanned and item.is_file():
# Note: The checkpoint is set only if the item is a
# file, as opposed to a directory. This is because
# the resumption code can currently only use a file
# as a checkpoint.
self.checkpoint.value = item
# Note: Given that multiple workers work on the
# work queue concurrently, this setting of the
# checkpoint is an approximate one, as coordinated
# checkpointing is not used. A blocking version of
# coordinated checkpointing can possibly be
# implemented with multiprocessing.Barrier, which
# was only introduced in Python 3.3.
return (local_count, update_time)
# Process items
item = None # Prevents possible NameError exception in finally-block.
try:
while self.is_active.value:
item = item_get()
if item is None: # Put by self._stop_workers()
break
else:
process_item(item)
local_count, update_time = \
update_state(item, local_count, update_time)
except (KeyboardInterrupt, SystemExit):
pass
finally:
update_state(item, local_count, update_time, force_update=True)
# Note: If an exception is raised in the try-block, due to
# implementation limitations, it is possible that a duplicate
# update is performed on the global count.
time.sleep(settings['sleep_time_at_exit'])
def _noticegrp_handler(self):
"""
Handle queued :class:`NoticeGrp` objects.
Only a single instance of this method is expected to run.
"""
noticegrp_get = self.noticegrps_q.get
# Obtain handles to output files
output_file = open(settings['output_file'], 'a')
output_file_dict = open(settings['output_file_dict'], 'a')
# Write to output files
try:
while self.is_active.value:
noticegrp = noticegrp_get()
if noticegrp is None: # Put by self._stop_workers()
break
else:
noticegrp = NoticeGrp.from_dict(noticegrp)
# Note: The object is transmitted over multiprocessing
# as a dict. This is because a dict can be easily
# pickled and unpickled, whereas the object itself is
# more difficult to pickle or unpickle.
if noticegrp:
str_out = '{0}\n\n'.format(noticegrp)
output_file.write(str_out)
str_out = '{0}\n'.format(noticegrp.to_exportable_dict())
output_file_dict.write(str_out)
output_file.flush()
output_file_dict.flush()
except (KeyboardInterrupt, SystemExit):
pass
finally:
time.sleep(settings['sleep_time_at_exit'])
output_file.flush()
output_file_dict.flush()
output_file.close()
output_file_dict.close()
def _status_logger(self):
"""
Log the current status after each interval of time.
Only a single instance of this method is expected to run.
"""
# Setup environment
interval = settings['status_interval']
interval_cur = 1
num_items_processed_previously = secs_elapsed_previously = 0
# Log status
try:
while self.is_active.value:
(num_items_processed_previously, secs_elapsed_previously) = \
self._log_status(num_items_processed_previously,
secs_elapsed_previously,
interval_cur)
time.sleep(interval_cur)
interval_cur = min(interval, interval_cur*math.e)
# Note: `interval_cur` grows geometrically (it is multiplied by
# `math.e` on each iteration) and is capped at `interval`, so it
# converges to `interval`.
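# For example (illustrative), with `interval` set to 60, the sleep
# durations would be roughly 1, 2.7, 7.4, 20.1, 54.6, and then 60
# seconds on every subsequent iteration.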
except (KeyboardInterrupt, SystemExit):
pass
# Note: Execution of "self._log_status()" inside a "finally" block here
# does not happen. It is done in the main process instead.
def _log_status(self, num_items_processed_previously=None,
secs_elapsed_previously=None,
interval_next=None):
"""
Log the current status once.
:type num_items_processed_previously: :obj:`int` (non-negative) or
:obj:`None`
:arg num_items_processed_previously: Number of items processed
cumulatively, as returned previously.
:type secs_elapsed_previously: :obj:`int` (non-negative) or :obj:`None`
:arg secs_elapsed_previously: Number of seconds elapsed cumulatively,
as returned previously.
:type interval_next: :obj:`int` (non-negative) or :obj:`None`
:arg interval_next: Number of seconds to next update.
.. todo:: The `num_items_processed_previously` and
`secs_elapsed_previously` arguments are to be removed and replaced
using class instance variables.
"""
dttd = lambda s: datetime.timedelta(seconds=round(s))
intstr = lambda i: locale.format('%i', i, grouping=True)
# fmtstrint = lambda s, i: '{0} {1}'.format(s, intstr(i))
items = []
# Prepare basic items for status
datetime_now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
num_items_total = int(self.num_items_total.value)
num_items_processed_cum = int(self.num_items_processed.value)
secs_elapsed_cum = time.time() - self._start_time
speed_cum = num_items_processed_cum / secs_elapsed_cum # items/second
# Add basic stats
items += [('Active workers',
intstr(self.num_ScannerWorker_alive.value)), #approx
('Time elapsed',
dttd(secs_elapsed_cum)),
('Items scanned (cumulative)',
intstr(num_items_processed_cum)), #approx
('Speed (cumulative) (items/s)',
intstr(speed_cum)), #approx
]
# Conditionally add current iteration status
if ((num_items_processed_previously is not None) and
(secs_elapsed_previously is not None)):
# Above is explicit check to disambiguate from 0
num_items_processed_cur = (num_items_processed_cum -
num_items_processed_previously)
secs_elapsed_cur = secs_elapsed_cum - secs_elapsed_previously
speed_cur = num_items_processed_cur / secs_elapsed_cur
items += [
# ('Items processed (current)',
# intstr(num_items_processed_cur)), #approx
('Speed (current) (items/s)',
intstr(speed_cur)), #approx
]
# Conditionally add remaining time
remaining_secs = float('inf')
if num_items_total is not None:
num_items_remaining = num_items_total - num_items_processed_cum
items.append(('Items remaining',
intstr(num_items_remaining))) #approx
if speed_cum != 0:
remaining_secs = num_items_remaining / speed_cum
total_secs = secs_elapsed_cum + remaining_secs
items += [('Time remaining', dttd(remaining_secs)), #approx
('Time total', dttd(total_secs)), #approx
]
# Conditionally add time to next update
if interval_next is not None:
interval_next = min(interval_next, remaining_secs)
items += [('Time to next update', dttd(interval_next))] #approx
# Prepare status string
max_item_key_len = max(len(i[0]) for i in items)
items = ['{0:{1}}: {2}'.format(i[0], max_item_key_len, i[1]) for i in
items]
items.insert(0, datetime_now)
status = '\n'.join(items)
print(status, end='\n\n')
return (num_items_processed_cum, secs_elapsed_cum)
def _scan_item(self, item):
"""
Scan provided item as per the *check method* names and dependencies
which are specified in the :attr:`self.checks` attribute, returning the
updated :class:`NoticeGrp` instance for the item.
Multiple instances of this method are expected to run on different
items simultaneously.
"""
# Note: self.checks should not be modified.
checks_pending = list(self.checks)
# Note: Using `list(some_dict)` creates a copy of the dict's keys in
# both Python 2.x and 3.x. A copy is created here because the list is
# mutated later in the method.
#random.shuffle(checks_pending)
# Note: The list of pending checks can be randomized to reduce the
# possibility of multiple worker processes trying to read the same type
# of file metadata at the same time. By explicitly randomizing the
# order in which various checks may run, there may be slightly less
# contention. It is unclear if this randomization actually affects the
# scan speed.
ccs = {} # This refers to checks completion statuses.
def pop_pending_check_name():
for check in copy.copy(checks_pending):
# Note: A copy is used above because checks_pending is modified
# inside the loop. Not using a copy may cause a problem with
# iteration.
prereqs = self.checks[check]
is_prereqs_passed = all(ccs.get(pr)==True for pr in prereqs)
is_prereqs_failed = any(ccs.get(pr)==False for pr in prereqs)
if is_prereqs_passed or is_prereqs_failed:
checks_pending.remove(check)
if is_prereqs_passed: return check
raise IndexError('no pending check')
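# Illustrative example of the resolution order: with
# checks = {'a': (), 'b': ('a',)}, 'a' is returned first; 'b' is
# returned only after 'a' has passed (ccs['a'] is True), and is
# silently dropped from the pending list if 'a' failed.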
# Run checks
while True:
# Get check
try: check_name = pop_pending_check_name()
except IndexError: break
check = getattr(self, check_name)
# Run check
run_status = None
try:
check(item)
except Notice as notice:
item.add_notice(notice)
run_status = not (isinstance(notice, CriticalNotice)
or isinstance(notice, ErrorNotice))
if isinstance(notice, CriticalNotice): break
else:
run_status = True
finally:
if run_status is not None:
ccs[check_name] = run_status
item.is_scanned = True
return item.noticegrp
@classmethod
def print_checks(cls):
"""
Print names and docstrings of all runnable *check methods*.
A check is runnable if all its prerequisites, if any, are runnable.
"""
def is_runnable(check):
return ((check in cls.checks) and
all(is_runnable(prereq) for prereq in cls.checks[check]))
# Note: This check is performed to help with debugging in the
# event some checks are manually disabled.
def describe(check):
method = getattr(cls, check)
desc = inspect.getdoc(method)
desc = desc.partition('\n\n')[0] # Truncate at first empty line.
desc = desc.replace('\n', ' ') # Joins multiple lines.
desc = desc.replace('`', '') # Removes what may be Sphinx syntax.
desc = '{}: {}'.format(check, desc)
return desc
checks = (describe(c) for c in sorted(cls.checks) if is_runnable(c))
for c in checks: print(c)
class ScannerForward(Scanner):
"""Perform forward scan."""
checks = {
# Each dict key is the name of a check method which accepts an Item object.
# The method can optionally raise a Notice (or an inherited) exception. If
# the ErrorNotice or CriticalNotice exception is raised, the check is
# considered failed, otherwise succeeded. If a CriticalNotice exception is
# raised, all remaining checks for the Item are skipped.
#
# Each dict value is a sequence of names of dependencies of check methods.
#
# The noted check methods can be run in a pseudo-random order as long as
# the respective dependencies have succeeded. (An illustrative sketch of
# such a check method appears just after this mapping.)
'check_path_attr': (), # Test check; never fails.
'check_is_storage_path': (),
'check_lstat': ('check_is_storage_path',),
'check_link_target': ('check_is_storage_path',),
'check_file_nlink': ('check_lstat',),
'check_file_temp': ('check_is_storage_path',),
'check_file_bad': ('check_is_storage_path',),
'check_fsid': ('check_is_storage_path',),
# 'check_parentfsid': # Never fails
# ('check_fsid',
# 'check_lstat',),
'check_fslayer2lines': ('check_fsid',
'check_lstat',),
'check_fslayer2': ('check_fslayer2lines',),
'check_fslayer2_crc': ('check_fslayer2',),
'check_fslayer2_size': ('check_fslayer2',),
'check_fslayer2_size_match': ('check_fslayer2',
'check_fslayer2_size',),
'check_fslayer1lines': ('check_fsid',
'check_lstat',
'check_fslayer2lines',
'check_fslayer2',),
'check_fslayer1': ('check_fslayer1lines',
'check_lstat',),
'check_fslayer4lines': ('check_fsid',
'check_lstat',
'check_fslayer2lines',
'check_fslayer2',),
'check_fslayer4': ('check_fslayer4lines',
'check_lstat',),
'check_enstore_file_info': ('check_lstat',
'check_fslayer1lines',), # L1 BFID is used.
'check_enstore_volume_info': ('check_lstat',
'check_enstore_file_info',),
'check_bfid_match': ('check_fslayer1',
'check_fslayer4',
'check_enstore_file_info',),
'check_file_deleted': ('check_enstore_file_info',),
'check_recent': ('check_lstat',),
'check_empty': ('check_lstat',
'check_fslayer2_size_match',
'check_enstore_file_info',),
'check_sfs_path': ('check_is_storage_path',),
'check_volume_label': ('check_fslayer4',
'check_enstore_file_info',),
'check_location_cookie': ('check_fslayer4',
'check_enstore_file_info',),
'check_size': ('check_lstat',
'check_fslayer2',
'check_fslayer4',
'check_enstore_file_info',),
'check_file_family': ('check_fslayer4',
'check_enstore_volume_info',),
# 'check_library': # Not necessary
# ('check_enstore_volume_info',),
'check_drive': ('check_fslayer4',
'check_enstore_file_info',),
'check_crc': (#'check_fslayer2',
'check_fslayer4',
#'check_enstore_volume_info',
'check_enstore_file_info',),
'check_path': ('check_fslayer4',
'check_enstore_file_info',),
'check_pnfsid': ('check_fsid',
'check_fslayer4',
'check_enstore_file_info',),
'check_copy': ('check_enstore_file_info',),
}
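# Illustrative sketch (hypothetical; not an actual check in this class)
# of a minimal check method following the convention described at the
# top of the `checks` mapping above:
#
#     def check_example(self, item):
#         """Check whether ``item`` satisfies some condition."""
#         if item.is_file() and not some_condition(item):
#             raise ErrorNotice('ExampleKey', detail='value')
#
# The method name would be registered in `checks` with a tuple of its
# prerequisite check names, and 'ExampleKey' would be added to the
# `notices` mapping below with a message template.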
notices = {
'NoPath': "Cannot access object's path.",
'NotStorage': 'Not an Enstore file or directory.',
'NoStat': "Cannot access object's filesystem stat. {exception}",
'NlinkGT1': ("lstat nlink (number of hard links) count is >1. It is "
"{nlink}."),
'LinkBroken': ('Symbolic link is broken. It points to "{target}" which '
'does not exist.'),
'TempFile': ('File is a likely temporary file because its name '
'or extension ends with "{ending}".'),
'MarkedBad': 'File is marked bad.',
'NoID': 'Cannot read filesystem ID file "{filename}". {exception}',
'NoL2File': 'Cannot read layer 2 metadata file "{filename}". {exception}',
'L2MultVal': 'Multiple layer 2 "{property_}" property values exist.',
'L2RepVal': 'Repetition of layer 2 "{property_}" property values.',
'L2Extra': 'Layer 2 has these {num_extra_lines} extra lines: '
'{extra_lines}',
# 'NoParentID': ('Cannot read parent\'s filesystem ID file "{filename}". '
# '{exception}'),
# 'ParentIDMismatch': ('Parent ID mismatch. The parent IDs provided by files'
# ' "{filename1}" ({parentid1}) and "{filename2}" '
# '({parentid2}) are not the same.'),
'L2CRCNone': 'Layer 2 CRC is unavailable.',
'L2SizeNone': 'Layer 2 size is missing.',
'L2SizeMismatch': ("File size mismatch. Layer 2 size ({size_layer2})"
" doesn't match lstat size ({size_lstat})."),
'NoL1File': 'Cannot read layer 1 metadata file "{filename}". {exception}',
'L1Empty': 'Layer 1 metadata file "{filename}" is empty.',
'L1Extra': 'Extra layer 1 lines detected.',
# 'L1Mismatch': ('Layer 1 mismatch. Layer 1 provided by files "{filename1}" '
# 'and "{filename2}" are not the same.'),
'L1BFIDBad': 'Layer 1 BFID ({bfid}) is invalid.',
'L1BFIDNone': 'Layer 1 BFID is missing.',
'NoL4File': ('Cannot read layer 4 metadata file "{filename}". '
'{exception}'),
'L4Empty': 'Layer 4 metadata file "{filename}" is empty.',
'L4Extra': 'Extra layer 4 lines detected.',
# 'L4Mismatch': ('Layer 4 mismatch. Layer 4 provided by files "{filename1}" '
# 'and "{filename2}" are not the same.'),
'L4BFIDNone': 'Layer 4 BFID is missing.',
'FileInfoBadType': ('Enstore file info is not a dict. It is of type '
'"{type_}" and has value "{value}".'),
'NoFileInfo': 'File is not in Enstore database.',
'FileInfoBad': ('File status in file info provided by Enstore is not ok. '
'It is "{status}".'),
'FileInfoPathNone': 'File path in Enstore file info is missing.',
'FileInfoPNFSIDNone': 'PNFS ID in Enstore file info is missing.',
'FileInfoPNFSIDBad': ('PNFS ID in file info provided by Enstore database '
'is invalid. It is "{pnfsid}".'),
'VolInfoBadType': ('Enstore volume info is not a dict. It is of type '
'"{type_}" and has value "{value}".'),
'NoVolInfo': 'Volume is not in Enstore database.',
'VolInfoBad': ('Volume status in volume info provided by Enstore database '
'is not ok. It is "{status}".'),
'BFIDMismatch': ('BFID mismatch. The BFIDs provided by layer 1 '
'({bfid_layer1}) and layer 4 ({bfid_layer4}) are not the '
'same.'),
'MarkedDel': ('File is marked deleted by Enstore file info, but its entry '
'still unexpectedly exists in the filesystem.'),
'TooRecent': ('Object was modified less than one day before the scan '
'began.'),
'Size0FileInfoOk': ('File is empty. Its lstat size is 0 and its layer 2 '
'size is {layer2_size}. Its info in Enstore is ok.'),
'Size0FileInfoNo': ('File is empty. Its lstat size is 0 and its layer 2 '
'size is {layer2_size}. Its info in Enstore is not '
'ok. It presumably has no info in Enstore.'),
'MultSFSPaths': ('Multiple paths were returned for PNFS ID {pnfsid}, '
'namely: {paths}'),
'L4VolLabelNone': 'Layer 4 volume label is missing.',
'FileInfoVolLabelNone': 'Volume label in Enstore file info is missing.',
'VolLabelMismatch': ("Volume label mismatch. File's layer 4 volume label "
"({volume_layer4}) doesn't match its Enstore file "
"info volume label ({volume_enstore})."),
'L4LCNone': 'Layer 4 location cookie is missing.',
'FileInfoLCNone': 'Location cookie in Enstore file info is missing.',
'L4LCBad': 'Layer 4 location cookie ({lc_layer4}) is invalid.',
'FileInfoLCBad': ('Location cookie in Enstore file info is invalid. It is '
'"{lc_enstore}".'),
'LCMismatch': ("Current location cookie mismatch. File's current layer 4 "
"location cookie ({current_lc_layer4}) doesn't match its "
"current Enstore file info location cookie "
"({current_lc_enstore})."),
'SizeNone': 'lstat size is missing.',
'L4SizeNone': 'Layer 4 size is missing.',
'FileInfoSizeNone': 'File size in Enstore file info is missing.',
'SizeMismatch': ("File size mismatch. File sizes for file with layer 1 "
"BFID \"{bfid_layer1}\" provided by {size} don't all "
"match."),
'L4FFNone': 'Layer 4 file family is missing.',
'VolInfoFFNone': 'File family is missing in Enstore volume info.',
'FFMismatch': ("File family mismatch. File's layer 4 file family "
"({ff_layer4}) doesn't match its Enstore volume info "
"file family ({ff_enstore})."),
'VolInfoLibNone': 'Library is missing in Enstore volume info.',
'VolInfoLibBad': ('Library ({library}) in Enstore volume info is not '
'recognized.'),
'L4DriveNone': 'Layer 4 drive is missing.',
'FileInfoDriveNone': 'Drive is missing in Enstore file info.',
'DriveMismatch': ("Drive mismatch. File's layer 4 drive "
"({drive_layer4}) doesn't match its Enstore file info "
"drive ({drive_enstore})."),
'CRCNone': 'CRC is missing in both layer 4 and Enstore file info.',
'L4CRCNone': 'Layer 4 CRC is missing.',
'FileInfoCRCNone': 'CRC is missing in Enstore file info.',
'L4CRCMismatch': ("CRC mismatch. File's layer 4 CRC ({crc_layer4}) doesn't"
" match its Enstore file info CRC ({crc_enstore})."),
'L2CRCMismatch': ("CRC mismatch. File's layer 2 CRC ({crc_layer2}) doesn't"
" match its Enstore file info 0-seeded CRC "
"({crc_enstore_0seeded}) or its Enstore file info "
"1-seeded CRC ({crc_enstore_1seeded})."),
'L4PathNone': 'Layer 4 file path is missing.',
'PathMismatch': ("File path mismatch. Normalized file paths for file with "
"layer 1 BFID \"{bfid_layer1}\" provided by {path} don't "
"all match. File may have been moved."),
'L4PNFSIDNone': 'Layer 4 PNFS ID is missing.',
'PNFSIDMismatch': ("PNFS ID mismatch. PNFS IDs for file with layer 1 BFID "
"\"{bfid_layer1}\" provided by {pnfsid} don't all "
"match. File may have been moved."),
'FileInfoDelNone': 'The "deleted" field is missing in Enstore file info.',
'FileInfoDelBad': ('The value of the "deleted" field ({deleted}) in '
'Enstore file info is not recognized.'),
'MarkedCopy': 'File is marked as {copy_types} by Enstore file info.',
}
# Note: The type of each notice, i.e. warning, error, etc. is not noted in
# the above dict because it can sometimes be dynamic.
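# For example (illustrative), a check raising
# ErrorNotice('L2SizeMismatch', size_layer2=10, size_lstat=20) is
# expected to have its message formatted from the 'L2SizeMismatch'
# template above using those keyword arguments.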
@memoize
def get_num_items(self):
"""
Return the total number of items to be scanned.
This method is thread and process safe.
.. note:: As implemented, the :class:`memoize` decorator will rerun the
target method if and when the target is called upon program exit.
This is one reason why this method is not implemented as a reused
*property*.
"""
# Note: settings['fs_root'] is known to be an absolute path (and not
# have a trailing slash).
if settings['fs_root'] == '/pnfs/fs/usr':
# Use database
# Note: This can take a few seconds.
operation = ('select count(*) from t_inodes where itype in '
'(16384, 32768)')
# itype=16384 indicates a directory.
# itype=32768 indicates a file.
return int(Chimera().fetchone(operation)['count'])
else:
self.validate_scan_location()
# Use filesystem
# Note: This can be slow, depending upon the number of directories.
# This state is not normally expected.
count = -1 # This allows exclusion of fs_root itself, as done in
# self.queue_items. Resultant minimum will still be 0.
for _root, _dirs, files in os.walk(settings['fs_root']):
count += (1 + len(files)) # 1 is for _root
return count
def queue_items(self):
"""Queue items for scanning."""
# Provide methods locally
fs_root = settings['fs_root']
os_walker = os.walk(fs_root)
os_path_join = os.path.join
items_q_put = self.items_q.put
put_item = lambda item: items_q_put(Item(item))
put_file = lambda root, file_: put_item(os_path_join(root, file_))
def put_files(root, files):
for file_ in files:
put_file(root, file_)
# Provide checkpointing related variables
resume_flag = settings['resume_scan']
checkpoint = self.checkpoint.value # Guaranteed to be a file path.
def process_pre_checkpoint_dirs():
if resume_flag and checkpoint:
checkpoint_dir, checkpoint_file = os.path.split(checkpoint)
generic_failure_msg = ('The scan cannot be resumed for the '
'specified output file.\n')
# Perform some checks that are specific to the current Scanner.
# Ensure checkpoint is in fs_root
if not checkpoint.startswith(fs_root):
msg = ('Error: Checkpoint file "{0}" is outside of '
'scanning directory "{1}". {2}'
).format(checkpoint, fs_root, generic_failure_msg)
exit(msg)
# Ensure checkpoint file exists
if not os.path.isfile(checkpoint):
msg = ('Error: Checkpoint file "{0}" does not exist. {1}'
).format(checkpoint, generic_failure_msg)
exit(msg)
# Skip items preceding checkpoint
checkpoint_crossed = False
for root, _dirs, files in os_walker:
if root != checkpoint_dir:
num_items_less = 1 + len(files) # 1 is for root
self.num_items_total.value -= num_items_less
else:
# Note: Checkpoint is now a file in the current dir.
# As such, this state happens only once.
self.num_items_total.value -= 1 # for root
for file_ in files:
if checkpoint_crossed:
# Note: self.num_items_total.value should not
# be reduced here.
put_file(root, file_)
else:
self.num_items_total.value -= 1 # for file_
if file_ == checkpoint_file:
checkpoint_crossed = True
print('Checkpoint crossed.\n')
else:
if not checkpoint_crossed:
msg = ('Error: Checkpoint directory "{0}" was'
' not found to contain checkpoint file '
'"{1}". {2}'
).format(checkpoint_dir,
checkpoint_file,
generic_failure_msg)
exit(msg)
break
else:
if not checkpoint_crossed:
msg = ('Error: Checkpoint directory "{0}" was not '
'found. {1}'
).format(checkpoint_dir, generic_failure_msg)
exit(msg)
def process_post_checkpoint_dirs():
if (not resume_flag) or (not checkpoint):
# Queue only the files from only the initial root directory
root, _dirs, files = next(os_walker)
# put_item(root) is intentionally skipped, to not test fs_root.
# Handling the first directory separately here avoids a per-iteration
# "if" check in the loop below.
put_files(root, files)
# Queue each remaining directory and file for scanning
for root, _dirs, files in os_walker:
put_item(root)
put_files(root, files)
# Process items
process_pre_checkpoint_dirs()
process_post_checkpoint_dirs()
def validate_scan_location(self):
"""Validate the scan location root."""
loc = settings['fs_root']
if not os.path.isdir(loc):
msg = 'Error: Scan root "{0}" is not a directory.'.format(loc)
exit(msg)
# Note: There is no reason to print loc if it is valid, as it is
# already evident.
def check_path_attr(self, item):
"""
Check whether ``item`` has a path.
:type item: :class:`Item`
:arg item: object to check
"""
try:
item.path
except AttributeError:
raise CriticalNotice('NoPath')
def check_is_storage_path(self, item):
"""
Check whether ``item`` is a path in the Enstore namespace.
:type item: :class:`Item`
:arg item: object to check
"""
if not item.is_storage_path():
raise CriticalNotice('NotStorage')
def check_lstat(self, item):
"""
Check whether ``item``'s stats are accessible.
:type item: :class:`Item`
:arg item: object to check
"""
try:
item.lstat
except OSError as e:
# If this occurs, refer to the get_stat method of the previous
# implementation of this module for a possible modification to this
# section.
raise CriticalNotice('NoStat', exception=e.strerror or '')
def check_file_nlink(self, item):
"""
Check whether the file has more than one hard link.
:type item: :class:`Item`
:arg item: object to check
"""
if item.is_file() and (item.lstat.st_nlink > 1):
raise InfoNotice('NlinkGT1', nlink=item.lstat.st_nlink)
# There is no usual reason for the link count to be greater than 1.
# There have been cases where a move was aborted early and two
# directory entries were left pointing to one i-node, but the i-node
# only had a link count of 1 and not 2. Since there may be legitimate
# reasons for multiple hard links, it is not considered an error or a
# warning.
def check_link_target(self, item):
"""
Check whether symbolic link is broken.
:type item: :class:`Item`
:arg item: object to check
"""
if item.is_link() and (not os.path.exists(item.path)):
# Note: os.path.exists returns False for broken symbolic links.
raise WarningNotice('LinkBroken',
target=os.path.realpath(item.path))
def check_file_temp(self, item):
"""
Check whether ``item`` is a temporary file.
:type item: :class:`Item`
:arg item: object to check
"""
if item.is_file():
path = item.path
endings = ('.nfs', '.lock', '_lock')
for ending in endings:
if path.endswith(ending):
raise InfoNotice('TempFile', ending=ending)
# Note: If the item is a temporary file, this method reports the
# corresponding ending string. This is why an "is_temp_file" method in
# the Item class is not implemented or used instead.
def check_file_bad(self, item):
"""
Check whether ``item`` is a file that is marked bad.
:type item: :class:`Item`
:arg item: object to check
"""
if item.is_file_bad:
raise InfoNotice('MarkedBad')
def check_fsid(self, item):
"""
Check whether ``item`` has an accessible filesystem ID.
:type item: :class:`Item`
:arg item: object to check
"""
try:
_fsid = item.fsid
except (IOError, OSError) as e:
raise CriticalNotice('NoID', filename=item.fsidname,
exception=e.strerror or '')
# def check_parentfsid(self, item):
# """
# Check file's parent's filesystem ID for its presence.
#
# :type item: :class:`Item`
# :arg item: object to check
# """
#
# if item.is_file():
#
# # Check value for availability
# #sources = 'parent_file', 'parent_dir'
# sources = ('parent_file',)
# parentfsids = {}
# for source in sources:
# try:
# parentfsids[source] = item.parentfsid(source)
# except (IOError, OSError) as e:
# raise ErrorNotice('NoParentID',
# filename=item.parentfsidname(source),
# exception=e.strerror or '')
#
# # Check values for consistency (never observed to fail)
# source1, source2 = 'parent_file', 'parent_dir'
# if ((source1 in parentfsids) and (source2 in parentfsids) and
# (parentfsids[source1] != parentfsids[source2])):
# raise ErrorNotice('ParentIDMismatch',
# filename1=item.parentfsidname(source1),
# parentid1=parentfsids[source1],
# filename2=item.parentfsidname(source2),
# parentid2=parentfsids[source2])
def check_fslayer2lines(self, item):
"""
Check whether a file's filesystem provided layer 2 is corrupted.
:type item: :class:`Item`
:arg item: object to check
"""
if item.is_file():
try:
_layer_lines = item.fslayerlines(2, 'filename')
except (OSError, IOError) as e:
if not (e.errno == errno.ENOENT): # confirmed
raise ErrorNotice('NoL2File', filename=item.fslayername(2),
exception=e.strerror or '')
def check_fslayer2(self, item):
"""
Check for inconsistencies in a file's filesystem provided layer 2.
The check is performed only if the ``item`` is a file and if its layer
2 is present. If its layer 2 is missing, this is not an error
condition.
:type item: :class:`Item`
:arg item: object to check
"""
def check_properties(properties):
for property_, values in properties.items():
if len(values) > 1:
notice = ErrorNotice('L2MultVal', property_=property_)
item.add_notice(notice)
if len(values) != len(set(values)):
notice = WarningNotice('L2RepVal', property_=property_)
item.add_notice(notice)
# Do checks
if item.is_file() and item.has_fslayer(2):
layer_dict = item.fslayer2('filename')
# Check properties
try: properties = layer_dict['properties']
except KeyError: pass
else: check_properties(properties)
# Check pools
if not item.is_file_empty:
try: pools = layer_dict['pools']
except KeyError: pass
else: item.add_notice(InfoNotice('L2Extra',
num_extra_lines=len(pools),
extra_lines=pools))
def check_fslayer2_crc(self, item):
"""
Check whether a file's layer 2 CRC is available.
The check is performed only if the ``item`` is a file and a HSM is not
used.
:type item: :class:`Item`
:arg item: object to check
"""
if (item.is_file() and item.has_fslayer(2)
and (item.fslayer2_property('hsm') == 'no')
and (item.fslayer2_property('crc') is None)):
raise WarningNotice('L2CRCNone')
def check_fslayer2_size(self, item):
"""
Check whether a file's layer 2 size is available.
The check is performed only if the ``item`` is a file and a HSM is not
used.
:type item: :class:`Item`
:arg item: object to check
"""
if (item.is_file() and item.has_fslayer(2)
and (item.fslayer2_property('hsm') == 'no')
and (item.fslayer2_property('length') is None)):
raise WarningNotice('L2SizeNone')
def check_fslayer2_size_match(self, item):
"""
Conditionally check whether ``item``'s layer 2 and filesystem sizes
match.
The check is performed only if the ``item`` is a file and a HSM is not
used.
:type item: :class:`Item`
:arg item: object to check
"""
if item.is_file() and (item.fslayer2_property('hsm') == 'no'):
size_lstat = item.lstat.st_size # int
size_layer2 = item.fslayer2_property('length') # int or None
if ((size_layer2 is not None) and (size_lstat != size_layer2) and
not (size_lstat==1 and size_layer2>2147483647)):
# Not sure why the check below was done:
# "not (size_lstat==1 and size_layer2>2147483647)"
# Note that 2147483647 is 2GiB-1.
raise ErrorNotice('L2SizeMismatch', size_layer2=size_layer2,
size_lstat=size_lstat)
def check_fslayer1lines(self, item):
"""
Check whether file's filesystem provided layer 1 is corrupted.
The check is performed only if the ``item`` is a non-recent file and if
a HSM may be used.
:type item: :class:`Item`
:arg item: object to check
"""
if item.is_nonrecent_file and (item.fslayer2_property('hsm') != 'no'):
try:
layer_lines = item.fslayerlines(1, 'filename')
except (OSError, IOError) as e:
raise ErrorNotice('NoL1File', filename=item.fslayername(1),
exception=e.strerror or '')
else:
if not layer_lines:
raise ErrorNotice('L1Empty', filename=item.fslayername(1))
def check_fslayer1(self, item):
"""
Check for inconsistencies in file's filesystem provided layer 1.
The check is performed only if the ``item`` is a non-recent file and if
its layer 1 is present. If its layer 1 is missing, an appropriate
notice is raised by the :meth:`check_fslayer1lines` method as
applicable.
:type item: :class:`Item`
:arg item: object to check
"""
if item.is_nonrecent_file and item.has_fslayer(1):
layer1 = item.fslayer1('filename')
# Check for extra lines
if 'pools' in layer1:
item.add_notice(WarningNotice('L1Extra'))
# # Check for mismatch (never observed to fail)
# if item.fslayer1('filename') != item.fslayer1('fsid'):
# raise ErrorNotice('L1Mismatch',
# filename1=item.fslayername(1,'filename'),
# filename2=item.fslayername(1,'fsid'))
# Check BFID
if not item.is_file_empty:
bfid = item.fslayer1_bfid() or ''
if not bfid:
raise ErrorNotice('L1BFIDNone')
elif len(bfid) < 8:
raise ErrorNotice('L1BFIDBad', bfid=bfid)
def check_fslayer4lines(self, item):
"""
Check whether a file's filesystem provided layer 4 is corrupted.
The check is performed only if the ``item`` is a non-recent file and if
a HSM may be used.
:type item: :class:`Item`
:arg item: object to check
"""
if item.is_nonrecent_file and (item.fslayer2_property('hsm') != 'no'):
try:
layer_lines = item.fslayerlines(4, 'filename')
except (OSError, IOError) as e:
raise ErrorNotice('NoL4File', filename=item.fslayername(4),
exception=e.strerror or '')
else:
if not layer_lines:
raise ErrorNotice('L4Empty', filename=item.fslayername(4))
def check_fslayer4(self, item):
"""
Check for inconsistencies in ``item``'s filesystem provided layer 4.
The check is performed only if the ``item`` is a non-recent file and if
its layer 4 is present. If its layer 4 is missing, an appropriate
notice is raised by the :meth:`check_fslayer4lines` method as
applicable.
:type item: :class:`Item`
:arg item: object to check
"""
if item.is_nonrecent_file and item.has_fslayer(4):
layer4 = item.fslayer4('filename')
# Check for extra lines
if 'pools' in layer4:
item.add_notice(WarningNotice('L4Extra'))
# # Check for mismatch (never observed to fail)
# if item.fslayer4('filename') != item.fslayer4('fsid'):
# raise ErrorNotice('L4Mismatch',
# filename1=item.fslayername(4,'filename'),
# filename2=item.fslayername(4,'fsid'))
# Check BFID
if (not item.is_file_empty) and (not item.fslayer4_bfid()):
raise ErrorNotice('L4BFIDNone')
def check_enstore_file_info(self, item):
"""
Check for inconsistencies in ``item``'s Enstore provided file info.
This check is performed only if the ``item`` is a non-recent file and
its BFID is obtained.
:type item: :class:`Item`
:arg item: object to check
"""
if item.is_nonrecent_file:
efi = item.enstore_file_info
bfid_layer1 = item.fslayer1_bfid() # Links Chimera with Enstore.
if not isinstance(efi, dict):
raise ErrorNotice('FileInfoBadType',
bfid_layer1=bfid_layer1,
type_=type(efi).__name__,
value=efi)
if efi.get('bfid'): # Unsure if this line is necessary.
if not item.is_enstore_file_info_ok:
if efi['status'][0] == enstore_errors.NO_FILE:
# Note: enstore_errors.NO_FILE == 'NO SUCH FILE/BFID'
raise ErrorNotice('NoFileInfo',
bfid_layer1=bfid_layer1,)
else:
raise ErrorNotice('FileInfoBad',
bfid_layer1=bfid_layer1,
status=efi['status'])
elif not item.is_file_deleted:
# This state is normal.
empty_values = ('', None, 'None')
if efi.get('pnfs_name0') in empty_values:
raise ErrorNotice('FileInfoPathNone',
bfid_layer1=bfid_layer1)
efi_pnfsid = efi.get('pnfsid')
if efi_pnfsid in empty_values:
raise ErrorNotice('FileInfoPNFSIDNone',
bfid_layer1=bfid_layer1)
elif not enstore_namespace.is_id(efi_pnfsid):
# Note: enstore_namespace.is_id expects a str.
raise ErrorNotice('FileInfoPNFSIDBad',
bfid_layer1=bfid_layer1,
pnfsid=efi_pnfsid)
def check_enstore_volume_info(self, item):
"""
Check for inconsistencies in ``item``'s Enstore provided volume info.
This check is performed only if the ``item`` is a non-recent file and
its volume name is obtained.
:type item: :class:`Item`
:arg item: object to check
"""
if item.is_nonrecent_file:
evi = item.enstore_volume_info
bfid_layer1 = item.fslayer1_bfid() # Links Chimera with Enstore.
if not isinstance(evi, dict):
raise ErrorNotice('VolInfoBadType',
bfid_layer1=bfid_layer1,
type_=type(evi).__name__,
value=evi)
if evi.get('external_label'): # Unsure if this line is necessary.
if not item.is_enstore_volume_info_ok:
if evi['status'][0] == enstore_errors.NOVOLUME:
# enstore_errors.NOVOLUME = 'NOVOLUME'
raise ErrorNotice('NoVolInfo', bfid_layer1=bfid_layer1)
else:
raise ErrorNotice('VolInfoBad',
bfid_layer1=bfid_layer1,
status=evi['status'])
def check_bfid_match(self, item):
"""
Check for a mismatch in the file's layer 1 and layer 4 BFIDs.
This check is performed only if the ``item`` is a file, is not a copy,
is not marked deleted, and its layer 1 and 4 exist.
:type item: :class:`Item`
:arg item: object to check
"""
if (item.is_file() and (not item.is_file_a_copy) and
(not item.is_file_deleted) and item.has_fslayer(1) and
item.has_fslayer(4)):
fslayer1_bfid = item.fslayer1_bfid()
fslayer4_bfid = item.fslayer4_bfid()
if fslayer1_bfid != fslayer4_bfid:
raise ErrorNotice('BFIDMismatch',
bfid_layer1=fslayer1_bfid,
bfid_layer4=fslayer4_bfid)
def check_file_deleted(self, item):
"""
Check if file is marked deleted.
:type item: :class:`Item`
:arg item: object to check
"""
if item.is_file():
deleted = item.enstore_file_info.get('deleted')
if deleted is None:
raise ErrorNotice('FileInfoDelNone')
else:
if (not item.is_file_a_copy) and (deleted=='yes'):
raise ErrorNotice('MarkedDel')
if deleted not in ('yes', 'no'):
raise ErrorNotice('FileInfoDelBad', deleted=deleted)
def check_recent(self, item):
"""
Check if the file is recent.
This check can help in the assessment of other notices.
:type item: :class:`Item`
:arg item: object to check
"""
if item.is_recent:
raise InfoNotice('TooRecent')
def check_empty(self, item):
"""
Check if the file has :obj:`~os.lstat` size zero.
This check is performed only if the ``item`` is a non-recent file.
:type item: :class:`Item`
:arg item: object to check
"""
if item.is_nonrecent_file and item.is_file_empty:
fslayer2_size = item.fslayer2_property('length')
# fslayer2_size can be 0, None, etc.
if fslayer2_size is None: fslayer2_size = 'not present'
NoticeType = InfoNotice if fslayer2_size==0 else ErrorNotice
notice_key_suffix = 'Ok' if item.is_enstore_file_info_ok else 'No'
notice_key = 'Size0FileInfo{0}'.format(notice_key_suffix)
raise NoticeType(notice_key, layer2_size=fslayer2_size)
def check_sfs_path(self, item):
"""
Check if any of the PNFS IDs in Enstore for the current file has
more than one path.
:type item: :class:`Item`
:arg item: object to check
"""
# Note: This is very slow with PNFS, but is not too slow with Chimera.
if item.is_file():
for file_info in item.enstore_files_list_by_path:
pnfs_name = file_info.get('pnfs_name0')
pnfs_id = file_info.get('pnfsid')
if (not pnfs_name) or (pnfs_id in ('', None, 'None')):
continue
sfs_paths = item.sfs_paths(pnfs_name, pnfs_id)
if len(sfs_paths) > 1:
item.add_notice(ErrorNotice('MultSFSPaths',
pnfsid=pnfs_id,
paths=', '.join(sfs_paths)))
def check_volume_label(self, item):
"""
Check file's layer 4 and Enstore volume labels for their presence and
also for a mismatch.
This check is performed only if the ``item`` is a file and is not a
copy.
:type item: :class:`Item`
:arg item: object to check
"""
if item.is_file() and (not item.is_file_a_copy):
# Retrieve values
volume_layer4 = item.fslayer4('filename').get('volume')
volume_enstore = item.enstore_file_info.get('external_label')
# Check values for availability
if item.has_fslayer(4) and (not volume_layer4):
item.add_notice(ErrorNotice('L4VolLabelNone'))
if item.is_enstore_file_info_ok and (not volume_enstore):
item.add_notice(ErrorNotice('FileInfoVolLabelNone'))
# Check values for consistency
if (volume_layer4 and volume_enstore and
(volume_layer4 != volume_enstore)):
raise ErrorNotice('VolLabelMismatch',
volume_layer4=volume_layer4,
volume_enstore=volume_enstore)
def check_location_cookie(self, item):
"""
Check file's layer 4 and Enstore location cookies for their presence
and also for a mismatch.
This check is performed only if the ``item`` is a file and is not a
copy.
:type item: :class:`Item`
:arg item: object to check
"""
if item.is_file() and (not item.is_file_a_copy):
# Retrieve values
layer4 = item.fslayer4('filename')
efi = item.enstore_file_info
lc_layer4 = layer4.get('location_cookie')
lc_enstore = efi.get('location_cookie')
lc_cur_layer4 = layer4.get('location_cookie_current')
lc_cur_enstore = efi.get('location_cookie_current')
is_lc = enstore_functions3.is_location_cookie
# Check values for availability
if item.has_fslayer(4):
if not lc_layer4:
item.add_notice(ErrorNotice('L4LCNone'))
elif not is_lc(lc_layer4):
item.add_notice(ErrorNotice('L4LCBad',
lc_layer4=lc_layer4))
if item.is_enstore_file_info_ok:
if not lc_enstore:
item.add_notice(ErrorNotice('FileInfoLCNone'))
elif not is_lc(lc_enstore):
item.add_notice(ErrorNotice('FileInfoLCBad',
lc_enstore=lc_enstore))
# Check values for consistency
if (lc_cur_layer4 and lc_cur_enstore and
(lc_cur_layer4 != lc_cur_enstore)):
raise ErrorNotice('LCMismatch',
current_lc_layer4=lc_cur_layer4,
current_lc_enstore=lc_cur_enstore)
def check_size(self, item):
"""
Check file's lstat, layer 4 and Enstore file sizes for their presence
and also for a mismatch.
:type item: :class:`Item`
:arg item: object to check
"""
if item.is_file():
# Retrieve values
sizes = item.sizes
# Check values for availability
if sizes['lstat'] is None:
item.add_notice(ErrorNotice('SizeNone'))
# Note: Layer 2 size is checked in the check_fslayer2_size and
# check_fslayer2_size_match methods.
if item.has_fslayer(4) and (sizes['layer 4'] is None):
item.add_notice(ErrorNotice('L4SizeNone'))
if item.is_enstore_file_info_ok and (sizes['Enstore'] is None):
item.add_notice(ErrorNotice('FileInfoSizeNone'))
# Check values for consistency
num_unique_sizes = len(set(s for s in sizes.values() if
(s is not None))) # Disambiguates from 0
if num_unique_sizes > 1:
# sizes = dict((b'size_{0}'.format(k),v) for k,v in
# sizes.items())
# raise ErrorNotice('SizeMismatch', **sizes)
raise ErrorNotice('SizeMismatch',
bfid_layer1=item.fslayer1_bfid(),
size=ReversibleDict(sizes))
# Note: The `size` arg name above must remain singular because
# this name is joined to the key names in its value. Refer to
# the `Notice.to_exportable_dict.flattened_dict` function.
def check_file_family(self, item):
"""
Check file's layer 4 and Enstore file family for their presence and
also for a mismatch.
:type item: :class:`Item`
:arg item: object to check
"""
if item.is_file():
# Retrieve values
ff_layer4 = item.fslayer4('filename').get('file_family')
ff_enstore = item.enstore_volume_info.get('file_family')
# Check values for availability
if item.has_fslayer(4) and (not ff_layer4):
item.add_notice(ErrorNotice('L4FFNone'))
if item.is_enstore_volume_info_ok and (not ff_enstore):
item.add_notice(ErrorNotice('VolInfoFFNone'))
# Check values for consistency
if (ff_layer4 and ff_enstore and
(ff_enstore not in (ff_layer4,
'{0}-MIGRATION'.format(ff_layer4),
ff_layer4.partition('-MIGRATION')[0],
ff_layer4.partition('_copy_')[0],))):
raise ErrorNotice('FFMismatch', ff_layer4=ff_layer4,
ff_enstore=ff_enstore)
# def check_library(self, item):
# """
# Check file's Enstore library name for its presence and validity.
#
# This check is performed only if the ``item`` is a file and its Enstore
# volume info is ok.
#
# :type item: :class:`Item`
# :arg item: object to check
# """
#
# if item.is_file() and item.is_enstore_volume_info_ok:
#
# try: library = item.enstore_volume_info['library']
# except KeyError: raise ErrorNotice('VolInfoLibNone')
# else:
# if (library and
# (library not in item.enstore.library_managers) and
# ('shelf' not in library)):
# raise ErrorNotice('VolInfoLibBad', library=library)
def check_drive(self, item):
"""
Check file's layer 4 and Enstore drives for their presence and also for
a mismatch.
This check is performed only if the ``item`` is a file and is not a
copy.
:type item: :class:`Item`
:arg item: object to check
"""
if item.is_file() and (not item.is_file_a_copy):
# Retrieve values
drive_layer4 = item.fslayer4('filename').get('drive')
drive_enstore = item.enstore_file_info.get('drive')
# Check values for availability
if item.has_fslayer(4) and (not drive_layer4):
item.add_notice(WarningNotice('L4DriveNone'))
if item.is_enstore_file_info_ok and (not drive_enstore):
item.add_notice(WarningNotice('FileInfoDriveNone'))
# Check values for consistency
drive_enstore_excludes = (drive_layer4, 'imported', 'missing',
'unknown:unknown')
if (drive_layer4 and drive_enstore and
(drive_enstore not in drive_enstore_excludes)):
raise ErrorNotice('DriveMismatch', drive_layer4=drive_layer4,
drive_enstore=drive_enstore)
def check_crc(self, item):
"""
Check file's layer 4 and Enstore CRCs for their presence and also for
a mismatch.
This check is performed only if the ``item`` is not an empty file.
:type item: :class:`Item`
:arg item: object to check
"""
if item.is_file() and (not item.is_file_empty):
# Retrieve values
crc_layer2 = item.fslayer2_property('crc')
crc_layer4 = item.fslayer4('filename').get('crc')
efi = item.enstore_file_info
crc_enstore = crc_enstore_0seeded = efi.get('complete_crc')
crc_enstore_1seeded = efi.get('complete_crc_1seeded')
media_type = item.enstore_volume_info.get('media_type')
# Check values for availability
# Note: It is ok for crc_layer2 to be unavailable.
if (item.has_fslayer(4) and (not crc_layer4) and
item.is_enstore_file_info_ok and (not crc_enstore)):
# Note: When crc_layer4 or crc_enstore are missing, they are
# often missing together.
item.add_notice(WarningNotice('CRCNone'))
else:
if item.has_fslayer(4) and (not crc_layer4):
item.add_notice(WarningNotice('L4CRCNone'))
if item.is_enstore_file_info_ok and (not crc_enstore):
item.add_notice(WarningNotice('FileInfoCRCNone'))
# Check values for consistency
if (crc_layer2 and (crc_enstore_0seeded or crc_enstore_1seeded) and
media_type and (media_type != 'null') and
(crc_layer2 not in (crc_enstore_0seeded, crc_enstore_1seeded))):
item.add_notice(ErrorNotice('L2CRCMismatch',
crc_layer2=crc_layer2,
crc_enstore_0seeded=crc_enstore_0seeded,
crc_enstore_1seeded=crc_enstore_1seeded))
if crc_layer4 and crc_enstore and (crc_layer4 != crc_enstore):
raise ErrorNotice('L4CRCMismatch', crc_layer4=crc_layer4,
crc_enstore=crc_enstore)
def check_path(self, item):
"""
Check file's layer 4, Enstore, and filesystem provided paths for
their presence and also for a mismatch.
:type item: :class:`Item`
:arg item: object to check
"""
if item.is_file() and (not item.is_file_a_copy):
# Retrieve values
paths = item.norm_paths
# Check values for availability
# Note: 'filesystem' and 'Enstore' paths are checked previously.
if item.has_fslayer(4) and (not paths['layer 4']):
item.add_notice(ErrorNotice('L4PathNone'))
# Check values for consistency
num_unique_paths = len(set(p for p in paths.values() if p))
if num_unique_paths > 1:
raise ErrorNotice('PathMismatch',
bfid_layer1=item.fslayer1_bfid(),
path=ReversibleDict(paths))
# Note: The `path` arg name above must remain singular because
# this name is joined to the key names in its value. Refer to
# the `Notice.to_exportable_dict.flattened_dict` function.
def check_pnfsid(self, item):
"""
Check file's filesystem ID file, layer 4 and Enstore PNFS IDs for
their presence and also for a mismatch.
:type item: :class:`Item`
:arg item: object to check
"""
if item.is_file():
# Retrieve values
pnfsids = {'ID-file': item.fsid,
'layer 4': item.fslayer4('filename').get('pnfsid'),
'Enstore': item.enstore_file_info.get('pnfsid'),
}
# Check values for availability
# Note: 'ID-file' and 'Enstore' PNFS IDs are checked previously.
if item.has_fslayer(4) and (not pnfsids['layer 4']):
item.add_notice(ErrorNotice('L4PNFSIDNone'))
# Check values for consistency
num_unique_pnfsids = len(set(p for p in pnfsids.values() if p))
if num_unique_pnfsids > 1:
raise ErrorNotice('PNFSIDMismatch',
bfid_layer1=item.fslayer1_bfid(),
pnfsid=ReversibleDict(pnfsids))
# Note: The `pnfsid` arg name above must remain singular
# because this name is joined to the key names in its value.
# Refer to the `Notice.to_exportable_dict.flattened_dict`
# function.
def check_copy(self, item):
"""
Check if file has any of the copy attributes set.
This check is performed only if the ``item`` is a file and its Enstore
file info is ok.
:type item: :class:`Item`
:arg item: object to check
"""
if item.is_file() and item.is_enstore_file_info_ok:
# Retrieve values
efi = item.enstore_file_info
# Identify positive values
possible_copy_types = ('multiple', 'primary', 'migrated',
'migrated_to')
is_true = lambda v: v in ('yes', 'Yes', '1', 1, True)
detected_copy_types = []
for copy_type in possible_copy_types:
key = 'is_{0}_copy'.format(copy_type)
val = efi.get(key)
if val and is_true(val):
copy_type = copy_type.replace('_', ' ')
detected_copy_types.append(copy_type)
# Report positive values
if detected_copy_types:
detected_copy_types = ['{0} copy'.format(c) for c in
detected_copy_types]
detected_copy_types = str(PrintableList(detected_copy_types))
raise InfoNotice('MarkedCopy', copy_types=detected_copy_types)
class Item:
"""
Return an object corresponding to a file or a directory.
Before an instance is created:
- The :attr:`enstore` class attribute must be set to an :class:`Enstore`
class instance. This must be done individually in each process in which
this :class:`Item` class is to be used.
"""
start_time = time.time()
enstore = None # To be set individually in each process.
chimera = Chimera()
_cache_volume_info = settings['cache_volume_info']
if _cache_volume_info:
#volume_info_cache = multiprocessing.Manager().dict()
volume_info_cache = MPSubDictCache()
def __init__(self, item_path):
"""Initialize the ``item``.
:type item_path: :obj:`str`
:arg item_path: absolute filesystem path of the item.
"""
self.path = item_path
self.noticegrp = NoticeGrp(self.path)
self.is_scanned = False
self._cached_exceptions = {}
def __repr__(self):
"""
Return a string representation.
This string allows the class instance to be reconstructed in another
process.
"""
return '{0}({1})'.format(self.__class__.__name__, repr(self.path))
def __eq__(self, other):
"""
Perform an equality comparison.
:type other: :class:`Item`
:arg other: object to compare.
:rtype: :obj:`bool`
"""
return (self.path==other.path)
def __str__(self):
"""Return a string representation."""
return self.path
@staticmethod
def _readfile(filename):
"""Return the stripped contents of the file corresponding to the
specified filename."""
return open(filename).read().strip()
def add_notice(self, notice):
"""
Add the provided ``notice`` to the group of notices associated with
the ``item``.
:type notice: :class:`Notice`
:arg notice: object to add.
"""
self.noticegrp.add_notice(notice)
@memoize_property
def dirname(self):
"""
Return the directory name of the ``item``.
:rtype: :obj:`str`
"""
return os.path.dirname(self.path)
@memoize_property
def basename(self):
"""
Return the base name of the ``item``.
:rtype: :obj:`str`
"""
return os.path.basename(self.path)
@memoize_property
def fsidname(self):
"""
Return the name of the ID file.
:rtype: :obj:`str`
"""
return os.path.join(self.dirname, '.(id)({0})'.format(self.basename))
@memoize
def parentfsidname(self, source='parent_file'):
"""
Return the name of the parent ID file.
:type source: :obj:`str`
:arg source: ``parent_file`` or ``parent_dir``.
:rtype: :obj:`str`
"""
if source == 'parent_file':
return os.path.join(self.dirname,
'.(parent)({0})'.format(self.fsid))
elif source == 'parent_dir':
return self.__class__(self.dirname).fsidname
else:
msg = 'Invalid value for source: {0}'.format(source)
raise ValueError(msg)
@memoize
def fslayername(self, layer_num, source='filename'):
"""
Return the name of layer information file for the specified layer
number.
:type layer_num: :obj:`int`
:arg layer_num: valid layer number
:type source: :obj:`str`
:arg source: ``filename`` or ``fsid``.
:rtype: :obj:`str`
"""
if source == 'filename':
return os.path.join(self.dirname,
'.(use)({0})({1})'.format(layer_num,
self.basename))
elif source == 'fsid':
return os.path.join(self.dirname,
'.(access)({0})({1})'.format(self.fsid,
layer_num))
else:
msg = 'Invalid value for source: {0}'.format(source)
raise ValueError(msg)
@memoize
def fslayerlines(self, layer_num, source):
"""
Return a :obj:`list` containing the lines contained in the layer
information file for the specified layer number.
:type layer_num: :obj:`int`
:arg layer_num: valid layer number
:type source: :obj:`str`
:arg source: ``filename`` or ``fsid``. It does not have a default value
to allow memoization to work correctly.
:rtype: :obj:`list`
A sample returned :obj:`list` for layer 1 is::
['CDMS124711171800000']
A sample returned :obj:`list` for layer 2 is::
['2,0,0,0.0,0.0', ':c=1:6d0f3ab9;h=yes;l=8192000000;']
A sample returned :obj:`list` for layer 4 is::
['VON077', '0000_000000000_0001544', '635567027', 'volatile',
'/pnfs/ilc4c/LOI/uu/uu_Runs_130.FNAL2.tar.gz', '',
'003A0000000000000005E2E0', '', 'CDMS124655315900000',
'stkenmvr204a:/dev/rmt/tps0d0n:1310050819', '954405925']
Layers are unavailable for directories.
The following exception will be raised if the layer information file is
unavailable::
IOError: [Errno 2] No such file or directory
Error 2 is ENOENT.
"""
# Re-raise cached exception if it exists
key = ('fslayerlines', layer_num, source)
try:
raise self._cached_exceptions[key]
except KeyError: pass
# Read file
fslayername = self.fslayername(layer_num, source)
try:
lines = open(fslayername).readlines()
except Exception as e:
self._cached_exceptions[key] = e
raise
lines = [ln.strip() for ln in lines]
return lines
@memoize
def fslayer1(self, source):
"""
Return a :obj:`dict` containing layer 1 information for a file.
:type source: :obj:`str`
:arg source: ``filename`` or ``fsid``. It does not have a default value
to allow memoization to work correctly.
:rtype: :obj:`dict`
A sample returned :obj:`dict` is::
{u'bfid': 'CDMS124711171800000'}
An empty :obj:`dict` is returned in the event of an OS or IO exception.
Layer 1 information is unavailable for directories.
"""
layer = {}
# Get lines
try: fslayerlines = self.fslayerlines(1, source)
except (OSError, IOError): return layer
# Parse lines
if fslayerlines:
# Save BFID
try: layer['bfid'] = fslayerlines[0]
except IndexError: pass
# Save anything found in any remaining lines
pools = fslayerlines[1:]
if pools: layer['pools'] = pools
return layer
@memoize
def fslayer1_bfid(self, source='filename'):
"""
Return the layer 1 value for BFID, or return :obj:`None` if
unavailable.
:type source: :obj:`str`
:arg source: ``filename`` or ``fsid``
:rtype: :obj:`str` or :obj:`None`
"""
return self.fslayer1(source).get('bfid')
@memoize
def fslayer2(self, source):
"""
Return a :obj:`dict` containing layer 2 information for a file.
:type source: :obj:`str`
:arg source: ``filename`` or ``fsid``. It does not have a default value
to allow memoization to work correctly.
:rtype: :obj:`dict`
A sample returned dict is::
{u'numbers': [u'2', u'0', u'0', u'0.0', u'0.0'],
u'properties': {u'crc': [4000420189], u'length': [8192000000],
u'hsm': [u'yes']}}
Each value in the ``properties`` :obj:`dict` is a :obj:`list`. This
:obj:`list` can have more than one item.
An empty :obj:`dict` is returned in the event of an OS or IO exception.
Layer 2 information is unavailable for directories.
"""
layer = {}
# Get lines
try: fslayerlines = self.fslayerlines(2, source)
except (OSError, IOError): return layer
# Parse lines
if fslayerlines:
# Save numbers
try: numbers = fslayerlines[0]
except IndexError: pass
else:
layer['numbers'] = numbers.split(',') # (line 1)
# Save properties
try: properties = fslayerlines[1] # (line 2)
except IndexError: pass
else:
pkm = { # Mapped names for Property Keys.
'c': 'crc', 'h': 'hsm', 'l': 'length'}
pvt = { # Transforms applied to Property Values.
'crc': lambda s: int(s.split(':')[1], 16),
'length': int,
# 'hsm': lambda h: {'yes': True,
# 'no': False}.get(h), #ignore
}
# Sample: ':c=1:ee71915d;h=yes;l=8192;'
if properties[0] == ':': properties = properties[1:]
# Sample: 'c=1:ee71915d;h=yes;l=8192;'
properties = sorted(p.split('=',1) for p in
properties.split(';') if p)
# Sample: [['c', '1:ee71915d'], ['h', 'yes'], ['l', '8192']]
properties = dict((k,[v[1] for v in v])
for k,v in
itertools.groupby(properties, lambda p: p[0]))
# This transforms the list into a dict in a way that if
# multiple values exist for a key, they are all noted in the
# list. Duplicate values are noted as well. Missing items are
# possible.
# Sample: {'h': ['yes'], 'c': ['1:ee71915d'], 'l': ['8192']}
properties = dict((pkm.get(k,k), v) for k,v in
properties.items())
# Sample: {'hsm': ['yes'], 'crc': ['1:ee71915d'],
# 'length': ['8192']}
properties = dict((k, [(pvt[k](v) if pvt.get(k) else v) for v
in vlist])
for k,vlist in properties.items())
# Sample: {'hsm': ['yes'], 'crc': [4000420189],
# 'length': [8192]}
layer['properties'] = properties
# Save anything found in any remaining lines
pools = fslayerlines[2:]
if pools: layer['pools'] = pools
return layer
@memoize
def fslayer2_property(self, l2property, source='filename'):
"""
Return the value for the specified layer 2 property, or return
:obj:`None` if unavailable.
:type l2property: :obj:`str`
:arg l2property: typically ``crc``, ``hsm``, or ``length``.
:type source: :obj:`str`
:arg source: ``filename`` or ``fsid``
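For example (illustrative), given the sample layer 2 shown in
:meth:`fslayer2`, ``item.fslayer2_property('length')`` would return
``8192000000``, and a missing property would return :obj:`None`.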
"""
return self.fslayer2(source).get('properties', {}).get(l2property,
[None])[0]
@memoize
def fslayer4(self, source):
"""
Return a :obj:`dict` containing layer 4 information for a file.
:type source: :obj:`str`
:arg source: ``filename`` or ``fsid``. It does not have a default value
to allow memoization to work correctly.
:rtype: :obj:`dict`
A sample returned dict is::
{u'original_name': '/pnfs/BDMS/tariq/p2_004/p2_004_struc.LN22',
u'drive': 'stkenmvr214a:/dev/rmt/tps0d0n:1310051193',
u'volume': 'VOO007', 'crc': 1253462682, u'file_family': 'BDMS',
u'location_cookie': '0000_000000000_0000454',
u'pnfsid': '000E00000000000000013248',
u'bfid': 'CDMS123258695100001',
'size': 294912,
'location_cookie_list': ['0000', '000000000', '0000454'],
'location_cookie_current': '0000454',}
An empty :obj:`dict` is returned in the event of an OS or IO exception.
Layer 4 information is unavailable for directories.
"""
layer = {}
# Get lines
try: fslayerlines = self.fslayerlines(4, source)
except (OSError, IOError): return layer
# Parse lines
if fslayerlines:
keys = ('volume', 'location_cookie', 'size', 'file_family',
'original_name', None, 'pnfsid', None, 'bfid', 'drive',
'crc',)
transforms = \
{'crc': lambda s: int(long(s)),
'size': lambda s: int(long(s)),
# Note that "int(long(s))" covers strings such as '123L' also.
}
# Parse known lines
for i, k in enumerate(keys):
if k is not None:
try: layer[k] = fslayerlines[i]
except IndexError: break
# Transform as applicable
for k, tr_func in transforms.items():
if k in layer:
layer[k] = tr_func(layer[k])
# Parse extra lines
pools = fslayerlines[len(keys):]
if pools: layer['pools'] = pools
# Add calculated fields
# Also see similar section in `file_info` method.
try:
layer['location_cookie_list'] = \
layer['location_cookie'].split('_')
except KeyError: pass
else:
try:
layer['location_cookie_current'] = \
layer['location_cookie_list'][-1]
except IndexError: pass
return layer
@memoize
def fslayer4_bfid(self, source='filename'):
"""
Return the layer 4 value for BFID, or return :obj:`None` if
unavailable.
:type source: :obj:`str`
:arg source: ``filename`` or ``fsid``
:rtype: :obj:`str` or :obj:`None`
"""
return self.fslayer4(source).get('bfid')
@memoize
def has_fslayer(self, layer_num, source='filename'):
"""
Return a :obj:`bool` indicating whether the specified layer number is
available.
:type source: :obj:`str`
:arg source: ``filename`` or ``fsid``
:rtype: :obj:`bool`
"""
attr = 'fslayer{0}'.format(layer_num)
getter = getattr(self, attr, {})
return bool(getter(source))
@memoize_property
def fsid(self):
"""
Return the filesystem ID.
This is available for both files and directories.
:rtype: :obj:`str`
Example: ``00320000000000000001D6B0``
"""
# Re-raise cached exception if it exists
key = 'fsid'
try: raise self._cached_exceptions[key]
except KeyError: pass
# Read file
try:
return self._readfile(self.fsidname)
except Exception as e:
self._cached_exceptions[key] = e
raise
@memoize
def parentfsid(self, source):
"""
Return the parent's filesystem ID.
:type source: :obj:`str`
:arg source: ``filename`` or ``fsid``. It does not have a default value
to allow memoization to work correctly.
:rtype: :obj:`str`
This is available for both files and directories.
"""
# Re-raise cached exception if it exists
key = ('parentfsid', source)
try: raise self._cached_exceptions[key]
except KeyError: pass
# Read file
filename = self.parentfsidname(source)
try:
return self._readfile(filename)
except Exception as e:
self._cached_exceptions[key] = e
raise
@memoize_property
def lstat(self):
"""
Return :obj:`~os.lstat` info.
Sample::
posix.stat_result(st_mode=33188, st_ino=1275083688, st_dev=24L,
st_nlink=1, st_uid=13194, st_gid=1623,
st_size=8192000000, st_atime=1248570004,
st_mtime=1153410176, st_ctime=1153407484)
"""
return os.lstat(self.path)
@memoize_property
def is_recent(self):
"""Return a :obj:`bool` indicating whether the ``item`` was modified
within the past one day since the scan began."""
return (self.start_time - self.lstat.st_mtime) < 86400
@memoize_property
def is_nonrecent(self):
"""Return a :obj:`bool` indicating whether the ``item`` was modified
before the past one day since the scan began."""
return (not self.is_recent)
@memoize_property
def st_mode(self):
"""
Return the protection mode.
:rtype: :obj:`int`
"""
return self.lstat.st_mode
@memoize
def is_file(self):
"""
Return a :obj:`bool` indicating whether the ``item`` is a regular file.
Given that :obj:`~os.lstat` is used, this returns :obj:`False` for
symbolic links.
"""
return stat.S_ISREG(self.st_mode)
@memoize_property
def is_recent_file(self):
"""Return a :obj:`bool` indicating whether the ``item`` is a file that
was modified within the past one day since the scan began."""
return (self.is_file() and self.is_recent)
@memoize_property
def is_nonrecent_file(self):
"""Return a :obj:`bool` indicating whether the ``item`` is a file that
was modified before the past one day since the scan began."""
return (self.is_file() and self.is_nonrecent)
@memoize_property
def is_file_a_copy(self):
"""Return a :obj:`bool` indicating whether the ``item`` is a file that
is a copy."""
is_true = lambda v: v in ('yes', 'Yes', '1', 1, True)
is_true2 = lambda k: is_true(self.enstore_file_info.get(k))
return (self.is_file() and
(is_true2('is_multiple_copy') or is_true2('is_migrated_copy')))
@memoize_property
def is_file_bad(self):
"""Return a :obj:`bool` indicating whether the ``item`` is a file that
is marked bad."""
return (self.is_file() and
(self.path.startswith('.bad') or self.path.endswith('.bad')))
@memoize_property
def is_file_deleted(self):
"""Return a :obj:`bool` indicating whether the ``item`` is a deleted
file."""
return (self.is_file() and
(self.enstore_file_info.get('deleted')=='yes'))
@memoize_property
def is_file_empty(self):
"""Return a :obj:`bool` indicating whether the ``item`` is a file with
size 0."""
return (self.is_file() and (self.lstat.st_size==0))
@memoize
def is_dir(self):
"""
Return a :obj:`bool` indicating whether the ``item`` is a directory.
Given that :obj:`~os.lstat` is used, this returns :obj:`False` for
symbolic links.
"""
return stat.S_ISDIR(self.st_mode)
@memoize
def is_link(self):
"""
Return a :obj:`bool` indicating whether the ``item`` is a symbolic
link.
.. Given that :obj:`~os.lstat` is used, this returns :obj:`True` for
symbolic links.
"""
return stat.S_ISLNK(self.st_mode)
@memoize
def is_storage_path(self):
"""Return a :obj:`bool` indicating whether the ``item`` is a path in
the Enstore namespace."""
return bool(enstore_namespace.is_storage_path(self.path,
check_name_only=1))
# is_access_name_re = re.compile("\.\(access\)\([0-9A-Fa-f]+\)")
# @memoize
# def is_access_name(self):
# return bool(re.search(self.is_access_name_re, self.path_basename))
@memoize_property
def enstore_file_info(self):
"""
Return the available file info from the Enstore info client.
This is returned for the layer 1 BFID.
:rtype: :obj:`dict`
Sample::
{'storage_group': 'astro', 'uid': 0,
'pnfs_name0': '/pnfs/fs/usr/astro/fulla/fulla.gnedin.026.tar',
'library': 'CD-LTO4F1', 'package_id': None,
'complete_crc': 3741678891L, 'size': 8192000000L,
'external_label': 'VOO732', 'wrapper': 'cpio_odc',
'package_files_count': None, 'active_package_files_count': None,
'gid': 0, 'pnfsid': '00320000000000000001DF20',
'archive_mod_time': None, 'file_family_width': None,
'status': ('ok', None), 'deleted': 'no', 'archive_status': None,
'cache_mod_time': None, 'update': '2009-07-25 23:57:23.222394',
'file_family': 'astro',
'location_cookie': '0000_000000000_0000150',
'cache_location': None, 'original_library': None,
'bfid': 'CDMS124858424200000', 'tape_label': 'VOO732',
'sanity_cookie': (65536L, 1288913660L), 'cache_status': None,
'drive': 'stkenmvr211a:/dev/rmt/tps0d0n:1310051081',
'location_cookie_list': ['0000', '000000000', '0000150'],
'location_cookie_current': '0000150',
'complete_crc_1seeded': 2096135468L,}
Sample if BFID is :obj:`None`::
{'status': ('KEYERROR', 'info_server: key bfid is None'),
'bfid': None}
Sample if BFID is invalid::
{'status': ('WRONG FORMAT', 'info_server: bfid 12345 not valid'),
'bfid': '12345'}
"""
bfid = self.fslayer1_bfid()
file_info = self.enstore.info_client.bfid_info(bfid)
# Update status field as necessary
if isinstance(file_info, dict) and ('status' not in file_info):
file_info['status'] = (enstore_errors.OK, None)
# Note: enstore_errors.OK == 'ok'
# Add calculated fields
# Also see similar section in `fslayer4` method.
try:
file_info['location_cookie_list'] = \
file_info['location_cookie'].split('_')
except KeyError: pass
else:
try:
file_info['location_cookie_current'] = \
file_info['location_cookie_list'][-1]
except IndexError: pass
try:
crc, size = file_info['complete_crc'], file_info['size']
except KeyError: pass
else:
#file_info['complete_crc_0seeded'] = crc # not necessary
if (crc is not None) and (size is not None):
file_info['complete_crc_1seeded'] = \
enstore_checksum.convert_0_adler32_to_1_adler32(crc, size)
else:
file_info['complete_crc_1seeded'] = None
return file_info
@memoize_property
def is_enstore_file_info_ok(self):
"""Return a :obj:`bool` indicating whether Enstore file info is ok or
not."""
return bool(enstore_errors.is_ok(self.enstore_file_info))
@memoize_property
def enstore_volume_info(self):
"""
Return the available volume info from the Enstore info client.
If volume caching is enabled, volume info may be cached and may be
returned from cache if possible.
:rtype: :obj:`dict`
Sample (with no keys deleted)::
{'comment': ' ', 'declared': 1217546233.0, 'blocksize': 131072,
'sum_rd_access': 4, 'library': 'CD-LTO4F1',
'si_time': [1317155514.0, 1246553166.0], 'wrapper': 'cpio_odc',
'deleted_bytes': 77020524269L, 'user_inhibit': ['none', 'none'],
u'storage_group': 'ilc4c', 'system_inhibit': ['none', 'full'],
'external_label': 'VON077', 'deleted_files': 214,
'remaining_bytes': 598819328L, 'sum_mounts': 149,
'capacity_bytes': 858993459200L, 'media_type': 'LTO4',
'last_access': 1331879563.0, 'status': ('ok', None),
'eod_cookie': '0000_000000000_0001545', 'non_del_files': 1556,
'sum_wr_err': 0, 'unknown_files': 10, 'sum_wr_access': 1544,
'active_bytes': 746796833291L,
'volume_family': 'ilc4c.volatile.cpio_odc',
'unknown_bytes': 7112706371L, 'modification_time': 1246553159.0,
u'file_family': 'volatile', 'write_protected': 'y',
'sum_rd_err': 0, 'active_files': 1330,
'first_access': 1234948627.0}
Sample (with unneeded keys deleted)::
{'library': 'CD-LTO4F1', 'wrapper': 'cpio_odc',
'deleted_bytes': 0L, 'user_inhibit': ['none', 'none'],
u'storage_group': 'ilc4c', 'system_inhibit': ['none', 'full'],
'external_label': 'VON778', 'deleted_files': 0,
'media_type': 'LTO4', 'status': ('ok', None), 'unknown_files': 0,
'active_bytes': 818291877632L,
'volume_family': 'ilc4c.static.cpio_odc', 'unknown_bytes': 0L,
u'file_family': 'static', 'active_files': 411}
Sample if volume is :obj:`None`::
{'status': ('KEYERROR', 'info_server: key external_label is None'),
'work': 'inquire_vol', 'external_label': None}
Sample if volume is invalid::
{'status': ('WRONG FORMAT', 'info_server: bfid 12345 not valid'),
'bfid': '12345'}
"""
# Get volume name
volume = self.enstore_file_info.get('external_label')
if volume == '':
# This is done because self.enstore.info_client.inquire_vol('')
# hangs.
volume = None
# Conditionally try returning cached volume info
if self._cache_volume_info:
try: return self.volume_info_cache[volume]
except KeyError: volume_is_cacheable = True
else: volume_is_cacheable = False
# Get volume info from Enstore info client
volume_info = self.enstore.info_client.inquire_vol(volume)
if isinstance(volume_info, dict) and ('status' not in volume_info):
volume_info['status'] = (enstore_errors.OK, None)
# Note: enstore_errors.OK == 'ok'
# Add calculated volume info keys
try: volume_family = volume_info['volume_family']
except KeyError: pass
else:
calculated_keys = ('storage_group', 'file_family', 'wrapper')
# Note: 'wrapper' may very well already exist.
for k in calculated_keys:
if k not in volume_info:
getter = getattr(enstore_volume_family,
'extract_{0}'.format(k))
volume_info[k] = getter(volume_family)
# Conditionally process and cache volume info
if volume_is_cacheable and (volume not in self.volume_info_cache):
# Remove unneeded volume info keys, in order to reduce its memory
# usage. An alternate approach, possibly even a better one, is to
# use a whitelist instead of a blacklist.
unneeded_keys = (
'blocksize',
'capacity_bytes',
'comment',
'declared',
'eod_cookie',
'first_access',
'last_access',
'modification_time',
'non_del_files',
'remaining_bytes',
'si_time',
'sum_mounts',
'sum_rd_access',
'sum_rd_err',
'sum_wr_access',
'sum_wr_err',
'write_protected',
)
for k in unneeded_keys:
try: del volume_info[k]
except KeyError: pass
# Cache volume info
self.volume_info_cache[volume] = volume_info
return volume_info
@memoize_property
def is_enstore_volume_info_ok(self):
"""Return a :obj:`bool` indicating whether Enstore volume info is ok or
not."""
return bool(enstore_errors.is_ok(self.enstore_volume_info))
@memoize_property
def enstore_files_by_path(self):
"""
Return a :obj:`dict` containing information about the list of known
files in Enstore for the current path.
:rtype: :obj:`dict`
Sample::
{'status': ('ok', None),
'r_a': (('131.225.13.10', 59329), 21L,
'131.225.13.10-59329-1347478604.303243-2022-47708927252656'),
'pnfs_name0': '/pnfs/fs/usr/astro/idunn/rei256bin.015.tar',
'file_list':
[{'storage_group': 'astro', 'uid': 0,
'pnfs_name0': '/pnfs/fs/usr/astro/idunn/rei256bin.015.tar',
'library': 'shelf-CD-9940B', 'package_id': None,
'complete_crc': 2995796126L, 'size': 8192000000L,
'external_label': 'VO9502', 'wrapper': 'cpio_odc',
'package_files_count': None, 'active_package_files_count': None,
'gid': 0, 'pnfsid': '00320000000000000001CF38',
'archive_status': None, 'file_family_width': None,
'deleted': 'yes', 'archive_mod_time': None,
'cache_mod_time': None, 'update': '2009-09-27 15:57:10.011141',
'file_family': 'astro',
'location_cookie': '0000_000000000_0000020',
'cache_location': None,
'original_library': None, 'bfid': 'CDMS113926889300000',
'tape_label': 'VO9502', 'sanity_cookie': (65536L, 2312288512L),
'cache_status': None,
'drive': 'stkenmvr36a:/dev/rmt/tps0d0n:479000032467'},
{'storage_group': 'astro', 'uid': 0,
'pnfs_name0': '/pnfs/fs/usr/astro/idunn/rei256bin.015.tar',
'library': 'CD-LTO4F1', 'package_id': None,
'complete_crc': 2995796126L, 'size': 8192000000L,
'external_label': 'VOO732', 'wrapper': 'cpio_odc',
'package_files_count': None, 'active_package_files_count': None,
'gid': 0, 'pnfsid': '00320000000000000001CF38',
'archive_status': None, 'file_family_width': None,
'deleted': 'no', 'archive_mod_time': None,
'cache_mod_time': None, 'update': '2009-07-25 23:53:46.278724',
'file_family': 'astro',
'location_cookie': '0000_000000000_0000149',
'cache_location': None, 'original_library': None,
'bfid': 'CDMS124858402500000', 'tape_label': 'VOO732',
'sanity_cookie': (65536L, 2312288512L), 'cache_status': None,
'drive': 'stkenmvr211a:/dev/rmt/tps0d0n:1310051081'}]}
"""
return self.enstore.info_client.find_file_by_path(self.path)
@memoize_property
def is_enstore_files_by_path_ok(self):
"""Return a :obj:`bool` indicating whether the list of Enstore provided
files for the current path is ok or not."""
return bool(enstore_errors.is_ok(self.enstore_files_by_path))
@memoize_property
def enstore_files_list_by_path(self):
"""
If the :obj:`list` of Enstore provided files for the current path is
ok, return this :obj:`list`, otherwise an empty :obj:`list`.
:rtype: :obj:`list`
For a sample, see the value of the ``file_list`` key in the sample
noted for :attr:`enstore_files_by_path`.
"""
if self.is_enstore_files_by_path_ok:
enstore_files_by_path = self.enstore_files_by_path
try:
return enstore_files_by_path['file_list']
except KeyError:
return [enstore_files_by_path] # Unsure of correctness.
return []
@memoize_property
def _sfs(self):
"""Return the :class:`~enstore_namespace.StorageFS` instance for the
current path."""
return enstore_namespace.StorageFS(self.path)
@memoize_property
def sfs_pnfsid(self):
"""
Return the PNFS ID for the current file as provided by
:class:`~enstore_namespace.StorageFS`.
:rtype: :obj:`str`
"""
return self._sfs.get_id(self.path)
@memoize
def sfs_paths(self, filepath, pnfsid):
"""
Return the paths for the indicated file path and PNFS ID, as
provided by :class:`~enstore_namespace.StorageFS`.
:type filepath: :obj:`str`
:arg filepath: Absolute path of file, as provided by
:class:`~enstore_namespace.StorageFS`.
:type pnfsid: :obj:`str`
:arg pnfsid: PNFS ID of file, as provided by
:class:`~enstore_namespace.StorageFS`.
"""
try:
sfs_paths = self._sfs.get_path(id=pnfsid,
directory=os.path.dirname(filepath))
# Note: This was observed to _not_ work with a common StorageFS
# instance, which is why a file-specific instance is used. The `dir`
# argument was also observed to be required.
except (OSError, ValueError): # observed exceptions
sfs_paths = []
else:
sfs_paths = sorted(set(sfs_paths))
return sfs_paths
@memoize_property
def sizes(self):
"""
Return a :obj:`dict` containing all available file sizes for the
current file.
:rtype: :obj:`dict`
The available keys in the returned :obj:`dict` are ``lstat``,
``layer 2``, ``layer 4``, and ``Enstore``. Each value in the
:obj:`dict` is an :obj:`int` or is :obj:`None` if unavailable.
"""
return {'lstat': self.lstat.st_size,
'layer 2': self.fslayer2_property('length'),
'layer 4': self.fslayer4('filename').get('size'),
'Enstore': self.enstore_file_info.get('size'),
}
@memoize_property
def norm_paths(self):
"""
Return a :obj:`dict` containing all available paths for the current
file.
The paths are normalized to remove common mount locations such as
``/pnfs/fs/usr``, etc.
The available keys in the returned :obj:`dict` are ``filesystem``,
``layer 4``, and ``Enstore``. A value in the :obj:`dict` may be
:obj:`None` if unavailable.
"""
# Retrieve values
paths = {'filesystem': self.path,
'layer 4': self.fslayer4('filename').get('original_name'),
'Enstore': self.enstore_file_info.get('pnfs_name0'),
}
# Normalize values
variations = ('/pnfs/fnal.gov/usr/',
'/pnfs/fs/usr/',
'/pnfs/',
'/chimera/',
)
for path_key, path_val in paths.items():
if path_val:
for variation in variations:
if variation in path_val:
paths[path_key] = path_val.replace(variation,
'/<pnfs>/', 1)
# Note: The variation in question can occur anywhere
# in the path, and not just at the start of the path.
# str.startswith is not used for this reason.
break
return paths
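# Hedged illustration (not part of the original module): the normalization above
# maps any known mount prefix to a single placeholder, e.g.
#
#     '/pnfs/fs/usr/astro/fulla/fulla.gnedin.026.tar'  -> '/<pnfs>/astro/fulla/fulla.gnedin.026.tar'
#     '/pnfs/fnal.gov/usr/astro/file.tar'              -> '/<pnfs>/astro/file.tar'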
class Notice(Exception):
"""Provide an :obj:`~exceptions.Exception` representing a single notice."""
_notices = {'Test': 'Testing "{test}".',}  # Updated externally, specific to the current scanner.
@classmethod
def update_notices(cls, notices):
"""
Register the provided notices.
:type notices: :obj:`dict`
:arg notices: This is a :obj:`dict` containing notices to register. Its
keys are compact string identifiers. Its values are corresponding
string message templates. For example::
{'NoL1File': ('Cannot read layer 1 metadata file "{filename}".'
'{exception}'),}
In the above example, ``filename`` and ``exception`` represent
keyword arguments whose values will be used to :obj:`~str.format`
the message template.
"""
cls._notices.update(notices)
def __init__(self, key, **kwargs):
"""
Return an :obj:`~exceptions.Exception` containing a single notice for
the provided message arguments.
Before any instance is created, the :meth:`update_notices` classmethod
must have been called at least once.
:type key: :obj:`str`
:arg key: This refers to the identifying-type of the notice. It must
have been previously registered using the :meth:`update_notices`
classmethod.
:rtype: :class:`Notice`
Additionally provided keyword arguments are used to :obj:`~str.format`
the corresponding message template. This template must have been
previously provided together with the ``key`` using the
:meth:`update_notices` classmethod.
All arguments are also included in a :obj:`dict` representation of the
notice.
"""
self.key = key
self._kwargs = kwargs
#code = self.hashtext(self.key)
message_template = self._notices[self.key]
message = message_template.format(**self._kwargs).strip()
self._level = self.__class__.__name__.rpartition('Notice')[0].upper()
self._level = self._level or 'INFO'
message = '{0} ({1}): {2}'.format(self._level, self.key, message)
Exception.__init__(self, message)
@staticmethod
def hashtext(text):
"""
Return a four-hexadecimal-digit string hash of the provided text.
:type text: :obj:`str`
:arg text: Text to hash.
:rtype: :obj:`str`
"""
hash_ = hashlib.md5(text).hexdigest()[:4]
hash_ = '0x{0}'.format(hash_)
return hash_
@classmethod
def print_notice_templates(cls):
"""Print all notice templates."""
#notice_str = lambda k,v: '{0} ({1}): {2}'.format(cls.hashtext(k), k, v)
notice_str = lambda k, v: '{0}: {1}'.format(k, v)
notices = (notice_str(k,v) for k, v in
sorted(cls._notices.items()) if k!='Test')
for n in notices: print(n)
def __repr__(self):
"""
Return a string representation.
This string allows the class instance to be reconstructed in another
process.
"""
repr_ = '{0}({1}, **{2})'.format(self.__class__.__name__,
repr(self.key), repr(self._kwargs))
return repr_
def __eq__(self, other):
"""
Perform an equality comparison.
:type other: :class:`Notice`
:arg other: object to compare.
:rtype: :obj:`bool`
"""
return (str(self)==str(other) and self._level==other._level and
self.key==other.key and self._kwargs==other._kwargs)
def to_dict(self):
"""
Return a :obj:`dict` which describes the notice.
:rtype: :obj:`dict`
The returned :obj:`dict` can be used to reconstruct this
:class:`Notice` instance using the :meth:`from_dict` method.
"""
return {self.key: {'level': self._level,
'args': self._kwargs},
}
def to_exportable_dict(self):
"""
Return an exportable :obj:`dict` which describes the notice.
:rtype: :obj:`dict`
The returned :obj:`dict` cannot be used to reconstruct this
:class:`Notice` instance using the :meth:`from_dict` method.
"""
def flatten_dict(d):
"""
Return a flattened version of a dict.
This is based on http://stackoverflow.com/a/13781829/832230. Keys
are also converted to lowercase, and spaces in a key are removed.
"""
sep='_'
final = {}
def _flatten_dict(obj, parent_keys=[]):
for k, v in obj.iteritems():
k = k.lower().replace(' ', '')
if isinstance(v, dict):
_flatten_dict(v, parent_keys + [k])
else:
key = sep.join(parent_keys + [k])
final[key] = v
_flatten_dict(d)
return final
return {self.key: {'level': self._level,
'args': flatten_dict(self._kwargs)},
}
@staticmethod
def from_dict(notice):
"""
Return a :class:`Notice` instance constructed from the provided
:obj:`dict`.
:type notice: :obj:`dict`
:arg notice: This is the object from which to construct a
:class:`Notice`. It must have the same structure as is returned by
:meth:`to_dict`.
:rtype: :class:`Notice`
"""
nkey, notice = notice.items()[0]
nlevel = notice['level'].title()
nclass_name = '{0}Notice'.format(nlevel)
nclass = globals()[nclass_name]
return nclass(nkey, **notice['args'])
def to_json(self, indent=None):
"""
Return a sorted JSON representation of the :obj:`dict` describing the
notice.
:type indent: :obj:`None` or :obj:`int` (non-negative)
:arg indent: See :py:func:`json.dumps`.
:rtype: :obj:`str`
"""
return json.dumps(self.to_dict(), sort_keys=True, indent=indent)
@classmethod
def from_json(cls, notice):
"""
Return a :class:`Notice` instance constructed from the provided JSON
string.
:type notice: :obj:`str`
:arg notice: This is the object from which to construct a
:class:`Notice`. It must have the same structure as is returned by
:meth:`to_json`.
:rtype: :class:`Notice`
.. note:: This method can be used only with Python 2.7 or higher. It
may raise the following :obj:`~exceptions.Exception` with Python
2.6::
TypeError: __init__() keywords must be strings
"""
return cls.from_dict(json.loads(notice))
class TestNotice(Notice):
"""Test notice."""
pass
class InfoNotice(Notice):
"""Informational notice."""
pass
class WarningNotice(Notice):
"""Warning notice."""
pass
class ErrorNotice(Notice):
"""Error notice."""
pass
class CriticalNotice(Notice):
"""Critical error notice."""
pass
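# Hedged usage sketch (not part of the original module; the 'NoL1File' key and its
# arguments are illustrative only):
#
#     Notice.update_notices({'NoL1File': 'Cannot read layer 1 metadata file "{filename}". {exception}'})
#     notice = ErrorNotice('NoL1File', filename='/pnfs/example', exception='IOError')
#     str(notice)                                    # 'ERROR (NoL1File): Cannot read layer 1 metadata file ...'
#     Notice.from_dict(notice.to_dict()) == notice   # True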
class NoticeGrp(object):
"""Provide a container for :class:`Notice` objects."""
def __init__(self, item_path, notices=None):
"""
Return an object that is a group of :class:`Notice` objects for the
specified item path and notices.
:type item_path: :obj:`str`
:arg item_path: absolute filesystem path of the item.
:type notices: :obj:`~collections.Sequence` or :obj:`None`
:arg notices: This can be a :obj:`~collections.Sequence` of
:class:`Notice` objects that are added to the group. If multiple
:class:`Notice` objects exist in the sequence with the same ``key``
attribute, only the last such :class:`Notice` will be stored.
"""
self.item = item_path
self.notices = dict()
if notices:
for notice in notices: self.add_notice(notice)
def __repr__(self):
"""
Return a string representation.
This string allows the class instance to be reconstructed in another
process.
"""
repr_ = '{0}({1}, {2})'.format(self.__class__.__name__,
repr(self.item), repr(self.notices))
return repr_.decode()
def add_notice(self, notice):
"""
Add the provided :class:`Notice`.
If a :class:`Notice` already exists with the same ``key`` attribute, it
will be overwritten.
"""
self.notices[notice.key] = notice
def __eq__(self, other):
"""
Perform an equality comparison.
:type other: :class:`NoticeGrp`
:arg other: object to compare.
:rtype: :obj:`bool`
"""
return (self.item==other.item and self.notices==other.notices)
def __nonzero__(self):
"""
Return :obj:`True` if one or more notices exist in the group, otherwise
return :obj:`False`.
:rtype: :obj:`bool`
"""
return bool(self.notices)
def __str__(self):
"""
Return a multiline string representation of the notice group.
Example::
/pnfs/fs/usr/mydir1/myfile1
ERROR (ErrType1): This is error 1.
WARNING (WarnType1): This is warning 1.
"""
notices_strs = (str(n) for n in self.notices.values())
return '{0}\n{1}'.format(self.item, '\n'.join(notices_strs))
def to_dict(self):
"""
Return a :obj:`dict` which describes the notice group.
:rtype: :obj:`dict`
The returned :obj:`dict` can be used to reconstruct this
:class:`NoticeGrp` instance using the :meth:`from_dict` method.
"""
return {'path': self.item,
'notices': dict(v.to_dict().items()[0] for v in
self.notices.values()),
}
def to_exportable_dict(self):
"""
Return an exportable :obj:`dict` which describes the notice group.
:rtype: :obj:`dict`
The returned :obj:`dict` cannot be used to reconstruct this
:class:`NoticeGrp` instance using the :meth:`from_dict` method.
"""
return {'path': self.item,
'notices': dict(v.to_exportable_dict().items()[0] for v in
self.notices.values()),
}
@classmethod
def from_dict(cls, noticegrp):
"""
Return a :class:`NoticeGrp` instance constructed from the provided
:obj:`dict`.
:type notice: :obj:`dict`
:arg notice: This is the object from which to construct a
:class:`NoticeGrp`. It must have the same structure as is returned
by :meth:`to_dict`.
:rtype: :class:`NoticeGrp`
"""
notices = noticegrp['notices'].items()
notices = (Notice.from_dict({k:v}) for k,v in notices)
return cls(noticegrp['path'], notices)
def to_json(self, indent=None):
"""
Return a sorted JSON representation of the :obj:`dict` describing the
notice group.
:type indent: :obj:`None` or :obj:`int` (non-negative)
:arg indent: See :py:func:`json.dumps`.
:rtype: :obj:`str`
"""
return json.dumps(self.to_dict(), sort_keys=True,
indent=indent)
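# Hedged usage sketch (not part of the original module): grouping notices per path.
#
#     grp = NoticeGrp('/pnfs/fs/usr/mydir1/myfile1')
#     grp.add_notice(ErrorNotice('NoL1File', filename='/pnfs/example', exception='IOError'))
#     print(grp)                                   # path on the first line, one notice per following line
#     NoticeGrp.from_dict(grp.to_dict()) == grp    # True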
class ScanInterface:
"""
This class is referenced and initialized by the :mod:`enstore` module.
While this class is intended by the :mod:`enstore` module to perform
command-line option parsing using the :class:`option.Interface` base class,
it does not do so. Instead, the command-line option parsing for this module
is performed by the :class:`CommandLineOptionsParser` class which is called
independently. As such, this :class:`ScanInterface` class intentionally
does not inherit the :class:`option.Interface` class.
"""
def __init__(self, *args, **kwargs):
"""
Initialize the interface.
No input arguments are used.
"""
pass
def do_work(*args, **kwargs):
"""
Perform a scan as specified on the command line, using the options
described within the :meth:`CommandLineOptionsParser._add_options` method.
"""
# Parse command line options
options = CommandLineOptionsParser().options
#options = {'scan_type': 'forward'} # These options are minimally required.
settings.update(options) # Merge provided options.
# Select scanner
scanners = {'forward': ScannerForward}
scan_type = settings['scan_type']
try: scanner = scanners[scan_type]
except KeyError:
msg = ('Error: "{0}" scan is not implemented. Select from: {1}'
).format(scan_type, ', '.join(scanners))
exit(msg)
# Perform specified action
if settings.get('print') == 'checks':
scanner.print_checks()
elif settings.get('print') == 'notices':
Notice.update_notices(scanner.notices)
Notice.print_notice_templates()
else:
scanner().run()
if __name__ == '__main__':
do_work()
|
python
|
# SPDX-FileCopyrightText: 2019-2021 REFITT Team
# SPDX-License-Identifier: Apache-2.0
"""Integration tests for forecast interface."""
# type annotations
# standard libs
# external libs
import pytest
# internal libs
from refitt.data.forecast import Forecast
from refitt.database.model import Observation as ObservationModel, Model
from tests.unit.test_forecast import generate_random_forecast
class TestForecastPublish:
"""Tests for database integration with forecast interface."""
def test_publish(self) -> None:
"""Verify roundtrip with database."""
data = generate_random_forecast()
num_forecasts = Model.count()
num_observations = ObservationModel.count()
model = Forecast.from_dict(data).publish()
assert Model.count() == num_forecasts + 1
assert ObservationModel.count() == num_observations + 1
assert model.to_dict() == Model.from_id(model.id).to_dict()
Model.delete(model.id)
ObservationModel.delete(model.observation_id)
assert Model.count() == num_forecasts
assert ObservationModel.count() == num_observations
with pytest.raises(Model.NotFound):
Model.from_id(model.id)
with pytest.raises(ObservationModel.NotFound):
ObservationModel.from_id(model.observation_id)
|
python
|
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def home(request):
return render(request,'home.html',{'name':'Smit'})
def add(request):
val1 = request.POST['num1']
val2 = request.POST['num2']
return render(request,'result.html',{'result_add':int(val1)+int(val2)})
|
python
|
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import json
import tensorflow as tf
from utils import queuer
def decoding(sprobs, samples, params, mask=None):
"""Generate decoded sequence from seqs"""
if mask is None:
mask = [1.] * len(sprobs)
flat_sprobs = []
for _sprobs, _m in zip(sprobs, mask):
if _m < 1.: continue
for start_prob in _sprobs:
flat_sprobs.append(start_prob)
assert len(flat_sprobs) == len(samples), 'Decoding length mismatch!'
results = []
for (idx, sample), pred in zip(samples, flat_sprobs):
gold_label = sample[0]
pred_label = pred
results.append({
'pred_answer': int(pred_label),
'sample_id': idx,
'gold_answer': gold_label
})
return results
def predict(session, features,
out_pred, dataset, params, train=True):
"""Performing decoding with exising information"""
results = []
batcher = dataset.batcher(params.eval_batch_size,
buffer_size=params.buffer_size,
shuffle=False, train=train)
eval_queue = queuer.EnQueuer(batcher,
multiprocessing=params.data_multiprocessing,
random_seed=params.random_seed)
eval_queue.start(workers=params.nthreads,
max_queue_size=params.max_queue_size)
def _predict_one_batch(data_on_gpu):
feed_dicts = {}
flat_raw_data = []
for fidx, data in enumerate(data_on_gpu):
# define feed_dict
feed_dict = {
features[fidx]["p"]: data['p_token_ids'],
features[fidx]["h"]: data['h_token_ids'],
features[fidx]["l"]: data['l_id'],
}
if params.use_char:
feed_dict[features[fidx]["pc"]] = data['p_char_ids']
feed_dict[features[fidx]["hc"]] = data['h_char_ids']
if params.enable_bert:
feed_dict[features[fidx]["ps"]] = data['p_subword_ids']
feed_dict[features[fidx]["hs"]] = data['h_subword_ids']
feed_dict[features[fidx]["pb"]] = data['p_subword_back']
feed_dict[features[fidx]["hb"]] = data['h_subword_back']
feed_dicts.update(feed_dict)
flat_raw_data.extend(data['raw'])
# pick up valid outputs
data_size = len(data_on_gpu)
valid_out_pred = out_pred[:data_size]
decode_spred = session.run(
valid_out_pred, feed_dict=feed_dicts)
predictions = decoding(
decode_spred, flat_raw_data, params
)
return predictions
very_begin_time = time.time()
data_on_gpu = []
for bidx, data in enumerate(eval_queue.get()):
data_on_gpu.append(data)
# when using multiple GPUs, keep accumulating batches until there is one per GPU
if len(params.gpus) > 0 and len(data_on_gpu) < len(params.gpus):
continue
start_time = time.time()
predictions = _predict_one_batch(data_on_gpu)
data_on_gpu = []
results.extend(predictions)
tf.logging.info(
"Decoding Batch {} using {:.3f} s, translating {} "
"sentences using {:.3f} s in total".format(
bidx, time.time() - start_time,
len(results), time.time() - very_begin_time
)
)
eval_queue.stop()
if len(data_on_gpu) > 0:
start_time = time.time()
predictions = _predict_one_batch(data_on_gpu)
results.extend(predictions)
tf.logging.info(
"Decoding Batch {} using {:.3f} s, translating {} "
"sentences using {:.3f} s in total".format(
'final', time.time() - start_time,
len(results), time.time() - very_begin_time
)
)
return results
def eval_metric(results, params):
"""BLEU Evaluate """
crr_cnt, total_cnt = 0, 0
for result in results:
total_cnt += 1
p = result['pred_answer']
g = result['gold_answer']
if p == g:
crr_cnt += 1
return crr_cnt * 100. / total_cnt
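# Hedged illustration (not part of the original module): eval_metric returns the
# percentage of samples whose predicted answer matches the gold answer.
#
#     results = [{'pred_answer': 1, 'gold_answer': 1, 'sample_id': 0},
#                {'pred_answer': 0, 'gold_answer': 1, 'sample_id': 1}]
#     eval_metric(results, params=None)   # -> 50.0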
def dump_predictions(results, output):
"""save translation"""
with tf.gfile.Open(output, 'w') as writer:
for sample in results:
writer.write(json.dumps(sample) + "\n")
tf.logging.info("Saving translations into {}".format(output))
|
python
|
# --------------------------------------------------------------------------- #
# aionextid.py #
# #
# Copyright © 2015-2022, Rajiv Bakulesh Shah, original author. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at: #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# --------------------------------------------------------------------------- #
'Asynchronous distributed Redis-powered monotonically increasing ID generator.'
# TODO: Remove the following import once deferred evaluation of annotations
# becomes the default.
# 1. https://docs.python.org/3/whatsnew/3.7.html#whatsnew37-pep563
# 2. https://www.python.org/dev/peps/pep-0563/
# 3. https://www.python.org/dev/peps/pep-0649/
from __future__ import annotations
import asyncio
import contextlib
from typing import ClassVar
from typing import Iterable
from redis import RedisError
from redis.asyncio import Redis as AIORedis # type: ignore
from .base import AIOPrimitive
from .base import logger
from .exceptions import QuorumNotAchieved
from .nextid import NextID
from .nextid import Scripts
class AIONextID(Scripts, AIOPrimitive):
'Async distributed Redis-powered monotonically increasing ID generator.'
__slots__ = ('num_tries',)
_KEY_PREFIX: ClassVar[str] = NextID._KEY_PREFIX
def __init__(self, # type: ignore
*,
key: str = 'current',
masters: Iterable[AIORedis] = frozenset(),
num_tries: int = NextID._NUM_TRIES,
) -> None:
'Initialize an AIONextID ID generator.'
super().__init__(key=key, masters=masters)
self.num_tries = num_tries
def __aiter__(self) -> AIONextID:
return self # pragma: no cover
async def __anext__(self) -> int:
for _ in range(self.num_tries):
with contextlib.suppress(QuorumNotAchieved):
next_id = await self.__get_current_ids() + 1
await self.__set_current_ids(next_id)
return next_id
raise QuorumNotAchieved(self.key, self.masters)
async def __get_current_id(self, master: AIORedis) -> int: # type: ignore
current_id: int = await master.get(self.key)
return current_id
async def __set_current_id(self, master: AIORedis, value: int) -> bool: # type: ignore
current_id: int | None = await self._set_id_script( # type: ignore
keys=(self.key,),
args=(value,),
client=master,
)
return current_id == value
async def __reset_current_id(self, master: AIORedis) -> None:  # type: ignore
await master.delete(self.key)
async def __get_current_ids(self) -> int:
current_ids, redis_errors = [], []
coros = [self.__get_current_id(master) for master in self.masters]
for coro in asyncio.as_completed(coros): # type: ignore
try:
current_id = int(await coro or b'0')
except RedisError as error:
redis_errors.append(error)
logger.exception(
'%s.__get_current_ids() caught %s',
self.__class__.__qualname__,
error.__class__.__qualname__,
)
else:
current_ids.append(current_id)
if len(current_ids) > len(self.masters) // 2:
return max(current_ids)
raise QuorumNotAchieved(
self.key,
self.masters,
redis_errors=redis_errors,
)
async def __set_current_ids(self, value: int) -> None:
num_masters_set, redis_errors = 0, []
coros = [self.__set_current_id(master, value) for master in self.masters]
for coro in asyncio.as_completed(coros):
try:
num_masters_set += await coro
except RedisError as error:
redis_errors.append(error)
logger.exception(
'%s.__set_current_ids() caught %s',
self.__class__.__qualname__,
error.__class__.__qualname__,
)
if num_masters_set > len(self.masters) // 2:
return
raise QuorumNotAchieved(
self.key,
self.masters,
redis_errors=redis_errors,
)
async def reset(self) -> None:
num_masters_reset, redis_errors = 0, []
coros = [self.__reset_current_id(master) for master in self.masters]
for coro in asyncio.as_completed(coros):
try:
await coro
except RedisError as error:
redis_errors.append(error)
logger.exception(
'%s.reset() caught %s',
self.__class__.__qualname__,
error.__class__.__qualname__,
)
else:
num_masters_reset += 1
if num_masters_reset > len(self.masters) // 2:
return
raise QuorumNotAchieved(
self.key,
self.masters,
redis_errors=redis_errors,
)
def __repr__(self) -> str:
return f'<{self.__class__.__qualname__} key={self.key}>'
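# Hedged usage sketch (assumes one or more reachable Redis masters; not part of the
# original module):
#
#     import asyncio
#     from redis.asyncio import Redis as AIORedis
#
#     async def demo():
#         masters = {AIORedis.from_url('redis://localhost:6379/0')}
#         aioids = AIONextID(key='order-ids', masters=masters)
#         async for next_id in aioids:
#             print(next_id)
#             break
#
#     asyncio.run(demo())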
|
python
|
import os
import sys
import logging
import re
from django.core.exceptions import ImproperlyConfigured
from pathlib import Path # python3 only
logger = logging.getLogger(__name__)
def dotenv_values(dotenv_path):
lines = []
try:
with open(dotenv_path) as fp:
lines = fp.read().splitlines()
except FileNotFoundError as e:
if len(sys.argv) > 1 and sys.argv[1] == 'test':
logger.warning(
f'No dotenv file found using dotenv_path:{dotenv_path}'
)
return {}
else:
raise e
# get (key, value) tuples by splitting each non-empty line on the first '='
lines = map(lambda l: tuple(re.split(r'\s*=\s*', l, 1)), filter(
None, lines
))
lines = list(lines)
print(f"dotenv_values: found {len(lines)} valid lines")
if not lines:
return dict()
return dict(lines)
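# Hedged illustration (not part of the original module): a dotenv file containing
#
#     SECRET_KEY = abc123
#     DEBUG=True
#
# is parsed as:
#
#     dotenv_values('.production.env')   # -> {'SECRET_KEY': 'abc123', 'DEBUG': 'True'}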
def get_env_variable(var_name, default=None):
if var_name in dotenv_dict:
return dotenv_dict[var_name]
try:
return os.environ[var_name]
except KeyError:
if default:
return default
error_msg = "Set the %s environment variable" % var_name
raise ImproperlyConfigured(error_msg)
# e.g. set ENV=production to get .production.env file
dotenv_filename = '.{0}.env'.format(
os.environ.get('ENV', '')
) if 'ENV' in os.environ else '.env'
dotenv_path = str(Path('.') / dotenv_filename)
dotenv_dict = dotenv_values(dotenv_path=dotenv_path)
print('loading env file: {0}'.format(dotenv_filename))
|
python
|
"""
Tools for asking the ESP about any alarms that have been raised,
and telling the user about them if so.
The top alarmbar shows little QPushButtons for each alarm that is currently active.
If the user clicks a button, they are shown the message text and a "snooze" button
for that alarm.
There is a single physical snooze button which is manipulated based on which alarm
the user has selected.
"""
import sys
from communication import rpi
from PyQt5 import QtCore, QtWidgets
BITMAP = {1 << x: x for x in range(32)}
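# Hedged illustration (not part of the original module): BITMAP maps a single-bit
# alarm flag to its bit position, e.g.
#
#     BITMAP[8]        # -> 3
#     BITMAP[1 << 31]  # -> 31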
ERROR = 0
WARNING = 1
class SnoozeButton:
"""
Takes care of snoozing alarms.
Class members:
- _esp32: ESP32Serial object for communication
- _alarm_h: AlarmHandler
- _alarmsnooze: QPushButton that user will press
- _code: The alarm code that the user is currently dealing with
- _mode: Whether the current alarm is an ERROR or a WARNING
"""
def __init__(self, esp32, alarm_h, alarmsnooze):
"""
Constructor
Arguments: see relevant class members
"""
self._esp32 = esp32
self._alarm_h = alarm_h
self._alarmsnooze = alarmsnooze
self._alarmsnooze.hide()
self._code = None
self._mode = None
self._alarmsnooze.clicked.connect(self._on_click_snooze)
self._alarmsnooze.setStyleSheet(
'background-color: rgb(0,0,205); color: white; font-weight: bold;')
self._alarmsnooze.setMaximumWidth(150)
def set_code(self, code):
"""
Sets the alarm code
Arguments:
- code: Integer alarm code
"""
self._code = code
self._alarmsnooze.setText('Snooze %s' % str(BITMAP[self._code]))
def set_mode(self, mode):
"""
Sets the mode.
Arguments:
- mode: ERROR or WARNING
"""
self._mode = mode
def show(self):
"""
Shows the snooze alarm button
"""
self._alarmsnooze.show()
def _on_click_snooze(self):
"""
The callback function called when the alarm snooze button is clicked.
"""
if self._mode not in [WARNING, ERROR]:
raise Exception('mode must be alarm or warning.')
# Reset the alarms/warnings in the ESP
# If the ESP connection fails at this
# time, raise an error box
if self._mode == ERROR:
self._esp32.snooze_hw_alarm(self._code)
self._alarm_h.snooze_alarm(self._code)
else:
self._esp32.reset_warnings()
self._alarm_h.snooze_warning(self._code)
class AlarmButton(QtWidgets.QPushButton):
"""
The alarm and warning buttons shown in the top alarmbar.
Class members:
- _mode: Whether this alarm is an ERROR or a WARNING
- _code: The integer code for this alarm.
- _errstr: Text describing this alarm.
- _label: The QLabel to populate with the error message, if the user
clicks our button.
- _snooze_btn: The SnoozeButton to manipulate if the user clicks our
button.
"""
def __init__(self, mode, code, errstr, label, snooze_btn):
super(AlarmButton, self).__init__()
self._mode = mode
self._code = code
self._errstr = errstr
self._label = label
self._snooze_btn = snooze_btn
self.clicked.connect(self._on_click_event)
if self._mode == ERROR:
self._bkg_color = 'red'
elif self._mode == WARNING:
self._bkg_color = 'orange'
else:
raise Exception('Option %s not supported' % self._mode)
self.setText(str(BITMAP[self._code]))
style = """background-color: %s;
color: white;
border: 0.5px solid white;
font-weight: bold;
""" % self._bkg_color
self.setStyleSheet(style)
self.setMaximumWidth(35)
self.setMaximumHeight(30)
def _on_click_event(self):
"""
The callback function called when the user clicks on an alarm button
"""
# Set the label showing the alarm name
style = """QLabel {
background-color: %s;
color: white;
font-weight: bold;
}""" % self._bkg_color
self._label.setStyleSheet(style)
self._label.setText(self._errstr)
self._label.show()
self._activate_snooze_btn()
def _activate_snooze_btn(self):
"""
Activates the snooze button that will silence this alarm
"""
self._snooze_btn.set_mode(self._mode)
self._snooze_btn.set_code(self._code)
self._snooze_btn.show()
class AlarmHandler:
"""
This class starts a QTimer dedicated to checking whether there are any errors
or warnings coming from the ESP32.
Class members:
- _esp32: ESP32Serial object for communication
- _alarm_time: Timer that will periodically ask the ESP about any alarms
- _err_buttons: {int: AlarmButton} for any active ERROR alarms
- _war_buttons: {int: AlarmButton} for any active WARNING alarms
- _alarmlabel: QLabel showing text of the currently-selected alarm
- _alarmstack: Stack of QPushButtons for active alarms
- _alarmsnooze: QPushButton for snoozing an alarm
- _snooze_btn: SnoozeButton that manipulates _alarmsnooze
"""
def __init__(self, config, esp32, alarmbar, hwfail_func):
"""
Constructor
Arguments: see relevant class members.
"""
self._esp32 = esp32
self._alarm_timer = QtCore.QTimer()
self._alarm_timer.timeout.connect(self.handle_alarms)
self._alarm_timer.start(config["alarminterval"] * 1000)
self._err_buttons = {}
self._war_buttons = {}
self._hwfail_func = hwfail_func
self._hwfail_codes = [1 << code for code in config['hwfail_codes']]
self._alarmlabel = alarmbar.findChild(QtWidgets.QLabel, "alarmlabel")
self._alarmstack = alarmbar.findChild(QtWidgets.QHBoxLayout, "alarmstack")
self._alarmsnooze = alarmbar.findChild(QtWidgets.QPushButton, "alarmsnooze")
self._snooze_btn = SnoozeButton(self._esp32, self, self._alarmsnooze)
def handle_alarms(self):
"""
The callback method which is called periodically to check if the ESP raised any
alarm or warning.
"""
# Retrieve alarms and warnings from the ESP
esp32alarm = self._esp32.get_alarms()
esp32warning = self._esp32.get_warnings()
#
# ALARMS
#
if esp32alarm:
errors = esp32alarm.strerror_all()
alarm_codes = esp32alarm.get_alarm_codes()
for alarm_code, err_str in zip(alarm_codes, errors):
if alarm_code in self._hwfail_codes:
self._hwfail_func(err_str)
print("Critical harware failure")
if alarm_code not in self._err_buttons:
btn = AlarmButton(ERROR, alarm_code, err_str,
self._alarmlabel, self._snooze_btn)
self._alarmstack.addWidget(btn)
self._err_buttons[alarm_code] = btn
#
# WARNINGS
#
if esp32warning:
errors = esp32warning.strerror_all()
warning_codes = esp32warning.get_alarm_codes()
for warning_code, err_str in zip(warning_codes, errors):
if warning_code not in self._war_buttons:
btn = AlarmButton(
WARNING, warning_code, err_str, self._alarmlabel, self._snooze_btn)
self._alarmstack.addWidget(btn)
self._war_buttons[warning_code] = btn
def snooze_alarm(self, code):
"""
Graphically snoozes alarm corresponding to 'code'
Arguments:
- code: integer alarm code
"""
if code not in self._err_buttons:
raise Exception('Cannot snooze code %s as alarm button doesn\'t exist.' % code)
self._err_buttons[code].deleteLater()
del self._err_buttons[code]
self._alarmlabel.setText('')
self._alarmlabel.setStyleSheet('QLabel { background-color: black; }')
self._alarmsnooze.hide()
def snooze_warning(self, code):
"""
Graphically snoozes warning corresponding to 'code'
Arguments:
- code: integer alarm code
"""
if code not in self._war_buttons:
raise Exception('Cannot snooze code %s as warning button doesn\'t exist.' % code)
self._war_buttons[code].deleteLater()
del self._war_buttons[code]
self._alarmlabel.setText('')
self._alarmlabel.setStyleSheet('QLabel { background-color: black; }')
self._alarmsnooze.hide()
class CriticalAlarmHandler:
"""
Handles severe communication and hardware malfunction errors.
These errors have a low chance of recovery, but this class handles irrecoverable as well as
potentially recoverable errors (with options to retry).
"""
def __init__(self, mainparent, esp32):
"""
Main constructor. Grabs necessary widgets from the main window
Arguments:
- mainparent: Reference to the mainwindow widget.
- esp32: Reference to the ESP32 interface.
"""
self._esp32 = esp32
self._toppane = mainparent.toppane
self._criticalerrorpage = mainparent.criticalerrorpage
self._bottombar = mainparent.bottombar
self._criticalerrorbar = mainparent.criticalerrorbar
self._mainparent = mainparent
self.nretry = 0
self._label_criticalerror = mainparent.findChild(QtWidgets.QLabel, "label_criticalerror")
self._label_criticaldetails = mainparent.findChild(
QtWidgets.QLabel,
"label_criticaldetails")
self._button_retrycmd = mainparent.findChild(QtWidgets.QPushButton, "button_retrycmd")
def show_critical_error(self, text, details=""):
"""
Shows the critical error in the mainwindow.
This includes changing the screen to red and displaying a big message to this effect.
"""
self._label_criticalerror.setText(text)
self._toppane.setCurrentWidget(self._criticalerrorpage)
self._bottombar.setCurrentWidget(self._criticalerrorbar)
self._label_criticaldetails.setText(details)
rpi.start_alarm_system()
self._mainparent.repaint()
input("Hang on wait reboot")
def call_system_failure(self, details=""):
"""
Calls a system failure and sets the mainwindow into a state that is irrecoverable without
maintenance support.
"""
self._button_retrycmd.hide()
disp_msg = "*** SYSTEM FAILURE ***\nCall the Maintenance Service"
details = str(details).replace("\n", "")
self.show_critical_error(disp_msg, details=details)
|
python
|
"""
atpthings.util.dictionary
-------------------------
"""
def getKeys(dictionary: dict, keys: list) -> dict:
"""Get keys from dictionary.
Parameters
----------
dictionary : dict
Dictionary.
keys : list
List of keys to extract.
Returns
-------
dict
Dictionary with only the specified keys.
Examples
--------
>>> dict1 = {"one": 1, "two": 1,"three": 1}
>>> keysList = ["one", "three"]
>>> atpthings.util.dictionary.getKeys(dict1, keysList)
{'one': 1, 'three': 1}
"""
return {key: dictionary[key] for key in dictionary.keys() & keys}
|
python
|
import wx
from html import escape
import sys
import Model
import Utils
class Commentary( wx.Panel ):
def __init__( self, parent, id = wx.ID_ANY ):
super().__init__(parent, id, style=wx.BORDER_SUNKEN)
self.SetDoubleBuffered(True)
self.SetBackgroundColour( wx.WHITE )
self.hbs = wx.BoxSizer(wx.HORIZONTAL)
self.text = wx.TextCtrl( self, style=wx.TE_MULTILINE|wx.TE_READONLY )
self.hbs.Add( self.text, 1, wx.EXPAND )
self.SetSizer( self.hbs )
def getText( self ):
race = Model.race
riderInfo = {info.bib:info for info in race.riderInfo} if race else {}
def infoLinesSprint( sprint, bibs ):
lines = []
pfpText = ''
for place_in, bib in enumerate(bibs,1):
ri = riderInfo.get( bib, None )
points, place, tie = race.getSprintPoints( sprint, place_in, bibs )
if points:
pfpText = ' ({:+d} pts)'.format(points)
else:
pfpText = ''
if ri is not None:
lines.append( ' {}.{} {}: {} {}, {}'.format(place, pfpText, bib, ri.first_name, ri.last_name, ri.team) )
else:
lines.append( ' {}.{} {}'.format(place, pfpText, bib) )
return lines
def infoLines( bibs, pointsForPlace=None ):
lines = []
pfpText = ''
for place_in, bib in enumerate(bibs,1):
ri = riderInfo.get( bib, None )
points, place = pointsForPlace, place_in
if points:
pfpText = ' ({:+d} pts)'.format(points)
else:
pfpText = ''
if ri is not None:
lines.append( ' {}.{} {}: {} {}, {}'.format(place, pfpText, bib, ri.first_name, ri.last_name, ri.team) )
else:
lines.append( ' {}.{} {}'.format(place, pfpText, bib) )
return lines
RaceEvent = Model.RaceEvent
lines = []
self.sprintCount = 0
for e in race.events:
if e.eventType == RaceEvent.Sprint:
self.sprintCount += 1
lines.append( 'Sprint {} Result:'.format(self.sprintCount) )
lines.extend( infoLinesSprint(self.sprintCount, e.bibs[:len(race.pointsForPlace)]) )
elif e.eventType == RaceEvent.LapUp:
lines.append( 'Gained a Lap:' )
lines.extend( infoLines(e.bibs, race.pointsForLapping) )
elif e.eventType == RaceEvent.LapDown:
lines.append( 'Lost a Lap:' )
lines.extend( infoLines(e.bibs, -race.pointsForLapping) )
elif e.eventType == RaceEvent.Finish:
lines.append( 'Finish:' )
self.sprintCount += 1
lines.extend( infoLinesSprint(self.sprintCount, e.bibs) )
elif e.eventType == RaceEvent.DNF:
lines.append( 'DNF (Did Not Finish):' )
lines.extend( infoLines(e.bibs) )
elif e.eventType == RaceEvent.DNS:
lines.append( 'DNS (Did Not Start):' )
lines.extend( infoLines(e.bibs) )
elif e.eventType == RaceEvent.PUL:
lines.append( 'PUL (Pulled by Race Officials):' )
lines.extend( infoLines(e.bibs) )
elif e.eventType == RaceEvent.DSQ:
lines.append( 'DSQ (Disqualified)' )
lines.extend( infoLines(e.bibs) )
lines.append( '' )
return '\n'.join(lines)
def toHtml( self, html ):
text = self.getText().replace('.', '')
if not text:
return ''
lines = []
inList = False
html.write( '<dl>' )
for line in text.split('\n'):
if not line:
continue
if line[:1] != ' ':
if inList:
html.write('</ol>\n')
html.write('</dd>\n')
inList = False
html.write( '<dd>\n' )
html.write( escape(line) )
html.write( '<ol>' )
inList = True
continue
line = line.strip()
html.write( '<li>{}</li>\n'.format(line.split(' ',1)[1].strip()) )
html.write('</ol>\n')
html.write('</dd>\n')
html.write('</dl>\n')
def refresh( self ):
self.text.Clear()
self.text.AppendText( self.getText() )
def commit( self ):
pass
if __name__ == '__main__':
app = wx.App( False )
mainWin = wx.Frame(None,title="Commentary", size=(600,400))
Model.newRace()
Model.race._populate()
rd = Commentary(mainWin)
rd.refresh()
rd.toHtml( sys.stdout )
mainWin.Show()
app.MainLoop()
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^(?P<username>[\w.@+-]+)/$', views.UserDetailView.as_view(), name='detail'),
]
|
python
|
import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
from . import *
class GlowLevel(nn.Module):
def __init__(self, in_channel, filters=512, n_levels=1, n_steps=2):
'''
Initializes a Glow level.
Parameters
----------
in_channel : int
number of input channels
filters : int
number of filters in affine coupling layer
n_levels : int
number of Glow layers
n_steps : int
number of flow steps
'''
super(GlowLevel, self).__init__()
# init flow layers
self.flowsteps = nn.ModuleList([Flow(in_channel*4, filters = filters)
for _ in range(n_steps)])
# init Glow levels
if(n_levels > 1):
self.nextLevel = GlowLevel(in_channel = in_channel * 2,
filters = filters,
n_levels = n_levels-1,
n_steps = n_steps)
else:
self.nextLevel = None
def forward(self, x, direction = 0):
'''
forward function for each glow level
Parameters
----------
x : torch.tensor
input batch
direction : int
0 means forward
1 means reverse
Returns
-------
x : torch.tensor
output of the glow layer
logdet : float
the log-determinant term
'''
sum_logdet = 0
x = self.squeeze(x)
if not direction: # direction is forward
for flowStep in self.flowsteps:
x, log_det = flowStep(x, direction=direction)
sum_logdet += log_det
if self.nextLevel is not None:
x, x_split = x.chunk(2, 1)
x, log_det = self.nextLevel(x, direction)
sum_logdet += log_det
x = torch.cat((x, x_split), dim = 1)
if direction: # direction is reverse
for flowStep in reversed(self.flowsteps):
x, log_det = flowStep(x, direction)
sum_logdet += log_det
x = self.unsqueeze(x)
return x, sum_logdet
def squeeze(self, x):
"""
Quadruples the number of channels of the input by folding each 2x2 spatial
block into the channel dimension (space-to-depth).
Parameters
----------
x : torch.Tensor
batch of input images with shape
batch_size, channels, height, width
Returns
-------
x : torch.tensor
squeezed input with shape
batch_size, channels * 4, height//2, width//2
"""
batch_size, channels, h, w = x.size()
x = x.view(batch_size, channels, h // 2, 2, w //2, 2)
x = x.permute(0, 1, 3, 5, 2, 4).contiguous() # to apply permutation from the glow paper
x = x.view(batch_size, channels * 4, h // 2, w // 2)
return x
def unsqueeze(self, x):
"""
Unsqueezes the image by dividing the channels by 4 and
reconstructing the original spatial resolution (depth-to-space).
Parameters
----------
x : torch.Tensor
batch of input images with shape
batch_size, channels * 4, height//2, width//2
Returns
-------
x : torch.tensor
unsqueezed input with shape
batch_size, channels, height, width
"""
batch_size, channels, h, w = x.size()
x = x.view(batch_size, channels // 4, 2, 2, h, w)
x = x.permute(0, 1, 4, 2, 5, 3).contiguous()
x = x.view(batch_size, channels // 4, h * 2, w * 2)
return x
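# Hedged shape sketch (not part of the original module; assumes the Flow layers
# imported above are available):
#
#     level = GlowLevel(in_channel=3, filters=32, n_levels=2, n_steps=2)
#     x = torch.randn(8, 3, 32, 32)
#     y = level.squeeze(x)                     # shape (8, 12, 16, 16)
#     assert level.unsqueeze(y).shape == x.shape
#     z, logdet = level(x)                     # full forward pass through the flow steps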
|
python
|
from collections import defaultdict
from glypy.io import iupac, glycoct
from glypy.structure.glycan_composition import HashableGlycanComposition, FrozenGlycanComposition
from glypy.enzyme import (
make_n_glycan_pathway, make_mucin_type_o_glycan_pathway,
MultiprocessingGlycome, Glycosylase, Glycosyltransferase,
EnzymeGraph, GlycanCompositionEnzymeGraph, _enzyme_graph_inner)
from glycan_profiling.task import TaskBase, log_handle
from glycan_profiling.database.builder.glycan.glycan_source import (
GlycanHypothesisSerializerBase, GlycanTransformer,
DBGlycanComposition, formula, GlycanCompositionToClass,
GlycanTypes)
from glycan_profiling.structure import KeyTransformingDecoratorDict
def key_transform(name):
return str(name).lower().replace(" ", '-')
synthesis_register = KeyTransformingDecoratorDict(key_transform)
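# Hedged illustration (not part of the original module): key_transform normalizes
# registry keys, e.g.
#
#     key_transform('Mammalian N-Glycan')   # -> 'mammalian-n-glycan'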
class MultiprocessingGlycomeTask(MultiprocessingGlycome, TaskBase):
def _log(self, message):
log_handle.log(message)
def log_generation_chunk(self, i, chunks, current_generation):
self._log(".... Task %d/%d finished (%d items generated)" % (
i, len(chunks), len(current_generation)))
class GlycanSynthesis(TaskBase):
glycan_classification = None
def __init__(self, glycosylases=None, glycosyltransferases=None, seeds=None, limits=None,
convert_to_composition=True, n_processes=5):
self.glycosylases = glycosylases or {}
self.glycosyltransferases = glycosyltransferases or {}
self.seeds = seeds or []
self.limits = limits or []
self.convert_to_composition = convert_to_composition
self.n_processes = n_processes
def remove_enzyme(self, enzyme_name):
if enzyme_name in self.glycosylases:
return self.glycosylases.pop(enzyme_name)
elif enzyme_name in self.glycosyltransferases:
return self.glycosyltransferases.pop(enzyme_name)
else:
raise KeyError(enzyme_name)
def add_enzyme(self, enzyme_name, enzyme):
if isinstance(enzyme, Glycosylase):
self.glycosylases[enzyme_name] = enzyme
elif isinstance(enzyme, Glycosyltransferase):
self.glycosyltransferases[enzyme_name] = enzyme
else:
raise TypeError("Don't know where to put object of type %r" % type(enzyme))
def add_limit(self, limit):
self.limits.append(limit)
def add_seed(self, structure):
if structure in self.seeds:
return
self.seeds.append(structure)
def build_glycome(self):
glycome = MultiprocessingGlycomeTask(
self.glycosylases, self.glycosyltransferases,
self.seeds, track_generations=False,
limits=self.limits, processes=self.n_processes)
return glycome
def convert_enzyme_graph_composition(self, glycome):
self.log("Converting Enzyme Graph into Glycan Set")
glycans = set()
glycans.update(glycome.enzyme_graph)
for i, v in enumerate(glycome.enzyme_graph.values()):
if i and i % 100000 == 0:
self.log(".... %d Glycans In Set" % (len(glycans)))
glycans.update(v)
self.log(".... %d Glycans In Set" % (len(glycans)))
composition_graph = defaultdict(_enzyme_graph_inner)
compositions = set()
cache = StructureConverter()
i = 0
for s in glycans:
i += 1
gc = cache[s]
for child, enz in glycome.enzyme_graph[s].items():
composition_graph[gc][cache[child]].update(enz)
if i % 1000 == 0:
self.log(".... Converted %d Compositions (%d/%d Structures, %0.2f%%)" % (
len(compositions), i, len(glycans), float(i) / len(glycans) * 100.0))
compositions.add(gc)
return compositions, composition_graph
def extract_structures(self, glycome):
self.log("Converting Enzyme Graph into Glycan Set")
solutions = list()
for i, structure in enumerate(glycome.seen):
if i and i % 10000 == 0:
self.log(".... %d Glycans Extracted" % (i,))
solutions.append(structure)
return solutions, glycome.enzyme_graph
def run(self):
logger = self.ipc_logger()
glycome = self.build_glycome()
old_logger = glycome._log
glycome._log = logger.handler
for i, gen in enumerate(glycome.run()):
self.log(".... Generation %d: %d Structures" % (i, len(gen)))
self.glycome = glycome
logger.stop()
glycome._log = old_logger
if self.convert_to_composition:
compositions, composition_enzyme_graph = self.convert_enzyme_graph_composition(glycome)
return compositions, composition_enzyme_graph
else:
structures, enzyme_graph = self.extract_structures(glycome)
return structures, enzyme_graph
class Limiter(object):
def __init__(self, max_nodes=26, max_mass=5500.0):
self.max_mass = max_mass
self.max_nodes = max_nodes
def __call__(self, x):
return len(x) < self.max_nodes and x.mass() < self.max_mass
GlycanSynthesis.size_limiter_type = Limiter
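# Hedged usage sketch (not part of the original module): bounding an N-glycan
# synthesis run with the size limiter defined above.
#
#     limiter = Limiter(max_nodes=20, max_mass=4000.0)
#     synthesis = NGlycanSynthesis(limits=[limiter], convert_to_composition=True, n_processes=2)
#     compositions, composition_graph = synthesis.run()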
@synthesis_register("n-glycan")
@synthesis_register("mammalian-n-glycan")
class NGlycanSynthesis(GlycanSynthesis):
glycan_classification = GlycanTypes.n_glycan
def _get_initial_components(self):
glycosidases, glycosyltransferases, seeds = make_n_glycan_pathway()
glycosyltransferases.pop('siat2_3')
child = iupac.loads("a-D-Neup5Gc")
parent = iupac.loads("b-D-Galp-(1-4)-b-D-Glcp2NAc")
siagct2_6 = Glycosyltransferase(6, 2, parent, child, parent_node_id=3)
glycosyltransferases['siagct2_6'] = siagct2_6
return glycosidases, glycosyltransferases, seeds
def __init__(self, glycosylases=None, glycosyltransferases=None, seeds=None, limits=None,
convert_to_composition=True, n_processes=5):
sylases, transferases, more_seeds = self._get_initial_components()
sylases.update(glycosylases or {})
transferases.update(glycosyltransferases or {})
more_seeds.extend(seeds or [])
super(NGlycanSynthesis, self).__init__(sylases, transferases, more_seeds,
limits, convert_to_composition, n_processes)
@synthesis_register("human-n-glycan")
class HumanNGlycanSynthesis(NGlycanSynthesis):
def _get_initial_components(self):
glycosidases, glycosyltransferases, seeds = super(
HumanNGlycanSynthesis, self)._get_initial_components()
glycosyltransferases.pop('siagct2_6')
glycosyltransferases.pop('agal13galt')
glycosyltransferases.pop('gntE')
return glycosidases, glycosyltransferases, seeds
@synthesis_register("mucin-o-glycan")
@synthesis_register("mammalian-mucin-o-glycan")
class MucinOGlycanSynthesis(GlycanSynthesis):
glycan_classification = GlycanTypes.o_glycan
def _get_initial_components(self):
glycosidases, glycosyltransferases, seeds = make_mucin_type_o_glycan_pathway()
parent = iupac.loads("a-D-Galp2NAc")
child = iupac.loads("a-D-Neup5Gc")
sgt6gal1 = Glycosyltransferase(6, 2, parent, child, terminal=False)
glycosyltransferases['sgt6gal1'] = sgt6gal1
parent = iupac.loads("b-D-Galp-(1-3)-a-D-Galp2NAc")
child = iupac.loads("a-D-Neup5Gc")
sgt3gal2 = Glycosyltransferase(3, 2, parent, child, parent_node_id=3)
glycosyltransferases['sgt3gal2'] = sgt3gal2
parent = iupac.loads("b-D-Galp-(1-3)-a-D-Galp2NAc")
child = iupac.loads("a-D-Neup5Gc")
sgt6gal2 = Glycosyltransferase(6, 2, parent, child, parent_node_id=3)
glycosyltransferases['sgt6gal2'] = sgt6gal2
return glycosidases, glycosyltransferases, seeds
def __init__(self, glycosylases=None, glycosyltransferases=None, seeds=None, limits=None,
convert_to_composition=True, n_processes=5):
sylases, transferases, more_seeds = self._get_initial_components()
sylases.update(glycosylases or {})
transferases.update(glycosyltransferases or {})
more_seeds.extend(seeds or [])
super(MucinOGlycanSynthesis, self).__init__(sylases, transferases, more_seeds,
limits, convert_to_composition, n_processes)
@synthesis_register("human-mucin-o-glycan")
class HumanMucinOGlycanSynthesis(MucinOGlycanSynthesis):
def _get_initial_components(self):
        glycosidases, glycosyltransferases, seeds = super(
            HumanMucinOGlycanSynthesis, self)._get_initial_components()
glycosyltransferases.pop("sgt6gal1")
glycosyltransferases.pop("sgt3gal2")
glycosyltransferases.pop("sgt6gal2")
return glycosidases, glycosyltransferases, seeds
class StructureConverter(object):
def __init__(self):
self.cache = dict()
def convert(self, structure_text):
if structure_text in self.cache:
return self.cache[structure_text]
structure = glycoct.loads(structure_text)
gc = HashableGlycanComposition.from_glycan(structure).thaw()
gc.drop_stems()
gc.drop_configurations()
gc.drop_positions()
gc = HashableGlycanComposition(gc)
self.cache[structure_text] = gc
return gc
def __getitem__(self, structure_text):
return self.convert(structure_text)
def __repr__(self):
return "%s(%d)" % (self.__class__.__name__, len(self.cache))
class AdaptExistingGlycanGraph(TaskBase):
def __init__(self, graph, enzymes_to_remove):
self.graph = graph
self.enzymes_to_remove = set(enzymes_to_remove)
self.enzymes_available = set(self.graph.enzymes())
if (self.enzymes_to_remove - self.enzymes_available):
raise ValueError("Required enzymes %r not found" % (
self.enzymes_to_remove - self.enzymes_available,))
def remove_enzymes(self):
enz_to_remove = self.enzymes_to_remove
for enz in enz_to_remove:
self.log(".... Removing Enzyme %s" % (enz,))
self.graph.remove_enzyme(enz)
for entity in (self.graph.parentless() - self.graph.seeds):
self.graph.remove(entity)
def run(self):
self.log("Adapting Enzyme Graph with %d nodes and %d edges" % (
self.graph.node_count(), self.graph.edge_count()))
self.remove_enzymes()
self.log("After Adaption, Graph has %d nodes and %d edges" % (
self.graph.node_count(), self.graph.edge_count()))
class ExistingGraphGlycanHypothesisSerializer(GlycanHypothesisSerializerBase):
def __init__(self, enzyme_graph, database_connection, enzymes_to_remove=None, reduction=None,
derivatization=None, hypothesis_name=None, glycan_classification=None):
if enzymes_to_remove is None:
enzymes_to_remove = set()
GlycanHypothesisSerializerBase.__init__(self, database_connection, hypothesis_name)
self.enzyme_graph = enzyme_graph
self.enzymes_to_remove = set(enzymes_to_remove)
self.glycan_classification = glycan_classification
self.reduction = reduction
self.derivatization = derivatization
self.loader = None
self.transformer = None
def build_glycan_compositions(self):
adapter = AdaptExistingGlycanGraph(self.enzyme_graph, self.enzymes_to_remove)
adapter.start()
components = adapter.graph.nodes()
for component in components:
if isinstance(component, FrozenGlycanComposition):
component = component.thaw()
yield component, [self.glycan_classification]
def make_pipeline(self):
self.loader = self.build_glycan_compositions()
self.transformer = GlycanTransformer(self.loader, self.reduction, self.derivatization)
def run(self):
self.make_pipeline()
structure_class_lookup = self.structure_class_loader
acc = []
counter = 0
for composition, structure_classes in self.transformer:
mass = composition.mass()
composition_string = composition.serialize()
formula_string = formula(composition.total_composition())
inst = DBGlycanComposition(
calculated_mass=mass, formula=formula_string,
composition=composition_string,
hypothesis_id=self.hypothesis_id)
self.session.add(inst)
self.session.flush()
counter += 1
for structure_class in structure_classes:
structure_class = structure_class_lookup[structure_class]
acc.append(dict(glycan_id=inst.id, class_id=structure_class.id))
if len(acc) % 100 == 0:
self.session.execute(GlycanCompositionToClass.insert(), acc)
acc = []
if acc:
self.session.execute(GlycanCompositionToClass.insert(), acc)
acc = []
self.session.commit()
self.log("Generated %d glycan compositions" % counter)
class SynthesisGlycanHypothesisSerializer(GlycanHypothesisSerializerBase):
def __init__(self, glycome, database_connection, reduction=None,
derivatization=None, hypothesis_name=None, glycan_classification=None):
if glycan_classification is None:
glycan_classification = glycome.glycan_classification
GlycanHypothesisSerializerBase.__init__(self, database_connection, hypothesis_name)
self.glycome = glycome
self.glycan_classification = glycan_classification
self.reduction = reduction
self.derivatization = derivatization
self.loader = None
self.transformer = None
def build_glycan_compositions(self):
components, enzyme_graph = self.glycome.start()
for component in components:
yield component, [self.glycan_classification]
def make_pipeline(self):
self.loader = self.build_glycan_compositions()
self.transformer = GlycanTransformer(self.loader, self.reduction, self.derivatization)
def run(self):
self.make_pipeline()
structure_class_lookup = self.structure_class_loader
acc = []
counter = 0
for composition, structure_classes in self.transformer:
mass = composition.mass()
composition_string = composition.serialize()
formula_string = formula(composition.total_composition())
inst = DBGlycanComposition(
calculated_mass=mass, formula=formula_string,
composition=composition_string,
hypothesis_id=self.hypothesis_id)
if (counter + 1) % 100 == 0:
self.log("Stored %d glycan compositions" % counter)
self.session.add(inst)
self.session.flush()
counter += 1
for structure_class in structure_classes:
structure_class = structure_class_lookup[structure_class]
acc.append(dict(glycan_id=inst.id, class_id=structure_class.id))
if len(acc) % 100 == 0:
self.session.execute(GlycanCompositionToClass.insert(), acc)
acc = []
if acc:
self.session.execute(GlycanCompositionToClass.insert(), acc)
acc = []
self.session.commit()
self.log("Stored %d glycan compositions" % counter)
|
python
|
"""Define the setup function using setup.cfg."""
from setuptools import setup
setup()
|
python
|
"""
====================
einsteinpy_geodesics
====================
Julia wrapper for Geodesics
"""
__version__ = "0.2.dev0"
from .geodesics_wrapper import solveSystem
|
python
|
from leek.api.routes.api_v1 import api_v1_blueprint
from leek.api.routes.manage import manage_bp
from leek.api.routes.users import users_bp
from leek.api.routes.applications import applications_bp
from leek.api.routes.events import events_bp
from leek.api.routes.search import search_bp
from leek.api.routes.agent import agent_bp
from leek.api.routes.control import control_bp
def register_blueprints(app):
# Register blueprints
app.register_blueprint(api_v1_blueprint)
app.register_blueprint(manage_bp)
app.register_blueprint(users_bp)
app.register_blueprint(applications_bp)
app.register_blueprint(events_bp)
app.register_blueprint(search_bp)
app.register_blueprint(agent_bp)
app.register_blueprint(control_bp)
|
python
|
from __future__ import division
from __future__ import absolute_import
import pycuda.driver as drv
import pycuda.gpuarray as gpuarray
import atexit
STREAM_POOL = []
def get_stream():
if STREAM_POOL:
return STREAM_POOL.pop()
else:
return drv.Stream()
class AsyncInnerProduct:
def __init__(self, a, b, pagelocked_allocator):
self.gpu_result = gpuarray.dot(a, b)
self.gpu_finished_evt = drv.Event()
self.gpu_finished_evt.record()
self.gpu_finished = False
self.pagelocked_allocator = pagelocked_allocator
def get_host_result(self):
if not self.gpu_finished:
if self.gpu_finished_evt.query():
self.gpu_finished = True
self.copy_stream = get_stream()
self.host_dest = self.pagelocked_allocator(
self.gpu_result.shape, self.gpu_result.dtype, self.copy_stream
)
drv.memcpy_dtoh_async(
self.host_dest, self.gpu_result.gpudata, self.copy_stream
)
self.copy_finished_evt = drv.Event()
self.copy_finished_evt.record()
else:
if self.copy_finished_evt.query():
STREAM_POOL.append(self.copy_stream)
return self.host_dest
def _at_exit():
STREAM_POOL[:] = []
atexit.register(_at_exit)
|
python
|
from setuptools import setup,find_packages
import os
pathroot = os.path.split(os.path.realpath(__file__))[0]
setup(
name='sqrt_std',
version='0.1.0',
packages = find_packages(),
entry_points = {
'console_scripts': ['sqrt_std=lib.sqrt_std:main'],
}
)
|
python
|
from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
from tests import pyunit_utils
import h2o
def h2ocluster_status():
"""
Python API test: h2o.cluster_status()
Deprecated, use h2o.cluster().show_status(True)
"""
ret = h2o.cluster_status() # no return type
assert ret is None
if __name__ == "__main__":
pyunit_utils.standalone_test(h2ocluster_status)
else:
h2ocluster_status()
|
python
|
#Main 'run through' of the program. This is run periodically
#to update the state of the file to match what has happened on
#Qualtrics, as well as to send out new surveys in accordance
#with how many have expired or been completed
import urllib
import config
import quapi
import parsers
import helpers
import filemanager
import os
from datetime import datetime, timedelta
#load config.
config.init()
curDir = os.path.dirname(__file__)
#read old validation file, exit if it's bad
valPath = os.path.join(curDir, 'validation.txt')
validation = filemanager.readcsv(valPath)
if validation[0][0] != 'GOOD':
print(validation[0][0])
print('Something went wrong at ' + str(datetime.now()) + ', stopping')
exit()
validation[0][0] = 'BAD'
filemanager.writecsv(validation, valPath)
#read old csv to get data.
csvPath = os.path.join(curDir, config.fileName)
arr = filemanager.readcsv(csvPath)
users = filemanager.arrayToUsers(arr)
preCount = 0
#count how many completed surveys there were at the end of the last run
for i in range(0,len(users)):
if users[i].state == 'c':
preCount = preCount +1
#query qualtrics for responses
#TODO This part needs to be changed to accommodate more than two surveys
#It's silly to have essentially the same line twice, let's generalize it.
xmlResp1 = quapi.makeRequest(quapi.getLegacyResponseData(
config.survey1ID, config.survey1Questions))
xmlResp2 = quapi.makeRequest(quapi.getLegacyResponseData(
config.survey2ID,config.survey2Questions))
#TODO This part needs to be changed to accommodate more than two surveys
#It's silly to have essentially the same line twice, let's generalize it.
arr1 = parsers.parseForEmails(xmlResp1,config.survey1Refs)
arr2 = parsers.parseForEmails(xmlResp2,config.survey2Refs)
#integrate new responses with old data, set repeats and invalids to D,
#set completed surveys to C, send them thank yous
#TODO Generalize this to more than two surveys
for d in range(0, 2):
    currSurv = arr1
    if d == 1:
        currSurv = arr2
for i in range(0,len(currSurv)):
for j in range(0,len(users)):
if currSurv[i][0] == users[j].email:
users[j].state = 'c'
for k in range(0,len(users[j].childrenID)):
users[j].childrenID[k] = currSurv[i][k+1]
if users[j].childrenID[k] == None:
users[j].childrenID[k] = ''
users[j].childrenID[k] = users[j].childrenID[k].replace(' ', '')
#count number of completed surveys in integrated list
postCount = 0
for i in range(len(users)):
if users[i].state == 'c':
postCount = postCount + 1
#calculate number of 'credits' or number or maximum possible
#new surveys to be sent this round
credits = (postCount - preCount) * config.creditsForCompletions
#kill all nonvalid surveys
if config.allowOnlySuffixes == 1:
for i in range(0,len(users)):
if config.suffix not in users[i].email:
            users[i].state = 'd'
for i in range(0,len(users)):
invalid = '~`!#$%^&*()_-+={}[]:>;\',</?*-+'
for j in users[i].email:
for k in invalid:
if j ==k:
users[i].state = 'd'
#kill expired surveys
for i in range(len(users)):
if ((users[i].sendTime + config.expiry) < datetime.now()
and users[i].state == 's'):
        print('killed expired survey belonging to ' + users[i].email + ' at '
              + str(users[i].sendTime))
users[i].state = 'd'
credits = credits + 1
#add children to the list of users
for i in range(0, len(users)):
if users[i].state == 'c':
for j in range(0, len(users[i].childrenID)):
found = 0
for k in range(0, len(users)):
if users[i].childrenID[j] == users[k].email:
found = 1
if found == 0:
if users[i].childrenID[j]:
new = filemanager.User()
new.email = users[i].childrenID[j]
new.email = new.email.replace(' ', '')
new.parentID = users[i].email
new.state = 'n'
users.append(new)
#check for Qs more than 1 day old, send surveys, set state to S
for i in range(0,len(users)):
if (users[i].state == 'q'
and datetime.now() > (users[i].selectTime + config.delay)):
surv = helpers.chooseSurvey()
subj = ''
if users[i].parentID == '' or config.altSubject == 1:
subj = config.subject
else:
subj = config.subject2 + users[i].parentID
quapi.sendSurveySubjectExpiry(users[i].email, surv,subj)
users[i].state = 's'
users[i].sendTime = datetime.now()
users[i].survey = surv
#calculate ave distance of each N to Qs and Ss and Cs
listofListOfParents = [None] *len(users)
dists = [0] * len(users)
for i in range(0,len(users)):
listofListOfParents[i] = helpers.getParentList(users,i)
for i in range(0,len(users)):
if users[i].state == 'n':
if not users[i].parentID:
dists[i] = 10000
else:
for j in range(0,len(users)):
if (users[j].state == 's'or users[j].state == 'c'
or users[j].state == 'q'):
dists[i] = (dists[i] + helpers.calcDist(
listofListOfParents[i],listofListOfParents[j]))
#Count the total number of eligible surveys for denominator
running = 0
for i in range(0,len(users)):
if users[i].state == 's' or users[i].state == 'c' or users[i].state == 'q':
running = running +1
#set Ns to Qs until either credits, or total coupons are exceeded.
while credits > 0 and running < config.total:
index = dists.index(max(dists))
if users[index].state == 'n':
users[index].state = 'q'
users[index].selectTime = datetime.now()
credits = credits-1
running = running + 1
if running == config.total:
print('ran out of coupons at ' + str(datetime.now()))
exit()
dists[index] = 0
found = 0
for i in range(0,len(users)):
if users[i].state == 'n':
found = 1
if not found:
credits = 0
#write to csv
filemanager.writecsv(filemanager.usersToArray(users), csvPath)
print(str(datetime.now()))
validation[0][0] = 'GOOD'
filemanager.writecsv(validation, valPath)
|
python
|
"""HTML Template Generator
Intended to parse HTML code and emit Python code compatible with Python
Templates.
TODO
- Change tag calls to pass void=True
Open Questions
1. How to merge the generic structure of parsed HTML with existing classes like
HTMLTemplate? For example: the <head> and <body> tags should be merged.
2. How to identify common patterns between templates and refactor those common
patterns into parent classes?
3. How to identify common patterns within a template and refactor to template
class methods (possibly with different parameters)?
"""
import argparse
import bs4
import requests
from urllib.parse import urljoin
class HTMLTemplateGenerator:
def __init__(self, url, name):
self.url = url
self.name = name
self.indent = 0
def put(self, value):
print(f"{' ' * self.indent}{value}")
def run(self):
self.put(f'class {self.name}(HTMLTemplate):')
self.indent += 4
self.put('def run(self):')
self.indent += 4
resp = requests.get(self.url)
soup = bs4.BeautifulSoup(resp.content, 'lxml')
for element in soup.contents:
self.visit(element)
def visit(self, element):
visit_name = 'visit_' + type(element).__name__.lower()
visit_method = getattr(self, visit_name)
visit_method(element)
def visit_navigablestring(self, element):
text = str(element)
text = text.strip()
if text:
self.put(f'self.add({text!r})')
def visit_comment(self, element):
text = str(element)
comment = f'<!--{text}-->'
self.put(f'self.add({comment!r})')
def visit_tag(self, element):
attrs = getattr(element, 'attrs', {})
for key, value in attrs.items():
if isinstance(value, list) and len(value) == 1:
value = value[0]
if key == 'href':
value = urljoin(self.url, value)
elif key == 'src':
value = urljoin(self.url, value)
attrs[key] = value
attrs_arg = f', attrs={attrs!r}' if attrs else ''
if not element.contents:
self.put(f'self.tag({element.name!r}{attrs_arg})')
else:
self.put(f'with self.tag({element.name!r}{attrs_arg}):')
self.indent += 4
for subelement in element.contents:
self.visit(subelement)
self.indent -= 4
def visit_doctype(self, element):
print(f"{' ' * self.indent}self.add('<!doctype {element}>')")
def main():
parser = argparse.ArgumentParser()
parser.add_argument('url')
parser.add_argument('name')
args = parser.parse_args()
generator = HTMLTemplateGenerator(args.url, args.name)
generator.run()
if __name__ == '__main__':
main()
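# Hypothetical invocation sketch (the script name, URL, and class name below are
# placeholders, not defined anywhere in this module):
#
#   python template_generator.py https://example.com ExamplePage
#
# For a fragment such as <p class="intro">Hi</p>, visit_tag() and
# visit_navigablestring() above would emit roughly:
#
#   class ExamplePage(HTMLTemplate):
#       def run(self):
#           with self.tag('p', attrs={'class': 'intro'}):
#               self.add('Hi')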
|
python
|
import dash
import dash_bootstrap_components as dbc
from apps.monitor import Monitor
from apps.controller import Controller
from apps.navbar import navbar
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=[
dbc.themes.BOOTSTRAP, 'assets/app.css'])
def start():
monitor = Monitor(app)
controller = Controller(app)
monitor.callbacks()
controller.callbacks()
app.layout = dbc.Container(
[
navbar,
dbc.Row(
[
dbc.Col(monitor.layout(), id="id-left-panel", width=6),
dbc.Col(controller.layout(), id="id-right-panel", width=6),
]
)
],
fluid=True,
style={"padding": 0}
)
app.run_server(debug=False, host='0.0.0.0')
if __name__ == "__main__":
start()
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.PosDishGroupModel import PosDishGroupModel
class KoubeiCateringPosDishgroupSyncModel(object):
def __init__(self):
self._pos_dish_group_model = None
@property
def pos_dish_group_model(self):
return self._pos_dish_group_model
@pos_dish_group_model.setter
def pos_dish_group_model(self, value):
if isinstance(value, PosDishGroupModel):
self._pos_dish_group_model = value
else:
self._pos_dish_group_model = PosDishGroupModel.from_alipay_dict(value)
def to_alipay_dict(self):
params = dict()
if self.pos_dish_group_model:
if hasattr(self.pos_dish_group_model, 'to_alipay_dict'):
params['pos_dish_group_model'] = self.pos_dish_group_model.to_alipay_dict()
else:
params['pos_dish_group_model'] = self.pos_dish_group_model
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = KoubeiCateringPosDishgroupSyncModel()
if 'pos_dish_group_model' in d:
o.pos_dish_group_model = d['pos_dish_group_model']
return o
|
python
|
with open("file.txt") as f:
something(f)
|
python
|
#!/app/ansible2/bin/python
# -*- coding: utf-8 -*-
DOCUMENTATION = '''
---
author: Arthur Reyes
module: pyvim_facts
description:
- This module gathers and correlates a larger number of useful facts
from a specified guest on VMWare vSphere.
version_added: "0.1"
requirements:
- pyVim
notes:
- This module disables SSL Security and warnings for invalid certificates.
- Tested with Ansible 2.0.1.0
options:
host:
description:
- The vSphere server that manages the cluster where the guest is
located on.
required: true
aliases: ['vsphere']
login:
description:
- A login name which can authenticate to the vSphere cluster.
required: true
aliases: ['admin']
password:
description:
- The password used to authenticate to the vSphere cluster.
required: true
aliases: ['secret']
port:
description:
- The port the vSphere listens on.
required: false
default: 443
guest:
description:
- The name of the guest to gather facts from the vSphere cluster.
Apparently the same guest name can exist in multiple datacenters, so
this value is ignored if uuid is defined.
required: true
uuid:
description:
- the instanceUuid of the guest. Useful to identify a unique guest
when multiple virtual machines with the same name exist across
clusters. If not defined and multiple guests are returned by a query
then this module will fail. If defined, guest name is ignored.
required: false
default: null
'''
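# Hypothetical playbook usage sketch; the host, credentials, and guest name below
# are placeholders and not taken from this module.
EXAMPLES = '''
- name: Gather guest facts from vSphere
  pyvim_facts:
    host: vcenter.example.com
    login: admin
    password: secret
    guest: example-guest-vm
  delegate_to: localhost
'''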
import atexit
import sys
import requests
try:
from pyVim import connect
from pyVmomi import vmodl
from pyVmomi import vim
except ImportError:
print "failed=True msg='pyvmoni python module unavailable'"
sys.exit(1)
def main():
module = AnsibleModule(
argument_spec = dict(
host = dict(required=True, aliases=['vsphere']),
port = dict(required=False, default=443),
login = dict(required=True, aliases=['admin']),
password = dict(required=True, aliases=['secret']),
guest = dict(required=True),
uuid = dict(required=False, default=None),
)
)
host = module.params.get('host')
port = module.params.get('port')
login = module.params.get('login')
password = module.params.get('password')
guest = module.params.get('guest')
uuid = module.params.get('uuid')
context = connect.ssl.SSLContext(connect.ssl.PROTOCOL_TLSv1)
context.verify_mode = connect.ssl.CERT_NONE
requests.packages.urllib3.disable_warnings()
try:
service_instance = connect.SmartConnect(host=host,
port=int(port),
user=login,
pwd=password,
sslContext=context)
except Exception, e:
module.fail_json(msg='Failed to connect to %s: %s' % (host, e))
atexit.register(connect.Disconnect, service_instance)
content = service_instance.RetrieveContent()
VMView = content.viewManager.CreateContainerView(
content.rootFolder, [vim.VirtualMachine], True)
vms = []
children = VMView.view
VMView.Destroy()
for child in children:
if uuid and child.summary.config.instanceUuid == uuid:
# defining a uuid in the module params overrides guest name
vms.append(child)
break
elif not uuid and child.summary.config.name == guest:
vms.append(child)
if len(vms) == 1:
vm = vms[0]
sane_disk = vm.summary.config.vmPathName.replace('[', '').replace('] ', '/')
sane_path = "/".join(sane_disk.split('/')[0:-1])
#sanitize the datastore name so we can use it as search criteria
datastore = sane_path.split('/')[0]
        # correlate datacenter facts
DCView = content.viewManager.CreateContainerView( content.rootFolder, [vim.Datacenter],
True )
for dc in DCView.view:
DSView = content.viewManager.CreateContainerView( dc, [vim.Datastore], True )
for ds in DSView.view:
if ds.info.name == datastore:
vm_host_datacenter = dc.name
break
DCView.Destroy()
DSView.Destroy()
        # correlate datastore facts
HSView = content.viewManager.CreateContainerView(content.rootFolder,
[vim.HostSystem],
True)
esxhosts = HSView.view
HSView.Destroy()
for esxhost in esxhosts:
if esxhost.name == vm.summary.runtime.host.summary.config.name:
vm_host = esxhost
host_storage = vm_host.configManager.storageSystem
host_storage_info = host_storage.fileSystemVolumeInfo.mountInfo
for mount in host_storage_info:
if str(mount.volume.name) == str(datastore):
vm_host_datastore = mount.volume.name
vm_host_datastore_capacity = mount.volume.capacity
vm_host_datastore_max_blocks = mount.volume.maxBlocks
break
break
facts = {
'general' : {
'name': vm.summary.config.name,
'full_name': vm.summary.config.guestFullName,
'id': vm.summary.config.guestId,
'instance_uuid': vm.summary.config.instanceUuid,
'bios_uuid': vm.summary.config.uuid,
'processor_count': vm.summary.config.numCpu,
'memtotal_mb': vm.summary.config.memorySizeMB,
'datacenter': vm_host_datacenter,
}
}
facts['vm_state'] = {
'host': vm.summary.runtime.host.summary.config.name,
'power': vm.summary.runtime.powerState,
'status': vm.summary.overallStatus,
}
facts['hm_datastore'] = {
'name': vm_host_datastore,
'capacity': vm_host_datastore_capacity,
'max_block_size': vm_host_datastore_max_blocks,
'guest_disk': vm.summary.config.vmPathName,
'guest_path_sane': sane_path,
'guest_path': "/".join((vm.summary.config.vmPathName).split('/')[0:-1]),
'guest_disk_sane': sane_disk,
}
facts['vm_bios'] = {
'bootOrder': vm.config.bootOptions.bootOrder,
}
# enumerate network
ints = {}
intidx = 0
for entry in vm.config.hardware.device:
if not hasattr(entry, 'macAddress'):
continue
int_name = 'eth' + str(intidx)
ints[int_name] = {
'address_type' : entry.addressType,
'mac' : entry.macAddress,
'mac_upper' : entry.macAddress.upper(),
'mac_dash': entry.macAddress.replace(':', '-'),
'summary': entry.deviceInfo.summary,
}
intidx += 1
facts['vm_network'] = ints
        # enumerate virtual media
virtual_devices = {}
virtual_media_types = ['CD/DVD drive', 'USB controller', 'Floppy drive' ]
for entry in vm.config.hardware.device:
if hasattr(entry, 'macAddress'):
continue
if not any(device in entry.deviceInfo.label for device in virtual_media_types):
continue
virtual_devices[entry.deviceInfo.label] = {
'summary': entry.deviceInfo.summary,
'unitNumber': entry.unitNumber,
}
facts['vm_removeable_media'] = virtual_devices
elif len(vms) == 0:
module.fail_json(msg='no virtual machines found')
else:
# we only want a single unique host.
        module.fail_json(msg='guest lookup returned multiple virtual machines: %s' % (vms,))
module.exit_json(ansible_facts=facts)
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
|
python
|
from bk_db_tools.xlsx_data_replace import XlsxDataReplace
class SOther(XlsxDataReplace):
dataSets = [
{
'sheetName':'s_player_unit',
'firstColumn':1,
'firstRow':2,
'sql':
"""
select *
from (
select pu.unit_id
, mn.full_name
, ul.unit_level
, pu.use_for_gb12
, pu.use_for_db12
, pu.use_for_nb12
, pu.use_for_sf10
, pu.use_for_pc10
, pu.use_for_toa
, pu.use_for_dhole_griffon
, pu.use_for_dhole_inugami
, pu.use_for_dhole_warbear
, pu.use_for_dhole_fairy
, pu.use_for_dhole_pixie
, pu.use_for_dhole_werewolf
, pu.use_for_dhole_cat
, pu.use_for_dhole_howl
, pu.use_for_dhole_grim
, pu.use_for_dhole_karzhan
, pu.use_for_dhole_ellunia
, pu.use_for_dhole_lumel
, pu.use_for_dhole_khalderun
, pu.use_for_d_predator
, pu.use_for_rift_beast_fire
, pu.use_for_rift_beast_ice
, pu.use_for_rift_beast_wind
, pu.use_for_rift_beast_light
, pu.use_for_rift_beast_dark
, pu.use_for_r5
, pu.use_for_lab
, pu.use_for_arena
, pu.use_for_gwo
, pu.use_for_gwd
, pu.use_for_rta
, pu.spd_tune_max
, pu.spd_tune_atk_bar
, pu.spd_tune_buffer
, pu.buffs
, pu.spd_tune_strip
, pu.spd_tune_debuff
, pu.debuffs
, pu.spd_tune_dmg
, pu.cleanser
, pu.reviver
, pu.runed_as_healer
, pu.runed_as_tank
, pu.runed_as_bruiser
, mn.full_name || ' lvl ' || ul.unit_level || ' | ' || ul.unit_id as unit_select
from s_player_unit pu
join swex_unit_list ul on pu.unit_id = ul.unit_id
left join swarfarm_monster_names mn on mn.com2us_id = ul.com2us_id
union all
select ul2.unit_id
, mn2.full_name
, ul2.unit_level
, null as use_for_gb12
, null as use_for_db12
, null as use_for_nb12
, null as use_for_sf10
, null as use_for_pc10
, null as use_for_toa
, null as use_for_dhole_griffon
, null as use_for_dhole_inugami
, null as use_for_dhole_warbear
, null as use_for_dhole_fairy
, null as use_for_dhole_pixie
, null as use_for_dhole_werewolf
, null as use_for_dhole_cat
, null as use_for_dhole_howl
, null as use_for_dhole_grim
, null as use_for_dhole_karzhan
, null as use_for_dhole_ellunia
, null as use_for_dhole_lumel
, null as use_for_dhole_khalderun
, null as use_for_d_predator
, null as use_for_rift_beast_fire
, null as use_for_rift_beast_ice
, null as use_for_rift_beast_wind
, null as use_for_rift_beast_light
, null as use_for_rift_beast_dark
, null as use_for_r5
, null as use_for_lab
, null as use_for_arena
, null as use_for_gwo
, null as use_for_gwd
, null as use_for_rta
, null as spd_tune_max
, null as spd_tune_atk_bar
, null as spd_tune_buffer
, null as buffs
, null as spd_tune_strip
, null as spd_tune_debuff
, null as debuffs
, null as spd_tune_dmg
, null as cleanser
, null as reviver
, null as runed_as_healer
, null as runed_as_tank
, null as runed_as_bruiser
, mn2.full_name || ' lvl ' || ul2.unit_level || ' | ' || ul2.unit_id as unit_select
from swex_unit_list ul2
left join swarfarm_monster_names mn2 on mn2.com2us_id = ul2.com2us_id
left join s_player_unit pu2 on ul2.unit_id = pu2.unit_id
where pu2.unit_id is null
and ul2.unit_level >= 35
) as r
order by r.unit_level desc
, r.full_name;
"""
},
]
|
python
|
import os
import h5py
import numpy as np
type_to_id = {'worse':[0,0,1], 'okay':[0,1,0], 'better':[1,0,0]}
dirs = ['can',
'lift',
'square',
'transport']
for env in dirs:
path = "datasets/" + env + "/mh"
old = path + "/low_dim.hdf5"
new = path + "/low_dim_fewer_better.hdf5"
os.system('cp {0} {1}'.format(old, new))
def add_task_ids_for_obs(data, keys, task_id):
for k in keys:
demo = data[k]
obs = demo['obs']
t = obs['object'].shape[0]
task_indices_arr = np.tile(np.array(task_id), (t, 1))
obs.create_dataset("task_id", shape=(t, len(task_id)), dtype='f8', data=task_indices_arr)
def add_task_ids_for_next_obs(data, keys, task_id):
for k in keys:
demo = data[k]
obs = demo['next_obs']
t = obs['object'].shape[0]
task_indices_arr = np.tile(np.array(task_id), (t, 1))
obs.create_dataset("task_id", shape=(t, len(task_id)), dtype='f8', data=task_indices_arr)
def remove_eighty_percent_better(f):
# Get demos to be deleted
sorted_better_demos = sorted(f['mask']['better'][:])
num_remaining_demos = int(len(sorted_better_demos)/5)
demos_to_be_deleted = sorted_better_demos[num_remaining_demos:]
print("demos to be deleted", demos_to_be_deleted)
print("nuber of remaining better demos", num_remaining_demos)
# Delete demos in masks
for k in f['mask'].keys():
original_arr = f['mask'][k][:]
new_arr = np.array([item for item in original_arr if item not in demos_to_be_deleted])
del f['mask'][k]
f['mask'].create_dataset(k, data=new_arr)
# Delete demos in data
demos_to_be_deleted_strings = [demo.decode("utf-8") for demo in demos_to_be_deleted]
for demo in f['data'].keys():
if demo in demos_to_be_deleted_strings:
del f['data'][demo]
def remove_demos_without_task_id(f):
demos_without_task_id = []
for demo in f['data'].keys():
if 'task_id' not in f['data'][demo]['obs'].keys():
demos_without_task_id.append(demo)
for demo in demos_without_task_id:
del f['data'][demo]
for k in f['mask'].keys():
original_arr = f['mask'][k][:]
new_arr = np.array([item for item in original_arr if item.decode("utf-8") not in demos_without_task_id])
del f['mask'][k]
f['mask'].create_dataset(k, data=new_arr)
for dir in dirs:
print('modifying ' + dir)
path = 'datasets/{}/mh/low_dim_fewer_better.hdf5'.format(dir)
f = h5py.File(path, "r+")
data=f['data']
mask=f['mask']
for k,v in type_to_id.items():
demos = [demo.decode("utf-8") for demo in mask[k]]
add_task_ids_for_obs(data, demos, v)
add_task_ids_for_next_obs(data, demos, v)
remove_demos_without_task_id(f)
remove_eighty_percent_better(f)
f.close()
|
python
|
import sys
from pathlib import Path
path = str(Path(__file__).parents[1].resolve())
sys.path.append(path)
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import soundfile as sf
import torch
from jiwer import wer
import numpy as np
from sonorus.speech.lm import (
FairseqTokenDictionary,
W2lKenLMDecoder,
W2lViterbiDecoder,
W2lFairseqLMDecoder,
)
import optuna
from optuna.integration import BoTorchSampler
import joblib
def map_to_array(batch):
speech, _ = sf.read(batch["file"])
batch["speech"] = speech
return batch
def map_to_pred(batch):
input_values = processor(
batch["speech"], return_tensors="pt", padding="longest"
).input_values
with torch.no_grad():
logits = model(input_values.to("cuda")).logits
predicted_ids = torch.argmax(logits, dim=-1)
transcription = processor.batch_decode(predicted_ids)
batch["transcription"] = transcription
return batch
def map_to_pred_lm(batch, decoder):
input_values = processor(
batch["speech"], return_tensors="pt", padding="longest"
).input_values
with torch.no_grad():
logits = model(input_values.to("cuda")).logits
logits = logits.float().cpu().contiguous()
decoded = decoder.decode(logits)
# 1st sample, 1st best transcription
transcription = decoder.post_process(decoded)
batch["transcription"] = transcription
return batch
def get_wer(result, batch_size=-1, lm=False):
def transcripts():
return (
[x[0] for x in result["transcription"]] if lm else result["transcription"]
)
errors = []
if batch_size > 0:
for i in range(0, len(result), batch_size):
errors.append(
wer(
result["text"][i : i + batch_size],
transcripts()[i : i + batch_size],
)
)
else:
errors.append(wer(result["text"], transcripts()))
return np.mean(errors)
librispeech_eval = load_dataset(
"librispeech_asr",
"clean",
split="validation",
# split="test",
ignore_verifications=True,
) # ,
# download_mode="force_redownload")
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h").to("cuda")
processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
librispeech_eval = librispeech_eval.map(map_to_array)
result = librispeech_eval.map(
map_to_pred, batched=True, batch_size=1, remove_columns=["speech"]
)
print("Acoustic WER:", get_wer(result, batch_size=1000, lm=False))
token_dict = FairseqTokenDictionary(indexed_symbols=processor.get_vocab())
lexicon_path = "/home/harold/Documents/IISc-work/imperio/data/speech/fairseq/librispeech_lexicon.lst"
lm_path = "/home/harold/Documents/IISc-work/imperio/data/speech/fairseq/lm_librispeech_kenlm_word_4g_200kvocab.bin"
# decoder = W2lKenLMDecoder(
# token_dict=token_dict,
# lexicon=lexicon_path,
# lang_model=lm_path,
# beam=1500,
# beam_size_token=100,
# beam_threshold=25,
# lm_weight=1.5,
# word_weight=-1,
# unk_weight=float("-inf"),
# sil_weight=0,
# )
# result = librispeech_eval.map(lambda batch: map_to_pred_lm(batch, decoder), batched=True, batch_size=1, remove_columns=["speech"])
# print("KenLM WER:", get_wer(result, batch_size=1000, lm=True))
n_startup_trials = 10
bayes_opt_sampler = BoTorchSampler(n_startup_trials=n_startup_trials)
study = optuna.create_study(sampler=bayes_opt_sampler)
def objective(trial):
lm_weight = trial.suggest_float("lm_weight", 0, 5)
word_weight = trial.suggest_float("word_weight", -5, 5)
sil_weight = trial.suggest_float("sil_weight", -5, 5)
decoder = W2lKenLMDecoder(
token_dict=token_dict,
lexicon=lexicon_path,
lang_model=lm_path,
beam=500,
beam_size_token=100,
beam_threshold=25,
lm_weight=lm_weight,
word_weight=word_weight,
unk_weight=float("-inf"),
sil_weight=sil_weight,
)
result = librispeech_eval.map(
lambda batch: map_to_pred_lm(batch, decoder),
batched=True,
batch_size=1,
remove_columns=["speech"],
)
return get_wer(result, batch_size=1000, lm=True)
n_trials = 150
study.optimize(objective, n_trials=n_trials, show_progress_bar=True)
print("Best KenLM WER: ", study.best_value)
print("Best params: ", study.best_params)
joblib.dump(study, "speech-lm-hyperparams-opt-study.jb")
|
python
|
from __future__ import unicode_literals
import logging
from django import template
from ..utils import get_seo_model
from ..models import SeoUrl
logger = logging.getLogger(__name__)
register = template.Library()
class SeoDataNode(template.Node):
def __init__(self, variable_name):
self.variable_name = variable_name
def render(self, context):
seo_model = get_seo_model()
flat_context = context.flatten()
path = flat_context['request'].path
logger.debug('Looking for SEO object')
for obj in flat_context.values():
if (hasattr(obj, 'get_absolute_url') and
obj.get_absolute_url() == path):
logger.debug('Found object: `{}`'.format(obj))
seo = {}
for field in seo_model._meta.fields:
if getattr(obj, field.name, '') != '':
logger.debug('Adding field `{}` to SEO dict'
.format(field.name))
seo[field.name] = getattr(obj, field.name)
if seo:
context[self.variable_name] = seo
logger.debug('Returning with object data')
return ''
logger.debug('Looking for SEO URL')
try:
seo_url = SeoUrl.objects.get(url=path)
except SeoUrl.DoesNotExist:
logger.debug('No SEO URL found')
return ''
logger.debug('SEO URL found')
seo = {}
for field in seo_model._meta.fields:
if getattr(seo_url, field.name, '') != '':
seo[field.name] = getattr(seo_url, field.name)
logger.debug('Adding field `{}` to SEO dict'
.format(field.name))
context[self.variable_name] = seo
logger.debug('Returning with URL data')
return ''
def do_get_seo_data(parser, token):
bits = token.split_contents()
if len(bits) > 1 and (len(bits) > 3 or bits[1] != 'as'):
raise template.TemplateSyntaxError(('Format is {} [as variable] '
.format(bits[0])))
try:
variable_name = bits[2]
except IndexError:
variable_name = 'seo'
return SeoDataNode(variable_name)
register.tag('get_seo_data', do_get_seo_data)
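# Template usage sketch. The tag library name below is an assumption (it depends on
# the filename of this module inside the app's templatetags package), and "title" is
# only an example of a field the configured SEO model might define:
#
#   {% load seo_tags %}
#   {% get_seo_data %}              {# populates the default "seo" context variable #}
#   {% get_seo_data as page_seo %}
#   <title>{{ page_seo.title }}</title>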
|
python
|
from setuptools import setup, find_packages
packages = find_packages()
setup(name = 'cddm_experiment',
version = "1.0.0",
description = 'Tools for cross-differential dynamic microscopy experiment',
author = 'Andrej Petelin',
author_email = '[email protected]',
url="https://github.com/pypa/sampleproject",
packages = packages,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",],
python_requires='>=3.7',
)
|
python
|
class MyClass:
pass
obj = MyClass() # creating a MyClass Object
print(obj)
|
python
|
def is_palindrome(input_string):
"""Check if a string is a palindrome
irrespective of capitalisation
Returns:
True if string is a palindrome
e.g
>>> is_palindrome("kayak")
OUTPUT : True
False if string is not a palindrome
e.g
>>> is_palindrome("Boat")
OUTPUT : False
"""
# Create variables to hold new strings to be compared
new_string = ""
reversed_string = ""
# Ensure that the string is not empty
if input_string != '':
# Change input into lower case and loop through each letter
for char in input_string.lower():
# Remove all white spaces
# Add each letter to a string
# Reverse the string
if char != " ":
new_string += char
reversed_string = ''.join(reversed(new_string))
# Compare the strings
if new_string == reversed_string:
return True
return False
return "String is empty"
# Tests
print(is_palindrome("kayak")) # Return True
print(is_palindrome("Hold Your fire")) # Return False
print(is_palindrome("Never Odd or Even")) # Return True
print(is_palindrome("abc")) # Return False
print(is_palindrome("")) # Return "String is empty"
|
python
|
from izihawa_utils.text import camel_to_snake
def test_camel_to_snake():
assert camel_to_snake('CamelCase') == 'camel_case'
assert camel_to_snake('camelCase') == 'camel_case'
assert camel_to_snake('camelCase camel123Case') == 'camel_case camel123_case'
assert camel_to_snake('camelCase\ncamelCase') == 'camel_case\ncamel_case'
|
python
|
from flask import Blueprint, request
from kaos_backend.controllers.notebook import NotebookController
from kaos_backend.util.flask import jsonify
from kaos_model.api import Response
def build_notebook_blueprint(controller: NotebookController):
blueprint = Blueprint('notebook', __name__)
@blueprint.route("/notebook/<workspace>", methods=["GET"])
@jsonify
def notebook_list(workspace):
return Response(
response=controller.list_notebooks(workspace)
)
@blueprint.route("/notebook/<workspace>", methods=["POST"])
@jsonify
def notebook_create(workspace):
user = request.args.get('user', 'default').replace('.', '')
cpu = request.args.get('cpu', None)
memory = request.args.get('memory', None)
gpu = int(request.args.get('gpu', 0))
return controller.submit_notebook(workspace, user, request.data, cpu=cpu, memory=memory, gpu=gpu)
@blueprint.route("/notebook/<workspace>/build/<job_id>/logs", methods=["GET"])
@jsonify
def build_notebook_logs(workspace, job_id):
return controller.get_build_logs(workspace, job_id)
@blueprint.route("/notebook/<notebook_name>", methods=["DELETE"])
@jsonify
def notebook_remove(notebook_name):
return controller.remove_notebook(notebook_name)
return blueprint
|
python
|
#!/usr/bin/env python3.5
# -*- coding: utf-8 -*-
# setup.py
from setuptools import setup
DESCRIPTION = "See ./README.md"
LONG_DESCRIPTION = DESCRIPTION
setup(
author="Dan'",
author_email="dan@home",
name="mfm",
version="0.0.0",
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
url="http://mfm.dan.net",
    platforms=['OS Independent'],
license='See ./LICENSE',
classifiers=[
"Programming Language::Python::3.5",
],
packages=['src']
)
|
python
|
# Copyright 2020 University of New South Wales, University of Sydney, Ingham Institute
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import numpy as np
import SimpleITK as sitk
def convert_mask_to_distance_map(mask, squaredDistance=False, normalise=False):
raw_map = sitk.SignedMaurerDistanceMap(
mask,
insideIsPositive=True,
squaredDistance=squaredDistance,
useImageSpacing=True,
)
if normalise:
return raw_map / (sitk.GetArrayFromImage(raw_map).max())
else:
return raw_map
def convert_mask_to_reg_structure(mask, expansion=1, scale=lambda x: x):
distance_map = sitk.Cast(
convert_mask_to_distance_map(mask, squaredDistance=False), sitk.sitkFloat64
)
inverted_distance_map = sitk.Threshold(
distance_map
+ expansion * sitk.Cast(distance_map < (expansion), sitk.sitkFloat64),
lower=0,
upper=1000,
)
scaled_distance_map = inverted_distance_map / (
sitk.GetArrayViewFromImage(inverted_distance_map).max()
)
return scale(scaled_distance_map)
def initial_registration_command_iteration(method):
"""
Utility function to print information during initial (rigid, similarity, affine, translation) registration
"""
print(
"{0:3} = {1:10.5f}".format(
method.GetOptimizerIteration(), method.GetMetricValue()
)
)
def deformable_registration_command_iteration(method):
"""
Utility function to print information during demons registration
"""
print("{0:3} = {1:10.5f}".format(method.GetElapsedIterations(), method.GetMetric()))
def stage_iteration(method):
"""
Utility function to print information during stage change in registration
"""
print(
f"Number of parameters = {method.GetInitialTransform().GetNumberOfParameters()}"
)
def control_point_spacing_distance_to_number(image, grid_spacing):
"""
Convert grid spacing specified in distance to number of control points
"""
image_spacing = np.array(image.GetSpacing())
image_size = np.array(image.GetSize())
number_points = image_size * image_spacing / np.array(grid_spacing)
return (number_points + 0.5).astype(int)
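# Worked example (illustrative numbers only): for an axis of 200 voxels at 1.25 mm
# spacing and a requested grid spacing of 50 mm, 200 * 1.25 / 50 = 5.0, which is
# rounded to 5 control points along that axis.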
def alignment_registration(fixed_image, moving_image, default_value=0, moments=True):
moving_image_type = moving_image.GetPixelIDValue()
fixed_image = sitk.Cast(fixed_image, sitk.sitkFloat32)
moving_image = sitk.Cast(moving_image, sitk.sitkFloat32)
initial_transform = sitk.CenteredTransformInitializer(
fixed_image, moving_image, sitk.VersorRigid3DTransform(), moments
)
aligned_image = sitk.Resample(moving_image, fixed_image, initial_transform)
aligned_image = sitk.Cast(aligned_image, moving_image_type)
return aligned_image, initial_transform
def initial_registration(
fixed_image,
moving_image,
moving_structure=False,
fixed_structure=False,
options={
"shrink_factors": [8, 2, 1],
"smooth_sigmas": [4, 2, 0],
"sampling_rate": 0.1,
"final_interp": 3,
"metric": "mean_squares",
"optimiser": "gradient_descent",
"number_of_iterations": 50,
},
default_value=-1024,
trace=False,
reg_method="Similarity",
):
"""
Rigid image registration using ITK
Args
fixed_image (sitk.Image) : the fixed image
moving_image (sitk.Image): the moving image, transformed to match fixed_image
options (dict) : registration options
structure (bool) : True if the image is a structure image
Returns
registered_image (sitk.Image): the rigidly registered moving image
transform (transform : the transform, can be used directly with
sitk.ResampleImageFilter
"""
# Re-cast
fixed_image = sitk.Cast(fixed_image, sitk.sitkFloat32)
moving_image_type = moving_image.GetPixelIDValue()
moving_image = sitk.Cast(moving_image, sitk.sitkFloat32)
# Get the options
shrink_factors = options["shrink_factors"]
smooth_sigmas = options["smooth_sigmas"]
sampling_rate = options["sampling_rate"]
final_interp = options["final_interp"]
metric = options["metric"]
optimiser = options["optimiser"]
number_of_iterations = options["number_of_iterations"]
# Initialise using a VersorRigid3DTransform
initial_transform = sitk.CenteredTransformInitializer(
fixed_image, moving_image, sitk.Euler3DTransform(), False
)
# Set up image registration method
registration = sitk.ImageRegistrationMethod()
registration.SetShrinkFactorsPerLevel(shrink_factors)
registration.SetSmoothingSigmasPerLevel(smooth_sigmas)
registration.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()
registration.SetMovingInitialTransform(initial_transform)
if metric == "correlation":
registration.SetMetricAsCorrelation()
elif metric == "mean_squares":
registration.SetMetricAsMeanSquares()
elif metric == "mattes_mi":
registration.SetMetricAsMattesMutualInformation()
elif metric == "joint_hist_mi":
registration.SetMetricAsJointHistogramMutualInformation()
elif metric == "ants":
try:
ants_radius = options["ants_radius"]
except:
ants_radius = 3
registration.SetMetricAsANTSNeighborhoodCorrelation(ants_radius)
# to do: add the rest
registration.SetInterpolator(sitk.sitkLinear) # Perhaps a small gain in improvement
registration.SetMetricSamplingPercentage(sampling_rate)
registration.SetMetricSamplingStrategy(sitk.ImageRegistrationMethod.REGULAR)
# This is only necessary if using a transform comprising changes with different units
# e.g. rigid (rotation: radians, translation: mm)
# It can safely be left on
registration.SetOptimizerScalesFromPhysicalShift()
if moving_structure:
registration.SetMetricMovingMask(moving_structure)
if fixed_structure:
registration.SetMetricFixedMask(fixed_structure)
if reg_method.lower() == "translation":
registration.SetInitialTransform(sitk.TranslationTransform(3))
elif reg_method.lower() == "similarity":
registration.SetInitialTransform(sitk.Similarity3DTransform())
elif reg_method.lower() == "affine":
registration.SetInitialTransform(sitk.AffineTransform(3))
elif reg_method.lower() == "rigid":
registration.SetInitialTransform(sitk.VersorRigid3DTransform())
elif reg_method.lower() == "scaleversor":
registration.SetInitialTransform(sitk.ScaleVersor3DTransform())
elif reg_method.lower() == "scaleskewversor":
registration.SetInitialTransform(sitk.ScaleSkewVersor3DTransform())
else:
raise ValueError(
"You have selected a registration method that does not exist.\n Please select from Translation, Similarity, Affine, Rigid"
)
if optimiser.lower() == "lbfgsb":
registration.SetOptimizerAsLBFGSB(
gradientConvergenceTolerance=1e-5,
numberOfIterations=number_of_iterations,
maximumNumberOfCorrections=50,
maximumNumberOfFunctionEvaluations=1024,
costFunctionConvergenceFactor=1e7,
trace=trace,
)
elif optimiser.lower() == "exhaustive":
"""
This isn't well implemented
Needs some work to give options for sampling rates
Use is not currently recommended
"""
samples = [10, 10, 10, 10, 10, 10]
registration.SetOptimizerAsExhaustive(samples)
elif optimiser.lower() == "gradient_descent_line_search":
registration.SetOptimizerAsGradientDescentLineSearch(
learningRate=1.0, numberOfIterations=number_of_iterations
)
elif optimiser.lower() == "gradient_descent":
registration.SetOptimizerAsGradientDescent(
learningRate=1.0, numberOfIterations=number_of_iterations
)
if trace:
registration.AddCommand(
sitk.sitkIterationEvent,
lambda: initial_registration_command_iteration(registration),
)
output_transform = registration.Execute(fixed=fixed_image, moving=moving_image)
# Combine initial and optimised transform
combined_transform = sitk.CompositeTransform([initial_transform, output_transform])
registered_image = transform_propagation(
fixed_image,
moving_image,
combined_transform,
default_value=default_value,
interp=final_interp,
)
registered_image = sitk.Cast(registered_image, moving_image_type)
return registered_image, combined_transform
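# Usage sketch, assuming two volumes already loaded as SimpleITK images; the file
# names and variable names are placeholders, not part of this module:
#
#   fixed = sitk.ReadImage("fixed_ct.nii.gz")
#   moving = sitk.ReadImage("moving_ct.nii.gz")
#   registered, tfm = initial_registration(
#       fixed, moving, reg_method="Rigid", trace=True
#   )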
def transform_propagation(
fixed_image,
moving_image,
transform,
structure=False,
default_value=-1024,
interp=sitk.sitkNearestNeighbor,
debug=False,
):
"""
Transform propagation using ITK
Args
fixed_image (sitk.Image) : the fixed image
moving_image (sitk.Image) : the moving image, to be propagated
transform (sitk.transform) : the transformation; e.g. VersorRigid3DTransform,
AffineTransform
structure (bool) : True if the image is a structure image
interp (int) : the interpolation
sitk.sitkNearestNeighbor
sitk.sitkLinear
sitk.sitkBSpline
Returns
registered_image (sitk.Image) : the rigidly registered moving image
"""
resampler = sitk.ResampleImageFilter()
resampler.SetReferenceImage(fixed_image)
resampler.SetTransform(transform)
resampler.SetInterpolator(interp)
if structure:
resampler.SetDefaultPixelValue(0)
else:
resampler.SetDefaultPixelValue(default_value)
output_image = resampler.Execute(moving_image)
if structure and interp > 1:
if debug:
print(
"Note: Higher order interpolation on binary mask - using 32-bit floating point output"
)
output_image = sitk.Cast(output_image, sitk.sitkFloat32)
# Safe way to remove dodgy values that can cause issues later
output_image = sitk.Threshold(output_image, lower=1e-5, upper=100.0)
else:
output_image = sitk.Cast(output_image, moving_image.GetPixelID())
return output_image
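# Usage sketch continuing the hypothetical example above: propagating a binary mask
# with the transform returned by initial_registration, using nearest-neighbour
# interpolation so label values are preserved:
#
#   moved_mask = transform_propagation(fixed, mask, tfm, structure=True,
#                                      interp=sitk.sitkNearestNeighbor)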
def smooth_and_resample(
image,
shrink_factor,
smoothing_sigma,
isotropic_resample=False,
resampler=sitk.sitkLinear,
):
"""
Args:
image: The image we want to resample.
shrink_factor: A number greater than one, such that the new image's size is
original_size/shrink_factor.
If isotropic_resample is True, this will instead define the voxel size (mm)
smoothing_sigma: Sigma for Gaussian smoothing, this is in physical (image spacing) units,
not pixels.
isotropic_resample: A flag that changes the behaviour to resample the image to isotropic voxels of size (shrink_factor)
Return:
Image which is a result of smoothing the input and then resampling it using the given sigma
and shrink factor.
"""
if smoothing_sigma > 0:
# smoothed_image = sitk.SmoothingRecursiveGaussian(image, smoothing_sigma)
maximumKernelWidth = int(
max([8 * smoothing_sigma * i for i in image.GetSpacing()])
)
smoothed_image = sitk.DiscreteGaussian(
image, smoothing_sigma ** 2, maximumKernelWidth
)
else:
smoothed_image = image
original_spacing = image.GetSpacing()
original_size = image.GetSize()
if isotropic_resample:
scale_factor = (
shrink_factor * np.ones_like(image.GetSize()) / np.array(image.GetSpacing())
)
new_size = [
int(sz / float(sf) + 0.5) for sz, sf in zip(original_size, scale_factor)
]
if not isotropic_resample:
if type(shrink_factor) == list:
new_size = [
int(sz / float(sf) + 0.5)
for sz, sf in zip(original_size, shrink_factor)
]
else:
new_size = [int(sz / float(shrink_factor) + 0.5) for sz in original_size]
new_spacing = [
((original_sz - 1) * original_spc) / (new_sz - 1)
for original_sz, original_spc, new_sz in zip(
original_size, original_spacing, new_size
)
]
return sitk.Resample(
smoothed_image,
new_size,
sitk.Transform(),
resampler,
image.GetOrigin(),
new_spacing,
image.GetDirection(),
0.0,
image.GetPixelID(),
)
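# Worked example (illustrative numbers only): with shrink_factor=2 a 512-voxel axis
# becomes int(512 / 2.0 + 0.5) = 256 voxels. With isotropic_resample=True and
# shrink_factor=2, the per-axis factor is 2 mm divided by the voxel spacing, so a
# 1 mm-spaced axis is halved while a 2 mm-spaced axis keeps its size.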
def multiscale_demons(
registration_algorithm,
fixed_image,
moving_image,
initial_transform=None,
initial_displacement_field=None,
shrink_factors=None,
smoothing_sigmas=None,
iteration_staging=None,
isotropic_resample=False,
return_field=False,
):
"""
Run the given registration algorithm in a multiscale fashion. The original scale should not be
given as input as the original images are implicitly incorporated as the base of the pyramid.
Args:
registration_algorithm: Any registration algorithm that has an Execute(fixed_image,
moving_image, displacement_field_image) method.
fixed_image: Resulting transformation maps points from this image's spatial domain to the
moving image spatial domain.
moving_image: Resulting transformation maps points from the fixed_image's spatial domain to
this image's spatial domain.
initial_transform: Any SimpleITK transform, used to initialize the displacement field.
initial_displacement_field: Initial displacement field, if this is provided
initial_transform will be ignored
shrink_factors: Shrink factors relative to the original image's size.
        smoothing_sigmas: Amount of smoothing which is done prior to resampling the image using the
given shrink factor. These are in physical (image spacing) units.
Returns:
SimpleITK.DisplacementFieldTransform
        [Optional] Displacement (vector) field
"""
# Create image pyramid.
fixed_images = []
moving_images = []
for shrink_factor, smoothing_sigma in reversed(
list(zip(shrink_factors, smoothing_sigmas))
):
fixed_images.append(
smooth_and_resample(
fixed_image,
shrink_factor,
smoothing_sigma,
isotropic_resample=isotropic_resample,
)
)
moving_images.append(
smooth_and_resample(
moving_image,
shrink_factor,
smoothing_sigma,
isotropic_resample=isotropic_resample,
)
)
# Create initial displacement field at lowest resolution.
# Currently, the pixel type is required to be sitkVectorFloat64 because of a constraint imposed
# by the Demons filters.
if not initial_displacement_field:
if initial_transform:
initial_displacement_field = sitk.TransformToDisplacementField(
initial_transform,
sitk.sitkVectorFloat64,
fixed_images[-1].GetSize(),
fixed_images[-1].GetOrigin(),
fixed_images[-1].GetSpacing(),
fixed_images[-1].GetDirection(),
)
else:
if len(moving_image.GetSize()) == 2:
initial_displacement_field = sitk.Image(
fixed_images[-1].GetWidth(),
fixed_images[-1].GetHeight(),
sitk.sitkVectorFloat64,
)
elif len(moving_image.GetSize()) == 3:
initial_displacement_field = sitk.Image(
fixed_images[-1].GetWidth(),
fixed_images[-1].GetHeight(),
fixed_images[-1].GetDepth(),
sitk.sitkVectorFloat64,
)
initial_displacement_field.CopyInformation(fixed_images[-1])
else:
initial_displacement_field = sitk.Resample(
initial_displacement_field, fixed_images[-1]
)
# Run the registration.
iters = iteration_staging[0]
registration_algorithm.SetNumberOfIterations(iters)
initial_displacement_field = registration_algorithm.Execute(
fixed_images[-1], moving_images[-1], initial_displacement_field
)
# Start at the top of the pyramid and work our way down.
for i, (f_image, m_image) in enumerate(
reversed(list(zip(fixed_images[0:-1], moving_images[0:-1])))
):
initial_displacement_field = sitk.Resample(initial_displacement_field, f_image)
iters = iteration_staging[i + 1]
registration_algorithm.SetNumberOfIterations(iters)
initial_displacement_field = registration_algorithm.Execute(
f_image, m_image, initial_displacement_field
)
output_displacement_field = sitk.Resample(
initial_displacement_field, initial_displacement_field
)
if return_field:
return (
sitk.DisplacementFieldTransform(initial_displacement_field),
output_displacement_field,
)
else:
return sitk.DisplacementFieldTransform(initial_displacement_field)
def fast_symmetric_forces_demons_registration(
fixed_image,
moving_image,
resolution_staging=[8, 4, 1],
iteration_staging=[10, 10, 10],
isotropic_resample=False,
initial_displacement_field=None,
smoothing_sigma_factor=1,
smoothing_sigmas=False,
default_value=-1024,
ncores=1,
structure=False,
interp_order=2,
trace=False,
return_field=False,
):
"""
Deformable image propagation using Fast Symmetric-Forces Demons
Args
fixed_image (sitk.Image) : the fixed image
moving_image (sitk.Image) : the moving image, to be deformable registered (must be in
the same image space)
resolution_staging (list[int]) : down-sampling factor for each resolution level
iteration_staging (list[int]) : number of iterations for each resolution level
isotropic_resample (bool) : flag to request isotropic resampling of images, in which
case resolution_staging is used to define voxel size (mm) per level
initial_displacement_field (sitk.Image) : Initial displacement field to use
ncores (int) : number of processing cores to use
structure (bool) : True if the image is a structure image
smoothing_sigma_factor (float) : the relative width of the Gaussian smoothing kernel
interp_order (int) : the interpolation order
1 = Nearest neighbour
2 = Bi-linear splines
3 = B-Spline (cubic)
Returns
registered_image (sitk.Image) : the registered moving image
output_transform : the displacement field transform
[optional] deformation_field
"""
# Cast to floating point representation, if necessary
moving_image_type = moving_image.GetPixelID()
if fixed_image.GetPixelID() != 6:
fixed_image = sitk.Cast(fixed_image, sitk.sitkFloat32)
if moving_image.GetPixelID() != 6:
moving_image = sitk.Cast(moving_image, sitk.sitkFloat32)
# Set up the appropriate image filter
registration_method = sitk.FastSymmetricForcesDemonsRegistrationFilter()
# Multi-resolution framework
registration_method.SetNumberOfThreads(ncores)
registration_method.SetSmoothUpdateField(True)
registration_method.SetSmoothDisplacementField(True)
registration_method.SetStandardDeviations(1.5)
# This allows monitoring of the progress
if trace:
registration_method.AddCommand(
sitk.sitkIterationEvent,
lambda: deformable_registration_command_iteration(registration_method),
)
if not smoothing_sigmas:
smoothing_sigmas = [i * smoothing_sigma_factor for i in resolution_staging]
output = multiscale_demons(
registration_algorithm=registration_method,
fixed_image=fixed_image,
moving_image=moving_image,
shrink_factors=resolution_staging,
smoothing_sigmas=smoothing_sigmas,
iteration_staging=iteration_staging,
isotropic_resample=isotropic_resample,
initial_displacement_field=initial_displacement_field,
return_field=return_field,
)
if return_field:
output_transform, deformation_field = output
else:
output_transform = output
resampler = sitk.ResampleImageFilter()
resampler.SetReferenceImage(fixed_image)
resampler.SetInterpolator(interp_order)
if structure:
resampler.SetDefaultPixelValue(0)
else:
resampler.SetDefaultPixelValue(default_value)
resampler.SetTransform(output_transform)
registered_image = resampler.Execute(moving_image)
if structure:
registered_image = sitk.Cast(registered_image, sitk.sitkFloat32)
registered_image = sitk.BinaryThreshold(
registered_image, lowerThreshold=1e-5, upperThreshold=100
)
registered_image.CopyInformation(fixed_image)
registered_image = sitk.Cast(registered_image, moving_image_type)
if return_field:
resampled_field = sitk.Resample(deformation_field, fixed_image)
return registered_image, output_transform, resampled_field
else:
return registered_image, output_transform
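# Illustrative usage sketch (not part of the original module; the file names below are
# placeholders):
#
#   fixed = sitk.ReadImage("ct_fixed.nii.gz")
#   moving = sitk.ReadImage("ct_moving.nii.gz")
#   registered, dvf_tfm = fast_symmetric_forces_demons_registration(
#       fixed, moving, resolution_staging=[8, 4, 1], iteration_staging=[50, 25, 25]
#   )
#   # dvf_tfm can then be passed to apply_field() below to propagate other images or masks.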
def apply_field(
input_image,
transform,
structure=False,
default_value=-1024,
interp=sitk.sitkNearestNeighbor,
):
"""
    Transform a volume or structure with the given deformation field.
Args
input_image (sitk.Image) : the image to transform
transform (sitk.Transform) : the transform to apply to the structure or mask
        structure (bool) : if true, the input will be treated as a structure, otherwise as a volume
interp (int) : the type of interpolation to use, eg. sitk.sitkNearestNeighbor
Returns
resampled_image (sitk.Image) : the transformed image
"""
input_image_type = input_image.GetPixelIDValue()
resampler = sitk.ResampleImageFilter()
resampler.SetReferenceImage(input_image)
if structure:
resampler.SetDefaultPixelValue(0)
else:
resampler.SetDefaultPixelValue(default_value)
resampler.SetTransform(transform)
resampler.SetInterpolator(interp)
resampled_image = resampler.Execute(sitk.Cast(input_image, sitk.sitkFloat32))
return sitk.Cast(resampled_image, input_image_type)
def bspline_registration(
fixed_image,
moving_image,
moving_structure=False,
fixed_structure=False,
options={
"resolution_staging": [8, 4, 2],
"smooth_sigmas": [4, 2, 1],
"sampling_rate": 0.1,
"optimiser": "LBFGS",
"metric": "correlation",
"initial_grid_spacing": 64,
"grid_scale_factors": [1, 2, 4],
"interp_order": 3,
"default_value": -1024,
"number_of_iterations": 20,
},
isotropic_resample=False,
initial_isotropic_size=1,
initial_isotropic_smooth_scale=0,
trace=False,
ncores=8,
debug=False,
):
"""
B-Spline image registration using ITK
IMPORTANT - THIS IS UNDER ACTIVE DEVELOPMENT
Args
fixed_image (sitk.Image) : the fixed image
moving_image (sitk.Image): the moving image, transformed to match fixed_image
options (dict) : registration options
        fixed_structure, moving_structure (sitk.Image) : optional masks used to restrict the metric evaluation
Returns
        registered_image (sitk.Image): the registered moving image
        transform (sitk.Transform) : the transform, can be used directly with
sitk.ResampleImageFilter
Notes:
- smooth_sigmas are relative to resolution staging
e.g. for image spacing of 1x1x1 mm^3, with smooth sigma=2 and resolution_staging=4, the scale of the Gaussian filter would be 2x4 = 8mm (i.e. 8x8x8 mm^3)
"""
# Get the settings
resolution_staging = options["resolution_staging"]
smooth_sigmas = options["smooth_sigmas"]
sampling_rate = options["sampling_rate"]
optimiser = options["optimiser"]
metric = options["metric"]
initial_grid_spacing = options["initial_grid_spacing"]
grid_scale_factors = options["grid_scale_factors"]
number_of_iterations = options["number_of_iterations"]
interp_order = options["interp_order"]
default_value = options["default_value"]
# Re-cast input images
fixed_image = sitk.Cast(fixed_image, sitk.sitkFloat32)
moving_image_type = moving_image.GetPixelID()
moving_image = sitk.Cast(moving_image, sitk.sitkFloat32)
# (Optional) isotropic resample
# This changes the behaviour, so care should be taken
    # For highly anisotropic images this may be preferable
if isotropic_resample:
# First, copy the fixed image so we can resample back into this space at the end
fixed_image_original = fixed_image
fixed_image_original.MakeUnique()
fixed_image = smooth_and_resample(
fixed_image,
initial_isotropic_size,
initial_isotropic_smooth_scale,
isotropic_resample=True,
)
moving_image = smooth_and_resample(
moving_image,
initial_isotropic_size,
initial_isotropic_smooth_scale,
isotropic_resample=True,
)
else:
fixed_image_original = fixed_image
# Set up image registration method
registration = sitk.ImageRegistrationMethod()
registration.SetNumberOfThreads(ncores)
registration.SetShrinkFactorsPerLevel(resolution_staging)
registration.SetSmoothingSigmasPerLevel(smooth_sigmas)
registration.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()
# Choose optimiser
if optimiser == "LBFGSB":
registration.SetOptimizerAsLBFGSB(
gradientConvergenceTolerance=1e-5,
numberOfIterations=number_of_iterations,
maximumNumberOfCorrections=5,
maximumNumberOfFunctionEvaluations=1024,
costFunctionConvergenceFactor=1e7,
trace=trace,
)
elif optimiser == "LBFGS":
registration.SetOptimizerAsLBFGS2(
numberOfIterations=number_of_iterations,
solutionAccuracy=1e-2,
hessianApproximateAccuracy=6,
deltaConvergenceDistance=0,
deltaConvergenceTolerance=0.01,
lineSearchMaximumEvaluations=40,
lineSearchMinimumStep=1e-20,
lineSearchMaximumStep=1e20,
lineSearchAccuracy=0.01,
)
elif optimiser == "CGLS":
registration.SetOptimizerAsConjugateGradientLineSearch(
learningRate=0.05, numberOfIterations=number_of_iterations
)
registration.SetOptimizerScalesFromPhysicalShift()
elif optimiser == "GradientDescent":
registration.SetOptimizerAsGradientDescent(
learningRate=5.0,
numberOfIterations=number_of_iterations,
convergenceMinimumValue=1e-6,
convergenceWindowSize=10,
)
registration.SetOptimizerScalesFromPhysicalShift()
elif optimiser == "GradientDescentLineSearch":
registration.SetOptimizerAsGradientDescentLineSearch(
learningRate=1.0, numberOfIterations=number_of_iterations
)
registration.SetOptimizerScalesFromPhysicalShift()
# Set metric
if metric == "correlation":
registration.SetMetricAsCorrelation()
elif metric == "mean_squares":
registration.SetMetricAsMeanSquares()
elif metric == "demons":
registration.SetMetricAsDemons()
elif metric == "mutual_information":
try:
number_of_histogram_bins = options["number_of_histogram_bins"]
        except KeyError:
number_of_histogram_bins = 30
registration.SetMetricAsMattesMutualInformation(
numberOfHistogramBins=number_of_histogram_bins
)
registration.SetInterpolator(sitk.sitkLinear)
# Set sampling
if type(sampling_rate) == float:
registration.SetMetricSamplingPercentage(sampling_rate)
elif type(sampling_rate) in [np.ndarray, list]:
registration.SetMetricSamplingPercentagePerLevel(sampling_rate)
registration.SetMetricSamplingStrategy(sitk.ImageRegistrationMethod.REGULAR)
# Set masks
if moving_structure is not False:
registration.SetMetricMovingMask(moving_structure)
if fixed_structure is not False:
registration.SetMetricFixedMask(fixed_structure)
# Set control point spacing
transform_domain_mesh_size = control_point_spacing_distance_to_number(
fixed_image, initial_grid_spacing
)
if debug:
print(f"Initial grid size: {transform_domain_mesh_size}")
# Initialise transform
initial_transform = sitk.BSplineTransformInitializer(
fixed_image,
transformDomainMeshSize=[int(i) for i in transform_domain_mesh_size],
)
registration.SetInitialTransformAsBSpline(
initial_transform, inPlace=True, scaleFactors=grid_scale_factors
)
# (Optionally) add iteration commands
if trace:
registration.AddCommand(
sitk.sitkIterationEvent,
lambda: initial_registration_command_iteration(registration),
)
registration.AddCommand(
sitk.sitkMultiResolutionIterationEvent,
lambda: stage_iteration(registration),
)
# Run the registration
output_transform = registration.Execute(fixed=fixed_image, moving=moving_image)
# Resample moving image
registered_image = transform_propagation(
fixed_image_original,
moving_image,
output_transform,
default_value=default_value,
interp=interp_order,
)
registered_image = sitk.Cast(registered_image, moving_image_type)
# Return outputs
return registered_image, output_transform
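# Illustrative usage sketch (not part of the original module): the default `options`
# dictionary is a reasonable starting point; grid spacing and iteration counts usually
# need tuning per application.
#
#   registered, bspline_tfm = bspline_registration(fixed_image, moving_image, ncores=4)
#   # bspline_tfm can be reused with sitk.Resample / apply_field() to transform other
#   # images or masks defined in the moving image space.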
|
python
|
"""Overview plots of transcet"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import linregress
from os.path import join
from src import sonic_layer_depth
plt.ion()
bbox = dict(boxstyle='round', fc='w')
savedir = 'reports/jasa/figures'
c_fields = np.load('data/processed/inputed_decomp.npz')
z_a = c_fields['z_a']
x_a = c_fields['x_a']
c_bg = c_fields['c_bg']
c_tilt = c_fields['c_tilt']
c_spice = c_fields['c_spice']
c_total = c_fields['c_total']
prof_i = 150
plt_i = z_a <= 150.
c_field = c_total[plt_i, :]
prop_i = z_a <= 150.
sld_z, _ = sonic_layer_depth(z_a[plt_i], c_field)
fig, ax = plt.subplots(figsize=(6.5, 3))
ax.plot(x_a / 1e3, sld_z, 'k')
reg = linregress(x_a, sld_z)
ax.plot(x_a / 1e3, x_a * reg.slope + reg.intercept, 'C0')
#ax[0].text(120, 20, f'm={reg.slope * 1e3:0.3f} m'+' km$^{-1}$',
#bbox=bbox)
ax.set_xlabel('Range (km)')
ax.set_ylabel('Sonic layer depth (m)')
ax.grid()
ax.set_ylim(150, 0)
ax.set_xlim(0, 970)
pos = ax.get_position()
pos.x0 -= 0.01
pos.x1 += 0.08
pos.y0 += 0.04
pos.y1 += 0.08
ax.set_position(pos)
fig.savefig(join(savedir, 'sld_linregress.png'), dpi=300)
"""
fig, ax = plt.subplots(3, 1, sharex=True, figsize=(6.5, 6))
ax[0].plot(sec4.x_a / 1e3, sld_z, 'k')
reg = linregress(sec4.x_a, sld_z)
ax[0].plot(sec4.x_a / 1e3, sec4.x_a * reg.slope + reg.intercept, 'C0')
#ax[0].text(120, 20, f'm={reg.slope * 1e3:0.3f} m'+' km$^{-1}$',
#bbox=bbox)
i_40 = np.argmin(np.abs(sec4.z_a - 40))
tau = sec4.spice[i_40, :]
reg = linregress(sec4.x_a, tau)
ax[1].plot(sec4.x_a / 1e3, tau, 'k')
ax[1].plot(sec4.x_a / 1e3, sec4.x_a * reg.slope + reg.intercept, 'C0')
#ax[1].text(20, 2.3, f'm={reg.slope * 1e3:0.3e}'+' kg/m$^3$ km$^{-1}$',
#bbox=bbox)
sig = sec4.sigma0[i_40, :]
reg = linregress(sec4.x_a, sig)
ax[2].plot(sec4.x_a / 1e3, sig, 'k')
ax[2].plot(sec4.x_a / 1e3, sec4.x_a * reg.slope + reg.intercept, 'C0')
#ax[2].text(20, 24.95, f'm={reg.slope * 1e3:0.3e}' +' kg /m$^3$ km$^{-1}$',
#bbox=bbox)
ax[2].set_xlabel('Range (km)')
ax[0].set_ylabel('Sonic layer depth (m)')
ax[1].set_ylabel(r'$\tau$ (kg / m$^3$)')
ax[2].set_ylabel(r'$\sigma_0$ (kg / m$^3$)')
ax[0].grid()
ax[1].grid()
ax[2].grid()
ax[0].set_xlim(0, 970)
ax[0].set_ylim(150, 0)
pos = ax[0].get_position()
pos.x0 -= 0.01
pos.x1 += 0.08
pos.y0 += 0.04
pos.y1 += 0.10
ax[0].set_position(pos)
pos = ax[1].get_position()
pos.x0 -= 0.01
pos.x1 += 0.08
pos.y0 += 0.005
pos.y1 += 0.065
ax[1].set_position(pos)
pos = ax[2].get_position()
pos.x0 -= 0.01
pos.x1 += 0.08
pos.y0 -= 0.03
pos.y1 += 0.03
ax[2].set_position(pos)
fig.savefig(join(savedir, 'sld_dens_linregress.png'), dpi=300)
"""
|
python
|
import time
from datetime import datetime, timedelta
import funcy
from unittest import mock
import asyncio
from octoprint.events import EventManager
async def wait_untill(
condition,
poll_period=timedelta(seconds=1),
timeout=timedelta(seconds=10),
condition_name="no_name",
time=time.time,
*condition_args,
**condition_kwargs,
):
"""
    Waits until the given condition function returns true
args:
condition: A zero-arity callable
poll_period: How often to call the condition
        time: Callable that returns the current time in seconds since epoch
            (polling between checks is done with asyncio.sleep)
timeout: Total time to wait for the condition
condition_name: A human friendly name for the condition that will be mentioned in the Timeout error
throws:
TimeoutError
"""
# holds the starting time in seconds since the epoch
start_time = int(time())
cond_callable = funcy.partial(condition, *condition_args, **condition_kwargs)
condition_is_true = cond_callable()
while (
int(time()) < start_time + timeout.total_seconds()
and not condition_is_true
):
await asyncio.sleep(poll_period.total_seconds())
condition_is_true = cond_callable()
if not condition_is_true:
raise TimeoutError(
f"Waited {timeout} time for condition '{condition_name}' to be True"
)
async def wait_untill_event(
event_manager: EventManager,
event,
payload=None,
poll_period=timedelta(seconds=1),
timeout=timedelta(seconds=10),
):
try:
subscriber = mock.Mock()
def event_was_published():
return subscriber.call_args == mock.call(event, payload)
event_manager.subscribe(event=event, callback=subscriber)
await wait_untill(
condition=event_was_published,
poll_period=poll_period,
timeout=timeout,
condition_name=f"Event {event} was published",
)
finally:
event_manager.unsubscribe(event=event, callback=subscriber)
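# Illustrative usage sketch (not part of the original module; the event name "PrintDone"
# and the `job` object are placeholders):
#
#   await wait_untill_event(event_manager, event="PrintDone",
#                           timeout=timedelta(seconds=30))
#
#   # wait_untill() itself polls any zero-arity condition:
#   await wait_untill(lambda: job.done, condition_name="job finished")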
|
python
|
from subsystems.drivesubsystem import DriveSubsystem
import commands2
import wpilib
#time in seconds
ticksDistance = 0
class rotateArm(commands2.CommandBase):
    def __init__(self, power: float, distance: float, motor, encoder) -> None:
        super().__init__()
        self.power = power
        self.distance = distance
        # NOTE: the original snippet referenced self.rotateArm / self.rotateArmEncoder
        # without defining them; the motor controller and encoder are assumed to be
        # injected here so the command is self-contained.
        self.rotateArm = motor
        self.rotateArmEncoder = encoder
    def initialize(self):
        pass
    def execute(self) -> None:
        self.rotateArm.set(self.power)
    def end(self, interrupted: bool) -> None:
        # commands2 passes an `interrupted` flag when the command ends
        self.rotateArm.set(0)
    def isFinished(self) -> bool:
        # Done once the encoder has moved past the target distance in either direction
        return abs(self.rotateArmEncoder.getPosition()) > self.distance
|
python
|
#
# Copyright 2017 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import structlog
import arrow
from voltha.protos.events_pb2 import AlarmEventType, \
AlarmEventSeverity, AlarmEventState, AlarmEventCategory
# TODO: In the device adapter, the following alarms are still TBD
# (Taken from microsemi, so mileage may vary)
# ON_ALARM_SOFTWARE_ERROR = 0
# PON_ALARM_LOS = 1
# PON_ALARM_LOSI = 2
# PON_ALARM_DOWI = 3
# PON_ALARM_LOFI = 4
# PON_ALARM_RDII = 5
# PON_ALARM_LOAMI = 6
# PON_ALARM_LCDGI = 7
# PON_ALARM_LOAI = 8
# PON_ALARM_SDI = 9
# PON_ALARM_SFI = 10
# PON_ALARM_PEE = 11
# PON_ALARM_DGI = 12
# PON_ALARM_LOKI = 13
# PON_ALARM_TIWI = 14
# PON_ALARM_TIA = 15
# PON_ALARM_VIRTUAL_SCOPE_ONU_LASER_ALWAYS_ON = 16
# PON_ALARM_VIRTUAL_SCOPE_ONU_SIGNAL_DEGRADATION = 17
# PON_ALARM_VIRTUAL_SCOPE_ONU_EOL = 18
# PON_ALARM_VIRTUAL_SCOPE_ONU_EOL_DATABASE_IS_FULL = 19
# PON_ALARM_AUTH_FAILED_IN_REGISTRATION_ID_MODE = 20
# PON_ALARM_SUFI = 21
class AdapterAlarms:
def __init__(self, adapter, device_id):
self.log = structlog.get_logger(device_id=device_id)
self.adapter = adapter
self.device_id = device_id
self.lc = None
def format_id(self, alarm):
return 'voltha.{}.{}.{}'.format(self.adapter.name,
self.device_id,
alarm)
def format_description(self, _object, alarm, status):
return '{} Alarm - {} - {}'.format(_object.upper(),
alarm.upper(),
'Raised' if status else 'Cleared')
def send_alarm(self, context_data, alarm_data):
try:
current_context = {}
if isinstance(context_data, dict):
for key, value in context_data.iteritems():
current_context[key] = str(value)
alarm_event = self.adapter.adapter_agent.create_alarm(
id=alarm_data.get('id', 'voltha.{}.{}.olt'.format(self.adapter.name,
self.device_id)),
resource_id=str(alarm_data.get('resource_id', self.device_id)),
description="{}.{} - {}".format(self.adapter.name, self.device_id,
alarm_data.get('description')),
type=alarm_data.get('type'),
category=alarm_data.get('category'),
severity=alarm_data.get('severity'),
state=alarm_data.get('state'),
raised_ts=alarm_data.get('ts', 0),
context=current_context
)
self.adapter.adapter_agent.submit_alarm(self.device_id, alarm_event)
except Exception as e:
self.log.exception('failed-to-send-alarm', e=e)
class AlarmBase(object):
def __init__(self, handler, object_type, alarm,
alarm_category,
resource_id=None,
alarm_type=AlarmEventType.EQUIPMENT,
alarm_severity=AlarmEventSeverity.CRITICAL):
self._handler = handler
self._object_type = object_type
self._alarm = alarm
self._alarm_category = alarm_category
self._alarm_type = alarm_type
self._alarm_severity = alarm_severity
self._resource_id = resource_id
def get_alarm_data(self, status):
data = {
'ts': arrow.utcnow().timestamp,
'description': self._handler.alarms.format_description(self._object_type,
self._alarm,
status),
'id': self._handler.alarms.format_id(self._alarm),
'type': self._alarm_type,
'category': self._alarm_category,
'severity': self._alarm_severity,
'state': AlarmEventState.RAISED if status else AlarmEventState.CLEARED
}
if self._resource_id is not None:
data['resource_id'] = self._resource_id
return data
def get_context_data(self):
return {} # You should override this if needed
def raise_alarm(self):
alarm_data = self.get_alarm_data(True)
context_data = self.get_context_data()
self._handler.alarms.send_alarm(context_data, alarm_data)
def clear_alarm(self):
alarm_data = self.get_alarm_data(False)
context_data = self.get_context_data()
self._handler.alarms.send_alarm(context_data, alarm_data)
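# Illustrative sketch (not part of the original adapter code): a concrete alarm subclasses
# AlarmBase with its object type, alarm name and category (AlarmEventCategory.PON is an
# assumed member name), then calls raise_alarm()/clear_alarm() when the condition toggles:
#
#   class OltLosAlarm(AlarmBase):
#       def __init__(self, handler, port_id):
#           super(OltLosAlarm, self).__init__(handler, 'olt', 'LOS',
#                                             AlarmEventCategory.PON,
#                                             resource_id=port_id)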
|
python
|
#! /usr/bin/env python3
import numpy as np
from PIL import Image
def load_raw(f, w=3280, h=3280):
# Metadata: width, height
# Metadata: pixelPacking
arr = np.fromfile(f, np.uint8, w*h*3//2).reshape((h,w//2,3)).astype('H')
# Image is big endian 12 bit 2d array, bayered.
# This is detailed in the TXT (JSON metadata).
# Bayer pattern is r,gr:gb,b but upper left pixel is blue
# Metadata: mosaic
b = (arr[0::2,:,0]<<4 | arr[0::2,:,1]>>4) & 0xfff
g0 = (arr[0::2,:,1]<<8 | arr[0::2,:,2] ) & 0xfff
g1 = (arr[1::2,:,0]<<4 | arr[1::2,:,1]>>4) & 0xfff
r = (arr[1::2,:,1]<<8 | arr[1::2,:,2] ) & 0xfff
# Subsampled RGB image for now. Just a proof of concept.
a = np.zeros((h//2,w//2,3))
a[:,:,:]=168 # black level
a[:,:,0] = r
a[:,:,1] = (g0+g1)/2 # Average since we have more green photosites
a[:,:,2] = b
# Rescale a to 0..1 levels (Metadata: pixelFormat)
a = (a-168)/(4095-168)
a = np.maximum(a, 0)
# White balance (Metadata: color)
a[:,:,0] *= 1.015625
a[:,:,2] *= 1.2578125
# Gamma (Metadata: color)
a **= 0.416660010814666748046875
a = np.minimum(a, 1.0) # Gain may have pushed values out of range
#print(a.max(), a.min())
img = Image.frombytes('RGB', (w//2,h//2), (a*255).astype('B').tobytes())
return img
# Rendering thoughts: use scipy or ndsplines for interpolation?
# Can both trace rays to specific depth for focusing, or do full 4D
# interpolation for lightfield conversion.
if __name__=='__main__':
from sys import argv
name = argv[1] if argv[1:] else "../0001.RAW"
#'sha1-d004cadb9917237bde5145d77d970a4b252de1e9.RAW'
load_raw(open(name,'rb')).show()
|
python
|
ANSI_COLOURS = [
'grey',
'red',
'green',
'yellow',
'blue',
'magenta',
'cyan',
'white'
]
for i, name in enumerate(ANSI_COLOURS):
globals()[name] = str(30 + i)
globals()['intense_' + name] = str(30 + i) + ';1'
def get_colours():
cs = ['cyan', 'yellow', 'green', 'magenta', 'red', 'blue',
'intense_cyan', 'intense_yellow', 'intense_green',
'intense_magenta', 'intense_red', 'intense_blue']
cs = [globals()[c] for c in cs]
i = 0
while True:
yield cs[i % len(cs)]
i += 1
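# Illustrative usage sketch (not part of the original module): get_colours() cycles through
# the palette indefinitely, so callers typically zip it with their items (`log_lines` is a
# placeholder):
#
#   for line, colour in zip(log_lines, get_colours()):
#       print('\x1b[%sm%s\x1b[0m' % (colour, line))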
|
python
|
#######################################################################
# Copyright (C) 2017 Shangtong Zhang([email protected]) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
from ..network import *
from ..component import *
from .BaseAgent import *
from copy import deepcopy
import numpy as np
class PPOAgent(BaseAgent):
def __init__(self, config):
BaseAgent.__init__(self, config)
self.config = config
self.task = config.task_fn()
self.network = config.network_fn(self.task.state_dim, self.task.action_dim)
self.opt = config.optimizer_fn(self.network.parameters())
self.total_steps = 0
self.episode_rewards = np.zeros(config.num_workers)
self.last_episode_rewards = np.zeros(config.num_workers)
self.states = self.task.reset()
self.states = config.state_normalizer(self.states)
def iteration(self):
config = self.config
rollout = []
states = self.states
for _ in range(config.rollout_length):
actions, log_probs, _, values = self.network.predict(states)
next_states, rewards, terminals, _ = self.task.step(actions.cpu().detach().numpy())
self.episode_rewards += rewards
rewards = config.reward_normalizer(rewards)
for i, terminal in enumerate(terminals):
if terminals[i]:
self.last_episode_rewards[i] = self.episode_rewards[i]
self.episode_rewards[i] = 0
next_states = config.state_normalizer(next_states)
rollout.append([states, values.detach(), actions.detach(), log_probs.detach(), rewards, 1 - terminals])
states = next_states
self.states = states
pending_value = self.network.predict(states)[-1]
rollout.append([states, pending_value, None, None, None, None])
processed_rollout = [None] * (len(rollout) - 1)
advantages = tensor(np.zeros((config.num_workers, 1)))
returns = pending_value.detach()
for i in reversed(range(len(rollout) - 1)):
states, value, actions, log_probs, rewards, terminals = rollout[i]
terminals = tensor(terminals).unsqueeze(1)
rewards = tensor(rewards).unsqueeze(1)
actions = tensor(actions)
states = tensor(states)
next_value = rollout[i + 1][1]
returns = rewards + config.discount * terminals * returns
if not config.use_gae:
advantages = returns - value.detach()
else:
td_error = rewards + config.discount * terminals * next_value.detach() - value.detach()
advantages = advantages * config.gae_tau * config.discount * terminals + td_error
processed_rollout[i] = [states, actions, log_probs, returns, advantages]
states, actions, log_probs_old, returns, advantages = map(lambda x: torch.cat(x, dim=0), zip(*processed_rollout))
advantages = (advantages - advantages.mean()) / advantages.std()
batcher = Batcher(states.size(0) // config.num_mini_batches, [np.arange(states.size(0))])
for _ in range(config.optimization_epochs):
batcher.shuffle()
while not batcher.end():
batch_indices = batcher.next_batch()[0]
batch_indices = tensor(batch_indices).long()
sampled_states = states[batch_indices]
sampled_actions = actions[batch_indices]
sampled_log_probs_old = log_probs_old[batch_indices]
sampled_returns = returns[batch_indices]
sampled_advantages = advantages[batch_indices]
_, log_probs, entropy_loss, values = self.network.predict(sampled_states, sampled_actions)
ratio = (log_probs - sampled_log_probs_old).exp()
obj = ratio * sampled_advantages
obj_clipped = ratio.clamp(1.0 - self.config.ppo_ratio_clip,
1.0 + self.config.ppo_ratio_clip) * sampled_advantages
policy_loss = -torch.min(obj, obj_clipped).mean(0) - config.entropy_weight * entropy_loss.mean()
value_loss = 0.5 * (sampled_returns - values).pow(2).mean()
self.opt.zero_grad()
(policy_loss + value_loss).backward()
nn.utils.clip_grad_norm_(self.network.parameters(), config.gradient_clip)
self.opt.step()
steps = config.rollout_length * config.num_workers
self.total_steps += steps
class PPOContinualLearnerAgent(BaseContinualLearnerAgent):
def __init__(self, config):
BaseContinualLearnerAgent.__init__(self, config)
self.config = config
self.task = None if config.task_fn is None else config.task_fn()
if config.eval_task_fn is None:
self.evaluation_env = None
else:
self.evaluation_env = config.eval_task_fn(config.log_dir)
self.task = self.evaluation_env if self.task is None else self.task
tasks_ = self.task.get_all_tasks(config.cl_requires_task_label)
tasks = [tasks_[task_id] for task_id in config.task_ids]
del tasks_
self.config.cl_tasks_info = tasks
label_dim = 0 if tasks[0]['task_label'] is None else len(tasks[0]['task_label'])
# set seed before creating network to ensure network parameters are
# same across all shell agents
torch.manual_seed(config.seed)
self.network = config.network_fn(self.task.state_dim, self.task.action_dim, label_dim)
_params = list(self.network.parameters())
self.opt = config.optimizer_fn(_params, config.lr)
self.total_steps = 0
self.episode_rewards = np.zeros(config.num_workers)
self.last_episode_rewards = np.zeros(config.num_workers)
self.states = self.task.reset()
self.states = config.state_normalizer(self.states)
self.layers_output = None
self.data_buffer = Replay(memory_size=int(1e4), batch_size=256)
self.curr_train_task_label = None
self.curr_eval_task_label = None
def iteration(self):
config = self.config
rollout = []
states = self.states
if self.curr_train_task_label is not None:
task_label = self.curr_train_task_label
else:
task_label = self.task.get_task()['task_label']
assert False, 'manually set (temporary) breakpoint. code should not get here.'
task_label = tensor(task_label)
batch_dim = config.num_workers
if batch_dim == 1:
batch_task_label = task_label.reshape(1, -1)
else:
batch_task_label = torch.repeat_interleave(task_label.reshape(1, -1), batch_dim, dim=0)
for _ in range(config.rollout_length):
_, actions, log_probs, _, values, _ = self.network.predict(states, \
task_label=batch_task_label)
next_states, rewards, terminals, _ = self.task.step(actions.cpu().detach().numpy())
self.episode_rewards += rewards
rewards = config.reward_normalizer(rewards)
for i, terminal in enumerate(terminals):
if terminals[i]:
self.last_episode_rewards[i] = self.episode_rewards[i]
self.episode_rewards[i] = 0
next_states = config.state_normalizer(next_states)
# save data to buffer for the detect module
self.data_buffer.feed_batch([states, actions, rewards, terminals, next_states])
rollout.append([states, values.detach(), actions.detach(), log_probs.detach(), \
rewards, 1 - terminals])
states = next_states
self.states = states
pending_value = self.network.predict(states, task_label=batch_task_label)[-2]
rollout.append([states, pending_value, None, None, None, None])
processed_rollout = [None] * (len(rollout) - 1)
advantages = tensor(np.zeros((config.num_workers, 1)))
returns = pending_value.detach()
for i in reversed(range(len(rollout) - 1)):
states, value, actions, log_probs, rewards, terminals = rollout[i]
terminals = tensor(terminals).unsqueeze(1)
rewards = tensor(rewards).unsqueeze(1)
actions = tensor(actions)
states = tensor(states)
next_value = rollout[i + 1][1]
returns = rewards + config.discount * terminals * returns
if not config.use_gae:
advantages = returns - value.detach()
else:
td_error = rewards + config.discount*terminals*next_value.detach() - value.detach()
advantages = advantages * config.gae_tau * config.discount * terminals + td_error
processed_rollout[i] = [states, actions, log_probs, returns, advantages]
states, actions, log_probs_old, returns, advantages = map(lambda x: torch.cat(x, dim=0), \
zip(*processed_rollout))
advantages = (advantages - advantages.mean()) / advantages.std()
grad_norms_ = []
batcher = Batcher(states.size(0) // config.num_mini_batches, [np.arange(states.size(0))])
for _ in range(config.optimization_epochs):
batcher.shuffle()
while not batcher.end():
batch_indices = batcher.next_batch()[0]
batch_indices = tensor(batch_indices).long()
sampled_states = states[batch_indices]
sampled_actions = actions[batch_indices]
sampled_log_probs_old = log_probs_old[batch_indices]
sampled_returns = returns[batch_indices]
sampled_advantages = advantages[batch_indices]
batch_dim = sampled_states.shape[0]
batch_task_label = torch.repeat_interleave(task_label.reshape(1, -1), batch_dim, \
dim=0)
_, _, log_probs, entropy_loss, values, outs = self.network.predict(sampled_states, \
sampled_actions, task_label=batch_task_label, return_layer_output=True)
ratio = (log_probs - sampled_log_probs_old).exp()
obj = ratio * sampled_advantages
obj_clipped = ratio.clamp(1.0 - self.config.ppo_ratio_clip,
1.0 + self.config.ppo_ratio_clip) * sampled_advantages
policy_loss = -torch.min(obj, obj_clipped).mean(0) \
- config.entropy_weight * entropy_loss.mean()
value_loss = 0.5 * (sampled_returns - values).pow(2).mean()
self.opt.zero_grad()
(policy_loss + value_loss).backward()
norm_ = nn.utils.clip_grad_norm_(self.network.parameters(), config.gradient_clip)
grad_norms_.append(norm_.detach().cpu().numpy())
self.opt.step()
steps = config.rollout_length * config.num_workers
self.total_steps += steps
self.layers_output = outs
return np.mean(grad_norms_)
class BaselineAgent(PPOContinualLearnerAgent):
'''
    PPO continual learning agent baseline (experiences catastrophic forgetting)
'''
def __init__(self, config):
PPOContinualLearnerAgent.__init__(self, config)
def task_train_start(self, task_label):
self.curr_train_task_label = task_label
return
def task_train_end(self):
self.curr_train_task_label = None
return
def task_eval_start(self, task_label):
self.curr_eval_task_label = task_label
return
def task_eval_end(self):
self.curr_eval_task_label = None
return
class LLAgent(PPOContinualLearnerAgent):
'''
PPO continual learning agent using supermask superposition algorithm
task oracle available: agent informed about task boundaries (i.e., when
one task ends and the other begins)
supermask lifelong learning algorithm: https://arxiv.org/abs/2006.14769
'''
def __init__(self, config):
PPOContinualLearnerAgent.__init__(self, config)
self.seen_tasks = {} # contains task labels that agent has experienced so far.
self.new_task = False
self.curr_train_task_label = None
def _label_to_idx(self, task_label):
eps = 1e-5
found_task_idx = None
for task_idx, seen_task_label in self.seen_tasks.items():
if np.linalg.norm((task_label - seen_task_label), ord=2) < eps:
found_task_idx = task_idx
break
return found_task_idx
def _select_mask(self, agents, masks, ensemble=False):
found_mask = None
if ensemble:
raise NotImplementedError
else:
for agent, mask in zip(agents, masks):
if mask is not None:
found_mask = mask
break
return found_mask
def task_train_start(self, task_label):
task_idx = self._label_to_idx(task_label)
if task_idx is None:
# new task. add it to the agent's seen_tasks dictionary
task_idx = len(self.seen_tasks) # generate an internal task index for new task
self.seen_tasks[task_idx] = task_label
self.new_task = True
set_model_task(self.network, task_idx)
self.curr_train_task_label = task_label
return
def task_train_end(self):
self.curr_train_task_label = None
cache_masks(self.network)
if self.new_task:
set_num_tasks_learned(self.network, len(self.seen_tasks))
self.new_task = False # reset flag
return
def task_eval_start(self, task_label):
self.network.eval()
task_idx = self._label_to_idx(task_label)
if task_idx is None:
# agent has not been trained on current task
# being evaluated. therefore use a random mask
# TODO: random task hardcoded to the first learnt
# task/mask. update this later to use a random
# previous task, or implementing a way for
# agent to use an ensemble of different mask
# internally for the task not yet seen.
task_idx = 0
set_model_task(self.network, task_idx)
self.curr_eval_task_label = task_label
return
def task_eval_end(self):
self.curr_eval_task_label = None
self.network.train()
# resume training the model on train task label if training
# was on before running evaluations.
if self.curr_train_task_label is not None:
task_idx = self._label_to_idx(self.curr_train_task_label)
set_model_task(self.network, task_idx)
return
class ShellAgent_SP(LLAgent):
'''
Lifelong learning (ppo continual learning with supermask) agent in ShELL
settings. All agents executing in a single/uni process (SP) setting.
'''
def __init__(self, config):
LLAgent.__init__(self, config)
def ping_agents(self, agents):
task_label = self.task.get_task()['task_label']
task_idx = self._label_to_idx(task_label)
masks = [agent.ping_response(task_label) for agent in agents]
mask = self._select_mask(agents, masks)
if mask is not None:
# function from deep_rl/network/ssmask_utils.py
set_mask(self.network, mask, task_idx)
return True
else:
return False
def ping_response(self, task_label):
task_idx = self._label_to_idx(task_label)
# get task mask.
if task_idx is None:
mask = None
else:
mask = get_mask(self.network, task_idx)
return mask
class ShellAgent_DP(LLAgent):
'''
Lifelong learning (ppo continual learning with supermask) agent in ShELL
settings. All agents executing in a distributed (multi-) process (DP) setting.
'''
def __init__(self, config):
LLAgent.__init__(self, config)
_mask = get_mask(self.network, task=0)
self.mask_info = {}
for key, value in _mask.items():
self.mask_info[key] = tuple(value.shape)
model_mask_dim = 0
for k, v in self.mask_info.items():
model_mask_dim += np.prod(v)
self.model_mask_dim = model_mask_dim
def infuse_masks(self, masks):
print('to be implemented')
return False
def ping_response(self, task_label):
task_idx = self._label_to_idx(task_label)
# get task mask.
if task_idx is None:
mask = None
else:
mask = get_mask(self.network, task_idx)
return mask
class LLAgent_NoOracle(PPOContinualLearnerAgent):
'''
PPO continual learning agent using supermask superposition algorithm
with *no task oracle*: agent is not informed about task boundaries
(i.e., when one task ends and the other begins) and has to detect task
change by itself.
supermask lifelong learning algorithm: https://arxiv.org/abs/2006.14769
'''
def __init__(self, config):
PPOContinualLearnerAgent.__init__(self, config)
self.seen_tasks = {} # contains task labels that agent has experienced so far.
self.new_task = False
self.curr_train_task_label = None
def _name_to_idx(self, name):
found_task_idx = None
for task_idx, value in self.seen_tasks.items():
seen_task_label, task_name = value
if name == task_name:
found_task_idx = task_idx
break
return found_task_idx
def _label_to_idx(self, task_label):
eps = 1e-5
found_task_idx = None
for task_idx, value in self.seen_tasks.items():
seen_task_label, task_name = value
if np.linalg.norm((task_label - seen_task_label), ord=2) < eps:
found_task_idx = task_idx
break
return found_task_idx
def _select_mask(self, agents, masks, ensemble=False):
found_mask = None
if ensemble:
raise NotImplementedError
else:
for agent, mask in zip(agents, masks):
if mask is not None:
found_mask = mask
break
return found_mask
def update_task_label(self, task_label):
# TODO: consider other ways to update the label as detect module
# alters it. Maybe moving average?
task_idx = self._label_to_idx(self.curr_train_task_label)
self.seen_tasks[task_idx][0] = task_label
self.curr_train_task_label = task_label
def set_first_task(self, task_label, task_name):
# start first task
task_idx = 0 # first task idx is 0
self.seen_tasks[task_idx] = [task_label, task_name]
self.new_task = True
set_model_task(self.network, task_idx)
self.curr_train_task_label = task_label
return
def task_change_detected(self, task_label, task_name):
# end current task (if any)
if self.curr_train_task_label is not None:
cache_masks(self.network)
if self.new_task:
set_num_tasks_learned(self.network, len(self.seen_tasks))
self.new_task = False # reset flag
self.curr_train_task_label = None
# start next task
# use task label or task name to check if task already exist in model
task_idx = self._label_to_idx(task_label)
if task_idx is None:
# new task. add it to the agent's seen_tasks dictionary
task_idx = len(self.seen_tasks) # generate an internal task index for new task
self.seen_tasks[task_idx] = [task_label, task_name]
self.new_task = True
set_model_task(self.network, task_idx)
self.curr_train_task_label = task_label
return
def task_eval_start(self, task_name):
self.network.eval()
task_idx = self._name_to_idx(task_name)
if task_idx is None:
# agent has not been trained on current task
# being evaluated. therefore use a random mask
# TODO: random task hardcoded to the first learnt
# task/mask. update this later to use a random
# previous task, or implementing a way for
# agent to use an ensemble of different mask
# internally for the task not yet seen.
task_idx = 0
set_model_task(self.network, task_idx)
return
def task_eval_end(self):
self.network.train()
# resume training the model on train task label if training
# was on before running evaluations.
if self.curr_train_task_label is not None:
task_idx = self._label_to_idx(self.curr_train_task_label)
set_model_task(self.network, task_idx)
return
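# Illustrative lifecycle sketch (not part of the original code): with a task oracle, a
# training loop drives an LLAgent roughly as follows (iteration counts and the evaluation
# routine are placeholders):
#
#   agent.task_train_start(task_label)
#   for _ in range(num_iterations):
#       agent.iteration()
#   agent.task_train_end()
#   agent.task_eval_start(task_label)
#   evaluate(agent)   # hypothetical evaluation routine
#   agent.task_eval_end()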
|
python
|
import itertools
def flatten(*arg):
return [item for sublist in arg for item in sublist]
def intersection(*args):
result = set(args[0])
for i in range(1, len(args)):
result = result.intersection(set(args[i]))
return result
def is_unique(*item_lists):
all_items = flatten(*item_lists)
previous = None
for item in sorted(all_items):
if item == previous:
return False
previous = item
return True
|
python
|
import torch
import torch.nn as nn
from torch.autograd import Function
class GdnFunction(Function):
@staticmethod
def forward(ctx, x, gamma, beta):
ctx.save_for_backward(x, gamma, beta)
n, c, h, w = list(x.size())
tx = x.permute(0, 2, 3, 1).contiguous()
tx = tx.view(-1, c)
tx2 = tx * tx
denominator = tx2.mm(gamma) + beta
ty = tx / torch.sqrt(denominator)
y = ty.view(n, h, w, c)
y = y.permute(0, 3, 1, 2).contiguous()
return y
@staticmethod
def backward(ctx, grad_output):
        x, gamma, beta = ctx.saved_tensors
n, c, h, w = list(grad_output.size())
tx = x.permute(0, 2, 3, 1).contiguous()
tx = tx.view(-1, c)
tx2 = tx * tx
denominator = tx2.mm(gamma) + beta
tdzdy = grad_output.permute(0, 2, 3, 1).contiguous()
tdzdy = tdzdy.view(-1, c)
gy = (tdzdy * torch.pow(denominator, -0.5) - (tdzdy * tx *
torch.pow(denominator, -1.5)).mm(gamma.t()) * tx)
gy = gy.view(n, h, w, c)
grad_input = gy.permute(0, 3, 1, 2).contiguous()
tmp = -0.5 * torch.pow(denominator, -1.5) * tx * tdzdy
grad_beta = torch.sum(tmp, 0)
grad_gamma = tx2.t().mm(tmp)
return grad_input, grad_gamma, grad_beta
class Gdn(nn.Module):
def __init__(self, input_channel):
super(Gdn, self).__init__()
self.input_channel = input_channel
self.gamma = nn.Parameter(torch.Tensor(input_channel, input_channel))
self.beta = nn.Parameter(torch.Tensor(input_channel))
def forward(self, input):
return GdnFunction.apply(input, self.gamma, self.beta)
def __str__(self):
return self.__class__.__name__ + '(gamma_size=(%d, %d), beta_size=(%d))' %\
(self.gamma.size()[0], self.gamma.size()[1], self.beta.size()[0])
__repr__ = __str__
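if __name__ == "__main__":
    # Minimal numerical sanity check (added sketch, not part of the original module).
    # gradcheck needs float64 inputs and a strictly positive denominator, so gamma/beta
    # are initialised to small positive values here purely for illustration.
    torch.manual_seed(0)
    channels = 3
    x = torch.randn(2, channels, 4, 4, dtype=torch.float64, requires_grad=True)
    gamma = torch.full((channels, channels), 0.01, dtype=torch.float64)
    gamma += 0.1 * torch.eye(channels, dtype=torch.float64)
    gamma.requires_grad_(True)
    beta = torch.full((channels,), 1.0, dtype=torch.float64, requires_grad=True)
    print(torch.autograd.gradcheck(GdnFunction.apply, (x, gamma, beta),
                                   raise_exception=False))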
|
python
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
**Callable creation utility unit tests.**
This submodule unit tests the public API of the private
:mod:`beartype._util.utilfunc.utilfuncmake` submodule.
'''
# ....................{ IMPORTS }....................
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# WARNING: To raise human-readable test errors, avoid importing from
# package-specific submodules at module scope.
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
from pytest import raises
# ....................{ GLOBALS }....................
# Arbitrary global referenced in functions created below.
AND_SEE_THE_GREAT_ACHILLES = 'whom we knew'
# ....................{ TESTS ~ make }....................
#FIXME: Consider excising. Although awesome, this is no longer needed.
# def test_copy_func_shallow_pass() -> None:
# '''
# Test successful usage of the
# :func:`beartype._util.func.utilfuncmake.copy_func_shallow` function.
# '''
#
# # Defer heavyweight imports.
# from beartype.roar import BeartypeDecorWrapperException
# from beartype._util.func.utilfuncmake import copy_func_shallow
#
# # Tuple of the names of all attributes expected to be shallowly copied.
# ATTRS_NAME_COPIED = (
# '__annotations__',
# '__closure__',
# '__code__',
# '__defaults__',
# '__doc__',
# '__globals__',
# # '__kwdefaults__',
# '__module__',
# '__name__',
# '__qualname__',
# )
#
# # String returned by the in_memoriam() function declared below when passed
# # an even integer.
# IN_MEMORIAM_RETURN_IF_PARAM_EVEN = 'And all we met was fair and good,'
#
# # String returned by the in_memoriam() function declared below when passed
# # an odd integer.
# IN_MEMORIAM_RETURN_IF_PARAM_ODD = ' And all was good that Time could bring,'
#
# # String suffixing the string returned by that function.
# IN_MEMORIAM_RETURN_SUFFIX = 'I sing to him that rests below,'
#
# # Arbitrary closure to be shallowly copied.
# def in_memoriam(
# # Mandatory parameter.
# the_shadow: int,
#
# # Optional parameter.
# the_shroud: str = IN_MEMORIAM_RETURN_SUFFIX,
# ) -> str:
# '''
# The Shadow sits and waits for me.
# '''
#
# return (
# IN_MEMORIAM_RETURN_IF_PARAM_EVEN + the_shroud
# if the_shadow % 2 == 0 else
# IN_MEMORIAM_RETURN_IF_PARAM_ODD + the_shroud
# )
#
# # Set a custom attribute on this callable to be shallowly copied.
# in_memoriam.the_clock = '''
# And in the dusk of thee, the clock
# Beats out the little lives of men.'''
#
# # Function shallowly copied from this callable.
# captive_void = copy_func_shallow(func=in_memoriam)
#
# # Assert this copy returns the expected value.
# assert captive_void(27) == (
# f'{IN_MEMORIAM_RETURN_IF_PARAM_ODD}{IN_MEMORIAM_RETURN_SUFFIX}')
#
# # Assert this copy shares the same custom attribute as the original.
# assert captive_void.the_clock == in_memoriam.the_clock
#
# # Assert this copy contains the same dunder attributes.
# for attr_name_copied in ATTRS_NAME_COPIED:
# assert (
# getattr(captive_void, attr_name_copied) ==
# getattr(in_memoriam, attr_name_copied)
# )
#
# # Assert this function rejects C-based functions.
# with raises(BeartypeDecorWrapperException):
# copy_func_shallow(
# func=iter, exception_cls=BeartypeDecorWrapperException)
# ....................{ TESTS ~ make }....................
def test_make_func_pass() -> None:
'''
Test successful usage of the
:func:`beartype._util.func.utilfuncmake.make_func` function.
'''
# Defer heavyweight imports.
from beartype._util.func.utilfuncmake import make_func
from typing import Optional
# Arbitrary local referenced in functions created below.
THO_MUCH_IS_TAKEN = 'much abides; and tho’'
# Arbitrary callable wrapped by wrappers created below.
def we_are_not_now_that_strength_which_in_old_days() -> str:
'''
One equal temper of heroic hearts,
'''
return 'Moved earth and heaven, that which we are, we are;'
# Arbitrary wrapper accessing both globally and locally scoped attributes,
# exercising most optional parameters.
ulysses = make_func(
func_name='it_may_be_that_the_gulfs_will_wash_us_down',
func_code='''
def it_may_be_that_the_gulfs_will_wash_us_down(
it_may_be_we_shall_touch_the_happy_isles: Optional[str]) -> str:
return (
AND_SEE_THE_GREAT_ACHILLES +
THO_MUCH_IS_TAKEN +
we_are_not_now_that_strength_which_in_old_days() +
(
it_may_be_we_shall_touch_the_happy_isles or
'Made weak by time and fate, but strong in will'
)
)
''',
func_globals={
'AND_SEE_THE_GREAT_ACHILLES': AND_SEE_THE_GREAT_ACHILLES,
'THO_MUCH_IS_TAKEN': THO_MUCH_IS_TAKEN,
'we_are_not_now_that_strength_which_in_old_days': (
we_are_not_now_that_strength_which_in_old_days),
},
func_locals={
'Optional': Optional,
},
func_wrapped=we_are_not_now_that_strength_which_in_old_days,
)
# Assert this wrapper wrapped this wrappee.
assert ulysses.__doc__ == (
we_are_not_now_that_strength_which_in_old_days.__doc__)
# Assert this wrapper returns an expected value.
odyssey = ulysses('Made weak by time and fate, but strong in will')
assert 'Made weak by time and fate, but strong in will' in odyssey
# Arbitrary callable accessing no scoped attributes.
to_strive_to_seek_to_find = make_func(
func_name='to_strive_to_seek_to_find',
func_code='''
def to_strive_to_seek_to_find(and_not_to_yield: str) -> str:
return and_not_to_yield
''',
)
# Assert this wrapper returns an expected value.
assert (
to_strive_to_seek_to_find('Tis not too late to seek a newer world.') ==
'Tis not too late to seek a newer world.'
)
def test_make_func_fail() -> None:
'''
Test unsuccessful usage of the
:func:`beartype._util.func.utilfuncmake.make_func` function.
'''
# Defer heavyweight imports.
from beartype.roar import BeartypeDecorWrapperException
from beartype.roar._roarexc import _BeartypeUtilCallableException
from beartype._util.func.utilfuncmake import make_func
# Assert that attempting to create a function whose name collides with that
# of a caller-defined local variable raises the expected exception.
with raises(_BeartypeUtilCallableException):
make_func(
func_name='come_my_friends',
func_code='''
def come_my_friends(T: str) -> str:
return T + 'is not too late to seek a newer world'
''',
func_label='Magnanimous come_my_friends() function',
func_locals={
'come_my_friends': 'Push off, and sitting well in order smite',
},
)
# Assert that attempting to execute a syntactically invalid snippet raises
# the expected exception.
with raises(BeartypeDecorWrapperException):
make_func(
func_name='to_sail_beyond_the_sunset',
func_code='''
def to_sail_beyond_the_sunset(and_the_baths: str) -> str:
Of all the western stars, until I die.
''',
func_label='Heroic to_sail_beyond_the_sunset() function',
exception_cls=BeartypeDecorWrapperException,
)
# Assert that attempting to execute a syntactically valid snippet failing
# to declare this function raises the expected exception.
with raises(BeartypeDecorWrapperException):
make_func(
func_name='you_and_i_are_old',
func_code='''
def old_age_hath_yet_his_honour_and_his_toil() -> str:
return 'Death closes all: but something ere the end'
''',
func_label='Geriatric you_and_i_are_old() function',
exception_cls=BeartypeDecorWrapperException,
)
|
python
|
from os import *
import traceback
import sys
if len(sys.argv) != 2:
print("Utilisation: %s fichier"%(sys.argv[0]))
exit(0)
SIZE=256
try:
f=open(sys.argv[1], O_RDONLY)
bs = read(f, SIZE)
while len(bs) > 0:
write(sys.stdout.fileno(), bs)
bs = read(f, SIZE)
close(f)
except OSError as e:
traceback.print_exc()
print(e.strerror)
exit(1)
|
python
|
from falcor import *
def render_graph_DefaultRenderGraph():
g = RenderGraph('DefaultRenderGraph')
loadRenderPassLibrary('BSDFViewer.dll')
loadRenderPassLibrary('AccumulatePass.dll')
loadRenderPassLibrary('TemporalDelayPass.dll')
loadRenderPassLibrary('Antialiasing.dll')
loadRenderPassLibrary('BlitPass.dll')
loadRenderPassLibrary('CSM.dll')
loadRenderPassLibrary('DebugPasses.dll')
loadRenderPassLibrary('DepthPass.dll')
loadRenderPassLibrary('ErrorMeasurePass.dll')
loadRenderPassLibrary('ExampleBlitPass.dll')
loadRenderPassLibrary('FLIPPass.dll')
loadRenderPassLibrary('WhittedRayTracer.dll')
loadRenderPassLibrary('PixelInspectorPass.dll')
loadRenderPassLibrary('ForwardLightingPass.dll')
loadRenderPassLibrary('GBuffer.dll')
loadRenderPassLibrary('SkyBox.dll')
loadRenderPassLibrary('ImageLoader.dll')
loadRenderPassLibrary('MegakernelPathTracer.dll')
loadRenderPassLibrary('MinimalPathTracer.dll')
loadRenderPassLibrary('OptixDenoiser.dll')
loadRenderPassLibrary('PassLibraryTemplate.dll')
loadRenderPassLibrary('SceneDebugger.dll')
loadRenderPassLibrary('SimplePostFX.dll')
loadRenderPassLibrary('SSAO.dll')
loadRenderPassLibrary('TestPasses.dll')
loadRenderPassLibrary('SVGFPass.dll')
loadRenderPassLibrary('ToneMapper.dll')
loadRenderPassLibrary('Utils.dll')
ImageLoader = createPass('ImageLoader', {'outputFormat': ResourceFormat.BGRA8UnormSrgb, 'filename': 'C:\\Users\\Song\\Desktop\\pictures\\65578040_p0.jpg', 'mips': False, 'srgb': True, 'arrayIndex': 0, 'mipLevel': 0})
g.addPass(ImageLoader, 'ImageLoader')
ExampleBlitPass = createPass('ExampleBlitPass')
g.addPass(ExampleBlitPass, 'ExampleBlitPass')
g.addEdge('ImageLoader.dst', 'ExampleBlitPass.input')
g.markOutput('ExampleBlitPass.output')
return g
DefaultRenderGraph = render_graph_DefaultRenderGraph()
try: m.addGraph(DefaultRenderGraph)
except NameError: None
|
python
|
import histogram_dct as hd
import pandas as pd
# load data
data = pd.read_excel(r'Example\distributions.xlsx')
# uniform distribution:
hd.histogram_dct(data['unf'], bin=12.5, name='uniform distribution')
# normal distribution:
hd.histogram_dct(data['nrm'], bin=4, name='normal distribution')
# exp distribution:
hd.histogram_dct(data['exp'], bin=5, name='exp distribution')
# mix distribution:
hd.histogram_dct(data['mix'], bin=5, name='mixed distribution')
# use the output dictionary (find the maximum key):
max_key, max_vlu, dct = None, 0, hd.histogram_dct(data['nrm'], bin=5)
for key, value in dct.items():
if max_vlu < value:
        max_vlu = value
        max_key = key
print(f'maximum key: {max_key}\nmaximum value: {max_vlu}'
      f'\npercent: {round(max_vlu/sum(dct.values())*100,2)}%')
|
python
|
""" Python Character Mapping Codec generated from '8859-2.TXT' with gencodec.py.
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x00a1: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK
0x00a2: 0x02d8, # BREVE
0x00a3: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE
0x00a5: 0x013d, # LATIN CAPITAL LETTER L WITH CARON
0x00a6: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE
0x00a9: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
0x00aa: 0x015e, # LATIN CAPITAL LETTER S WITH CEDILLA
0x00ab: 0x0164, # LATIN CAPITAL LETTER T WITH CARON
0x00ac: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE
0x00ae: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
0x00af: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x00b1: 0x0105, # LATIN SMALL LETTER A WITH OGONEK
0x00b2: 0x02db, # OGONEK
0x00b3: 0x0142, # LATIN SMALL LETTER L WITH STROKE
0x00b5: 0x013e, # LATIN SMALL LETTER L WITH CARON
0x00b6: 0x015b, # LATIN SMALL LETTER S WITH ACUTE
0x00b7: 0x02c7, # CARON
0x00b9: 0x0161, # LATIN SMALL LETTER S WITH CARON
0x00ba: 0x015f, # LATIN SMALL LETTER S WITH CEDILLA
0x00bb: 0x0165, # LATIN SMALL LETTER T WITH CARON
0x00bc: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE
0x00bd: 0x02dd, # DOUBLE ACUTE ACCENT
0x00be: 0x017e, # LATIN SMALL LETTER Z WITH CARON
0x00bf: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x00c0: 0x0154, # LATIN CAPITAL LETTER R WITH ACUTE
0x00c3: 0x0102, # LATIN CAPITAL LETTER A WITH BREVE
0x00c5: 0x0139, # LATIN CAPITAL LETTER L WITH ACUTE
0x00c6: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE
0x00c8: 0x010c, # LATIN CAPITAL LETTER C WITH CARON
0x00ca: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK
0x00cc: 0x011a, # LATIN CAPITAL LETTER E WITH CARON
0x00cf: 0x010e, # LATIN CAPITAL LETTER D WITH CARON
0x00d0: 0x0110, # LATIN CAPITAL LETTER D WITH STROKE
0x00d1: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE
0x00d2: 0x0147, # LATIN CAPITAL LETTER N WITH CARON
0x00d5: 0x0150, # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
0x00d8: 0x0158, # LATIN CAPITAL LETTER R WITH CARON
0x00d9: 0x016e, # LATIN CAPITAL LETTER U WITH RING ABOVE
0x00db: 0x0170, # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
0x00de: 0x0162, # LATIN CAPITAL LETTER T WITH CEDILLA
0x00e0: 0x0155, # LATIN SMALL LETTER R WITH ACUTE
0x00e3: 0x0103, # LATIN SMALL LETTER A WITH BREVE
0x00e5: 0x013a, # LATIN SMALL LETTER L WITH ACUTE
0x00e6: 0x0107, # LATIN SMALL LETTER C WITH ACUTE
0x00e8: 0x010d, # LATIN SMALL LETTER C WITH CARON
0x00ea: 0x0119, # LATIN SMALL LETTER E WITH OGONEK
0x00ec: 0x011b, # LATIN SMALL LETTER E WITH CARON
0x00ef: 0x010f, # LATIN SMALL LETTER D WITH CARON
0x00f0: 0x0111, # LATIN SMALL LETTER D WITH STROKE
0x00f1: 0x0144, # LATIN SMALL LETTER N WITH ACUTE
0x00f2: 0x0148, # LATIN SMALL LETTER N WITH CARON
0x00f5: 0x0151, # LATIN SMALL LETTER O WITH DOUBLE ACUTE
0x00f8: 0x0159, # LATIN SMALL LETTER R WITH CARON
0x00f9: 0x016f, # LATIN SMALL LETTER U WITH RING ABOVE
0x00fb: 0x0171, # LATIN SMALL LETTER U WITH DOUBLE ACUTE
0x00fe: 0x0163, # LATIN SMALL LETTER T WITH CEDILLA
0x00ff: 0x02d9, # DOT ABOVE
})
### Encoding Map
encoding_map = codecs.make_encoding_map(decoding_map)
|
python
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Flip(nn.Module):
"""
Flip indices transformation.
Example:
>>> f = stribor.Flip()
>>> x = torch.tensor([[1, 2], [3, 4]])
>>> f(x)[0]
tensor([[2, 1],
[4, 3]])
>>> f = stribor.Flip([0, 1])
>>> f(x)[0]
tensor([[4, 3],
[2, 1]])
Args:
dims (List[int]): Dimensions along which to flip the order of values.
Default: [-1]
"""
def __init__(self, dims=[-1]):
super().__init__()
self.dims = dims
def forward(self, x, **kwargs):
y = torch.flip(x, self.dims)
return y, torch.zeros_like(y)
def inverse(self, y, **kwargs):
x = torch.flip(y, self.dims)
return x, torch.zeros_like(x)
class Permute(nn.Module):
"""
Permute indices along the last dimension.
Example:
>>> torch.manual_seed(123)
>>> f = stribor.Permute(3)
>>> f(torch.tensor([1, 2, 3]))
(tensor([2, 3, 1]), tensor([0, 0, 0]))
        >>> f.inverse(torch.tensor([2, 3, 1]))
(tensor([1, 2, 3]), tensor([0, 0, 0]))
Args:
dim (int): Dimension of data
"""
def __init__(self, dim):
super().__init__()
self.permutation = torch.randperm(dim)
self.inverse_permutation = torch.empty(dim).long()
self.inverse_permutation[self.permutation] = torch.arange(dim)
def forward(self, x):
y = x[..., self.permutation]
return y, torch.zeros_like(y)
def inverse(self, y):
x = y[..., self.inverse_permutation]
return x, torch.zeros_like(x)
|
python
|
# ••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••
# Copyright (c) 2018, S.J.M. Steffann. This software is licensed under the BSD
# 3-Clause License. Please see the LICENSE file in the project root directory.
# ••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••
# Generated by Django 2.0.7 on 2018-07-15 15:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('instances', '0002_zaphod'),
]
operations = [
migrations.RenameField(
model_name='marvin',
old_name='alive',
new_name='is_alive',
),
migrations.AlterField(
model_name='marvin',
name='is_alive',
field=models.BooleanField(default=True, verbose_name='is alive'),
),
migrations.AlterModelOptions(
name='marvin',
options={'ordering': ('-is_alive', 'instance_type', '-last_seen')},
),
]
|
python
|
from typing import Dict, Optional
from fastapi import APIRouter, Depends, HTTPException
from src.api.serializers import UserIn, UserOut, UsersOut
from src.services.auth import get_current_user, get_user_login_from_token
from src.services.users import (
create_user,
get_user_by_id,
get_user_by_login,
search_users,
)
from starlette import status
router = APIRouter()
@router.post("/users", response_model=UserOut, status_code=status.HTTP_201_CREATED)
async def create_client_handler(user: UserIn):
await create_user(
login=user.login,
password=user.password,
first_name=user.first_name,
last_name=user.last_name,
age=user.age,
sex=user.sex,
interests=user.interests,
city_id=user.city_id,
)
user_data = await get_user_by_login(user.login)
return user_data
@router.get("/users/me", response_model=UserOut, status_code=status.HTTP_200_OK)
async def get_my_user_info(current_user: Dict = Depends(get_current_user)):
return current_user
@router.get(
"/users/{user_id}",
dependencies=[Depends(get_user_login_from_token)],
response_model=UserOut,
status_code=status.HTTP_200_OK,
)
async def get_user_info_by_id(user_id: int):
user_data = await get_user_by_id(user_id)
if not user_data:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="User not found",
)
return user_data
@router.get(
"/users",
dependencies=[Depends(get_user_login_from_token)],
response_model=UsersOut,
status_code=status.HTTP_200_OK,
)
async def get_users(first_name: Optional[str] = None, last_name: Optional[str] = None):
users_data = await search_users(first_name, last_name)
return {"data": users_data}
|
python
|
import maestro
import time
import numpy as np
import scipy as sp
import scipy.interpolate
import sys
a1 = 0.1
a2 = 0.26
# In degrees from straight config
ee_pos_list = [
np.array([0.12, 0.17]),
#np.array([0.12, 0]),
#np.array([0.14, 0]),
np.array([0.325, 0.12]),
np.array([0.12, 0.17]),
]
time_list = [0., 1., 2.]
# Automatically cycle smoothly
# Do interpolation
times = np.arange(min(time_list), max(time_list), 0.1)
ee_pos_generator = sp.interpolate.interp1d(
time_list, np.stack(ee_pos_list, axis=0), axis=0,
kind="linear",
bounds_error=False,
fill_value=(ee_pos_list[0], ee_pos_list[-1])
)
def do_ik(pos):
# https://robotacademy.net.au/lesson/inverse-kinematics-for-a-2-joint-robot-arm-using-geometry/
q2 = np.arccos( (pos[0]**2 + pos[1]**2 - a1**2 - a2**2) / (2 * a1 * a2) )
q1 = np.arctan2(pos[1], pos[0]) - np.arctan2((a2 * np.sin(q2)), (a1 + a2 * np.cos(q2)))
# 0 for q2 is actually 90*
return np.array([0, q1, q2 - np.pi/2.])*180./np.pi
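# Illustrative forward-kinematics check (added for clarity, not part of the
# original script): feeding do_ik's joint angles back through the planar
# two-link FK should reproduce the requested end-effector position, e.g.
# np.allclose(do_fk(do_ik(np.array([0.2, 0.1]))), [0.2, 0.1]) for reachable points.
def do_fk(q_deg):
    # q_deg follows do_ik's convention: [base, q1, q2 - 90 deg] in degrees
    q1 = q_deg[1] * np.pi / 180.
    q2 = q_deg[2] * np.pi / 180. + np.pi / 2.
    return np.array([a1 * np.cos(q1) + a2 * np.cos(q1 + q2),
                     a1 * np.sin(q1) + a2 * np.sin(q1 + q2)])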
n_servos = 3
servo_min = np.array([500, 500, 500])
servo_centers = np.array([1500, 1500, 1500])
servo_ms_per_deg = np.array([1000/90., 1000/90., 1000/90.])
if __name__ == "__main__":
servo = maestro.Controller(ttyStr='COM6')
while (1):
start_time = time.time()
t = time.time() - start_time
while t < max(time_list):
t = time.time() - start_time
ee_pos = ee_pos_generator(t)
pos = do_ik(ee_pos)
print("%f: %s -> %s" % (t, ee_pos, pos))
pos_in_ms = servo_centers + pos * servo_ms_per_deg
for k in range(n_servos):
# Commands in quarter-ms
servo.setTarget(k,int(pos_in_ms[k]*4))
servo.close()
|
python
|
import cv2
import numpy as np
#####################################
#adjust as per ratio required
widthImg = 640
heightImg = 480
#####################################
cap = cv2.VideoCapture(0)
cap.set(3, widthImg)
cap.set(4, heightImg)
cap.set(10, 150) #id: 10, represents brightness => adjust as required based on setting
#Preprocessing Function
def preProcessing(img):
imgGray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
imgBlur = cv2.GaussianBlur(imgGray, (5,5), 1)
imgCanny = cv2.Canny(imgBlur, 200, 200) #modify threshold as needed
kernel = np.ones((5,5))
imgDil = cv2.dilate(imgCanny,kernel,iterations=2)
imgThres = cv2.erode(imgDil, kernel, iterations=1)
return imgThres
#Contours function
def getContours(img):
biggest = np.array([])
maxArea = 0
contours, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
for cnt in contours:
area = cv2.contourArea(cnt)
if area > 5000: #modify based on size
# cv2.drawContours(imgContour, cnt, -1, (255, 0, 0), 3)
perimeter = cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, 0.02*perimeter, True)
if area > maxArea and len(approx) == 4:
biggest = approx
maxArea = area
cv2.drawContours(imgContour, biggest, -1, (255, 0, 0), 20) # show edges
return biggest
#Reorder function
def reorder(myPoints):
myPoints = myPoints.reshape((4,2))
myPointsNew = np.zeros((4, 1, 2),np.int32)
add = myPoints.sum(axis=1) #use axis 1
# print("add", add)
myPointsNew[0] = myPoints[np.argmin(add)] #smallest sum at 0,0
myPointsNew[3] = myPoints[np.argmax(add)] #largest sum at widthImg, heightImg
diff = np.diff(myPoints, axis=1)
    myPointsNew[1] = myPoints[np.argmin(diff)] # smallest (y - x) difference => top-right corner
    myPointsNew[2] = myPoints[np.argmax(diff)] # largest (y - x) difference => bottom-left corner
# print("New Points", myPointsNew)
return myPointsNew
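# Worked example (illustrative): for corners [[50,300],[300,50],[50,50],[300,300]]
# the sums are [350, 350, 100, 600] and the diffs (y - x) are [250, -250, 0, 0],
# so reorder() returns top-left [50,50], top-right [300,50], bottom-left [50,300],
# bottom-right [300,300], matching the pts2 ordering used in getWarp below.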
#Warping function
def getWarp(img, biggest):
biggest = reorder(biggest)
pts1 = np.float32(biggest)
pts2 = np.float32([[0, 0], [widthImg, 0], [0, heightImg], [widthImg, heightImg]])
matrix = cv2.getPerspectiveTransform(pts1, pts2)
imgOutput = cv2.warpPerspective(img, matrix, (widthImg, heightImg))
#minor crop to adjust can adjust if needed
imgCropped = imgOutput[20:imgOutput.shape[0]-20, 20:imgOutput.shape[1]-20]
imgCropped = cv2.resize(imgCropped, (widthImg, heightImg))
return imgCropped
#Stack function
def stackImages(scale,imgArray):
rows = len(imgArray)
cols = len(imgArray[0])
rowsAvailable = isinstance(imgArray[0], list)
width = imgArray[0][0].shape[1]
height = imgArray[0][0].shape[0]
if rowsAvailable:
        for x in range(0, rows):
            for y in range(0, cols):
                if imgArray[x][y].shape[:2] == imgArray[0][0].shape[:2]:
imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0), None, scale, scale)
else:
imgArray[x][y] = cv2.resize(imgArray[x][y], (imgArray[0][0].shape[1], imgArray[0][0].shape[0]), None, scale, scale)
if len(imgArray[x][y].shape) == 2: imgArray[x][y]= cv2.cvtColor( imgArray[x][y], cv2.COLOR_GRAY2BGR)
imageBlank = np.zeros((height, width, 3), np.uint8)
hor = [imageBlank]*rows
hor_con = [imageBlank]*rows
for x in range(0, rows):
hor[x] = np.hstack(imgArray[x])
ver = np.vstack(hor)
else:
for x in range(0, rows):
if imgArray[x].shape[:2] == imgArray[0].shape[:2]:
imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale, scale)
else:
imgArray[x] = cv2.resize(imgArray[x], (imgArray[0].shape[1], imgArray[0].shape[0]), None,scale, scale)
if len(imgArray[x].shape) == 2: imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)
        hor = np.hstack(imgArray)
ver = hor
return ver
#Body
while True:
success, img = cap.read()
img = cv2.resize(img, (widthImg, heightImg))
imgContour = img.copy()
imgThres = preProcessing(img)
biggest = getContours(imgThres)
#if biggest.size != 0:
imgWarped = getWarp(img, biggest)
imgArray = ([img, imgThres], [imgContour, imgWarped])
# else:
# imgArray = ([img, imgThres], [img, img])
stackedImages = stackImages(0.6, imgArray)
cv2.imshow("Steps", stackedImages)
    if cv2.waitKey(1) & 0xFF == ord('q'):
break
|
python
|
import django_tables2 as tables
from net.models import Connection
from utils.tables import BaseTable, ButtonsColumn, SelectColumn
class ConnectionTable(BaseTable):
pk = SelectColumn()
ipv6_address = tables.Column(linkify=True, verbose_name="IPv6")
ipv4_address = tables.Column(linkify=True, verbose_name="IPv4")
internet_exchange_point = tables.LinkColumn()
router = tables.LinkColumn()
buttons = ButtonsColumn(Connection)
class Meta(BaseTable.Meta):
model = Connection
fields = (
"pk",
"state",
"vlan",
"ipv6_address",
"ipv4_address",
"internet_exchange_point",
"router",
"interface",
"buttons",
)
default_columns = (
"pk",
"state",
"vlan",
"ipv6_address",
"ipv4_address",
"router",
"buttons",
)
empty_text = "None"
|
python
|
import os
import json
from .constants import CUST_ATTR_GROUP
def default_custom_attributes_definition():
json_file_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"custom_attributes.json"
)
with open(json_file_path, "r") as json_stream:
data = json.load(json_stream)
return data
def app_definitions_from_app_manager(app_manager):
_app_definitions = []
for app_name, app in app_manager.applications.items():
if app.enabled and app.is_host:
_app_definitions.append(
(app_name, app.full_label)
)
# Sort items by label
app_definitions = []
for key, label in sorted(_app_definitions, key=lambda item: item[1]):
app_definitions.append({key: label})
if not app_definitions:
app_definitions.append({"empty": "< Empty >"})
return app_definitions
def tool_definitions_from_app_manager(app_manager):
_tools_data = []
for tool_name, tool in app_manager.tools.items():
_tools_data.append(
(tool_name, tool.label)
)
# Sort items by label
tools_data = []
for key, label in sorted(_tools_data, key=lambda item: item[1]):
tools_data.append({key: label})
# Make sure there is at least one item
if not tools_data:
tools_data.append({"empty": "< Empty >"})
return tools_data
def get_openpype_attr(session, split_hierarchical=True, query_keys=None):
custom_attributes = []
hier_custom_attributes = []
if not query_keys:
query_keys = [
"id",
"entity_type",
"object_type_id",
"is_hierarchical",
"default"
]
# TODO remove deprecated "pype" group from query
cust_attrs_query = (
"select {}"
" from CustomAttributeConfiguration"
        # Kept `pype` for backwards compatibility
" where group.name in (\"pype\", \"{}\")"
).format(", ".join(query_keys), CUST_ATTR_GROUP)
all_avalon_attr = session.query(cust_attrs_query).all()
for cust_attr in all_avalon_attr:
if split_hierarchical and cust_attr["is_hierarchical"]:
hier_custom_attributes.append(cust_attr)
continue
custom_attributes.append(cust_attr)
if split_hierarchical:
# return tuple
return custom_attributes, hier_custom_attributes
return custom_attributes
def join_query_keys(keys):
"""Helper to join keys to query."""
return ",".join(["\"{}\"".format(key) for key in keys])
def query_custom_attributes(session, conf_ids, entity_ids, table_name=None):
"""Query custom attribute values from ftrack database.
Using ftrack call method result may differ based on used table name and
version of ftrack server.
Args:
session(ftrack_api.Session): Connected ftrack session.
        conf_ids(list, set, tuple): Configuration (attribute) ids which are
            queried.
        entity_ids(list, set, tuple): Entity ids for which values are queried.
        table_name(str): Table name from which values are queried. Not
            recommended to change unless you know what it means.
"""
output = []
# Just skip
if not conf_ids or not entity_ids:
return output
if table_name is None:
table_name = "ContextCustomAttributeValue"
# Prepare values to query
attributes_joined = join_query_keys(conf_ids)
attributes_len = len(conf_ids)
# Query values in chunks
chunk_size = int(5000 / attributes_len)
# Make sure entity_ids is `list` for chunk selection
entity_ids = list(entity_ids)
for idx in range(0, len(entity_ids), chunk_size):
entity_ids_joined = join_query_keys(
entity_ids[idx:idx + chunk_size]
)
call_expr = [{
"action": "query",
"expression": (
"select value, entity_id from {}"
" where entity_id in ({}) and configuration_id in ({})"
).format(table_name, entity_ids_joined, attributes_joined)
}]
if hasattr(session, "call"):
[result] = session.call(call_expr)
else:
[result] = session._call(call_expr)
for item in result["data"]:
output.append(item)
return output
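# Illustrative usage (added for clarity; the configuration and entity ids are
# hypothetical): values are fetched in chunks so the generated "in (...)"
# clauses stay around 5000 rows per call.
#
#   attr_confs, hier_confs = get_openpype_attr(session)
#   conf_ids = [conf["id"] for conf in attr_confs]
#   values = query_custom_attributes(session, conf_ids, entity_ids)
#   for item in values:
#       print(item["entity_id"], item["value"])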
|
python
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Tests for naming of modules when lowering JAX into MLIR.
# RUN: %PYTHON %s | FileCheck %s
from absl import app
import jax
from jax import lax
import numpy as np
from jax.tests.filecheck.jax_filecheck_helpers import print_ir
jax.config.update("jax_enable_mlir", True)
jax.config.update("jax_enable_x64", True)
def main(_):
# CHECK-LABEL: TEST: neg int32[7]
# CHECK: module @jit_neg
# CHECK: func public @main
print_ir(np.empty([7], np.int32))(lax.neg)
# CHECK-LABEL: TEST: foo int32[7]
# CHECK: module @jit_foo
# CHECK: func public @main
@print_ir(np.empty([7], np.int32))
@jax.jit
def foo(x): return x + 2
if __name__ == "__main__":
app.run(main)
|
python
|
from flask_restplus import Api
from jsonschema import FormatChecker
import logging
log = logging.getLogger(__name__)
# Instantiate a Flask-RESTPlus API
api = Api(version='1.0', title='iter8 analytics REST API',
description='API to perform analytics to support canary releases '
'and A/B tests',
format_checker=FormatChecker(formats=("date-time",)))
def build_http_error(msg, http_code):
    '''Returns a specific error message and HTTP code that can be used by
    the REST API'''
return {'message': msg}, http_code
@api.errorhandler
def default_error_handler(e):
'''Error handler for uncaught exceptions'''
message = 'An unexpected error occurred'
log.exception(message)
return {'message': message}, 500
|
python
|
import sys
def subst(s, ls):
if s == "":
return ""
    for j in range(0, len(ls), 2):
i = s.find(ls[j])
if i != -1:
return subst(s[:i], ls) + ls[j + 1] + subst(s[i + len(ls[j]) :], ls)
return s
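# Illustrative example (not part of the original script): with the flattened
# pair list ls = ["aa", "b"], subst("aaa", ls) matches "aa" at index 0,
# recurses on the remaining "a", and returns "ba".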
test_cases = open(sys.argv[1], "r")
for test in test_cases:
s, sub = test.strip().split(";")
    print(subst(s, sub.split(",")))
test_cases.close()
|
python
|
from rest_framework import serializers
from daiquiri.metadata.models import Column
class ColumnSerializer(serializers.ModelSerializer):
class Meta:
model = Column
fields = (
'id',
'order',
'name',
'description',
'unit',
'ucd',
'datatype',
'arraysize',
'principal',
'indexed',
'std'
)
|
python
|
from django.shortcuts import render, get_object_or_404, redirect
from django.urls import reverse
from django.http import HttpResponseRedirect
from django.views.generic import ListView, DetailView, CreateView
from django.views.generic.edit import FormView, UpdateView
from django.contrib.auth import login, authenticate
from django.contrib.auth.forms import UserCreationForm
from .models import Adventure, Room, Choice
from .forms import AdventureForm, RoomForm, ChoiceForm
# Create your views here.
class AdventureDetailView(DetailView):
model = Adventure
template_name = "adventure/adventure_detail.html"
context_object_name = 'adventure'
#adds start_room to context data
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
thisslug = self.kwargs.get("slug")
room = Room.startroom.filter(adventure__slug=thisslug).first()
if room:
context['start_room'] = room.pk
return context
class AdventureListView(ListView):
model = Adventure
template_name = "adventure/adventure_list.html"
context_object_name = 'adventure_list'
class AdventureCreateView(CreateView):
model = Adventure
template_name = "adventure/adventure_update.html"
form_class = AdventureForm
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.owner = self.request.user
return super().form_valid(form)
class AdventureUpdateView(UpdateView):
model = Adventure
template_name = "adventure/adventure_update.html"
form_class = AdventureForm
class AdventurePlayView(DetailView):
model = Adventure
current_room = None
template_name = "room/room_play.html"
context_object_name = 'room'
    def get_object(self, queryset=None):
        obj = super().get_object(queryset=queryset)
        return obj
    def set_current_room(self):
        pass
    def get_first_room(self):
        thisslug = self.kwargs.get("slug")
        query = Room.objects.filter(adventure__slug=thisslug).filter(start_room=True)
        room = get_object_or_404(query)
        return room
class RoomCreateView(CreateView):
model = Room
template_name = "adventure/adventure_update.html"
form_class = AdventureForm
def form_valid(self, form):
slug = self.kwargs.get(self.slug_url_kwarg)
queryset = Adventure.objects.filter(adventure__slug=slug).filter()
## TODO: finish this
self.object = form.save(commit=False)
self.object.adventure = self.request.user
return super().form_valid(form)
class RoomPlayView(DetailView):
model = Room
template_name = "room/room_play.html"
context_object_name = 'room'
slug_url_kwarg = 'adv_slug'
def get_object(self, queryset=None, **kwargs):
slug = self.kwargs.get(self.slug_url_kwarg)
r_pk = self.kwargs.get('pk')
queryset = Room.objects.filter(adventure__slug=slug).filter(pk=r_pk)
obj = super().get_object(queryset=queryset)
return obj
class RoomDetailView(DetailView):
model = Room
template_name = "room/room_detail.html"
context_object_name = 'room'
class RoomListView(ListView):
model = Adventure
template_name = "room/room_list.html"
context_object_name = 'room_list'
class RoomCreateView(CreateView):
model = Room
template_name = "room/room_update.html"
form_class = RoomForm
class RoomUpdateView(UpdateView):
model = Room
template_name = "room/room_update.html"
form_class = RoomForm
def register_view(request):
    if request.method == "POST":
        form = UserCreationForm(request.POST)
        if form.is_valid():
            form.save()
            username = form.cleaned_data.get('username')
            password = form.cleaned_data.get('password1')
            user = authenticate(username=username, password=password)
            login(request, user)
            return redirect('adventure-list')
    else:
        form = UserCreationForm()
    return render(request, 'user/register.html', {'form': form})
|
python
|
"""
Unit tests for PDF class
"""
import numpy as np
import unittest
import qp
class EvalFuncsTestCase(unittest.TestCase):
""" Tests of evaluations and interpolation functions """
def setUp(self):
"""
Make any objects that are used in multiple tests.
"""
self.xpts = np.linspace(0, 3, 7)
self.hpdfs = np.random.random((10, 50)) #pylint: disable=no-member
self.hbins = np.linspace(0, 5, 51)
self.hbins2 = np.linspace(0, 5, 51) + np.expand_dims(np.linspace(0.1, 1., 10), -1)
self.xvals = np.linspace(0, 5, 50)
self.xvals2 = np.linspace(0, 5, 50) + np.expand_dims(np.linspace(0.1, 1., 10), -1)
self.yvals1d = self.hpdfs[0]
self.rows = np.expand_dims(np.arange(10), -1)
self.grid = self.xpts * np.ones((10, 7))
self.range_grid = (self.rows * np.ones((10), int)).astype(int)
def tearDown(self):
"Clean up any mock data files created by the tests."
def _check_interface_function(self, ifunc, xvals, yvals, **kwargs):
v0 = ifunc(self.xpts, self.rows, xvals, yvals, **kwargs)
v1 = ifunc(self.grid.flatten(), self.rows.flatten(), xvals, yvals, **kwargs)
v2 = ifunc(self.grid, self.rows, xvals, yvals, **kwargs)
_ = ifunc(self.xpts, np.arange(7), xvals, yvals, **kwargs)
assert np.allclose(v0, v1)
assert np.allclose(v0, v2)
def test_evaluate_hist_x_multi_y(self):
""" Test the evaluate_hist_x_multi_y function """
self._check_interface_function(qp.utils.evaluate_hist_x_multi_y,
self.hbins, self.hpdfs)
def test_evaluate_hist_multi_x_multi_y(self):
""" Test the evaluate_hist_multi_x_multi_y function """
self._check_interface_function(qp.utils.evaluate_hist_multi_x_multi_y,
self.hbins2, self.hpdfs)
def test_interpolate_x_multi_y(self):
""" Test the interpolate_x_multi_y """
self._check_interface_function(qp.utils.interpolate_x_multi_y,
self.xvals, self.hpdfs, bounds_error=False, fill_value=0)
def test_interpolate_multi_x_multi_y(self):
""" Test the interpolate_multi_x_multi_y """
self._check_interface_function(qp.utils.interpolate_multi_x_multi_y,
self.xvals2, self.hpdfs, bounds_error=False, fill_value=0)
def test_interpolate_multi_x_y(self):
""" Test the interpolate_multi_x_y """
self._check_interface_function(qp.utils.interpolate_multi_x_y,
self.xvals2, self.yvals1d, bounds_error=False, fill_value=0)
if __name__ == '__main__':
unittest.main()
|
python
|
import itsdangerous, json, atexit, traceback, logging
from flask import redirect, render_template, url_for, abort, \
flash, request
from flask_login import login_user, logout_user, current_user, login_required
from flask_mail import Message
from flask_admin import Admin, AdminIndexView, expose
from flask_admin.contrib.sqla import ModelView
from flask_admin.form import SecureForm
from app_manager import app, db, ts, mail, DAY
from forms import SignupForm, LoginForm, UsernameForm, ResetPasswordForm, \
ChangePasswordForm, NominationForm, VoteForm, BanForm, AdminForm, \
NomIDForm, PhaseNomForm, PhaseVoteForm, PhaseStaticForm, SetPhaseForm, \
ClearForm, RemoveNomForm
from models import User, Award, Nomination, State
from login_manager import login_manager
from dbutils import clear_noms, clear_votes
from werkzeug.exceptions import default_exceptions
from urllib.parse import urlparse, urljoin
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.jobstores.base import JobLookupError
from logging.handlers import SMTPHandler
from io import StringIO
scheduler = BackgroundScheduler(timezone="US/Eastern")
@app.route("/", methods=["GET"])
def index():
return render_template("index.html", phase=phase())
@app.route("/signup", methods=["GET", "POST"])
def signup():
if current_user.is_authenticated:
return redirect(url_for("index"))
form = SignupForm()
if form.validate_on_submit():
user = User(username=form.username.data.lower(),
password=form.password.data)
db.session.add(user)
db.session.flush()
if send_confirm_link(user.id, user.email):
db.session.commit()
flash("Account created! Please click the confirmation link sent "
"to %s" % user.email, "success")
return redirect(url_for("index"))
return render_template("signup.html", form=form)
@app.route("/confirm/<token>", methods=["GET"])
def confirm_email(token):
try:
userID, email = ts.loads(token, salt="email-confirm-key", max_age=DAY)
except itsdangerous.SignatureExpired:
return render_template("activate_expired.html", token=token)
except:
abort(404)
user = User.query.filter_by(id=userID).first_or_404()
if user.email != email:
abort(404) # this shouldn't ever happen
if user.email_confirmed == True:
return render_template("already_confirmed.html")
user.email_confirmed = True
db.session.commit()
flash("Email confirmed! Sign in!", "success")
return redirect(url_for("signin"))
@app.route("/newlink/<token>", methods=["GET"])
def new_link(token):
try:
userID, email = ts.loads(token, salt="email-confirm-key") # ignore age
except:
abort(404)
user = User.query.filter_by(id=userID).first_or_404()
if user.email != email:
abort(404) # this shouldn't ever happen
if send_confirm_link(userID, email):
flash("New confirmation link sent, check your email!", "success")
return redirect(url_for("index"))
else:
# send them back to the expired confirm page
return redirect(url_for("confirm_email", token=token))
@app.route("/resend", methods=["GET", "POST"])
def resend():
form = UsernameForm()
if form.validate_on_submit():
user = User.query.filter_by(
username=form.username.data.lower()).first_or_404()
if user.email_confirmed == True:
flash("Your email is already confirmed!", "error")
elif send_confirm_link(user.id, user.email):
flash("New confirmation link sent, check your email!", "success")
return redirect(url_for("index"))
return render_template("resend.html", form=form)
@app.route("/signin", methods=["GET", "POST"])
def signin():
if current_user.is_authenticated:
next_url = request.args.get("next")
if not is_safe_url(next_url):
abort(400)
return redirect(next_url or url_for("index"))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(
username=form.username.data.lower()).first_or_404()
if not user.email_confirmed:
flash("Please click the confirmation link sent to your email first",
"error")
elif user.is_correct_password(form.password.data):
if user.banned:
flash("Your account has been banned", "error")
elif login_user(user, remember=True):
flash("Logged in successfully", "success")
next_url = request.args.get("next")
if not is_safe_url(next_url):
abort(400)
return redirect(next_url or url_for("index"))
else:
flash("Account inactive", "error")
else:
flash("Password incorrect, try again", "error")
return render_template("signin.html", form=form)
@app.route("/signout", methods=["GET"])
def signout():
if current_user.is_authenticated:
logout_user()
flash("Logged out", "success")
return redirect(url_for("index"))
@app.route("/reset", methods=["GET", "POST"])
def reset():
if current_user.is_authenticated:
return redirect(url_for("index"))
form = UsernameForm()
if form.validate_on_submit():
user = User.query.filter_by(
username=form.username.data.lower()).first_or_404()
subject = "Password reset requested"
token = ts.dumps(user.username, salt="recover-key")
recover_url = url_for("reset_with_token", token=token, _external=True)
html = render_template("email/recover.html", recover_url=recover_url)
if send_email(user.email, subject, html):
flash("A password reset link has sent to your email address", "success")
return redirect(url_for("index"))
return render_template("reset.html", form=form)
@app.route("/reset/<token>", methods=["GET", "POST"])
def reset_with_token(token):
if current_user.is_authenticated:
return redirect(url_for("index"))
try:
username = ts.loads(token, salt="recover-key", max_age=DAY)
except itsdangerous.SignatureExpired:
return render_template("recover_expired.html")
except:
abort(404)
form = ResetPasswordForm()
if form.validate_on_submit():
user = User.query.filter_by(username=username).first_or_404()
user.password = form.password.data
db.session.commit()
flash("Password reset successfully! Sign in!", "success")
return redirect(url_for("signin"))
return render_template("reset_with_token.html", form=form)
@app.route("/changepass", methods=["GET", "POST"])
@login_required
def change_password():
form = ChangePasswordForm()
if form.validate_on_submit():
if current_user.is_correct_password(form.currentpass.data):
current_user.password = form.password.data
db.session.commit()
flash("Password changed!", "success")
login_user(current_user, remember=True)
return redirect(url_for("index"))
else:
flash("Current password incorrect, try again", "error")
return render_template("change_password.html", form=form)
@app.route("/awards", methods=["GET", "POST"])
@login_required
def awards():
p = phase()
if p == 0:
return render_template("nominees.html", awards=list_awards())
if p == 2:
return render_template("voting.html", form=VoteForm(),
awards=list_awards())
# else: nominations
form = NominationForm()
if form.validate_on_submit():
award = Award.query.filter_by(id=form.award_id.data).first_or_404()
award.nominations.append(Nomination(name=form.entry.data,
creator=current_user))
db.session.commit()
flash("Nomination successful!", "success")
return redirect(url_for("awards"))
return render_template("nominations.html", form=form, awards=list_awards())
@app.route("/submit_vote", methods=["POST"])
def submit_vote():
result = { "success" : 0,
"message" : "An error occurred" }
if phase() != 2:
result["message"] = "Not voting phase!"
return json.dumps(result), 200 # return 200 so message displays
if not current_user.is_authenticated:
# rather than login_required, this allows returning a json
result["message"] = "Not logged in"
return json.dumps(result), 200
form = VoteForm()
if form.validate() or True:
try:
nom_id = int(form.nomid.data)
except:
return json.dumps(result), 200
nom = Nomination.query.filter_by(id=nom_id).first()
if nom is None:
return json.dumps(result), 200
for sel in current_user.selections:
if sel in nom.award.nominations:
# take away vote from other nom in this category
# clicking same button will simply remove the vote
current_user.selections.remove(sel)
result["no_vote"] = str(sel.id)
if sel == nom:
# we removed the vote, so we are done
result["success"] = 1
result["message"] = "Vote removed"
db.session.commit()
return json.dumps(result), 200
break
# only add vote if it was a different nomination's button
nom.voters.append(current_user)
result["success"] = 2
result["message"] = "Vote submitted"
result["vote"] = str(nom.id)
db.session.commit()
return json.dumps(result), 200
# Admin Interface
class MyAdminIndexView(AdminIndexView):
def is_accessible(self):
if not current_user.is_active or not current_user.is_authenticated:
return False
return current_user.is_admin
def _handle_view(self, name, **kwds):
if not self.is_accessible():
if current_user.is_authenticated:
abort(403)
else:
return login_manager.unauthorized()
@expose("/", methods=["GET", "POST"])
def index(self):
spform = SetPhaseForm()
pnform = PhaseNomForm()
pvform = PhaseVoteForm()
psform = PhaseStaticForm()
bform = BanForm()
aform = AdminForm()
nform = NomIDForm()
cform = ClearForm()
if ((spform.static.data or spform.nom.data or spform.vote.data) and
spform.validate_on_submit()):
self.set_phase(spform)
return self.check_full_index()
if ((pnform.pnon.data and pnform.validate_on_submit()) or
pnform.pnoff.data):
self.phase_sched(pnform, 1)
return self.check_full_index()
if ((pvform.pvon.data and pvform.validate_on_submit()) or
pvform.pvoff.data):
self.phase_sched(pvform, 2)
return self.check_full_index()
if ((psform.pson.data and psform.validate_on_submit()) or
psform.psoff.data):
self.phase_sched(psform, 0)
return self.check_full_index()
if (bform.ban.data or bform.unban.data) and bform.validate_on_submit():
if self.ban(bform):
return self.check_full_index()
if (aform.give.data or aform.take.data) and aform.validate_on_submit():
self.change_admin(aform)
return self.check_full_index()
if ((nform.rem.data or nform.rwarn.data or nform.rban.data) and
nform.validate_on_submit()):
self.remove_nom(nform.nomid.data, nform.rwarn.data, nform.rban.data)
return self.check_full_index()
if ((cform.cnoms.data or cform.cvotes.data) and
cform.validate_on_submit()):
self.clear(cform)
return self.check_full_index()
full = self.get_full()
s = State.query.first()
if s.dtnom is not None:
pnform.dtnom.data = s.dtnom
if s.dtvote is not None:
pvform.dtvote.data = s.dtvote
if s.dtstatic is not None:
psform.dtstatic.data = s.dtstatic
return self.render("admin/index.html", spform=spform, pnform=pnform,
pvform=pvform, psform=psform, aform=aform, bform=bform, nform=nform,
cform=cform, awards=list_awards(), full=full, phase=phase())
@expose("/noms/<awd>", methods=["GET", "POST"])
def list_noms(self, awd):
form = RemoveNomForm()
if form.validate_on_submit():
self.remove_nom(form.nomid.data, form.warn.data, form.ban.data)
return redirect(url_for("admin.list_noms", awd=awd))
award = Award.query.filter_by(id=awd).first_or_404()
return self.render("admin/list_noms.html", form=form, award=award)
@expose("/guide", methods=["GET"])
def guide(self):
return self.render("admin/guide.html")
def set_phase(self, form):
p = 0 if form.static.data else 1 if form.nom.data else 2
assign_phase(p)
flash("Phase changed to %s" %
("static", "nominating", "voting")[p], "success")
def clear(self, form):
if form.cnoms.data:
clear_votes() # must be done first
clear_noms()
flash("Cleared all nominations", "success")
elif form.cvotes.data:
clear_votes()
flash("Cleared all votes", "success")
else:
abort(400)
def phase_sched(self, form, p):
if p == 1:
kwds = pndict
cancel = form.pnoff.data
dt = form.dtnom.data
pname = "Nominating"
elif p == 2:
kwds = pvdict
cancel = form.pvoff.data
dt = form.dtvote.data
pname = "Voting"
else:
kwds = psdict
cancel = form.psoff.data
dt = form.dtstatic.data
pname = "Static"
if cancel:
try:
scheduler.remove_job(kwds["id"])
flash("Canceled %s Phase" % pname, "success")
except JobLookupError:
flash("%s Phase schedule not found or "
"already passed" % pname, "warning")
dt = None
else:
scheduler.add_job(replace_existing=True,
run_date=dt, **kwds)
flash("Scheduled %s Phase for %s Eastern" %
(pname, dt.strftime("%A %B %d %Y at %I:%M %p")), "success")
s = State.query.first()
if p == 1:
s.dtnom = dt
elif p == 2:
s.dtvote = dt
else:
s.dtstatic = dt
db.session.commit()
def ban(self, bform):
user = User.query.filter_by(
username=bform.banuser.data.lower()).first_or_404()
if bform.ban.data:
user.ban()
msg = "Banned "
if bform.email.data:
subject = "Your account has been banned"
html = render_template("email/ban.html", award_name=None)
msg += "and notified "
elif bform.unban.data:
user.unban()
msg = "Unbanned "
if bform.email.data:
subject = "Your account is no longer banned"
html = render_template("email/unban.html")
msg += "and notified "
db.session.flush()
if not bform.email.data or send_email(user.email, subject, html):
db.session.commit()
flash(msg + user.username, "success") # flash once commit passes
return True
return False
def change_admin(self, aform):
user = User.query.filter_by(
username=aform.adminuser.data.lower()).first_or_404()
if aform.give.data:
user.give_admin()
msg = "Made %s an admin" % user.username
elif aform.take.data:
user.take_admin()
msg = "Removed %s as admin" % user.username
db.session.commit()
flash(msg, "success") # flash once commit passes
def remove_nom(self, nomid, warn, ban):
nom = Nomination.query.filter_by(id=nomid).first_or_404()
awd = nom.award
user = nom.creator
db.session.delete(nom) # any of the buttons will remove the nom
msgs = ["Removed %r ('%s' for '%s')" % (nom, nom.name, awd.name)]
if warn:
subject = "Inappropriate Content Warning"
html = render_template("email/warning.html", award_name=awd.name)
msgs.append("Warning sent to %s" % user.username)
elif ban:
user.ban()
subject = "Your account has been banned"
html = render_template("email/ban.html", award_name=awd.name)
msgs.append("Banned and notified %s" % user.username)
db.session.flush()
if not (warn or ban) or send_email(user.email, subject, html):
db.session.commit()
for msg in msgs: # flash once commit passes
flash(msg, "success")
return True
return False
def check_full_index(self):
full = self.get_full()
if full:
return redirect("/admin/?full")
else:
return redirect("/admin/")
def get_full(self):
full = request.args.get("full")
# if full appears as anything in request, render the full page
return full is not None
class MyModelView(ModelView):
form_base_class = SecureForm
is_accessible = MyAdminIndexView.is_accessible
_handle_view = MyAdminIndexView._handle_view
column_display_pk = True
class UserView(MyModelView):
column_exclude_list = ("_password", "sessTokenTime")
admin = Admin(app, name="Kudos Admin", template_mode="bootstrap3",
index_view=MyAdminIndexView())
admin.add_view(UserView(User, db.session))
admin.add_view(MyModelView(Award, db.session))
admin.add_view(MyModelView(Nomination, db.session))
admin.add_view(MyModelView(State, db.session))
def handle_error(e):
try:
code = e.code
except AttributeError:
code = 500
return render_template("error.html", error=e), code
def init_error_mail():
class MySMTPHandler(SMTPHandler):
def emit(self, record):
if current_user and current_user.is_authenticated:
record.username = current_user.username
else:
record.username = None
return super().emit(record)
def getSubject(self, record):
return f"{self.subject} ({record.levelname}) - {record.asctime}"
fromaddr = app.config["MAIL_USERNAME"]
tls = app.config.get("MAIL_USE_TLS", False)
ssl = app.config.get("MAIL_USE_SSL", False)
secure = () if tls or ssl else None
port = app.config["MAIL_PORT"] if not ssl else app.config["MAIL_PORT_TLS"]
mail_handler = MySMTPHandler(
mailhost=(app.config["MAIL_SERVER"], port),
fromaddr=f"Kudos <{fromaddr}>",
toaddrs=[fromaddr], # send it back to admin account
subject="Kudos Failure",
credentials=(fromaddr, app.config["MAIL_PASSWORD"]),
secure=secure)
mail_handler.setLevel(logging.ERROR)
mail_handler.setFormatter(logging.Formatter(
'[%(asctime)s] %(levelname)s in %(module)s by User <%(username)s>:\n'
'%(message)s'
))
app.logger.addHandler(mail_handler)
for code in default_exceptions:
app.register_error_handler(code, handle_error)
if not app.debug:
init_error_mail()
def send_confirm_link(userID, email):
subject = "Confirm your email"
token = ts.dumps([userID, email], salt="email-confirm-key")
confirm_url = url_for("confirm_email", token=token, _external=True)
html = render_template("email/activate.html", confirm_url=confirm_url)
return send_email(email, subject, html)
def try_send_msg(msg):
st = StringIO()
traceback.print_stack(limit=50, file=st)
try:
mail.send(msg)
except Exception as e:
msg = str(e) + "\n\nCalling stack:\n" + st.getvalue() + "\n"
app.logger.exception(msg)
flash("Email send error, try again", "error")
db.session.rollback() # assume we always want to undo flush
return False
return True
def send_email(email, subject, html, **kwds):
msg = Message("[KUDOS] " + subject, recipients=[email], html=html, **kwds)
return try_send_msg(msg)
def is_safe_url(target):
ref_url = urlparse(request.host_url)
test_url = urlparse(urljoin(request.host_url, target))
return (test_url.scheme in ("http", "https") and
ref_url.netloc == test_url.netloc)
def phase():
return State.query.first().phase
def list_awards():
return Award.query.order_by(Award.order).all()
def assign_phase(p):
s = State.query.first()
s.phase = p
db.session.commit()
pndict = dict(
func=assign_phase,
args=[1],
id="nom",
name="Change phase to nominating")
pvdict = dict(
func=assign_phase,
args=[2],
id="vote",
name="Change phase to voting")
psdict = dict(
func=assign_phase,
args=[0],
id="static",
name="Change phase to static")
@app.before_first_request
def initScheduler():
# this implementation assumes there is only one dyno on heroku
s = State.query.first()
if s.dtnom is not None:
scheduler.add_job(run_date=s.dtnom, **pndict)
if s.dtvote is not None:
scheduler.add_job(run_date=s.dtvote, **pvdict)
if s.dtstatic is not None:
scheduler.add_job(run_date=s.dtstatic, **psdict)
scheduler.start()
atexit.register(lambda: scheduler.shutdown())
if __name__ == "__main__":
app.run(debug=True) # should only be on debug when run locally
|
python
|
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
# Updates by Pablo Palafox 2021
import torch.nn as nn
import torch
import torch.nn.functional as F
import kornia
from utils import embedder
from utils import geometry_utils
class PoseDecoder(nn.Module):
def __init__(
self,
latent_size,
dims,
dropout=None,
dropout_prob=0.0,
norm_layers=(),
latent_in=(),
weight_norm=False,
xyz_in_all=None,
latent_dropout=False,
positional_enc=False,
n_positional_freqs=8,
n_alpha_epochs=80,
):
super(PoseDecoder, self).__init__()
input_dim = 3
output_dim = 3
if positional_enc:
self.n_positional_freqs = n_positional_freqs
self.pos_embedder, pos_embedder_out_dim = embedder.get_embedder_nerf(
n_positional_freqs, input_dims=input_dim
)
input_dim = pos_embedder_out_dim
self.n_alpha_epochs = n_alpha_epochs
self.alpha_const = n_positional_freqs / n_alpha_epochs if n_alpha_epochs > 0 else self.n_positional_freqs
dims = [latent_size + input_dim] + dims + [output_dim]
self.num_layers = len(dims)
self.norm_layers = norm_layers
self.latent_in = latent_in
self.latent_dropout = latent_dropout
self.xyz_in_all = xyz_in_all
self.weight_norm = weight_norm
for l in range(0, self.num_layers - 1):
if l + 1 in latent_in:
out_dim = dims[l + 1] - dims[0]
else:
out_dim = dims[l + 1]
if self.xyz_in_all and l != self.num_layers - 2:
out_dim -= 3
if weight_norm and l in self.norm_layers:
setattr(self, "lin" + str(l), nn.utils.weight_norm(nn.Linear(dims[l], out_dim)))
else:
print(l, dims[l], out_dim)
setattr(self, "lin" + str(l), nn.Linear(dims[l], out_dim))
if (not weight_norm) and self.norm_layers is not None and l in self.norm_layers:
setattr(self, "bn" + str(l), nn.LayerNorm(out_dim))
self.relu = nn.ReLU()
self.dropout_prob = dropout_prob
self.dropout = dropout
# input: N x (L+3)
def forward(self, input, epoch=None):
xyz = input[:, -3:]
if hasattr(self, "pos_embedder"):
alpha = self.alpha_const * epoch if self.n_alpha_epochs > 0 else self.alpha_const
input_pos_embed = self.pos_embedder(xyz, alpha)
x = torch.cat([input[:, :-3], input_pos_embed], 1)
input_embed = x.clone()
else:
if input.shape[1] > 3 and self.latent_dropout:
latent_vecs = input[:, :-3]
latent_vecs = F.dropout(latent_vecs, p=0.2, training=self.training)
x = torch.cat([latent_vecs, xyz], 1)
else:
x = input
for l in range(0, self.num_layers - 1):
lin = getattr(self, "lin" + str(l))
if l in self.latent_in:
if hasattr(self, "pos_embedder"):
x = torch.cat([x, input_embed], 1)
else:
x = torch.cat([x, input], 1)
elif l != 0 and self.xyz_in_all:
x = torch.cat([x, xyz], 1)
x = lin(x)
if l < self.num_layers - 2:
if self.norm_layers is not None and l in self.norm_layers and not self.weight_norm:
bn = getattr(self, "bn" + str(l))
x = bn(x)
x = self.relu(x)
if self.dropout is not None and l in self.dropout:
x = F.dropout(x, p=self.dropout_prob, training=self.training)
# Apply predicted translation
xyz_warped = xyz + x
return xyz_warped, x
class PoseDecoderSE3(nn.Module):
def __init__(
self,
latent_size,
dims,
dropout=None,
dropout_prob=0.0,
norm_layers=(),
latent_in=(),
weight_norm=False,
xyz_in_all=None,
latent_dropout=False,
positional_enc=False,
n_positional_freqs=8,
n_alpha_epochs=80,
):
super(PoseDecoderSE3, self).__init__()
input_dim = 3
output_dim = 9
if positional_enc:
self.n_positional_freqs = n_positional_freqs
self.pos_embedder, pos_embedder_out_dim = embedder.get_embedder_nerf(
n_positional_freqs, input_dims=input_dim
)
input_dim = pos_embedder_out_dim
self.n_alpha_epochs = n_alpha_epochs
self.alpha_const = n_positional_freqs / n_alpha_epochs if n_alpha_epochs > 0 else self.n_positional_freqs
dims = [latent_size + input_dim] + dims + [output_dim]
self.num_layers = len(dims)
self.norm_layers = norm_layers
self.latent_in = latent_in
self.latent_dropout = latent_dropout
self.xyz_in_all = xyz_in_all
self.weight_norm = weight_norm
for l in range(0, self.num_layers - 1):
if l + 1 in latent_in:
out_dim = dims[l + 1] - dims[0]
else:
out_dim = dims[l + 1]
if self.xyz_in_all and l != self.num_layers - 2:
out_dim -= 3
if weight_norm and l in self.norm_layers:
setattr(self, "lin" + str(l), nn.utils.weight_norm(nn.Linear(dims[l], out_dim)))
else:
setattr(self, "lin" + str(l), nn.Linear(dims[l], out_dim))
if (not weight_norm) and self.norm_layers is not None and l in self.norm_layers:
setattr(self, "bn" + str(l), nn.LayerNorm(out_dim))
        # Initialize the last layer from a uniform distribution U(-1e-5, 1e-5) so the predicted deformation starts near the identity (as in Nerfies)
lin_last = getattr(self, "lin" + str(self.num_layers - 2))
torch.nn.init.uniform_(lin_last.weight, a=-1e-5, b=1e-5)
self.relu = nn.ReLU()
self.dropout_prob = dropout_prob
self.dropout = dropout
# input: N x (L+3)
def forward(self, input, epoch=None):
xyz = input[:, -3:]
if hasattr(self, "pos_embedder"):
alpha = self.alpha_const * epoch if self.n_alpha_epochs > 0 else self.alpha_const
input_pos_embed = self.pos_embedder(xyz, alpha)
x = torch.cat([input[:, :-3], input_pos_embed], 1)
input_embed = x.clone()
else:
if input.shape[1] > 3 and self.latent_dropout:
latent_vecs = input[:, :-3]
latent_vecs = F.dropout(latent_vecs, p=0.2, training=self.training)
x = torch.cat([latent_vecs, xyz], 1)
else:
x = input
for l in range(0, self.num_layers - 1):
lin = getattr(self, "lin" + str(l))
if l in self.latent_in:
if hasattr(self, "pos_embedder"):
x = torch.cat([x, input_embed], 1)
else:
x = torch.cat([x, input], 1)
elif l != 0 and self.xyz_in_all:
x = torch.cat([x, xyz], 1)
x = lin(x)
if l < self.num_layers - 2:
if self.norm_layers is not None and l in self.norm_layers and not self.weight_norm:
bn = getattr(self, "bn" + str(l))
x = bn(x)
x = self.relu(x)
if self.dropout is not None and l in self.dropout:
x = F.dropout(x, p=self.dropout_prob, training=self.training)
#######################################################
# Apply SE(3) transformation to input point xyz
#######################################################
# Extract v (rotation), s (pivot point), t (translation)
v, s, t = x[:, :3], x[:, 3:-3], x[:, -3:]
# Convert log-quaternion to unit quaternion
q = kornia.quaternion_log_to_exp(v)
# Points centered around pivot points s
xyz_pivot = xyz - s
# Apply rotation
xyz_rotated = geometry_utils.rotate_points_with_quaternions(p=xyz_pivot, q=q)
# Transform back to world space by adding s and also add the additional translation
xyz_warped = xyz_rotated + s + t
return xyz_warped, x
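# Minimal shape sketch (added for clarity, not part of the original module;
# assumes utils.geometry_utils and kornia behave as used above): with
# positional_enc=False the decoder maps N x (latent_size + 3) inputs to
# N x 3 warped points plus the raw N x 9 SE(3) parameters (v, s, t).
if __name__ == "__main__":
    _decoder = PoseDecoderSE3(latent_size=256, dims=[512, 512, 512])
    _codes_xyz = torch.rand(8, 256 + 3)
    _xyz_warped, _params = _decoder(_codes_xyz)
    print(_xyz_warped.shape, _params.shape)  # torch.Size([8, 3]) torch.Size([8, 9])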
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-08-03 06:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hacker', '0017_auto_20180803_0633'),
]
operations = [
migrations.AddField(
model_name='academicdata',
name='major',
field=models.CharField(default='None', help_text='What is your current major?', max_length=255),
preserve_default=False,
),
]
|
python
|
import re
from bottle import route, run, template
# shamelessly stolen from https://www.w3schools.com/howto/howto_js_sort_table.asp
js = """
<script>
function sortTable(n) {
var table, rows, switching, i, x, y, shouldSwitch, dir, switchcount = 0;
table = document.getElementById("tbl");
switching = true;
dir = "asc";
while (switching) {
switching = false;
rows = table.rows;
for (i = 1; i < (rows.length - 1); i++) {
shouldSwitch = false;
x = rows[i].getElementsByTagName("TD")[n].innerHTML.toLowerCase();
y = rows[i + 1].getElementsByTagName("TD")[n].innerHTML.toLowerCase();
if (y.includes("->")) {
xNum = x.match( /\d+/ )
x = Number(xNum[0]);
yNum = y.match( /\d+/ )
y = Number(yNum[0]);
}
if (dir == "asc") {
if (x > y) {
shouldSwitch = true;
break;
}
} else if (dir == "desc") {
if (x < y) {
shouldSwitch = true;
break;
}
}
}
if (shouldSwitch) {
rows[i].parentNode.insertBefore(rows[i + 1], rows[i]);
switching = true;
switchcount ++;
} else {
if (switchcount == 0 && dir == "asc") {
dir = "desc";
switching = true;
}
}
}
}
</script>
"""
css = """
<style>
table {
border-collapse: collapse;
}
th {
text-align: left;
}
th:hover {
cursor: pointer;
}
table, th, td {
border: 1px solid;
}
th, td {
padding-left: 10px;
padding-right: 10px;
padding-top: 5px;
}
tr:hover {
background-color: coral;
}
</style>
"""
head = f"<head>{js}{css}</head>"
@route("/")
def index():
html = f"<html>{head}<body><table id=\"tbl\"><tr><th onclick=\"sortTable(0)\">Ports</th><th onclick=\"sortTable(1)\">Container Name</th></tr>"
output = []
with open('/code/ports.txt') as f:
raw = f.readlines()
for l in raw:
            # removeprefix avoids lstrip's per-character stripping of '0', '.' and ':'
            words = [w.strip(',').removeprefix('0.0.0.0:') for w in l.split() if ":::" not in w]
container = words[-1]
ports = [w for w in words[:-1] if "->" in w]
for port in ports:
row = f"<tr><td>{port}</td><td>{container}</td></tr>"
output.append(row)
key = lambda s: int(re.search(r'\d+', s)[0])
html += "".join(sorted(output, key=key)) + "</table></body></html>"
return html
run(host='0.0.0.0', port=80, debug=True)
|
python
|
#!/usr/bin/python3
import alley_oop
|
python
|
import glob
import json
import os
import argparse
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from sklearn.preprocessing import MinMaxScaler
POSE_BODY_25_PAIRS_RENDER_GPU = \
[1, 8, 1, 2, 1, 5, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 10, 11,
8, 12, 12, 13, 13, 14, 1, 0, 0, 15, 15, 17, 0, 16, 16, 18, 14,
19, 19, 20, 14, 21, 11, 22, 22, 23, 11, 24]
POSE_BODY_25_COLORS_RENDER_GPU = \
[255, 0, 85,
255, 0, 0,
255, 85, 0,
255, 170, 0,
255, 255, 0,
170, 255, 0,
85, 255, 0,
0, 255, 0,
255, 0, 0,
0, 255, 85,
0, 255, 170,
0, 255, 255,
0, 170, 255,
0, 85, 255,
0, 0, 255,
255, 0, 170,
170, 0, 255,
255, 0, 255,
85, 0, 255,
0, 0, 255,
0, 0, 255,
0, 0, 255,
0, 255, 255,
0, 255, 255,
0, 255, 255]
def main():
parser = argparse.ArgumentParser(
description="Convert poses to Parameter Space to Human Action Recognition"
)
parser.add_argument("--poses_base_dir", type=str,
default='/home/murilo/dataset/KTH',
help="Name of directory where input points are located.")
parser.add_argument("--input_dir", type=str,
default='2DPoses',
help="Name of directory to output computed features.")
parser.add_argument("--output_images_dir", type=str,
default='2DPoses_SpaceParam_Images',
help="Name of directory to output Parameter Space images.")
parser.add_argument("--image_height", type=int,
default='240',
help="(Frame Size)Image height to compute max distance in Parameter Space.")
parser.add_argument("--image_width", type=int,
default='320',
help="(Frame Size)Image width to compute max distance in Parameter Space.")
parser.add_argument("--draw_body_ids", type=int,
default='1',
help="Whether draw body joint ids in image with points in Parameter Space.")
parser.add_argument("--number_frames", type=int,
default=20,
help="Number of frames to extract features.")
parser.add_argument("--stride", type=int,
default=1,
help="Stride to compute features from the frames.")
args = parser.parse_args()
convert_parameter_space(args)
def convert_parameter_space(args):
# here compute image diagonal = max distance in Parameter Space
max_distance = int(((args.image_height ** 2) + (args.image_width ** 2)) ** (1 / 2))
print(max_distance)
thetas = np.linspace(-np.pi / 2, np.pi / 2, 180)
#poses_dir = os.path.join(args.poses_base_dir, args.input_dir)
points = 14
for root, directories, filenames in os.walk(os.path.join(args.poses_base_dir, args.input_dir)):
for directory in directories:
video_dir = os.path.join(root, directory)
print(video_dir)
frames = sorted(glob.glob(video_dir + '/*.json'))
if len(frames) > 0:
for x in range(0, len(frames), args.stride):
if x + args.number_frames < len(frames):
img_parameter_traj = {}
draw = {}
for u in range(14):
img_parameter_traj[u] = Image.new('RGB', (180 + 20, int(max_distance)), color='black')
draw[u] = ImageDraw.Draw(img_parameter_traj[u])
prev_points_parameter_space = None
for y in range(x, x + args.number_frames + 1):
body_parts = read_body_parts_file(frames[y])
if len(body_parts) > 0:
# compute parameter space points and draw image with points
points_parameter_space = \
compute_parameter_space(body_parts, max_distance, thetas)
if prev_points_parameter_space is None:
prev_points_parameter_space = points_parameter_space
else:
for a in range(len(points_parameter_space)):
#for a in [2,3,4,5]:
#if 1 == 1:
#a = 4
x1 = prev_points_parameter_space[a][0]
y1 = prev_points_parameter_space[a][1]
x2 = points_parameter_space[a][0]
y2 = points_parameter_space[a][1]
color_id = points_parameter_space[a][2]
shape = (x1, y1, x2, y2)
draw[a].line(shape, fill=get_color(color_id))
e_size = 2
draw[a].ellipse((x1 - e_size, abs(y1) - e_size, x1 + e_size, abs(y1) + e_size),
fill=get_color(color_id))
draw[a].ellipse((x2 - e_size, abs(y2) - e_size, x2 + e_size, abs(y2) + e_size),
fill=get_color(color_id))
prev_points_parameter_space = points_parameter_space
images_dir = video_dir.replace(args.input_dir, args.output_images_dir)
#images_dir, video_name = os.path.split(images_dir)
if not os.path.exists(images_dir):
os.makedirs(images_dir)
for i in range(14):
file = os.path.join(images_dir, str(i) + '_'+ str(x) + '_trajectories.png')
img_parameter_traj[i].save(file)
def read_body_parts_file(key_points_file):
body_parts_int = {}
# Read json pose points
with open(key_points_file) as f:
data = json.load(f)
body_parts = data['part_candidates'][0]
if len(body_parts) > 0:
for key, value in body_parts.items():
body_parts_int[int(key)] = [item for item in value]
return body_parts_int
def compute_parameter_space(body_parts, max_distance, thetas, draw_body_ids=True):
# Create image degrees x max_distance
points_parameter_space = {}
for i in range(0, 14, 1):
degree = degree_disc = theta = rho1 = rho2 = 0
x1, y1, x2, y2, color_id, id1, id2 = return_body_points_coord(i, body_parts)
if x1 > 0 and y1 > 0 and x2 > 0 and y2 > 0:
#print(i)
# print('x1:\t%i\ty1:\t%i\t\tx2:\t%i\ty2:\t%i' % (x1, y1, x2, y2))
if y1 - y2 != 0:
theta = np.arctan((x2 - x1) / (y1 - y2))
else:
theta = 0
# here convert theta from radians to degrees
degree = round(theta * (180 / np.pi))
# here find theta in thetas discrete list (only for image plot)
degree_disc = min(range(len(thetas)), key=lambda x: abs(thetas[x] - theta))
# position_min_degree = min(thetas, key=lambda x: abs(x - theta))
# compute rho from theta
rho1 = x1 * np.cos(theta) + y1 * np.sin(theta)
rho2 = x2 * np.cos(theta) + y2 * np.sin(theta)
#print(rho1, rho2)
#print(int(rho1), int(degree), x1, y1)
points_parameter_space[i] = (degree_disc, rho1, color_id)
# points_hough[i] = (degree, degree_disc, theta, int(rho))
return points_parameter_space
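# Worked example (illustrative): for a limb segment with endpoints
# (x1, y1) = (2, 3) and (x2, y2) = (6, 1), theta = arctan((6-2)/(3-1)) ~= 1.107 rad
# (~63.4 degrees), and rho1 = 2*cos(theta) + 3*sin(theta) ~= 3.58, which equals rho2,
# i.e. both endpoints of the segment collapse to a single (theta, rho) point
# in parameter space.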
def return_body_points_coord(i, body_parts):
x1 = y1 = x2 = y2 = x = color_id = id1 = id2 = 0
if i == 0: # 1 => 0 Neck
x = 13
elif i == 1: # 1 => 8 Upper body
x = 0
elif i == 2: # 2 => 3 Right Arm
x = 3
elif i == 3: # 3 => 4 Right Forearm
x = 4
elif i == 4: # 5 => 6 Left Arm
x = 5
elif i == 5: # 6 => 7 Left Forearm
x = 6
elif i == 6: # 9 => 10 Right Thigh
x = 8
elif i == 7: # 10 => 11 Right Leg
x = 9
elif i == 8: # 12 => 13 Left Thigh
x = 11
elif i == 9: # 13 => 14 Left Leg
x = 12
elif i == 10: # 8 => 9 Right Hip
x = 7
elif i == 11: # 8 => 12 Left Hip
x = 10
elif i == 12: # 1 => 2 Right Shoulder
x = 1
elif i == 13: # 1 => 5 Left Shoulder
x = 2
x = x * 2
if (len(body_parts[POSE_BODY_25_PAIRS_RENDER_GPU[x]]) > 0 and len(
body_parts[POSE_BODY_25_PAIRS_RENDER_GPU[x + 1]]) > 0):
x1, y1 = get_max_prob(body_parts[POSE_BODY_25_PAIRS_RENDER_GPU[x]])
x2, y2 = get_max_prob(body_parts[POSE_BODY_25_PAIRS_RENDER_GPU[x + 1]])
color_id = POSE_BODY_25_PAIRS_RENDER_GPU[x + 1] * 3
id1 = POSE_BODY_25_PAIRS_RENDER_GPU[x]
id2 = POSE_BODY_25_PAIRS_RENDER_GPU[x + 1]
return x1, y1, x2, y2, color_id, id1, id2
def draw_body(body_parts, height, width):
img = Image.new('RGB', (width, height), color='black')
draw = ImageDraw.Draw(img)
for k in sorted(body_parts):
if len(body_parts[k]) > 0:
x, y = get_max_prob(body_parts[k])
draw.point((x, y), fill=get_color(k * 3))
ctd = 0
for x in range(0, len(POSE_BODY_25_PAIRS_RENDER_GPU), 2):
print(x, x + 1)
print(POSE_BODY_25_PAIRS_RENDER_GPU[x], POSE_BODY_25_PAIRS_RENDER_GPU[x + 1])
print(body_parts[POSE_BODY_25_PAIRS_RENDER_GPU[x]], body_parts[POSE_BODY_25_PAIRS_RENDER_GPU[x + 1]])
print('\n')
if (len(body_parts[POSE_BODY_25_PAIRS_RENDER_GPU[x]]) > 0 and len(
body_parts[POSE_BODY_25_PAIRS_RENDER_GPU[x + 1]]) > 0):
x1, y1 = get_max_prob(body_parts[POSE_BODY_25_PAIRS_RENDER_GPU[x]])
x2, y2 = get_max_prob(body_parts[POSE_BODY_25_PAIRS_RENDER_GPU[x + 1]])
draw.line((x1, y1, x2, y2), fill=get_color(POSE_BODY_25_PAIRS_RENDER_GPU[x + 1] * 3), width=1)
ctd = ctd + 1
print(ctd)
img.show()
img.save('pil_red.png')
def get_max_prob(body_part):
m = 0
x = 0
y = 0
for p in range(0, len(body_part), 3):
if body_part[p + 2] > m:
m = float(body_part[p + 2])
x = int(body_part[p])
y = int(body_part[p + 1])
return x, y
def get_color(k):
return POSE_BODY_25_COLORS_RENDER_GPU[k], \
POSE_BODY_25_COLORS_RENDER_GPU[k + 1], \
POSE_BODY_25_COLORS_RENDER_GPU[k + 2]
if __name__ == "__main__":
main()
|
python
|
from flask import render_template,redirect,session,request, flash
from flask_app import app
from ..models import user, bag
@app.route("/bag/create")
def new_bag():
if 'user_id' not in session:
return redirect('/')
data = {
"id":session['user_id']
}
return render_template("add_bag.html", user = user.User.get_by_id(data))
@app.route('/bag/new', methods = ['POST'])
def create_bag():
if 'user_id' not in session:
return redirect('/logout')
if not bag.Bag.validate_bag(request.form):
return redirect('/bag/create')
data = {
"name":request.form["name"],
"driver":request.form["driver"],
"woods":request.form["woods"],
"hybrids":request.form["hybrids"],
"irons":request.form["irons"],
"wedges":request.form["wedges"],
"putter":request.form["putter"],
"user_id": session["user_id"]
}
bag.Bag.create_bag(data)
return redirect('/dashboard')
#show specific bag
@app.route('/bag/<int:id>')
def show(id):
if 'user_id' not in session:
return redirect('/')
data = {
"id":id
}
user_data = {
"id":session['user_id']
}
return render_template("show_bag.html", bag=bag.Bag.get_by_id(data), user = user.User.get_by_id(user_data))
@app.route('/edit/<int:id>')
def edit_bag(id):
if 'user_id' not in session:
return redirect('/logout')
data = {
"id":id
}
user_data = {
"id":session['user_id']
}
return render_template("edit_bag.html", edit = bag.Bag.get_by_id(data), user = user.User.get_by_id(user_data))
@app.route("/bag/update",methods=['POST'])
def update_bag():
if 'user_id' not in session:
return redirect('/logout')
if not bag.Bag.validate_bag(request.form):
return redirect('/bag/create')
data = {
"name":request.form["name"],
"driver":request.form["driver"],
"woods":request.form["woods"],
"hybrids":request.form["hybrids"],
"irons":request.form["irons"],
"wedges":request.form["wedges"],
"putter":request.form["putter"],
"id": request.form['id']
}
bag.Bag.update(data)
return redirect('/dashboard')
@app.route("/like", methods = ["POST"])
def like():
data = {
'bag_id': request.form['bag_id'],
'user_id': session['user_id']
}
user.User.like(data)
return redirect ("/dashboard")
@app.route("/bag/delete/<int:id>")
def delete(id):
if 'user_id' not in session:
return redirect('/logout')
data = {
"id": id
}
bag.Bag.delete(data)
return redirect ('/dashboard')
|
python
|
import os
import zipfile
from arelle.CntlrCmdLine import parseAndRun
# from https://specifications.xbrl.org/work-product-index-registries-units-registry-1.0.html
REGISTRY_CONFORMANCE_SUITE = 'tests/resources/conformance_suites/utr/registry/utr-conf-cr-2013-05-17.zip/utr-conf-cr-2013-05-17/2013-05-17'
STRUCTURE_CONFORMANCE_SUITE_ZIP = 'tests/resources/conformance_suites/utr/structure/utr-structure-conf-cr-2013-11-18.zip'
STRUCTURE_CONFORMANCE_SUITE = os.path.join(STRUCTURE_CONFORMANCE_SUITE_ZIP, 'conf/utr-structure')
BASE_ARGS = [
'--testcaseResultsCaptureWarnings',
'--utr',
'--validate',
]
REGISTRY_ARGS = BASE_ARGS + [
'--file', os.path.join(REGISTRY_CONFORMANCE_SUITE, 'index.xml'),
'--utrUrl', 'tests/resources/conformance_suites/utr/registry/utr.xml',
'--csvTestReport', 'UTRunit-report.csv',
'--logFile', 'UTRunit-log.txt',
]
STRUCTURE_ARGS = BASE_ARGS + [
'--file', os.path.join(STRUCTURE_CONFORMANCE_SUITE, 'index.xml'),
'--utrUrl', os.path.join(STRUCTURE_CONFORMANCE_SUITE, 'utr-for-structure-conformance-tests.xml'),
'--csvTestReport', 'UTRstr-report.csv',
'--logFile', 'UTRstr-log.txt',
]
if __name__ == "__main__":
print('Running registry tests...')
parseAndRun(REGISTRY_ARGS)
print('Running structure tests...')
parseAndRun(STRUCTURE_ARGS)
print('Running malformed UTRs tests...')
malformed_utr_files = []
with zipfile.ZipFile(STRUCTURE_CONFORMANCE_SUITE_ZIP, 'r') as zipf:
for f in zipfile.Path(zipf, 'conf/utr-structure/malformed-utrs/').iterdir():
if f.is_file() and f.name.endswith('.xml'):
malformed_utr_files.append((f.at, f.name))
for path_in_zip, name in malformed_utr_files:
basename = name.removesuffix('.xml')
args = BASE_ARGS + [
'--file', os.path.join(STRUCTURE_CONFORMANCE_SUITE, 'tests', '01-simple', 'simpleValid.xml'),
'--utrUrl', os.path.join(STRUCTURE_CONFORMANCE_SUITE_ZIP, path_in_zip),
'--csvTestReport', f'UTRstr-report-{basename}.csv',
'--logFile', f'UTRstr-log-{basename}.txt',
]
parseAndRun(args)
|
python
|
import os
import torch
import numpy as np
from io import BytesIO
import scipy.misc
#import tensorflow as tf
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
from torchvision.datasets import ImageFolder
from torch.autograd import Variable
from matplotlib import pyplot as plt
from PIL import Image
from tqdm import tqdm
from skimage import io
TILE_SIZE = 256
BASE_DIR = '/raid.dell1/world/'
def pil_loader(path):
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
img = Image.open(f)
return np.array(img.convert('RGB'))
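# xy_from_geodetic applies the Web Mercator projection: x is a linear remap of longitude,
# and y = 0.5 - ln((1 + sin(lat)) / (1 - sin(lat))) / (4*pi), so (0, 0) is the top-left and
# (1, 1) the bottom-right of the world map; latitude is clipped to the projection's limits.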
def xy_from_geodetic(lat, lon):
lat = np.clip(lat, -85.05, 85.05)
lon = np.clip(lon, -179.9999999, 179.9999999)
x = lon/360.0 + .5
sinlat = np.sin(np.radians(lat))
y = 0.5 - np.log((1 + sinlat) / (1 - sinlat)) / (4 * np.pi)
return x,y
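# get_tile_idx descends a quadtree: at each zoom level the unit square splits into four
# quadrants labelled '0'-'3', and the concatenated labels name the tile's directory on
# disk (joined with '/' in get_tile below).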
def get_tile_idx(x, y, zoom):
if (not 0 <= x < 1.0) or ((not 0 <= y < 1.0)):
return 'NULL'
if zoom == 0:
return ''
if x >= .5 and y >= .5:
q = '3'
elif y >= .5:
q = '2'
elif x >= .5:
q = '1'
else:
q = '0'
return q + get_tile_idx(x*2 % 1, y*2 % 1, zoom - 1)
def get_tile_pos(x, y, zoom):
print(x,y)
if zoom == 0:
return x, y
return get_tile_pos(x*2 % 2, y*2 % 2, zoom - 1)
def get_tile(idx):
if idx == 'NULL':
return np.zeros((TILE_SIZE, TILE_SIZE, 3), dtype=np.uint8)
sub_path = ''.join([a + '/' for a in idx])
path = BASE_DIR + sub_path + 'img.jpeg'
return pil_loader(path)
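# get_custom_tile stitches the 2x2 block of tiles surrounding (x, y) at the given zoom,
# then crops a TILE_SIZE x TILE_SIZE window centred on the requested point, so the output
# is always a full tile even when the point lies near a tile boundary.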
def get_custom_tile(x, y, zoom):
assert 0 <= x < 1
assert 0 <= y < 1
assert zoom >= 1
zoom = int(zoom)
tiles = []
delta = (.5)**(zoom + 1)
for x_off in [-delta, delta]:
tiles.append([])
for y_off in [-delta, delta]:
tiles[-1].append(get_tile(get_tile_idx(x + x_off, y + y_off, zoom)))
tiles[-1] = np.vstack(tiles[-1])
tiles = np.hstack(tiles)
#x_pos, y_pos = get_tile_pos(x,y,zoom)
for i in range(zoom):
x = (x - .25) * 2 % 1.0 + .5
y = (y - .25) * 2 % 1.0 + .5
x_pix = int((x * TILE_SIZE))
y_pix = int((y * TILE_SIZE))
return tiles[y_pix - TILE_SIZE//2: y_pix + TILE_SIZE//2,
x_pix - TILE_SIZE//2: x_pix + TILE_SIZE//2]
def get_custom_tile_geodetic(lat, lon, zoom):
x,y = xy_from_geodetic(lat, lon)
return get_custom_tile(x, y, zoom)
class SatImageDataset(Dataset):
def __init__(self, transform=None, size=1000000, max_zoom=8):
self.size = size
self.transform = transform
self.samples = np.random.uniform(0.0, 1.0, size=(self.size, 3))
self.samples[:,2] = 8.0 # np.floor(max_zoom*self.samples[:,2]) + 1.0
def __len__(self):
return len(self.samples)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
sample = self.samples[idx]
image = Image.fromarray(get_custom_tile(*sample))
if self.transform:
image = self.transform(image)
meta = torch.FloatTensor(sample)
sample = {'image': image, 'meta': meta}
return sample
class dataloader:
def __init__(self, config):
self.batch_table = {4:32, 8:32, 16:32, 32:16, 64:8, 128:8, 256:12, 512:3, 1024:1} # change this according to available gpu memory.
self.batchsize = int(self.batch_table[pow(2,2)]) # we start from 2^2=4
self.imsize = int(pow(2,2))
self.num_workers = 0
def renew(self, resl):
print('[*] Renew dataloader configuration, load data from {}.'.format('raid.dell1'))
self.batchsize = int(self.batch_table[pow(2,resl)])
self.imsize = int(pow(2,resl))
self.dataset = SatImageDataset(
transform=transforms.Compose([
transforms.Resize(size=(self.imsize,self.imsize), interpolation=Image.NEAREST),
transforms.ToTensor(),
]))
self.dataloader = DataLoader(
dataset=self.dataset,
batch_size=self.batchsize,
shuffle=True,
num_workers=self.num_workers
)
    def __iter__(self):
        return iter(self.dataloader)
    def __next__(self):
        # DataLoader itself is an iterable, not an iterator; cache one so next(loader) works.
        if not hasattr(self, '_iterator'):
            self._iterator = iter(self.dataloader)
        return next(self._iterator)
def __len__(self):
return len(self.dataloader.dataset)
def get_batch(self):
dataIter = iter(self.dataloader)
return next(dataIter)
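# Minimal usage sketch (illustrative only; the 'config' argument is unused by __init__,
# so None is passed here, and the resolution level follows the PGGAN-style batch_table):
#   loader = dataloader(None)
#   loader.renew(resl=4)         # 2**4 = 16x16 crops, batch size looked up in batch_table
#   batch = loader.get_batch()   # dict with 'image' (B, 3, 16, 16) and 'meta' (B, 3)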
|
python
|
import logging
from datetime import datetime, timezone
from brownie import chain
from yearn.historical_helper import export_historical, time_tracking
from yearn.networks import Network
from yearn.treasury.treasury import StrategistMultisig
from yearn.utils import closest_block_after_timestamp
logger = logging.getLogger('yearn.historical_sms_exporter')
def main():
start = datetime.now(tz=timezone.utc)
end = {
Network.Mainnet: datetime(2021, 1, 28, 9, 10, tzinfo=timezone.utc), # first inbound sms tx
Network.Fantom: datetime(2021, 6, 17, tzinfo=timezone.utc), # Fantom SMS deployed
}[chain.id]
data_query = {
Network.Mainnet: 'sms_assets{network="ETH"}',
Network.Fantom: 'sms_assets{network="FTM"}',
}[chain.id]
export_historical(
start,
end,
export_chunk,
export_snapshot,
data_query
)
def export_chunk(chunk, export_snapshot_func):
sms = StrategistMultisig()
for snapshot in chunk:
ts = snapshot.timestamp()
export_snapshot_func(
{
'treasury': sms,
'snapshot': snapshot,
'ts': ts,
'exporter_name': 'historical_sms'
}
)
@time_tracking
def export_snapshot(sms, snapshot, ts, exporter_name):
block = closest_block_after_timestamp(ts)
assert block is not None, "no block after timestamp found"
sms.export(block, ts)
logger.info("exported SMS snapshot %s", snapshot)
|
python
|
def write_fbk(file_name, feat_path):
with open(file_name, 'r') as f:
lines = f.readlines()
for lin_num, x in enumerate(lines):
audio_name = x.split("/wav/")[1].split(".")[0]
feat_name = ''.join([feat_path, audio_name, '.fbk'])
lines[lin_num] = ''.join([x.strip(), ' ', feat_name, '\n'])
with open(file_name, 'w') as f:
        for line in lines:
            f.write(line)
def main():
file_name = '/data/mifs_scratch/mjfg/zs323/yr4project/speechchain/egs/babel/asrtts/exp/tts_/outputs_snapshot.ep.200_denorm/convert/lib/coding/segmented_test.dev.fbk'
feat_path = '/data/mifs_scratch/mjfg/zs323/yr4project/speechchain/egs/babel/asrtts/exp/tts_/outputs_snapshot.ep.200_denorm/fbk/'
write_fbk(file_name, feat_path)
if __name__ == "__main__":
main()
|
python
|
import torch
import torch.nn as nn
import numpy as np
from skimage.morphology import label
class Dice(nn.Module):
"""The Dice score.
"""
def __init__(self):
super().__init__()
def forward(self, output, target):
"""
Args:
output (torch.Tensor) (N, C, *): The model output.
target (torch.LongTensor) (N, 1, *): The data target.
Returns:
metric (torch.Tensor) (C): The dice scores for each class.
"""
# Get the one-hot encoding of the prediction and the ground truth label.
pred = output.argmax(dim=1, keepdim=True)
pred = torch.zeros_like(output).scatter_(1, pred, 1)
target = torch.zeros_like(output).scatter_(1, target, 1)
# Calculate the dice score.
reduced_dims = list(range(2, output.dim())) # (N, C, *) --> (N, C)
intersection = 2.0 * (pred * target).sum(reduced_dims)
union = pred.sum(reduced_dims) + target.sum(reduced_dims)
score = intersection / (union + 1e-10)
return score.mean(dim=0)
class Accuracy(nn.Module):
"""The accuracy for the classification task.
"""
def __init__(self):
super().__init__()
def forward(self, output, target):
"""
Args:
output (torch.Tensor) (N, C): The model output.
target (torch.LongTensor) (N): The data target.
Returns:
metric (torch.Tensor) (0): The accuracy.
"""
pred = torch.argmax(output, dim=1)
return (pred == target).float().mean()
class FalseNegativeSize(nn.Module):
"""The false negative target size.
"""
def __init__(self):
super().__init__()
def forward(self, output, target):
"""
Args:
output (torch.Tensor) (N, C, *): The model output.
target (torch.LongTensor) (N, 1, *): The data target.
Returns:
            scores (list of list of float): The sizes (in units of 1000 voxels) of the
                completely missed (false negative) targets for each foreground class.
"""
scores = []
# Get the one-hot encoding of the prediction and the ground truth label.
pred = output.argmax(dim=1, keepdim=True)
pred = torch.zeros_like(output).scatter_(1, pred, 1)
target = torch.zeros_like(output).scatter_(1, target, 1)
# Calculate the score for each class
for i in range(1, output.shape[1]):
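            # skimage's label() gives each connected component of the ground-truth mask a
            # distinct integer id; a component counts as a false negative when the
            # prediction overlaps none of its voxels.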
label_target = label(target[:, i].squeeze(dim=0).cpu().numpy(), connectivity=output.dim()-2)
label_target_list = np.unique(label_target)[1:]
_pred = pred[:, i].squeeze(dim=0).cpu().numpy()
score = []
for target_id in label_target_list:
if (np.sum((_pred == 1) * (label_target == target_id)) == 0):
score.append(np.sum(label_target == target_id) / 1000.0)
scores.append(score)
return scores
|
python
|
import sort_for_vexflow
import pretty_midi
def notation(orchestra, inst, tech, dyn, note, tgt, onoff, microtone,masking_order_idx):
annotations=[]
orchestration_slice=[]
tgts=[]
for i in range(len(inst)):
# Check that you input proper values:
if tech[i] in list(orchestra[inst[i]].keys()):
if dyn[i] in list(orchestra[inst[i]][tech[i]].keys()):
if int(note[i]) in list(orchestra[inst[i]][tech[i]][dyn[i]].keys()):
orchestration_slice.append(
[inst[i], tech[i], dyn[i], int(note[i]), tgt[i], onoff[i]]) # Note comes as string, convert to int
# Do annotations
annotations.append(inst[i] + " " + dyn[i] + " " + tech[i])
# If marked as target, add to target list
if tgt[i]:
tgts.append(i)
highlights = []
for i in range(len(orchestration_slice)):
highlights.append('')
for i in range(len(masking_order_idx)):
try:
if i == 0:
highlights[masking_order_idx[i]] = 'red'
# outer_style[masking_order_idx[i]]['backgroundColor'] = 'red'
if i == 1:
highlights[masking_order_idx[i]] = 'magenta'
# outer_style[masking_order_idx[i]]['backgroundColor'] = 'magenta'
if i == 2:
highlights[masking_order_idx[i]] = 'yellow'
# outer_style[masking_order_idx[i]]['backgroundColor'] = 'yellow'
        except IndexError:
            # masking_order_idx may reference an entry that was filtered out above
            pass
for i in range(len(note)):
note[i]=int(note[i])+microtone[i]
note, annotations, tgts, highlights, srt_idx = sort_for_vexflow.sort_notes(note, annotations, tgts, highlights)
notes = [pretty_midi.note_number_to_name(int(round(i))) for i in note] # Change to note names
notes = [i.lower() for i in notes]
return {"notes":notes, "notenumbers":note, "instruments":annotations, "target":tgts, "highlights":highlights}
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-06-18 07:56
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('disturbance', '0005_auto_20180618_1123'),
]
operations = [
migrations.RemoveField(
model_name='proposalapprovergroup',
name='activities',
),
migrations.RemoveField(
model_name='proposalapprovergroup',
name='regions',
),
migrations.RemoveField(
model_name='proposalassessorgroup',
name='activities',
),
migrations.RemoveField(
model_name='proposalassessorgroup',
name='regions',
),
]
|
python
|
def shared_function(x, sep=':'):
return sep.join(['got', x])
|
python
|
from cleaner_console import Console
if __name__ == '__main__':
Console()
|
python
|
from node import constants
def shout(data):
data['type'] = 'shout'
return data
def proto_page(uri, pubkey, guid, text, signature, nickname, PGPPubKey, email,
bitmessage, arbiter, notary, notary_description, notary_fee,
arbiter_description, sin, homepage, avatar_url):
data = {
'type': 'page',
'uri': uri,
'pubkey': pubkey,
'senderGUID': guid,
'text': text,
'nickname': nickname,
'PGPPubKey': PGPPubKey,
'email': email,
'bitmessage': bitmessage,
'arbiter': arbiter,
'notary': notary,
'notary_description': notary_description,
'notary_fee': notary_fee,
'arbiter_description': arbiter_description,
'sin': sin,
'homepage': homepage,
'avatar_url': avatar_url,
'v': constants.VERSION
}
return data
def query_page(guid):
data = {
'type': 'query_page',
'findGUID': guid,
'v': constants.VERSION
}
return data
def proto_store(key, value, originalPublisherID, age):
data = {
'type': 'store',
'key': key,
'value': value,
'originalPublisherID': originalPublisherID,
'age': age,
'v': constants.VERSION
}
return data
|
python
|
# -*- coding: utf-8 -*- #
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class representing a source container repository or directory."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
import enum
from googlecloudsdk.core import exceptions
import six
class UnknownSourceError(exceptions.Error):
"""The provided source could not be identified."""
pass
class SourceRef(object):
"""Reference to image or local directory."""
class SourceType(enum.Enum):
DIRECTORY = 1
IMAGE = 2
def __str__(self):
return 'SourceRef({}, {})'.format(self.source_type, self.source_path)
def __repr__(self):
return str(self)
def __eq__(self, other):
if not isinstance(other, SourceRef):
return False
return (other.source_type == self.source_type and
other.source_path == self.source_path)
def __init__(self, source_type, source_path):
self.source_type = source_type
self.source_path = source_path
@classmethod
def MakeImageRef(cls, image_arg):
"""Create a SourceRef from provided image name."""
return cls(cls.SourceType.IMAGE, six.text_type(image_arg))
@classmethod
def MakeDirRef(cls, source_arg):
"""Create a SourceRef from the provided directory name."""
if os.path.isdir(source_arg):
return cls(cls.SourceType.DIRECTORY, source_arg)
raise UnknownSourceError(
'Could not identify source [{}]'.format(source_arg))
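# Usage sketch (paths and image names below are illustrative only):
#   SourceRef.MakeImageRef('gcr.io/my-project/my-image')  # -> SourceRef(SourceType.IMAGE, ...)
#   SourceRef.MakeDirRef('.')                             # -> SourceRef(SourceType.DIRECTORY, '.')
#   MakeDirRef raises UnknownSourceError when the argument is not an existing directory.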
|
python
|
# -*- coding: utf-8 -*-
"""
controlbeast.utils.loader
~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2013 by the ControlBeast team, see AUTHORS.
:license: ISC, see LICENSE for details.
"""
import importlib
import os
import re
def __filter_members(item):
"""
Filter function to detect classes within a module or package
:param str item: the item to be tested with this filter
"""
exclude = (
re.escape('__builtins__'),
re.escape('__cached__'),
re.escape('__doc__'),
re.escape('__file__'),
re.escape('__loader__'),
re.escape('__name__'),
re.escape('__package__'),
re.escape('__path__')
)
pattern = re.compile('|'.join(exclude))
return not pattern.search(item)
def __filter_modules(item):
"""
Filter function to detect processor modules and packages
:param str item: the item to be tested with this filter
"""
exclude = (
re.escape('__init__.py'),
re.escape('base.py')
)
pattern = re.compile('|'.join(exclude))
return not pattern.search(item)
def detect_class_modules(module, parent=object):
"""
Detect available class modules or packages and return a dictionary of valid class names, referring to
the module they are contained within.
:param str module: the module or package to be scanned for classes
:param parent: the class potential candidates must be derived off
"""
# initialise result dictionary
result = {}
# get a list of all files and directories inside the module
try:
package_instance = importlib.import_module(module)
except ImportError:
return result
if package_instance.__file__[-11:] == '__init__.py':
gen_dir = os.listdir(os.path.dirname(os.path.realpath(package_instance.__file__)))
else:
gen_dir = [os.path.realpath(package_instance.__file__)]
# only consider modules and packages, and exclude the base module
for file_candidate in filter(__filter_modules, gen_dir):
# Python files are modules; the name needs to be without file ending
if file_candidate[-3:] == '.py':
file_candidate = file_candidate[:-3]
# try if the detected package or module can be imported
try:
class_module_candidate = importlib.import_module('.'.join([module, file_candidate]))
except ImportError:
class_module_candidate = None
# if the module or module could be imported, test if it contains classes derived from the parent class
if class_module_candidate:
for member_candidate in filter(__filter_members, dir(class_module_candidate)):
try:
if issubclass(getattr(class_module_candidate, member_candidate), parent) \
and getattr(class_module_candidate, member_candidate).__name__ != parent.__name__:
result[member_candidate] = class_module_candidate.__name__
except TypeError:
pass
# return the dictionary
return result
def load_member(module, member):
"""
Load a member (function, class, ...) from a module and return it
:param str module: the module or package name where the class should be loaded from
:param str member: the name of the member to be loaded
"""
try:
module = importlib.import_module(module)
except ImportError:
return None
try:
result = getattr(module, member)
except AttributeError:
return None
return result
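# Usage sketch (the package and class names below are hypothetical, for illustration only):
#   classes = detect_class_modules('controlbeast.scm', parent=object)
#   handler_cls = load_member(classes['CbSCMGit'], 'CbSCMGit') if 'CbSCMGit' in classes else None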
|
python
|
# Generated by Django 3.2.4 on 2021-06-14 12:18
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('main_app', '0004_auto_20210614_0157'),
]
operations = [
migrations.AddField(
model_name='tag',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
),
]
|
python
|
from __future__ import annotations
from typing import (
Any,
Dict,
Iterator,
Mapping,
MutableMapping,
Optional,
Tuple,
TypeVar,
)
from apiwrappers import utils
VT = TypeVar("VT")
class NoValue:
__slots__: Tuple[str, ...] = tuple()
def __repr__(self):
return f"{self.__class__.__name__}()"
class CaseInsensitiveDict(MutableMapping[str, VT]):
__slots__ = ("_data",)
def __init__(self, data: Optional[Mapping[str, VT]] = None, **kwargs: VT):
self._data: Dict[str, Tuple[str, VT]] = {}
if data is not None:
self.update(data)
self.update(kwargs)
def __getitem__(self, key: str) -> VT:
return self._data[key.lower()][1]
def __setitem__(self, key: str, value: VT) -> None:
self._data[key.lower()] = (key, value)
def __delitem__(self, key: str) -> None:
del self._data[key.lower()]
def __iter__(self) -> Iterator[str]:
return (original_key for original_key, value in self._data.values())
def __len__(self) -> int:
return len(self._data)
def __repr__(self) -> str:
if self._data:
return f"{self.__class__.__name__}({dict(self)})"
return f"{self.__class__.__name__}()"
class Url:
"""
Class to work with formatted string URLs and joining urls and path.
    Sometimes it is useful to keep the original format string around, for example,
    for logging or metrics. This class stores the original format string and its
    replacement fields, substituting them only when needed.
Args:
template: a URL as format string, e.g. "https://example.org/users/{id}".
replacements: values to format template with.
Usage::
>>> from apiwrappers import Url
>>> url = Url("https://example.org")
>>> url("/users/{id}", id=1)
Url('https://example.org/users/{id}', id=1)
>>> str(url("/users/{id}", id=1))
'https://example.org/users/1'
"""
def __init__(self, template: str, **replacements: Any):
self.template = template
self.replacements = replacements
def __str__(self) -> str:
return self.template.format_map(self.replacements)
def __repr__(self) -> str:
params = ", ".join(f"{k}={repr(v)}" for k, v in self.replacements.items())
if self.replacements:
return f"{self.__class__.__name__}({repr(self.template)}, {params})"
return f"{self.__class__.__name__}({repr(self.template)})"
def __call__(self, path: str, **replacements: Any) -> Url:
"""
Joins path with current URL and return a new instance.
Args:
path: a path as format string, e.g. "/users/{id}".
            replacements: values to format the path with.
        Returns: a new instance with the path joined to the current URL.
"""
url = utils.build_url(self.template, path)
return Url(url, **{**self.replacements, **replacements})
def __eq__(self, other: Any) -> bool:
if isinstance(other, str):
return str(self) == other
if isinstance(other, self.__class__):
return (
self.template == other.template
and self.replacements == other.replacements
)
return NotImplemented
|
python
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from baseapp import get_version
setup(
name='feincms_baseapp',
version=get_version(),
description='This is a base app and contenttype for Feincms.',
author='',
author_email='',
url='https://github.com/',
packages=find_packages(),
zip_safe=False,
include_package_data=True,
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities',
]
)
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/6/24 20:55
# @Author : ganliang
# @File : doctest_test.py
# @Desc : doctest测试 执行模块测试
import doctest
import src.deco
if __name__ == "__main__":
doctest.testmod(src.deco)
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumby/flask-thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2015 thumby.io [email protected]
class FlaskThumbor:
__name__ = "FlaskThumbor"
def __init__(self, app=None):
if app is not None:
self.init_app(app)
def init_app(self, app):
from libthumbor import CryptoURL
from flask import current_app
thumbor_server = app.config.get('THUMBOR_SERVER', None)
thumbor_key = app.config.get('THUMBOR_KEY', None)
if thumbor_server is None or thumbor_key is None:
raise RuntimeError(
'Make sure both THUMBOR_SERVER (URL for the thumbor server that will serve your images) and '
'THUMBOR_KEY (security key for the thumbor server you\'re connecting to) are set in your '
'Flask configuration.'
)
app.thumbor_crypto = CryptoURL(key=thumbor_key)
app.thumbor_server = thumbor_server.rstrip('/')
@app.context_processor
def utility_processor():
def thumbor(**kw):
return '%s%s' % (
current_app.thumbor_server,
current_app.thumbor_crypto.generate(**kw)
)
return dict(thumbor=thumbor)
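# Usage sketch (URL and key values are illustrative only): set THUMBOR_SERVER and
# THUMBOR_KEY in the Flask config, call FlaskThumbor(app), then in a Jinja template:
#   <img src="{{ thumbor(image_url='path/to/img.jpg', width=300, height=200) }}">
# The context processor concatenates the server URL with the signed path from libthumbor.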
|
python
|