#
# wcsmod -- module wrapper for WCS calculations.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
"""
We are fortunate to have several possible choices for a python WCS package
compatible with Ginga: astlib, kapteyn, starlink and astropy.
kapteyn and astropy wrap Mark Calabretta's "WCSLIB", astLib wraps
Jessica Mink's "wcstools", and I'm not sure what starlink uses (their own?).
Note that astlib requires pyfits (or astropy) in order to create a WCS
object from a FITS header.
To force the use of one, do:
.. code-block:: python
from ginga.util import wcsmod
wcsmod.use('kapteyn')
before you load any images. Otherwise Ginga will try to pick one for
you.
Note that you can register custom WCS types using:
.. code-block:: python
from ginga.util.wcsmod.common import register_wcs
register_wcs('mywcs', MyWCSClass, list_of_coord_types)
Look at the implemented WCS wrappers for details.
"""
import sys
import os.path
import glob
from ginga.misc.ModuleManager import my_import
from . import common
# Module variables that get configured at module load time
# or when use() is called
wcs_configured = False
WCS = None
"""Alias to the chosen WCS system."""
# Holds names of coordinate types
coord_types = []
display_types = ['sexagesimal', 'degrees']
# try to load them in this order until we find one that works.
# If none can be loaded, we default to the BareBones dummy WCS
wcs_try_order = ('astropy', 'astropy_ape14', 'kapteyn', 'starlink', 'astlib',
'barebones')
wcs_home = os.path.split(sys.modules[__name__].__file__)[0]
def use(wcspkg, raise_err=True):
"""Choose WCS package."""
global coord_types, wcs_configured, WCS
if wcspkg not in common.custom_wcs:
# Try to dynamically load WCS
modname = 'wcs_%s' % (wcspkg)
path = os.path.join(wcs_home, '%s.py' % (modname))
try:
my_import(modname, path)
except ImportError as e:
if raise_err:
raise e
return False
if wcspkg in common.custom_wcs:
bnch = common.custom_wcs[wcspkg]
WCS = bnch.wrapper_class
coord_types = bnch.coord_types
wcs_configured = True
return True
return False
# configure at least one WCS wrapper
if not wcs_configured:
# Try some preconfigured names
for name in wcs_try_order:
if use(name, raise_err=False):
break
if not wcs_configured:
wcs_path = os.path.join(wcs_home, 'wcs_*.py')
# look up WCS wrappers we have in this directory
for path in glob.glob(wcs_path):
dirname, filename = os.path.split(path)
modname, ext = os.path.splitext(filename)
modname = modname[4:] # strip off "wcs_"
if use(modname, raise_err=False):
break
def get_wcs_wrappers():
return list(common.custom_wcs.keys())
def get_wcs_class(name):
"""Get a WCS class corresponding to the registered name.
Will raise a KeyError if a class of the given name does not exist.
"""
return common.custom_wcs[name]
# END
|
python
|
#!/usr/bin/env python3
#
# Configure logging
#
import logging
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--debug", action="store_true", help="Show debugging output.")
args = parser.parse_args()
log_level = logging.DEBUG if args.debug else logging.INFO
logging.basicConfig(level=log_level, format='[%(levelname)8s] %(name)-15s : %(message)s')
#
# The following demonstrates random access
# operations on sequences.
#
#
# Generate a sequence
#
import os
print()
print("Running: \"python3 ./write_from_files.py > /dev/null\"")
os.system("python3 ./write_from_files.py > /dev/null")
print()
print()
#
# Read single items in linear mode
#
import itypes
from itypes import Sequence
seq = Sequence("out_write_from_files/data.gridseq").read()
device = "numpy"
# Access by linear index
frame = seq.frames()[0]
# NOTE that you can also access single ids as in read_linear_entry.py
struct = frame.torch_struct("hwc", device)
print('Read frame:')
print(struct)
print()
# Access by scene and frame name
frame = seq.data["Scene-001"]["0000000001"]
# NOTE that you can also access single ids as in read_linear_entry.py
struct = frame.torch_struct("hwc", device)
print('Read frame:')
print(struct)
print()
|
python
|
from multiprocessing import Manager, Process
def to_add(d, k, v):
d[k] = v
if __name__ == "__main__":
process_dict = Manager().dict()
p1 = Process(target=to_add, args=(process_dict, 'name', 'li'))
p2 = Process(target=to_add, args=(process_dict, 'age', 13))
p1.start()
p2.start()
p1.join()
p2.join()
print(process_dict)
|
python
|
''' Create words from an existing wordlist '''
import random
import re
class WordBuilder(object):
''' uses an existing corpus to create new phonemically consistent words '''
def __init__(self, initial='>', terminal='<', chunk_size=2):
#indicators for start and ends of words - set if necessary to avoid collision
self.initial = initial
self.terminal = terminal
self.chunk_size = chunk_size
self.links = {
self.initial: []
}
self.average_word_length = 0
self.shortest = None
def ingest(self, corpus_file):
''' load and parse a pre-formatted and cleaned text file. Garbage in, garbage out '''
corpus = open(corpus_file)
total_letters = 0
total_words = 0
shortest_word = 100
for word in corpus.readlines():
# clean word
word = word.strip()
word = re.sub(r'[\',\.\"]', '', word)
total_letters += len(word)
total_words += 1
shortest_word = len(word) if len(word) < shortest_word else shortest_word
# iterate through n-letter groups, where n = chunk_size
n = self.chunk_size
start = 0
# >: C, Cys: t, yst: i
self.links[self.initial].append(word[0:n])
for position in range(n, len(word)):
start = position - n if position - n >= 0 else 0
base = word[start:position]
if not base in self.links:
self.links[base] = []
self.links[base].append(word[position])
if not word[-n:] in self.links:
self.links[word[-n:]] = []
self.links[word[-n:]].append(self.terminal)
self.average_word_length = total_letters / total_words
self.shortest = shortest_word
def get_word(self, word=None):
''' creates a new word '''
word = word if word is not None else self.initial
if not self.terminal in word:
if len(word) > self.average_word_length and \
self.terminal in self.links[word[-self.chunk_size:]] \
and random.randint(0, 1):
addon = self.terminal
else:
options = self.links[word[-self.chunk_size:]]
addon = random.choice(options)
word = word + addon
return self.get_word(word)
return word[1:-1]
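# Hedged usage sketch (not part of the original module): "wordlist.txt" is a
# hypothetical newline-separated corpus, and the output varies with random.choice.
if __name__ == "__main__":
    builder = WordBuilder(chunk_size=2)
    builder.ingest("wordlist.txt")  # hypothetical corpus file, one word per line
    for _ in range(5):
        print(builder.get_word())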
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Sun May 01 20:28:46 2016
@author: Anna
Directory structure: assumes the galaxy directory is the working directory; input_data/ should contain the pars, phot and fake files
This script generates a CMD plot in the same directory as the program, CMDscript.py
Use the optional -phot and -fake flags to specify their locations, or change the defaults in the argument parser below
The column numbers for the photometry data default to 2, 4, 9 and 11; override them on the command line or edit the defaults
Give the galaxy name after the -fake=... argument; the plot is saved as [galaxy name].png
Syntax: python CMDscript.py [path to galaxy directory, ending in /] -phot=[path from galaxy directory to phot file] [col1 col2 col3 col4] -fake=[path from galaxy directory to fake file] [galaxy name]
e.g. python CMDscript.py /work/04316/kmcquinn/wrangler/shield/galaxies/agc223254/ -phot=phot/a223254_tilted_ellipse.phot3 2 4 9 11 -fake=input_data/fake AGC223254
The output .png files are stored in the same directory as this program
"""
import matplotlib
matplotlib.use("Agg")
# Comment this line if not running on tacc
import matplotlib.pyplot as plt
plt.ion()
import numpy as np
import pylab as pl
import sys
from matplotlib.ticker import MultipleLocator, FormatStrFormatter, AutoMinorLocator
import argparse
#def parse_options():
# Creates argument parser
parser = argparse.ArgumentParser(description='Process input parameters.')
# Defines required arguments used on the command line
parser.add_argument('galaxydir',action='store',help='path to galaxy directory (note: this is the whole path ending in the galaxy directory)')
parser.add_argument('-phot',dest='phot',action='store',help="location of phot file from galaxy directory",default='input_data/phot')
parser.add_argument('col1',action='store',help="column number for V",default='2', type=int)
parser.add_argument('col2',action='store',help="column number for Verr",default='4', type = int)
parser.add_argument('col3',action='store',help="column number for I",default='9',type = int)
parser.add_argument('col4',action='store',help="column number for Ierr",default='11',type=int)
parser.add_argument('-fake',dest='fake',action='store',help="location of fake file from galaxy directory",default='input_data/fake')
parser.add_argument('gname',action = 'store', help="names the galaxy", type=str)
# Parses through the arguments and saves them within the keyword args
args = parser.parse_args()
#plot_title = sys.argv[1]
#redfilter = sys.argv[2]
#bluefilter = sys.argv[3]
#plot_title = "AGC238890"
# Parses through command line arguments
#args = parse_options()
galdir = args.galaxydir
#col_1 = args.col1
#col_2 = args.col2
#col_3 = args.col3
#col_4 = args.col4
# Defines the location of phot, and fake
phot = galdir + args.phot
fake = galdir + args.fake
#print filtername(pars)
gal_name = args.gname
plot_title= gal_name
redfilter = "F814W"
bluefilter = "F606W"
print(str(galdir))
#Real Data
"""Parsers the path to phot file"""
dat = np.genfromtxt(phot)
#dat = np.genfromtxt(args.phot+'')
#remove all NaNs
dat = dat[~np.isnan(dat).any(axis=1)]
"""Parsers the column numbers for fake"""
#V = np.array(dat[:,2])
V= np.array(dat[:,args.col1])
#Verr = np.array(dat[:,4])
Verr = np.array(dat[:,args.col2])
#I = np.array(dat[:,9])
I = np.array(dat[:,args.col3])
#Ierr = np.array(dat[:,11])
Ierr = np.array(dat[:,args.col4])
VmI = V-I
VmIerr = (((Verr)**2)+(((Ierr)**2)))**.5
print(VmIerr)
#fake error stuff
fdat = np.loadtxt(fake)
#fdat = np.loadtxt(args.fake)
fdat = np.asarray([d for d in fdat if not 99.999 in d])
fVerr = np.array(fdat[:,2])
fIerr = np.array(fdat[:,3])
fI = np.array(fdat[:,1])
fV = np.array(fdat[:,0])
fVmIerr = (fVerr**2 + fIerr**2)**0.5
print(fVmIerr)
#Here I am finding the max and min values of the data
#We will use this to automate the figsize thing
maxV = np.amax(V) + .3
print(max(V))
maxI = np.amax(I) + .3
minV = np.amin(V) + 1.3
minI = np.amin(I) + 1.3
meanVmI = np.mean(VmI)
maxVmI = (meanVmI) + 5*np.std(VmI)
minVmI = (meanVmI) - 4*np.std(VmI)
Ierrup = np.around(maxI - .5)
Verrup = np.around(maxV - .5)
Ierrlow = np.around(minI + .5)
Verrlow = np.around(minV + .5)
errx = maxVmI -.3
print(maxVmI)
print(errx)
#maxVmI = np.amax(VmI) + .2
#minVmI = np.amin(VmI) - .2
#print minI
#print min(I)
#Making plain scatter plot
fig = plt.figure()
ax = fig.add_subplot(1, 2, 1)
ax.scatter(VmI, I, color = 'black', s = 2)
ax.set_ylim(maxI, minI)
ax.set_xlim(minVmI, maxVmI)
ax.set_xlabel(bluefilter + '-' + redfilter + ' (mag)')
ax.set_ylabel(redfilter + ' (mag)')
fig.text(.18, .85, 'Number of stars: '+str(len(V)))
majorLocator = MultipleLocator(1)
majorFormatter = FormatStrFormatter('%d')
minorLocator = AutoMinorLocator(10)
ax.yaxis.set_major_locator(majorLocator)
ax.yaxis.set_major_formatter(majorFormatter)
ax.yaxis.set_minor_locator(minorLocator)
plt.suptitle(plot_title)
#ax.set_title(plot_title)
#adding error to the plot
#c = 3
c = errx
#location of the line
errlist = []
ylist = []
Verrlist = []
VmIerrlist = []
#Ilist = range(18, 27)
Ilist = range(int(Ierrlow), int(Ierrup))
for a in Ilist:
Iw = np.where(I > a)
Iwr = np.where(I[Iw] < a+1)
fIw = np.where(fI > a)
fIwr = np.where(fI[fIw] < a+1)
Ierravg = ((np.mean(Ierr[Iwr]))**2 + (np.mean(abs(fIerr[fIwr]))**2))**.5
errlist.append(Ierravg)
ylist.append(a+.5)
VmIerravg = (np.mean(VmIerr[Iwr])**2 + np.mean(abs(fVmIerr[fIwr]))**2)**.5
VmIerrlist.append(VmIerravg)
xlist = c*np.ones_like(ylist)
plt.errorbar(xlist, ylist, xerr = VmIerrlist, yerr= errlist, fmt = '.', capsize=0)
#adding in the contour script
def multidigitize(VmI,I,binsVmI,binsV):
dVmI = np.digitize(VmI.flat, binsVmI)
dI = np.digitize(I.flat, binsV)
return dVmI,dI
def linlogspace(xmin,xmax,n):
return np.logspace(np.log10(xmin),np.log10(xmax),n)
#here's the contour actual values
def adaptive_param_plot(VmI,I,
bins=3,
threshold=2,
marker='.',
marker_color=None,
ncontours=5,
fill=False,
mesh=False,
contourspacing=linlogspace,
mesh_alpha=0.5,
norm=None,
axis=None,
cmap=None,
**kwargs):
if axis is None:
axis = pl.gca()
axis.set_ylim(28, 18)
ok = np.isfinite(VmI)*np.isfinite(I)
if hasattr(bins, 'ndim') and bins.ndim == 2:
nbinsVmI, nbinsI = bins.shape[0]-1, bins.shape[1]-1
else:
try:
nbinsVmI = nbinsI = len(bins)-1
except TypeError:
nbinsVmI = nbinsI = bins
H, bVmI, bI = np.histogram2d(VmI[ok], I[ok], bins = bins)
dVmI, dI = multidigitize(VmI[ok], I[ok], bVmI, bI)
plottable = np.ones([nbinsVmI+2, nbinsI+2], dtype = 'bool')
plottable_hist = plottable[1:-1, 1:-1]
assert H.shape == plottable_hist.shape
plottable_hist[H > threshold] = False
H[plottable_hist] = 0
toplot = plottable[dVmI, dI]
cVmI = (bVmI[1:]+bVmI[:-1])/2
cI = (bI[1:]+bI[:-1])/2
levels = contourspacing(threshold-0.5, H.max(), ncontours)
if cmap is None:
cmap = plt.cm.get_cmap()
cmap.set_under((0,0,0,0))
cmap.set_bad((0,0,0,0))
if fill:
con = axis.contourf(cVmI, cI, H.T, levels= levels, norm = norm, cmap = cmap, **kwargs)
else:
con = axis.contour(cVmI, cI, H.T,levels=levels,norm=norm,cmap=cmap,**kwargs)
if mesh:
mesh = axis.pcolormesh(bVmI, bI, H.T, **kwargs)
mesh.set_alpha(mesh_alpha)
#Is there a way to add lines w the contour levels?
if 'linestyle' in kwargs:
kwargs.pop('linestyle')
#if i wanted to plot the scatter from this script intstead, but I can't make it look as nice
# axis.plot(VmI[ok][toplot],
# I[ok][toplot],
# linestyle='none',
# marker=marker,
# markerfacecolor=marker_color,
# markeredgecolor=marker_color,
# **kwargs)
return cVmI, cI, H, VmI[ok][toplot], I[ok][toplot]
adaptive_param_plot(VmI, I, bins = 100, fill = True, ncontours = 7, threshold = 2, axis = ax)
#SECOND PLOT
#Making plain scatter plot
#fig = plt.figure()
ax1 = fig.add_subplot(1, 2, 2)
ax1.scatter(VmI, V, color = 'black', s = 2)
ax1.set_ylim(maxV, minV)
ax1.set_xlim(minVmI, maxVmI)
#ax.set_ylim(29, 18)
#ax.set_xlim(-.98, 3.48)
ax1.set_xlabel(bluefilter + '-' + redfilter + ' (mag)')
ax1.set_ylabel(bluefilter + ' (mag)')
ax1.yaxis.set_label_coords(-0.12, 0.5)
#fig.text(.6, .85, 'Number of stars: '+str(len(V)))
majorLocator = MultipleLocator(1)
majorFormatter = FormatStrFormatter('%d')
minorLocator = AutoMinorLocator(10)
ax1.yaxis.set_major_locator(majorLocator)
ax1.yaxis.set_major_formatter(majorFormatter)
ax1.yaxis.set_minor_locator(minorLocator)
ax.get_shared_y_axes().join(ax, ax1)
ax.set_yticklabels([])
ax1.autoscale()
#ax.set_title(plot_title)
#adding error to the plot
#c = 3
c = errx
#location of the line
errlist = []
ylist = []
Verrlist = []
VmIerrlist = []
#Ilist = range(18, 28)
Ilist = range(int(Verrlow), int(Verrup))
for a in Ilist:
Iw = np.where(I > a)
Iwr = np.where(I[Iw] < a+1)
fIw = np.where(fI > a)
fIwr = np.where(fI[fIw] < a+1)
Ierravg = ((np.mean(Ierr[Iwr]))**2 + (np.mean(abs(fIerr[fIwr])))**2)**.5
errlist.append(Ierravg)
ylist.append(a+.5)
VmIerravg = (np.mean(VmIerr[Iwr])**2 + np.mean(fVmIerr[fIwr])**2)**.5
VmIerrlist.append(VmIerravg)
xlist = c*np.ones_like(ylist)
plt.errorbar(xlist, ylist, xerr = VmIerrlist, yerr= errlist, fmt = '.', capsize = 0)
#adding in the contour script
def multidigitize(VmI,V,binsVmI,binsV):
dVmI = np.digitize(VmI.flat, binsVmI)
dV = np.digitize(V.flat, binsV)
return dVmI,dV
def linlogspace(xmin,xmax,n):
return np.logspace(np.log10(xmin),np.log10(xmax),n)
#here's the contour actual values
def adaptive_param_plot(VmI,V,
bins=3,
threshold=2,
marker='.',
marker_color=None,
ncontours=5,
fill=False,
mesh=False,
contourspacing=linlogspace,
mesh_alpha=0.5,
norm=None,
axis=None,
cmap=None,
**kwargs):
if axis is None:
axis = pl.gca()
axis.set_ylim(28, 18)
ok = np.isfinite(VmI)*np.isfinite(V)
if hasattr(bins, 'ndim') and bins.ndim == 2:
nbinsVmI, nbinsV = bins.shape[0]-1, bins.shape[1]-1
else:
try:
nbinsVmI = nbinsV = len(bins)-1
except TypeError:
nbinsVmI = nbinsV = bins
H, bVmI, bV = np.histogram2d(VmI[ok], V[ok], bins = bins)
dVmI, dV = multidigitize(VmI[ok], V[ok], bVmI, bV)
plottable = np.ones([nbinsVmI+2, nbinsV+2], dtype = 'bool')
plottable_hist = plottable[1:-1, 1:-1]
assert H.shape == plottable_hist.shape
plottable_hist[H > threshold] = False
H[plottable_hist] = 0
toplot = plottable[dVmI, dV]
cVmI = (bVmI[1:]+bVmI[:-1])/2
cV = (bV[1:]+bV[:-1])/2
levels = contourspacing(threshold-0.5, H.max(), ncontours)
if cmap is None:
cmap = plt.cm.get_cmap()
cmap.set_under((0,0,0,0))
cmap.set_bad((0,0,0,0))
if fill:
con = axis.contourf(cVmI, cV, H.T, levels= levels, norm = norm, cmap = cmap, **kwargs)
else:
con = axis.contour(cVmI, cV, H.T,levels=levels,norm=norm,cmap=cmap,**kwargs)
if mesh:
mesh = axis.pcolormesh(bVmI, bV, H.T, **kwargs)
mesh.set_alpha(mesh_alpha)
#Is there a way to add lines w the contour levels?
if 'linestyle' in kwargs:
kwargs.pop('linestyle')
return cVmI, cV, H, VmI[ok][toplot], V[ok][toplot]
adaptive_param_plot(VmI, V, bins = 100, fill = True, ncontours = 7, threshold = 2, axis = ax1)
#plt.savefig('CMD.png')
plt.savefig(gal_name+'.png')
|
python
|
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2013,2014 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq show fqdn --all`."""
from sqlalchemy.orm import contains_eager
from aquilon.worker.broker import BrokerCommand
from aquilon.worker.formats.list import StringList
from aquilon.aqdb.model import DnsDomain, DnsEnvironment, Fqdn
class CommandShowFqdnAll(BrokerCommand):
def render(self, session, dns_environment, **arguments):
self.deprecated_command("The show_fqdn command is deprecated. Please "
"use search_dns instead.", **arguments)
dbdns_env = DnsEnvironment.get_unique_or_default(session,
dns_environment)
q = session.query(Fqdn)
q = q.filter_by(dns_environment=dbdns_env)
q = q.join(DnsDomain)
q = q.options(contains_eager("dns_domain"))
q = q.order_by(DnsDomain.name, Fqdn.name)
return StringList(q.all())
|
python
|
"""Request handler base classes
"""
from decorator import decorator
from pyramid.httpexceptions import HTTPForbidden
from dd_app.django_codec import DjangoSessionCodec
from dd_app.messaging.mixins import MsgMixin
class DDHandler(object):
"""Base view handler object
"""
def __init__(self, request, *args, **kwargs):
self.request = request
@property
def mongo(self):
if not hasattr(self, '_mongo'):
self._mongo = self.settings['mongodb.connector']
return self._mongo
@property
def redis(self):
if not hasattr(self, '_redis'):
self._redis = self.settings['redis.connector']
return self._redis
@property
def settings(self):
return self.request.registry.settings
@property
def cookies(self):
return self.request.cookies
@property
def debug_charge_accel(self):
return int(self.settings.get('dd_app.debug_charge_accel', 1))
@property
def powerup_types(self):
return ('ad', 'teammember', 'upgrade')
class DjangoSessionMixin(object):
"""Mixin implementing authentication agains django sessions"""
def _get_redis_key(self, key):
return "%s%s" % (self.settings['session.prefix'], key)
@property
def session_codec(self):
if not hasattr(self, '_session_codec'):
self._session_codec = DjangoSessionCodec(self.settings)
return self._session_codec
def get_session_cookie(self):
if hasattr(self, '_token'):
return self._token
return self.cookies.get(self.settings['session.cookie_id'], None)
def get_redis_session(self, key):
self._raw_session = self.redis.get().get(self._get_redis_key(key))
result = self._raw_session
return result
def _get_session_data(self):
key = self.get_session_cookie()
if key is None:
return {} # no session cookie
session_data = self.get_redis_session(key)
if session_data is None:
return {} # no session data for key
session_dec, auth_uid = self.session_codec.decode(session_data)
return session_dec
@property
def session_data(self):
if not hasattr(self, '_django_session'):
self._django_session = self._get_session_data()
return self._django_session
@property
def session_language(self):
return self.session_data.get('django_language', 'en')
@property
def auth_uid(self):
return self.session_data.get('_auth_user_id', None)
def check_user(self):
if self.auth_uid is not None:
return self.mongo.get_user_by_auth_uid(self.auth_uid, {'_id': 1}) is not None
return False
def get_user_info(self):
if self.auth_uid is not None:
return self.mongo.get_user_by_auth_uid(self.auth_uid)
@property
def userdata(self):
if not hasattr(self, '_userdata'):
self._userdata = self.get_user_info()
return self._userdata
@property
def game_query_base(self):
oid = self.userdata['_id']
query_base = {'user.$id': oid}
version = self.userdata.get('game_version', None)
if version is not None:
query_base.update({'version': version})
return query_base
def _delete_session(self):
del self._django_session
del self._raw_session
if hasattr(self, '_delkey'):
self.redis.get().delete(self._get_redis_key(self._delkey))
del self._delkey
def _delete_cookie(self):
def del_cookie_callback(request, response):
response.delete_cookie(self.settings['session.cookie_id'])
self.request.add_response_callback(del_cookie_callback)
def _logout(self):
self._delkey = self.get_session_cookie()
self._delete_cookie()
self._delete_session()
def get_game_version(self, auth_uid):
if not hasattr(self, '_game_version'):
data = self.mongo.get_game_version(auth_uid)
if data is None:
self._game_version = None
else:
self._game_version = data.get('game_version', None)
return self._game_version
class BaseHandler(DDHandler, DjangoSessionMixin, MsgMixin):
def _get_uid(self):
# For MsgMixin compatibility
return self.auth_uid
# decorator preserving the argspec,
# see https://micheles.googlecode.com/hg/decorator/documentation.html
@decorator
def dd_protected(f, obj, token, *args, **kwargs):
obj._token = token
if obj.auth_uid is None:
raise HTTPForbidden('unauthorized')
return f(obj, token, *args, **kwargs)
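# Hedged usage sketch (not part of the original module): a handler method on a
# BaseHandler subclass can be wrapped with dd_protected; the class, method name
# and return value below are illustrative only.
class ExampleProtectedHandler(BaseHandler):
    @dd_protected
    def get_profile(self, token):
        # dd_protected has stored the token on self._token and checked auth_uid
        return {'uid': self.auth_uid, 'language': self.session_language}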
|
python
|
"""Module with view functions that serve each uri."""
from datetime import datetime
from learning_journal.models.mymodel import Journal
from learning_journal.security import is_authenticated
from pyramid.httpexceptions import HTTPFound, HTTPNotFound
from pyramid.security import NO_PERMISSION_REQUIRED, forget, remember
from pyramid.view import view_config
@view_config(route_name='home', renderer='learning_journal:templates/index.jinja2', permission='view')
def list_view(request):
"""Pass response to send to index.html page with all entries."""
entries = request.dbsession.query(Journal).all()
entries = [entry.to_dict() for entry in entries]
return {
'entries': entries
}
@view_config(route_name='detail', renderer='learning_journal:templates/detail.jinja2', permission='view')
def detail_view(request):
"""Pass response to send to detail page for individual entries."""
target_id = int(request.matchdict['id'])
entry = request.dbsession.query(Journal).get(target_id)
if not entry:
raise HTTPNotFound
if request.method == 'GET':
return {
'entry': entry.to_dict()
}
if request.method == "POST":
return HTTPFound(request.route_url('edit', id=entry.id))
@view_config(route_name='create', renderer='learning_journal:templates/new.jinja2', permission='secret')
def create_view(request):
"""Pass response to send to new page."""
if request.method == 'GET':
return {
'textarea': 'New Entry'
}
if request.method == 'POST':
new_entry = Journal(
title=request.POST['title'],
text=request.POST['text'],
created=datetime.now()
)
request.dbsession.add(new_entry)
return HTTPFound(request.route_url('home'))
@view_config(route_name='edit', renderer='learning_journal:templates/edit.jinja2', permission='secret')
def update_view(request):
"""Pass response to send to edit page."""
target_id = int(request.matchdict['id'])
entry = request.dbsession.query(Journal).get(target_id)
if not entry:
raise HTTPNotFound
if request.method == 'GET':
return {
'entry': entry.to_dict()
}
if request.method == 'POST' and request.POST:
entry.title = request.POST['title']
entry.text = request.POST['body']
entry.created = datetime.now()
request.dbsession.add(entry)
request.dbsession.flush()
return HTTPFound(request.route_url('detail', id=entry.id))
@view_config(route_name='delete', permission='secret')
def delete_view(request):
"""Delete a specific entry."""
target_id = int(request.matchdict['id'])
entry = request.dbsession.query(Journal).get(target_id)
if entry:
request.dbsession.delete(entry)
return HTTPFound(request.route_url('home'))
raise HTTPNotFound
@view_config(
route_name='login', renderer="learning_journal:templates/login.jinja2", permission=NO_PERMISSION_REQUIRED
)
def login(request):
"""Login view config to authenticate username/password."""
if request.authenticated_userid:
return HTTPFound(request.route_url('home'))
if request.method == "GET":
return {}
if request.method == "POST":
username = request.POST['username']
password = request.POST['password']
if is_authenticated(username, password):
headers = remember(request, username)
return HTTPFound(request.route_url('home'), headers=headers)
return {
'error': 'Invalid username/password combination.'
}
@view_config(route_name='logout', permission=NO_PERMISSION_REQUIRED)
def logout(request):
"""Logout view config to redirect to home view."""
headers = forget(request)
return HTTPFound(request.route_url('home'), headers=headers)
|
python
|
import os
import torch
import pickle
import numpy as np
from transformers import RobertaConfig, RobertaModel, RobertaTokenizer
tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
roberta = RobertaModel.from_pretrained('roberta-base')
path = '../wikidata5m_alias'
if not os.path.exists('../wikidata5m_alias_emb'):
os.makedirs('../wikidata5m_alias_emb')
with open('../read_ent_vocab.bin', 'rb') as fin:
ent_vocab = pickle.load(fin)
with open('../read_rel_vocab.bin', 'rb') as fin:
rel_vocab = pickle.load(fin)
print(len(ent_vocab))
print(len(rel_vocab))
aliases = {}
with open(os.path.join(path, 'wikidata5m_entity.txt'), 'r', encoding='utf-8') as fin:
for line in fin:
segs = line.strip().split('\t')
entity = segs[0]
alias = segs[1:]
aliases[entity] = alias
print(len(aliases))
miss = 0
entity_embeddings = []
for k, v in ent_vocab.items():
if k in aliases:
alias = aliases[k][0]
tokens = tokenizer.encode(' '+alias, add_special_tokens=False)
embedding = roberta.embeddings.word_embeddings(torch.tensor(tokens).view(1,-1)).squeeze(0).mean(dim=0)
else:
miss += 1
embedding = torch.randn(768) / 10
entity_embeddings.append(embedding)
assert len(ent_vocab) == len(entity_embeddings)
entity_embeddings = torch.stack(entity_embeddings, dim=0)
print(miss * 1.0 / len(ent_vocab))
print(entity_embeddings.shape)
np.save('../wikidata5m_alias_emb/entities.npy', entity_embeddings.detach().numpy())
del entity_embeddings
rel_aliases = {}
with open(os.path.join(path, 'wikidata5m_relation.txt'), 'r', encoding='utf-8') as fin:
for line in fin:
segs = line.strip().split('\t')
relation = segs[0]
alias = segs[1:]
rel_aliases[relation] = alias
miss = 0
relation_embeddings = []
for k, v in rel_vocab.items():
if k in rel_aliases:
alias = rel_aliases[k][0]
tokens = tokenizer.encode(' '+alias, add_special_tokens=False)
embedding = roberta.embeddings.word_embeddings(torch.tensor(tokens).view(1,-1)).squeeze(0).mean(dim=0)
else:
miss += 1
embedding = torch.randn(768) / 10
relation_embeddings.append(embedding)
assert len(rel_vocab) == len(relation_embeddings)
relation_embeddings = torch.stack(relation_embeddings, dim=0)
print(relation_embeddings.shape)
print(miss * 1.0 / len(rel_vocab))
np.save('../wikidata5m_alias_emb/relations.npy', relation_embeddings.detach().numpy())
|
python
|
from array import array
import numpy as np
import os
import cv2
import argparse
from tensorflow import lite as tflite
from matplotlib import pyplot as plt
import time
import torch
from torchvision import transforms
from omegaconf import OmegaConf
import torch.nn.functional as F
import tensorflow as tf
from torch.optim import Adam
from torch.utils.data import DataLoader
import torchvision
import pytorch_lightning as pl
from PIL import Image
from aei_net import AEINet
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", type=str, default="../config/train.yaml",
help="path of configuration yaml file"),
parser.add_argument("--model_path", type=str, default="../ONNX/",
help="path of onnx extra data folder"),
parser.add_argument("--checkpoint_path", type=str, default="../chkpt/30.ckpt",
help="path of aei-net pre-trained file"),
parser.add_argument("--images_folder", type=str, default="../data/faceshifter-datasets-preprocessed/train/",
help="path of preprocessed source face image"),
parser.add_argument("--gpu_num", type=int, default=0,
help="number of gpu"),
parser.add_argument("--num_images", type=int, default=50,
help="number of images used to convert the model")
args = parser.parse_args()
def optimizeADD_w_optim_MLE(argument):
device = torch.device(f"cuda:{argument.gpu_num}" if torch.cuda.is_available() else 'cpu')
#set experimental memory growth
physical_devices = tf.config.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(
physical_devices[0], True
)
#load model for converter
converter = tf.lite.TFLiteConverter.from_saved_model(argument.model_path + "ADD_gen")
converter.optimizations = [tf.lite.Optimize.DEFAULT]
#load model for data preparation
hp = OmegaConf.load(argument.config)
model = AEINet.load_from_checkpoint(argument.checkpoint_path, hp=hp)
model.eval()
model.freeze()
model.to(device)
interpreter = tflite.Interpreter(args.model_path+ "MultiLevelEncoder_gen_Lite_optimized.tflite", num_threads=12)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
#setup for data preparation
def representative_dataset_gen():
for i in range(argument.num_images):
#choose a picture
source_img_path = os.path.join(argument.images_folder, f"{i:08}.png")
source_img = transforms.ToTensor()(Image.open(source_img_path)).unsqueeze(0).to(device)
#prepare the image for the model
z_id = model.Z(F.interpolate(source_img, size=112, mode='bilinear'))
z_id = F.normalize(z_id)
z_id = z_id.detach()
#choose target image
target_img_number = (i+argument.num_images)
target_img_path = os.path.join(argument.images_folder, f"{target_img_number:08}.png")
img = cv2.imread(target_img_path)
img = cv2.resize(img, (256, 256))
img = img.astype(np.float32)
img = img/255.0
img = np.transpose(img, (2, 0, 1))
img = np.expand_dims(img, axis=0)
interpreter.set_tensor(input_details[0]['index'], img)
interpreter.invoke()
feature_map = [interpreter.get_tensor(output_details[1]['index']), interpreter.get_tensor(output_details[0]['index']), interpreter.get_tensor(output_details[3]['index']),
interpreter.get_tensor(output_details[5]['index']), interpreter.get_tensor(output_details[6]['index']), interpreter.get_tensor(output_details[4]['index']),
interpreter.get_tensor(output_details[7]['index']), interpreter.get_tensor(output_details[2]['index'])]
#converting to cpu and numpy and prepraring with dictionary signature
yield {"input.5": z_id.cpu().numpy(),
"input.119": feature_map[5],
"input.145": feature_map[6],
"input.171": feature_map[7],
"input.27": feature_map[1],
"input.47": feature_map[2],
"input.67": feature_map[3],
"input.7": feature_map[0],
"input.93": feature_map[4]}
#converter setup
converter.representative_dataset = representative_dataset_gen
#converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
#converter.inference_input_type = tf.float32
#converter.inference_output_type = tf.float32
#convert the model
tflite_quant_model = converter.convert()
#save the model
with open(args.model_path + "ADD_gen_Lite_optimized.tflite", 'wb') as f:
f.write(tflite_quant_model)
optimizeADD_w_optim_MLE(args)
|
python
|
import os
# Bot Setup
os.environ["prefix"] = "g!"
os.environ["token"] = "NzM5Mjc0NjkxMDg4MjIwMzEw.XyYFNQ.Mb6wJSHhpj9LP5bqO2Hb0w8NBQM"
os.environ["botlog"] = "603894328212848651"
os.environ["debugEnabled"] = "False"
# Database Setup
os.environ["db_type"] = "MySQL" # Either MySQL or Flat (or just leave empty for Flat SQLite)
os.environ["db_user"] = "USERNAME" # Defaults to root if empty
os.environ["db_pword"] = "PASSWORD" # Defaults to none if empty
os.environ["db_host"] = "HOST" # Defaults to localhost if empty
os.environ["db_port"] = "PORT" # Defaults to 3306
# Bot list API tokens
os.environ["top.gg_token"] = ""
os.environ["discord.bots.gg_token"] = ""
os.environ["discordbotlist.com_token"] = ""
os.environ["bots.ondiscord.xyz_token"] = ""
os.environ["botsfordiscord.com_token"] = ""
|
python
|
###
# Adapted from Avalanche LvisDataset
# https://github.com/ContinualAI/avalanche/tree/detection/avalanche/benchmarks/datasets/lvis
#
# Released under the MIT license, see:
# https://github.com/ContinualAI/avalanche/blob/master/LICENSE
###
from pathlib import Path
from typing import List, Sequence, Union
from PIL import Image
from torch.utils.data import Dataset
from torchvision.datasets.folder import default_loader
from devkit_tools.challenge_constants import DEFAULT_CHALLENGE_TRAIN_JSON, \
DEFAULT_CHALLENGE_TEST_JSON
from ego_objects import EgoObjects, EgoObjectsAnnotation, \
EgoObjectsImage
import torch
class ChallengeDetectionDataset(Dataset):
"""
The sample dataset. For internal use by challenge organizers only.
"""
def __init__(
self,
root: Union[str, Path],
*,
train=True,
transform=None,
loader=default_loader,
ego_api=None,
img_ids: List[int] = None,
bbox_format: str = 'ltwh',
categories_id_mapping: List[int] = None
):
"""
Instantiates the sample dataset.
:param root: The path to the images and annotation file.
:param transform: The transformation to apply.
:param loader: The image loader. Defaults to PIL Image open.
:param ego_api: An EgoObjects object. If not provided, annotations
will be loaded from the json file found in the root. Defaults to
None.
:param img_ids: A list of image ids to use. If not None, only those
images (a subset of the original dataset) will be used. Defaults
to None.
:param bbox_format: The bounding box format. Defaults to "ltwh"
(Left, Top, Width, Height).
:param categories_id_mapping: If set, it must define a mapping from
the to-be-used-id to the real category id so that:
real_cat_id = categories_id_mapping[mapped_id].
"""
self.root: Path = Path(root)
self.train = train
self.transform = transform
self.loader = loader
self.bbox_crop = True
self.img_ids = img_ids
self.bbox_format = bbox_format
self.categories_id_mapping = categories_id_mapping
self.ego_api = ego_api
must_load_api = self.ego_api is None
must_load_img_ids = self.img_ids is None
# Load metadata
if must_load_api:
if self.train:
ann_json_path = str(self.root / DEFAULT_CHALLENGE_TRAIN_JSON)
else:
ann_json_path = str(self.root / DEFAULT_CHALLENGE_TEST_JSON)
self.ego_api = EgoObjects(ann_json_path)
if must_load_img_ids:
self.img_ids = list(sorted(self.ego_api.get_img_ids()))
self.targets = EgoObjectsDetectionTargets(
self.ego_api, self.img_ids,
categories_id_mapping=categories_id_mapping)
# Try loading an image
if len(self.img_ids) > 0:
img_id = self.img_ids[0]
img_dict = self.ego_api.load_imgs(ids=[img_id])[0]
assert self._load_img(img_dict) is not None
def __getitem__(self, index):
"""
Loads an instance given its index.
:param index: The index of the instance to retrieve.
:return: a (sample, target) tuple where the target is a
torchvision-style annotation for object detection
https://pytorch.org/tutorials/intermediate/torchvision_tutorial.html
"""
img_id = self.img_ids[index]
img_dict: EgoObjectsImage = self.ego_api.load_imgs(ids=[img_id])[0]
annotation_dicts = self.targets[index]
# Transform from EgoObjects dictionary to torchvision-style target
num_objs = len(annotation_dicts)
boxes = []
labels = []
areas = []
for i in range(num_objs):
xmin = annotation_dicts[i]['bbox'][0]
ymin = annotation_dicts[i]['bbox'][1]
if self.bbox_format == 'ltrb':
# Left, Top, Right, Bottom
xmax = annotation_dicts[i]['bbox'][2]
ymax = annotation_dicts[i]['bbox'][3]
boxw = xmax - xmin
boxh = ymax - ymin
else:
# Left, Top, Width, Height
boxw = annotation_dicts[i]['bbox'][2]
boxh = annotation_dicts[i]['bbox'][3]
xmax = boxw + xmin
ymax = boxh + ymin
boxes.append([xmin, ymin, xmax, ymax])
labels.append(annotation_dicts[i]['category_id'])
areas.append(boxw * boxh)
if len(boxes) > 0:
boxes = torch.as_tensor(boxes, dtype=torch.float32)
else:
boxes = torch.empty((0, 4), dtype=torch.float32)
labels = torch.as_tensor(labels, dtype=torch.int64)
image_id = torch.tensor([img_id])
areas = torch.as_tensor(areas, dtype=torch.float32)
iscrowd = torch.zeros((num_objs,), dtype=torch.int64)
target = dict()
target["boxes"] = boxes
target["labels"] = labels
target["image_id"] = image_id
target["area"] = areas
target["iscrowd"] = iscrowd
img = self._load_img(img_dict)
if self.transform is not None:
img, target = self.transform(img, target)
return img, target
def __len__(self):
return len(self.img_ids)
def _load_img(self, img_dict):
img_url = img_dict['url']
splitted_url = img_url.split('/')
img_path = 'images/' + splitted_url[-1]
img_path_alt = 'cltest/' + splitted_url[-1]
final_path = self.root / img_path # <root>/images/<img_id>.jpg
if not final_path.exists():
final_path = self.root / img_path_alt
return Image.open(str(final_path)).convert("RGB")
class EgoObjectsDetectionTargets(Sequence[List[EgoObjectsAnnotation]]):
def __init__(
self,
ego_api: EgoObjects,
img_ids: List[int] = None,
categories_id_mapping: List[int] = None):
super(EgoObjectsDetectionTargets, self).__init__()
self.ego_api = ego_api
if categories_id_mapping is not None:
self.reversed_mapping = dict()
for mapped_id, real_id in enumerate(categories_id_mapping):
self.reversed_mapping[real_id] = mapped_id
else:
self.reversed_mapping = None
if img_ids is None:
img_ids = list(sorted(ego_api.get_img_ids()))
self.img_ids = img_ids
def __len__(self):
return len(self.img_ids)
def __getitem__(self, index):
img_id = self.img_ids[index]
annotation_ids = self.ego_api.get_ann_ids(img_ids=[img_id])
annotation_dicts: List[EgoObjectsAnnotation] = \
self.ego_api.load_anns(annotation_ids)
if self.reversed_mapping is None:
return annotation_dicts
mapped_anns: List[EgoObjectsAnnotation] = []
for ann_dict in annotation_dicts:
ann_dict: EgoObjectsAnnotation = dict(ann_dict)
ann_dict['category_id'] = \
self.reversed_mapping[ann_dict['category_id']]
mapped_anns.append(ann_dict)
return mapped_anns
__all__ = [
'ChallengeDetectionDataset'
]
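# Hedged usage sketch (not part of the original file): the dataset root below is
# hypothetical and assumes the challenge images and the default train JSON live
# under it.
if __name__ == '__main__':
    demo_dataset = ChallengeDetectionDataset('/data/ego_objects_challenge', train=True)
    demo_img, demo_target = demo_dataset[0]
    print(len(demo_dataset), demo_target['boxes'].shape, demo_target['labels'][:5])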
|
python
|
import os
import cv2
import numpy as np
from constants import DATA_DIR
MYPY = False
if MYPY:
from typing import Tuple, Union
Pair = Tuple[int, int]
def scale(image, scale_percent):
height, width = image.shape[:2]
return cv2.resize(image, (int(width * scale_percent), int(height * scale_percent)))
def show(image, scale_percent=None):
if scale_percent is not None:
image = scale(image, scale_percent)
cv2.namedWindow("test")
cv2.imshow("test", image)
return cv2.waitKey()
def get_drawn_contours(im, contours, draw_to_existing_image=False):
if draw_to_existing_image:
if len(im.shape) < 3 or im.shape[2] == 1:
orig = im
im = np.empty((im.shape[0], im.shape[1], 3))
im[:, :, 0] = orig
im[:, :, 1] = orig
im[:, :, 2] = orig
else:
im = im.copy()
else:
im = np.empty((im.shape[0], im.shape[1], 3))
im[:, :] = [0, 0, 0]
cv2.drawContours(im, contours, -1, (0, 255, 0), 1)
return im
def get_center_for_contour(contour):
x, y, w, h = cv2.boundingRect(contour)
return x + w / 2, y + h / 2
def order_points(pts):
# Handle the common case of pts being a contour
if pts.shape == (4, 1, 2):
pts = pts.reshape((4, 2))
# initialize a list of coordinates that will be ordered
# such that the first entry in the list is the top-left,
# the second entry is the top-right, the third is the
# bottom-right, and the fourth is the bottom-left
rect = np.zeros((4, 2), dtype="float32")
# the top-left point will have the smallest sum, whereas
# the bottom-right point will have the largest sum
s = pts.sum(axis=1)
rect[0] = pts[np.argmin(s)]
rect[2] = pts[np.argmax(s)]
# now, compute the difference between the points, the
# top-right point will have the smallest difference,
# whereas the bottom-left will have the largest difference
diff = np.diff(pts, axis=1)
rect[1] = pts[np.argmin(diff)]
rect[3] = pts[np.argmax(diff)]
# return the ordered coordinates
return rect
def four_point_transform(image, pts, margin_percent=0):
# obtain a consistent order of the points and unpack them
# individually
rect = order_points(pts)
(tl, tr, br, bl) = rect
# compute the width of the new image, which will be the
# maximum distance between bottom-right and bottom-left
# x-coordinates or the top-right and top-left x-coordinates
width_a = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
width_b = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
max_width = max(int(width_a), int(width_b))
# compute the height of the new image, which will be the
# maximum distance between the top-right and bottom-right
# y-coordinates or the top-left and bottom-left y-coordinates
height_a = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
height_b = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
max_height = max(int(height_a), int(height_b))
# now that we have the dimensions of the new image, construct
# the set of destination points to obtain a "birds eye view",
# (i.e. top-down view) of the image, again specifying points
# in the top-left, top-right, bottom-right, and bottom-left
# order
margin_width = max_width * margin_percent / 100
margin_height = max_width * margin_percent / 100
dst = np.array([
[margin_width, margin_height],
[margin_width + max_width, margin_height],
[margin_width + max_width, margin_height + max_height],
[margin_width, margin_height + max_height],
], dtype="float32")
# compute the perspective transform matrix and then apply it
perspective_transform = cv2.getPerspectiveTransform(rect, dst)
warped = cv2.warpPerspective(image, perspective_transform,
(2 * margin_width + max_width, 2 * margin_height + max_height))
# return the warped image
return warped
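# Hedged usage sketch (not part of the original module): the image path and the
# corner coordinates are made up. order_points() sorts arbitrary corners into
# top-left, top-right, bottom-right, bottom-left order before the warp.
def _four_point_transform_demo():
    demo_im = cv2.imread("sample_module.png")  # hypothetical image on disk
    demo_corners = np.array([[410, 35], [30, 50], [25, 300], [420, 290]], dtype="float32")
    return four_point_transform(demo_im, demo_corners)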
def inflate_classifier(classifier_root_dir):
vocab_path, unlabelled_dir, labelled_dir, features_dir, svm_data_dir = \
get_classifier_directories(classifier_root_dir)
with open(vocab_path, "rb") as f:
vocab = np.load(f)
# FLANN parameters
flann_index_kdtree = 0
index_params = dict(algorithm=flann_index_kdtree, trees=5)
search_params = dict(checks=50) # or pass empty dictionary
matcher = cv2.FlannBasedMatcher(index_params, search_params)
detector = cv2.SIFT()
extractor = cv2.DescriptorExtractor_create("SIFT")
bow_de = cv2.BOWImgDescriptorExtractor(extractor, matcher)
bow_de.setVocabulary(vocab)
svm = cv2.SVM()
svm.load(os.path.join(svm_data_dir, "svm_data.dat"))
def classifier(image):
keypoints = detector.detect(image)
descriptor = bow_de.compute(image, keypoints)
return svm.predict(descriptor)
return classifier
def get_classifier_directories(root_dir):
vocab_path = os.path.join(root_dir, "vocab.npy")
unlabelled_dir = os.path.join(root_dir, "unlabelled")
labelled_dir = os.path.join(root_dir, "labelled")
features_dir = os.path.join(root_dir, "features")
svm_data_dir = os.path.join(root_dir, "svm_data")
dirs = (
unlabelled_dir,
labelled_dir,
features_dir,
svm_data_dir
)
for directory in dirs:
if not os.path.exists(directory):
os.makedirs(directory)
return (vocab_path,) + dirs
def ls(path, limit=None, name_filter=None):
i = 0
for name in os.listdir(path):
if name == ".DS_Store":
continue
if name_filter is not None and name_filter not in name:
continue
i += 1
if limit is not None and i > limit:
break
yield os.path.join(path, name)
def apply_offset_to_locations(locations, offset):
x_offset, y_offset = offset
return [(location[0] + x_offset, location[1] + y_offset) for location in locations]
def apply_offset_to_single_location(location, offset):
x_offset, y_offset = offset
return location[0] + x_offset, location[1] + y_offset
def _apply_sensitivity_to_255(orig_100):
low = max(0, orig_100 - 10)
high = min(100, orig_100 + 10)
return int((255 * low) / 100.0), int((255 * high) / 100.0)
def extract_color_2(im, hue_360, saturation_100, value_100):
saturation_255 = _apply_sensitivity_to_255(saturation_100)
value_255 = _apply_sensitivity_to_255(value_100)
return extract_color(im, hue_360/2, saturation_255, value_255)
def extract_color(im, hue, saturation, value):
# type: (np.ndarray, Union[int, Pair], Pair, Pair) -> np.ndarray
if isinstance(hue, int):
sensitivity = 10
hue = (hue - sensitivity, hue + sensitivity)
# Handle hue's near the boundary
split_hue_pairs = None
if hue[0] < 0:
split_hue_pairs = ((hue[0] % 180, 180), (0, hue[1]))
elif hue[1] > 180:
split_hue_pairs = ((hue[0], 180), (0, hue[1] % 180))
if split_hue_pairs is not None:
a_hue, b_hue = split_hue_pairs
return extract_color(im, a_hue, saturation, value) + \
extract_color(im, b_hue, saturation, value)
lower_bound = np.array([hue[0], saturation[0], value[0]])
upper_bound = np.array([hue[1], saturation[1], value[1]])
hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
mono = cv2.inRange(hsv, lower_bound, upper_bound)
return mono
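# Hedged usage sketch (not part of the original module): an OpenCV hue of 175
# expands to the range (165, 185), which wraps past 180 and exercises the
# two-range split above; the caller supplies any BGR image.
def _extract_red_demo(bgr_im):
    return extract_color(bgr_im, 175, (100, 255), (100, 255))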
def _handle_points(a, b, most_horizontal, most_vertical):
line = (a, b)
most_horizontal.append(line)
most_horizontal[:] = sorted(most_horizontal, key=aspect_ratio, reverse=True)[:2]
most_vertical.append(line)
most_vertical[:] = sorted(most_vertical, key=aspect_ratio)[:2]
def get_corners_from_cornerless_rect(contour):
most_horizontal = []
most_vertical = []
prev_point = None
for (point,) in contour:
if prev_point is None:
prev_point = point
continue
_handle_points(prev_point, point, most_horizontal, most_vertical)
prev_point = point
# Make sure to consider the line between the first and last points.
_handle_points(contour[0][0], prev_point, most_horizontal, most_vertical)
top, bottom = sorted(most_horizontal, key=lambda (j, k): (j[1] + k[1]) / 2)
left, right = sorted(most_vertical, key=lambda (j, k): (j[0] + k[0]) / 2)
tl = find_intersection(left, top)
tr = find_intersection(top, right)
br = find_intersection(right, bottom)
bl = find_intersection(bottom, left)
points = np.array((tl, tr, br, bl))
return points
def aspect_ratio(line):
(x1, y1), (x2, y2) = line
denominator = float(abs(y2 - y1))
if denominator == 0:
return float("inf")
return float(abs(x2 - x1)) / denominator
def find_intersection(line_a, line_b):
# Math'ed the shit out of this
# https://en.wikipedia.org/wiki/Line%E2%80%93line_intersection#Given_two_points_on_each_line
(x1, y1), (x2, y2) = line_a
(x3, y3), (x4, y4) = line_b
intersect_x = ((x1 * y2 - y1 * x2) * (x3 - x4) - (x1 - x2) * (x3 * y4 - y3 * x4)) / \
((x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4))
intersect_y = ((x1 * y2 - y1 * x2) * (y3 - y4) - (y1 - y2) * (x3 * y4 - y3 * x4)) / \
((x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4))
return intersect_x, intersect_y
def point_closest_to(points, x, y):
def dist(point):
x_offset = point[0][0] - x
y_offset = point[0][1] - y
sqrt = np.math.sqrt(x_offset ** 2 + y_offset ** 2)
return sqrt
return sorted(points, key=dist)[0]
def contour_bounding_box_for_contour(contour):
x, y, w, h = cv2.boundingRect(contour)
contour = np.array([
[x, y],
[x + w, y],
[x + w, y + h],
[x, y + h],
]).reshape((4, 1, 2))
return contour
def get_dimens(im):
h, w = im.shape[:2]
return w, h
def get_contours(im, close_and_open=True):
im = im.copy()
if close_and_open:
structuring_element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (15, 15))
structuring_element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
im = cv2.morphologyEx(im, cv2.MORPH_CLOSE, structuring_element1)
im = cv2.morphologyEx(im, cv2.MORPH_OPEN, structuring_element2)
return cv2.findContours(im, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]
def get_width(im):
return get_dimens(im)[0]
def get_height(im):
return get_dimens(im)[1]
def ls_debug(start_num=None, end_num=None, explicit_options=None):
assert (start_num is None) == (end_num is None) and \
(start_num is None) != (explicit_options is None), \
"Should specify either start and end or explicit"
if start_num is not None:
explicit_options = range(start_num, end_num + 1)
for path in ls(DATA_DIR + "module_specific_data/debug/"):
name, _ = os.path.splitext(os.path.basename(path))
try:
num = int(name)
except ValueError:
continue
if num in explicit_options:
yield path
def get_subset(im, x_percents, y_percents, margin_percent=0):
w, h = get_dimens(im)
left, right, top, bottom = [
int((percent * full)/100.0) for percent, full in zip(x_percents + y_percents, (w, w, h, h))
]
if margin_percent != 0:
half_w = (right - left) / 2.0
half_h = (bottom - top) / 2.0
pos_dist_to_center_pairs = (
(left, half_w),
(right, -half_w),
(top, half_h),
(bottom, -half_h),
)
left, right, top, bottom = [
int(pos + dist_to_center - (dist_to_center * (100 + margin_percent) / 100.0))
for pos, dist_to_center in pos_dist_to_center_pairs
]
return im[top:bottom, left:right]
def rotate_image_180(image):
image = cv2.flip(image, 0)
image = cv2.flip(image, 1)
return image
def rotate_image_clockwise(image):
image = cv2.transpose(image)
image = cv2.flip(image, 1)
return image
def rotate_image_counter_clockwise(image):
image = cv2.transpose(image)
image = cv2.flip(image, 0)
return image
def extract_threshold(im, threshold):
return cv2.threshold(im, threshold, 255, 0)[1]
|
python
|
from django.contrib import admin
# Register your models here.
from .models import SiteConfiguration, SingleOrder, Order, UniqueFeature, Size, Color
admin.site.register(Size)
admin.site.register(Color)
admin.site.register(SiteConfiguration)
admin.site.register(UniqueFeature)
|
python
|
import pygame
from pygame import draw
from .player import Player, Enemy
from .utils import Director, TileSet
class lvl1(Director):
def __init__(self) -> None:
super().__init__(self)
self.tile = TileSet("assets/tileset.png", 10, 28)
self.tile.gen_map(self.alto, self.ancho, 52)
self.plr = Player()
self.enm = Enemy()
def update(self):
self.plr.update()
self.enm.update()
if pygame.sprite.collide_rect(self.plr, self.enm):
if self.plr.pos in [1, 3]:
if self.plr.rect.top <= self.enm.rect.bottom and self.enm.rect.bottom < self.plr.rect.bottom:
self.plr.rect.top = self.enm.rect.bottom
elif self.plr.rect.bottom >= self.enm.rect.top and self.enm.rect.top > self.plr.rect.top:
self.plr.rect.bottom = self.enm.rect.top
if self.plr.pos in [0, 2]:
if self.plr.rect.left <= self.enm.rect.right and self.enm.rect.right < self.plr.rect.right:
self.plr.rect.left = self.enm.rect.right
elif self.plr.rect.right >= self.enm.rect.left and self.enm.rect.left > self.plr.rect.left:
self.plr.rect.right = self.enm.rect.left
def draw(self, screen):
screen.fill((0, 0, 0))
self.tile.draw(self.screen,self.tile.tilemap)
self.plr.draw(self.screen)
self.enm.draw(self.screen)
|
python
|
from django.urls import path, include
from .views import *
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register('titles', TitleViewSet, basename='titles')
router.register('categories', CategoryViewSet, basename='categories')
router.register('genres', GenreViewSet, basename='genres')
urlpatterns = [
path('', include(router.urls)),
]
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-06-09 10:07
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django_prices.models
class Migration(migrations.Migration):
dependencies = [
('order', '0016_order_language_code'),
]
operations = [
migrations.AlterField(
model_name='deliverygroup',
name='shipping_price',
field=django_prices.models.PriceField(currency='KES', decimal_places=4, default=0, editable=False, max_digits=12, verbose_name='shipping price'),
),
migrations.AlterField(
model_name='order',
name='billing_address',
field=models.ForeignKey(default=1, editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='userprofile.Address', verbose_name='billing address'),
),
migrations.AlterField(
model_name='order',
name='discount_amount',
field=django_prices.models.PriceField(blank=True, currency='KES', decimal_places=2, max_digits=12, null=True, verbose_name='discount amount'),
),
migrations.AlterField(
model_name='order',
name='token',
field=models.CharField(max_length=36, null=True, verbose_name='token'),
),
migrations.AlterField(
model_name='order',
name='total_net',
field=django_prices.models.PriceField(blank=True, currency='KES', decimal_places=2, max_digits=12, null=True, verbose_name='total net'),
),
migrations.AlterField(
model_name='order',
name='total_tax',
field=django_prices.models.PriceField(blank=True, currency='KES', decimal_places=2, max_digits=12, null=True, verbose_name='total tax'),
),
migrations.AlterField(
model_name='ordernote',
name='content',
field=models.CharField(max_length=250, verbose_name='content'),
),
]
|
python
|
# -*- coding: utf-8 -*-
##
# @file goldman_equation.py
# @brief Contain a function that calculates the equilibrium potential using the Goldman equation
# @author Gabriel H Riqueti
# @email [email protected]
# @date 22/04/2021
#
from biomedical_signal_processing import FARADAYS_CONSTANT as F
from biomedical_signal_processing import GAS_CONSTANT as R
import numpy as np
def goldman_equation(temperature, mono_cations_in, mono_cations_out, mono_cations_perm, mono_anions_in, mono_anions_out, mono_anions_perm,):
"""
Calculate the resting membrane potential from monovalent ion concentrations and relative permeabilities
Parameters
----------
temperature : float
Temperature (Kelvin)
mono_cations_in : positive float
Concentration of monovalent cations inside the cell
mono_cations_out : positive float
Concentration of monovalent cations outside the cell
mono_cations_perm : positive float
Relative permeability of monovalent cations
mono_anions_in : positive float
Concentration of monovalent anions inside the cell
mono_anions_out : positive float
Concentration of monovalent anions outside the cell
mono_anions_perm : positive float
Relative permeability of monovalent anions
Returns
-------
e_r : float
Resting membrane potential
"""
if (mono_cations_in <= 0).any() or (mono_cations_out <= 0).any() or (mono_anions_in <= 0).any() or (mono_anions_out <= 0).any():
raise ValueError('The ionic concentrations must have positive values')
if temperature < 0:
raise ValueError('temperature must have non-negative values')
if (np.sum(mono_cations_in * mono_cations_perm) + np.sum(mono_anions_out * mono_anions_perm) == 0
and (np.sum(mono_cations_out * mono_cations_perm) + np.sum(mono_anions_in * mono_anions_perm)) == 0):
# both weighted sums are zero, so the log argument is undefined; return 0
return 0.0
return (R * temperature / F) * np.log(
(np.sum(mono_cations_out * mono_cations_perm) + np.sum(mono_anions_in * mono_anions_perm)) /
(np.sum(mono_cations_in * mono_cations_perm) + np.sum(mono_anions_out * mono_anions_perm))
)
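# Hedged usage sketch (not part of the original module): the concentrations and
# relative permeabilities below are illustrative textbook-style values (mM) for
# K+, Na+ and Cl-; the function computes
# E_m = (R*T/F) * ln((P_K*[K]o + P_Na*[Na]o + P_Cl*[Cl]i) / (P_K*[K]i + P_Na*[Na]i + P_Cl*[Cl]o)).
if __name__ == '__main__':
    cations_in = np.array([140.0, 15.0])    # [K+]_in, [Na+]_in
    cations_out = np.array([5.0, 145.0])    # [K+]_out, [Na+]_out
    cations_perm = np.array([1.0, 0.05])    # P_K, P_Na (relative)
    anions_in = np.array([10.0])            # [Cl-]_in
    anions_out = np.array([110.0])          # [Cl-]_out
    anions_perm = np.array([0.45])          # P_Cl (relative)
    e_m = goldman_equation(310.0, cations_in, cations_out, cations_perm,
                           anions_in, anions_out, anions_perm)
    print('Resting potential: %.1f mV' % (e_m * 1e3))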
|
python
|
# Copyright 2015 Abhijit Menon-Sen <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import string
import textwrap
from ansible import constants as C
from ansible.compat.tests import mock
from ansible.compat.tests import unittest
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_text
from units.mock.path import mock_unfrackpath_noop
from ansible.inventory.manager import InventoryManager, split_host_pattern
from units.mock.loader import DictDataLoader
class TestInventory(unittest.TestCase):
patterns = {
'a': ['a'],
'a, b': ['a', 'b'],
'a , b': ['a', 'b'],
' a,b ,c[1:2] ': ['a', 'b', 'c[1:2]'],
'9a01:7f8:191:7701::9': ['9a01:7f8:191:7701::9'],
'9a01:7f8:191:7701::9,9a01:7f8:191:7701::9': ['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9'],
'9a01:7f8:191:7701::9,9a01:7f8:191:7701::9,foo': ['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9', 'foo'],
'foo[1:2]': ['foo[1:2]'],
'a::b': ['a::b'],
'a:b': ['a', 'b'],
' a : b ': ['a', 'b'],
'foo:bar:baz[1:2]': ['foo', 'bar', 'baz[1:2]'],
}
pattern_lists = [
[['a'], ['a']],
[['a', 'b'], ['a', 'b']],
[['a, b'], ['a', 'b']],
[['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9,foo'],
['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9', 'foo']]
]
# pattern_string: [ ('base_pattern', (a,b)), ['x','y','z'] ]
# a,b are the bounds of the subscript; x..z are the results of the subscript
# when applied to string.ascii_letters.
subscripts = {
'a': [('a', None), list(string.ascii_letters)],
'a[0]': [('a', (0, None)), ['a']],
'a[1]': [('a', (1, None)), ['b']],
'a[2:3]': [('a', (2, 3)), ['c', 'd']],
'a[-1]': [('a', (-1, None)), ['Z']],
'a[-2]': [('a', (-2, None)), ['Y']],
'a[48:]': [('a', (48, -1)), ['W', 'X', 'Y', 'Z']],
'a[49:]': [('a', (49, -1)), ['X', 'Y', 'Z']],
'a[1:]': [('a', (1, -1)), list(string.ascii_letters[1:])],
}
ranges_to_expand = {
'a[1:2]': ['a1', 'a2'],
'a[1:10:2]': ['a1', 'a3', 'a5', 'a7', 'a9'],
'a[a:b]': ['aa', 'ab'],
'a[a:i:3]': ['aa', 'ad', 'ag'],
'a[a:b][c:d]': ['aac', 'aad', 'abc', 'abd'],
'a[0:1][2:3]': ['a02', 'a03', 'a12', 'a13'],
'a[a:b][2:3]': ['aa2', 'aa3', 'ab2', 'ab3'],
}
def setUp(self):
fake_loader = DictDataLoader({})
self.i = InventoryManager(loader=fake_loader, sources=[None])
def test_split_patterns(self):
for p in self.patterns:
r = self.patterns[p]
self.assertEqual(r, split_host_pattern(p))
for p, r in self.pattern_lists:
self.assertEqual(r, split_host_pattern(p))
def test_ranges(self):
for s in self.subscripts:
r = self.subscripts[s]
self.assertEqual(r[0], self.i._split_subscript(s))
self.assertEqual(
r[1],
self.i._apply_subscript(
list(string.ascii_letters),
r[0][1]
)
)
class TestInventoryPlugins(unittest.TestCase):
def test_empty_inventory(self):
inventory = self._get_inventory('')
self.assertIn('all', inventory.groups)
self.assertIn('ungrouped', inventory.groups)
self.assertFalse(inventory.groups['all'].get_hosts())
self.assertFalse(inventory.groups['ungrouped'].get_hosts())
def test_ini(self):
self._test_default_groups("""
host1
host2
host3
[servers]
host3
host4
host5
""")
def test_ini_explicit_ungrouped(self):
self._test_default_groups("""
[ungrouped]
host1
host2
host3
[servers]
host3
host4
host5
""")
def test_ini_variables_stringify(self):
values = ['string', 'no', 'No', 'false', 'FALSE', [], False, 0]
inventory_content = "host1 "
inventory_content += ' '.join(['var%s=%s' % (i, to_text(x)) for i, x in enumerate(values)])
inventory = self._get_inventory(inventory_content)
variables = inventory.get_host('host1').vars
for i in range(len(values)):
if isinstance(values[i], string_types):
self.assertIsInstance(variables['var%s' % i], string_types)
else:
self.assertIsInstance(variables['var%s' % i], type(values[i]))
@mock.patch('ansible.inventory.manager.unfrackpath', mock_unfrackpath_noop)
@mock.patch('os.path.exists', lambda x: True)
@mock.patch('os.access', lambda x, y: True)
def test_yaml_inventory(self, filename="test.yaml"):
inventory_content = {filename: textwrap.dedent("""\
---
all:
hosts:
test1:
test2:
""")}
C.INVENTORY_ENABLED = ['yaml']
fake_loader = DictDataLoader(inventory_content)
im = InventoryManager(loader=fake_loader, sources=filename)
self.assertTrue(im._inventory.hosts)
self.assertIn('test1', im._inventory.hosts)
self.assertIn('test2', im._inventory.hosts)
self.assertIn(im._inventory.get_host('test1'), im._inventory.groups['all'].hosts)
self.assertIn(im._inventory.get_host('test2'), im._inventory.groups['all'].hosts)
self.assertEqual(len(im._inventory.groups['all'].hosts), 2)
self.assertIn(im._inventory.get_host('test1'), im._inventory.groups['ungrouped'].hosts)
self.assertIn(im._inventory.get_host('test2'), im._inventory.groups['ungrouped'].hosts)
self.assertEqual(len(im._inventory.groups['ungrouped'].hosts), 2)
def _get_inventory(self, inventory_content):
fake_loader = DictDataLoader({__file__: inventory_content})
return InventoryManager(loader=fake_loader, sources=[__file__])
def _test_default_groups(self, inventory_content):
inventory = self._get_inventory(inventory_content)
self.assertIn('all', inventory.groups)
self.assertIn('ungrouped', inventory.groups)
all_hosts = set(host.name for host in inventory.groups['all'].get_hosts())
self.assertEqual(set(['host1', 'host2', 'host3', 'host4', 'host5']), all_hosts)
ungrouped_hosts = set(host.name for host in inventory.groups['ungrouped'].get_hosts())
self.assertEqual(set(['host1', 'host2']), ungrouped_hosts)
servers_hosts = set(host.name for host in inventory.groups['servers'].get_hosts())
self.assertEqual(set(['host3', 'host4', 'host5']), servers_hosts)
|
python
|
''' Given a binary string str of length N, the task is to find the maximum count of substrings str can be divided into such that all the substrings are balanced i.e. they have equal number of 0s and 1s. If it is not possible to split str satisfying the conditions then print -1.
Example:
Input: str = “0100110101”
Output: 4
The required substrings are “01”, “0011”, “01” and “01”.
Approach:
Initialize count = 0 and traverse the string character by character and keep track of the number of 0s and 1s so far, whenever the count of 0s and 1s become equal increment the count. If the count of 0s and 1s in the original string is not equal then print -1 else print the value of count after the traversal of the complete string.'''
s = input()
count_sub = 0
count_0 = 0
count_1 = 0
for c in s:
    if c == '0':
        count_0 += 1
    else:
        count_1 += 1
    if count_0 == count_1:
        count_sub += 1
# A split is only possible if the whole string has equal numbers of 0s and 1s
if count_0 != count_1:
    print(-1)
else:
    print(count_sub)
|
python
|
from Cocoa import *
gNumDaysInMonth = ( 0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 )
def isLeap(year):
return (((year % 4) == 0 and ((year % 100) != 0)) or (year % 400) == 0)
class CalendarMatrix (NSMatrix):
lastMonthButton = objc.IBOutlet()
monthName = objc.IBOutlet()
nextMonthButton = objc.IBOutlet()
__slots__ = ('_selectedDay', '_startOffset')
def initWithFrame_(self, frameRect):
self._selectedDay = None
self._startOffset = 0
cell = NSButtonCell.alloc().initTextCell_("")
now = NSCalendarDate.date()
cell.setShowsStateBy_(NSOnOffButton)
self.initWithFrame_mode_prototype_numberOfRows_numberOfColumns_(
            frameRect, NSRadioModeMatrix, cell, 6, 7)
count = 0
for i in range(6):
for j in range(7):
val = self.cellAtRow_column_(i, j)
if val:
val.setTag_(count)
count += 1
self._selectedDay = NSCalendarDate.dateWithYear_month_day_hour_minute_second_timeZone_(
now.yearOfCommonEra(),
now.monthOfYear(),
now.dayOfMonth(),
0,
0,
0,
NSTimeZone.localTimeZone())
return self
@objc.IBAction
def choseDay_(self, sender):
prevSelDate = self.selectedDay()
selDay = self.selectedCell().tag() - self._startOffset + 1
selDate = NSCalendarDate.dateWithYear_month_day_hour_minute_second_timeZone_(
prevSelDate.yearOfCommonEra(),
prevSelDate.monthOfYear(),
selDay,
0,
0,
0,
NSTimeZone.localTimeZone())
self.setSelectedDay_(selDate)
self.highlightTodayIfVisible()
if self.delegate().respondsToSelector_('calendarMatrix:didChangeToDate:'):
self.delegate().calendarMatrix_didChangeToDate_(
self, selDate)
@objc.IBAction
def monthChanged_(self, sender):
thisDate = self.selectedDay()
currentYear = thisDate.yearOfCommonEra()
currentMonth = thisDate.monthOfYear()
if sender is self.nextMonthButton:
if currentMonth == 12:
currentMonth = 1
currentYear += 1
else:
currentMonth += 1
else:
if currentMonth == 1:
currentMonth = 12
currentYear -= 1
else:
currentMonth -= 1
self.setSelectedDay_(NSCalendarDate.dateWithYear_month_day_hour_minute_second_timeZone_(currentYear, currentMonth, 1, 0, 0, 0, NSTimeZone.localTimeZone()))
self.refreshCalendar()
self.choseDay_(self)
def setSelectedDay_(self, newDay):
self._selectedDay = newDay
def selectedDay(self):
return self._selectedDay
def refreshCalendar(self):
selDate = self.selectedDay()
currentMonth = selDate.monthOfYear()
currentYear = selDate.yearOfCommonEra()
firstOfMonth = NSCalendarDate.dateWithYear_month_day_hour_minute_second_timeZone_(
currentYear,
currentMonth,
1,
0,
0,
0,
NSTimeZone.localTimeZone())
self.monthName.setStringValue_(
firstOfMonth.descriptionWithCalendarFormat_("%B %Y"))
daysInMonth = gNumDaysInMonth[currentMonth]
if (currentMonth == 2) and isLeap(currentYear):
daysInMonth += 1
self._startOffset = firstOfMonth.dayOfWeek()
dayLabel = 1
for i in range(42):
cell = self.cellWithTag_(i)
if cell is None:
continue
if i < self._startOffset or i >= (daysInMonth + self._startOffset):
# blank out unused cells in the matrix
cell.setBordered_(False)
cell.setEnabled_(False)
cell.setTitle_("")
cell.setCellAttribute_to_(NSCellHighlighted, False)
else:
# Fill in valid days in the matrix
cell.setBordered_(True)
cell.setEnabled_(True)
cell.setFont_(NSFont.systemFontOfSize_(12))
cell.setTitle_(str(dayLabel))
dayLabel += 1
cell.setCellAttribute_to_(NSCellHighlighted, False)
self.selectCellWithTag_(selDate.dayOfMonth() + self._startOffset - 1)
self.highlightTodayIfVisible()
def highlightTodayIfVisible(self):
now = NSCalendarDate.date()
selDate = self.selectedDay()
if (selDate.yearOfCommonEra() == now.yearOfCommonEra()
and selDate.monthOfYear() == now.monthOfYear()
and selDate.dayOfMonth() == now.dayOfMonth()):
aCell = self.cellWithTag_(
now.dayOfMonth() + self._startOffset - 1)
aCell.setHighlightsBy_(NSMomentaryChangeButton)
aCell.setCellAttribute_to_(NSCellHighlighted, True)
def awakeFromNib(self):
self.setTarget_(self)
self.setAction_('choseDay:')
self.setAutosizesCells_(True)
self.refreshCalendar()
self.choseDay_(self)
|
python
|
import warnings
from collections import OrderedDict
from ConfigSpace import ConfigurationSpace
from ConfigSpace.hyperparameters import CategoricalHyperparameter
from mindware.components.feature_engineering.transformations import _bal_balancer, _preprocessor, _rescaler, \
_image_preprocessor, _text_preprocessor, _bal_addons, _imb_balancer, _gen_addons, _res_addons, _sel_addons, \
EmptyTransformer
from mindware.components.utils.class_loader import get_combined_fe_candidtates
from mindware.components.utils.constants import CLS_TASKS
from mindware.components.feature_engineering import TRANS_CANDIDATES
builtin_stage = ['balancer', 'preprocessor', 'rescaler']
stage_list = ['balancer', 'preprocessor', 'rescaler']
thirdparty_candidates_dict = OrderedDict()
def set_stage(udf_stage_list, stage_candidates_dict):
    '''
    :param udf_stage_list: List of stage names, e.g. ['my_stage', 'selector'].
    :param stage_candidates_dict: Dictionary mapping each stage name to the list of
        operators available in that stage. Each operator must be a Transformer.
    :return:
    '''
global stage_list
stage_list = udf_stage_list
print("Current Stage: %s" % ', '.join(stage_list))
for stage in udf_stage_list:
if stage in builtin_stage:
print("Built-in stage '%s' found!" % stage)
else:
print("User-defined stage '%s' found!" % stage)
if stage not in stage_candidates_dict:
raise ValueError("Expected stage name '%s' in stage_candidates_dict." % stage)
if len(stage_candidates_dict[stage]) == 0:
warnings.warn("Candidate list for stage '%s' is empty! EmptyTransformer will be used instead!" % stage)
stage_candidates_dict[stage] = [EmptyTransformer]
thirdparty_candidates_dict[stage] = {candidate.__name__: candidate for candidate in
stage_candidates_dict[stage]}
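# A minimal sketch of registering a user-defined stage (an illustrative addition;
# the stage name 'my_stage' and the reuse of EmptyTransformer as its only candidate
# are assumptions, not project defaults, and built-in stages are assumed to keep
# their default candidate sets):
#
#     set_stage(['balancer', 'my_stage', 'preprocessor', 'rescaler'],
#               {'my_stage': [EmptyTransformer]})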
def get_task_hyperparameter_space(task_type, include_preprocessors=None,
include_text=False, include_image=False, if_imbal=False,
optimizer='smac'):
"""
Fetch the underlying hyperparameter space for feature engineering.
Pipeline Space:
1. balancer: weight_balancer,
data_balancer.
2. scaler: normalizer, scaler, quantile.
3. preprocessor
:return: hyper space.
"""
if task_type in CLS_TASKS:
trans_types = TRANS_CANDIDATES['classification'].copy()
else:
trans_types = TRANS_CANDIDATES['regression'].copy()
_preprocessor_candidates, trans_types = get_combined_fe_candidtates(_preprocessor, _gen_addons, trans_types)
_preprocessor_candidates, trans_types = get_combined_fe_candidtates(_preprocessor_candidates, _sel_addons,
trans_types)
_rescaler_candidates, trans_types = get_combined_fe_candidtates(_rescaler, _res_addons, trans_types)
    if not if_imbal:
        _balancer_candidates, trans_types = get_combined_fe_candidtates(_bal_balancer, _bal_addons, trans_types)
    else:
        _balancer_candidates, trans_types = get_combined_fe_candidtates(_imb_balancer, _bal_addons, trans_types)
# TODO: Avoid transformations, which would take too long
# feature_learning = ["kitchen_sinks", "kernel_pca", "nystroem_sampler"]
# if task_type in CLS_TASKS:
# classifier_set = ["adaboost", "decision_tree", "extra_trees",
# "gradient_boosting", "k_nearest_neighbors",
# "libsvm_svc", "random_forest", "gaussian_nb",
# "decision_tree", "lightgbm"]
#
# if estimator_id in classifier_set:
# for tran_id in [12, 13, 15]:
# if tran_id in trans_types:
# trans_types.remove(tran_id)
preprocessor = dict()
if include_preprocessors:
for key in include_preprocessors:
if key not in _preprocessor_candidates:
raise ValueError(
"Preprocessor %s not in built-in preprocessors! Only the following preprocessors are supported: %s." % (
key, ','.join(_preprocessor_candidates.keys())))
preprocessor[key] = _preprocessor_candidates[key]
trans_types.append(_preprocessor_candidates[key].type)
else:
preprocessor = _preprocessor_candidates
configs = dict()
if include_image:
image_preprocessor_dict = _get_configuration_space(_image_preprocessor, optimizer=optimizer)
configs['image_preprocessor'] = image_preprocessor_dict
if include_text:
text_preprocessor_dict = _get_configuration_space(_text_preprocessor, optimizer=optimizer)
configs['text_preprocessor'] = text_preprocessor_dict
for stage in stage_list:
if stage == 'preprocessor':
stage_dict = _get_configuration_space(preprocessor, trans_types, optimizer=optimizer)
elif stage == 'rescaler':
stage_dict = _get_configuration_space(_rescaler_candidates, trans_types, optimizer=optimizer)
elif stage == 'balancer':
if task_type in CLS_TASKS:
                stage_dict = _get_configuration_space(_balancer_candidates, optimizer=optimizer)
else:
stage_dict = None
else:
# Third party stage
trans_types.extend([candidate.type for _, candidate in thirdparty_candidates_dict[stage].items()])
stage_dict = _get_configuration_space(thirdparty_candidates_dict[stage], trans_types, optimizer=optimizer)
configs[stage] = stage_dict
cs = _build_hierachical_configspace(configs, optimizer=optimizer)
return cs
def _get_configuration_space(builtin_transformers, trans_type=None, optimizer='smac'):
config_dict = dict()
for tran_key in builtin_transformers:
tran = builtin_transformers[tran_key]
tran_id = tran.type
if trans_type is None or tran_id in trans_type:
try:
sub_configuration_space = builtin_transformers[tran_key].get_hyperparameter_search_space(
optimizer=optimizer)
config_dict[tran_key] = sub_configuration_space
            except Exception:
if optimizer == 'smac':
config_dict[tran_key] = ConfigurationSpace()
elif optimizer == 'tpe':
config_dict[tran_key] = {}
return config_dict
def _add_hierachical_configspace(cs, config, parent_name):
config_cand = list(config.keys())
config_option = CategoricalHyperparameter(parent_name, config_cand,
default_value=config_cand[0])
cs.add_hyperparameter(config_option)
for config_item in config_cand:
sub_configuration_space = config[config_item]
parent_hyperparameter = {'parent': config_option,
'value': config_item}
cs.add_configuration_space(config_item, sub_configuration_space,
parent_hyperparameter=parent_hyperparameter)
def _build_hierachical_configspace(configs, optimizer='smac'):
if optimizer == 'smac':
cs = ConfigurationSpace()
for config_key in configs:
if configs[config_key] is not None:
_add_hierachical_configspace(cs, configs[config_key], config_key)
return cs
elif optimizer == 'tpe':
from hyperopt import hp
space = {}
def dict2hi(dictionary):
hi_list = list()
for key in dictionary:
hi_list.append((key, dictionary[key]))
return hi_list
for config_key in configs:
if configs[config_key] is not None:
space[config_key] = hp.choice(config_key, dict2hi(configs[config_key]))
return space
|
python
|
# Copyright 2020 by Christophe Lambin
# All rights reserved.
import logging
from prometheus_client import start_http_server
from libpimon.version import version
from libpimon.configuration import print_configuration
from libpimon.cpu import CPUTempProbe, CPUFreqProbe
from libpimon.gpio import GPIOProbe
from libpimon.openvpn import OpenVPNProbe, OpenVPNStatusProbe
from libpimon.mediacentre import TransmissionProbe, MonitorProbe
from pimetrics.scheduler import Scheduler
def initialise(config):
scheduler = Scheduler()
# Probes
if config.monitor_cpu:
try:
scheduler.register(
CPUFreqProbe(config.freq_filename),
5
)
scheduler.register(
CPUTempProbe(config.temp_filename, 1000),
5
)
except FileNotFoundError as err:
logging.warning(f'Could not add CPU monitor(s): {err}')
if config.monitor_fan:
try:
scheduler.register(
GPIOProbe(config.monitor_fan_pin),
5
)
except RuntimeError:
logging.warning('Could not add Fan monitor. Possibly /dev/gpiomem isn\'t accessible?')
if config.monitor_vpn:
try:
scheduler.register(
OpenVPNProbe(config.monitor_vpn_client_status),
5
)
except FileNotFoundError as err:
logging.warning(f'Could not add OpenVPN monitor: {err}')
if config.monitor_vpn_proxies:
scheduler.register(
OpenVPNStatusProbe(config.monitor_vpn_proxies),
60
)
else:
logging.warning('No VPN Proxies defined. VPN status monitoring is disabled')
if config.monitor_mediaserver:
if config.monitor_mediaserver_transmission:
scheduler.register(
TransmissionProbe(config.monitor_mediaserver_transmission),
5
)
if config.monitor_mediaserver_sonarr:
scheduler.register(
MonitorProbe(
config.monitor_mediaserver_sonarr, MonitorProbe.App.sonarr,
config.monitor_mediaserver_sonarr_apikey),
60
)
if config.monitor_mediaserver_radarr:
scheduler.register(
MonitorProbe(
config.monitor_mediaserver_radarr, MonitorProbe.App.radarr,
config.monitor_mediaserver_radarr_apikey),
60
)
return scheduler
def pimon(config):
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S',
level=logging.DEBUG if config.debug else logging.INFO)
logging.info(f'Starting pimon v{version}')
logging.info(f'Configuration: {print_configuration(config)}')
start_http_server(config.port)
scheduler = initialise(config)
if config.once:
scheduler.run(once=True)
else:
while True:
scheduler.run(duration=config.interval)
return 0
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ... import opcodes as OperandDef
from ...serialize import BoolField, KeyField
from ..utils import to_numpy
from ..core import TensorOrder
from .core import TensorHasInput
class TensorFromDataFrame(TensorHasInput):
""" represent tensor from DataFrame """
_op_type_ = OperandDef.TENSOR_FROM_DATAFRAME
_input = KeyField('_input')
_extract_multi_index = BoolField('extract_multi_index')
def __init__(self, extract_multi_index=False, **kw):
super().__init__(_extract_multi_index=extract_multi_index, **kw)
@classmethod
def execute(cls, ctx, op: 'TensorFromDataFrame'):
df = ctx[op.inputs[0].key]
if op._extract_multi_index:
df = df.to_frame()
ctx[op.outputs[0].key] = to_numpy(df).astype(op.dtype, order='F')
@classmethod
def tile(cls, op: 'TensorFromDataFrame'):
output = op.outputs[0]
out_chunks = []
for c in op.input.chunks:
shape = (c.shape[0], output.shape[1]) if op._extract_multi_index else c.shape
index = (c.index[0], 0) if op._extract_multi_index else c.index
out_chunk = op.copy().reset_key().new_chunk(
[c], shape=shape, index=index, order=output.order)
out_chunks.append(out_chunk)
new_op = op.copy()
nsplits = (op.input.nsplits[0], (output.shape[1],)) if op._extract_multi_index else op.input.nsplits
return new_op.new_tensors(op.inputs, output.shape, order=output.order,
chunks=out_chunks, nsplits=nsplits)
def __call__(self, a, order=None):
from ...dataframe.core import INDEX_TYPE, IndexValue
if self._extract_multi_index and isinstance(a, INDEX_TYPE) \
and isinstance(a.index_value.value, IndexValue.MultiIndex):
order = a.order if order is None else order
return self.new_tensor([a], (a.shape[0], len(a.index_value.value.names)), order=order)
else:
self._extract_multi_index = False
return super().__call__(a, order=order)
def from_dataframe(in_df, dtype=None):
from ...dataframe.utils import build_empty_df
if dtype is None:
empty_pdf = build_empty_df(in_df.dtypes)
dtype = to_numpy(empty_pdf).dtype
op = TensorFromDataFrame(dtype=dtype, gpu=in_df.op.gpu)
return op(in_df, order=TensorOrder.F_ORDER) # return tensor with F-order always
def from_series(in_series, dtype=None):
op = TensorFromDataFrame(dtype=dtype or in_series.dtype, gpu=in_series.op.gpu)
return op(in_series, order=TensorOrder.F_ORDER) # return tensor with F-order always
def from_index(in_index, dtype=None, extract_multi_index=False):
op = TensorFromDataFrame(dtype=dtype or in_index.dtype, gpu=in_index.op.gpu,
extract_multi_index=extract_multi_index)
return op(in_index, order=TensorOrder.F_ORDER) # return tensor with F-order always
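# A minimal usage sketch (an illustrative addition; `df` stands for an existing mars
# DataFrame and the column name 'a' is an assumption):
#
#     t = from_dataframe(df)       # 2-D tensor holding the frame's values, F-order
#     s = from_series(df['a'])     # 1-D tensor built from a single column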
|
python
|
# reference: https://github.com/open-mmlab/mmclassification/tree/master/mmcls/models/backbones
# modified from mmclassification timm_backbone.py
try:
import timm
except ImportError:
timm = None
from mmcv.cnn.bricks.registry import NORM_LAYERS
from openmixup.utils import get_root_logger
from ..builder import BACKBONES
from .base_backbone import BaseBackbone
def print_timm_feature_info(feature_info):
"""Print feature_info of timm backbone to help development and debug.
Args:
feature_info (list[dict] | timm.models.features.FeatureInfo | None):
feature_info of timm backbone.
"""
logger = get_root_logger()
if feature_info is None:
logger.warning('This backbone does not have feature_info')
elif isinstance(feature_info, list):
for feat_idx, each_info in enumerate(feature_info):
logger.info(f'backbone feature_info[{feat_idx}]: {each_info}')
else:
try:
logger.info(f'backbone out_indices: {feature_info.out_indices}')
logger.info(f'backbone out_channels: {feature_info.channels()}')
logger.info(f'backbone out_strides: {feature_info.reduction()}')
except AttributeError:
logger.warning('Unexpected format of backbone feature_info')
@BACKBONES.register_module()
class TIMMBackbone(BaseBackbone):
"""Wrapper to use backbones from timm library.
More details can be found in
`timm <https://github.com/rwightman/pytorch-image-models>`_.
See especially the document for `feature extraction
<https://rwightman.github.io/pytorch-image-models/feature_extraction/>`_.
Args:
model_name (str): Name of timm model to instantiate.
in_channels (int): Number of input image channels. Defaults to 3.
num_classes (int): Number of classes for classification head (used when
features_only is False). Default to 1000.
features_only (bool): Whether to extract feature pyramid (multi-scale
feature maps from the deepest layer at each stride) by using timm
supported `forward_features()`. Defaults to False.
pretrained (bool): Whether to load pretrained weights.
Defaults to False.
checkpoint_path (str): Path of checkpoint to load at the last of
``timm.create_model``. Defaults to empty string, which means
not loading.
init_cfg (dict or list[dict], optional): Initialization config dict of
OpenMMLab projects (removed!). Defaults to None.
**kwargs: Other timm & model specific arguments.
"""
def __init__(self,
model_name,
num_classes=1000,
in_channels=3,
features_only=False,
pretrained=False,
checkpoint_path='',
**kwargs):
if timm is None:
raise RuntimeError(
'Failed to import timm. Please run "pip install timm". '
'"pip install dataclasses" may also be needed for Python 3.6.')
if not isinstance(pretrained, bool):
raise TypeError('pretrained must be bool, not str for model path')
super(TIMMBackbone, self).__init__()
if 'norm_layer' in kwargs:
kwargs['norm_layer'] = NORM_LAYERS.get(kwargs['norm_layer'])
self.timm_model = timm.create_model(
model_name=model_name,
pretrained=pretrained,
in_chans=in_channels,
checkpoint_path=checkpoint_path,
num_classes=0 if features_only else num_classes,
**kwargs)
self.features_only = features_only
# reset classifier
if hasattr(self.timm_model, 'reset_classifier'):
self.timm_model.reset_classifier(0, '')
# Hack to use pretrained weights from timm
if pretrained or checkpoint_path:
self._is_init = True
feature_info = getattr(self.timm_model, 'feature_info', None)
print_timm_feature_info(feature_info)
def forward(self, x):
if self.features_only:
features = self.timm_model.forward_features(x)
else:
features = self.timm_model(x)
if isinstance(features, (list, tuple)):
features = list(features)
else:
features = [features]
return features
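# A minimal usage sketch (an illustrative addition; 'resnet18' is only an example of
# a timm-supported model name, not a project default):
#
#     import torch
#     backbone = TIMMBackbone(model_name='resnet18', pretrained=False)
#     outs = backbone(torch.randn(1, 3, 224, 224))   # list of output tensors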
|
python
|
import torch
import os
import pickle
if torch.cuda.is_available():
device = torch.device('cuda')
else:
device = torch.device('cpu')
class TrainUtil():
def __init__(self, checkpoint_path='checkpoints', version='mcts_nas_net_v1'):
self.checkpoint_path = checkpoint_path
self.version = version
return
def save_model_fixed(self, epoch, fixed_cnn, fixed_cnn_optmizer, save_best=False, **kwargs):
filename = os.path.join(self.checkpoint_path, self.version) + '.pth'
# Torch Save State Dict
state = {
'epoch': epoch+1,
'shared_cnn': fixed_cnn.state_dict(),
'shared_cnn_optmizer': fixed_cnn_optmizer.state_dict(),
}
for key, value in kwargs.items():
state[key] = value
torch.save(state, filename)
filename = os.path.join(self.checkpoint_path, self.version) + '_best.pth'
if save_best:
torch.save(state, filename)
return
def load_model_fixed(self, fixed_cnn, fixed_cnn_optmizer, **kwargs):
filename = os.path.join(self.checkpoint_path, self.version) + '.pth'
# Load Torch State Dict
checkpoints = torch.load(filename)
        # Keys must match those written by save_model_fixed
        fixed_cnn.load_state_dict(checkpoints['shared_cnn'])
        fixed_cnn_optmizer.load_state_dict(checkpoints['shared_cnn_optmizer'])
print(filename + " Loaded!")
return checkpoints
def save_model(self,
mcts,
shared_cnn,
shared_cnn_optmizer,
shared_cnn_schduler,
estimator,
estimator_optmizer,
epoch,
**kwargs):
mcts_filename = os.path.join(self.checkpoint_path, self.version) + '_mcts' + '.pkl'
filename = os.path.join(self.checkpoint_path, self.version) + '.pth'
# Torch Save State Dict
state = {
'epoch': epoch+1,
'shared_cnn': shared_cnn.state_dict(),
'shared_cnn_optmizer': shared_cnn_optmizer.state_dict(),
'shared_cnn_schduler': shared_cnn_schduler.state_dict(),
'estimator': estimator.state_dict(),
'estimator_optmizer': estimator_optmizer.state_dict()
}
for key, value in kwargs.items():
state[key] = value
torch.save(state, filename)
print(filename + " saved!")
# Save MCTS to pickle
rolloutPolicy, searchPolicy = mcts.rollout, mcts.searchPolicy
mcts.rollout, mcts.searchPolicy = None, None
with open(mcts_filename, 'wb') as handle:
pickle.dump(mcts, handle, protocol=pickle.HIGHEST_PROTOCOL)
print(mcts_filename + " Saved!")
mcts.rollout, mcts.searchPolicy = rolloutPolicy, searchPolicy
return
def load_model(self,
shared_cnn,
shared_cnn_optmizer,
shared_cnn_schduler,
estimator,
estimator_optmizer,
**kwargs):
filename = os.path.join(self.checkpoint_path, self.version) + '.pth'
mcts_filename = os.path.join(self.checkpoint_path, self.version) + '_mcts' + '.pkl'
# Load Torch State Dict
checkpoints = torch.load(filename)
shared_cnn.load_state_dict(checkpoints['shared_cnn'])
shared_cnn_optmizer.load_state_dict(checkpoints['shared_cnn_optmizer'])
shared_cnn_schduler.load_state_dict(checkpoints['shared_cnn_schduler'])
shared_cnn_schduler.optimizer = shared_cnn_optmizer
estimator.load_state_dict(checkpoints['estimator'])
estimator_optmizer.load_state_dict(checkpoints['estimator_optmizer'])
print(filename + " Loaded!")
# Load MCTS
with open(mcts_filename, 'rb') as handle:
mcts = pickle.load(handle)
print(mcts_filename + " Loaded!")
return checkpoints, mcts
|
python
|
from pymtl import *
from lizard.util.rtl.interface import UseInterface
from lizard.util.rtl.method import MethodSpec
from lizard.core.rtl.messages import ExecuteMsg, WritebackMsg, PipelineMsgStatus
from lizard.util.rtl.pipeline_stage import gen_stage, StageInterface, DropControllerInterface
from lizard.core.rtl.kill_unit import PipelineKillDropController
from lizard.core.rtl.controlflow import KillType
from lizard.config.general import *
def WritebackInterface():
return StageInterface(ExecuteMsg(), WritebackMsg())
class WritebackStage(Model):
def __init__(s, interface):
UseInterface(s, interface)
s.require(
MethodSpec(
'dataflow_write',
args={
'tag': PREG_IDX_NBITS,
'value': XLEN,
},
rets=None,
call=True,
rdy=False,
),)
s.connect(s.process_accepted, 1)
s.is_store_DEBUG = Wire(1)
s.connect(s.is_store_DEBUG, s.process_in_.hdr_is_store)
@s.combinational
def compute():
s.process_out.v = 0
s.process_out.hdr.v = s.process_in_.hdr
s.dataflow_write_call.v = 0
s.dataflow_write_tag.v = 0
s.dataflow_write_value.v = 0
if s.process_in_.hdr_status == PipelineMsgStatus.PIPELINE_MSG_STATUS_VALID:
s.process_out.rd_val_pair.v = s.process_in_.rd_val_pair
s.process_out.areg_d.v = s.process_in_.areg_d
# write the data if the destination is valid
s.dataflow_write_call.v = s.process_in_.rd_val and s.process_call
s.dataflow_write_tag.v = s.process_in_.rd
s.dataflow_write_value.v = s.process_in_.result
else:
s.process_out.exception_info.v = s.process_in_.exception_info
def line_trace(s):
return s.process_in_.hdr_seq.hex()[2:]
def WritebackDropController():
return PipelineKillDropController(
DropControllerInterface(WritebackMsg(), WritebackMsg(),
KillType(MAX_SPEC_DEPTH)))
Writeback = gen_stage(WritebackStage, WritebackDropController)
|
python
|
# Copyright 2021 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import, division, print_function
import os
import sys
from enum import IntEnum, unique
import numpy as np
from legate.core import LegateLibrary, legate_add_library, legion
# Helper method for python 3 support
def _itervalues(obj):
return obj.values() if sys.version_info > (3,) else obj.viewvalues()
# Load the Legate NumPy library first so we have a shard object that
# we can use to initialize all these configuration enumerations
class NumPyLib(LegateLibrary):
def __init__(self, name):
self.name = name
self.runtime = None
def get_name(self):
return self.name
def get_shared_library(self):
from legate.numpy.install_info import libpath
return os.path.join(
libpath, "liblgnumpy" + self.get_library_extension()
)
def get_c_header(self):
from legate.numpy.install_info import header
return header
def get_registration_callback(self):
return "legate_numpy_perform_registration"
def initialize(self, shared_object):
assert self.runtime is None
self.shared_object = shared_object
def set_runtime(self, runtime):
assert self.runtime is None
assert self.shared_object is not None
self.runtime = runtime
def destroy(self):
if self.runtime is not None:
self.runtime.destroy()
NUMPY_LIB_NAME = "legate.numpy"
numpy_lib = NumPyLib(NUMPY_LIB_NAME)
legate_add_library(numpy_lib)
legate_numpy = numpy_lib.shared_object
# Match these to legate_core_type_code_t in legate_c.h
numpy_field_type_offsets = {
np.bool_: legion.LEGION_TYPE_BOOL,
np.int8: legion.LEGION_TYPE_INT8,
np.int16: legion.LEGION_TYPE_INT16,
np.int: legion.LEGION_TYPE_INT32,
np.int32: legion.LEGION_TYPE_INT32,
np.int64: legion.LEGION_TYPE_INT64,
np.uint8: legion.LEGION_TYPE_UINT8,
np.uint16: legion.LEGION_TYPE_UINT16,
np.uint32: legion.LEGION_TYPE_UINT32,
np.uint64: legion.LEGION_TYPE_UINT64,
np.float16: legion.LEGION_TYPE_FLOAT16,
np.float: legion.LEGION_TYPE_FLOAT32,
np.float32: legion.LEGION_TYPE_FLOAT32,
np.float64: legion.LEGION_TYPE_FLOAT64,
np.complex64: legion.LEGION_TYPE_COMPLEX64,
np.complex128: legion.LEGION_TYPE_COMPLEX128,
}
# Match these to NumPyVariantCode in legate_numpy_c.h
@unique
class NumPyVariantCode(IntEnum):
NORMAL = legate_numpy.NUMPY_NORMAL_VARIANT_OFFSET
SCALAR = legate_numpy.NUMPY_SCALAR_VARIANT_OFFSET
BROADCAST = legate_numpy.NUMPY_BROADCAST_VARIANT_OFFSET
REDUCTION = legate_numpy.NUMPY_REDUCTION_VARIANT_OFFSET
INPLACE = legate_numpy.NUMPY_INPLACE_VARIANT_OFFSET
INPLACE_BROADCAST = legate_numpy.NUMPY_INPLACE_BROADCAST_VARIANT_OFFSET
NUMPY_MAX_VARIANTS = len(NumPyVariantCode)
NUMPY_MAX_TYPES = legion.MAX_TYPE_NUMBER
NUMPY_TYPE_OFFSET = NUMPY_MAX_TYPES * NUMPY_MAX_VARIANTS
# Match these to NumPyOpCode in legate_numpy_c.h
@unique
class NumPyOpCode(IntEnum):
ABSOLUTE = legate_numpy.NUMPY_ABSOLUTE
ADD = legate_numpy.NUMPY_ADD
ALLCLOSE = legate_numpy.NUMPY_ALLCLOSE
ARCCOS = legate_numpy.NUMPY_ARCCOS
ARCSIN = legate_numpy.NUMPY_ARCSIN
ARCTAN = legate_numpy.NUMPY_ARCTAN
ARGMAX = legate_numpy.NUMPY_ARGMAX
ARGMAX_RADIX = legate_numpy.NUMPY_ARGMAX_RADIX
ARGMIN = legate_numpy.NUMPY_ARGMIN
ARGMIN_RADIX = legate_numpy.NUMPY_ARGMIN_RADIX
BINCOUNT = legate_numpy.NUMPY_BINCOUNT
CEIL = legate_numpy.NUMPY_CEIL
CLIP = legate_numpy.NUMPY_CLIP
CONVERT = legate_numpy.NUMPY_CONVERT
COPY = legate_numpy.NUMPY_COPY
COS = legate_numpy.NUMPY_COS
DIAG = legate_numpy.NUMPY_DIAG
DIVIDE = legate_numpy.NUMPY_DIVIDE
DOT = legate_numpy.NUMPY_DOT
EQUAL = legate_numpy.NUMPY_EQUAL
EXP = legate_numpy.NUMPY_EXP
EYE = legate_numpy.NUMPY_EYE
FILL = legate_numpy.NUMPY_FILL
FLOOR = legate_numpy.NUMPY_FLOOR
FLOOR_DIVIDE = legate_numpy.NUMPY_FLOOR_DIVIDE
GETARG = legate_numpy.NUMPY_GETARG
GREATER = legate_numpy.NUMPY_GREATER
GREATER_EQUAL = legate_numpy.NUMPY_GREATER_EQUAL
INVERT = legate_numpy.NUMPY_INVERT
ISINF = legate_numpy.NUMPY_ISINF
ISNAN = legate_numpy.NUMPY_ISNAN
LESS = legate_numpy.NUMPY_LESS
LESS_EQUAL = legate_numpy.NUMPY_LESS_EQUAL
LOG = legate_numpy.NUMPY_LOG
LOGICAL_NOT = legate_numpy.NUMPY_LOGICAL_NOT
MAX = legate_numpy.NUMPY_MAX
MAX_RADIX = legate_numpy.NUMPY_MAX_RADIX
MAXIMUM = legate_numpy.NUMPY_MAXIMUM
MIN = legate_numpy.NUMPY_MIN
MIN_RADIX = legate_numpy.NUMPY_MIN_RADIX
MINIMUM = legate_numpy.NUMPY_MINIMUM
MOD = legate_numpy.NUMPY_MOD
MULTIPLY = legate_numpy.NUMPY_MULTIPLY
NEGATIVE = legate_numpy.NUMPY_NEGATIVE
NORM = legate_numpy.NUMPY_NORM
NOT_EQUAL = legate_numpy.NUMPY_NOT_EQUAL
POWER = legate_numpy.NUMPY_POWER
PROD = legate_numpy.NUMPY_PROD
PROD_RADIX = legate_numpy.NUMPY_PROD_RADIX
RAND_INTEGER = legate_numpy.NUMPY_RAND_INTEGER
RAND_NORMAL = legate_numpy.NUMPY_RAND_NORMAL
RAND_UNIFORM = legate_numpy.NUMPY_RAND_UNIFORM
READ = legate_numpy.NUMPY_READ
SIN = legate_numpy.NUMPY_SIN
SORT = legate_numpy.NUMPY_SORT
SQRT = legate_numpy.NUMPY_SQRT
SUBTRACT = legate_numpy.NUMPY_SUBTRACT
SUM = legate_numpy.NUMPY_SUM
SUM_RADIX = legate_numpy.NUMPY_SUM_RADIX
TAN = legate_numpy.NUMPY_TAN
TANH = legate_numpy.NUMPY_TANH
TILE = legate_numpy.NUMPY_TILE
TRANSPOSE = legate_numpy.NUMPY_TRANSPOSE
WHERE = legate_numpy.NUMPY_WHERE
WRITE = legate_numpy.NUMPY_WRITE
LOGICAL_AND = legate_numpy.NUMPY_LOGICAL_AND
LOGICAL_OR = legate_numpy.NUMPY_LOGICAL_OR
LOGICAL_XOR = legate_numpy.NUMPY_LOGICAL_XOR
CONTAINS = legate_numpy.NUMPY_CONTAINS
COUNT_NONZERO = legate_numpy.NUMPY_COUNT_NONZERO
NONZERO = legate_numpy.NUMPY_NONZERO
COUNT_NONZERO_REDUC = legate_numpy.NUMPY_COUNT_NONZERO_REDUC
INCLUSIVE_SCAN = legate_numpy.NUMPY_INCLUSIVE_SCAN
CONVERT_TO_RECT = legate_numpy.NUMPY_CONVERT_TO_RECT
ARANGE = legate_numpy.NUMPY_ARANGE
# Match these to NumPyRedopID in legate_numpy_c.h
@unique
class NumPyRedopCode(IntEnum):
ARGMIN_REDOP = legate_numpy.NUMPY_ARGMIN_REDOP
ARGMAX_REDOP = legate_numpy.NUMPY_ARGMAX_REDOP
numpy_reduction_op_offsets = {
NumPyOpCode.SUM: legion.LEGION_REDOP_KIND_SUM,
NumPyOpCode.PROD: legion.LEGION_REDOP_KIND_PROD,
NumPyOpCode.MIN: legion.LEGION_REDOP_KIND_MIN,
NumPyOpCode.MAX: legion.LEGION_REDOP_KIND_MAX,
# Dot uses sum reduction
NumPyOpCode.DOT: legion.LEGION_REDOP_KIND_SUM,
# Diag uses sum reduction
NumPyOpCode.DIAG: legion.LEGION_REDOP_KIND_SUM,
NumPyOpCode.EQUAL: legion.LEGION_REDOP_KIND_PROD,
NumPyOpCode.NOT_EQUAL: legion.LEGION_REDOP_KIND_PROD,
NumPyOpCode.GREATER: legion.LEGION_REDOP_KIND_PROD,
NumPyOpCode.GREATER_EQUAL: legion.LEGION_REDOP_KIND_PROD,
NumPyOpCode.LESS: legion.LEGION_REDOP_KIND_PROD,
NumPyOpCode.LESS_EQUAL: legion.LEGION_REDOP_KIND_PROD,
NumPyOpCode.ALLCLOSE: legion.LEGION_REDOP_KIND_PROD,
# Norm uses sum reduction
NumPyOpCode.NORM: legion.LEGION_REDOP_KIND_SUM,
NumPyOpCode.ARGMIN: NumPyRedopCode.ARGMIN_REDOP,
NumPyOpCode.ARGMAX: NumPyRedopCode.ARGMAX_REDOP,
# bool sum is "or"
NumPyOpCode.CONTAINS: legion.LEGION_REDOP_KIND_SUM,
# nonzeros are counted with sum
NumPyOpCode.COUNT_NONZERO: legion.LEGION_REDOP_KIND_SUM,
}
# Match these to NumPyTunable in legate_numpy_c.h
@unique
class NumPyTunable(IntEnum):
NUM_PIECES = legate_numpy.NUMPY_TUNABLE_NUM_PIECES
NUM_GPUS = legate_numpy.NUMPY_TUNABLE_NUM_GPUS
TOTAL_NODES = legate_numpy.NUMPY_TUNABLE_TOTAL_NODES
LOCAL_CPUS = legate_numpy.NUMPY_TUNABLE_LOCAL_CPUS
LOCAL_GPUS = legate_numpy.NUMPY_TUNABLE_LOCAL_GPUS
LOCAL_OMPS = legate_numpy.NUMPY_TUNABLE_LOCAL_OPENMPS
RADIX = legate_numpy.NUMPY_TUNABLE_RADIX
MIN_SHARD_VOLUME = legate_numpy.NUMPY_TUNABLE_MIN_SHARD_VOLUME
MAX_EAGER_VOLUME = legate_numpy.NUMPY_TUNABLE_MAX_EAGER_VOLUME
FIELD_REUSE_SIZE = legate_numpy.NUMPY_TUNABLE_FIELD_REUSE_SIZE
FIELD_REUSE_FREQ = legate_numpy.NUMPY_TUNABLE_FIELD_REUSE_FREQUENCY
# Match these to NumPyTag in legate_numpy_c.h
@unique
class NumPyMappingTag(IntEnum):
SUBRANKABLE_TASK_TAG = legate_numpy.NUMPY_SUBRANKABLE_TAG
CPU_ONLY_TASK_TAG = legate_numpy.NUMPY_CPU_ONLY_TAG
GPU_ONLY_TASK_TAG = legate_numpy.NUMPY_GPU_ONLY_TAG
NO_MEMOIZE_TAG = 0 # Turn this off for now since it doesn't help
KEY_REGION_TAG = legate_numpy.NUMPY_KEY_REGION_TAG
RADIX_GEN_TAG = legate_numpy.NUMPY_RADIX_GEN_TAG
RADIX_DIM_TAG = legate_numpy.NUMPY_RADIX_DIM_TAG
RADIX_GEN_SHIFT = 5
RADIX_DIM_SHIFT = 8
# Match these to NumPyProjectionCode in legate_numpy_c.h
@unique
class NumPyProjCode(IntEnum):
# 2D reduction
PROJ_2D_1D_X = legate_numpy.NUMPY_PROJ_2D_1D_X
PROJ_2D_1D_Y = legate_numpy.NUMPY_PROJ_2D_1D_Y
# 2D broadcast
PROJ_2D_2D_X = legate_numpy.NUMPY_PROJ_2D_2D_X
PROJ_2D_2D_Y = legate_numpy.NUMPY_PROJ_2D_2D_Y
# 2D promotion
PROJ_1D_2D_X = legate_numpy.NUMPY_PROJ_1D_2D_X
PROJ_1D_2D_Y = legate_numpy.NUMPY_PROJ_1D_2D_Y
# 2D transpose
PROJ_2D_2D_YX = legate_numpy.NUMPY_PROJ_2D_2D_YX
# 3D reduction
PROJ_3D_2D_XY = legate_numpy.NUMPY_PROJ_3D_2D_XY
PROJ_3D_2D_XZ = legate_numpy.NUMPY_PROJ_3D_2D_XZ
PROJ_3D_2D_YZ = legate_numpy.NUMPY_PROJ_3D_2D_YZ
PROJ_3D_1D_X = legate_numpy.NUMPY_PROJ_3D_1D_X
PROJ_3D_1D_Y = legate_numpy.NUMPY_PROJ_3D_1D_Y
PROJ_3D_1D_Z = legate_numpy.NUMPY_PROJ_3D_1D_Z
# 3D broadcast
PROJ_3D_3D_XY = legate_numpy.NUMPY_PROJ_3D_3D_XY
PROJ_3D_3D_XZ = legate_numpy.NUMPY_PROJ_3D_3D_XZ
PROJ_3D_3D_YZ = legate_numpy.NUMPY_PROJ_3D_3D_YZ
PROJ_3D_3D_X = legate_numpy.NUMPY_PROJ_3D_3D_X
PROJ_3D_3D_Y = legate_numpy.NUMPY_PROJ_3D_3D_Y
PROJ_3D_3D_Z = legate_numpy.NUMPY_PROJ_3D_3D_Z
PROJ_3D_2D_XB = legate_numpy.NUMPY_PROJ_3D_2D_XB
PROJ_3D_2D_BY = legate_numpy.NUMPY_PROJ_3D_2D_BY
# 3D promotion
PROJ_2D_3D_XY = legate_numpy.NUMPY_PROJ_2D_3D_XY
PROJ_2D_3D_XZ = legate_numpy.NUMPY_PROJ_2D_3D_XZ
PROJ_2D_3D_YZ = legate_numpy.NUMPY_PROJ_2D_3D_YZ
PROJ_1D_3D_X = legate_numpy.NUMPY_PROJ_1D_3D_X
PROJ_1D_3D_Y = legate_numpy.NUMPY_PROJ_1D_3D_Y
PROJ_1D_3D_Z = legate_numpy.NUMPY_PROJ_1D_3D_Z
# Radix 2D
PROJ_RADIX_2D_X_4_0 = legate_numpy.NUMPY_PROJ_RADIX_2D_X_4_0
PROJ_RADIX_2D_X_4_1 = legate_numpy.NUMPY_PROJ_RADIX_2D_X_4_1
PROJ_RADIX_2D_X_4_2 = legate_numpy.NUMPY_PROJ_RADIX_2D_X_4_2
PROJ_RADIX_2D_X_4_3 = legate_numpy.NUMPY_PROJ_RADIX_2D_X_4_3
PROJ_RADIX_2D_Y_4_0 = legate_numpy.NUMPY_PROJ_RADIX_2D_Y_4_0
PROJ_RADIX_2D_Y_4_1 = legate_numpy.NUMPY_PROJ_RADIX_2D_Y_4_1
PROJ_RADIX_2D_Y_4_2 = legate_numpy.NUMPY_PROJ_RADIX_2D_Y_4_2
PROJ_RADIX_2D_Y_4_3 = legate_numpy.NUMPY_PROJ_RADIX_2D_Y_4_3
# Radix 3D
PROJ_RADIX_3D_X_4_0 = legate_numpy.NUMPY_PROJ_RADIX_3D_X_4_0
PROJ_RADIX_3D_X_4_1 = legate_numpy.NUMPY_PROJ_RADIX_3D_X_4_1
PROJ_RADIX_3D_X_4_2 = legate_numpy.NUMPY_PROJ_RADIX_3D_X_4_2
PROJ_RADIX_3D_X_4_3 = legate_numpy.NUMPY_PROJ_RADIX_3D_X_4_3
PROJ_RADIX_3D_Y_4_0 = legate_numpy.NUMPY_PROJ_RADIX_3D_Y_4_0
PROJ_RADIX_3D_Y_4_1 = legate_numpy.NUMPY_PROJ_RADIX_3D_Y_4_1
PROJ_RADIX_3D_Y_4_2 = legate_numpy.NUMPY_PROJ_RADIX_3D_Y_4_2
PROJ_RADIX_3D_Y_4_3 = legate_numpy.NUMPY_PROJ_RADIX_3D_Y_4_3
PROJ_RADIX_3D_Z_4_0 = legate_numpy.NUMPY_PROJ_RADIX_3D_Z_4_0
PROJ_RADIX_3D_Z_4_1 = legate_numpy.NUMPY_PROJ_RADIX_3D_Z_4_1
PROJ_RADIX_3D_Z_4_2 = legate_numpy.NUMPY_PROJ_RADIX_3D_Z_4_2
PROJ_RADIX_3D_Z_4_3 = legate_numpy.NUMPY_PROJ_RADIX_3D_Z_4_3
# Flattening
PROJ_ND_1D_C_ORDER = legate_numpy.NUMPY_PROJ_ND_1D_C_ORDER
# Must always be last
PROJ_LAST = legate_numpy.NUMPY_PROJ_LAST
|
python
|
import numpy as np
import hmm
x = np.random.randint(0, 500, 1000)
m = 3
l = np.array([10, 250, 450])
g = np.array([[.8, .1, .1], [.1, .8, .1], [.1, .1, .8]])
d = np.array([.3, .3, .3])
(success, lambda_, gamma_, delta_, aic, bic, nll, niter) = hmm.hmm_poisson_fit_em(x, m, l, g, d, 1000, 1e-6)
print(success, aic, bic, nll, niter)
print(lambda_)
print("\n")
print(gamma_)
print("\n")
print(delta_)
|
python
|
import torch
from torch import nn
from buglab.models.layers.multihead_attention import MultiheadAttention
class RelationalMultiheadAttention(MultiheadAttention):
"""
A relational multihead implementation supporting two variations of using additional
relationship information between tokens:
* If no edge information is passed in in .forward(), this behaves like a standard
multi-head self-attention.
* If edges are present and edge_attention_bias_is_scalar=False,
and use_edge_value_biases=True is set, this implements
Eqs. (3) and (4) of
Shaw, Peter, Jakob Uszkoreit, and Ashish Vaswani. "Self-attention with relative position representations."
In ACL 2018. https://www.aclweb.org/anthology/N18-2074/
and
Eq. (2) of
Wang, Bailin, et al. "RAT-SQL: Relation-aware schema encoding and linking for text-to-SQL parsers."
In ICML 2020. https://arxiv.org/pdf/1911.04942.pdf
* If edges are present and edge_attention_bias_is_scalar=True,
and use_edge_value_biases=False is set, this implements Sect. 3.1 of
Hellendoorn, Vincent J., et al. "Global relational models of source code."
In ICLR 2020. https://openreview.net/pdf?id=B1lnbRNtwr
"""
def __init__(
self,
*,
num_heads: int,
num_edge_types: int,
input_state_dimension: int,
key_query_dimension: int,
value_dimension: int,
output_dimension: int,
dropout_rate: float,
use_edge_value_biases: bool = False,
edge_attention_bias_is_scalar: bool = False,
):
super().__init__(
num_heads=num_heads,
input_state_dimension=input_state_dimension,
key_query_dimension=key_query_dimension,
value_dimension=value_dimension,
output_dimension=output_dimension,
dropout_rate=dropout_rate,
)
self._use_edge_value_biases = use_edge_value_biases
self._edge_attention_bias_is_scalar = edge_attention_bias_is_scalar
if self._edge_attention_bias_is_scalar:
edge_attention_bias_dim = num_heads
else:
edge_attention_bias_dim = num_heads * key_query_dimension
self._edge_attention_biases = nn.Embedding(num_embeddings=num_edge_types, embedding_dim=edge_attention_bias_dim)
self._reverse_edge_attention_biases = nn.Embedding(
num_embeddings=num_edge_types, embedding_dim=edge_attention_bias_dim
)
if self._use_edge_value_biases:
self._edge_value_biases = nn.Embedding(
num_embeddings=num_edge_types, embedding_dim=num_heads * value_dimension
)
self._reverse_edge_value_biases = nn.Embedding(
num_embeddings=num_edge_types, embedding_dim=num_heads * value_dimension
)
def forward(self, input_seq_states, masked_elements, edges, edge_types):
edge_sample_ids = edges[:, 0]
edge_sources = edges[:, 1]
edge_targets = edges[:, 2]
queries, keys, values = self._compute_qkv(input_seq_states)
raw_attention_scores = self._compute_attention_scores(keys, queries)
attention_scores = self._add_edge_attention_scores(
edge_sample_ids, edge_sources, edge_targets, edge_types, keys, queries, raw_attention_scores
)
attention_probs = self._compute_attention_probs(masked_elements, attention_scores)
multiheaded_weighted_value_sum = self._compute_weighted_sum(values, attention_probs)
if self._use_edge_value_biases:
multiheaded_weighted_value_sum = self._add_edge_value_biases(
edge_sample_ids, edge_sources, edge_targets, edge_types, attention_probs, multiheaded_weighted_value_sum
)
return self._compute_output(multiheaded_weighted_value_sum)
def _add_edge_attention_scores(
self, edge_sample_ids, edge_sources, edge_targets, edge_types, keys, queries, raw_attention_scores
):
# We compute (sparse, per existing edge) additional bias scores e'_bijk:
edge_bias_scores = self._compute_edge_bias_scores(
edge_sample_ids, edge_sources, edge_targets, edge_types, keys, queries
)
# We add the e'_bijk (where present) to e_bijk. This should be a simple +=, but
# that doesn't accumulate if we have several entries to add to e_bij. Hence we use
# index_put_, which in turn requires a contiguous Tensor memory layout, and so we need
# to establish that first:
attention_scores = raw_attention_scores.contiguous()
edge_sample_indices = torch.cat([edge_sample_ids, edge_sample_ids])
edge_query_indices = torch.cat([edge_sources, edge_targets])
edge_key_indices = torch.cat([edge_targets, edge_sources])
attention_scores.index_put_(
indices=(edge_sample_indices, edge_query_indices, edge_key_indices),
values=edge_bias_scores,
accumulate=True,
)
return attention_scores
def _compute_edge_bias_scores(self, edge_sample_ids, edge_sources, edge_targets, edge_types, keys, queries):
# We will compute additional e'_bihj which will be added onto the standard attention scores:
attention_biases = self._edge_attention_biases(edge_types)
attention_biases_r = self._reverse_edge_attention_biases(edge_types)
if self._edge_attention_bias_is_scalar:
# Compute e'_bijk = \sum_d (bias_bijk * (in_bj * W_K^k))_d
# This is the GREAT model. Note two things:
# (1) This is defined on the _key_ representation, not the _query_ repr.
# (2) Because bias_bijk is a scalar, this is essentially just scaling
# (in_bj * W_K^k) and then summing.
edge_attention_scores = torch.einsum(
"eh,ehd->eh",
attention_biases, # [num_edges, num_heads]
keys[edge_sample_ids, edge_targets], # [num_edges, num_heads, key_dim]
) # [num_edges, num_head]
r_edge_attention_scores = torch.einsum(
"eh,ehd->eh",
attention_biases_r, # [num_edges, num_heads]
keys[edge_sample_ids, edge_sources], # [num_edges, num_heads, key_dim]
) # [num_edges, num_head]
edge_bias_scores = torch.cat([edge_attention_scores, r_edge_attention_scores]) # [2 * num_edges, num_head]
else:
# Compute e'_bijk = (in_bj * W_Q^k) * bias_bijk^T
# This is the Relative Position Representations / RAT-SQL variant. Note that this
# is defined using the query representation, not the key repr.
edge_attention_scores = torch.einsum(
"ehd,ehd->eh",
attention_biases.reshape((-1, self._num_heads, self._key_query_dim)),
# [num_edges, num_heads, key_dim]
queries[edge_sample_ids, edge_sources], # [num_edges, num_heads, key_dim]
) # [num_edges, num_head]
r_edge_attention_scores = torch.einsum(
"ehd,ehd->eh",
attention_biases_r.reshape((-1, self._num_heads, self._key_query_dim)),
# [num_edges, num_heads, key_dim]
queries[edge_sample_ids, edge_targets], # [num_edges, num_heads, key_dim]
) # [num_edges, num_head]
edge_bias_scores = torch.cat([edge_attention_scores, r_edge_attention_scores]) # [2 * num_edges, num_head]
return edge_bias_scores
def _add_edge_value_biases(
self, edge_sample_ids, edge_sources, edge_targets, edge_types, attention_probs, multiheaded_weighted_value_sum
):
edge_sample_indices = torch.cat([edge_sample_ids, edge_sample_ids])
edge_query_indices = torch.cat([edge_sources, edge_targets])
value_biases_shape = (edge_sample_ids.shape[0], self._num_heads, self._value_dim)
value_bias_per_edge = attention_probs[edge_sample_ids, edge_sources, :, edge_targets].unsqueeze(
-1
) * self._edge_value_biases(edge_types).reshape(
value_biases_shape
) # [num_edges, num_heads, value_dim]
value_bias_per_r_edge = attention_probs[edge_sample_ids, edge_targets, :, edge_sources].unsqueeze(
-1
) * self._reverse_edge_value_biases(edge_types).reshape(
value_biases_shape
) # [num_edges, num_heads, value_dim]
biased_weighted_value_sum = multiheaded_weighted_value_sum.contiguous()
biased_weighted_value_sum.index_put_(
indices=(edge_sample_indices, edge_query_indices),
values=torch.cat((value_bias_per_edge, value_bias_per_r_edge), dim=0),
accumulate=True,
)
return biased_weighted_value_sum
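# A minimal construction sketch (an illustrative addition; all dimensions below are
# arbitrary example values rather than defaults from this repository):
#
#     # Relative-position / RAT-SQL style: vector-valued attention biases plus
#     # per-edge value biases (Shaw et al. 2018, Wang et al. 2020)
#     rat_attention = RelationalMultiheadAttention(
#         num_heads=8, num_edge_types=4, input_state_dimension=256,
#         key_query_dimension=32, value_dimension=32, output_dimension=256,
#         dropout_rate=0.1, use_edge_value_biases=True,
#         edge_attention_bias_is_scalar=False)
#
#     # GREAT style: scalar attention biases, no value biases (Hellendoorn et al. 2020)
#     great_attention = RelationalMultiheadAttention(
#         num_heads=8, num_edge_types=4, input_state_dimension=256,
#         key_query_dimension=32, value_dimension=32, output_dimension=256,
#         dropout_rate=0.1, use_edge_value_biases=False,
#         edge_attention_bias_is_scalar=True)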
|
python
|
from datetime import datetime
from django.contrib import messages
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib.auth.models import User, Group
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render, render_to_response
from django.template import RequestContext
from notifications.models import Notification
from users.models import Profile
def notifications_index(request):
if request.user.is_superuser or Profile.objects.get(user_id=request.user.id).rol == 'SUP':
notifications = Notification.objects.all()
else:
notifications = Notification.objects.filter(profile_id=Profile.objects.get(user_id=request.user.id))
return render(request, 'notifications/index.html', {
'notifications': notifications,
})
def notifications_show(request, id):
notification = Notification.objects.get(id=int(id))
notification.read_at = datetime.now()
notification.save(update_fields=['read_at'])
return render(request, 'notifications/show.html', {
'notification_obj': Notification,
'notification': notification,
})
def notifications_delete(request, id):
notification = Notification.objects.get(id=id)
notification.delete()
is_exist = Notification.objects.filter(id=id).exists()
if is_exist:
message = 'No se pudo eliminar'
messages.add_message(request, messages.ERROR, message)
else:
message = 'Eliminado!'
messages.add_message(request, messages.SUCCESS, message)
return HttpResponseRedirect(reverse(notifications_index))
|
python
|
from aoc import AOC
aoc = AOC(year=2015, day=18)
data = aoc.load()
## Part 1
# Initialize the array of lights to all off
lights = [x[:] for x in [[0] * len(data.lines())] * len(data.lines())]
# For every line in the input
in_y = 0
for line in data.lines():
line = line.strip()
in_x = 0
for c in line:
# Set lights which are initially 'on' to 1
if c == "#":
lights[in_y][in_x] = 1
in_x += 1
in_y += 1
def count_neighbors(x, y):
# Counts the number of neighbors of a light which are on
global lights
neighbors = 0
# Loops through all 8 neighbors
for i in range(9):
# Skipping the current light
if i == 4:
continue
# Get the position of the neighbor and check if it is a valid position and on
yy = y - 1 + int(i / 3)
xx = x - 1 + i % 3
if (
yy in range(0, len(lights))
and xx in range(0, len(lights[yy]))
and lights[yy][xx] == 1
):
neighbors += 1
return neighbors
def step():
# Advance one step
global lights
# Create a copy of the array for the next step
next_step = [row[:] for row in lights]
# Loop through each light
for y, _ in enumerate(lights):
for x, _ in enumerate(lights[y]):
# Check if the conditions to turn a light on/off are met
if lights[y][x] == 1 and not count_neighbors(x, y) in [2, 3]:
next_step[y][x] = 0
elif lights[y][x] == 0 and count_neighbors(x, y) == 3:
next_step[y][x] = 1
lights = next_step
# Step 100 times
for _ in range(100):
step()
def total_lights():
# Count the number of lights that are on
total_lights_on = 0
for y, _ in enumerate(lights):
for x, _ in enumerate(lights[y]):
if lights[y][x] == 1:
total_lights_on += 1
return total_lights_on
aoc.p1(total_lights())
## Part 2
lines = data.lines()
# Initialize the array of lights to all off
lights = [x[:] for x in [[0] * len(lines)] * len(lines)]
def is_vertical_end(yy, line):
return yy in (0, len(line) - 1)
def is_horizontal_end(xx, line):
return xx in (0, len(line) - 1)
def read_input():
# For every line in the input
global lights
y = 0
for line in lines:
line = line.strip()
x = 0
for c in line.strip():
# Set the corners to be on no matter what
if is_vertical_end(y, lines) and is_horizontal_end(x, line):
lights[y][x] = 1
# Set lights which are initially 'on' to 1
elif c == "#":
lights[y][x] = 1
x += 1
y += 1
def count_neighbors(x, y):
# Counts the number of neighbors of a light which are on
global lights
neighbors = 0
# Loops through all 8 neighbors
for i in range(9):
# Skipping the current light
if i == 4:
continue
# Get the position of the neighbor and check if it is a valid position and on
yy = y - 1 + int(i / 3)
xx = x - 1 + i % 3
if (
yy in range(0, len(lights))
and xx in range(0, len(lights[yy]))
and lights[yy][xx] == 1
):
neighbors += 1
return neighbors
def step():
# Advance one step
global lights
# Create a copy of the array for the next step
next_step = [row[:] for row in lights]
# Loop through each light
for y, _ in enumerate(lights):
for x, _ in enumerate(lights[y]):
# Skip the corners - they are always on
if is_vertical_end(y, lights) and is_horizontal_end(x, lights[y]):
continue
# Check if the conditions to turn a light on/off are met
if lights[y][x] == 1 and not count_neighbors(x, y) in [2, 3]:
next_step[y][x] = 0
elif lights[y][x] == 0 and count_neighbors(x, y) == 3:
next_step[y][x] = 1
lights = next_step
read_input()
# Step 100 times
for _ in range(100):
step()
def total_lights():
# Count the number of lights that are on
total_lights_on = 0
for y, _ in enumerate(lights):
for x, _ in enumerate(lights[y]):
if lights[y][x] == 1:
total_lights_on += 1
return total_lights_on
aoc.p2(total_lights())
|
python
|
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.tree import DecisionTree
from pyspark.mllib.linalg import SparseVector
from pyspark import SparkContext
from operator import add
import time
import numpy
from pyspark.mllib.linalg import Vectors
import pyspark.mllib.clustering as cl
import os
sc = SparkContext("local", "Myapp")
#####Read input files
#Get the all the file names
filenames = next(os.walk("/Users/panpan/Desktop/linkedin/followings/group8"))[2]
#save the all files to variable <files>
files=list()
for filename in filenames:
f=open("/Users/panpan/Desktop/linkedin/followings/group8/%s" %filename,"r")
files.append(f.readline())
#initialize mutual_list
mutual_list=numpy.zeros((len(filenames),len(filenames)))
#pick two users each time, and calculate their common friends
for i in range(0,len(files)):
if i+1>=len(files):
continue
for j in range(i,len(files)):
file_1 =files[i].split(",")
file_2 =files[j].split(",")
file1 =sc.parallelize(file_1)
file2 =sc.parallelize(file_2)
#common friends of the two users
file_12=file1.intersection(file2)
mutual=len(file_12.collect())
        #define a way to calculate how similar the two users are to each other
mutual_proportion=1.0/2*mutual*(1.0/len(file_1)+1.0/len(file_2))
mutual_list[i][j]=mutual_list[j][i]=mutual_proportion
###Cluster the models
model = cl.KMeans.train(sc.parallelize(mutual_list), 4, maxIterations=10, runs=30, initializationMode="random",
seed=50, initializationSteps=5, epsilon=1e-4)
for i in range(0,len(mutual_list)):
    print(model.predict(mutual_list[i]))
#further optimization on parameter needed
|
python
|
#!/usr/bin/env python3
from aws_cdk import core
from lab08.lab08_stack import Lab08Stack
app = core.App()
Lab08Stack(app, "lab08",env={"region":"us-east-1","account":"111111111111"})
app.synth()
|
python
|
import h5py
import numpy as np
import glob
from mne import create_info
from mne.io import RawArray
def extract_blackrock_info(mat_file, blackrock_type):
""" Extracts basic recording info from a blacrock extracted mat file.
Extracts the data, sampling rate, channel names, and digital to
analog conversion factor from a blackrock extracted .mat file. hp5y was
required instead of scipy.loadmat due to the large .mat file size.
Args:
mat_file: string of filename representing a .mat file extracted from a
blackrock .ns2 or .ns5 file using OpenNSx
blackrock_type: a string either 'ns2' or 'ns5' denoting which type
of recording the .mat file contains
Returns:
a dictionary containing the data, sampling rate,
channel names, and digital to analog conversion factor
"""
info = {}
file_obj = h5py.File(mat_file)
struct = file_obj[blackrock_type.upper()]
if 'saline1' in mat_file:
data = [file_obj[struct['Data'][0, 0]], file_obj[struct['Data'][1, 0]]]
info['data'] = np.concatenate(data, axis=0).T
else:
info['data'] = np.array(struct['Data']).T
info['srate'] = struct['MetaTags']['SamplingFreq'][0][0]
# extract the digital to analog conversion factor as the ratio
# between the analog range and digital range
max_av = file_obj[struct['ElectrodesInfo']
['MaxAnalogValue'][0][0]][0][0]
max_dv = file_obj[struct['ElectrodesInfo']
['MaxDigiValue'][0][0]][0][0]
info['dac_factor'] = float(max_av) / max_dv
# extract the channel names
ch_name_datasets = [file_obj[ref[0]] for ref in
np.array(struct['ElectrodesInfo']['Label'])]
    ch_names = [u''.join(chr(c) for c in l if c)
for l in ch_name_datasets]
# replace 'elec1-84' with ref since this was only done in some files
info['ch_names'] = [u'ref' if c == "elec1-84" else c for c in ch_names]
return info
def create_mne_raw(blackrock_info):
""" Creates an MNE-Python raw object given a dictionary containing
    recording information extracted from the blackrock mat file.
Args:
blackrock_info: dictionary containing the data, channel names,
sampling rate, and dac factor
Returns:
an MNE-Python raw object for the data
"""
# create the MNE info object
ch_types = ['eeg'] * len(blackrock_info['ch_names']) + ['stim']
blackrock_info['ch_names'].append("STIM")
mne_info = create_info(blackrock_info['ch_names'],
blackrock_info['srate'], ch_types)
# take the recorded data and add a row of 0's to represent
# the stim channel without events yet
num_samples = blackrock_info['data'].shape[-1]
blackrock_info['data'] = np.vstack((blackrock_info['data'],
np.zeros(num_samples)))
# convert from digitized units to microvolts
blackrock_info['data'] *= blackrock_info['dac_factor']
# create MNE Raw object
raw = RawArray(blackrock_info['data'], mne_info, verbose=False)
return raw
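# Usage sketch (hypothetical file name, not part of the original module):
# the two helpers above are typically composed as
#     info = extract_blackrock_info('recording_ns2.mat', 'ns2')
#     raw = create_mne_raw(info)
# after which standard MNE Raw methods (e.g. raw.filter, raw.plot) can be used.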
def create_events_square_wave(events):
""" Takes an MNE events array consisting of pairs of onset and offset
events and interpolates new events between these onset and offset events to
form a "square wave" for placement into an MNE stim channel.
Args:
events: An MNE events array consisting of onset and offset
paired events.
Returns:
The new events array with samples between onset and offset
events filled with events.
"""
filled_events = []
i = 0
while i < events.shape[0]:
onset, offset = events[i, 0], events[i + 1, 0]
for j in range(onset, offset + 1):
filled_events.append([j, 0, 1])
i += 2
return np.array(filled_events)
def load_power_data(exp, condition, typ='ns2'):
""" Loads all tfr power for a given experiment and condition.
Args:
exp: The experiment to collect data for. 'main' or 'saline'
condition: The condition to collect data for.
'Open', 'Closed', or 'Brain'
typ: The type of recording file to collect. 'ns2' or 'ns5'.
Returns:
A tuple containing the power data across all dates in a single array,
a list of channel names, a list of time labels, and a list of
frequency labels.
"""
file = '../data/power/%s_%s_*_raw_power.npz' % (typ, condition)
fnames = sorted(glob.glob(file))
if exp == 'saline':
fnames = [f for f in fnames if 'saline' in f]
else:
fnames = [f for f in fnames if 'saline' not in f]
tmp = np.load(fnames[0])
chs = tmp['chs']
times = tmp['times']
freqs = tmp['freqs']
power = [np.load(f)['data'] for f in fnames]
power = np.concatenate(power, axis=0)
return power, chs, times, freqs
def baseline_normalize(power, baseline, times):
""" Baseline normalizes raw tfr power data according to
a slightly modified version of the normalization procedure suggested by
Grandchamp and Delorme, 2011.
First, we divide the power data in each trial by the median of the
power across the entire trial (excluding the stimulation period and 0.5
seconds of buffer around the stimulation period). Then, we take the median
across all trials and divide the median power by the median of the
pre-stimulation baseline period. Finally, we log transform and multipy
by 10 to get a decibel representation.
Args:
power: # trials x # chs x # freqs x # time points array
containing TFR power
baseline: tuple delimiting the time boundaries of baseline period
times: a list of time labels for each sample
Returns:
The modified tfr power array now baseline normalized (# chs x
# freqs x # time points)
"""
# first normalize by the median of the power across the entire trial
# excluding the stimulation period and stimulation edge artifacts
trial_mask = np.where(np.logical_or(times <= -.5, times >= 10.5))[0]
trial_norm = np.median(power[:, :, :, trial_mask],
axis=-1)[:, :, :, np.newaxis]
power /= trial_norm
# median across trials
power = np.median(power, axis=0)
# normalize by median of pre-stimulation baseline period
bl_mask = np.where(np.logical_and(times >= baseline[0],
times <= baseline[1]))[0]
bl_norm = np.median(power[:, :, bl_mask], axis=-1)[:, :, np.newaxis]
power /= bl_norm
# log transform and scale
power = 10 * np.log10(power)
return power
def reduce_band_power(power, freqs, band, axis):
""" Averages frequency content within a given frequency band range.
Args:
power: array containing tfr power
freqs: list of frequencies contained in the tfr power array
band: tuple containing the frequency band limits
axis: the axis containing the frequency data
Returns:
Returns a band power array where the frequency axis has been averaged
within the range supplied by band.
"""
band_mask = np.where(np.logical_and(freqs >= band[0], freqs <= band[1]))[0]
power = np.take(power, band_mask, axis=axis).mean(axis=axis)
return power
def reduce_toi_power(power, times, toi, axis):
""" Averages across time withing a given period of interest.
Args:
power: array containing tfr power
times: list of time labels for each sample
toi: tuple containing the limits of the time period of interest
axis: the axis containing the time data
Returns:
Returns a power array where the time axis has been averaged
within the range supplied by toi.
"""
toi_mask = np.where(np.logical_and(times >= toi[0], times <= toi[1]))[0]
power = np.take(power, toi_mask, axis=axis).mean(axis=axis)
return power
def reduce_array_power(power, chs, bad_chs, array, axis):
""" Averages across channels withing a given array.
Args:
power: array containing tfr power
chs: list of channel names
bad_chs: bad channels not to be included in average
array: which recording array to average over
axis: the axis containing the ch info
Returns:
Returns a power array where the channel axis has been averaged
within the selected chs supplied by array and bad_chs.
"""
arr_base = 'elec%s' % array
ch_mask = [ix for ix in np.arange(len(chs)) if arr_base in chs[ix] and
chs[ix] not in bad_chs]
power = np.take(power, ch_mask, axis=axis).mean(axis=axis)
return power
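# --- Minimal usage sketch (not part of the original module) ---
# Exercises the reduction pipeline on synthetic data; the shapes, baseline
# window, and band limits below are illustrative assumptions only.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    times = np.linspace(-2.0, 12.0, 200)   # seconds
    freqs = np.linspace(1.0, 100.0, 40)    # Hz
    # trials x channels x freqs x time points
    power = rng.uniform(0.5, 1.5, size=(5, 3, len(freqs), len(times)))
    power = baseline_normalize(power, (-2.0, -1.0), times)
    alpha = reduce_band_power(power, freqs, band=(8.0, 12.0), axis=1)
    toi_mean = reduce_toi_power(alpha, times, toi=(0.0, 10.0), axis=-1)
    print(toi_mean.shape)  # one average alpha-band value per channel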
|
python
|
import tensorflow as tf
from tf_stft import Spectrogram, Logmel
from tensorflow_utils import do_mixup
class ConvBlock(tf.keras.Model):
"""
Convolutional Block Class.
"""
def __init__(self, out_channels):
"""
Parameters
----------
out_channels : int
Number of output channels
"""
super(ConvBlock, self).__init__()
self.conv1 = tf.keras.layers.Conv2D(filters = out_channels,
kernel_size=3, strides=1,
padding = 'same',
use_bias=False,
kernel_initializer='glorot_uniform')
self.bn1 = tf.keras.layers.BatchNormalization()
self.conv2 = tf.keras.layers.Conv2D(filters = out_channels,
kernel_size=3, strides=1,
padding = 'same',
use_bias=False,
kernel_initializer='glorot_uniform')
self.bn2 = tf.keras.layers.BatchNormalization()
def call(self, inputs, pool_size=(2, 2), pool_type='avg'):
# NOTE move pool_type to init.
x = inputs
x = tf.keras.activations.relu(self.bn1(self.conv1(x)))
x = tf.keras.activations.relu(self.bn2(self.conv2(x)))
if pool_type == 'max':
x = tf.keras.layers.MaxPool2D(pool_size = pool_size)(x)
elif pool_type == 'avg':
x = tf.keras.layers.AveragePooling2D(pool_size = pool_size)(x)
elif pool_type == 'avg+max':
x1 = tf.keras.layers.AveragePooling2D(pool_size = pool_size)(x)
x2 = tf.keras.layers.MaxPool2D(pool_size = pool_size)(x)
x = x1 + x2
else:
raise ValueError("pool_type should be one of the following:\
max, avg or avg+max. Here, we got {}.".format(pool_type))
# NOTE change to fstring
return x
class Cnn14(tf.keras.Model):
"""
CNN14 Backbone
"""
# NOTE: I did everything. only leave backbone in here
# NOTE add name argument in init
def __init__(self, sample_rate, window_size, hop_size, mel_bins,
fmin, fmax, classes_num):
# NOTE Add Docstring
super(Cnn14, self).__init__()
window = 'hann'
center = True
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
# Spectogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=window_size,
hop_length=hop_size) # NOTE Missing parameters: win_length, window, center, pad_mode, freeze_parameters
self.logmel_extractor = Logmel(sample_rate=sample_rate,
win_length=window_size, n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref,
amin=amin, top_db=top_db) # NOTE Missing parameter: freeze_parameters
        self.spec_augmenter = None  # NOTE Missing SpecAugmentation function
self.bn0 = tf.keras.layers.BatchNormalization()
self.conv_block1 = ConvBlock(out_channels=64)
self.conv_block2 = ConvBlock(out_channels=128)
self.conv_block3 = ConvBlock(out_channels=256)
self.conv_block4 = ConvBlock(out_channels=512)
self.conv_block5 = ConvBlock(out_channels=1024)
self.conv_block6 = ConvBlock(out_channels=2048)
        # NOTE use_bias==True
self.fc1 = tf.keras.layers.Dense(2048, use_bias=True)
self.fc_audioset = tf.keras.layers.Dense(classes_num, use_bias=True)
# NOTE Question: Need to initialize?. -> Do it in arguments.
def call(self, inputs, mixup_lambda=None):
# NOTE add training in call
"""
Parameters
----------
inputs : (batch_size, data_length)
mixup_lambda : (batch_size * 2,), optional
"""
# NOTE add comment to say that second dimension is channels
x = self.spectrogram_extractor(inputs) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
# NOTE investigate or ask qiuqiang.
x = tf.transpose(x, perm=[0, 3, 2, 1])
x = self.bn0(x)
x = tf.transpose(x, perm=[0, 3, 2, 1])
if self.training:
x = self.spec_augmenter(x)
# NOTE move mixup bool as an attribut
# NOTE create lambda uniform in call
# NOTE create a lambda attribut: update it every time a forward function is used
# Mixup on spectrogram
if self.training and mixup_lambda is not None:
x = do_mixup(x, mixup_lambda)
# NOTE add dropout_rates in init.
x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg')
x = tf.keras.layers.Dropout(.2)(x) # NOTE add training attribute on dropout layers
x = self.conv_block2(x, pool_size=(2, 2), pool_type='avg')
x = tf.keras.layers.Dropout(.2)(x)
x = self.conv_block3(x, pool_size=(2, 2), pool_type='avg')
x = tf.keras.layers.Dropout(.2)(x)
x = self.conv_block4(x, pool_size=(2, 2), pool_type='avg')
x = tf.keras.layers.Dropout(.2)(x)
x = self.conv_block5(x, pool_size=(2, 2), pool_type='avg')
x = tf.keras.layers.Dropout(.2)(x)
x = self.conv_block6(x, pool_size=(1, 1), pool_type='avg')
x = tf.keras.layers.Dropout(.2)(x)
x = tf.math.reduce_mean(x, axis=-1)
        # NOTE tf.math.reduce_max returns a single tensor (no indices), so no tuple unpacking is needed
        x1 = tf.math.reduce_max(x, axis=-1)
x2 = tf.math.reduce_mean(x, axis=-1)
x = x1 + x2
x = tf.keras.layers.Dropout(.5)(x)
x = tf.keras.activations.relu(self.fc1(x))
embedding = tf.keras.layers.Dropout(.5)(x)
clipwise_output = tf.math.sigmoid(self.fc_audioset(x))
output_dict = {'clipwise_output': clipwise_output,
'embedding': embedding}
return output_dict
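# --- Minimal smoke test (illustrative, not part of the original file) ---
# Exercises ConvBlock alone on a dummy (batch, time, mel_bins, channels)
# tensor; Cnn14 itself is not instantiated here because SpecAugmentation
# is still missing above.
if __name__ == '__main__':
    dummy = tf.random.normal((2, 64, 64, 1))
    block = ConvBlock(out_channels=16)
    out = block(dummy, pool_size=(2, 2), pool_type='avg')
    print(out.shape)  # expected: (2, 32, 32, 16) after 2x2 average pooling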
|
python
|
# -*- coding: utf-8 -*-
def count_days(y, m, d):
return (365 * y + (y // 4) - (y // 100) + (y // 400) + ((306 * (m + 1)) // 10) + d - 429)
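# Worked example: count_days(2014, 5, 17) - count_days(2014, 5, 16) == 1.
# January and February are treated as months 13 and 14 of the previous year,
# which is why main() shifts m and y before calling count_days when m is 1 or 2.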
def main():
y = int(input())
m = int(input())
d = int(input())
if m == 1 or m == 2:
m += 12
y -= 1
print(count_days(2014, 5, 17) - count_days(y, m, d))
if __name__ == '__main__':
main()
|
python
|
import pytest
from azure.ml import MLClient
@pytest.fixture
def environment_id() -> str:
return "/subscriptions/5f08d643-1910-4a38-a7c7-84a39d4f42e0/resourceGroups/sdk_vnext_cli/providers/Microsoft.MachineLearningServices/Environments/AzureML-Minimal"
@pytest.fixture
def compute_id() -> str:
return "testCompute"
@pytest.fixture
def experiment_name() -> str:
return "mfe-test-sweep"
@pytest.mark.e2etest
@pytest.mark.skip(reason="TODO: need to be fixed")
def test_sweep_job_submit(
client: MLClient, experiment_name: str, randstr: str, environment_id: str, compute_id: str
) -> None:
    # TODO: need to create a workspace under an e2e-testing-only subscription and resource group
job_resource = client.jobs.submit(
file="./tests/test_configs/sweep_job_test.yaml",
job_name=randstr,
compute_id=compute_id,
experiment_name=experiment_name,
environment_id=environment_id,
)
assert job_resource.name == randstr
assert job_resource.properties["status"] == "Running"
assert job_resource.properties["computeBinding"]["computeId"] == compute_id
assert job_resource.properties["experimentName"] == experiment_name
|
python
|
#!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import parse_ddrescue
import reiserfs_blocks
def print_rangelist(rangelist):
print(0, "*", 1)
end = 0
for item in rangelist.items:
if end != item.start:
print(end, item.start - end, "-")
print(item.start, item.size, "+")
end = item.start + item.size
def main(argv):
if len(argv) < 2:
print(f"Usage: {argv[0]} MAPFILE", file=sys.stderr)
sys.exit(1)
filenameMap = argv[1]
rescueMap = parse_ddrescue.parseDdrescue(filenameMap)
rangelist = reiserfs_blocks.RangeList()
expandAmount = 512 * 1
mapSize = rescueMap.size()
last = 0
for start, size, val in rescueMap:
if val != parse_ddrescue.Status.FINISHED:
continue
end = min(mapSize, start+size+expandAmount)
start = max(last, start-expandAmount)
last = end
rangelist.add(start, end-start)
print_rangelist(rangelist)
if __name__ == "__main__":
main(sys.argv)
|
python
|
from rdflib.graph import ConjunctiveGraph
from typing import ClassVar
from rdflib import Namespace
from test.testutils import MockHTTPResponse, ServedSimpleHTTPMock
import unittest
EG = Namespace("http://example.org/")
class TestSPARQLConnector(unittest.TestCase):
query_path: ClassVar[str]
query_endpoint: ClassVar[str]
update_path: ClassVar[str]
update_endpoint: ClassVar[str]
httpmock: ClassVar[ServedSimpleHTTPMock]
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
cls.httpmock = ServedSimpleHTTPMock()
cls.query_path = "/db/sparql"
cls.query_endpoint = f"{cls.httpmock.url}{cls.query_path}"
cls.update_path = "/db/update"
cls.update_endpoint = f"{cls.httpmock.url}{cls.update_path}"
@classmethod
def tearDownClass(cls) -> None:
super().tearDownClass()
cls.httpmock.stop()
def setUp(self):
self.httpmock.reset()
def tearDown(self):
pass
def test_graph_update(self):
graph = ConjunctiveGraph("SPARQLUpdateStore")
graph.open((self.query_endpoint, self.update_endpoint))
update_statement = f"INSERT DATA {{ {EG['subj']} {EG['pred']} {EG['obj']}. }}"
self.httpmock.do_post_responses.append(
MockHTTPResponse(
200,
"OK",
b"Update succeeded",
{"Content-Type": ["text/plain; charset=UTF-8"]},
)
)
# This test assumes that updates are performed using POST
# at the moment this is the only supported way for SPARQLUpdateStore
# to do updates.
graph.update(update_statement)
self.assertEqual(self.httpmock.call_count, 1)
req = self.httpmock.do_post_requests.pop(0)
self.assertEqual(req.parsed_path.path, self.update_path)
self.assertIn("application/sparql-update", req.headers.get("content-type"))
|
python
|
# Copyright 2021 TUNiB Inc.
import torch
import torch.distributed as dist
from transformers import GPT2Tokenizer
from oslo.models.gpt_neo.modeling_gpt_neo import (
GPTNeoForCausalLM,
GPTNeoForSequenceClassification,
GPTNeoModel,
)
class TestPPInference:
def __init__(self, num_gpus):
self.num_gpus = num_gpus
self.tokenizer = GPT2Tokenizer.from_pretrained("EleutherAI/gpt-neo-125M")
@torch.no_grad()
def test_gpt_neo_model(self, fp16):
model_pp = GPTNeoModel.from_pretrained_with_parallel(
"EleutherAI/gpt-neo-125M",
pipeline_parallel_size=self.num_gpus,
torch_dtype=torch.float16 if fp16 else torch.float32,
).eval()
if fp16:
model_1d = (
GPTNeoModel.from_pretrained_with_parallel("EleutherAI/gpt-neo-125M")
.half()
.eval()
.cuda()
)
else:
model_1d = (
GPTNeoModel.from_pretrained_with_parallel("EleutherAI/gpt-neo-125M")
.eval()
.cuda()
)
batch_encoding = self.tokenizer(
text="Hello I am Kevin. Today,", return_tensors="pt"
).to("cuda")
hidden_pp = [_.last_hidden_state for _ in model_pp(**batch_encoding)][0]
hidden_1d = model_1d(**batch_encoding).last_hidden_state
if dist.get_rank() == 0:
print(
f"\n{TestPPInference.__qualname__}:\n"
f"--fp16:{fp16}\n"
f"--test result: {torch.isclose(hidden_1d[0], hidden_pp[0], rtol=1e-2)}\n"
)
del model_pp
del model_1d
@torch.no_grad()
def test_gpt_neo_lm_head_model(self, fp16):
model_pp = GPTNeoForCausalLM.from_pretrained_with_parallel(
"EleutherAI/gpt-neo-125M",
pipeline_parallel_size=self.num_gpus,
torch_dtype=torch.float16 if fp16 else torch.float32,
).eval()
if fp16:
model_1d = (
GPTNeoForCausalLM.from_pretrained_with_parallel(
"EleutherAI/gpt-neo-125M"
)
.half()
.eval()
.cuda()
)
else:
model_1d = (
GPTNeoForCausalLM.from_pretrained_with_parallel(
"EleutherAI/gpt-neo-125M"
)
.eval()
.cuda()
)
batch_encoding = self.tokenizer(
text="Hello I am Kevin. Today,", return_tensors="pt"
).to("cuda")
output_pp = model_pp.generate(
**batch_encoding, num_beams=4, no_repeat_ngram_size=3
)
output_1d = model_1d.generate(
**batch_encoding, num_beams=4, no_repeat_ngram_size=3
)
if dist.get_rank() == 0:
print(
f"\n{TestPPInference.__qualname__}:\n"
f"--fp16:{fp16}\n"
f"--test result: \n1D:{self.tokenizer.decode(output_1d[0])}\n2D:{self.tokenizer.decode(output_pp[0])}\n"
)
del model_pp
del model_1d
@torch.no_grad()
def test_gpt_neo_for_classification(self, fp16):
model_pp = GPTNeoForSequenceClassification.from_pretrained_with_parallel(
"EleutherAI/gpt-neo-125M",
pipeline_parallel_size=self.num_gpus,
torch_dtype=torch.float16 if fp16 else torch.float32,
).eval()
if fp16:
model_1d = (
GPTNeoForSequenceClassification.from_pretrained(
"EleutherAI/gpt-neo-125M"
)
.half()
.eval()
.cuda()
)
else:
model_1d = (
GPTNeoForSequenceClassification.from_pretrained(
"EleutherAI/gpt-neo-125M"
)
.eval()
.cuda()
)
model_1d.config.pad_token_id = self.tokenizer.eos_token_id
model_pp.config.pad_token_id = self.tokenizer.eos_token_id
batch_encoding = self.tokenizer(
text=["I love you !", "I hate you !"], return_tensors="pt"
).to("cuda")
output_pp = torch.cat(
[_.logits.argmax(-1) for _ in model_pp(**batch_encoding)], dim=0
)
output_1d = model_1d(**batch_encoding).logits.argmax(-1)
if dist.get_rank() == 0:
print(
f"\n{TestPPInference.__qualname__}:\n"
f"--fp16:{fp16}\n"
f"--test result: \n1D:{output_1d}\n2D:{output_pp}\n"
)
del model_1d
del model_pp
if __name__ == "__main__":
test = TestPPInference(num_gpus=4)
for fp16 in [False, True]:
test.test_gpt_neo_model(fp16=fp16)
for fp16 in [False, True]:
test.test_gpt_neo_lm_head_model(fp16=fp16)
for fp16 in [False, True]:
test.test_gpt_neo_for_classification(fp16=fp16)
|
python
|
"""(c) All rights reserved. ECOLE POLYTECHNIQUE FEDERALE DE LAUSANNE, Switzerland, VPSI, 2017"""
import ldap3
from epflldap.utils import get_optional_env, EpflLdapException
def _get_LDAP_connection():
"""
Return a LDAP connection
"""
server = ldap3.Server('ldap://' + get_optional_env('EPFL_LDAP_SERVER_FOR_SEARCH'))
connection = ldap3.Connection(server)
connection.open()
return connection, get_optional_env('EPFL_LDAP_BASE_DN_FOR_SEARCH')
def LDAP_search(pattern_search, attribute):
"""
Do a LDAP search
"""
connection, ldap_base = _get_LDAP_connection()
connection.search(
search_base=ldap_base,
search_filter=pattern_search,
attributes=[attribute]
)
return connection.response
def get_attribute(response, attribute):
return response[0]['attributes'][attribute][0]
def is_unit_exist(unit_id):
"""
    Return True if the unit 'unit_id' exists.
Otherwise return False
"""
attribute = 'objectClass'
response = LDAP_search(
pattern_search="(uniqueidentifier={})".format(unit_id),
attribute=attribute
)
try:
unit_exist = 'EPFLorganizationalUnit' in response[0]['attributes'][attribute]
except Exception:
return False
return unit_exist
def get_unit_name(unit_id):
"""
    Return the unit name of the unit 'unit_id'
"""
attribute = 'cn'
response = LDAP_search(
pattern_search='(uniqueIdentifier={})'.format(unit_id),
attribute=attribute
)
try:
unit_name = get_attribute(response, attribute)
except Exception:
raise EpflLdapException("The unit with id '{}' was not found".format(unit_id))
return unit_name
def get_unit_id(unit_name):
"""
    Return the unit id of the unit 'unit_name'
"""
unit_name = unit_name.lower()
attribute = 'uniqueIdentifier'
response = LDAP_search(
pattern_search='(cn={})'.format(unit_name),
attribute=attribute
)
unit_id = ""
try:
for element in response:
if 'dn' in element and element['dn'].startswith('ou={},'.format(unit_name)):
unit_id = element['attributes'][attribute][0]
except Exception:
raise EpflLdapException("The unit named '{}' was not found".format(unit_name))
finally:
if not unit_id:
raise EpflLdapException("The unit named '{}' was not found".format(unit_name))
return unit_id
def get_units(username):
"""
Return all units of user 'username'
"""
connection, ldap_base = _get_LDAP_connection()
# Search the user dn
connection.search(
search_base=ldap_base,
search_filter='(uid={}@*)'.format(username),
)
    # For each user dn, extract the unit
dn_list = [connection.response[index]['dn'] for index in range(len(connection.response))]
units = []
    # For each unit, look up its information and collect the unit id
for dn in dn_list:
unit = dn.split(",ou=")[1]
connection.search(search_base=ldap_base, search_filter='(ou={})'.format(unit), attributes=['uniqueidentifier'])
units.append(get_attribute(connection.response, 'uniqueIdentifier'))
return units
def get_sciper(username):
"""
Return the sciper of user
"""
attribute = 'uniqueIdentifier'
response = LDAP_search(
pattern_search='(uid={})'.format(username),
attribute=attribute
)
try:
sciper = get_attribute(response, attribute)
except Exception:
raise EpflLdapException("No sciper corresponds to username {}".format(username))
return sciper
def get_username(sciper):
"""
Return username of user
"""
attribute = 'uid'
response = LDAP_search(
pattern_search='(uniqueIdentifier={})'.format(sciper),
attribute=attribute
)
try:
username = get_attribute(response, attribute)
except Exception:
raise EpflLdapException("No username corresponds to sciper {}".format(sciper))
return username
def get_email(sciper):
"""
Return email of user
"""
attribute = 'mail'
response = LDAP_search(
pattern_search='(uniqueIdentifier={})'.format(sciper),
attribute=attribute
)
try:
email = get_attribute(response, attribute)
except Exception:
raise EpflLdapException("No email address corresponds to sciper {}".format(sciper))
return email
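# Usage sketch (hypothetical username, for illustration only):
#     sciper = get_sciper('jdoe')   # e.g. '123456'
#     email = get_email(sciper)
#     units = get_units('jdoe')
# get_sciper and get_email raise EpflLdapException when the lookup yields no result.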
|
python
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Conduct forced alignment with the pre-trained CTC model."""
import codecs
import logging
import os
import shutil
import sys
from tqdm import tqdm
from neural_sp.bin.args_asr import parse_args_eval
from neural_sp.bin.eval_utils import average_checkpoints
from neural_sp.bin.train_utils import (
load_checkpoint,
set_logger
)
from neural_sp.datasets.asr import build_dataloader
from neural_sp.models.seq2seq.speech2text import Speech2Text
from neural_sp.utils import mkdir_join
logger = logging.getLogger(__name__)
def main():
# Load configuration
args, recog_params, dir_name = parse_args_eval(sys.argv[1:])
# Setting for logging
if os.path.isfile(os.path.join(args.recog_dir, 'align.log')):
os.remove(os.path.join(args.recog_dir, 'align.log'))
set_logger(os.path.join(args.recog_dir, 'align.log'), stdout=args.recog_stdout)
for i, s in enumerate(args.recog_sets):
# Align all utterances
args.min_n_frames = 0
args.max_n_frames = 1e5
# Load dataloader
dataloader = build_dataloader(args=args,
tsv_path=s,
batch_size=recog_params['recog_batch_size'])
if i == 0:
# Load the ASR model
model = Speech2Text(args, dir_name)
epoch = int(args.recog_model[0].split('-')[-1])
if args.recog_n_average > 1:
# Model averaging for Transformer
model = average_checkpoints(model, args.recog_model[0],
n_average=args.recog_n_average)
else:
load_checkpoint(args.recog_model[0], model)
if not args.recog_unit:
args.recog_unit = args.unit
logger.info('recog unit: %s' % args.recog_unit)
logger.info('epoch: %d' % epoch)
logger.info('batch size: %d' % args.recog_batch_size)
# GPU setting
if args.recog_n_gpus >= 1:
model.cudnn_setting(deterministic=True, benchmark=False)
model.cuda()
save_path = mkdir_join(args.recog_dir, 'ctc_forced_alignments')
# Clean directory
if save_path is not None and os.path.isdir(save_path):
shutil.rmtree(save_path)
os.mkdir(save_path)
pbar = tqdm(total=len(dataloader))
while True:
batch, is_new_epoch = dataloader.next()
trigger_points = model.ctc_forced_align(batch['xs'], batch['ys']) # `[B, L]`
for b in range(len(batch['xs'])):
save_path_spk = mkdir_join(save_path, batch['speakers'][b])
save_path_utt = mkdir_join(save_path_spk, batch['utt_ids'][b] + '.txt')
tokens = dataloader.idx2token[0](batch['ys'][b], return_list=True)
with codecs.open(save_path_utt, 'w', encoding="utf-8") as f:
for i, tok in enumerate(tokens):
f.write('%s %d\n' % (tok, trigger_points[b, i]))
# TODO: consider down sampling
pbar.update(len(batch['xs']))
if is_new_epoch:
break
pbar.close()
if __name__ == '__main__':
main()
|
python
|
def my_reduce(value):
number = 0
for v in value:
number+=v
return number
|
python
|
from playground.network.packet import PacketType, FIELD_NOT_SET
from playground.network.packet.fieldtypes import UINT8, UINT16, UINT32, UINT64, \
STRING, BUFFER, \
ComplexFieldType, PacketFields
from playground.network.packet.fieldtypes.attributes import Optional
class VNICSocketControlPacket(PacketType):
"""
This packet type is only to provide a common base class
for VNIC packets.
"""
DEFINITION_IDENTIFIER = "vsockets.VNICSocketControlPacket"
DEFINITION_VERSION = "1.0"
class VNICSocketOpenPacket(VNICSocketControlPacket):
DEFINITION_IDENTIFIER = "vsockets.VNICSocketOpenPacket"
DEFINITION_VERSION = "1.0"
class SocketConnectData(PacketFields):
FIELDS = [
("destination", STRING),
("destinationPort", UINT16)
]
class SocketListenData(PacketFields):
FIELDS = [
("sourcePort", UINT16)
]
FIELDS = [
("ConnectionId", UINT32),
("callbackAddress", STRING),
("callbackPort", UINT16),
("connectData", ComplexFieldType(SocketConnectData, {Optional:True})),
("listenData", ComplexFieldType(SocketListenData, {Optional:True}))
]
def isConnectType(self):
return self.connectData != FIELD_NOT_SET and self.listenData == FIELD_NOT_SET
def isListenType(self):
return self.connectData == FIELD_NOT_SET and self.listenData != FIELD_NOT_SET
class VNICSocketClosePacket(VNICSocketControlPacket):
DEFINITION_IDENTIFIER = "vsockets.VNICSocketClosePacket"
DEFINITION_VERSION = "1.0"
FIELDS = [("ConnectionId", UINT32)]
class VNICSocketOpenResponsePacket(VNICSocketControlPacket):
DEFINITION_IDENTIFIER = "vsockets.VNICSocketOpenResponsePacket"
DEFINITION_VERSION = "1.0"
FIELDS = [
("ConnectionId", UINT32),
("port", UINT16),
("errorCode", UINT8({Optional:True})),
("errorMessage", STRING({Optional:True}))
]
def isFailure(self):
return (self.errorCode != FIELD_NOT_SET or self.errorMessage != FIELD_NOT_SET)
class VNICConnectionSpawnedPacket(VNICSocketControlPacket):
DEFINITION_IDENTIFIER = "vsockets.VNICConnectionSpawnedPacket"
DEFINITION_VERSION = "1.0"
FIELDS = [
("ConnectionId", UINT32),
("spawnTcpPort", UINT16),
("source", STRING),
("sourcePort", UINT16),
("destination", STRING),
("destinationPort", UINT16)
]
class VNICStartDumpPacket(VNICSocketControlPacket):
DEFINITION_IDENTIFIER = "vsockets.VNICStartDumpPacket"
DEFINITION_VERSION = "1.0"
class VNICPromiscuousLevelPacket(VNICSocketControlPacket):
"""
This packet is both a getter/setter packet that can be
sent by a client to either set or get the promiscuity
level. It is also sent back by the server as an acknowledgement
    with the current level.
Client sends VNICPromiscuousLevelPacket with no fields set
Server responds with VNICPromiscuousLevelPacket with get set to current level
Client sends VNICPromiscuousLevelPacket with set field set
Server responds with VNICPromiscuousLevelPacket with get set to new level
"""
DEFINITION_IDENTIFIER = "vsockets.VNICPromiscuousLevelPacket"
DEFINITION_VERSION = "1.0"
FIELDS = [ ("set",UINT8({Optional:True})),
("get",UINT8({Optional:True}))]
def basicUnitTest():
v1 = VNICSocketOpenPacket(callbackAddress="1.1.1.1", callbackPort=80)
connectData = v1.SocketConnectData(destination="2.2.2.2",destinationPort=1000)
v1.connectData = connectData
assert v1.isConnectType()
v1a = VNICSocketOpenPacket.Deserialize(v1.__serialize__())
assert v1 == v1a
v2 = VNICSocketOpenResponsePacket()
v2.port = 666
v2.errorCode = 1
v2.errorMessage = "test failure"
v2a = VNICSocketOpenResponsePacket.Deserialize(v2.__serialize__())
assert v2 == v2a
assert v2a.isFailure()
v3 = VNICConnectionSpawnedPacket()
v3.spawnTcpPort=555
v3.source="0.0.0.0"
v3.sourcePort=999
v3.destination="1.2.3.4"
v3.destinationPort=123
v3a = VNICConnectionSpawnedPacket.Deserialize(v3.__serialize__())
assert v3 == v3a
if __name__ == "__main__":
basicUnitTest()
print("Basic unit test completed successfully.")
|
python
|
"""Base class for patching time and I/O modules."""
import sys
import inspect
class BasePatcher(object):
"""Base class for patching time and I/O modules."""
# These modules will not be patched by default, unless explicitly specified
# in `modules_to_patch`.
# This is done to prevent time-travel from interfering with the timing of
# the actual test environment.
UNPATCHED_MODULES = ['pytest', '_pytest', 'unittest', 'mock', 'threading']
def __init__(self,
clock,
event_pool,
modules_to_patch=None,
patcher_module=None):
"""Create the patch."""
self.clock = clock
self.event_pool = event_pool
if modules_to_patch is None:
self.modules_to_patch = []
elif isinstance(modules_to_patch, (list, tuple)):
self.modules_to_patch = modules_to_patch
else:
self.modules_to_patch = [modules_to_patch]
self.patcher_module = patcher_module if patcher_module else None
self._undo_set = set()
@classmethod
def get_events_namespace(cls):
"""Return the namespace of the patcher's events."""
return None
@classmethod
def get_events_types(cls):
"""Return Enum of the patcher's events types."""
return None
def get_patched_module(self):
"""Return the actual module obect to be patched."""
raise NotImplementedError()
def get_patch_actions(self):
"""Return list of the patches to do.
The list structure is tuples containing:
(real_object_name,
the_real_object,
fake_object)
"""
raise NotImplementedError()
def start(self):
"""Start the patcher.
The logic to the patchers start is based on the work done by:
spulec/freezegun
under
https://github.com/spulec/freezegun
Copyright (C) 2017 spulec/freezegun
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
Modifications:
Modifications to the file was to leave the inner change of the loaded
modules and removing any other related logic to a specific module.
"""
patch_actions = self.get_patch_actions()
real_id_to_fake = {id(real): fake for _, real, fake in patch_actions}
patched_module = self.get_patched_module()
# Change modules for later imports.
for obj_name, real_obj, fake_obj in patch_actions:
self._save_for_undo(patched_module, obj_name, real_obj)
setattr(patched_module, obj_name, fake_obj)
if self.modules_to_patch:
# If only a given list of modules is required to be patched
modules = [sys.modules[name] for name in self.modules_to_patch]
else:
# not given a specific module to patch on.
# Create the list of all modules to search for the patched objects.
# Patch on all loaded modules.
modules = [
module for mod_name, module in sys.modules.items() if
(inspect.ismodule(module)
and hasattr(module, '__name__')
# Don't patch inside the original module, this (the patcher)
# module, or the unpatched modules.
and module.__name__ not in ([patched_module,
self.patcher_module,
__name__]
+ self.UNPATCHED_MODULES
)
)
]
# Search in all modules for the object to patch.
for module in modules:
for attr in dir(module):
try:
# Get any attribute loaded on the module.
attribute_value = getattr(module, attr)
except (ValueError, AttributeError, ImportError):
                    # For some libraries, this happens.
# e.g. attr=dbm_gnu, module=pkg_resources._vendor.six.moves
continue
# If the attribute is on this module - avoid recursion.
# Do stuff only if the attribute is the object to patch.
if id(attribute_value) not in real_id_to_fake.keys():
continue
# Find the relative mock object for the original class.
fake_obj = real_id_to_fake.get(id(attribute_value))
# Change the class to the mocked one in the given module.
setattr(module, attr, fake_obj)
# Save the original class for later - when stopping the patch.
self._save_for_undo(module, attr, attribute_value)
def stop(self):
"""Stop the patching."""
for module, attribute, original_value in self._undo_set:
setattr(module, attribute, original_value)
self._undo_set.clear()
def _save_for_undo(self, module, attribute, original_value):
self._undo_set.add((module, attribute, original_value))
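# --- Illustrative subclass sketch (hypothetical, not part of this library) ---
# Shows the contract expected from get_patched_module()/get_patch_actions():
# patch time.time so that it reads from the fake clock handed to BasePatcher.
# The `clock.time` attribute used below is an assumption made for this example.
import time as _time_module
class ExampleTimePatcher(BasePatcher):
    """Minimal patcher replacing time.time with the fake clock's time."""
    def get_patched_module(self):
        return _time_module
    def get_patch_actions(self):
        # (real_object_name, the_real_object, fake_object)
        fake_time = lambda: self.clock.time
        return [('time', _time_module.time, fake_time)]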
|
python
|
#!/usr/bin/env python
"""
Pox-based OpenFlow manager
"""
import pox.openflow.libopenflow_01 as of
import pox.core
from pox.core import core
from pox.lib.recoco import *
from pox.lib.revent import *
from pox.lib.addresses import IPAddr, EthAddr
from pox.lib.util import dpid_to_str
from pox.lib.util import str_to_dpid
from debussy.util import Config
from debussy.db import DebussyDb
from debussy.profiling import PerfCounter
from debussy.messaging import MsgQueueReceiver, RpcReceiver
from debussy.of import OfManager
log = core.getLogger()
class PoxManager(OfManager):
"Pox-based OpenFlow manager"
def __init__(self, log, dbname, dbuser):
super(PoxManager, self).__init__()
self.db = DebussyDb(dbname, dbuser, None, reconnect=True)
self.log = log
self.datapaths = {}
self.flowstats = []
self.perfcounter = PerfCounter("sw_delay")
self.dpid_cache = {}
core.openflow.addListeners(self, priority=0)
self.log.info("debussy: starting pox manager")
def startup():
self.log.info("registering handlers")
core.openflow_discovery.addListeners(self)
core.call_when_ready(startup, ("openflow", "openflow_discovery"))
def update_switch_cache(self):
self.db.cursor.execute("SELECT * FROM switches;")
result = self.db.cursor.fetchall()
for sw in result:
self.dpid_cache[sw[1]] = { 'sid' : sw[0],
'dpid' : sw[1],
'ip' : sw[2],
'mac': sw[3],
'name': sw[4] }
def _handle_ConnectionDown(self, event):
dpid = "%0.16x" % event.dpid
self.update_switch_cache()
del self.datapaths[event.dpid]
self.db.cursor.execute("DELETE FROM switches WHERE dpid='{0}';"
.format(dpid))
self.log.info("debussy: dpid {0} removed".format(event.dpid))
def _handle_ConnectionUp(self, event):
dpid = "%0.16x" % event.dpid
self.update_switch_cache()
self.datapaths[event.dpid] = event.connection
self.db.cursor.execute("SELECT COUNT(*) FROM switches WHERE dpid='{0}';"
.format(dpid))
count = self.db.cursor.fetchall()[0][0]
if count > 0:
# switch already in db
pass
elif dpid in self.dpid_cache:
sw = self.dpid_cache[dpid]
self.db.cursor.execute("INSERT INTO switches (sid, dpid, ip, mac, name) "
"VALUES ({0}, '{1}', '{2}', '{3}', '{4}');".format(
sw['sid'], sw['dpid'], sw['ip'], sw['mac'], sw['name']))
else:
sid = len(self.dpid_cache) + 1
name = "s{0}".format(sid)
self.db.cursor.execute("INSERT INTO switches (sid, dpid, name) VALUES "
"({0}, '{1}', '{2}')".format(sid, dpid, name))
self.log.info("debussy: dpid {0} online".format(event.dpid))
self.log.info("debussy: online dpids: {0}".format(self.datapaths))
def _handle_LinkEvent(self, event):
dpid1 = "%0.16x" % event.link.dpid1
dpid2 = "%0.16x" % event.link.dpid2
port1 = event.link.port1
port2 = event.link.port2
sid1 = self.dpid_cache[dpid1]['sid']
sid2 = self.dpid_cache[dpid2]['sid']
if event.removed:
self.db.cursor.execute("UPDATE tp SET isactive=0 WHERE "
" (sid={0} AND nid={1}) OR "
" (sid={1} AND nid={0});"
.format(sid1, sid2))
self.log.info("Link down {0}".format(event.link))
elif event.added:
# does the forward link exist in Postgres?
self.db.cursor.execute("SELECT COUNT(*) FROM tp WHERE "
"sid={0} AND nid={1};"
.format(sid1, sid2))
count = self.db.cursor.fetchall()[0][0]
if count == 0:
self.db.cursor.execute("INSERT INTO tp (sid, nid, ishost, isactive) "
"VALUES ({0}, {1}, 0, 1);"
.format(sid1, sid2))
self.db.cursor.execute("INSERT INTO ports (sid, nid, port) VALUES "
"({0}, {1}, {2});"
.format(sid1, sid2, port1))
# does the reverse link already exist in Postgres?
self.db.cursor.execute("SELECT COUNT(*) FROM tp WHERE "
"sid={0} AND nid={1};"
.format(sid2, sid1))
count = self.db.cursor.fetchall()[0][0]
if count == 0:
self.db.cursor.execute("INSERT INTO tp (sid, nid, ishost, isactive) "
"VALUES ({0}, {1}, 0, 1);"
.format(sid2, sid1))
self.db.cursor.execute("INSERT INTO ports (sid, nid, port) VALUES "
"({0}, {1}, {2});"
.format(sid2, sid1, port2))
self.log.info("Link up {0}".format(event.link))
def _handle_BarrierIn(self, event):
self.perfcounter.stop()
self.log.debug("received barrier")
def _handle_FlowStatsReceived(self, event):
self.log.info("debussy: flow stat received dpid={0}, len={1}".format(
event.connection.dpid, len(event.stats)))
for stat in event.stats:
self.log.info(" flow: nw_src={0}, nw_dst={1}".format(
stat.match.nw_src, stat.match.nw_dst))
def requestStats(self):
"Send all switches a flow statistics request"
self.flowstats = []
for connection in core.openflow._connections.values():
connection.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))
self.log.debug("debussy: sent {0} flow stats requests".format(
len(core.openflow._connections)))
return True
def sendBarrier(self, dpid):
"""Send a barrier message
dpid: datapath id of the switch to receive the barrier"""
dpid = int(dpid)
if dpid in self.datapaths:
dp = self.datapaths[dpid]
msg = of.ofp_barrier_request()
dp.send(msg)
self.perfcounter.start()
self.log.debug("dpid {0} sent barrier".format(dpid))
else:
self.log.debug("dpid {0} not in datapath list".format(dpid))
return True
def registerReceiver(self, receiver):
"""Register a new message receiver
receiver: a debussy.messaging.MessageReceiver object"""
self.log.info("registering receiver")
self.receiver.append(receiver)
receiver.start()
core.addListener(pox.core.GoingDownEvent, receiver.stop)
def isRunning(self):
"returns: true if the controller is running, false otherwise"
return core.running
def mk_msg(self, flow):
"""Create a Pox flowmod message from debussy.flow.OfMessage
flow: a debussy.flow.OfMessage object"""
msg = of.ofp_flow_mod()
msg.command = int(flow.command)
msg.priority = int(flow.priority)
msg.match = of.ofp_match()
if flow.match.dl_type is not None:
msg.match.dl_type = int(flow.match.dl_type)
if flow.match.nw_src is not None:
msg.match.nw_src = IPAddr(flow.match.nw_src)
if flow.match.nw_dst is not None:
msg.match.nw_dst = IPAddr(flow.match.nw_dst)
if flow.match.dl_src is not None:
msg.match.dl_src = EthAddr(flow.match.dl_src)
if flow.match.dl_dst is not None:
msg.match.dl_dst = EthAddr(flow.match.dl_dst)
for outport in flow.actions:
msg.actions.append(of.ofp_action_output(port=int(outport)))
return msg
def send(self, dpid, msg):
"""Send a message to a switch
dpid: datapath id of the switch
msg: OpenFlow message"""
self.log.debug("debussy: flow mod dpid={0}".format(dpid))
if dpid in self.datapaths:
dp = self.datapaths[dpid]
dp.send(msg)
else:
self.log.debug("dpid {0} not in datapath list".format(dpid))
def sendFlowmod(self, flow):
"""Send a flow modification message
flow: the flow modification message to send"""
dpid = int(flow.switch.dpid)
self.send(dpid, self.mk_msg(flow))
def launch():
"Start the OpenFlow manager and message receivers"
ctrl = PoxManager(log, Config.DbName, Config.DbUser)
mq = MsgQueueReceiver(Config.QueueId, ctrl)
ctrl.registerReceiver(mq)
rpc = RpcReceiver(Config.RpcHost, Config.RpcPort, ctrl)
ctrl.registerReceiver(rpc)
core.register("debussycontroller", ctrl)
|
python
|
import cv2 as cv # noqa
import numpy as np # noqa
|
python
|
r"""
Module of trace monoids (free partially commutative monoids).
EXAMPLES:
We first create a trace monoid::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: M.<a,b,c> = TraceMonoid(I=(('a','c'), ('c','a'))); M
Trace monoid on 3 generators ([a], [b], [c]) with independence relation {{a, c}}
Different elements can be equal because of the partially
commutative multiplication::
sage: c * a * b == a * c * b
True
We check that it is a monoid::
sage: M in Monoids()
True
REFERENCES:
- :wikipedia:`Trace_monoid`
- https://ncatlab.org/nlab/show/trace+monoid
AUTHORS:
- Pavlo Tokariev (2019-05-31): initial version
"""
# ****************************************************************************
# Copyright (C) 2019 Pavlo Tokariev <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from collections import OrderedDict
from itertools import repeat, chain, product
from sage.misc.cachefunc import cached_method
from sage.misc.misc_c import prod
from sage.graphs.digraph import DiGraph
from sage.graphs.graph import Graph
from sage.monoids.free_monoid import FreeMonoid
from sage.monoids.monoid import Monoid_class
from sage.rings.integer_ring import ZZ
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
from sage.rings.power_series_ring import PowerSeriesRing
from sage.rings.infinity import infinity
from sage.combinat.words.alphabet import Alphabet
from sage.structure.element import MonoidElement
from sage.structure.element_wrapper import ElementWrapper
from sage.structure.unique_representation import UniqueRepresentation
class TraceMonoidElement(ElementWrapper, MonoidElement):
r"""
Element of a trace monoid, also known as a trace.
    Elements of a trace monoid are equivalence classes of elements
    of the underlying free monoid, where the equivalence relation
    is induced by the independence relation.
.. RUBRIC:: Representative
We transform each trace to its lexicographic form for the
representative in the ambient free monoid. This is also used
for comparisons.
EXAMPLES::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
sage: M.<a,b,c,d> = TraceMonoid(I=I)
sage: x = b * a * d * a * c * b
sage: x^3
[b*a^2*d*b^2*c*a^2*d*b^2*c*a^2*d*b*c]
sage: x^0
1
sage: x.lex_normal_form()
b*a^2*d*b*c
sage: x.foata_normal_form()
(b, a*d, a, b*c)
"""
def _repr_(self):
"""
Textual representation of ``self``.
TESTS::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
sage: M.<a,b,c,d> = TraceMonoid(I=I)
sage: a * b
[a*b]
sage: b * a
[b*a]
sage: d * a
[a*d]
"""
if self == self.parent().one():
return "1"
return "[{}]".format(self.value)
def _richcmp_(self, other, op):
r"""
Compare two traces by their lexicographic normal forms.
EXAMPLES::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
sage: M.<a,b,c,d> = TraceMonoid(I=I)
sage: a^2 > a
True
sage: a*b < b*a
True
sage: a * c * b == a * b * c
True
"""
return self.value._richcmp_(other.value, op)
def lex_normal_form(self):
r"""
Return the lexicographic normal form of ``self``.
OUTPUT:
A free monoid element.
EXAMPLES::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
sage: M.<a,b,c,d> = TraceMonoid(I=I)
sage: (a*b).lex_normal_form()
a*b
sage: (b*a).lex_normal_form()
b*a
sage: (d*a).lex_normal_form()
a*d
"""
return self.value
def foata_normal_form(self):
r"""
Return the Foata normal form of ``self``.
OUTPUT:
Tuple of free monoid elements.
EXAMPLES::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
sage: M.<a,b,c,d> = TraceMonoid(I=I)
sage: x = b * a * d * a * c * b
sage: x.foata_normal_form()
(b, a*d, a, b*c)
"""
return self.parent()._compute_foata_normal_form(self.value)
def _mul_(self, other):
r"""
Concatenate one equivalence class with another.
EXAMPLES::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
sage: M.<a,b,c,d> = TraceMonoid(I=I)
sage: a * b * c == a * c * b
True
"""
return self.parent(self.value * other.value)
def _flat_elements(self):
r"""
Return flatten list of generator numbers representing the trace.
OUTPUT:
A list of generator indexes.
TESTS::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
sage: M.<a,b,c,d> = TraceMonoid(I=I)
sage: x = b * a^3 * d * a * c * b^2
sage: x._flat_elements()
[b, a, a, a, a, d, b, b, c]
"""
return [g for g, times in self.value for _ in range(times)]
@cached_method
def dependence_graph(self):
r"""
Return dependence graph of the trace.
        It is a directed graph where all dependent (non-commutative)
        generators are connected by edges whose direction depends
        on the generators' positions in the trace.
OUTPUT:
Directed graph of generator indexes.
EXAMPLES::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
sage: M.<a,b,c,d> = TraceMonoid(I=I)
sage: x = b * a * d * a * c * b
sage: x.dependence_graph()
Digraph on 6 vertices
"""
elements = self._flat_elements()
independence = self.parent()._independence
graph = {}
for i, e in enumerate(elements):
edges = []
for v in graph:
if (e, elements[v]) not in independence:
edges.append((v, i))
graph[i] = []
for v1, v2 in edges:
graph[v1].append(v2)
return DiGraph(graph)
@cached_method
def hasse_diagram(self, algorithm="naive"):
r"""
Return Hasse diagram of the trace.
Hasse diagram is a dependence graph without transitive edges.
INPUT:
- ``algorithm`` -- string (default: ``'naive'``); defines algorithm
that will be used to compute Hasse diagram; there are two
variants: ``'naive'`` and ``'min'``.
OUTPUT:
Directed graph of generator indexes.
.. SEEALSO::
:meth:`~sage.monoids.trace_monoid.TraceMonoidElement.naive_hasse_digram`,
:meth:`~sage.monoids.trace_monoid.TraceMonoidElement.min_hasse_diagram`.
EXAMPLES::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
sage: M.<a,b,c,d> = TraceMonoid(I=I)
sage: x = b * a * d * a * c * b
sage: x.hasse_diagram()
Digraph on 6 vertices
TESTS::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
sage: M.<a,b,c,d> = TraceMonoid(I=I)
sage: x = b * a * d * a * c * b
sage: x.hasse_diagram(algorithm='naive') == x.hasse_diagram(algorithm='min')
True
sage: y = b * a^3 * d * a * c * b^2
sage: y.hasse_diagram(algorithm='naive') == y.hasse_diagram(algorithm='min')
True
"""
if algorithm == "naive":
return self.naive_hasse_diagram()
elif algorithm == "min":
return self.min_hasse_diagram()
else:
raise ValueError("`alg` option must be `naive` "
"or `min`, got `{}`.".format(algorithm))
def min_hasse_diagram(self):
r"""
Return Hasse diagram of the trace.
OUTPUT:
Directed graph of generator indexes.
.. SEEALSO::
:meth:`~sage.monoids.trace_monoid.TraceMonoidElement.hasse_digram`,
:meth:`~sage.monoids.trace_monoid.TraceMonoidElement.naive_hasse_diagram`.
EXAMPLES::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
sage: M.<a,b,c,d> = TraceMonoid(I=I)
sage: x = b * a * d * a * c * b
sage: x.min_hasse_diagram()
Digraph on 6 vertices
"""
elements = self._flat_elements()
elements.reverse()
independence = self.parent()._independence
reachable = dict()
min = set()
graph = DiGraph({})
for i, x in enumerate(elements):
reachable[i] = set()
front = min.copy()
while front:
used = set()
for j in list(front):
y = elements[j]
if (x, y) not in independence:
graph.add_edge(i, j)
reachable[i].add(j)
reachable[i].update(reachable[j])
if j in min:
min.remove(j)
used.add(j)
forbidden = set(chain.from_iterable(reachable[v] for v in used))
front = set(dest for _, dest in graph.outgoing_edges(front, labels=False))
front = front - forbidden
min.add(i)
length = len(elements)
graph.relabel(length - 1 - i for i in range(length))
return graph
def naive_hasse_diagram(self):
r"""
Return Hasse diagram of ``self``.
ALGORITHM:
In loop check for every two pair of edges if they
have common vertex, remove their transitive edge.
OUTPUT:
Directed graph of generator indexes.
.. SEEALSO::
:meth:`~sage.monoids.trace_monoid.TraceMonoidElement.hasse_digram`,
:meth:`~sage.monoids.trace_monoid.TraceMonoidElement.min_hasse_diagram`.
EXAMPLES::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
sage: M.<a,b,c,d> = TraceMonoid(I=I)
sage: x = b * a * d * a * c * b
sage: x.naive_hasse_diagram()
Digraph on 6 vertices
"""
d = self.dependence_graph()
h = d.copy()
for e1 in d.edges():
for e2 in d.edges():
if e1[1] == e2[0]:
h.delete_edge((e1[0], e2[1]))
return h
def alphabet(self):
r"""
Return alphabet of ``self``.
OUTPUT:
A set of free monoid generators.
EXAMPLES::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
sage: M.<a,b,c,d> = TraceMonoid(I=I)
sage: x = b*a*d*a*c*b
sage: x.alphabet()
{b, a, d, c}
"""
return Alphabet([g for g, _ in self.value])
def projection(self, letters):
r"""
Return a trace that formed from ``self`` by erasing ``letters``.
INPUT:
- ``letters`` -- set of generators; defines set of letters that will be
used to filter the trace
OUTPUT:
A trace
EXAMPLES::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: F.<a,b,c,d> = FreeMonoid()
sage: I = ((a,d), (d,a), (b,c), (c,b))
sage: M.<ac,bc,cc,dc> = TraceMonoid(F, I=I)
sage: x = M(b*a*d*a*c*b)
sage: x.projection({a,b})
[b*a^2*b]
sage: x.projection({b,d,c})
[b*d*b*c]
"""
P = self.parent()
base = P._free_monoid
return P(base.prod(x for x in self._flat_elements() if x in letters))
def multiplicative_order(self):
r"""
Return the multiplicative order of ``self``, which is `\infty`
for any element not the identity.
EXAMPLES::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
sage: M.<a,b,c,d> = TraceMonoid(I=I)
sage: a.multiplicative_order()
+Infinity
sage: M.one().multiplicative_order()
1
"""
if self.value.is_one():
return ZZ.one()
return infinity
class TraceMonoid(UniqueRepresentation, Monoid_class):
r"""
Return a free partially commuting monoid (trace monoid) on `n` generators
over independence relation `I`.
    We construct a trace monoid by specifying:
    - a free monoid and an independence relation,
    - or generator names and an independence relation;
      in the latter case the FreeMonoid is constructed automatically.
INPUT:
- ``M`` -- a free monoid
- ``I`` -- commutation relation between generators
(or their names if the ``names`` are given)
- ``names`` -- names of generators
EXAMPLES::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: F = TraceMonoid(names=('a', 'b', 'c'), I={('a','c'), ('c','a')}); F
Trace monoid on 3 generators ([a], [b], [c]) with independence relation {{a, c}}
sage: x = F.gens()
sage: x[0]*x[1]**5 * (x[0]*x[2])
[a*b^5*a*c]
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: M.<a,b,c> = TraceMonoid(I=(('a','c'), ('c','a')))
sage: latex(M)
\langle a, b, c \mid ac=ca \rangle
TESTS::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: M.<a,b,c> = TraceMonoid(I=(('a','c'), ('c','a')))
sage: M.number_of_words(3) == len(M.words(3))
True
"""
Element = TraceMonoidElement
@staticmethod
def __classcall_private__(cls, M=None, I=frozenset(), names=None):
"""
Normalize input to ensure a unique representation.
TESTS::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: M1.<a,b,c> = TraceMonoid(I=(('a','c'), ('c','a')))
sage: M2.<a,b,c> = TraceMonoid(I=[('a','c')])
sage: M3 = TraceMonoid(I=[{'a','c'}], names=('a', 'b', 'c'))
sage: M1 is M2 and M2 is M3
True
"""
if not M:
if names:
M = FreeMonoid(names=names)
else:
raise ValueError("names must be provided")
elif not names:
names = [str(g) for g in M.gens()]
names = tuple(names)
rels = set()
gen_from_str = {names[i]: gen for i, gen in enumerate(M.gens())}
for (x, y) in I:
try:
if isinstance(x, str):
x = gen_from_str[x]
x = M(x)
if isinstance(y, str):
y = gen_from_str[y]
y = M(y)
if x == y:
raise ValueError
except (TypeError, ValueError):
raise ValueError("invalid relation defined")
rels.add((x, y))
rels.add((y, x))
I = frozenset(rels)
return super(TraceMonoid, cls).__classcall__(cls, M, I, names)
def __init__(self, M, I, names):
r"""
Initialize ``self``.
TESTS::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: M.<a,b,c> = TraceMonoid(I=(('a','c'), ('c','a')))
sage: TestSuite(M).run()
"""
self._free_monoid = M
self._independence = I
Monoid_class.__init__(self, names=names)
def ngens(self):
"""
Return the number of generators of ``self``.
EXAMPLES::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: M.<a,b,c> = TraceMonoid(I=(('a','c'), ('c','a')))
sage: M.ngens()
3
"""
return self._free_monoid.ngens()
def one(self):
"""
Return the neutral element of ``self``.
EXAMPLES::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: M.<a,b,c> = TraceMonoid(I=(('a','c'), ('c','a')))
sage: M.one()
1
"""
return self.element_class(self, self._free_monoid.one())
def gen(self, i=0):
"""
Return the `i`-th generator of the monoid.
INPUT:
- ``i`` -- integer (default: 0)
EXAMPLES::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: M.<a,b,c> = TraceMonoid(I=(('a','c'), ('c','a')))
sage: M.gen(1)
[b]
sage: M.gen(4)
Traceback (most recent call last):
...
IndexError: argument i (= 4) must be between 0 and 2
"""
return self.element_class(self, self._free_monoid.gen(i))
def cardinality(self):
"""
Return the cardinality of ``self``, which is infinite except for
the trivial monoid.
EXAMPLES::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: M.<a,b,c> = TraceMonoid(I=(('a','c'), ('c','a')))
sage: M.cardinality()
+Infinity
"""
return self._free_monoid.cardinality()
def _compute_dependence_stack(self, x):
r"""
Return generator stacks formed from trace
subelements with respect to non-commutativity.
OUTPUT:
        The set of used generators and the list of stacks, as a tuple.
ALGORITHM:
Let `x` be a word of monoid; we scan `x` from right to left;
when processing a letter `a` it is pushed on its stack and a
marker is pushed on the stack of all the letters `b` ( `b \neq a` )
which do not commute with `a`.
TESTS::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: F.<a,b,c,d> = FreeMonoid()
sage: I = (('ac','dc'), ('dc','ac'), ('bc','cc'), ('cc','bc'))
sage: M.<ac,bc,cc,dc> = TraceMonoid(F, I=I)
sage: x = b*a*d*a*c*b
sage: M._compute_dependence_stack(x)
({a, b, c, d},
OrderedDict([(a, [False, False, True, True, False]),
(b, [True, False, False, False, True]),
(c, [True, False, False, False]),
(d, [False, False, True, False])]))
"""
independence = self._independence
generators_set = set(e for e, _ in x)
stacks = OrderedDict(sorted((g, []) for g in generators_set))
for generator, times in reversed(list(x)):
stacks[generator].extend(repeat(True, times))
for other_gen in generators_set:
if other_gen == generator:
continue
if (generator, other_gen) not in independence:
stacks[other_gen].extend(repeat(False, times))
return generators_set, stacks
@cached_method
def _compute_lex_normal_form(self, x):
r"""
Return lexicographic normal form of the free monoid
element in free monoid terms.
OUTPUT:
Trace monoid element.
ALGORITHM:
        Among the letters on top of the stacks, take the letter `a`
        that is minimal with respect to the given lexicographic
        ordering. We pop a marker from each stack corresponding to a
        letter `b` ( `b \neq a` ) which does not commute with `a`. We repeat
        this loop until all stacks are empty.
EXAMPLES::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: F.<a,b,c,d> = FreeMonoid()
sage: I = ((a,d), (d,a), (b,c), (c,b))
sage: M.<ac,bc,cc,dc> = TraceMonoid(F, I=I)
sage: M._compute_lex_normal_form(c*a*c*b*a^2)
c*a*b*c*a^2
"""
if not x._element_list:
return x
generators_set, stacks = self._compute_dependence_stack(x)
independence = self._independence
elements = []
while any(stacks.values()):
for generator, g_stack in stacks.items():
if g_stack and g_stack[-1]:
g_stack.pop()
elements.append(generator)
for other_gen in generators_set:
if (other_gen != generator
and (generator, other_gen) not in independence):
stacks[other_gen].pop()
break
return prod(elements)
@cached_method
def _compute_foata_normal_form(self, x):
r"""
Return Foata normal form of the monoid element.
OUTPUT: tuple of steps
ALGORITHM:
Within a loop we form the set of letters on top of the stacks;
arranging these letters in lexicographic order yields a step
of the Foata normal form.
The loop is repeated until all stacks are empty.
EXAMPLES::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: F.<a,b,c,d> = FreeMonoid()
sage: I = ((a,d), (d,a), (b,c), (c,b))
sage: M.<ac,bc,cc,dc> = TraceMonoid(F, I=I)
sage: x = b*a*d*a*c*b
sage: M._compute_foata_normal_form(x)
(b, a*d, a, b*c)
sage: y = b*a*a*d*b*a*b*c^2*a
sage: M._compute_foata_normal_form(y)
(b, a*d, a, b, a, b*c, c, a)
"""
if not x._element_list:
return tuple()
generators_set, stacks = self._compute_dependence_stack(x)
independence = self._independence
steps = []
while any(stacks.values()):
step = []
for generator, g_stack in stacks.items():
if g_stack and g_stack[-1]:
g_stack.pop()
step.append(generator)
for g in step:
for other_gen in generators_set:
if other_gen != g and (g, other_gen) not in independence:
stacks[other_gen].pop()
steps.append(step)
return tuple(prod(step) for step in steps)
def _element_constructor_(self, x):
"""
Return ``x`` coerced into this trace monoid.
One can create an element from the integer 1, from elements of
the underlying free monoid (with the same generators), and from
anything that coerces into that free monoid.
EXAMPLES::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: F.<a,b,c,d> = FreeMonoid()
sage: I = ((a,d), (d,a), (b,c), (c,b))
sage: M.<ac,bc,cc,dc> = TraceMonoid(F, I=I)
sage: x = b*a*d*a*c*b
sage: M(x)
[b*a^2*d*b*c]
"""
x = self._compute_lex_normal_form(self._free_monoid(x))
return self.element_class(self, x)
@cached_method
def independence(self):
r"""
Return independence relation over the monoid.
OUTPUT: set of commuting generator pairs.
EXAMPLES::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: F.<a,b,c> = FreeMonoid()
sage: I = frozenset(((a,c), (c,a)))
sage: M.<ac,bc,cc> = TraceMonoid(F, I=I)
sage: M.independence() == frozenset([frozenset([a,c])])
True
"""
return frozenset(map(frozenset, self._independence))
@cached_method
def dependence(self):
r"""
Return dependence relation over the monoid.
OUTPUT:
Set of non-commuting generator pairs.
EXAMPLES::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: M.<a,b,c> = TraceMonoid(I=(('a','c'), ('c','a')))
sage: sorted(M.dependence())
[(a, a), (a, b), (b, a), (b, b), (b, c), (c, b), (c, c)]
"""
return frozenset(pair for pair in product(self._free_monoid.gens(), repeat=2)
if pair not in self._independence)
@cached_method
def dependence_graph(self):
r"""
Return graph of dependence relation.
OUTPUT: dependence graph with generators as vertices
TESTS::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: F.<a,b,c> = FreeMonoid()
sage: M.<ai,bi,ci> = TraceMonoid(F, I=((a,c), (c,a)))
sage: M.dependence_graph() == Graph({a:[a,b], b:[b], c:[c,b]})
True
"""
return Graph(set(frozenset((e1, e2)) if e1 != e2 else (e1, e2)
for e1, e2 in self.dependence()), loops=True,
format="list_of_edges",
immutable=True)
@cached_method
def independence_graph(self):
r"""
Return the graph of the independence relation.
OUTPUT:
Independence graph with generators as vertices.
TESTS::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: F.<a,b,c> = FreeMonoid()
sage: M.<ai,bi,ci> = TraceMonoid(F, I=((a,c), (c,a)))
sage: M.independence_graph() == Graph({a:[c], b:[], c:[]})
True
"""
verts = list(self._free_monoid.gens())
edges = list(map(list, self.independence()))
return Graph([verts, edges], immutable=True)
@cached_method
def dependence_polynomial(self, t=None):
r"""
Return the dependence polynomial.
The polynomial is defined as `\sum_i (-1)^i c_i t^i`,
where `c_i` is the number of complete subgraphs (cliques)
of size `i` in the independence graph.
OUTPUT:
A rational function in ``t`` with coefficients in the integer ring.
EXAMPLES::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
sage: M.<a,b,c,d> = TraceMonoid(I=I)
sage: M.dependence_polynomial()
1/(2*t^2 - 4*t + 1)
"""
if t is None:
R = PolynomialRing(ZZ, 't')
t = R.gen()
clique_seq = self.independence_graph().clique_polynomial().coefficients()
return ~sum((-1)**i * coeff * (t**i)
for i, coeff in enumerate(clique_seq))
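# Added worked illustration (not part of the original source): for
# I = {{a, d}, {b, c}} the independence graph on {a, b, c, d} has edges a-d and
# b-c, so its clique polynomial is 1 + 4*t + 2*t^2 (the empty clique, four
# vertices, two edges).  Applying sum_i (-1)^i c_i t^i gives 1 - 4*t + 2*t^2,
# and the dependence polynomial is its reciprocal, 1/(2*t^2 - 4*t + 1), which
# matches the EXAMPLES block above.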
@cached_method
def number_of_words(self, length):
r"""
Return the number of distinct words of the given length.
INPUT:
- ``length`` -- integer; the length of the words to be counted
OUTPUT: the number of words as an integer
EXAMPLES:
Get number of words of size 3 ::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
sage: M.<a,b,c,d> = TraceMonoid(I=I)
sage: M.number_of_words(3)
48
"""
psr = PowerSeriesRing(ZZ, default_prec=length + 1)
return psr(self.dependence_polynomial()).coefficients()[length]
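# Added illustration: the dependence polynomial is the generating function for
# the number of traces.  For the EXAMPLES above, 1/(1 - 4*t + 2*t^2) expands as
# 1 + 4*t + 14*t^2 + 48*t^3 + ..., whose coefficients satisfy
# a_n = 4*a_{n-1} - 2*a_{n-2}; the coefficient of t^3 is the 48 returned above,
# and the coefficient of t^2 matches the 14 words listed by ``words(2)`` below.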
@cached_method
def words(self, length):
r"""
Return all lexicographic normal forms of the given length.
INPUT:
- ``length`` -- integer; the length of the words
OUTPUT: set of traces of length ``length``
EXAMPLES:
All words of size 2::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
sage: M.<a,b,c,d> = TraceMonoid(I=I)
sage: sorted(M.words(2))
[[a^2], [a*b], [a*c], [a*d], [b*a], [b^2], [b*c],
[b*d], [c*a], [c^2], [c*d], [d*b], [d*c], [d^2]]
Get number of words of size 3::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
sage: M.<a,b,c,d> = TraceMonoid(I=I)
sage: len(M.words(3))
48
TESTS::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: M.<a,b,c> = TraceMonoid(I=(('a','b'), ('b','a'), ('b', 'c'), ('c', 'b')))
sage: for i in range(10):
....: assert len(M.words(i)) == M.number_of_words(i)
sage: True
True
"""
if length < 0:
raise ValueError("Bad length of words. Expected zero or positive number.")
if length == 0:
return frozenset([self.one()])
if length == 1:
return frozenset(self.gens())
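# Added note: a product ``word * suffix`` is skipped when ``suffix`` commutes
# with the last letter of ``word`` and is lexicographically smaller than it;
# this keeps only lexicographic normal forms, so each trace is counted once.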
return frozenset([word * suffix for word in self.words(length - 1)
for suffix in self.gens()
if not ((list(word.value)[-1][0], suffix.value) in self._independence
and list(word.value)[-1][0] > suffix.value)])
def _sorted_independence(self):
r"""
Return independence relation over the monoid.
OUTPUT: sorted list of sorted commuting generator pairs.
EXAMPLES::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: F.<a,b,c> = FreeMonoid()
sage: I = frozenset(((a,c), (c,a)))
sage: M.<ac,bc,cc> = TraceMonoid(F, I=I)
sage: M._sorted_independence()
[[a, c]]
"""
return sorted(sorted(x_y)
for x_y in sorted(self.independence()))
def _repr_(self):
r"""
Textual representation of trace monoids.
TESTS::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
sage: M.<a,b,c,d> = TraceMonoid(I=I); M
Trace monoid on 4 generators ([a], [b], [c], [d])
with independence relation {{a, d}, {b, c}}
"""
return ("Trace monoid on {!s} generators {!s} "
"with independence relation {{{}}}").format(self.ngens(), self.gens(),
", ".join("{{{}, {}}}".format(x, y)
for (x, y) in self._sorted_independence()))
def _latex_(self):
r"""
LaTeX representation of trace monoids.
TESTS::
sage: from sage.monoids.trace_monoid import TraceMonoid
sage: I = (('a','d'), ('d','a'), ('b','c'), ('c','b'))
sage: M.<a,b,c,d> = TraceMonoid(I=I); latex(M)
\langle a, b, c, d \mid ad=da,bc=cb \rangle
"""
return "\\langle {} \\mid {} \\rangle".format(
repr(self._free_monoid.gens())[1:-1],
",".join(
"{0!r}{1!r}={1!r}{0!r}".format(v1, v2)
for v1, v2 in self._sorted_independence()
)
)
|
python
|
from .handler import get_db_handle
__all__ = ["get_db_handle"]
|
python
|
#!/usr/bin/env python3
import os
# Third Party
from flask import Blueprint
from flask import request, jsonify
from flask import render_template
# main
from . import routes
main = Blueprint('main', __name__)
# Routes
main.add_url_rule("/", 'root', view_func=routes.root)
main.add_url_rule("/api/", 'api', view_func=routes.api)
main.add_url_rule("/form/", 'form', view_func=routes.form)
if __name__ == "__main__":
main.debug = True
main.port = int(os.getenv("PORT", 5000))
main.run()
|
python
|
"""scrapli.driver.core.cisco_iosxr"""
from scrapli.driver.core.cisco_iosxr.driver import IOSXRDriver
__all__ = ("IOSXRDriver",)
|
python
|
import pandas as pd
import numpy as np
import os
import json
from settings import *
from src.utils.sampu import interp_multi, sel_pos_frame, normalize
import seaborn as sns
sns.set(style="darkgrid")
"""Given some keyframe numbers (normalized kf), encodes them and interpolates their latent datapoints.
Saves the z interpolants and the decoded animations in a df.
"""
check_model = '42'
check_epoch = '-200'
method = 'lerp' # slerp, lerp, bspline
nsteps = 100 # per segment
fr = 0.06
frames = [0, 465, 354, 289, 252, 0] # Has to be 2 or 4 or higher. Add 0 for standInit
x_dataset = 'df14_KF.csv' # 'df14_KF.csv': radians, normalized in [0,1]
latent = False # latent=True for interp the latent space directly without encoding keyframes before
# Load keyframes dataset
df = pd.read_csv(os.path.join(ROOT_PATH, 'data/processed/keyframes/', x_dataset), index_col=0)
# Postures in radians
pos_list = []
id_anim_list = []
for frame in frames:
if frame == 0:
pos_list.append(standInit_norm) # List of lists
id_anim_list.append('standInit_0')
else:
pos, id_anim = sel_pos_frame(df, frame)
pos_list.append(pos) # List of lists
id_anim_list.append(id_anim + '_f' + str(frame))
# Get the radians frames (dec, denorm) and the latent interpolants
df_dec_interp, df_z_interp = interp_multi(pos_list, latent, nsteps, check_model, check_epoch, method, joints_names)
# Add 'time' column based on frequency fr
end = df_dec_interp.shape[0] * fr + 0.02
df_dec_interp['time'] = list(np.arange(0.02, end, fr))
# Save path
df_path = os.path.join(ROOT_PATH, DATA_SAMP, 'interp_multi_pos')
# Prepare the overview
json_file = os.path.join(df_path, '-overview.json')
with open(json_file, 'r') as fd:
files_dict = json.load(fd)
file_id = len(files_dict)
files_dict[file_id] = {
'file_id': file_id,
'interp_method': method,
'interp_steps': nsteps,
'frequency': fr,
'model': check_model + check_epoch,
'animations': id_anim_list,
'frames': frames
}
with open(json_file, 'w') as fd:
fd.write(json.dumps(files_dict))
# Save
df_dec_interp.to_csv(os.path.join(df_path, str(file_id) + '_dec_' + method + '.csv'))
df_z_interp.to_csv(os.path.join(df_path, str(file_id) + '_z_' + method + '.csv'))
|
python
|
#!/usr/bin/env python3
#
# Copyright (c) 2019 Roberto Riggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""UE Map handler."""
# import lightedge.managers.apimanager.apimanager as apimanager
import empower_core.apimanager.apimanager as apimanager
# pylint: disable=W0223
class MatchMapHandler(apimanager.APIHandler):
"""All the accounts defined in the controller."""
URLS = [r"/upf/v1/matchmap/([-0-9.]*)",
r"/upf/v1/matchmap/(\bchecked\b)",
r"/upf/v1/matchmap"]
@apimanager.validate(min_args=0, max_args=1)
def get(self, match_index=0):
"""List entries in the Match Map.
Args:
[0]: the Match Index
Example URLs:
GET /upf/v1/matchmap
[
{
"ip_proto_num": "1",
"dst_ip": "31.13.0.0",
"dst_port": "0",
"netmask": "16",
"new_dst_ip": null,
"new_dst_port": 0
},
{
"ip_proto_num": "1",
"dst_ip": "2.2.2.2",
"dst_port": "0",
"netmask": "32",
"new_dst_ip": "192.168.0.1",
"new_dst_port": 0
},
{
"ip_proto_num": "0",
"dst_ip": "31.13.0.0",
"dst_port": "0",
"netmask": "16",
"new_dst_ip": "127.0.0.1",
"new_dst_port": 0
},
{
"ip_proto_num": "6",
"dst_ip": "18.185.97.149",
"dst_port": "0",
"netmask": "32",
"new_dst_ip": "10.104.0.26",
"new_dst_port": 0
}
]
GET /upf/v1/matchmap/2
{
"ip_proto_num": "1",
"dst_ip": "2.2.2.2",
"dst_port": "0",
"netmask": "32",
"new_dst_ip": "192.168.0.1",
"new_dst_port": 0
}
"""
# self.service.upf_request_validator.get_matchmap(match_index)
# if match_index:
# return self.service.matches[int(match_index) - 1]
# return self.service.matches
if match_index == '':
match_index = 0
if match_index == 'checked':
return self.service.rest__get_matchmap_checked()
return self.service.rest__get_matchmap(int(match_index) - 1)
@apimanager.validate(returncode=201, min_args=0, max_args=1)
def post(self, match_index=1, **request_data):
"""Insert entry in the Match Map.
Args:
[0]: the Match Index
Request:
version: protocol version (1.0)
params: the list of parameters to be set
Example URLs:
POST /upf/v1/matchmap
POST /upf/v1/matchmap/5
{
"ip_proto_num": 6,
"dst_ip":
"ec2-18-185-97-149.eu-central-1.compute.amazonaws.com",
"netmask": 32,
"dst_port": 0,
"new_dst_ip": "nginx-service",
"new_dst_port": 0
}
...
"""
# if match_index:
# match_index = int (match_index)
# if match_index <= 0:
# message = "Invalid match index '%i': must be greater than 0"\
# % match_index
# raise ValueError(message)
# matches_length = len(self.service.matches)
# if matches_length == 0:
# if match_index != 1:
# message =\
# "Match list is void: inserting match index has to be 1"\
# % match_index
# raise ValueError(message)
# elif match_index > matches_length:
# message = "Invalid match index '%i': acceptable range is [1, %i]"\
# % (match_index, matches_length )
# raise ValueError(message)
# return self.service.matches[int(match_index) - 1]
# self.service.upf_request_validator.post_matchmap(match_index,
# request_data)
if match_index == '':
match_index = 1
self.service.rest__add_matchmap(int(match_index) - 1, request_data)
self.set_header("Location", "/upf/v1/matchmap/%s" % match_index)
self.set_status(201)
@apimanager.validate(returncode=204, min_args=0, max_args=1)
def delete(self, match_index=0):
"""Delete entries in the Match Map.
Args:
[0]: the Match Index
Example URLs:
DELETE /upf/v1/matchmap
DELETE /upf/v1/matchmap/5
"""
if match_index == '':
match_index = 0
# no match in url -> match_index < 0 -> remove all
self.service.rest__del_matchmap(int(match_index) - 1)
|
python
|
import pytest
from aiosnow.exceptions import SchemaError
from aiosnow.models import ModelSchema, Pluck, fields
from aiosnow.query.fields import IntegerQueryable, StringQueryable
def test_model_schema_field_registration():
class TestSchema(ModelSchema):
test1 = fields.String()
test2 = fields.Integer()
assert isinstance(TestSchema.test1, StringQueryable)
assert isinstance(TestSchema.test2, IntegerQueryable)
assert isinstance(TestSchema.fields["test1"], fields.String)
assert isinstance(TestSchema.fields["test2"], fields.Integer)
def test_model_schema_primary_key():
with pytest.raises(SchemaError):
type(
"TestSchema",
(ModelSchema,),
dict(
test1=fields.String(is_primary=True),
test2=fields.Integer(is_primary=True),
),
)
def test_model_schema_dumps_loads():
class MainDocument(ModelSchema):
test1 = fields.String()
test2 = fields.Integer()
dict_obj = dict(test1="test", test2=123)
json_obj = MainDocument().dumps(dict_obj)
assert isinstance(json_obj, str)
loaded = MainDocument().loads(json_obj)
assert loaded == dict_obj
def test_model_schema_loads():
class MainDocument(ModelSchema):
test1 = fields.String()
test2 = fields.Integer()
json_obj = """{"test1": "test", "test2": 123}"""
dict_obj = dict(test1="test", test2=123)
assert MainDocument().loads(json_obj) == dict_obj
def test_model_schema_nested():
class RelatedDocument(ModelSchema):
test2 = fields.String()
test3 = fields.Integer(pluck=Pluck.VALUE)
class MainDocument(ModelSchema):
test1 = fields.String(pluck=Pluck.DISPLAY_VALUE)
related = RelatedDocument
json_obj = """
{
"test1": {"value": "test", "display_value": "test2"},
"related":
{
"test2": {"value": "test1", "display_value": "test2"},
"test3": {"value": 123, "display_value": "test2"}
}
}
"""
dict_obj = dict(test1="test2", related=dict(test2="test1", test3=123))
query = MainDocument.related.test2.equals("test123")
assert str(query) == "related.test2=test123"
main = MainDocument()
assert main.loads(json_obj) == dict_obj
related = main.nested_fields["related"].schema
assert isinstance(related, RelatedDocument)
assert set(related.fields.keys()) == {"test2", "test3"}
|
python
|
#!/usr/bin/env python2
# XXX: Refactor to a comand line tool and remove pylint disable
"""Merge columns of multiple experiments by gene id."""
from __future__ import absolute_import
import argparse
import csv
import os
import sys
from itertools import chain
import utils
parser = argparse.ArgumentParser(
description="Merge columns of multiple experiments by gene id."
)
parser.add_argument("files", nargs="*", help="expression files")
parser.add_argument("--experiments", nargs="+", help="experiment ids")
parser.add_argument("--genes", nargs="+", help="filter genes")
parser.add_argument(
"--intersection", action="store_true", help="merge by intersection of gene ids"
)
parser.add_argument("--out", help="output file")
args = parser.parse_args()
# if args.experiments and len(args.experiments) != len(args.files):
# raise ValueError("Number of experiments must match the number of files")
genes = set()
expressions = []
headers = []
op = set.intersection if args.intersection else set.union
offset = 0
for f in args.files:
if not os.path.isfile(f):
exit(1)
base, ext = os.path.splitext(f)
delimiter = ";" if ext == ".csv" else "\t"
with utils.gzopen(f) as csvfile:
reader = csv.reader(csvfile, delimiter=delimiter)
header = reader.next()[1:]
headers.append(
args.experiments[offset : offset + len(header)]
if args.experiments
else header
)
offset += len(headers[-1])
expressions.append(dict((r[0], r[1:]) for r in reader))
genes = (
set(expressions[-1].keys())
if args.intersection and not genes
else op(genes, expressions[-1].keys())
)
if args.genes:
genes = genes.intersection(args.genes)
genes = sorted(genes)
he = zip(headers, expressions)
rows = [
dict(chain.from_iterable([zip(h, e[g]) for h, e in he if g in e]), **{"Gene": g})
for g in genes
]
fhandler = open(args.out, "wb") if args.out else sys.stdout
writer = csv.DictWriter(
fhandler, ["Gene"] + [h for subheader in headers for h in subheader], delimiter="\t"
)
writer.writeheader()
writer.writerows(rows)
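# Added illustration (hypothetical data): given two tab-separated inputs
#   exp1.tab:  Gene  S1  S2        exp2.tab:  Gene  S3
#              g1    1   2                    g1    5
#              g2    3   4                    g3    6
# the default union merge writes rows for g1, g2 and g3 with columns S1, S2
# and S3, leaving cells empty where a gene is missing from an input file;
# --intersection keeps only g1.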
|
python
|
from torchvision import models
from PIL import Image
import matplotlib.pyplot as plt
import torch
import numpy as np
import cv2
import torchvision.transforms as T
def decode_segmap(image, source, nc=21):
label_colors = np.array([(0, 0, 0),
# 1=aeroplane, 2=bicycle, 3=bird, 4=boat, 5=bottle
(128, 0, 0), (0, 128, 0), (128, 128, 0), (0, 0, 128), (128, 0, 128),
# 6=bus, 7=car, 8=cat, 9=chair, 10=cow
(0, 128, 128), (128, 128, 128), (64, 0, 0), (192, 0, 0), (64, 128, 0),
# 11=dining table, 12=dog, 13=horse, 14=motorbike, 15=person
(192, 128, 0), (64, 0, 128), (192, 0, 128), (64, 128, 128), (192, 128, 128),
# 16=potted plant, 17=sheep, 18=sofa, 19=train, 20=tv/monitor
(0, 64, 0), (128, 64, 0), (0, 192, 0), (128, 192, 0), (0, 64, 128)])
r = np.zeros_like(image).astype(np.uint8)
g = np.zeros_like(image).astype(np.uint8)
b = np.zeros_like(image).astype(np.uint8)
for l in range(0, nc):
if l != 15:
continue
idx = (image == l)
r[idx] = label_colors[l, 0]
g[idx] = label_colors[l, 1]
b[idx] = label_colors[l, 2]
rgb = np.stack([r, g, b], axis=2)
# Load the foreground input image
foreground = cv2.imread(source)
# Change the color of foreground image to RGB
# and resize image to match shape of R-band in RGB output map
foreground = cv2.cvtColor(foreground, cv2.COLOR_BGR2RGB)
foreground = cv2.resize(foreground,(r.shape[1],r.shape[0]))
# Create a background array to hold white pixels
# with the same size as RGB output map
background = 255 * np.ones_like(rgb).astype(np.uint8)
# Convert uint8 to float
foreground = foreground.astype(float)
background = background.astype(float)
# Create a binary mask of the RGB output map using the threshold value 0
th, alpha = cv2.threshold(np.array(rgb),0,255, cv2.THRESH_BINARY)
# Apply a slight blur to the mask to soften edges
alpha = cv2.GaussianBlur(alpha, (7,7),0)
# Normalize the alpha mask to keep intensity between 0 and 1
alpha = alpha.astype(float)/255
# Multiply the foreground with the alpha matte
foreground = cv2.multiply(alpha, foreground)
# Multiply the background with ( 1 - alpha )
background = cv2.multiply(1.0 - alpha, background)
# Add the masked foreground and background
outImage = cv2.add(foreground, background)
# Return a normalized output image for display
return outImage / 255
def segment(net, path, show_orig=True, dev='cuda'):
img = Image.open(path)
if show_orig: plt.imshow(img); plt.axis('off'); plt.show()
# Comment the Resize and CenterCrop for better inference results
trf = T.Compose([T.Resize(450),
#T.CenterCrop(224),
T.ToTensor(),
T.Normalize(mean = [0.485, 0.456, 0.406],
std = [0.229, 0.224, 0.225])])
inp = trf(img).unsqueeze(0).to(dev)
out = net.to(dev)(inp)['out']
om = torch.argmax(out.squeeze(), dim=0).detach().cpu().numpy()
rgb = decode_segmap(om, path)
plt.imsave("rgb.png", rgb)
return rgb
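# Hedged usage sketch (added; the image path is an assumption):
# dlab = models.segmentation.deeplabv3_resnet101(pretrained=True).eval()
# segment(dlab, './person.jpg', show_orig=False, dev='cuda')
# decode_segmap() keeps only the "person" class (label 15) and alpha-blends the
# original photo onto a white background via out = alpha*fg + (1 - alpha)*bg.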
|
python
|
# -*- coding: utf-8 -*-
from django.db.models.query import QuerySet
class PublisherQuerySet(QuerySet):
"""Added publisher specific filters to queryset.
"""
def drafts(self):
return self.filter(publisher_is_draft=True)
def public(self):
return self.filter(publisher_is_draft=False)
|
python
|
#
# Copyright (c) 2019 Jonathan Weyn <[email protected]>
#
# See the file LICENSE for your rights.
#
"""
Upload settings to a theta-e website loaded dynamically from the theta-e.conf.
"""
import os
import string
template_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'template.txt')
def main(config, stid, forecast_date):
"""
Read the template settings file and copy a settings.php file to the web directory
"""
# Get the file directory and attempt to create it if it doesn't exist
try:
file_dir = config['Web']['Settings']['web_directory']
except KeyError:
raise KeyError("settings error: no 'web_directory' specified in config Web Settings")
required_options = ['page_url', 'page_path', 'json_directory', 'plot_directory']
for opt in required_options:
if opt not in config['Web']['Settings'].keys():
raise KeyError("settings error: required option '%s' not specified in config Web Settings" % opt)
if not(os.path.isdir(file_dir)):
os.makedirs(file_dir)
# Compile substitution parameters
params = {k: v + '/' for k, v in config['Web']['Settings'].items()}
params.pop('web_directory')
params['stid'] = stid = config['current_stid']
for k in ['timezone', 'latitude', 'longitude', 'long_name']:
try:
params[k] = config['Stations'][stid][k]
except KeyError:
raise KeyError("settings error: required station option '%s' not found for station %s" % (k, stid))
params['models'] = str(list(config['Models'].keys()))
params['default_model'] = list(config['Models'].keys())[0]
params['bufr_models'] = str([m for m in config['Models'].keys() if 'bufr_name' in config['Models'][m].keys()])
params['colors'] = str([config['Models'][m]['color'] for m in config['Models'].keys()])
# Replace the template with parameters
with open(template_file, 'r') as f:
src = string.Template(f.read())
result = src.substitute(**params)
if config['debug'] > 50:
print('settings: uploading settings: %s' % params)
# Write out to the file
out_file = os.path.join(file_dir, 'settings.php')
if config['debug'] > 9:
print('settings: writing to %s' % out_file)
with open(out_file, 'w') as f:
f.write(result)
return
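# Added note (hedged; template.txt itself is not shown here): the substitution
# above relies on the standard library string.Template syntax, so template.txt
# is expected to contain ``$name`` / ``${name}`` placeholders matching the keys
# assembled in ``params`` (e.g. ``$stid``, ``$page_url``, ``$models``).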
|
python
|
# Generated by Django 3.2.5 on 2021-09-01 16:27
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0008_alter_workspacerole_role'),
]
operations = [
migrations.AlterModelOptions(
name='upload',
options={},
),
]
|
python
|
# Copyright 2015-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from trappy.stats.Topology import Topology
from trappy.stats.Trigger import Trigger
from trappy.stats.Aggregator import MultiTriggerAggregator
import collections
import trappy
from trappy.base import Base
import pandas as pd
from pandas.util.testing import assert_series_equal
class TestTopology(unittest.TestCase):
def test_add_to_level(self):
"""Test level creation"""
level_groups = [[1, 2], [0, 3, 4, 5]]
level = "test_level"
topology = Topology()
topology.add_to_level(level, level_groups)
check_groups = topology.get_level(level)
self.assertTrue(topology.has_level(level))
self.assertEqual(level_groups, check_groups)
def test_flatten(self):
"""Test Topology: flatten"""
level_groups = [[1, 2], [0, 3, 4, 5]]
level = "test_level"
topology = Topology()
topology.add_to_level(level, level_groups)
flattened = [0, 1, 2, 3, 4, 5]
self.assertEqual(flattened, topology.flatten())
def test_cpu_topology_construction(self):
"""Test CPU Topology Construction"""
cluster_0 = [0, 3, 4, 5]
cluster_1 = [1, 2]
clusters = [cluster_0, cluster_1]
topology = Topology(clusters=clusters)
# Check cluster level creation
cluster_groups = [[0, 3, 4, 5], [1, 2]]
self.assertTrue(topology.has_level("cluster"))
self.assertEqual(cluster_groups, topology.get_level("cluster"))
# Check cpu level creation
cpu_groups = [[0], [1], [2], [3], [4], [5]]
self.assertTrue(topology.has_level("cpu"))
self.assertEqual(cpu_groups, topology.get_level("cpu"))
# Check "all" level
all_groups = [[0, 1, 2, 3, 4, 5]]
self.assertEqual(all_groups, topology.get_level("all"))
def test_level_span(self):
"""TestTopology: level_span"""
level_groups = [[1, 2], [0, 3, 4, 5]]
level = "test_level"
topology = Topology()
topology.add_to_level(level, level_groups)
self.assertEqual(topology.level_span(level), 2)
def test_group_index(self):
"""TestTopology: get_index"""
level_groups = [[1, 2], [0, 3, 4, 5]]
level = "test_level"
topology = Topology()
topology.add_to_level(level, level_groups)
self.assertEqual(topology.get_index(level, [1, 2]), 0)
self.assertEqual(topology.get_index(level, [0, 3, 4, 5]), 1)
class BaseTestStats(unittest.TestCase):
def setUp(self):
trace = trappy.BareTrace()
data = {
"identifier": [
0,
0,
0,
1,
1,
1,
],
"result": [
"fire",
"blank",
"fire",
"blank",
"fire",
"blank",
],
}
index = pd.Series([0.1, 0.2, 0.3, 0.4, 0.5, 0.6], name="Time")
data_frame = pd.DataFrame(data, index=index)
trace.add_parsed_event("aim_and_fire", data_frame)
self._trace = trace
self.topology = Topology(clusters=[[0], [1]])
class TestTrigger(BaseTestStats):
def test_trigger_generation(self):
"""TestTrigger: generate"""
filters = {
"result": "fire"
}
event_class = self._trace.aim_and_fire
value = 1
pivot = "identifier"
trigger = Trigger(self._trace,
event_class,
filters,
value,
pivot)
expected = pd.Series([1, 1], index=pd.Index([0.1, 0.3], name="Time"))
assert_series_equal(expected, trigger.generate(0))
expected = pd.Series([1], index=pd.Index([0.5], name="Time"))
assert_series_equal(expected, trigger.generate(1))
def test_trigger_with_func(self):
"""Trigger works with a function or lambda as filter"""
def my_filter(val):
return val.startswith("fi")
trigger = Trigger(self._trace, self._trace.aim_and_fire,
filters={"result": my_filter}, value=1,
pivot="identifier")
expected = pd.Series([1], index=pd.Index([0.5], name="Time"))
assert_series_equal(expected, trigger.generate(1))
my_filters = {"result": lambda x: x.startswith("bl")}
trigger = Trigger(self._trace, self._trace.aim_and_fire,
filters=my_filters, value=1, pivot="identifier")
expected = pd.Series([1, 1], index=pd.Index([0.4, 0.6], name="Time"))
assert_series_equal(expected, trigger.generate(1))
def test_trigger_with_callable_class(self):
"""Trigger works with a callable class as filter"""
class my_filter(object):
def __init__(self, val_out):
self.prev_val = 0
self.val_out = val_out
def __call__(self, val):
ret = self.prev_val == self.val_out
self.prev_val = val
return ret
trigger = Trigger(self._trace, self._trace.aim_and_fire,
filters={"identifier": my_filter(1)}, value=1,
pivot="result")
expected = pd.Series([1], index=pd.Index([0.6], name="Time"))
assert_series_equal(expected, trigger.generate("blank"))
def test_filter_prev_values(self):
"""Trigger works with a filter that depends on previous values of the same pivot"""
# We generate an example in which we want a trigger whenever the
# identifier is no longer 1 for blank
class my_filter(object):
def __init__(self, val_out):
self.prev_val = 0
self.val_out = val_out
def __call__(self, val):
ret = self.prev_val == self.val_out
self.prev_val = val
return ret
trace = trappy.BareTrace()
data = collections.OrderedDict([
(0.1, ["blank", 1]),
(0.2, ["fire", 1]),
(0.3, ["blank", 0]), # value is no longer 1, trigger
(0.4, ["blank", 1]),
(0.5, ["fire", 0]), # This should NOT trigger
(0.6, ["blank", 0]), # value is no longer 1 for blank, trigger
])
data_frame = pd.DataFrame.from_dict(data, orient="index", )
data_frame.columns = ["result", "identifier"]
trace.add_parsed_event("aim_and_fire", data_frame)
trigger = Trigger(trace, trace.aim_and_fire,
filters={"identifier": my_filter(1)}, value=-1,
pivot="result")
expected = pd.Series([-1, -1], index=[0.3, 0.6])
assert_series_equal(expected, trigger.generate("blank"))
class TestAggregator(BaseTestStats):
def test_scalar_aggfunc_single_trigger(self):
"""TestAggregator: 1 trigger scalar aggfunc"""
def aggfunc(series):
return series.sum()
filters = {
"result": "fire"
}
event_class = self._trace.aim_and_fire
value = 1
pivot = "identifier"
trigger = Trigger(self._trace,
event_class,
filters,
value,
pivot)
aggregator = MultiTriggerAggregator([trigger],
self.topology,
aggfunc=aggfunc)
# There are three "fire" in total
# The all level in topology looks like
# [[0, 1]]
result = aggregator.aggregate(level="all")
self.assertEqual(result, [3.0])
# There are two "fire" on the first node group and a
# a single "fire" on the second node group at the cluster
# level which looks like
# [[0], [1]]
result = aggregator.aggregate(level="cluster")
self.assertEqual(result, [2.0, 1.0])
def test_vector_aggfunc_single_trigger(self):
"""TestAggregator: 1 trigger vector aggfunc"""
def aggfunc(series):
return series.cumsum()
filters = {
"result": "fire"
}
event_class = self._trace.aim_and_fire
value = 1
pivot = "identifier"
trigger = Trigger(self._trace, event_class, filters, value, pivot)
aggregator = MultiTriggerAggregator([trigger],
self.topology,
aggfunc=aggfunc)
# There are three "fire" in total
# The all level in topology looks like
# [[0, 1]]
result = aggregator.aggregate(level="all")
expected_result = pd.Series([1.0, 1.0, 2.0, 2.0, 3.0, 3.0],
index=pd.Index([0.1, 0.2, 0.3, 0.4, 0.5, 0.6])
)
assert_series_equal(result[0], expected_result)
def test_vector_aggfunc_multiple_trigger(self):
"""TestAggregator: multi trigger vector aggfunc"""
def aggfunc(series):
return series.cumsum()
filters = {
"result": "fire"
}
event_class = self._trace.aim_and_fire
value = 1
pivot = "identifier"
trigger_fire = Trigger(self._trace,
event_class,
filters,
value,
pivot)
filters = {
"result": "blank"
}
value = -1
trigger_blank = Trigger(self._trace, event_class, filters, value,
pivot)
aggregator = MultiTriggerAggregator([trigger_fire, trigger_blank],
self.topology,
aggfunc=aggfunc)
# There are three "fire" in total
# The all level in topology looks like
# [[0, 1]]
result = aggregator.aggregate(level="all")
expected_result = pd.Series([1.0, 0.0, 1.0, 0.0, 1.0, 0.0],
index=pd.Index([0.1, 0.2, 0.3, 0.4, 0.5, 0.6])
)
assert_series_equal(result[0], expected_result)
def test_default_aggfunc_multiple_trigger(self):
"""MultiTriggerAggregator with the default aggfunc"""
trigger_fire = Trigger(self._trace, self._trace.aim_and_fire,
filters={"result": "fire"},
pivot="identifier", value=1)
trigger_blank = Trigger(self._trace, self._trace.aim_and_fire,
filters={"result": "blank"},
pivot="identifier", value=2)
aggregator = MultiTriggerAggregator([trigger_fire, trigger_blank],
self.topology)
results = aggregator.aggregate(level="cpu")
expected_results = [
pd.Series([1., 2., 1., 0., 0., 0.],
index=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6]),
pd.Series([0., 0., 0., 2., 1., 2.],
index=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6]),
]
self.assertEqual(len(results), len(expected_results))
for result, expected in zip(results, expected_results):
assert_series_equal(result, expected)
|
python
|
import os
f = open("test.txt" "w")
list1 = ["Shoes", "Socks", "Gloves"]
quantity = [10, 5, 32]
f.write("{:<10} {:10} {:10}\n".format("S/N", "Items", "Quantity"))
for i, item in enumerate(list1):
f.write("{:<10} {:10} {:10}\n".format(i + 1, item, quantity[i]))
f.close()
|
python
|
import os
import subprocess
import sys
try:
import pty
except ImportError:
PTY = False
else:
PTY = True
from mklibpy.common.string import AnyString
from mklibpy.terminal.colored_text import get_text, remove_switch
from mklibpy.util.path import CD
__author__ = 'Michael'
TIMEOUT = 0.5
print("""`mklsgit` has been merged into `mklibpy-bin` (v0.8).
Please uninstall this package and install `mklibpy-bin` instead.""", file=sys.stderr)
def system_call(*args, **kwargs):
out = subprocess.check_output(*args, **kwargs)
return out.decode().splitlines(False)
if PTY:
def system_call_pty(*args, **kwargs):
"""
Opens a pty for stdout, so that colored output is retained.
"""
master, slave = pty.openpty()
p = subprocess.Popen(*args, **kwargs, stdout=slave)
code = p.wait(timeout=TIMEOUT)
if code != 0:
raise subprocess.CalledProcessError(code, args[0])
# echo an empty line so that we can properly break
subprocess.call(['echo', ''], stdout=slave)
def __gen():
with os.fdopen(master) as f:
for line in f:
line = line.strip()
if not line:
break
yield line
return __gen()
def is_git_repo(abspath):
path = os.path.join(abspath, ".git")
return os.path.exists(path) and os.path.isdir(path)
def get_git_branch(abspath):
with CD(abspath):
for line in system_call(['git', 'branch']):
if not line.startswith("*"):
continue
return line.lstrip("*").strip()
class LsGit(object):
def __init__(self, stdout=None):
self.stdout = stdout
if stdout is None:
self.stdout = sys.stdout
@property
def is_tty(self):
return self.stdout.isatty()
@property
def is_gnu(self):
try:
system_call(['ls', '--version'], stderr=subprocess.DEVNULL)
except subprocess.CalledProcessError:
return False
else:
return True
def print(self, *args, **kwargs):
print(*args, **kwargs, file=self.stdout)
def __call__(self, *args):
LsGitProcess(self, args).run()
class LsGitProcess(object):
def __init__(self, parent, args):
self.__parent = parent
self.__args = args
self.__cmd = ['ls'] + list(self.__args)
self.__flags = None
self.__options = None
self.__dirs = None
self.__cur_dir = None
self.__parse_args()
def __parse_args(self):
self.__flags = AnyString([
arg
for arg in self.__args
if arg.startswith('-') and not arg.startswith('--')
])
self.__options = AnyString([
arg
for arg in self.__args
if arg.startswith('--')
])
self.__dirs = [
arg
for arg in self.__args
if not arg.startswith('-')
]
@property
def _l(self):
return 'l' in self.__flags
@property
def __color(self):
if self.__parent.is_gnu:
if not self.__options.startswith('--color'):
return False
if self.__options == '--color' or self.__options == '--color=always':
return True
elif self.__options == '--color=auto':
return self.__parent.is_tty
else:
return False
else:
if not self.__parent.is_tty:
return False
return 'G' in self.__flags
def color(self, text, color=None, mode=None):
if not self.__color:
return text
return get_text(text, color=color, mode=mode)
def __process_line(self, line):
if line.endswith(':') and line[:-1] in self.__dirs:
self.__cur_dir = line[:-1]
return line
sp = line.split()
if len(sp) < 9:
return line
dir = sp[8]
if self.__color:
dir = remove_switch(dir)
abspath = os.path.abspath(os.path.join(self.__cur_dir, dir))
if not is_git_repo(abspath):
return line
branch = get_git_branch(abspath)
return line + self.color(" ({})".format(branch), color='red', mode='bold')
def __native_call(self):
return subprocess.check_call(self.__cmd, stdout=self.__parent.stdout)
def __system_call(self):
return system_call(self.__cmd)
if PTY:
def __system_call_pty(self):
return system_call_pty(self.__cmd)
def run(self):
if not self._l:
self.__native_call()
return
if self.__dirs:
self.__cur_dir = self.__dirs[0]
else:
self.__cur_dir = os.getcwd()
if not PTY:
# See Issue #3
lines = self.__system_call()
workaround_flag = True
elif not self.__color:
lines = self.__system_call()
workaround_flag = False
else:
# This is a workaround for a bug on Mac. See Issue #1 on GitHub
try:
lines = self.__system_call_pty()
workaround_flag = False
except subprocess.TimeoutExpired:
lines = self.__system_call()
workaround_flag = True
if not workaround_flag:
for line in lines:
self.__parent.print(self.__process_line(line))
else:
new_lines = []
modified_flag = False
for line in lines:
if modified_flag:
self.__parent.print(self.__process_line(line))
continue
new_line = self.__process_line(line)
if new_line == line:
new_lines.append(line)
continue
modified_flag = True
for line0 in new_lines:
self.__parent.print(line0)
self.__parent.print(new_line)
if not modified_flag:
self.__native_call()
def main(args=None):
if args is None:
import sys
args = sys.argv[1:]
instance = LsGit()
try:
instance(*args)
except subprocess.CalledProcessError as e:
exit(e.returncode)
if __name__ == '__main__':
main()
|
python
|
#coding: utf-8
'''
# * By :
# *
# * ██████╗ ██████╗ ██████╗ ██╗████████╗ ████████╗██╗ ██╗██████╗ ███╗ ██╗███████╗██████╗
# * ██╔═══██╗██╔══██╗██╔══██╗██║╚══██╔══╝ ╚══██╔══╝██║ ██║██╔══██╗████╗ ██║██╔════╝██╔══██╗
# * ██║ ██║██████╔╝██████╔╝██║ ██║ ██║ ██║ ██║██████╔╝██╔██╗ ██║█████╗ ██████╔╝
# * ██║ ██║██╔══██╗██╔══██╗██║ ██║ ██║ ██║ ██║██╔══██╗██║╚██╗██║██╔══╝ ██╔══██╗
# * ╚██████╔╝██║ ██║██████╔╝██║ ██║ ██║ ╚██████╔╝██║ ██║██║ ╚████║███████╗██║ ██║
# * ╚═════╝ ╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═══╝╚══════╝╚═╝ ╚═╝
# *
# * AUTHOR : MOHAMED GUEYE [Orbit Turner] - Email: [email protected] - Country: Senegal
# */
This program allows you to hit a URL X times.
'''
import urllib.request as urllib2
print("\n===============================> WELCOME <===============================\n")
print("\t\t!! URL HITTER V1.0.1 !!")
print("\n==========================================================================\n")
# RETRIEVE AND VALIDATE USER INPUT
correct = False
while not(correct):
try:
url = str(input("\n-> PLEASE ENTER THE URL TO HIT: "))
iteration = int(input("\n-> PLEASE ENTER THE NUMBER OF HITS TO PERFORM: "))
# assert hd >= 0 and md >= 0 and ha >= 0 and ma >= 0
except ValueError:
print("\n!! VOUS AVEZ SAISI UNE VALEUR INCORRECTE !!")
continue
else:
correct = True
print("")
print("\n--> FETCHING THE URL... ")
try:
for _ in range(iteration):
urllib2.urlopen(url)
except:
print("\n===============================> STATE <===============================\n")
print("\t\t!! AN ERROR OCURRED DURING EXECUTION !!")
print("\t\t!! PLEASE TRY AGAIN LATER OR USE ANOTHER URL !!")
print("\n==========================================================================\n")
else:
print("\n===============================> STATE <===============================\n")
print("\t\tSUCESSFULLY DONE !")
print("\n==========================================================================\n")
finally:
print("PROGRAM ENDED")
|
python
|
from mySecrets import connectStr
import json
import pyodbc
DATABASE_USERACCOUNTS = "[dbo].[UserAccounts]"
DATABASE_PROBLEMS = "[dbo].[Problems]"
DATABASE_SUBMISSIONS = "[dbo].[Submissions]"
def executeCommandCommit(cmd: str) -> None:
cnxn = pyodbc.connect(connectStr)
cursor = cnxn.cursor()
cursor.execute(cmd)
cursor.commit()
cnxn.close()
def executeCommandFetchAll(cmd: str) -> list:
cnxn = pyodbc.connect(connectStr)
cursor = cnxn.cursor()
cursor.execute(cmd)
arr = cursor.fetchall()
cnxn.close()
return arr
def ACCOUNT_getUniqueIDNumber() -> int:
return executeCommandFetchAll(f"SELECT MAX(AccountID) FROM {DATABASE_USERACCOUNTS}")[0][0] + 1
def ACCOUNT_createAccount(firstName: str, lastName: str) -> None:
id = ACCOUNT_getUniqueIDNumber()
executeCommandCommit(f"INSERT INTO {DATABASE_USERACCOUNTS} VALUES ({id}, '{firstName}', '{lastName}')")
def PROBLEMS_getProblemsListString() -> list:
arr = executeCommandFetchAll(f"SELECT ProblemID, ProblemName, Difficulty FROM {DATABASE_PROBLEMS}")
for i in range(len(arr)):
arr[i][0] = str(arr[i][0])
arr[i][2] = str(arr[i][2])
return arr
def PROBLEMS_getProblemString(problemID: int) -> list:
arr = executeCommandFetchAll(f"SELECT ProblemID, ProblemName, ProblemDescription, ProblemInput, ProblemOutput, ProblemExampleInput, ProblemExampleOutput, TimeLimit, MemoryLimit, Difficulty FROM {DATABASE_PROBLEMS} WHERE ProblemID={str(problemID)}")
for i in range(len(arr)):
arr[i][0] = str(arr[i][0])
arr[i][7] = str(arr[i][7])
arr[i][8] = str(arr[i][8])
arr[i][9] = str(arr[i][9])
for k in range(len(arr[i])):
arr[i][k] = arr[i][k].replace("\\n", "\n")
return arr
def PROBLEMS_getProblemNameString(problemID: int) -> list:
arr = executeCommandFetchAll(f"SELECT ProblemID, ProblemName FROM {DATABASE_PROBLEMS} WHERE ProblemID={str(problemID)}")
for i in range(len(arr)):
arr[i][0] = str(arr[i][0])
return arr
def PROBLEMS_getProblemTest(problemID: int) -> list:
arr = executeCommandFetchAll(f"SELECT ProblemID, ProblemRunInput, ProblemRunOutput, ProblemRunCheckFunction, TimeLimit, MemoryLimit, Difficulty FROM {DATABASE_PROBLEMS} WHERE ProblemID={str(problemID)}")
for i in range(len(arr)):
for k in range(1, 3):
arr[i][k] = arr[i][k].replace("\\n", "\n")
return arr
def SUBMISSIONS_getUniqueIDNumber() -> int:
return executeCommandFetchAll(f"SELECT MAX(submissionId) FROM {DATABASE_SUBMISSIONS}")[0][0] + 1
def SUBMISSIONS_createSubmission(submissionUserId: int, submissionProblemId: int, submissionCompiler: str, submissionCode: str, submissionOutput: str, submissionStatus: int) -> str:
submissionId = SUBMISSIONS_getUniqueIDNumber()
submissionCode = json.dumps(submissionCode)
submissionCode = submissionCode.replace("'", "''")
submissionOutput = json.dumps(submissionOutput)
submissionOutput = submissionOutput.replace("'", "''")
executeCommandCommit(f"INSERT INTO {DATABASE_SUBMISSIONS} (SubmissionID, SubmissionUserID, SubmissionProblemID, SubmissionCompiler, SubmissionCode, SubmissionOutput, SubmissionStatus) VALUES ({str(submissionId)}, {str(submissionUserId)}, {str(submissionProblemId)}, '{submissionCompiler}', '{submissionCode}', '{submissionOutput}', {str(submissionStatus)})")
return str(submissionId)
def SUBMISSIONS_getSubmissionString(submissionId: int):
arr = executeCommandFetchAll(f"SELECT SubmissionID, SubmissionUserID, SubmissionProblemID, SubmissionCode, SubmissionStatus, SubmissionCompiler FROM {DATABASE_SUBMISSIONS} WHERE SubmissionID={str(submissionId)}")
for i in range(len(arr)):
arr[i][0] = str(arr[i][0])
arr[i][1] = str(arr[i][1])
arr[i][2] = str(arr[i][2])
arr[i][3] = json.loads(arr[i][3])
return arr
# if __name__ == "__main__":
# # print(ACCOUNT_getUniqueIDNumber())
# # print(PROBLEMS_getProblemsListString())
# # print(PROBLEMS_getProblemString(1))
# # print("'" == "\'")
# SUBMISSIONS_createSubmission(2, 3, "python3", """Some cool code""", "out", 1500)
# print(SUBMISSIONS_getSubmissionString(3))
# # ACCOUNT_createAccount("Danny", "Kaja")
# # a = executeCommandFetchAll(f"SELECT TOP (1000) * FROM {DATABASE_USERACCOUNTS}")
# # print(a)
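# Added note (sketch, not part of the original module): pyodbc also supports
# parameterized queries with ``?`` placeholders, which would avoid the manual
# quote escaping done in SUBMISSIONS_createSubmission, e.g.:
#
# def executeCommandCommitParams(cmd: str, params: tuple) -> None:
#     cnxn = pyodbc.connect(connectStr)
#     cursor = cnxn.cursor()
#     cursor.execute(cmd, params)  # e.g. ("INSERT ... VALUES (?, ?)", (1, 'x'))
#     cursor.commit()
#     cnxn.close()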
|
python
|
import csv
import os
import sqlite3
import pytest
import shutil
from tempfile import gettempdir
from openpyxl import Workbook
TMP_DIR = gettempdir()
ws_summary_B5_rand = [
'Cookfield, Rebuild',
'Smithson Glenn Park Editing',
'Brinkles Bypass Havensmere',
'Folicles On Fire Ltd Extradition',
'Puddlestein Havelock Underpass',
]
ws_summary_B8_rand = [
'Aerobics, Maritime and Commerce',
'TSAD',
'Special Transport Needs for the Northern Populace',
'Parenting, Levels and Financial Irregularity',
'HR',
]
ws_finance_C6_rand = [
'Green',
'Amber/Green',
'Amber',
'Amber/Red',
'Red',
]
ws_finance_C11_rand = [
'02-2-2011',
'02-2-2011',
'02-2-2011',
'02-2-2011',
'02-2-2011',
'02-2-2011',
]
ws_finance_B19_rand = [
'2012',
'2013',
'2011',
'2018',
'2002',
'2007',
]
ws_finance_C18_rand = [
'Real',
'Nominal',
]
ws_finance_C36_rand = [
'2.00',
'4.20',
'1.13',
'12.09',
'222.07',
]
ws_finance_C44_rand = [
'12.00',
'41.20',
'13.13',
'122.09',
'22.07',
]
ws_finance_C77_rand = [
'29.00',
'49.23',
'23.43',
'1.89',
'290.37',
]
ws_resources_C7_rand = [
'9.00',
'19.00',
'29.5',
'12.00',
'20.5',
]
ws_resources_G17_rand = [
'9.90',
'19.22',
'29.93',
'1202.89',
'20.37',
]
ws_resources_I30_rand = [
'Green',
'Amber/Green',
'Amber',
'Amber/Red',
'Red',
]
ws_resources_J30_rand = [
'Green',
'Amber/Green',
'Amber',
'Amber/Red',
'Red',
]
ws_resources_J38_rand = [
'Green',
'Amber/Green',
'Amber',
'Amber/Red',
'Red',
]
ws_approval_C10_rand = [
'02-2-2011',
'02-2-2011',
'02-2-2011',
'02-2-2011',
'02-2-2011',
]
ws_approval_F19_rand = [
'A load of absolute\n horseradish.',
'When people speak of these kind of things, they are often surprised.',
'It is very bad here. Completely unacceptable when you think about it.',
'Never worry too much about it - it wont last forever',
'There is a forester on this project who is disrupting everything.'
]
ws_approval_B39_rand = [
'02-2-2011',
'02-2-2011',
'02-2-2011',
'02-2-2011',
'02-2-2011',
]
ws_assurance_C4_rand = [
'02-2-2011',
'02-2-2011',
'02-2-2011',
'02-2-2011',
'02-2-2011',
'02-2-2011',
]
ws_assurance_D10_rand = [
'02-2-2011',
'02-2-2011',
'02-2-2011',
'02-2-2011',
'02-2-2011',
]
ws_resources_E17_rand = [
'Green',
'Amber/Green',
'Amber',
'Amber/Red',
'Red',
]
dm_data = [
('Project/Programme Name', 'Summary', 'A5', 'GMPP Sheet', 'A15', None),
('SRO Name', 'Summary', 'B5', 'GMPP Sheet', 'B15', None),
('SRO Age', 'Summary', 'C5', 'GMPP Sheet', 'C15', None),
('Top 37', 'Summary', 'I5', 'GMPP Sheet', 'C29', None),
('DfT Business Plan', 'Summary', 'I6', 'GMPP Sheet', 'C30', None),
('DFT ID Number', 'Summary', 'B6', 'GMPP Sheet', 'C31', None),
('Working Contact Name', 'Summary', 'H8', 'GMPP Sheet', 'C32', None),
('Working Contact Telephone', 'Summary', 'H9', 'GMPP Sheet', 'C33', None),
('Working Contact Email', 'Summary', 'H10', 'GMPP Sheet', 'C34', None),
('DfT Group', 'Summary', 'B8', 'GMPP Sheet', 'C35', None),
('DfT Division', 'Summary', 'B9', 'GMPP Sheet', 'C36', None),
('Agency or delivery partner (GMPP - Delivery Organisation primary)',
'Summary', 'B10', 'GMPP Sheet', 'C37', None),
]
return_data = [
(1, 1, 1, "P1 Q1 DM1"),
(1, 1, 2, "P1 Q1 DM2"),
(1, 1, 3, "P1 Q1 DM3"),
(1, 1, 4, "P1 Q1 DM4"),
(1, 1, 5, "P1 Q1 DM5"),
(1, 1, 6, "P1 Q1 DM6"),
(1, 1, 7, "P1 Q1 DM7"),
(1, 1, 8, "P1 Q1 DM8"),
(1, 1, 9, "P1 Q1 DM9"),
(1, 1, 10, "P1 Q1 DM10"),
(1, 1, 11, "P1 Q1 DM11"),
(1, 1, 12, "P1 Q1 DM12"),
(2, 1, 1, "P2 Q1 DM1"),
(2, 1, 2, "P2 Q1 DM2"),
(2, 1, 3, "P2 Q1 DM3"),
(2, 1, 4, "P2 Q1 DM4"),
(2, 1, 5, "P2 Q1 DM5"),
(2, 1, 6, "P2 Q1 DM6"),
(2, 1, 7, "P2 Q1 DM7"),
(2, 1, 8, "P2 Q1 DM8"),
(2, 1, 9, "P2 Q1 DM9"),
(2, 1, 10, "P2 Q1 DM10"),
(2, 1, 11, "P2 Q1 DM11"),
(2, 1, 12, "P2 Q1 DM12"),
(1, 2, 1, "P1 Q2 DM1"),
(1, 2, 2, "P1 Q2 DM2"),
(1, 2, 3, "P1 Q2 DM3"),
(1, 2, 4, "P1 Q2 DM4"),
(1, 2, 5, "P1 Q2 DM5"),
(1, 2, 6, "P1 Q2 DM6"),
(1, 2, 7, "P1 Q2 DM7"),
(1, 2, 8, "P1 Q2 DM8"),
(1, 2, 9, "P1 Q2 DM9"),
(1, 2, 10, "P1 Q2 DM10"),
(1, 2, 11, "P1 Q2 DM11"),
(1, 2, 12, "P1 Q2 DM12"),
(2, 2, 1, "P2 Q2 DM1"),
(2, 2, 2, "P2 Q2 DM2"),
(2, 2, 3, "P2 Q2 DM3"),
(2, 2, 4, "P2 Q2 DM4"),
(2, 2, 5, "P2 Q2 DM5"),
(2, 2, 6, "P2 Q2 DM6"),
(2, 2, 7, "P2 Q2 DM7"),
(2, 2, 8, "P2 Q2 DM8"),
(2, 2, 9, "P2 Q2 DM9"),
(2, 2, 10, "P2 Q2 DM10"),
(2, 2, 11, "P2 Q2 DM11"),
(2, 2, 12, "P2 Q2 DM12"),
]
@pytest.fixture
def sqlite3_db_file():
db_file = os.path.join(TMP_DIR, "test.db")
conn = sqlite3.connect(db_file)
c = conn.cursor()
c.execute("DROP TABLE IF EXISTS projects")
c.execute("DROP TABLE IF EXISTS datamap_items")
c.execute("DROP TABLE IF EXISTS returns")
c.execute("DROP TABLE IF EXISTS portfolios")
c.execute("DROP TABLE IF EXISTS series")
c.execute("DROP TABLE IF EXISTS series_items")
c.execute("DROP TABLE IF EXISTS retained_source_files")
c.execute("""CREATE TABLE projects
(id integer PRIMARY KEY, name text, portfolio id)""")
c.execute("""CREATE TABLE datamap_items
(id integer PRIMARY KEY,
key text,
bicc_sheet text,
bicc_cellref text,
gmpp_sheet text,
gmpp_cellref text,
bicc_ver_form text
)"""
)
c.execute("""CREATE TABLE returns
(id integer PRIMARY KEY,
project_id integer,
series_item_id integer,
datamap_item_id integer,
value text,
FOREIGN KEY (project_id) REFERENCES projects(id),
FOREIGN KEY (series_item_id) REFERENCES series_items(id),
FOREIGN KEY (datamap_item_id) REFERENCES datamap_items(id)
)""")
c.execute("""CREATE TABLE portfolios
(id integer PRIMARY KEY,
name text)"""
)
c.execute("""CREATE TABLE series
(id integer PRIMARY KEY,
name text)"""
)
c.execute("""CREATE TABLE series_items
(id integer PRIMARY KEY,
name text,
start_date text,
end_date text,
series_id integer,
FOREIGN KEY (series_id) REFERENCES series(id)
)""")
c.execute("""CREATE TABLE retained_source_files
(id integer PRIMARY KEY,
project_id integer,
portfolio_id integer,
series_item_id integer,
uuid text,
FOREIGN KEY (project_id) REFERENCES projects(id),
FOREIGN KEY (portfolio_id) REFERENCES portfolios(id),
FOREIGN KEY (series_item_id) REFERENCES series_items(id)
)""")
c.execute("INSERT INTO portfolios (name) VALUES('Tier 1 Projects')")
c.execute("INSERT INTO series (name) VALUES('Financial Quarters')")
c.execute("""INSERT INTO series_items (name, start_date, end_date, series_id)
VALUES('Q1 2013/14', '2013-04-01', '2013-06-30', 1 )""")
c.execute("""INSERT INTO series_items (name, start_date, end_date, series_id)
VALUES('Q2 2013/14', '2013-04-01', '2013-06-30', 1 )""")
c.execute("""INSERT INTO series_items (name, start_date, end_date, series_id)
VALUES('Q3 2013/14', '2013-04-01', '2013-06-30', 1 )""")
c.execute("""INSERT INTO series_items (name, start_date, end_date, series_id)
VALUES('Q5 2013/14', '2013-04-01', '2013-06-30', 1 )""")
c.execute("INSERT INTO projects (name, portfolio) VALUES('Project 1', 1)")
c.execute("INSERT INTO projects (name, portfolio) VALUES('Project 2', 1)")
c.execute("INSERT INTO projects (name, portfolio) VALUES('Project 3', 1)")
# c.execute("""INSERT INTO retained_source_files (portfolio_id, project_id, series_item_id)
# VALUES(1, 1, 1)""")
c.executemany(
("INSERT INTO datamap_items (key, bicc_sheet, "
"bicc_cellref, gmpp_sheet, gmpp_cellref, bicc_ver_form) VALUES"
"(?, ?, ?, ?, ?, ?)"), dm_data)
c.executemany(
("INSERT INTO returns (project_id, series_item_id, datamap_item_id, value)"
" VALUES (?, ?, ?, ?)"), return_data)
conn.commit()
c.close()
conn.close()
return db_file
# os.unlink(os.path.join(TMP_DIR, 'test.db')
@pytest.fixture
def test_blank_xls():
wb = Workbook()
wb.create_sheet('Summary')
wb.create_sheet('Finance & Benefits')
wb.create_sheet('Approval & Project milestones')
wb.create_sheet('Resources')
wb.create_sheet('Assurance planning')
wb.create_sheet('GMPP info')
wb.save(os.path.join(TMP_DIR, 'test.xlsx'))
return(os.path.join(TMP_DIR, 'test.xlsx'))
@pytest.fixture
def bicc_return():
wb = Workbook()
wb.create_sheet('Summary')
wb.create_sheet('Finance & Benefits')
wb.create_sheet('Approval & Project milestones')
wb.create_sheet('Resources')
wb.create_sheet('Assurance planning')
wb.create_sheet('GMPP info')
# Summary fixture
ws_summary = wb['Summary']
ws_summary['A5'].value = 'Project/Programme Name'
ws_summary['B5'].value = ws_summary_B5_rand[0]
ws_summary['A8'].value = 'DfT Group'
ws_summary['B8'].value = ws_summary_B8_rand[0]
# Finance & Benefits fixture
ws_finance = wb['Finance & Benefits']
ws_finance['A6'].value = 'SRO Finance Confidence'
ws_finance['C6'].value = ws_finance_C6_rand[0]
ws_finance['B11'].value = 'Date of Business Case'
ws_finance['C11'].value = ws_finance_C11_rand[0]
ws_finance['A19'].value = 'Index Year'
ws_finance['B19'].value = ws_finance_B19_rand[0]
ws_finance['A18'].value = 'Real or Nominal'
ws_finance['C18'].value = ws_finance_C18_rand[0]
ws_finance['A36'].value = '2019/2020'
ws_finance['C36'].value = ws_finance_C36_rand[0]
ws_finance['A44'].value = 'Total'
ws_finance['C44'].value = ws_finance_C44_rand[0]
ws_finance['A77'].value = 'Total WLC (RDEL)'
ws_finance['C77'].value = ws_finance_C77_rand[0]
# Resources fixture
ws_resources = wb['Resources']
ws_resources['A7'].value = 'SCS(PB2)'
ws_resources['C7'].value = ws_resources_C7_rand[0]
ws_resources['A17'].value = 'Total'
ws_resources['G17'].value = ws_resources_G17_rand[0]
ws_resources['A30'].value = 'Change Implementation'
ws_resources['I30'].value = ws_resources_I30_rand[0]
ws_resources['J30'].value = ws_resources_I30_rand[1]
ws_resources['G38'].value = 'Overall Assessment'
ws_resources['J38'].value = ws_resources_J38_rand[0]
# Approval and Project Milestones fixture
ws_approvals = wb['Approval & Project milestones']
ws_approvals['A10'].value = 'SOBC - HMT Approval'
ws_approvals['C10'].value = ws_approval_C10_rand[0]
ws_approvals['A19'].value = 'FBC - HMT Approval'
ws_approvals['F19'].value = ws_approval_F19_rand[0]
ws_approvals['A39'].value = 'Completion of Construction'
ws_approvals['B39'].value = ws_approval_B39_rand[0]
# Assurance fixture
ws_assurance = wb['Assurance planning']
ws_assurance['B4'].value = 'Date Created'
ws_assurance['C4'].value = ws_assurance_C4_rand[0]
ws_assurance['A10'].value = 'Gate 0 (Programme)'
ws_assurance['D10'].value = ws_assurance_D10_rand[0]
ws_assurance['A17'].value = 'Review Point 4 MPRG'
ws_assurance['E17'].value = 'Amber/Green'
wb.save(os.path.join(TMP_DIR, 'test-bicc-return.xlsx'))
yield os.path.join(TMP_DIR, 'test-bicc-return.xlsx')
os.unlink(os.path.join(TMP_DIR, 'test-bicc-return.xlsx'))
@pytest.fixture
def mock_datamap_source_file():
data = [
[
'Project/Programme Name', 'Summary', 'B5', 'red', 'white', '',
'Yes/No'
], ['SRO Sign-Off', 'Summary', 'B49', 'red', 'white', '', 'Yes/No'],
['GMPP - FD Sign-Off', 'Summary'],
['GMPP - Person completing this return'],
['GMPP - Single Point of Contact Email Address'],
['GMPP - Single Point of Contact (SPOC)'], ['GMPP - Email Address'], [
'Reporting period (GMPP - Snapshot Date)', 'Summary', 'G3', 'red',
'white', '', 'Yes/No'
], ['Quarter Joined', 'Summary', 'I3', 'red', 'white', '', 'Yes/No'],
['GMPP - Sub-portfolio'], [
'Index Year', 'Finance & Benefits', 'B19', 'red', 'white', '',
'Yes/No'
], [
'Real or Nominal - Baseline', 'Finance & Benefits', 'C18', 'red',
'white', '', 'Yes/No'
], ['GMPP/quarter formally joined'], [
'GMPP (GMPP – formally joined GMPP)', 'Summary', 'G5', 'red',
'white', '', 'Yes/No'
], ['IUK top 40', 'Summary', 'G6', 'red', 'white', '', 'Yes/No'],
['Top 37', 'Summary', 'I5', 'red', 'white', '', 'Yes/No'],
['DfT Business Plan', 'Summary', 'I6', 'red', 'white', '', 'Yes/No'], [
'GMPP - IPA ID Number', 'Summary', 'C6', 'red', 'white', '',
'Yes/No'
], ['DFT ID Number', 'Summary', 'B6', 'red', 'white', '', 'Yes/No'], [
'Working Contact Name', 'Summary', 'H8', 'red', 'white', '',
'Yes/No'
], ['Working Contact Telephone', 'Summary', 'H9', 'red', '', ''], [
'Working Contact Email', 'Summary', 'H10', 'red', 'white', '',
'Yes/No'
], ['DfT Group', 'Summary', 'B8', 'red', 'yellow', '', 'DfT Group'], [
'Significant Steel Requirement', 'Finance & Benefits', 'D15',
'blue', 'yello', '', 'Yes/No'
], [
'SRO Finance confidence', 'Finance & Benefits', 'C6', 'green',
'red', '', 'RAG_Short'
], [
'BICC approval point', 'Finance & Benefits', 'E9', 'orange', 'red',
'', 'Business Cases'
], [
'Assurance MM2 Latest Approved Baseline', 'Assurance planning',
'C10', 'red', 'white', '', 'Yes/No'
], [
'Approval MM11 Notes', 'Approval & Project milestones', 'F19',
'red', 'yellow', '', 'Yes/No'
], [
'SCS PB2 No public sector', 'Resources', 'C7', 'red', 'white', '',
'Yes/No'
], [
'Project MM31 Original Baseline', 'Approval & Project milestones',
'B39', 'red', 'white', 'd/mm/yy', 'Yes/No'
], [
'Change Implementation - Now', 'Resources', 'I30', 'black',
'yellow', 'd/mm/yy', 'Capability RAG'
]
]
with open(os.path.join(TMP_DIR, 'mock_datamap.csv'), 'w') as f:
datamap_writer = csv.writer(f, delimiter=',')
f.write('cell_key,template_sheet,cell_reference,bg_colour,fg_colour'
',number_format,verification_list\n')
for item in data:
datamap_writer.writerow(item)
yield os.path.join(TMP_DIR, 'mock_datamap.csv')
os.unlink(os.path.join(TMP_DIR, 'mock_datamap.csv'))
def mock_blank_xlsx_file(source_dir: str, empty: bool=False, mix: bool=False) -> None:
wb = Workbook()
wb.create_sheet('Test')
# Test sheet fixtures
ws_summary = wb['Test']
ws_summary['A5'].value = 'Project/Programme Name'
ws_summary['B5'].value = ws_summary_B5_rand[0]
ws_summary['A8'].value = 'DfT Group'
ws_summary['B8'].value = ws_summary_B8_rand[0]
try:
os.mkdir(source_dir)
wb.save(os.path.join(os.path.abspath(source_dir), 'test-blank.xlsx'))
if mix: # we want to throw another file type in there
with open(source_dir + '/' + 'baws.txt', 'w') as f:
f.write("Some random bollocks")
if empty: # we want the dir but no files in it
                for test_file in os.listdir(os.path.abspath(source_dir)):
                    os.unlink(os.path.join(os.path.abspath(source_dir), test_file))
    except FileExistsError:  # the target directory already exists from a previous run
shutil.rmtree(source_dir)
os.mkdir(source_dir)
wb.save(os.path.join(os.path.abspath(source_dir), 'test-blank.xlsx'))
if mix:
with open(source_dir + '/' + 'baws.txt', 'w') as f:
f.write("Some random bollocks")
if empty:
for test_file in os.listdir(os.path.abspath(source_dir)):
os.unlink(os.path.join(os.path.abspath(source_dir), test_file))
|
python
|
from . import *
class TestDefaults(BrokerTestCase):
def test_basic_submit(self):
f = self.queue.submit_ex(name=self.id() + '.0', pattern=None)
self.assertTrue(f.id)
self.assertEqual(self.broker.fetch(f.id)['status'], 'pending')
self.assertEqual(self.broker.fetch(f.id)['priority'], 1000)
def test_manual_child_submit_by_id(self):
cf = self.queue.submit_ex(name=self.id() + '.0', pattern=None)
pf = self.queue.submit_ex(name=self.id() + '.1', pattern=None, dependencies=[cf.id])
self.assertTrue(pf.id > cf.id)
def test_manual_child_submit_by_future(self):
cf = self.queue.submit_ex(name=self.id() + '.0', pattern=None)
pf = self.queue.submit_ex(name=self.id() + '.1', pattern=None, dependencies=[cf])
self.assertIs(cf, list(pf.iter())[1])
self.assertTrue(pf.id > cf.id)
def test_auto_child_submit(self):
f = self.queue.submit_ex(name=self.id() + '.0', pattern=None, dependencies=[{
'name': self.id() + '.1',
'pattern': None,
}])
self.assertTrue(f.id > list(f.iter())[1].id, 1)
|
python
|
from django.db import models
class Job(models.Model):
"""Class describing a computational job"""
# currently, available types of job are:
TYPES = (
("fibonacci", "fibonacci"),
("power", "power")
)
STATUSES = (
("pending", "pending"),
("started", "started"),
("finished", "finished"),
("failed", "failed"),
)
type = models.CharField(choices=TYPES, max_length=20)
status = models.CharField(choices=STATUSES, max_length=20)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
argument = models.PositiveIntegerField()
    result = models.IntegerField(null=True)
    def save(self, *args, **kwargs):
        """Save the model and, if the job is in pending state, schedule it"""
        super().save(*args, **kwargs)  # persist first so self.id is populated for the task
        if self.status == "pending":
            from .tasks import TASK_MAPPING
            task = TASK_MAPPING[self.type]
            task.delay(job_id=self.id, n=self.argument)
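# Illustrative usage sketch (not part of the original app; assumes a configured
# Celery worker and the .tasks module referenced above):
#
#     job = Job.objects.create(type="fibonacci", status="pending", argument=10)
#     # save() persists the row first, then enqueues TASK_MAPPING["fibonacci"]
#     # with job_id=job.id and n=10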
|
python
|
from pathlib import Path
from math import inf
def get_image_layers(raw_data, width, height):
digits = map(int, raw_data.strip())
layers = list()
curr_layer = list()
layer_size = width * height
for digit in digits:
curr_layer.append(digit)
if len(curr_layer) == layer_size:
layers.append(curr_layer)
curr_layer = list()
return layers
def layer_digit_count(layer, digit):
return len(list(filter(lambda d: d == digit, layer)))
def fewest_zeros(layers):
index = -1
fewest = inf
for l, layer in enumerate(layers):
zeros = layer_digit_count(layer, 0)
if zeros < fewest:
fewest = zeros
index = l
return index
def get_layer_data(layers, index):
layer = layers[index]
ones = layer_digit_count(layer, 1)
twos = layer_digit_count(layer, 2)
return ones * twos
def display_image(layers, width, height):
layer_size = width * height
pixels = [list() for ls in range(layer_size)]
for layer in layers:
for d, digit in enumerate(layer):
if digit != 2:
pixels[d].append(digit)
image = list()
pixel_row = list()
for pixel in pixels:
pixel_row.append(" " if pixel[0] == 0 else "@")
if len(pixel_row) == width:
print("".join(pixel_row))
pixel_row = list()
if __name__ == "__main__":
dsn_data = Path("../etc/aoc8.txt").read_text()
image_width = 25
image_height = 6
image_layers = get_image_layers(dsn_data, image_width, image_height)
layer_index = fewest_zeros(image_layers)
result1 = get_layer_data(image_layers, layer_index)
print("Part 1:", result1)
print("Part 2:")
display_image(image_layers, image_width, image_height)
|
python
|
import matplotlib.pyplot as plt
VIEW_BORDER = 0.1
plt.ion()
class plotter():
def __init__(self, pos_bounds, plot_vor, plot_traj, plot_graph):
self.p_bounds = pos_bounds
self.plot_vor_bool = plot_vor
self.plot_traj_bool = plot_traj
self.plot_graph_bool = plot_graph
if self.plot_vor_bool:
self.fig_vor = plt.figure(figsize=(16, 16), dpi=100)
self.ax_vor = self.fig_vor.add_subplot(1,1,1)
self.ax_vor.axis('equal')
self.ax_vor.set_xlim([self.p_bounds[0][0] - VIEW_BORDER, self.p_bounds[0][1] + VIEW_BORDER])
self.ax_vor.set_ylim([self.p_bounds[1][0] - VIEW_BORDER, self.p_bounds[1][1] + VIEW_BORDER])
self.fig_opt = plt.figure(figsize=(8,4), dpi=100)
#freq selection
if self.plot_graph_bool:
#self.fig_graph = plt.figure(figsize=(4, 4), dpi=100)
self.ax_graph = self.fig_opt.add_subplot(1,2,1)
self.ax_graph.set_xlim([self.p_bounds[0][0] - VIEW_BORDER, self.p_bounds[0][1] + VIEW_BORDER])
self.ax_graph.set_ylim([self.p_bounds[1][0] - VIEW_BORDER, self.p_bounds[1][1] + VIEW_BORDER])
self.ax_graph.tick_params(
axis='both',
which='both',
top=False,
bottom=False,
left=False,
right=False,
labeltop=False,
labelbottom=False,
labelleft=False,
labelright=False
)
#trajectories
if self.plot_traj_bool:
#self.fig_traj = plt.figure(figsize=(4, 4), dpi=100)
self.ax_traj = self.fig_opt.add_subplot(1,2,2)
self.ax_traj.set_xlim([self.p_bounds[0][0] - VIEW_BORDER, self.p_bounds[0][1] + VIEW_BORDER])
self.ax_traj.set_ylim([self.p_bounds[1][0] - VIEW_BORDER, self.p_bounds[1][1] + VIEW_BORDER])
self.ax_traj.tick_params(
axis='both',
which='both',
top=False,
bottom=False,
left=False,
right=False,
labeltop=False,
labelbottom=False,
labelleft=False,
labelright=False
)
def plot_vor(self, drones, centroids, vor):
self.fig_vor.clf()
ax = self.fig_vor.gca()
# Plot drone points
ax.scatter([d.pos[0] for d in drones], [d.pos[1] for d in drones], marker='x', color='b')
#print("initial",vor.filtered_points)
# Plot ridge points
#for region in vor.filtered_regions:
# vertices = vor.vertices[region, :]
# ax.plot(vertices[:, 0], vertices[:, 1], 'go')
# Plot ridges
for region in vor.filtered_regions:
vertices = vor.vertices[region + [region[0]], :]
ax.plot(vertices[:, 0], vertices[:, 1], linewidth=1, linestyle='-', color='k')
# Plot Centroids
        ax.scatter(centroids[:, 0], centroids[:, 1], marker='.', color='r')
plt.show()
plt.pause(0.01)
def plot_traj(self, q, gt):
if not self.plot_traj_bool:
return
#self.fig_traj.clf()
#ax = self.fig_traj.gca()
self.ax_traj.cla()
ax = self.ax_traj
# Plot drone points
for k in range(len(q)):
#print(q[k, :, 0], q[k, :, 1])
ax.plot(q[k, :, 0], q[k, :, 1], marker='.', ms=2, color='blue', linewidth=0.25)
ax.scatter(gt[k][0], gt[k][1], marker='.',s=64, color='blue')
#ax.plot(q[k, :, 0], q[k, :, 1], marker='.', ms=2, color='C%d' % (k % 8), linewidth=0.25)
#ax.scatter(gt[k][0], gt[k][1], marker='.',s=64, color='C%d' % (k % 8))
ax.set_xlim([self.p_bounds[0][0] - VIEW_BORDER, self.p_bounds[0][1] + VIEW_BORDER])
ax.set_ylim([self.p_bounds[1][0] - VIEW_BORDER, self.p_bounds[1][1] + VIEW_BORDER])
#for k in range(len(gt)):
# ax.scatter(gt[k])
#ax.set_aspect('equal', 'box')
ax.set_xticklabels([])
ax.set_yticklabels([])
#self.ax_traj.set_xlim([self.p_bounds[0][0] - VIEW_BORDER, self.p_bounds[0][1] + VIEW_BORDER])
#self.ax_traj.set_ylim([self.p_bounds[1][0] - VIEW_BORDER, self.p_bounds[1][1] + VIEW_BORDER])
plt.show()
plt.pause(0.01)
#print(self.p_bounds[0][0] - VIEW_BORDER, self.p_bounds[0][1] + VIEW_BORDER, self.p_bounds[1][0] - VIEW_BORDER, self.p_bounds[1][1] + VIEW_BORDER)
#print(self.ax_traj.get_ylim(), self.ax_traj.get_xlim())
#plot gt positions, graph edges, freq assignments
def plot_graph(self, g, gt, freqs):
if not self.plot_graph_bool:
return
#self.fig_graph.clf()
#ax = self.fig_graph.gca()
self.ax_graph.cla()
ax = self.ax_graph
ax.set_xlim([self.p_bounds[0][0] - VIEW_BORDER, self.p_bounds[0][1] + VIEW_BORDER])
ax.set_ylim([self.p_bounds[1][0] - VIEW_BORDER, self.p_bounds[1][1] + VIEW_BORDER])
ax.set_xticklabels([])
ax.set_yticklabels([])
#plot gt and target positions
#unique_freqs = list(set(freqs))
for k in range(len(gt)):
size = 64
if freqs[k] == 0.0:
color = 'red'
elif freqs[k] >= 5.0:
color = 'gold'
elif freqs[k] >= 2.4:
color = 'green'
else:
color = 'blue'
#size = 128
ax.scatter(gt[k][0], gt[k][1], marker='x', s=5, c=color)
ax.scatter(g.nodes(data='p')[k][0], g.nodes(data='p')[k][1], marker='.', s=size, c=color)
#'''
#plot graph edges
for u, v in g.edges:
u_pos = g.nodes(data='p')[u]
v_pos = g.nodes(data='p')[v]
x_pos = [u_pos[0], v_pos[0]]
y_pos = [u_pos[1], v_pos[1]]
ax.plot(x_pos, y_pos, c='black', linewidth=0.25)
#print(g.nodes(data='p')[u], g.nodes(data='p')[v])
#'''
#TODO plot freq assignments
plt.show()
plt.pause(0.01)
|
python
|
# -*- coding: utf-8 -*-
import json
with open('data.txt', 'w') as outfile:
data = {"total": 10}
json.dump(data, outfile)
if __name__ == "__main__":
print("ok")
|
python
|
from util import traceguard
from gui.toolbox import GetTextFromUser
from common import profile, pref
import wx
def change_password(protocol, cb):
val = GetTextFromUser(_('Enter a new password for {username}:'.format(username=protocol.username)),
_('Change Password'),
default_value = protocol.password,
password = True)
if val: cb(val)
def remove_contact(contact, do_remove):
action_allowed = getattr(do_remove, 'action_allowed', lambda c: True)
if not action_allowed(contact):
return
yes_default = pref('prompts.defaults.contacts.del_contact', type=bool, default=True)
more_flags = wx.NO_DEFAULT * (not yes_default)
if wx.YES == wx.MessageBox(_('Are you sure you want to delete contact {name}?').format(name=contact.name),
_('Delete Contact'), style = wx.YES_NO | more_flags):
do_remove()
def remove_group(group, do_remove):
try:
s = u'group "%s"' % group.name
except:
s = u'this group'
yes_default = pref('prompts.defaults.contacts.del_group', type=bool, default=True)
more_flags = wx.NO_DEFAULT * (not yes_default)
line1 = _('WARNING!')
line2 = _('All your contacts in this group will be deleted locally AND on the server.')
line3 = _('Are you sure you want to remove {groupname}?').format(groupname=s)
msg = u'\n\n'.join((line1, line2, line3))
if wx.YES == wx.MessageBox(msg, _('Delete Group'),
style = wx.YES_NO | wx.ICON_ERROR | more_flags):
do_remove()
def add_group():
group = GetTextFromUser(_('Please enter a group name:'),_('Add Group'))
if group is None or not group.strip():
return
protos = [acct.connection for acct in profile.account_manager.connected_accounts]
for proto in protos:
with traceguard:
proto.add_group(group)
def block_buddy(buddy, do_block):
yes_default = pref('prompts.defaults.contacts.block', type=bool, default=True)
more_flags = wx.NO_DEFAULT * (not yes_default)
if wx.YES == wx.MessageBox(_('Are you sure you want to block %s?') % buddy.name,
_('Block Buddy'),
style = wx.YES_NO | more_flags):
do_block()
|
python
|
"""Module for dilation based pixel consensus votin
For now hardcode 3x3 voting kernel and see
"""
from abc import ABCMeta, abstractmethod, abstractproperty
import numpy as np
from scipy.ndimage.measurements import center_of_mass
from ..box_and_mask import get_xywh_bbox_from_binary_mask
from .. import cfg
class PCV_base(metaclass=ABCMeta):
def __init__(self):
# store the necessary modules
pass
@abstractproperty
def num_bins(self):
pass
@abstractproperty
def num_votes(self):
pass
@abstractproperty
def vote_mask(self):
pass
@abstractproperty
def query_mask(self):
"""
Flipped from inside out
"""
diam = len(self.vote_mask)
radius = (diam - 1) // 2
center = (radius, radius)
mask_shape = self.vote_mask.shape
offset_grid = np.indices(mask_shape).transpose(1, 2, 0)[..., ::-1]
offsets = center - offset_grid
allegiance = self.discrete_vote_inx_from_offset(
offsets.reshape(-1, 2)
).reshape(mask_shape)
return allegiance
@abstractmethod
def centroid_from_ins_mask(self, ins_mask):
mode = self.centroid_mode
assert mode in ('bbox', 'cmass')
if mode == 'bbox':
bbox = get_xywh_bbox_from_binary_mask(ins_mask)
x, y, w, h = bbox
return [x + w // 2, y + h // 2]
else:
y, x = center_of_mass(ins_mask)
x, y = int(x), int(y)
return [x, y]
@abstractmethod
def discrete_vote_inx_from_offset(self, offset):
pass
@staticmethod
def _discretize_offset(vote_mask, offset):
"""
Args:
            offset: [N, 2] array of offsets towards each pixel's own center.
                Each row is an (x, y) pair, not (y, x)!
"""
shape = offset.shape
assert len(shape) == 2 and shape[1] == 2
offset = offset[:, ::-1] # swap to (y, x) for indexing
diam = len(vote_mask)
radius = (diam - 1) // 2
center = (radius, radius)
coord = offset + center
del offset
ret = -1 * np.ones(len(coord), dtype=np.int32)
valid_inds = np.where(
(coord[:, 0] >= 0) & (coord[:, 0] < diam)
& (coord[:, 1] >= 0) & (coord[:, 1] < diam)
)[0]
_y_inds, _x_inds = coord[valid_inds].T
vals = vote_mask[_y_inds, _x_inds]
ret[valid_inds] = vals
return ret
@abstractmethod
def mask_from_sem_vote_tsr(self, dset_meta, sem_pred, vote_pred):
pass
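# Illustrative sketch (not part of the original module): how _discretize_offset
# maps (x, y) offsets onto a hardcoded 3x3 voting kernel. The vote mask below is
# made up; real masks come from concrete PCV subclasses.
#
#     vote_mask = np.array([[0, 1, 2],
#                           [3, 4, 5],
#                           [6, 7, 8]])
#     offsets = np.array([[0, 0], [1, -1], [5, 5]])   # (x, y) pairs
#     PCV_base._discretize_offset(vote_mask, offsets)
#     # -> array([ 4,  2, -1], dtype=int32): (0, 0) hits the centre bin,
#     #    (1, -1) lands in the top-right cell, out-of-window offsets get -1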
|
python
|
# coding: utf-8
"""
Command Line Interface
"""
import sys
import click
from chipheures_sos import __version__
from chipheures_sos.app import App
@click.group()
@click.version_option(__version__)
@click.option("--debug/--no-debug", default=False)
@click.pass_context
def cli(ctx, debug):
"""
Tool for database maintenance of the Chip'heures web application.
\f
:param ctx:
:param debug: debug flag
"""
ctx.ensure_object(App)
ctx.obj.debug = debug
@cli.command(name="list")
@click.argument("database", metavar="DB")
@click.pass_context
def list_tables(ctx, database):
"""
List the database tables and show record counts.
\b
    DB path to the SQLite database to read.
\f
:param ctx:
:param database: path to the database
"""
#: :type app: chipheures_sos.app.App
app = ctx.obj
app.list_tables(database)
@cli.command(name="backup")
@click.argument("database", metavar="DB")
@click.pass_context
def backup_database(ctx, database):
"""
Backup the database
\b
    DB path to the SQLite database to read.
\f
:param ctx:
:param database: path to the database
"""
#: :type app: chipheures_sos.app.App
app = ctx.obj
app.backup_database(database)
@cli.command(name="list_orders")
@click.argument("database", metavar="DB")
@click.option(
"--closed/--not-closed", default=None, help=u"Display only closed/not closed orders", show_default=True,
)
@click.pass_context
def list_orders(ctx, database, closed):
"""
List the orders and show the close date.
\b
    DB path to the SQLite database to read.
\f
:param ctx:
:param database: path to the database
"""
#: :type app: chipheures_sos.app.App
app = ctx.obj
app.list_orders(database, closed)
@cli.command(name="close_orders")
@click.argument("database", metavar="DB")
@click.option(
"-d",
"--date",
"max_date",
type=click.DateTime(["%Y-%m-%d"]),
default=None,
help=u"Close orders which are older than this date, if missing the date is detected by examining the tracked times",
)
@click.option(
"--dry-run/--run", default=True, help=u"Dry run", show_default=True,
)
@click.option(
"--period",
"period_days",
type=click.IntRange(1, 3650),
default=365 // 2,
help=u"Period (in days) from which we can consider an order is old",
show_default=True,
metavar="PERIOD",
)
@click.pass_context
def close_orders(ctx, database, max_date, dry_run, period_days):
"""
Close the "old" orders.
\b
    DB path to the SQLite database to read.
\f
:param ctx:
:param database: path to the database
    :param max_date: maximum date used to close an order (UTC time).
    :param dry_run: If ``True``, only show the action, don't run it (the database is preserved).
    :param period_days: period (in days) after which an order is considered old
"""
#: :type app: chipheures_sos.app.App
app = ctx.obj
app.dry_run = dry_run
app.close_orders(database, max_date, period_days)
if __name__ == "__main__":
cli(sys.argv[1:])
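# Example invocations (illustrative; the script name and database path are
# assumptions, not taken from the project):
#
#     python cli.py list chipheures.db
#     python cli.py backup chipheures.db
#     python cli.py close_orders chipheures.db --date 2020-01-01 --run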
|
python
|
from unittest import TestCase
from osbot_k8s.utils.Docker_Desktop_Cluster import DEFAULT_DOCKER_DESKTOP_NAME
from osbot_utils.utils.Misc import list_set
from osbot_utils.utils.Dev import pprint
from osbot_k8s.kubectl.Kubectl import Kubectl
class test_Kubectl(TestCase):
def setUp(self) -> None:
self.kubectl = Kubectl()
def test_kubectl_exec(self):
assert self.kubectl.kubectl_exec().startswith('kubectl controls the Kubernetes cluster manager.\n')
def test_kubectl_exec_raw(self):
result = self.kubectl.kubectl_exec_raw()
assert result.get('stdout').startswith('kubectl controls the Kubernetes cluster manager.\n')
del result['stdout']
assert result == { 'cwd' : '.' ,
'error' : None ,
'kwargs' : {'cwd': '.', 'stderr': -1, 'stdout': -1, 'timeout': None},
'runParams' : ['kubectl'] ,
'status' : 'ok' ,
'stderr' : '' }
# config methods
def test_clusters(self):
clusters = self.kubectl.clusters()
assert DEFAULT_DOCKER_DESKTOP_NAME in clusters
assert list_set(clusters.get(DEFAULT_DOCKER_DESKTOP_NAME)) == ['certificate-authority-data', 'server']
def test_config(self):
result = self.kubectl.config()
assert list_set(result) == ['apiVersion', 'clusters', 'contexts', 'current-context', 'kind', 'preferences', 'users']
pprint(result)
def test_context_set_current(self):
assert self.kubectl.context_set_current('aaa') == 'error: no context exists with the name: "aaa"\n'
assert self.kubectl.context_set_current(DEFAULT_DOCKER_DESKTOP_NAME) == f'Switched to context "{DEFAULT_DOCKER_DESKTOP_NAME}".\n'
assert self.kubectl.context_current() == DEFAULT_DOCKER_DESKTOP_NAME
    def test_contexts(self):
        contexts = self.kubectl.contexts()
        assert DEFAULT_DOCKER_DESKTOP_NAME in contexts
        assert list_set(contexts.get(DEFAULT_DOCKER_DESKTOP_NAME)) == ['cluster', 'user']
def test_users(self):
users = self.kubectl.users()
assert DEFAULT_DOCKER_DESKTOP_NAME in users
assert list_set(users.get(DEFAULT_DOCKER_DESKTOP_NAME)) == ['client-certificate-data', 'client-key-data']
# kubectl functions
def test_deployments(self):
result = self.kubectl.deployments()
pprint(result)
|
python
|
import queue
from pynput.keyboard import Events, Key, Controller, Listener
from random import choice
from json import load
thread_comms = queue.Queue()
kb = Controller()
class KeyboardEvents(Events):
_Listener = Listener
class Press(Events.Event): # key press event
def __init__(self, key):
self.key = str(key).strip("'") # converting the key pressed to string and removing the ''
    def __init__(self):  # register the Press event as the on_press handler
super(Events, self).__init__(on_press=self.Press)
def replace_letter(mode, exception, window_name):
with KeyboardEvents() as events, open("replace letters.json", "r", encoding="utf8") as replace_json:
replace_dict = load(replace_json)[mode] # load a json object with the parameter mode, which is received from
# the dropdown selected value
for event in events:
try:
message = thread_comms.get_nowait() # this will receive the messages sent from the GUI
if message == "Stop":
break # break out of the loop if "Stop" is received from the GUI
except queue.Empty: # get_nowait() will get exception when Queue is empty
message = None
if event.key in exception: # continue if the user-written key is in the exception input box
continue
if event.key in replace_dict:
                kb.press(Key.backspace)  # delete the user-written key
kb.press(choice(replace_dict[event.key])) # replaces the key with another
if event.key == "Key.esc":
window_name["__STATUS__"].update("Status: Stopped")
break # stop listening if ESC is pressed and update the GUI status to "Stopped"
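# Illustrative shape of "replace letters.json" assumed by replace_letter above
# (the real file ships with the project; the mode names and mappings below are
# made up): each mode maps a typed key to its candidate replacement keys.
#
#     {
#         "easy": {"a": ["q", "s"], "e": ["w", "r"]},
#         "hard": {"a": ["z"], "e": ["x"]}
#     }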
|
python
|
import scipy
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
class DataLoader():
def __init__(self, dataset_name, img_res=(101, 101), norm=False):
self.dataset_name = dataset_name
self.img_res = img_res
self.data = np.loadtxt("network/networkFrame.csv", delimiter=',', dtype=str)
self.norm = norm
def load_data(self, batch_size=1, is_testing=False):
batch_images = np.random.choice(self.data.shape[0], size=batch_size)
imgs_A = []
imgs_B = []
for img_path in batch_images:
img_A, img_B = self.imread(self.data[img_path])
# If training => do random flip
if not is_testing and np.random.random() < 0.5:
img_A = np.fliplr(img_A)
img_B = np.fliplr(img_B)
if not is_testing and np.random.random() > 0.5:
img_A = np.flipud(img_A)
img_B = np.flipud(img_B)
imgs_A.append(img_A)
imgs_B.append(img_B)
imgs_A = np.asarray(imgs_A)
imgs_B = np.asarray(imgs_B)
return imgs_A, imgs_B
def load_batch(self, batch_size=1, is_testing=False):
self.n_batches = int(len(self.data) / batch_size)
for i in range(self.n_batches - 1):
batch = self.data[i * batch_size:(i + 1) * batch_size]
imgs_A, imgs_B = [], []
for img in batch:
# Need to load in 5 channels here for the data
img_A, img_B = self.imread(img)
if not is_testing and np.random.random() > 0.5:
img_A = np.fliplr(img_A)
img_B = np.fliplr(img_B)
if not is_testing and np.random.random() < 0.5:
img_A = np.flipud(img_A)
img_B = np.flipud(img_B)
imgs_A.append(img_A)
imgs_B.append(img_B)
imgs_A = np.asarray(imgs_A)
imgs_B = np.asarray(imgs_B)
yield imgs_A, imgs_B
def imread(self, path):
sim_img = []
source_img = []
for element in path[1:]:
if "sci" in element:
if "sim" in element:
img = fits.getdata(element, ext=0)
center = (int(img.shape[0] / 2), int(img.shape[1] / 2))
img = img[center[0] - 32:center[0] + 32, center[1] - 32:center[1] + 32]
sim_img.append(img)
elif "source" in element:
img = fits.getdata(element, ext=0)
center = (int(img.shape[0] / 2), int(img.shape[1] / 2))
img = img[center[0] - 32:center[0] + 32, center[1] - 32:center[1] + 32]
source_img.append(img)
sim_img = np.asarray(sim_img).T
source_img = np.asarray(source_img).T
if self.norm:
sim_img = 2 * (sim_img - np.min(sim_img)) / (np.max(sim_img) - np.min(sim_img)) - 1.
source_img = 2 * (source_img - np.min(source_img)) / (np.max(source_img) - np.min(source_img)) - 1.
return sim_img, source_img
def load_redshifts(self, path):
sim_img = []
sim_redshift = []
for element in path[1:]:
if "sci" in element:
if "sim" in element:
img = fits.getdata(element, ext=0)
center = (int(img.shape[0] / 2), int(img.shape[1] / 2))
img = img[center[0] - 32:center[0] + 32, center[1] - 32:center[1] + 32]
sim_img.append(img)
sim_img = np.asarray(sim_img).T
if self.norm:
sim_img = 2 * (sim_img - np.min(sim_img)) / (np.max(sim_img) - np.min(sim_img)) - 1.
return sim_img, sim_redshift
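# Illustrative usage sketch (the dataset name is arbitrary; the CSV layout is an
# assumption based on the hardcoded "network/networkFrame.csv" path above):
#
#     loader = DataLoader("lenses", img_res=(64, 64), norm=True)
#     for imgs_A, imgs_B in loader.load_batch(batch_size=8):
#         pass  # feed the (simulated, source) image pairs to a training step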
|
python
|
from django.db.models.signals import post_save
from django.dispatch import receiver
from .models import Profile
from .models import StudentUser
from rest_framework.authtoken.models import Token
@receiver(post_save, sender=StudentUser)
def post_save_create_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
|
python
|
import json
class Operators():
@staticmethod
def initFactory():
import planout.ops.core as core
import planout.ops.random as random
Operators.operators = {
"literal": core.Literal,
"get": core.Get,
"seq": core.Seq,
"set": core.Set,
"index": core.Index,
"array": core.Array,
"equals": core.Equals,
"cond": core.Cond,
"and": core.And,
"or": core.Or,
">": core.GreaterThan,
"<": core.LessThan,
">=": core.GreaterThanOrEqualTo,
"<=": core.LessThanOrEqualTo,
"%": core.Mod,
"/": core.Divide,
"not": core.Not,
"negative": core.Negative,
"min": core.Min,
"max": core.Max,
"length": core.Length,
"product": core.Product,
"sum": core.Sum,
"randomFloat": random.RandomFloat,
"randomInteger": random.RandomInteger,
"bernoulliTrial": random.BernoulliTrial,
"bernoulliFilter": random.BernoulliFilter,
"uniformChoice": random.UniformChoice,
"weightedChoice": random.WeightedChoice,
"sample": random.Sample
}
@staticmethod
def enable_overrides():
        import planout.ops.core as core
Operators.operators['set'] = core.SetOverride
@staticmethod
def isOperator(op):
return \
type(op) is dict and "op" in op and op["op"] in Operators.operators
@staticmethod
def operatorInstance(params):
return Operators.operators[params['op']](**params)
@staticmethod
def validateOperator(params):
if type(params) is dict and 'op' in params:
if params['op'] in Operators.operators:
return Operators.operatorInstance(params)._validate()
else:
# this should probably throw an exception
                print('invalid operator %s' % params['op'])
return False
else:
return True # literals are always valid
@staticmethod
def prettyParamFormat(params):
ps = [p+'='+Operators.pretty(params[p]) for p in params if p != 'op']
return ', '.join(ps)
@staticmethod
def pretty(params):
if Operators.isOperator(params):
try:
# if an op is invalid, we may not be able to pretty print it
my_pretty = Operators.operatorInstance(params).pretty()
except:
my_pretty = params
return my_pretty
elif type(params) is list:
return '[%s]' % ', '.join([Operators.pretty(p) for p in params])
else:
return json.dumps(params)
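# Illustrative sketch (not from the original module): PlanOut operators are plain
# dicts carrying an "op" key, e.g.
#
#     Operators.initFactory()
#     params = {"op": "uniformChoice", "choices": ["a", "b"], "unit": "userid"}
#     Operators.isOperator(params)   # -> True
#     Operators.pretty(params)       # -> roughly 'uniformChoice(choices=["a", "b"], unit="userid")'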
|
python
|
import nltk
from models import *
import torch
from tokenization import *
import time
from torchtext.data.metrics import bleu_score
from nltk.translate.bleu_score import sentence_bleu
from nltk.translate.bleu_score import corpus_bleu
from nltk.translate.meteor_score import meteor_score
import sys
# explicit imports for names used below (json, torch.nn, tqdm); the wildcard
# imports above may already provide them, but being explicit keeps this script
# self-contained
import json
import torch.nn as nn
from tqdm import tqdm
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def splt_dataset(ids,captions):
"""
video_path="YouTubeClips"
video_time_path = open("video_id.txt", "w+")
file_names = [] # List which will store all of the full filepaths.
for root, directories, files in os.walk(video_path):
for filename in files:
file = os.path.basename(filename).split('.', 1)[0]
file_names.append(file) # Add it to the list.
video_time_path.write(file + "\n")
video_time_path.close()
"""
    fl = 'video_id.txt'
    with open(fl, "r") as fileObj:  # read the list of video ids
        file_names = fileObj.read().splitlines()
a=int(len(file_names)*0.70)
b=int(len(file_names) * 0.20)
train_captions=[]
train_id=[]
test_captions=[]
test_id=[]
val_captions=[]
val_id=[]
for i,cap in enumerate(file_names):
for j,idd in enumerate(ids):
if (idd == cap):
if i <= a:
train_captions.append(captions[j]) #TRAIN
train_id.append(idd)
elif i>a and i<=(a+b):
#print(i)
test_captions.append(captions[j]) #TEST
test_id.append(idd)
else:
#print(i)
val_captions.append(captions[j]) #VALIDATION
val_id.append(idd)
return train_captions,train_id,test_captions,test_id,val_captions,val_id
file_path="video_captioning_normalized.json"
with open(file_path) as f:
annotations = json.load(f)
captions = []
ids=[]
for annotation in tqdm(annotations):
caption=annotation ['caption']
id=annotation ['id']
captions.append(caption)
ids.append(id)
train_captions,train_id,test_captions,test_id,val_captions,val_id=splt_dataset(ids,captions)
import csv
with open('results/validation.csv', mode='w') as new_file:
new_writer = csv.writer(new_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
new_writer.writerow(['id','caption'])
for z,cap in zip(val_id,val_captions):
new_writer.writerow((z,cap))
i=0
#print(test_id)
"""
while test_id[i]==test_id[i+1]:
#print(i)
test_id.pop(i+1)
if i==len(test_id)-1:
break
if test_id[i]!=test_id[i+1]:
i=i+1
"""
tst_caption=[]
ref_captions = []
referance_caption=[]
for z in test_captions:
tst_caption.append(z)
#print(tst_caption)
tst_caption = tst_caption[0].split()
for i, k in enumerate(tst_caption):
if k == 'eos':
continue
elif k == 'bos':
continue
else:
ref_captions.append(k)
referance_caption.append(ref_captions)
ref_captions=[]
tst_caption=[]
#string = ' '.join(ref_captions)
#ref_captions.append(string)
#print(referance_caption)
batch_size=64
hidden_size = 256
def generate_caption(encoder,decoder, video_frames,prediction, max_len=20):
voc = Voc()
voc.load_vocabulary()
encoder_hidden = torch.zeros(6, 1, hidden_size).to(device)
input_length = video_frames.size(1)
with torch.no_grad():
#for i in range(batch_size-1):
#for n, index in enumerate(test_id):
#batch_index = n % batch_size
for ei in range(input_length):
encoder_output, encoder_hidden = encoder.forward(
(video_frames[0,ei,:].unsqueeze(0)).unsqueeze(0), encoder_hidden)
decoder_hidden = encoder_hidden
input_token=torch.ones(1,1).type(torch.LongTensor).to(device)
captions=[]
caption = ""
for seq in range(max_len):
#input_token = torch.ones(1,1).type(torch.LongTensor).to(device)
#input_token = input_token.unsqueeze(0)
with torch.no_grad():
decoder_output, decoder_hidden = decoder(input_token, decoder_hidden)
decoder_output = decoder_output.argmax(dim=1)
caption += voc.index2word[str(int(decoder_output))] + " "
input_token = decoder_output.unsqueeze(0)
captions.append(caption)
captions = captions[0].split()
generated_captions=[]
for i,k in enumerate(captions):
if k=='eos':
continue
elif k =='pad':
continue
else:
generated_captions.append(k)
string = ' '.join(generated_captions[:])
#print(f'predicted caption: {string}')
#print(generated_captions)
prediction.write(string + "\n")
#generated_caption.write(caption + "\n")
return generated_captions
def test():
print_test_loss_total = 0 # Reset every print_every
plot_test_loss_total = 0 # Reset every plot_every
criterion = nn.NLLLoss()
video_frames = torch.zeros(1, 8, 4032).to(device)
#target_tensor = torch.zeros(batch_size, train_tokens.shape[1]).to(device)
trgs = []
pred_trgs = []
reference=[]
print_bleu_1_total = 0
print_bleu_2_total = 0
print_bleu_3_total = 0
print_bleu_4_total = 0
#for iters in tqdm(range(1, n_iters + 1)):
import csv
#with open('results/nasnet_epoch_blue_scores.csv', mode='w') as new_file:
# new_writer = csv.writer(new_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
# new_writer.writerow(['Iteration', 'BLEU-1', 'BLEU-2','BLEU-3','BLEU-4'])
for iters in tqdm(range(1, n_iters + 1)):
encoder = torch.load('model_nasnet_6_layer/%s_epoch_encoder.pth' % (iters))
decoder = torch.load('model_nasnet_6_layer/%s_epoch_decoder.pth' % (iters))
prediction = open("predict_nasnet_6_layer/prediction_%s.txt"% (iters), "w+")
#encoder = torch.load('model_incep_3_layer/15_epoch_encoder.pth')
#decoder = torch.load('model_incep_3_layer/15_epoch_decoder.pth')
#encoder = torch.load('best_encoder.pth' )
#decoder = torch.load('best_decoder.pth' )
encoder.train()
decoder.train()
encoder.eval()
decoder.eval()
#import csv
# with open('results/nasnet_blue_scores.csv', mode='w') as new_file:
# new_writer = csv.writer(new_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
# new_writer.writerow(['Iteration', 'BLEU-1', 'BLEU-2','BLEU-3','BLEU-4'])
count=0
for n, index in enumerate(test_id):
if n==0:
reference.append(referance_caption[n])
elif index == test_id[n-1]:
reference.append(referance_caption[n])
else:
reference=[]
reference.append(referance_caption[n])
if n==len(test_id)-1:
#print(index)
#print(reference)
video_frames[0] = torch.load('nasnet_feature_new/' + index + '.pt') # encoder input
pred_test = generate_caption(encoder=encoder, decoder=decoder, video_frames=video_frames,prediction=prediction)
count += 1
weights = (1, 0, 0, 0)
bleu_1 = sentence_bleu(reference, pred_test, weights)
weights = (0.5, 0.5, 0, 0)
bleu_2 = sentence_bleu(reference, pred_test, weights)
weights = (0.3, 0.3, 0.3, 0)
bleu_3 = sentence_bleu(reference, pred_test, weights)
weights = (0.25, 0.25, 0.25, 0.25)
bleu_4 = sentence_bleu(reference, pred_test, weights)
#print(f"iteration:{count}")
#print(f"BLEU-1: {bleu_1}")
#print(f"BLEU-2: {bleu_2}")
#print(f"BLEU-3: {bleu_3}")
#print(f"BLEU-4: {bleu_4}")
print_bleu_1_total += bleu_1
print_bleu_2_total += bleu_2
print_bleu_3_total += bleu_3
print_bleu_4_total += bleu_4
print_bleu_1_avg = print_bleu_1_total / count
print_bleu_2_avg = print_bleu_2_total / count
print_bleu_3_avg = print_bleu_3_total / count
print_bleu_4_avg = print_bleu_4_total / count
#print(f"iteration:{count}")
print(f"BLEU-1: {print_bleu_1_avg}")
print(f"BLEU-2: {print_bleu_2_avg}")
print(f"BLEU-3: {print_bleu_3_avg}")
print(f"BLEU-4: {print_bleu_4_avg}")
print_bleu_1_total=0
print_bleu_2_total = 0
print_bleu_3_total = 0
print_bleu_4_total = 0
#new_writer.writerow((iters, print_bleu_1_avg, print_bleu_2_avg, print_bleu_3_avg, print_bleu_4_avg))
elif index != test_id[n+1]:
#print(index)
#print(reference)
video_frames[0] = torch.load('nasnet_feature_new/' + index + '.pt') # encoder input
pred_test = generate_caption(encoder=encoder, decoder=decoder, video_frames=video_frames,prediction=prediction)
count += 1
weights = (1, 0, 0, 0)
bleu_1 = sentence_bleu(reference, pred_test, weights)
weights = (0.5, 0.5, 0, 0)
bleu_2 = sentence_bleu(reference, pred_test, weights)
weights = (0.3, 0.3, 0.3, 0)
bleu_3 = sentence_bleu(reference, pred_test, weights)
weights = (0.25, 0.25, 0.25, 0.25)
bleu_4 = sentence_bleu(reference, pred_test, weights)
#print(f"iteration:{count}")
#print(f"BLEU-1: {bleu_1}")
#print(f"BLEU-2: {bleu_2}")
#print(f"BLEU-3: {bleu_3}")
#print(f"BLEU-4: {bleu_4}")
#new_writer.writerow((count, bleu_1, bleu_2, bleu_3, bleu_4))
print_bleu_1_total += bleu_1
print_bleu_2_total += bleu_2
print_bleu_3_total += bleu_3
print_bleu_4_total += bleu_4
else:
continue
def samplevid(id):
video_frames = torch.zeros(1, 8, 2048).to(device)
# target_tensor = torch.zeros(batch_size, train_tokens.shape[1]).to(device)
for iters in tqdm(range(1, n_iters + 1)):
encoder = torch.load('model_incep_3_layer/%s_epoch_encoder.pth' % (iters))
decoder = torch.load('model_incep_3_layer/%s_epoch_decoder.pth' % (iters))
encoder.train()
decoder.train()
trgs = []
pred_trgs = []
encoder.eval()
decoder.eval()
print(f'id:{id}')
video_frames[0] = torch.load('features/' + id + '.pt')
generate_caption(encoder=encoder, decoder=decoder, video_frames=video_frames)
n_iters=50
"""
for iters in tqdm(range(1, n_iters + 1)):
encoder=torch.load('model/%s_epoch_encoder.pth'% (iters))
decoder=torch.load('model/%s_epoch_decoder.pth'% (iters))
"""
#bleu_1,bleu_2,bleu_3,bleu_4=test()
test()
#print(f"BLEU-1: {bleu_1}")
#print(f"BLEU-2: {bleu_2}")
#print(f"BLEU-3: {bleu_3}")
#print(f"BLEU-4: {bleu_4}")
"""
########################## Sample Video ###############################
#id='BnJUWwSx1kE_11_22'
#id='N6SglZopfmk_97_111'
#id='YmXCfQm0_CA_109_120'
#id='8yS2wqwActs_2_14'
#id='SzEbtbNSg04_71_93'
#id='QHkvBU8diwU_1_18'
#id='QMJY29QMewQ_42_52'
samplevid(id)
import cv2
video = cv2.VideoCapture('YouTubeClips/' + id + '.avi')
video.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
video.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
while True:
ret, view = video.read()
cv2.imshow(id,view)
if cv2.waitKey(25) & 0xFF == ord('t'):
break
video.release()
cv2.destroyAllWindows()
"""
|
python
|
from django.conf import settings
from ngw.core.models import Config, Contact
def banner(request):
"""
    This context processor just adds a "banner" key that is always available
"""
if hasattr(request, 'user') and request.user.is_authenticated:
return {'banner': Config.objects.get(pk='banner').text}
else:
return ()
def contactcount(request):
"""
    This context processor just adds a "contactcount" key
"""
if hasattr(request, 'user') and request.user.is_authenticated:
return {'contactcount': Contact.objects.count()}
else:
return ()
def extra_header_links(request):
"""
    This context processor just adds an "extra_header_links" key
"""
return {'extra_header_links': settings.EXTRA_BANNER_LINKS}
def has_permission(request):
"""
Hard code has_permission for admin header
"""
return {
'has_permission':
hasattr(request, 'user') and request.user.is_authenticated
}
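# Registration sketch (illustrative): these processors become active once their
# dotted paths are listed in settings.TEMPLATES[0]["OPTIONS"]["context_processors"].
# The module path below is an assumption about where this file lives.
#
#     "ngw.core.context_processors.banner",
#     "ngw.core.context_processors.contactcount",
#     "ngw.core.context_processors.extra_header_links",
#     "ngw.core.context_processors.has_permission",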
|
python
|
import sys
import os
import logging
import click
import wfepy
import wfepy.utils
@click.command()
@click.option('-d', '--debug', is_flag=True)
@click.argument('example_name')
def run_wf(debug, example_name):
logging.basicConfig(level=logging.DEBUG if debug else logging.INFO)
example_module = __import__(example_name)
wf = wfepy.Workflow()
wf.load_tasks(example_module)
wfepy.utils.render_graph(wf, os.path.join(os.path.dirname(__file__), example_name + '.gv'))
runner = wf.create_runner()
runner.run()
if __name__ == '__main__':
# pylint: disable=unexpected-keyword-arg,no-value-for-parameter
run_wf()
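# Example invocation (illustrative; assumes this script is saved as run_example.py
# next to an importable example module, e.g. one named "approval" defining wfepy tasks):
#
#     python run_example.py approval
#     python run_example.py -d approval   # with debug logging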
|
python
|
from abc import ABCMeta, abstractmethod, abstractproperty
from pathlib import Path
from PIL import Image, ImageFont, ImageDraw, ImageEnhance, ImageFilter
import string
import cv2
import numpy as np
from numpy.random import uniform, choice
from random import randint, choice as rand_choice
import arabic_reshaper
from bidi.algorithm import get_display
# from googletrans import Translator
from unidecode import unidecode
from utils import use_seed
from utils.constant import (BACKGROUND_COLOR, BASELINE_COLOR, CAPTION_COLOR, CONTEXT_BACKGROUND_COLOR, DRAWING_COLOR,
FLOATING_WORD_COLOR, GLYPH_COLOR, IMAGE_COLOR, PARAGRAPH_COLOR, TABLE_WORD_COLOR,
TITLE_COLOR, TEXT_BORDER_COLOR)
from utils.constant import (BACKGROUND_LABEL, BASELINE_LABEL, CAPTION_LABEL, CONTEXT_BACKGROUND_LABEL, DRAWING_LABEL,
FLOATING_WORD_LABEL, GLYPH_LABEL, IMAGE_LABEL, PARAGRAPH_LABEL, TABLE_WORD_LABEL,
TITLE_LABEL, TEXT_BORDER_LABEL)
from utils.constant import SEG_GROUND_TRUTH_FMT
from utils.image import paste_with_blured_borders, resize
from utils.path import SYNTHETIC_RESRC_PATH
from synthetic.resource import (ResourceDatabase, BACKGROUND_RESRC_NAME, CONTEXT_BACKGROUND_RESRC_NAME,
DRAWING_RESRC_NAME, DRAWING_BACKGROUND_RESRC_NAME, GLYPH_FONT_RESRC_NAME,
FONT_RESRC_NAME, IMAGE_RESRC_NAME, NOISE_PATTERN_RESRC_NAME, TEXT_RESRC_NAME)
DATABASE = ResourceDatabase()
TRANSLATOR = None # Translator(service_urls=['translate.google.com'])
BLURED_BORDER_WIDTH_RANGE = (1, 7)
GAUSSIAN_NOISE_STD_RANGE = (2, 10)
NOISE_PATTERN_SIZE_RANGE = {
'border_hole': (100, 600),
'center_hole': (100, 400),
'corner_hole': (100, 400),
'phantom_character': (30, 100),
}
NOISE_PATTERN_OPACITY_RANGE = (0.2, 0.6)
POS_ELEMENT_OPACITY_RANGE = {
'drawing': (200, 255),
'glyph': (150, 255),
'image': (150, 255),
'table': (200, 255),
'text': (200, 255),
}
NEG_ELEMENT_OPACITY_RANGE = {
'drawing': (0, 10),
'glyph': (0, 10),
'image': (0, 25),
'table': (0, 10),
'text': (0, 10),
}
NEG_ELEMENT_BLUR_RADIUS_RANGE = (1, 2.5)
BACKGROUND_BLUR_RADIUS_RANGE = (0, 0.2)
BACKGROUND_COLOR_BLEND_FREQ = 0.1
CONTEXT_BACKGROUND_UNIFORM_FREQ = 0.5
DRAWING_CONTRAST_FACTOR_RANGE = (1, 4)
DRAWING_WITH_BACKGROUND_FREQ = 0.3
DRAWING_WITH_COLOR_FREQ = 0.3
GLYPH_COLORED_FREQ = 0.5
LINE_WIDTH_RANGE = (1, 4)
TABLE_LAYOUT_RANGE = {
'col_size_range': (50, 200),
}
TEXT_BASELINE_HEIGHT = 5
TEXT_BBOX_FREQ = 0.3
TEXT_BBOX_BORDER_WIDTH_RANGE = (1, 6)
TEXT_BBOX_PADDING_RANGE = (0, 20)
TEXT_COLORED_FREQ = 0.5
TEXT_FONT_TYPE_RATIO = {
'arabic': 0,
'chinese': 0,
'handwritten': 0.5,
'normal': 0.5,
}
TEXT_JUSTIFIED_PARAGRAPH_FREQ = 0.7
TEXT_ROTATION_ANGLE_RANGE = (-60, 60)
TEXT_TIGHT_PARAGRAPH_FREQ = 0.5
TEXT_TITLE_UPPERCASE_RATIO = 0.5
TEXT_TITLE_UNILINE_RATIO = 0.25
TEXT_UNDERLINED_FREQ = 0.1
TEXT_UNDERLINED_PADDING_RANGE = (0, 4)
@use_seed()
def get_random_noise_pattern(width, height):
pattern_path = choice(DATABASE[NOISE_PATTERN_RESRC_NAME])
pattern_type = Path(pattern_path).parent.name
img = Image.open(pattern_path).convert('L')
size_min, size_max = NOISE_PATTERN_SIZE_RANGE[pattern_type]
size_max = min(min(width, height), size_max)
size = (randint(size_min, size_max), randint(size_min, size_max))
if pattern_type in ['border_hole', 'corner_hole']:
img = resize(img, size, keep_aspect_ratio=True, resample=Image.ANTIALIAS)
rotation = choice([None, Image.ROTATE_90, Image.ROTATE_180, Image.ROTATE_270])
if rotation is not None:
img = img.transpose(rotation)
if pattern_type == 'border_hole':
if rotation is None:
position = ((randint(0, width - img.size[0]), 0))
elif rotation == Image.ROTATE_90:
position = (0, randint(0, height - img.size[1]))
elif rotation == Image.ROTATE_180:
position = ((randint(0, width - img.size[0]), height - img.size[1]))
else:
position = (width - img.size[0], randint(0, height - img.size[1]))
else:
if rotation is None:
position = (0, 0)
elif rotation == Image.ROTATE_90:
position = (0, height - img.size[1])
elif rotation == Image.ROTATE_180:
position = (width - img.size[0], height - img.size[1])
else:
position = (width - img.size[0], 0)
else:
img = resize(img, size, keep_aspect_ratio=False, resample=Image.ANTIALIAS)
rotation = randint(0, 360)
img = img.rotate(rotation, fillcolor=255)
pad = max(img.width, img.height)
position = (randint(0, max(0, width - pad)), randint(0, max(0, height - pad)))
alpha = uniform(*NOISE_PATTERN_OPACITY_RANGE)
arr = np.array(img.convert('RGBA'))
arr[:, :, 3] = (255 - arr[:, :, 2]) * alpha
hue_color = randint(0, 360)
value_ratio = uniform(0.95, 1)
return Image.fromarray(arr), hue_color, value_ratio, position
class AbstractElement:
"""Abstract class that defines the characteristics of a document's element."""
__metaclass__ = ABCMeta
label = NotImplemented
color = NotImplemented
content_width = NotImplemented
content_height = NotImplemented
name = NotImplemented
pos_x = NotImplemented
pos_y = NotImplemented
def __init__(self, width, height, seed=None, **kwargs):
self.width, self.height = width, height
self.parameters = kwargs
self.generate_content(seed=seed)
@property
def size(self):
return (self.width, self.height)
@property
def content_size(self):
return (self.content_width, self.content_height)
@property
def position(self):
return (self.pos_x, self.pos_y)
@use_seed()
@abstractmethod
def generate_content(self):
pass
@abstractmethod
def to_image(self):
pass
def to_image_as_array(self):
return np.array(self.to_image(), dtype=np.float32) / 255
@abstractmethod
def to_label_as_array(self):
pass
def to_label_as_img(self):
arr = self.to_label_as_array()
res = np.zeros(arr.shape + (3,), dtype=np.uint8)
res[arr == self.label] = self.color
return Image.fromarray(res)
class BackgroundElement(AbstractElement):
label = BACKGROUND_LABEL
color = BACKGROUND_COLOR
name = 'background'
@use_seed()
def generate_content(self):
self.img_path = self.parameters.get('image_path') or choice(DATABASE[BACKGROUND_RESRC_NAME])
self.img = Image.open(self.img_path).resize(self.size, resample=Image.ANTIALIAS).convert('RGB')
self.blur_radius = uniform(*BACKGROUND_BLUR_RADIUS_RANGE)
self.content_width, self.content_height = self.size
self.pos_x, self.pos_y = (0, 0)
color_blend = choice([True, False], p=[BACKGROUND_COLOR_BLEND_FREQ, 1 - BACKGROUND_COLOR_BLEND_FREQ])
if color_blend:
new_img = cv2.cvtColor(np.array(self.img), cv2.COLOR_RGB2HSV)
new_img[:, :, 0] = randint(0, 360)
self.img = Image.fromarray(cv2.cvtColor(new_img, cv2.COLOR_HSV2RGB))
def to_image(self, flip=False):
if flip:
return self.img.transpose(Image.FLIP_LEFT_RIGHT).filter(ImageFilter.GaussianBlur(self.blur_radius))
else:
return self.img.filter(ImageFilter.GaussianBlur(self.blur_radius))
def to_label_as_array(self):
return np.full(self.size, self.label, dtype=np.uint8).transpose()
@property
def inherent_left_margin(self):
img_path = Path(self.img_path) if isinstance(self.img_path, str) else self.img_path
try:
return int(int(img_path.parent.name) * self.width / 596) # XXX: margins were calibrated on 596x842 images
except ValueError:
return 0
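# Illustrative usage sketch (assumes the synthetic resource database has been
# populated; the page size and seed are arbitrary):
#
#     bg = BackgroundElement(596, 842, seed=0)
#     page = bg.to_image()              # blurred RGB background, PIL Image
#     labels = bg.to_label_as_array()   # (H, W) uint8 array of BACKGROUND_LABEL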
class ContextBackgroundElement(AbstractElement):
label = CONTEXT_BACKGROUND_LABEL
color = CONTEXT_BACKGROUND_COLOR
name = 'context_background'
@use_seed()
def generate_content(self):
uniform_bg = choice([True, False], p=[CONTEXT_BACKGROUND_UNIFORM_FREQ, 1 - CONTEXT_BACKGROUND_UNIFORM_FREQ])
if uniform_bg:
color = randint(0, 255)
std = randint(*GAUSSIAN_NOISE_STD_RANGE)
img = Image.new(mode='L', color=color, size=self.size)
img = Image.fromarray(cv2.randn(np.array(img), mean=color, stddev=std))
else:
color = None
img_path = self.parameters.get('image_path') or choice(DATABASE[CONTEXT_BACKGROUND_RESRC_NAME])
img = Image.open(img_path)
transpose_idx = choice([None, Image.ROTATE_90, Image.ROTATE_180, Image.ROTATE_270])
if transpose_idx is not None:
img = img.transpose(transpose_idx)
self.intensity = img.convert('L').resize((1, 1)).getpixel((0, 0))
self.img = img.resize(self.size, resample=Image.ANTIALIAS).convert('RGB')
self.blur_radius = uniform(*BACKGROUND_BLUR_RADIUS_RANGE)
self.content_width, self.content_height = self.size
self.pos_x, self.pos_y = (0, 0)
def to_image(self):
return self.img.filter(ImageFilter.GaussianBlur(self.blur_radius))
def to_label_as_array(self):
return np.full(self.size, self.label, dtype=np.uint8).transpose()
class DrawingElement(AbstractElement):
label = DRAWING_LABEL
color = DRAWING_COLOR
name = 'drawing'
@use_seed()
def generate_content(self):
self.img_path = self.parameters.get('image_path') or choice(DATABASE[DRAWING_RESRC_NAME])
img = Image.open(self.img_path).convert('L')
self.contrast_factor = uniform(*DRAWING_CONTRAST_FACTOR_RANGE)
self.as_negative = self.parameters.get('as_negative', False)
self.blur_radius = uniform(*NEG_ELEMENT_BLUR_RADIUS_RANGE) if self.as_negative else None
self.opacity = randint(*NEG_ELEMENT_OPACITY_RANGE[self.name] if self.as_negative
else POS_ELEMENT_OPACITY_RANGE[self.name])
self.colored = choice([True, False], p=[DRAWING_WITH_COLOR_FREQ, 1 - DRAWING_WITH_COLOR_FREQ])
if self.colored:
self.color_channels = choice(range(3), randint(1, 2), replace=False)
self.other_channel_intensity = [randint(0, 100) for _ in range(3)]
self.hue_color = randint(0, 360)
else:
            self.color_channels, self.other_channel_intensity = None, None
self.with_background = choice([True, False], p=[DRAWING_WITH_BACKGROUND_FREQ, 1 - DRAWING_WITH_BACKGROUND_FREQ])
if self.with_background:
self.color, self.label = IMAGE_COLOR, IMAGE_LABEL
blured_border_width = randint(*BLURED_BORDER_WIDTH_RANGE)
max_size = [s - 2 * blured_border_width for s in self.size]
img = resize(img, max_size)
bg = Image.open(choice(DATABASE[DRAWING_BACKGROUND_RESRC_NAME])).resize(img.size)
new_img = cv2.cvtColor(np.array(bg), cv2.COLOR_RGB2HSV)
new_img[:, :, 0] = randint(0, 360)
background = Image.fromarray(cv2.cvtColor(new_img, cv2.COLOR_HSV2RGB))
if not self.colored:
background = background.convert('L').convert('RGB')
self.background = background
self.blured_border_width = blured_border_width
else:
img = resize(img, self.size)
self.background, self.blured_border_width = None, 0
self.img = img
self.content_width, self.content_height = self.img.size
self.pos_x = randint(0, self.width - self.content_width)
self.pos_y = randint(0, self.height - self.content_height)
label_path = Path(self.img_path).parent / SEG_GROUND_TRUTH_FMT.format(Path(self.img_path).stem, 'png')
self.mask_label = np.array(resize(Image.open(label_path), self.img.size, False, resample=Image.NEAREST))
def scaled_size(self, img):
size = [s - 2 * self.blured_border_width for s in self.size]
ratio = img.size[0] / img.size[1]
return map(min, zip(*[size, (int(size[1] * ratio), int(size[0] / ratio))]))
def to_image(self, canvas=None):
if canvas is None:
canvas = Image.new('RGB', self.size, (255, 255, 255))
if self.with_background:
paste_with_blured_borders(canvas, self.background, self.position, border_width=self.blured_border_width)
canvas_arr = np.array(canvas.convert('RGB'))
enhanced_img = ImageEnhance.Contrast(self.img).enhance(self.contrast_factor)
img_arr = np.array(enhanced_img, dtype=np.uint8)
img_arr[self.mask_label == 0] = 255
if self.colored:
img_arr_channels = []
for i in range(3):
if i in self.color_channels:
img_arr_channels.append(img_arr)
else:
other_arr = img_arr.copy()
other_arr[img_arr != 255] = self.other_channel_intensity[i]
img_arr_channels.append(other_arr)
img_arr_channels_hsv = cv2.cvtColor(np.dstack(img_arr_channels), cv2.COLOR_RGB2HSV)
img_arr_channels_hsv[:, :, 0] = self.hue_color
img_arr_channels = cv2.cvtColor(img_arr_channels_hsv, cv2.COLOR_HSV2RGB)
else:
img_arr_channels = np.dstack([img_arr for i in range(3)])
x, y = self.position
img_arr_rgb = np.full(canvas_arr.shape, 255, dtype=np.uint8)
img_arr_rgb[y:y+self.content_height, x:x+self.content_width] = img_arr_channels
result = Image.fromarray(cv2.multiply(canvas_arr, img_arr_rgb, scale=1/255)).convert('RGBA')
result.putalpha(self.opacity)
if self.as_negative:
result = result.filter(ImageFilter.GaussianBlur(self.blur_radius))
return result
def to_label_as_array(self):
label = np.full(self.size, BACKGROUND_LABEL, dtype=np.uint8)
if self.as_negative:
return label.transpose()
else:
x, y = self.position
if self.with_background:
label[x:x+self.content_width, y:y+self.content_height] = self.label
else:
mask = (self.mask_label == 255).transpose()
label[x:x+self.content_width, y:y+self.content_height][mask] = self.label
return label.transpose()
class GlyphElement(AbstractElement):
label = GLYPH_LABEL
color = GLYPH_COLOR
font_size_range = (200, 800)
name = 'glyph'
@use_seed()
def generate_content(self):
self.font_path = choice(DATABASE[GLYPH_FONT_RESRC_NAME])
self.letter = self.parameters.get('letter') or rand_choice(string.ascii_uppercase)
# To avoid oversized letters
rescaled_height = (self.height * 2) // 3
min_fs, max_fs = self.font_size_range
actual_max_fs = min(rescaled_height, max_fs)
tmp_font = ImageFont.truetype(self.font_path, size=actual_max_fs)
while tmp_font.getsize(self.letter)[0] > self.width and actual_max_fs > self.font_size_range[0]:
actual_max_fs -= 1
tmp_font = ImageFont.truetype(self.font_path, size=actual_max_fs)
if min_fs < actual_max_fs:
self.font_size = randint(min_fs, actual_max_fs)
else:
self.font_size = actual_max_fs
self.font = ImageFont.truetype(self.font_path, size=self.font_size)
self.as_negative = self.parameters.get('as_negative', False)
self.blur_radius = uniform(*NEG_ELEMENT_BLUR_RADIUS_RANGE) if self.as_negative else None
self.opacity = randint(*NEG_ELEMENT_OPACITY_RANGE[self.name] if self.as_negative
else POS_ELEMENT_OPACITY_RANGE[self.name])
self.colored = choice([True, False], p=[GLYPH_COLORED_FREQ, 1 - GLYPH_COLORED_FREQ])
self.colors = (0, 0, 0) if not self.colored else tuple([randint(0, 150) for _ in range(3)])
self.content_width, self.content_height = self.font.getsize(self.letter)
self.pos_x = randint(0, max(0, self.width - self.content_width))
self.pos_y = randint(0, max(0, self.height - self.content_height))
def to_image(self):
canvas = Image.new('RGBA', self.size)
image_draw = ImageDraw.Draw(canvas)
colors_alpha = self.colors + (self.opacity,)
image_draw.text(self.position, self.letter, font=self.font, fill=colors_alpha)
if self.as_negative:
canvas = canvas.filter(ImageFilter.GaussianBlur(self.blur_radius))
return canvas
def to_label_as_array(self):
if self.as_negative:
return np.full(self.size, BACKGROUND_LABEL, dtype=np.uint8).transpose()
else:
padding = self.font_size # XXX we want to avoid borders when computing closings
size = tuple(map(lambda s: s + 2 * padding, self.size))
position = tuple(map(lambda s: s + padding, self.position))
canvas = Image.new('L', size, color=0)
image_draw = ImageDraw.Draw(canvas)
image_draw.text(position, self.letter, font=self.font, fill=255)
nb_iter = self.font_size // 2
label = (np.asarray(canvas, dtype=np.uint8) > 0).astype(np.uint8)
label = cv2.morphologyEx(label, cv2.MORPH_CLOSE, kernel=np.ones((3, 3), dtype=np.uint8), iterations=nb_iter)
label = label[padding:-padding, padding:-padding]
label[label == 0] = BACKGROUND_LABEL
label[label == 1] = self.label
return label
class ImageElement(AbstractElement):
label = IMAGE_LABEL
color = IMAGE_COLOR
name = 'image'
@use_seed()
def generate_content(self):
self.blured_border_width = randint(*BLURED_BORDER_WIDTH_RANGE)
self.as_negative = self.parameters.get('as_negative', False)
self.blur_radius = uniform(*NEG_ELEMENT_BLUR_RADIUS_RANGE) if self.as_negative else None
self.opacity = randint(*NEG_ELEMENT_OPACITY_RANGE[self.name] if self.as_negative
else POS_ELEMENT_OPACITY_RANGE[self.name])
img = Image.open(self.parameters.get('image_path') or choice(DATABASE[IMAGE_RESRC_NAME]))
img.putalpha(self.opacity)
max_size = [s - 2 * self.blured_border_width for s in self.size]
self.img = resize(img, max_size)
self.content_width, self.content_height = self.img.size
self.pos_x = randint(0, self.width - self.content_width)
self.pos_y = randint(0, self.height - self.content_height)
def to_image(self, canvas_color=(255, 255, 255)):
canvas = Image.new('RGBA', self.size, canvas_color + (0,))
paste_with_blured_borders(canvas, self.img, self.position, border_width=self.blured_border_width)
if self.as_negative:
canvas = canvas.filter(ImageFilter.GaussianBlur(self.blur_radius))
return canvas
def to_label_as_array(self):
label = np.full(self.size, BACKGROUND_LABEL, dtype=np.uint8)
if self.as_negative:
return label.transpose()
else:
x, y = self.position
label[x:x+self.content_width, y:y+self.content_height] = self.label
return label.transpose()
class AbstractTextElement(AbstractElement):
"""Abstract class that defines a text element such as titles, captions and paragraphs."""
__metaclass__ = ABCMeta
border_label = TEXT_BORDER_LABEL
border_color = TEXT_BORDER_COLOR
name = 'text'
@abstractproperty
def text_type(self):
pass
@abstractproperty
def n_max_lines(self):
pass
@abstractproperty
def n_min_characters(self):
pass
@abstractproperty
def font_size_range(self):
pass
@abstractproperty
def line_spacing_range(self):
pass
@staticmethod
def get_random_font():
font_type = choice(list(TEXT_FONT_TYPE_RATIO.keys()), p=list(TEXT_FONT_TYPE_RATIO.values()))
return choice(DATABASE[FONT_RESRC_NAME][font_type])
@use_seed()
def generate_content(self):
min_fs, max_fs = self.font_size_range
min_spacing, max_spacing = self.line_spacing_range
if self.text_type == 'paragraph':
tight = choice([True, False], p=[TEXT_TIGHT_PARAGRAPH_FREQ, 1 - TEXT_TIGHT_PARAGRAPH_FREQ])
if tight:
max_fs, max_spacing = max(min_fs, 30), max(min_spacing, 4)
else:
min_fs, min_spacing = min(30, max_fs), min(2, max_fs)
self.justified = tight and choice([True, False], p=[TEXT_JUSTIFIED_PARAGRAPH_FREQ,
1 - TEXT_JUSTIFIED_PARAGRAPH_FREQ])
else:
self.justified = False
self.font_path = self.parameters.get('font_path') or self.get_random_font()
self.font_type = Path(self.font_path).relative_to(SYNTHETIC_RESRC_PATH / FONT_RESRC_NAME).parts[0]
# To avoid oversized letters
rescaled_height = (self.height * 2) // 3
actual_max_fs = min(rescaled_height, max_fs)
tmp_font = ImageFont.truetype(self.font_path, size=actual_max_fs)
while tmp_font.getsize('bucolic')[0] > self.width and actual_max_fs > self.font_size_range[0]:
actual_max_fs -= 1
tmp_font = ImageFont.truetype(self.font_path, size=actual_max_fs)
if min_fs < actual_max_fs:
self.font_size = randint(min_fs, actual_max_fs)
else:
self.font_size = actual_max_fs
self.spacing = randint(min_spacing, max_spacing)
if 'text' in self.parameters:
text = self.parameters['text']
else:
n_char = 0
while (n_char <= self.n_min_characters):
self.text_path = choice(DATABASE[TEXT_RESRC_NAME])
with open(self.text_path) as f:
text = f.read().rstrip('\n')
n_char = len(text)
self.baseline_as_label = self.parameters.get('baseline_as_label', False)
if self.baseline_as_label:
self.label, self.color = BASELINE_LABEL, BASELINE_COLOR
self.font = ImageFont.truetype(self.font_path, size=self.font_size)
self.as_negative = self.parameters.get('as_negative', False)
self.blur_radius = uniform(*NEG_ELEMENT_BLUR_RADIUS_RANGE) if self.as_negative else None
self.opacity = randint(*NEG_ELEMENT_OPACITY_RANGE[self.name] if self.as_negative
else POS_ELEMENT_OPACITY_RANGE[self.name])
self.transpose = self.parameters.get('transpose', False)
if self.text_type == 'title':
self.uppercase = (choice([True, False], p=[TEXT_TITLE_UPPERCASE_RATIO, 1 - TEXT_TITLE_UPPERCASE_RATIO]) or
self.font_type == 'chinese')
self.uniline = choice([True, False], p=[TEXT_TITLE_UNILINE_RATIO, 1 - TEXT_TITLE_UNILINE_RATIO])
n_spaces = randint(2, 5)
text = text.replace(' ', ' ' * n_spaces)
elif self.text_type == 'word':
self.uppercase = self.font_type == 'chinese'
self.uniline = True
else:
self.uppercase = self.font_type == 'chinese'
self.uniline = False
dark_mode = self.parameters.get('dark_mode', True)
color_range = (0, 75) if dark_mode else (175, 255)
colored = choice([True, False], p=[TEXT_COLORED_FREQ, 1 - TEXT_COLORED_FREQ])
colors = tuple([randint(*color_range)] * 3) if not colored else tuple([randint(*color_range) for _ in range(3)])
self.colors_alpha = colors + (self.opacity,)
self.underlined = (choice([True, False], p=[TEXT_UNDERLINED_FREQ, 1 - TEXT_UNDERLINED_FREQ]) and
self.font_type in ['normal', 'handwritten'] and not self.text_type == 'word')
if self.underlined:
self.underline_params = {
'width': randint(*LINE_WIDTH_RANGE),
'fill': tuple([randint(*color_range)] * 3) + (self.opacity,),
}
strikethrough = choice([True, False])
line_height = self.font.font.getsize('a')[0][1]
self.underline_padding = randint(*TEXT_UNDERLINED_PADDING_RANGE) if not strikethrough else -line_height // 2
else:
self.underline_params, self.underline_padding = None, 0
self.with_bbox = self.text_type == 'paragraph' and choice([True, False], p=[TEXT_BBOX_FREQ, 1 - TEXT_BBOX_FREQ])
if self.with_bbox:
filled = choice([True, False])
alpha = randint(0, min(self.opacity, 100))
self.bbox_params = {
'width': randint(*TEXT_BBOX_BORDER_WIDTH_RANGE),
'outline': self.colors_alpha,
'fill': tuple([randint(150, 255) for _ in range(3)]) + (alpha,) if filled else None
}
self.padding = randint(*TEXT_BBOX_PADDING_RANGE) + self.bbox_params['width'] + 1
else:
self.bbox_params, self.padding = None, 0
self.with_border_label = self.parameters.get('with_border_label', False)
if self.with_border_label:
label_height = self.font.font.getsize('A')[0][1]
self.padding += label_height // 2 + 1
self.background_label = self.parameters.get('background_label', BACKGROUND_LABEL)
self.text, content_width, content_height = self.format_text(text)
self.is_empty_text = len(self.text) == 0
self.rotated_text = self.text_type == 'word' and len(self.text) > 2
if self.rotated_text:
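# Rotation geometry: hypo is half the diagonal of the text box and shift the
# angle of that diagonal, so the rotation is capped to keep the rotated corners
# within half the element height; rot_padding absorbs the extra vertical extent
# of the rotated box.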
hypo = np.sqrt((content_width**2 + content_height**2) / 4)
shift = np.arctan(content_height / content_width)
actual_max_rot = np.arcsin((self.height / 2) / hypo) if hypo > self.height / 2 else np.inf
actual_max_rot = (actual_max_rot - shift) * 180 / np.pi
min_rot, max_rot = TEXT_ROTATION_ANGLE_RANGE
min_rot, max_rot = max(min_rot, -actual_max_rot), min(max_rot, actual_max_rot)
self.rotation_angle = uniform(min_rot, max_rot)
shift = -shift if self.rotation_angle < 0 else shift
new_content_height = 2 * abs(round(float(np.sin((self.rotation_angle * np.pi / 180) + shift) * hypo)))
self.rot_padding = (new_content_height - content_height) // 2
self.content_width, self.content_height = content_width, new_content_height
self.pos_x = randint(0, max(0, self.width - self.content_width))
self.pos_y = randint(self.rot_padding, max(self.rot_padding, self.height - self.content_height))
else:
self.content_width, self.content_height = content_width, content_height
self.pos_x = randint(0, max(0, self.width - self.content_width))
self.pos_y = randint(0, max(0, self.height - self.content_height))
def format_text(self, text):
if self.font_type in ['normal', 'handwritten']:
text = unidecode(text)
elif self.font_type == 'arabic':
text = TRANSLATOR.translate(text, src='en', dest='ar').text
text = get_display(arabic_reshaper.reshape(text))
elif self.font_type == 'chinese':
text = TRANSLATOR.translate(text, src='en', dest='zh-CN').text
else:
raise NotImplementedError
width, height = self.width - 2 * self.padding, self.height - 2 * self.padding
text = (text.upper() if self.uppercase else text).strip()
if self.text_type == 'word':
word_as_number = choice([True, False])
if word_as_number:
n_letter = randint(1, 5)
result_text = str(randint(0, 10**n_letter - 1))
else:
words = text.split(' ')
result_text = rand_choice(words)
iteration = 1
while (not str.isalnum(result_text) or len(result_text) < 1) and iteration < 40:
result_text = rand_choice(words)
iteration += 1
if not str.isalnum(result_text) or len(result_text) < 1:
result_text = words[0][:randint(4, 10)]
line_width = self.font.getsize(result_text)[0]
while line_width > width and len(result_text) > 2:
result_text = result_text[:-1]
line_width = self.font.getsize(result_text)[0]
else:
max_lines = 1 if self.uniline else self.n_max_lines
result_text, lines = '', ''
text_height, cur_idx, n_lines = 0, 0, -1
while text_height <= height:
result_text = lines
n_lines += 1
line = text[cur_idx:].lstrip()
cur_idx += len(text[cur_idx:]) - len(line) # adjust cur_idx if stripped
if len(line) == 0 or n_lines == max_lines:
break
line_width = self.font.getsize(line)[0]
avg_char_width = line_width / len(line)
if line_width > width:
index = int(width / avg_char_width) + 10 # take larger slice in case of small characters
cut = max(line[:index].rfind(' '), line.find(' ')) # in case no space found in slice (small width)
line = line[:cut].strip()
line_width = self.font.getsize(line)[0]
while line_width > width:
if ' ' in line: # remove word by word
line = line[:line.rfind(' ')].strip()
else: # remove character by character
line = line[:-1]
line_width = self.font.getsize(line)[0]
cur_idx += len(line) + 1
if self.justified:
w_space = self.font.getsize(' ')[0]
n_spaces = line.count(' ')
n_spaces_to_add = (width - line_width) // w_space
if n_spaces > 0 and n_spaces_to_add > 0:
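# Justify by widening each existing space to q spaces and inserting the r
# leftover spaces one by one from the left, adding exactly n_spaces_to_add
# spaces in total (skipped when q would reach 5 or more).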
q, r = n_spaces_to_add // n_spaces + 1, n_spaces_to_add % n_spaces
if q < 5:
if q > 1:
line = line.replace(' ', q * ' ')
pos = 0
while r > 0:
space_idx = line[pos:].find(' ') + pos
line = line[:space_idx] + ' ' + line[space_idx:]
pos = space_idx + q + 1
r -= 1
lines = '{}\n{}'.format(lines, line) if lines else line
text_height = self.font.getsize_multiline(lines, spacing=self.spacing)[1]
if '\n' in result_text and self.justified: # we dont want to justify the last line
result_text, last_line = result_text.rsplit('\n', 1)
last_line = ' '.join(last_line.split())
result_text = '{}\n{}'.format(result_text, last_line)
content_width, content_height = self.font.getsize_multiline(result_text, spacing=self.spacing)
content_width += 2 * self.padding
content_height += 2 * self.padding
return result_text, content_width, content_height
def to_image(self):
canvas = Image.new('RGBA', self.size)
image_draw = ImageDraw.Draw(canvas)
if self.is_empty_text:
return canvas
if self.with_bbox:
x, y = self.pos_x, self.pos_y
p = self.bbox_params['width'] // 2 + 1
image_draw.rectangle([(x+p, y+p), (x+self.content_width-p, y+self.content_height-p)], **self.bbox_params)
if self.underlined:
x, y = self.pos_x + self.padding, self.pos_y + self.padding + self.underline_padding
line_height = self.font.getsize('A')[1]
ascent, descent = self.font.getmetrics()
lines = self.text.split('\n')
for k in range(len(lines)):
image_draw.line((x, y + ascent, x + self.content_width - 2 * self.padding, y + ascent),
**self.underline_params)
y += line_height + self.spacing
image_draw.text((self.pos_x + self.padding, self.pos_y + self.padding), self.text, self.colors_alpha,
font=self.font, spacing=self.spacing)
if self.rotated_text:
x, y = self.pos_x, self.pos_y
img = canvas.crop((x, y - self.rot_padding, x + self.content_width, y + self.content_height -
self.rot_padding))
img = img.rotate(self.rotation_angle, resample=Image.BICUBIC, fillcolor=(0, 0, 0, 0))
canvas.paste(img, (self.pos_x, self.pos_y - self.rot_padding))
if self.as_negative:
canvas = canvas.filter(ImageFilter.GaussianBlur(self.blur_radius))
if self.transpose:
canvas = canvas.transpose(Image.ROTATE_90)
return canvas
def to_label_as_array(self):
label = np.full(self.size, self.background_label, dtype=np.uint8)
if not self.as_negative and len(self.text) > 0:
x, y = self.pos_x + self.padding, self.pos_y + self.padding
line_height = self.font.getsize('A')[1]
if self.baseline_as_label:
label_height = TEXT_BASELINE_HEIGHT // 2
else:
if self.text.isdigit():
char = '1'
elif self.uppercase:
char = 'A'
else:
char = 'a'
label_height = self.font.font.getsize(char)[0][1]
ascent, descent = self.font.getmetrics()
offset_y = max(0, ascent - label_height)
if self.baseline_as_label:
ascent += label_height + 1
lines = self.text.split('\n')
if self.with_border_label:
border = label_height // 2 if not self.baseline_as_label else TEXT_BASELINE_HEIGHT // 2 + 1
for line in lines:
if len(line) == 0:
continue
line_width = self.font.getsize(line)[0]
x_min, x_max = max(0, x - border), min(x + line_width + border, label.shape[0])
y_min, y_max = max(0, y + offset_y - border), min(y + ascent + border, label.shape[1])
label[x_min:x_max, y_min:y_max] = self.border_label
y += line_height + self.spacing
x, y = self.pos_x + self.padding, self.pos_y + self.padding
for line in lines:
line_width = self.font.getsize(line)[0]
y_min, y_max = y + offset_y, min(y + ascent, label.shape[1])
label[x:x+line_width, y_min:y_max] = self.label
y += line_height + self.spacing
label = label.transpose()
if self.rotated_text:
center = (self.pos_x + self.content_width / 2, self.pos_y + self.content_height / 2 - self.rot_padding)
R = cv2.getRotationMatrix2D(center, self.rotation_angle, 1)
label = cv2.warpAffine(label, R, self.size, flags=cv2.INTER_NEAREST, borderValue=self.background_label)
if self.transpose:
return np.rot90(label)
else:
return label
def to_label_as_img(self):
arr = self.to_label_as_array()
res = np.full(arr.shape + (3,), self.background_label, dtype=np.uint8)
res[arr == self.label] = self.color
if self.with_border_label:
res[arr == self.border_label] = self.border_color
return Image.fromarray(res)
class CaptionElement(AbstractTextElement):
label = CAPTION_LABEL
color = CAPTION_COLOR
text_type = 'caption'
n_max_lines = 3
n_min_characters = 50
font_size_range = (20, 60)
line_spacing_range = (1, 8)
class ParagraphElement(AbstractTextElement):
label = PARAGRAPH_LABEL
color = PARAGRAPH_COLOR
text_type = 'paragraph'
n_max_lines = 1000
n_min_characters = 400
font_size_range = (20, 60)
line_spacing_range = (1, 10)
class TitleElement(AbstractTextElement):
label = TITLE_LABEL
color = TITLE_COLOR
text_type = 'title'
n_max_lines = 20
n_min_characters = 50
font_size_range = (50, 150)
line_spacing_range = (5, 50)
class WordElement(AbstractTextElement):
label = FLOATING_WORD_LABEL
color = FLOATING_WORD_COLOR
text_type = 'word'
n_max_lines = 1
n_min_characters = 100
font_size_range = (20, 60)
line_spacing_range = (1, 1)
class TableElement(AbstractElement):
label = TABLE_WORD_LABEL
color = TABLE_WORD_COLOR
border_label = TEXT_BORDER_LABEL
border_color = TEXT_BORDER_COLOR
font_size_range = (20, 50)
name = 'table'
@use_seed()
def generate_content(self):
min_fs, max_fs = self.font_size_range
self.font_path = self.parameters.get('font_path') or AbstractTextElement.get_random_font()
rescaled_height = (self.height * 2) // 3 # to avoid oversized letters
actual_max_fs = min(rescaled_height, max_fs)
if min_fs < actual_max_fs:
self.font_size = randint(min_fs, actual_max_fs)
else:
self.font_size = actual_max_fs
self.baseline_as_label = self.parameters.get('baseline_as_label', False)
if self.baseline_as_label:
self.label, self.color = BASELINE_LABEL, BASELINE_COLOR
self.font = ImageFont.truetype(self.font_path, size=self.font_size)
self.as_negative = self.parameters.get('as_negative', False)
self.blur_radius = uniform(*NEG_ELEMENT_BLUR_RADIUS_RANGE) if self.as_negative else None
self.opacity = randint(*NEG_ELEMENT_OPACITY_RANGE[self.name] if self.as_negative
else POS_ELEMENT_OPACITY_RANGE[self.name])
self.colored = choice([True, False], p=[TEXT_COLORED_FREQ, 1 - TEXT_COLORED_FREQ])
self.colors = tuple([randint(0, 100)] * 3) if not self.colored else tuple([randint(0, 100) for _ in range(3)])
self.colors_alpha = self.colors + (self.opacity,)
self.padding = 0
self.with_border_label = self.parameters.get('with_border_label', False)
if self.with_border_label:
label_height = self.font.font.getsize('A')[0][1]
border_label_size = label_height // 2 + 1
self.padding += border_label_size
self.line_params = {
'width': randint(*LINE_WIDTH_RANGE),
'fill': tuple([randint(0, 100)] * 3) + (self.opacity,),
}
self.column_params = {
'width': randint(*LINE_WIDTH_RANGE),
'fill': tuple([randint(0, 100)] * 3) + (self.opacity,),
}
if 'text' in self.parameters:
text = self.parameters['text']
else:
n_char = 0
while (n_char <= ParagraphElement.n_min_characters):
self.text_path = choice(DATABASE[TEXT_RESRC_NAME])
with open(self.text_path) as f:
text = f.read().rstrip('\n')
n_char = len(text)
dictionary = text.split(' ')
self.table, self.content_width, self.content_height = self._generate_table(dictionary)
self.pos_x = randint(0, max(0, self.width - self.content_width))
self.pos_y = randint(0, max(0, self.height - self.content_height))
def _generate_table(self, dictionary):
width, height = randint(min(200, self.width), self.width), randint(min(200, self.height), self.height)
line_size_min = round(self.font_size * 1.3)
line_size_max = round(self.font_size * 2.5)
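# Row and column boundaries are cumulative random offsets; only separators that
# leave room for at least one more minimum-size cell are kept.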
lines = np.cumsum(np.random.randint(line_size_min, line_size_max, 40))
lines = lines[lines < height - line_size_min].tolist()
columns = np.cumsum(np.random.randint(*TABLE_LAYOUT_RANGE['col_size_range'], 20))
columns = columns[columns < width - TABLE_LAYOUT_RANGE['col_size_range'][0]].tolist()
words, word_positions = [], []
for i, c in enumerate([0] + columns):
for j, l in enumerate([0] + lines):
word_as_number = choice([True, False])
if word_as_number:
n_letter = randint(2, 9)
word = f'{randint(0, 10**n_letter - 1):,}'
else:
word = rand_choice(dictionary)
uppercase = choice([True, False])
if uppercase:
word = word.upper()
cell_width = columns[i] - c if i < len(columns) else width - c
cell_height = lines[j] - l if j < len(lines) else height - l
while self.font.getsize(word)[0] + 2 * self.padding > cell_width and len(word) > 0:
word = word[:-1].strip()
if len(word) > 0:
w, h = self.font.getsize(word)
p_c, p_l = (cell_width - w) // 2, (cell_height - h) // 2
words.append(word)
word_positions.append((c + p_c, l + p_l))
return ({'lines': lines, 'columns': columns, 'words': words, 'word_positions': word_positions}, width, height)
def to_image(self):
canvas = Image.new('RGBA', self.size)
draw = ImageDraw.Draw(canvas)
pos_x_width, pos_y_height = self.pos_x + self.content_width, self.pos_y + self.content_height
for l in self.table['lines']:
draw.line([self.pos_x, self.pos_y + l, pos_x_width, self.pos_y + l], **self.line_params)
for c in self.table['columns']:
draw.line([self.pos_x + c, self.pos_y, self.pos_x + c, pos_y_height], **self.column_params)
for word, pos in zip(self.table['words'], self.table['word_positions']):
pos = pos[0] + self.pos_x, pos[1] + self.pos_y
draw.text(pos, word, font=self.font, fill=self.colors_alpha)
if self.as_negative:
canvas = canvas.filter(ImageFilter.GaussianBlur(self.blur_radius))
return canvas
def to_label_as_array(self):
label = np.full(self.size, BACKGROUND_LABEL, dtype=np.uint8)
if self.as_negative:
return label.transpose()
else:
ascent, descent = self.font.getmetrics()
if self.baseline_as_label:
label_height = TEXT_BASELINE_HEIGHT // 2
offset_y = ascent - label_height
ascent += label_height + 1
if self.with_border_label:
for word, pos in zip(self.table['words'], self.table['word_positions']):
if len(word) == 0:
continue
x, y = self.pos_x + pos[0], self.pos_y + pos[1]
w = self.font.getsize(word)[0]
if not self.baseline_as_label:
if word.replace(',', '').isdigit():
char = '1'
elif word.isupper():
char = 'A'
else:
char = 'a'
label_height = self.font.font.getsize(char)[0][1]
offset_y = ascent - label_height
else:
label_height = TEXT_BASELINE_HEIGHT
border = label_height // 2 + 1
x_min, x_max = max(0, x-border), min(x + w + border, label.shape[0])
y_min, y_max = max(0, y + offset_y - border), min(y + ascent + border, label.shape[1])
label[x_min:x_max, y_min:y_max] = self.border_label
for word, pos in zip(self.table['words'], self.table['word_positions']):
if len(word) == 0:
continue
x, y = self.pos_x + pos[0], self.pos_y + pos[1]
w = self.font.getsize(word)[0]
if not self.baseline_as_label:
if word.replace(',', '').isdigit():
char = '1'
elif word.isupper():
char = 'A'
else:
char = 'a'
label_height = self.font.font.getsize(char)[0][1]
offset_y = ascent - label_height
label[x:x+w, y+offset_y:y+ascent] = self.label
return label.transpose()
def to_label_as_img(self):
arr = self.to_label_as_array()
res = np.zeros(arr.shape + (3,), dtype=np.uint8)
res[arr == self.label] = self.color
if self.with_border_label:
res[arr == self.border_label] = self.border_color
return Image.fromarray(res)
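# Minimal usage sketch (hypothetical: the AbstractElement base class and its
# constructor are defined earlier in this module and their exact signature is
# not shown here; a width, a height and keyword parameters are assumed, and
# generate_content() may already be invoked by the constructor):
#
# element = ParagraphElement(600, 400, with_border_label=True)
# element.generate_content()
# image = element.to_image()                # RGBA rendering of the element
# label_map = element.to_label_as_array()   # per-pixel semantic label map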
|
python
|
# Copyright 2019 Atalaya Tech, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# List of APIs for accessing remote or local yatai service via Python
import io
import os
import logging
import tarfile
import click
import requests
import shutil
from bentoml.exceptions import BentoMLException
from bentoml.utils import (
status_pb_to_error_code_and_message,
resolve_bento_bundle_uri,
is_s3_url,
is_gcs_url,
)
from bentoml.utils.lazy_loader import LazyLoader
from bentoml.yatai.client.label_utils import generate_gprc_labels_selector
from bentoml.yatai.proto.repository_pb2 import (
AddBentoRequest,
GetBentoRequest,
BentoUri,
UpdateBentoRequest,
UploadStatus,
ListBentoRequest,
DangerouslyDeleteBentoRequest,
ContainerizeBentoRequest,
)
from bentoml.yatai.proto import status_pb2
from bentoml.utils.tempdir import TempDirectory
from bentoml.saved_bundle import (
save_to_dir,
load_bento_service_metadata,
safe_retrieve,
load_from_dir,
)
from bentoml.yatai.status import Status
logger = logging.getLogger(__name__)
yatai_proto = LazyLoader('yatai_proto', globals(), 'bentoml.yatai.proto')
class BentoRepositoryAPIClient:
def __init__(self, yatai_service):
# YataiService stub for accessing remote YataiService RPCs
self.yatai_service = yatai_service
def push(self, bento, with_labels=True):
"""
Push a local BentoService to a remote yatai server.
Args:
bento: a BentoService identifier in the format of NAME:VERSION
with_labels: optional. If False, the BentoService is pushed without its labels
Returns:
BentoService saved path
Example:
>>> svc = MyBentoService()
>>> svc.save()
>>>
>>> remote_yatai_client = get_yatai_client('http://remote.yatai.service:50050')
>>> bento = f'{svc.name}:{svc.version}'
>>> remote_saved_path = remote_yatai_client.repository.push(bento)
"""
from bentoml.yatai.client import get_yatai_client
local_yc = get_yatai_client()
local_bento_pb = local_yc.repository.get(bento)
if local_bento_pb.uri.s3_presigned_url:
bento_bundle_path = local_bento_pb.uri.s3_presigned_url
elif local_bento_pb.uri.gcs_presigned_url:
bento_bundle_path = local_bento_pb.uri.gcs_presigned_url
else:
bento_bundle_path = local_bento_pb.uri.uri
labels = (
dict(local_bento_pb.bento_service_metadata.labels)
if with_labels is True and local_bento_pb.bento_service_metadata.labels
else None
)
return self.upload_from_dir(bento_bundle_path, labels=labels)
def pull(self, bento):
"""
Pull a BentoService from a remote yatai service. The BentoService will be saved
and registered with local yatai service.
Args:
bento: a BentoService identifier in the form of NAME:VERSION
Returns:
BentoService saved path
Example:
>>> client = get_yatai_client('127.0.0.1:50051')
>>> saved_path = client.repository.pull('MyService:')
"""
bento_pb = self.get(bento)
with TempDirectory() as tmpdir:
# Create a non-exist directory for safe_retrieve
target_bundle_path = os.path.join(tmpdir, 'bundle')
self.download_to_directory(bento_pb, target_bundle_path)
from bentoml.yatai.client import get_yatai_client
labels = (
dict(bento_pb.bento_service_metadata.labels)
if bento_pb.bento_service_metadata.labels
else None
)
local_yc = get_yatai_client()
return local_yc.repository.upload_from_dir(
target_bundle_path, labels=labels
)
def upload(self, bento_service, version=None, labels=None):
"""Save and upload given bento_service to yatai_service, which manages all your
saved BentoService bundles and model serving deployments.
Args:
bento_service (bentoml.service.BentoService): a Bento Service instance
version (str): optional,
labels (dict): optional
Return:
URI to where the BentoService is being saved to
"""
with TempDirectory() as tmpdir:
save_to_dir(bento_service, tmpdir, version, silent=True)
return self.upload_from_dir(tmpdir, labels)
def upload_from_dir(self, saved_bento_path, labels=None):
from bentoml.yatai.db.stores.label import _validate_labels
bento_service_metadata = load_bento_service_metadata(saved_bento_path)
if labels:
_validate_labels(labels)
bento_service_metadata.labels.update(labels)
get_bento_response = self.yatai_service.GetBento(
GetBentoRequest(
bento_name=bento_service_metadata.name,
bento_version=bento_service_metadata.version,
)
)
if get_bento_response.status.status_code == status_pb2.Status.OK:
raise BentoMLException(
"BentoService bundle {}:{} already registered in repository. Reset "
"BentoService version with BentoService#set_version or bypass BentoML's"
" model registry feature with BentoService#save_to_dir".format(
bento_service_metadata.name, bento_service_metadata.version
)
)
elif get_bento_response.status.status_code != status_pb2.Status.NOT_FOUND:
raise BentoMLException(
'Failed accessing YataiService. {error_code}:'
'{error_message}'.format(
error_code=Status.Name(get_bento_response.status.status_code),
error_message=get_bento_response.status.error_message,
)
)
request = AddBentoRequest(
bento_name=bento_service_metadata.name,
bento_version=bento_service_metadata.version,
)
response = self.yatai_service.AddBento(request)
if response.status.status_code != status_pb2.Status.OK:
raise BentoMLException(
"Error adding BentoService bundle to repository: {}:{}".format(
Status.Name(response.status.status_code),
response.status.error_message,
)
)
if response.uri.type == BentoUri.LOCAL:
if os.path.exists(response.uri.uri):
# due to copytree dst must not already exist
shutil.rmtree(response.uri.uri)
shutil.copytree(saved_bento_path, response.uri.uri)
self._update_bento_upload_progress(bento_service_metadata)
logger.info(
"BentoService bundle '%s:%s' saved to: %s",
bento_service_metadata.name,
bento_service_metadata.version,
response.uri.uri,
)
# Return URI to saved bento in repository storage
return response.uri.uri
elif response.uri.type == BentoUri.S3 or response.uri.type == BentoUri.GCS:
uri_type = 'S3' if response.uri.type == BentoUri.S3 else 'GCS'
self._update_bento_upload_progress(
bento_service_metadata, UploadStatus.UPLOADING, 0
)
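# Pack the saved bundle directory into an in-memory tar.gz and upload it
# through the presigned S3/GCS URL returned by the Yatai service.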
fileobj = io.BytesIO()
with tarfile.open(mode="w:gz", fileobj=fileobj) as tar:
tar.add(saved_bento_path, arcname=bento_service_metadata.name)
fileobj.seek(0, 0)
if response.uri.type == BentoUri.S3:
http_response = requests.put(
response.uri.s3_presigned_url, data=fileobj
)
elif response.uri.type == BentoUri.GCS:
http_response = requests.put(
response.uri.gcs_presigned_url, data=fileobj
)
if http_response.status_code != 200:
self._update_bento_upload_progress(
bento_service_metadata, UploadStatus.ERROR
)
raise BentoMLException(
f"Error saving BentoService bundle to {uri_type}."
f"{http_response.status_code}: {http_response.text}"
)
self._update_bento_upload_progress(bento_service_metadata)
logger.info(
"Successfully saved BentoService bundle '%s:%s' to %s: %s",
bento_service_metadata.name,
bento_service_metadata.version,
uri_type,
response.uri.uri,
)
return response.uri.uri
else:
raise BentoMLException(
f"Error saving Bento to target repository, URI type {response.uri.type}"
f" at {response.uri.uri} not supported"
)
def _update_bento_upload_progress(
self, bento_service_metadata, status=UploadStatus.DONE, percentage=None
):
upload_status = UploadStatus(status=status, percentage=percentage)
upload_status.updated_at.GetCurrentTime()
update_bento_req = UpdateBentoRequest(
bento_name=bento_service_metadata.name,
bento_version=bento_service_metadata.version,
upload_status=upload_status,
service_metadata=bento_service_metadata,
)
self.yatai_service.UpdateBento(update_bento_req)
def download_to_directory(self, bento_pb, target_dir):
if bento_pb.uri.s3_presigned_url:
bento_service_bundle_path = bento_pb.uri.s3_presigned_url
elif bento_pb.uri.gcs_presigned_url:
bento_service_bundle_path = bento_pb.uri.gcs_presigned_url
else:
bento_service_bundle_path = bento_pb.uri.uri
safe_retrieve(bento_service_bundle_path, target_dir)
def get(self, bento):
"""
Get info about a BentoService
Args:
bento: a BentoService identifier in the format of NAME:VERSION
Returns:
bentoml.yatai.proto.repository_pb2.Bento
Example:
>>> yatai_client = get_yatai_client()
>>> bento_info = yatai_client.repository.get('my_service:version')
"""
if ':' not in bento:
raise BentoMLException(
'BentoService name or version is missing. Please provide in the '
'format of name:version'
)
name, version = bento.split(':')
result = self.yatai_service.GetBento(
GetBentoRequest(bento_name=name, bento_version=version)
)
if result.status.status_code != yatai_proto.status_pb2.Status.OK:
error_code, error_message = status_pb_to_error_code_and_message(
result.status
)
raise BentoMLException(
f'BentoService {name}:{version} not found - '
f'{error_code}:{error_message}'
)
return result.bento
def list(
self,
bento_name=None,
offset=None,
limit=None,
labels=None,
order_by=None,
ascending_order=None,
):
"""
List BentoServices that satisfy the specified criteria.
Args:
bento_name: optional. BentoService name
limit: optional. maximum number of returned results
labels: optional.
offset: optional. offset of results
order_by: optional. order by results
ascending_order: optional. direction of results order
Returns:
[bentoml.yatai.proto.repository_pb2.Bento]
Example:
>>> yatai_client = get_yatai_client()
>>> bentos_info_list = yatai_client.repository.list(
>>> labels='key=value,key2=value'
>>> )
"""
list_bento_request = ListBentoRequest(
bento_name=bento_name,
offset=offset,
limit=limit,
order_by=order_by,
ascending_order=ascending_order,
)
if labels is not None:
generate_gprc_labels_selector(list_bento_request.label_selectors, labels)
result = self.yatai_service.ListBento(list_bento_request)
if result.status.status_code != yatai_proto.status_pb2.Status.OK:
error_code, error_message = status_pb_to_error_code_and_message(
result.status
)
raise BentoMLException(f'{error_code}:{error_message}')
return result.bentos
def _delete_bento_bundle(self, bento_tag, require_confirm):
bento_pb = self.get(bento_tag)
if require_confirm and not click.confirm(f'Permanently delete {bento_tag}?'):
return
result = self.yatai_service.DangerouslyDeleteBento(
DangerouslyDeleteBentoRequest(
bento_name=bento_pb.name, bento_version=bento_pb.version
)
)
if result.status.status_code != yatai_proto.status_pb2.Status.OK:
error_code, error_message = status_pb_to_error_code_and_message(
result.status
)
# Rather than raise Exception, continue to delete the next bentos
logger.error(
f'Failed to delete {bento_pb.name}:{bento_pb.version} - '
f'{error_code}:{error_message}'
)
else:
logger.info(f'Deleted {bento_pb.name}:{bento_pb.version}')
def delete(
self,
bento_tag=None,
labels=None,
bento_name=None,
bento_version=None,
prune=False, # pylint: disable=redefined-builtin
require_confirm=False,
):
"""
Delete bentos that match the specified criteria
Args:
bento_tag: string
labels: string
bento_name: string
bento_version: string
prune: boolean, set to True to delete all BentoService bundles
require_confirm: boolean
Example:
>>>
>>> yatai_client = get_yatai_client()
>>> # Delete all bento services
>>> yatai_client.repository.delete(prune=True)
>>> # Delete bento service with name is `IrisClassifier` and version `0.1.0`
>>> yatai_client.repository.delete(
>>> bento_name='IrisClassifier', bento_version='0.1.0'
>>> )
>>> # or use bento tag
>>> yatai_client.repository.delete('IrisClassifier:v0.1.0')
>>> # Delete all bento services with name 'MyService`
>>> yatai_client.repository.delete(bento_name='MyService')
>>> # Delete all bento services with labels match `ci=failed` and `cohort=20`
>>> yatai_client.repository.delete(labels='ci=failed, cohort=20')
"""
delete_list_limit = 50
if (
bento_tag is not None
and bento_name is not None
and bento_version is not None
):
raise BentoMLException('Too many arguments: provide either a bento tag or a name and version, not both')
if bento_tag is not None:
logger.info(f'Deleting saved Bento bundle {bento_tag}')
return self._delete_bento_bundle(bento_tag, require_confirm)
elif bento_name is not None and bento_version is not None:
logger.info(f'Deleting saved Bento bundle {bento_name}:{bento_version}')
return self._delete_bento_bundle(
f'{bento_name}:{bento_version}', require_confirm
)
else:
# list of bentos
if prune is True:
logger.info('Deleting all BentoML saved bundles.')
# ignore other fields
bento_name = None
labels = None
else:
log_message = 'Deleting saved Bento bundles'
if bento_name is not None:
log_message += f' with name: {bento_name},'
if labels is not None:
log_message += f' with labels match to {labels}'
logger.info(log_message)
offset = 0
while offset >= 0:
bento_list = self.list(
bento_name=bento_name,
labels=labels,
offset=offset,
limit=delete_list_limit,
)
offset += delete_list_limit
# Stop the loop, when no more bentos
if len(bento_list) == 0:
break
else:
for bento in bento_list:
self._delete_bento_bundle(
f'{bento.name}:{bento.version}', require_confirm
)
def containerize(self, bento, tag=None, build_args=None, push=False):
"""
Create a container image from a BentoService.
Args:
bento: string
tag: string
build_args: dict
push: boolean
Returns:
Image tag: String
"""
if ':' not in bento:
raise BentoMLException(
'BentoService name or version is missing. Please provide in the '
'format of name:version'
)
name, version = bento.split(':')
containerize_request = ContainerizeBentoRequest(
bento_name=name,
bento_version=version,
tag=tag,
build_args=build_args,
push=push,
)
result = self.yatai_service.ContainerizeBento(containerize_request)
if result.status.status_code != yatai_proto.status_pb2.Status.OK:
error_code, error_message = status_pb_to_error_code_and_message(
result.status
)
raise BentoMLException(
f'Failed to containerize {bento} - {error_code}:{error_message}'
)
return result.tag
def load(self, bento):
"""
Load bento service from bento tag or from a bento bundle path.
Args:
bento: string,
Returns:
BentoService instance
Example:
>>> yatai_client = get_yatai_client()
>>> # Load BentoService bases on bento tag.
>>> bento = yatai_client.repository.load('Service_name:version')
>>> # Load BentoService from bento bundle path
>>> bento = yatai_client.repository.load('/path/to/bento/bundle')
>>> # Load BentoService from s3 storage
>>> bento = yatai_client.repository.load('s3://bucket/path/bundle.tar.gz')
"""
if os.path.isdir(bento) or is_s3_url(bento) or is_gcs_url(bento):
saved_bundle_path = bento
else:
bento_pb = self.get(bento)
saved_bundle_path = resolve_bento_bundle_uri(bento_pb)
svc = load_from_dir(saved_bundle_path)
return svc
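# Usage sketch, following the docstring examples above (the bento tag below is
# hypothetical):
#
# from bentoml.yatai.client import get_yatai_client
# client = get_yatai_client()  # local yatai service
# bento_pb = client.repository.get('MyService:20210101_ABCDEF')
# bentos = client.repository.list(labels='env=prod')
# client.repository.delete(bento_name='MyService', require_confirm=True)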
|
python
|
# -*- coding: utf-8 -*-
"""
string normalizer exceptions module.
"""
from pyrin.core.exceptions import CoreException
class StringNormalizerManagerException(CoreException):
"""
string normalizer manager exception.
"""
pass
class InvalidStringNormalizerTypeError(StringNormalizerManagerException):
"""
invalid string normalizer type error.
"""
pass
class DuplicatedStringNormalizerError(StringNormalizerManagerException):
"""
duplicated string normalizer error.
"""
pass
class StringNormalizerDoesNotExistError(StringNormalizerManagerException):
"""
string normalizer does not exist error.
"""
pass
|
python
|
# https://www.blog.pythonlibrary.org/2010/03/08/a-simple-step-by-step-reportlab-tutorial/
# from reportlab.pdfgen import canvas
#
# c = canvas.Canvas("hello.pdf")
# c.drawString(100,750,"Welcome to Reportlab!")
# c.save()
import time
from reportlab.lib.enums import TA_JUSTIFY
from reportlab.lib.pagesizes import letter
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Image
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.units import inch
doc = SimpleDocTemplate("form_letter.pdf",pagesize=letter,
rightMargin=72,leftMargin=72,
topMargin=72,bottomMargin=18)
Story=[]
logo = "logo_iit.jpeg"
magName = "Pythonista"
issueNum = 12
subPrice = "99.00"
limitedDate = "03/05/2010"
freeGift = "tin foil hat"
formatted_time = time.ctime()
full_name = "Mike Driscoll"
address_parts = ["411 State St.", "Marshalltown, IA 50158"]
im = Image(logo, 2*inch, 2*inch)
Story.append(im)
styles=getSampleStyleSheet()
styles.add(ParagraphStyle(name='Justify', alignment=TA_JUSTIFY))
ptext = '<font size=12>%s</font>' % formatted_time
Story.append(Paragraph(ptext, styles["Normal"]))
Story.append(Spacer(1, 12))
# Create return address
ptext = '<font size=12>%s</font>' % full_name
Story.append(Paragraph(ptext, styles["Normal"]))
for part in address_parts:
ptext = '<font size=12>%s</font>' % part.strip()
Story.append(Paragraph(ptext, styles["Normal"]))
Story.append(Spacer(1, 12))
ptext = '<font size=12>Dear %s:</font>' % full_name.split()[0].strip()
Story.append(Paragraph(ptext, styles["Normal"]))
Story.append(Spacer(1, 12))
ptext = '<font size=12>We would like to welcome you to our subscriber base for %s Magazine! \
You will receive %s issues at the excellent introductory price of $%s. Please respond by\
%s to start receiving your subscription and get the following free gift: %s.</font>' % (magName,
issueNum,
subPrice,
limitedDate,
freeGift)
Story.append(Paragraph(ptext, styles["Justify"]))
Story.append(Spacer(1, 12))
ptext = '<font size=12>Thank you very much and we look forward to serving you.</font>'
Story.append(Paragraph(ptext, styles["Justify"]))
Story.append(Spacer(1, 12))
ptext = '<font size=12>Sincerely,</font>'
Story.append(Paragraph(ptext, styles["Normal"]))
Story.append(Spacer(1, 48))
ptext = '<font size=12>Ima Sucker</font>'
Story.append(Paragraph(ptext, styles["Normal"]))
Story.append(Spacer(1, 12))
doc.build(Story)
|
python
|
import logging
import time
import config
logging.basicConfig(level=logging.DEBUG,
stream=open(config.LOGGING.format(int(time.time())), "w", encoding="utf-8"),
format='[%(asctime)s-%(filename)s] [%(levelname)s] %(message)s',
datefmt='%Y %H:%M:%S',
)
global_logger = logging.getLogger(__name__)
|
python
|
from hlt import *
from networking import *
def getValueMap(valueMap, gameMap):
for y in range(gameMap.height):
for x in range(gameMap.width):
valueMap[y][x] = gameMap.getSite(Location(x,y)).production
return valueMap
myID, gameMap = getInit()
valueMap = [ [0 for x in range(gameMap.width)] for y in range(gameMap.height)]
valueMap = getValueMap(valueMap, gameMap)
sendInit("NadjaBot")
def move(location, gameMap, x, y):
this_square = gameMap.getSite(location)
#gameMap.getSite(location
#
#return Move(location,STILL)
return Move(location, worthMovingCheck(sexiestNeighbour(location, gameMap, this_square.owner, this_square.strength, x, y, gameMap.width, gameMap.height), this_square.strength, this_square.production))
#our sexiest neighbour will be the highest production one we can beat
def sexiestNeighbour(location, gameMap, ownerID, myStrength, x, y,w,h):
global valueMap
dirs = [256,256,256,256]
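# dirs[d-1] holds the strength of the neighbour in direction d when we are
# strong enough to capture it; 256 marks a neighbour we cannot take or already own.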
dirsIOwn = []
#Find neighbours we can beat
for d in CARDINALS:
neighbour_site = gameMap.getSite(location, d)
if ownerID == neighbour_site.owner:
dirsIOwn.append((d,neighbour_site.strength))
if (strongerThanYou(myStrength, neighbour_site.strength) and ownerID != neighbour_site.owner):
dirs[d-1] = neighbour_site.strength
if min(dirs) == 256:
if len(dirsIOwn) == 4:
#all the squares in the map are friends!
friendlyChoices = []
for i in dirsIOwn:
friendlyChoices.append(i[0]) #we could go here
viablePals = []
for d in friendlyChoices:
#it's the actual direction by now :lenny-face:
palStrength = gameMap.getSite(location, d).strength
if myStrength + palStrength <= 255:
viablePals.append(d)
if len(viablePals)== 0:
return travelOnMyWaywordSon(location, gameMap, ownerID, myStrength, x, y, w, h)
return getMostValuable(viablePals,x,y,w,h)
else:
return STILL
beatableDirections = []
index = 0
for d in dirs:
if d != 256:
beatableDirections.append(index+1)
index+=1
if len(beatableDirections) == 1:
return beatableDirections[0]
#There's a more complex trade-off to consider here.....
return getMostValuable(beatableDirections, x, y,w,h)
#this function tries to determine which way a block surrounded by friendlies should move
def travelOnMyWaywordSon(location, gameMap, ownerID, myStrength, x, y, w, h):
for y1 in range(gameMap.height):
for x1 in range(gameMap.width):
location1 = Location(x1, y1)
site1 = gameMap.getSite(location1)
if site1.owner != ownerID:
return directionTowardsCoords(x1,y1,x,y,w,h)
return STILL
def directionTowardsCoords(targetX,targetY,x,y,w,h):
diffX = abs(targetX - x)
diffY = abs(targetY - y)
halfwayW = w/2
halfwayH = h/2
# Head along the axis with the larger distance to the target; the halfway
# checks decide which way round the wrapping map to go.
if diffX >= diffY:
if x > halfwayW:
return EAST if targetX > halfwayW else WEST
return WEST if targetX < halfwayW else EAST
if y > halfwayH:
return SOUTH if targetY > halfwayH else NORTH
return NORTH if targetY < halfwayH else SOUTH
def getMostValuable(directionList, x, y,w,h):
global valueMap
mostValuable = 0
chosenValuableDirection = STILL
for d in directionList:
if d == EAST:
val = valueMap[y][(x+1)%w]
if val > mostValuable:
mostValuable = val
chosenValuableDirection = d
if d == WEST:
val = valueMap[y][(x-1)%w]
if val > mostValuable:
mostValuable = val
chosenValuableDirection = d
if d == NORTH:
val = valueMap[(y-1)%h][x]
if val > mostValuable:
mostValuable = val
chosenValuableDirection = d
if d == SOUTH:
val = valueMap[(y+1)%h][x]
if val > mostValuable:
mostValuable = val
chosenValuableDirection = d
return chosenValuableDirection
def worthMovingCheck(direction, siteStrength, siteProduction):
if siteStrength >= siteProduction * 3:
return direction
else:
return STILL
def strongerThanYou(a,b):
return a > b
while True:
moves = []
gameMap = getFrame()
for y in range(gameMap.height):
for x in range(gameMap.width):
location = Location(x, y)
moves.append(move(location, gameMap, x, y))
sendFrame(moves)
|
python
|
#!/usr/bin/env python3
import hmac
import json
import flask
import flask_compress
import flask_httpauth
class Config:
pass
config = Config()
with open("config.json") as f:
config_data = json.load(f)
if config_data["cloud_service"]["type"] != "azure storage datalake":
raise NotImplementedError("unsupported cloud storage type")
config.storage_account_name = config_data["cloud_service"]["account_name"]
config.storage_account_key = config_data["cloud_service"]["account_key"]
config.storage_container = config_data["cloud_service"]["container_name"]
config.auth = dict()
for u in config_data["auth"]:
config.auth[u] = config_data["auth"][u]
app = flask.Flask(__name__)
app.config["COMPRESS_REGISTER"] = False
compress = flask_compress.Compress()
compress.init_app(app)
auth = flask_httpauth.HTTPBasicAuth()
@auth.verify_password
def verify_password(username, password):
if username in config.auth and hmac.compare_digest(password, config.auth[username]):
return username
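# With flask_httpauth, returning the username marks the request as
# authenticated; returning None (implicitly, on a mismatch) rejects it.
# The imports below appear to be side-effect imports, presumably registering
# the WebDAV route handlers on `app` (hence the `noqa` markers).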
import webdav_options # noqa
import webdav_get # noqa
import webdav_propfind # noqa
import webdav_mkcol # noqa
import webdav_delete # noqa
import webdav_put # noqa
|
python
|
# -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, Caleb Bell <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
from __future__ import division
from math import pi
from scipy.constants import inch
__all__ = ['nearest_pipe', 'gauge_from_t', 't_from_gauge', 'wire_schedules']
# Schedules 5, 10, 20, 30, 40, 60, 80, 100, 120, 140, 160 from
# ASME B36.10M - Welded and Seamless Wrought Steel Pipe
# All schedule lists stored in mm, other than NPS.
# i = inner diameter, o = outer diameter, and t = wall thickness in variable names
NPS5 = [0.5, 0.75, 1, 1.25, 1.5, 2, 2.5, 3, 3.5, 4, 5, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 30]
S5i = [18, 23.4, 30.1, 38.9, 45, 57, 68.78, 84.68, 97.38, 110.08, 135.76, 162.76, 213.56, 266.2, 315.88, 347.68, 398.02, 448.62, 498.44, 549.44, 598.92, 749.3]
S5o = [21.3, 26.7, 33.4, 42.2, 48.3, 60.3, 73, 88.9, 101.6, 114.3, 141.3, 168.3, 219.1, 273, 323.8, 355.6, 406.4, 457, 508, 559, 610, 762]
S5t = [1.65, 1.65, 1.65, 1.65, 1.65, 1.65, 2.11, 2.11, 2.11, 2.11, 2.77, 2.77, 2.77, 3.4, 3.96, 3.96, 4.19, 4.19, 4.78, 4.78, 5.54, 6.35]
NPS10 = [0.125, 0.25, 0.375, 0.5, 0.75, 1, 1.25, 1.5, 2, 2.5, 3, 3.5, 4, 5, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36]
S10i = [7.82, 10.4, 13.8, 17.08, 22.48, 27.86, 36.66, 42.76, 54.76, 66.9, 82.8, 95.5, 108.2, 134.5, 161.5, 211.58, 264.62, 314.66, 342.9, 393.7, 444.3, 495.3, 546.3, 597.3, 644.16, 695.16, 746.16, 797.16, 848.16, 898.16]
S10o = [10.3, 13.7, 17.1, 21.3, 26.7, 33.4, 42.2, 48.3, 60.3, 73, 88.9, 101.6, 114.3, 141.3, 168.3, 219.1, 273, 323.8, 355.6, 406.4, 457, 508, 559, 610, 660, 711, 762, 813, 864, 914]
S10t = [1.24, 1.65, 1.65, 2.11, 2.11, 2.77, 2.77, 2.77, 2.77, 3.05, 3.05, 3.05, 3.05, 3.4, 3.4, 3.76, 4.19, 4.57, 6.35, 6.35, 6.35, 6.35, 6.35, 6.35, 7.92, 7.92, 7.92, 7.92, 7.92, 7.92]
NPS20 = [8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36]
S20i = [206.4, 260.3, 311.1, 339.76, 390.56, 441.16, 488.94, 539.94, 590.94, 634.6, 685.6, 736.6, 787.6, 838.6, 888.6]
S20o = [219.1, 273, 323.8, 355.6, 406.4, 457, 508, 559, 610, 660, 711, 762, 813, 864, 914]
S20t = [6.35, 6.35, 6.35, 7.92, 7.92, 7.92, 9.53, 9.53, 9.53, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7]
NPS30 = [0.125, 0.25, 0.375, 0.5, 0.75, 1, 1.25, 1.5, 2, 2.5, 3, 3.5, 4, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 30, 32, 34, 36]
S30i = [7.4, 10, 13.4, 16.48, 21.88, 27.6, 36.26, 41.94, 53.94, 63.44, 79.34, 92.04, 104.74, 205.02, 257.4, 307.04, 336.54, 387.34, 434.74, 482.6, 533.6, 581.46, 679.24, 730.24, 781.24, 832.24, 882.24]
S30o = [10.3, 13.7, 17.1, 21.3, 26.7, 33.4, 42.2, 48.3, 60.3, 73, 88.9, 101.6, 114.3, 219.1, 273, 323.8, 355.6, 406.4, 457, 508, 559, 610, 711, 762, 813, 864, 914]
S30t = [1.45, 1.85, 1.85, 2.41, 2.41, 2.9, 2.97, 3.18, 3.18, 4.78, 4.78, 4.78, 4.78, 7.04, 7.8, 8.38, 9.53, 9.53, 11.13, 12.7, 12.7, 14.27, 15.88, 15.88, 15.88, 15.88, 15.88]
NPS40 = [0.125, 0.25, 0.375, 0.5, 0.75, 1, 1.25, 1.5, 2, 2.5, 3, 3.5, 4, 5, 6, 8, 10, 12, 14, 16, 18, 20, 24, 32, 34, 36]
S40i = [6.84, 9.22, 12.48, 15.76, 20.96, 26.64, 35.08, 40.94, 52.48, 62.68, 77.92, 90.12, 102.26, 128.2, 154.08, 202.74, 254.46, 303.18, 333.34, 381, 428.46, 477.82, 575.04, 778.04, 829.04, 875.9]
S40o = [10.3, 13.7, 17.1, 21.3, 26.7, 33.4, 42.2, 48.3, 60.3, 73, 88.9, 101.6, 114.3, 141.3, 168.3, 219.1, 273, 323.8, 355.6, 406.4, 457, 508, 610, 813, 864, 914]
S40t = [1.73, 2.24, 2.31, 2.77, 2.87, 3.38, 3.56, 3.68, 3.91, 5.16, 5.49, 5.74, 6.02, 6.55, 7.11, 8.18, 9.27, 10.31, 11.13, 12.7, 14.27, 15.09, 17.48, 17.48, 17.48, 19.05]
NPS60 = [8, 10, 12, 14, 16, 18, 20, 22, 24]
S60i = [198.48, 247.6, 295.26, 325.42, 373.08, 418.9, 466.76, 514.54, 560.78]
S60o = [219.1, 273, 323.8, 355.6, 406.4, 457, 508, 559, 610]
S60t = [10.31, 12.7, 14.27, 15.09, 16.66, 19.05, 20.62, 22.23, 24.61]
NPS80 = [0.125, 0.25, 0.375, 0.5, 0.75, 1, 1.25, 1.5, 2, 2.5, 3, 3.5, 4, 5, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24]
S80i = [5.48, 7.66, 10.7, 13.84, 18.88, 24.3, 32.5, 38.14, 49.22, 58.98, 73.66, 85.44, 97.18, 122.24, 146.36, 193.7, 242.82, 288.84, 317.5, 363.52, 409.34, 455.62, 501.84, 548.08]
S80o = [10.3, 13.7, 17.1, 21.3, 26.7, 33.4, 42.2, 48.3, 60.3, 73, 88.9, 101.6, 114.3, 141.3, 168.3, 219.1, 273, 323.8, 355.6, 406.4, 457, 508, 559, 610]
S80t = [2.41, 3.02, 3.2, 3.73, 3.91, 4.55, 4.85, 5.08, 5.54, 7.01, 7.62, 8.08, 8.56, 9.53, 10.97, 12.7, 15.09, 17.48, 19.05, 21.44, 23.83, 26.19, 28.58, 30.96]
NPS100 = [8, 10, 12, 14, 16, 18, 20, 22, 24]
S100i = [188.92, 236.48, 280.92, 307.94, 354.02, 398.28, 442.92, 489.14, 532.22]
S100o = [219.1, 273, 323.8, 355.6, 406.4, 457, 508, 559, 610]
S100t = [15.09, 18.26, 21.44, 23.83, 26.19, 29.36, 32.54, 34.93, 38.89]
NPS120 = [4, 5, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24]
S120i = [92.04, 115.9, 139.76, 182.58, 230.12, 273, 300.02, 344.48, 387.14, 431.8, 476.44, 517.96]
S120o = [114.3, 141.3, 168.3, 219.1, 273, 323.8, 355.6, 406.4, 457, 508, 559, 610]
S120t = [11.13, 12.7, 14.27, 18.26, 21.44, 25.4, 27.79, 30.96, 34.93, 38.1, 41.28, 46.02]
NPS140 = [8, 10, 12, 14, 16, 18, 20, 22, 24]
S140i = [177.86, 222.2, 266.64, 292.1, 333.34, 377.66, 419.1, 463.74, 505.26]
S140o = [219.1, 273, 323.8, 355.6, 406.4, 457, 508, 559, 610]
S140t = [20.62, 25.4, 28.58, 31.75, 36.53, 39.67, 44.45, 47.63, 52.37]
NPS160 = [0.5, 0.75, 1, 1.25, 1.5, 2, 2.5, 3, 4, 5, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24]
S160i = [11.74, 15.58, 20.7, 29.5, 34.02, 42.82, 53.94, 66.64, 87.32, 109.54, 131.78, 173.08, 215.84, 257.16, 284.18, 325.42, 366.52, 407.98, 451.04, 490.92]
S160o = [21.3, 26.7, 33.4, 42.2, 48.3, 60.3, 73, 88.9, 114.3, 141.3, 168.3, 219.1, 273, 323.8, 355.6, 406.4, 457, 508, 559, 610]
S160t = [4.78, 5.56, 6.35, 6.35, 7.14, 8.74, 9.53, 11.13, 13.49, 15.88, 18.26, 23.01, 28.58, 33.32, 35.71, 40.49, 45.24, 50.01, 53.98, 59.54]
# Schedules designated STD, XS, and XXS
NPSSTD = [0.125, 0.25, 0.375, 0.5, 0.75, 1, 1.25, 1.5, 2, 2.5, 3, 3.5, 4, 5, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48]
STDi = [6.84, 9.22, 12.48, 15.76, 20.96, 26.64, 35.08, 40.94, 52.48, 62.68, 77.92, 90.12, 102.26, 128.2, 154.08, 202.74, 254.46, 304.74, 336.54, 387.34, 437.94, 488.94, 539.94, 590.94, 640.94, 691.94, 742.94, 793.94, 844.94, 894.94, 945.94, 996.94, 1047.94, 1098.94, 1148.94, 1199.94]
STDo = [10.3, 13.7, 17.1, 21.3, 26.7, 33.4, 42.2, 48.3, 60.3, 73, 88.9, 101.6, 114.3, 141.3, 168.3, 219.1, 273, 323.8, 355.6, 406.4, 457, 508, 559, 610, 660, 711, 762, 813, 864, 914, 965, 1016, 1067, 1118, 1168, 1219]
STDt = [1.73, 2.24, 2.31, 2.77, 2.87, 3.38, 3.56, 3.68, 3.91, 5.16, 5.49, 5.74, 6.02, 6.55, 7.11, 8.18, 9.27, 9.53, 9.53, 9.53, 9.53, 9.53, 9.53, 9.53, 9.53, 9.53, 9.53, 9.53, 9.53, 9.53, 9.53, 9.53, 9.53, 9.53, 9.53, 9.53]
NPSXS = [0.125, 0.25, 0.375, 0.5, 0.75, 1, 1.25, 1.5, 2, 2.5, 3, 3.5, 4, 5, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48]
XSi = [5.48, 7.66, 10.7, 13.84, 18.88, 24.3, 32.5, 38.14, 49.22, 58.98, 73.66, 85.44, 97.18, 122.24, 146.36, 193.7, 247.6, 298.4, 330.2, 381, 431.6, 482.6, 533.6, 584.6, 634.6, 685.6, 736.6, 787.6, 838.6, 888.6, 939.6, 990.6, 1041.6, 1092.6, 1142.6, 1193.6]
XSo = [10.3, 13.7, 17.1, 21.3, 26.7, 33.4, 42.2, 48.3, 60.3, 73, 88.9, 101.6, 114.3, 141.3, 168.3, 219.1, 273, 323.8, 355.6, 406.4, 457, 508, 559, 610, 660, 711, 762, 813, 864, 914, 965, 1016, 1067, 1118, 1168, 1219]
XSt = [2.41, 3.02, 3.2, 3.73, 3.91, 4.55, 4.85, 5.08, 5.54, 7.01, 7.62, 8.08, 8.56, 9.53, 10.97, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7]
NPSXXS = [0.5, 0.75, 1, 1.25, 1.5, 2, 2.5, 3, 4, 5, 6, 8, 10, 12]
XXSi = [6.36, 11.06, 15.22, 22.8, 28, 38.16, 44.96, 58.42, 80.06, 103.2, 124.4, 174.64, 222.2, 273]
XXSo = [21.3, 26.7, 33.4, 42.2, 48.3, 60.3, 73, 88.9, 114.3, 141.3, 168.3, 219.1, 273, 323.8]
XXSt = [7.47, 7.82, 9.09, 9.7, 10.15, 11.07, 14.02, 15.24, 17.12, 19.05, 21.95, 22.23, 25.4, 25.4]
NPSS5 = [0.5, 0.75, 1, 1.25, 1.5, 2, 2.5, 3, 3.5, 4, 5, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 30]
SS5DN = [15, 20, 25, 32, 40, 50, 65, 80, 90, 100, 125, 150, 200, 250, 300, 350, 400, 450, 500, 550, 600, 750]
SS5i = [18, 23.4, 30.1, 38.9, 45, 57, 68.78, 84.68, 97.38, 110.08, 135.76, 162.76, 213.56, 266.3, 315.98, 347.68, 398.02, 448.62, 498.44, 549.44, 598.92, 749.3]
SS5o = [21.3, 26.7, 33.4, 42.2, 48.3, 60.3, 73, 88.9, 101.6, 114.3, 141.3, 168.3, 219.1, 273.1, 323.9, 355.6, 406.4, 457, 508, 559, 610, 762]
SS5t = [1.65, 1.65, 1.65, 1.65, 1.65, 1.65, 2.11, 2.11, 2.11, 2.11, 2.77, 2.77, 2.77, 3.4, 3.96, 3.96, 4.19, 4.19, 4.78, 4.78, 5.54, 6.35]
# Schedules 10, 40 and 80 from ASME B36.19M - Stainless Steel Pipe
NPSS10 = [0.125, 0.25, 0.375, 0.5, 0.75, 1, 1.25, 1.5, 2, 2.5, 3, 3.5, 4, 5, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 30]
SS10DN = [6, 8, 10, 15, 20, 25, 32, 40, 50, 65, 80, 90, 100, 125, 150, 200, 250, 300, 350, 400, 450, 500, 550, 600, 750]
SS10i = [7.82, 10.4, 13.8, 17.08, 22.48, 27.86, 36.66, 42.76, 54.76, 66.9, 82.8, 95.5, 108.2, 134.5, 161.5, 211.58, 264.72, 314.76, 346.04, 396.84, 447.44, 496.92, 547.92, 597.3, 746.16]
SS10o = [10.3, 13.7, 17.1, 21.3, 26.7, 33.4, 42.2, 48.3, 60.3, 73, 88.9, 101.6, 114.3, 141.3, 168.3, 219.1, 273.1, 323.9, 355.6, 406.4, 457, 508, 559, 610, 762]
SS10t = [1.24, 1.65, 1.65, 2.11, 2.11, 2.77, 2.77, 2.77, 2.77, 3.05, 3.05, 3.05, 3.05, 3.4, 3.4, 3.76, 4.19, 4.57, 4.78, 4.78, 4.78, 5.54, 5.54, 6.35, 7.92]
NPSS40 = [0.125, 0.25, 0.375, 0.5, 0.75, 1, 1.25, 1.5, 2, 2.5, 3, 3.5, 4, 5, 6, 8, 10, 12, 14, 16, 18, 20, 24]
SS40DN = [6, 8, 10, 15, 20, 25, 32, 40, 50, 65, 80, 90, 100, 125, 150, 200, 250, 300, 350, 400, 450, 500, 600]
SS40i = [6.84, 9.22, 12.48, 15.76, 20.96, 26.64, 35.08, 40.94, 52.48, 62.68, 77.92, 90.12, 102.26, 128.2, 154.08, 202.74, 254.56, 304.84, 336.54, 387.34, 437.94, 488.94, 590.94]
SS40o = [10.3, 13.7, 17.1, 21.3, 26.7, 33.4, 42.2, 48.3, 60.3, 73, 88.9, 101.6, 114.3, 141.3, 168.3, 219.1, 273.1, 323.9, 355.6, 406.4, 457, 508, 610]
SS40t = [1.73, 2.24, 2.31, 2.77, 2.87, 3.38, 3.56, 3.68, 3.91, 5.16, 5.49, 5.74, 6.02, 6.55, 7.11, 8.18, 9.27, 9.53, 9.53, 9.53, 9.53, 9.53, 9.53]
NPSS80 = [0.125, 0.25, 0.375, 0.5, 0.75, 1, 1.25, 1.5, 2, 2.5, 3, 3.5, 4, 5, 6, 8, 10, 12, 14, 16, 18, 20, 24]
SS80DN = [6, 8, 10, 15, 20, 25, 32, 40, 50, 65, 80, 90, 100, 125, 150, 200, 250, 300, 350, 400, 450, 500, 600]
SS80i = [5.48, 7.66, 10.7, 13.84, 18.88, 24.3, 32.5, 38.14, 49.22, 58.98, 73.66, 85.44, 97.18, 122.24, 146.36, 193.7, 247.7, 298.5, 330.2, 381, 431.6, 482.6, 584.6]
SS80o = [10.3, 13.7, 17.1, 21.3, 26.7, 33.4, 42.2, 48.3, 60.3, 73, 88.9, 101.6, 114.3, 141.3, 168.3, 219.1, 273.1, 323.9, 355.6, 406.4, 457, 508, 610]
SS80t = [2.41, 3.02, 3.2, 3.73, 3.91, 4.55, 4.85, 5.08, 5.54, 7.01, 7.62, 8.08, 8.56, 9.53, 10.97, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7, 12.7]
schedule_lookup = { '40': (NPS40, S40i, S40o, S40t),
'5': (NPS5, S5i, S5o, S5t),
'10': (NPS10, S10i, S10o, S10t),
'20': (NPS20, S20i, S20o, S20t),
'30': (NPS30, S30i, S30o, S30t),
'60': (NPS60, S60i, S60o, S60t),
'80': (NPS80, S80i, S80o, S80t),
'100': (NPS100, S100i, S100o, S100t),
'120': (NPS120, S120i, S120o, S120t),
'140': (NPS140, S140i, S140o, S140t),
'160': (NPS160, S160i, S160o, S160t),
'STD': (NPSSTD, STDi, STDo, STDt),
'XS': (NPSXS, XSi, XSo, XSt),
'XXS': (NPSXXS, XXSi, XXSo, XXSt),
'5S': (NPSS5, SS5i, SS5o, SS5t),
'10S': (NPSS10, SS10i, SS10o, SS10t),
'40S': (NPSS40, SS40i, SS40o, SS40t),
'80S': (NPSS80, SS80i, SS80o, SS80t)}
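# Each schedule maps to (NPS list, inner diameters, outer diameters, wall
# thicknesses); NPS values are dimensionless and the other three lists are in
# mm, e.g. schedule_lookup['40'] -> (NPS40, S40i, S40o, S40t).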
def nearest_pipe(Do=None, Di=None, NPS=None, schedule='40'):
r'''Searches for and finds the nearest standard pipe size to a given
specification. Acceptable inputs are:
- Nominal pipe size
- Nominal pipe size and schedule
- Outer diameter `Do`
- Outer diameter `Do` and schedule
- Inner diameter `Di`
- Inner diameter `Di` and schedule
Acceptable schedules are: '5', '10', '20', '30', '40', '60', '80', '100',
'120', '140', '160', 'STD', 'XS', 'XXS', '5S', '10S', '40S', '80S'.
Parameters
----------
Do : float
Pipe outer diameter, [m]
Di : float
Pipe inner diameter, [m]
NPS : float
Nominal pipe size, [-]
schedule : str
String representing schedule size
Returns
-------
NPS : float
Nominal pipe size, [-]
Di : float
Pipe inner diameter, [m]
Do : float
Pipe outer diameter, [m]
t : float
Pipe wall thickness, [m]
Notes
-----
Internal units within this function are mm.
The imperial schedules are not quite identical to these values, but
all rounding differences happen at the sub-0.1 mm level.
Examples
--------
>>> nearest_pipe(Di=0.021)
(1, 0.02664, 0.0334, 0.0033799999999999998)
>>> nearest_pipe(Do=.273, schedule='5S')
(10, 0.26630000000000004, 0.2731, 0.0034)
References
----------
.. [1] American National Standards Institute, and American Society of
Mechanical Engineers. B36.10M-2004: Welded and Seamless Wrought Steel
Pipe. New York: American Society of Mechanical Engineers, 2004.
.. [2] American National Standards Institute, and American Society of
Mechanical Engineers. B36-19M-2004: Stainless Steel Pipe.
New York, N.Y.: American Society of Mechanical Engineers, 2004.
'''
if Di:
Di *= 1E3
if Do:
Do *= 1E3
if NPS:
NPS = float(NPS)
def Di_lookup(Di, NPSes, Dis, Dos, ts):
for i in range(len(Dis)): # Go up ascending list; once larger than specified, return
if Dis[-1] < Di:
return None
if Dis[i] >= Di:
_nps, _di, _do, _t = NPSes[i], Dis[i], Dos[i], ts[i]
return (_nps, _di, _do, _t)
raise Exception('Di lookup failed')
def Do_lookup(Do, NPSes, Dis, Dos, ts):
for i in range(len(Dos)): # Go up ascending list; once larger than specified, return
if Dos[-1] < Do:
return None
if Dos[i] >= Do:
_nps, _di, _do, _t = NPSes[i], Dis[i], Dos[i], ts[i]
return (_nps, _di, _do, _t)
raise Exception('Do lookup failed')
def NPS_lookup(NPS, NPSes, Dis, Dos, ts):
for i in range(len(NPSes)): # Go up ascending list; once larger than specified, return
if NPSes[i] == NPS:
_nps, _di, _do, _t = NPSes[i], Dis[i], Dos[i], ts[i]
return (_nps, _di, _do, _t)
raise Exception('NPS not in list')
# If accidentally given an numerical schedule, convert it to a string
schedule_type = type(schedule)
if schedule_type in (int, float):
schedule = str(int(schedule))
if schedule not in schedule_lookup:
raise ValueError('Schedule not recognized')
else:
NPSes, Dis, Dos, ts = schedule_lookup[schedule]
# Handle the three cases of different inputs
if Di:
nums = Di_lookup(Di, NPSes, Dis, Dos, ts)
elif Do:
nums = Do_lookup(Do, NPSes, Dis, Dos, ts)
elif NPS:
nums = NPS_lookup(NPS, NPSes, Dis, Dos, ts)
if nums is None:
raise ValueError('Pipe input is larger than max of selected schedule')
_nps, _di, _do, _t = nums
return _nps, _di/1E3, _do/1E3, _t/1E3
### Wire gauge schedules
# Stub's Steel Wire Gage
SSWG_integers = list(range(1, 81))
SSWG_inch = [0.227, 0.219, 0.212, 0.207, 0.204, 0.201, 0.199, 0.197, 0.194,
0.191, 0.188, 0.185, 0.182, 0.18, 0.178, 0.175, 0.172, 0.168,
0.164, 0.161, 0.157, 0.155, 0.153, 0.151, 0.148, 0.146, 0.143,
0.139, 0.134, 0.127, 0.12, 0.115, 0.112, 0.11, 0.108, 0.106,
0.103, 0.101, 0.099, 0.097, 0.095, 0.092, 0.088, 0.085, 0.081,
0.079, 0.077, 0.075, 0.072, 0.069, 0.066, 0.063, 0.058, 0.055,
0.05, 0.045, 0.042, 0.041, 0.04, 0.039, 0.038, 0.037, 0.036,
0.035, 0.033, 0.032, 0.031, 0.03, 0.029, 0.027, 0.026, 0.024,
0.023, 0.022, 0.02, 0.018, 0.016, 0.015, 0.014, 0.013]
SSWG_SI = [round(i*inch, 7) for i in SSWG_inch] # 7 decimals for equal conversion
# British Standard Wire Gage (Imperial Wire Gage)
BSWG_integers = [0.143, .167, 0.2, 0.25, 0.33, 0.5] + list(range(51))
BSWG_inch = [0.5, 0.464, 0.432, 0.4, 0.372, 0.348, 0.324, 0.3, 0.276, 0.252, 0.232,
0.212, 0.192, 0.176, 0.16, 0.144, 0.128, 0.116, 0.104, 0.092, 0.08,
0.072, 0.064, 0.056, 0.048, 0.04, 0.036, 0.032, 0.028, 0.024, 0.022,
0.02, 0.018, 0.0164, 0.0149, 0.0136, 0.0124, 0.0116, 0.0108, 0.01,
0.0092, 0.0084, 0.0076, 0.0068, 0.006, 0.0052, 0.0048, 0.0044, 0.004,
0.0036, 0.0032, 0.0028, 0.0024, 0.002, 0.0016, 0.0012, 0.001]
BSWG_SI = [round(i*inch,8) for i in BSWG_inch] # 8 decimals for equal conversion
# Music Wire Gauge
MWG_integers = [.167, 0.2, 0.25, 0.33, 0.5] + list(range(46))
MWG_inch = [0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.011, 0.012,
0.013, 0.014, 0.016, 0.018, 0.02, 0.022, 0.024, 0.026, 0.029,
0.031, 0.033, 0.035, 0.037, 0.039, 0.041, 0.043, 0.045, 0.047,
0.049, 0.051, 0.055, 0.059, 0.063, 0.067, 0.071, 0.075, 0.08,
0.085, 0.09, 0.095, 0.1, 0.106, 0.112, 0.118, 0.124, 0.13, 0.138,
0.146, 0.154, 0.162, 0.17, 0.18]
MWG_SI = [round(i*inch,7) for i in MWG_inch] # 7 decimals for equal conversion
# Scale gets bigger instead of smaller; reverse for convenience
MWG_integers.reverse()
MWG_inch.reverse()
MWG_SI.reverse()
# Steel Wire Gage -Also Washburn & Moen gage, American Steel gage;
# Wire Co.gage; Roebling Wire Gages.
SWG_integers = [0.143, .167, 0.2, 0.25, 0.33, 0.5] + list(range(51))
SWG_inch = [0.49, 0.4615, 0.4305, 0.3938, 0.3625, 0.331, 0.3065, 0.283, 0.2625,
0.2437, 0.2253, 0.207, 0.192, 0.177, 0.162, 0.1483, 0.135, 0.1205,
0.1055, 0.0915, 0.08, 0.072, 0.0625, 0.054, 0.0475, 0.041, 0.0348,
0.0318, 0.0286, 0.0258, 0.023, 0.0204, 0.0181, 0.0173, 0.0162,
0.015, 0.014, 0.0132, 0.0128, 0.0118, 0.0104, 0.0095, 0.009,
0.0085, 0.008, 0.0075, 0.007, 0.0066, 0.0062, 0.006, 0.0058,
0.0055, 0.0052, 0.005, 0.0048, 0.0046, 0.0044]
SWG_SI = [round(i*inch,8) for i in SWG_inch] # 8 decimals for equal conversion
# American Wire or Brown & Sharpe Gage
AWG_integers = [.167, 0.2, 0.25, 0.33, 0.5] + list(range(51))
AWG_inch = [0.58, 0.5165, 0.46, 0.4096, 0.3648, 0.3249, 0.2893, 0.2576, 0.2294,
0.2043, 0.1819, 0.162, 0.1443, 0.1285, 0.1144, 0.1019, 0.0907,
0.0808, 0.072, 0.0641, 0.0571, 0.0508, 0.0453, 0.0403, 0.0359,
0.032, 0.0285, 0.0253, 0.0226, 0.0201, 0.0179, 0.0159, 0.0142,
0.0126, 0.0113, 0.01, 0.00893, 0.00795, 0.00708, 0.0063, 0.00561,
0.005, 0.00445, 0.00396, 0.00353, 0.00314, 0.0028, 0.00249,
0.00222, 0.00198, 0.00176, 0.00157, 0.0014, 0.00124, 0.00111,
0.00099]
AWG_SI = [round(i*inch,9) for i in AWG_inch] # 9 decimals for equal conversion
# Birmingham or Stub's Iron Wire Gage
BWG_integers = [0.2, 0.25, 0.33, 0.5] + list(range(37))
BWG_inch = [0.5, 0.454, 0.425, 0.38, 0.34, 0.3, 0.284, 0.259, 0.238, 0.22,
0.203, 0.18, 0.165, 0.148, 0.134, 0.12, 0.109, 0.095, 0.083,
0.072, 0.065, 0.058, 0.049, 0.042, 0.035, 0.032, 0.028, 0.025,
0.022, 0.02, 0.018, 0.016, 0.014, 0.013, 0.012, 0.01, 0.009,
0.008, 0.007, 0.005, 0.004]
BWG_SI = [round(i*inch,6) for i in BWG_inch]
wire_schedules = {'BWG': (BWG_integers, BWG_inch, BWG_SI, True),
'AWG': (AWG_integers, AWG_inch, AWG_SI, True),
'SWG': (SWG_integers, SWG_inch, SWG_SI, True),
'MWG': (MWG_integers, MWG_inch, MWG_SI, False),
'BSWG': (BSWG_integers, BSWG_inch, BSWG_SI, True),
'SSWG': (SSWG_integers, SSWG_inch, SSWG_SI, True)}
def gauge_from_t(t, SI=True, schedule='BWG'):
r'''Looks up the gauge of a given wire thickness of given schedule.
Values are all non-linear, and tabulated internally.
Parameters
----------
t : float
Thickness, [m]
SI : bool, optional
If False, requires that the thickness is given in inches not meters
schedule : str
Gauge schedule, one of 'BWG', 'AWG', 'SWG', 'MWG', 'BSWG', or 'SSWG'
Returns
-------
gauge : float-like
Wire Gauge, [-]
Notes
-----
An internal variable, tol, is used in the selection of the wire gauge. If
the next smaller wire gauge is within 10% of the difference between it and
the previous wire gauge, the smaller wire gauge is selected. Accordingly,
this function can return a gauge with a thickness smaller than desired
in some circumstances.
* Birmingham Wire Gauge (BWG) ranges from 0.2 (0.5 inch) to 36 (0.004 inch).
* American Wire Gauge (AWG) ranges from 0.167 (0.58 inch) to 51 (0.00099
inch). These are used for electrical wires.
* Steel Wire Gauge (SWG) ranges from 0.143 (0.49 inch) to 51 (0.0044 inch).
Also called Washburn & Moen wire gauge, American Steel gauge, Wire Co.
gauge, and Roebling wire gauge.
* Music Wire Gauge (MWG) ranges from 0.167 (0.004 inch) to 46 (0.18
inch). Also called Piano Wire Gauge.
* British Standard Wire Gage (BSWG) ranges from 0.143 (0.5 inch) to
51 (0.001 inch). Also called Imperial Wire Gage (IWG).
* Stub's Steel Wire Gage (SSWG) ranges from 1 (0.227 inch) to 80 (0.013 inch).
Examples
--------
>>> gauge_from_t(.5, SI=False, schedule='BWG')
0.2
References
----------
.. [1] Oberg, Erik, Franklin D. Jones, and Henry H. Ryffel. Machinery's
Handbook. Industrial Press, Incorporated, 2012.
'''
tol = 0.1
# Handle units
if SI:
t_inch = round(t/inch, 9) # all schedules are in inches
else:
t_inch = t
# Get the schedule
try:
sch_integers, sch_inch, sch_SI, decreasing = wire_schedules[schedule]
except KeyError:
raise ValueError('Wire gauge schedule not found')
# Check if outside limits
sch_max, sch_min = sch_inch[0], sch_inch[-1]
if t_inch > sch_max:
raise ValueError('Input thickness is above the largest in the selected schedule')
# If given thickness is exactly in the index, be happy
if t_inch in sch_inch:
gauge = sch_integers[sch_inch.index(t_inch)]
else:
for i in range(len(sch_inch)):
if sch_inch[i] >= t_inch:
larger = sch_inch[i]
else:
break
if larger == sch_min:
gauge = sch_integers[-1] # If t is under the smallest thickness, return the smallest gauge
else:
smaller = sch_inch[i]
if (t_inch - smaller) <= tol*(larger - smaller):
gauge = sch_integers[i]
else:
gauge = sch_integers[i-1]
return gauge
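# A minimal doctest-style sketch of the tolerance rule described in the Notes
# above, using the BWG table defined in this module (values are illustrative):
# 0.13 inch falls between BWG 10 (0.134 inch) and BWG 11 (0.120 inch); the gap
# to the thinner gauge exceeds 10% of the spacing, so the thicker gauge 10 is kept.
# 0.121 inch lies within 10% of the spacing above BWG 11, so the thinner gauge 11
# is returned even though it is slightly under the requested thickness.
#
# >>> gauge_from_t(0.13, SI=False, schedule='BWG')
# 10
# >>> gauge_from_t(0.121, SI=False, schedule='BWG')
# 11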
def t_from_gauge(gauge, SI=True, schedule='BWG'):
r'''Looks up the thickness of a given wire gauge of given schedule.
Values are all non-linear, and tabulated internally.
Parameters
----------
gauge : float-like
Wire Gauge, [-]
SI : bool, optional
If False, will return a thickness in inches not meters
schedule : str
Gauge schedule, one of 'BWG', 'AWG', 'SWG', 'MWG', 'BSWG', or 'SSWG'
Returns
-------
t : float
Thickness, [m]
Notes
-----
* Birmingham Wire Gauge (BWG) ranges from 0.2 (0.5 inch) to 36 (0.004 inch).
* American Wire Gauge (AWG) ranges from 0.167 (0.58 inch) to 51 (0.00099
inch). These are used for electrical wires.
* Steel Wire Gauge (SWG) ranges from 0.143 (0.49 inch) to 51 (0.0044 inch).
Also called Washburn & Moen wire gauge, American Steel gauge, Wire Co.
gauge, and Roebling wire gauge.
* Music Wire Gauge (MWG) ranges from 0.167 (0.004 inch) to 46 (0.18
inch). Also called Piano Wire Gauge.
* British Standard Wire Gage (BSWG) ranges from 0.143 (0.5 inch) to
51 (0.001 inch). Also called Imperial Wire Gage (IWG).
* Stub's Steel Wire Gage (SSWG) ranges from 1 (0.227 inch) to 80 (0.013 inch).
Examples
--------
>>> t_from_gauge(.2, False, 'BWG')
0.5
References
----------
.. [1] Oberg, Erik, Franklin D. Jones, and Henry H. Ryffel. Machinery's
Handbook. Industrial Press, Incorporated, 2012.
'''
try:
sch_integers, sch_inch, sch_SI, decreasing = wire_schedules[schedule]
except KeyError:
raise ValueError("Wire gauge schedule not found; supported gauges are "
"'BWG', 'AWG', 'SWG', 'MWG', 'BSWG', and 'SSWG'.")
try:
i = sch_integers.index(gauge)
except ValueError:
raise ValueError('Input gauge not found in selected schedule')
if SI:
return sch_SI[i] # returns thickness in m
else:
return sch_inch[i] # returns thickness in inch
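# A small round-trip sketch (illustrative, using the BWG table above): looking
# up a gauge's thickness and feeding it back should recover the same gauge,
# since exact thickness matches are resolved by list index.
#
# >>> t_from_gauge(10, SI=False, schedule='BWG')
# 0.134
# >>> gauge_from_t(0.134, SI=False, schedule='BWG')
# 10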
|
python
|
# Copyright 2009-2013 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from django import forms
from django.db.models import Q
from django.shortcuts import render
from rfdoc.rfdocapp.models import Keyword
def search(request):
search_performed = False
kws = []
if request.method == 'POST':
form = SearchForm(request.POST)
if form.is_valid():
term = form.cleaned_data['search_term']
version = form.cleaned_data['search_version']
query = Q(name__icontains = term)
if form.cleaned_data['include_doc']:
query = query | Q(doc__icontains = term)
# SQLite LIKEs are case-insensitive by default.
# Thus using just name__contains wouldn't work expectedly.
# To circumvent this, an additional query using regular expressions
# is applied for the case-sensitive searches.
if not form.cleaned_data['case_insensitive']:
query = Q(name__regex = r'.*%s.*' % re.escape(term))
if form.cleaned_data['include_doc']:
query = query | Q(doc__regex = r'.*%s.*' % re.escape(term))
kws = Keyword.objects.filter(query)
if version:
version = re.escape(version).replace(r'\?', '.').replace(r'\*', '.*')
kws = kws.filter(library__version__regex=r'^%s$' % version)
search_performed = True
else:
form = SearchForm()
return render(request, 'search.html', {
'form': form,
'kws': kws,
'search_performed': search_performed
}
)
class SearchForm(forms.Form):
search_term = forms.CharField(error_messages={'required': 'Search term is required!'})
search_version = forms.CharField(required=False)
include_doc = forms.BooleanField(required=False, initial=True)
case_insensitive = forms.BooleanField(required=False, initial=True)
|
python
|
from connection import Connection
import viewer.page
def general_information(url: str, filtered_word=None) -> dict:
con = Connection()
soup = con.get(url).soup
return {
'Imslp Link': url,
'Genre Categories': viewer.page.genre_categories(soup),
'Work Title': viewer.page.work_title(soup),
'Name Translations': viewer.page.name_translations(soup),
'Name Aliases': viewer.page.name_aliases(soup),
'Composer': viewer.page.composer(soup),
'Catalogue Number': viewer.page.catalogue_number(soup),
'Catalogue': viewer.page.catalogue(soup), # Opus or B
'Catalogic Number': viewer.page.catalogic_number(soup), # the number only after opus or B
'I-Catalogue Number': viewer.page.i_catalogue_number(soup),
'Key': viewer.page.key(soup),
'Movements/Sections': viewer.page.movements_or_sections(soup, filtered_word), # if is list, list[0] is amount of sections, others are details.
'Year/Date of Composition': viewer.page.year_or_date_of_composition(soup),
'First Publication': viewer.page.first_publication(soup),
'Composer Time Period': viewer.page.composer_time_period(soup),
'Piece Style': viewer.page.piece_style(soup),
'Instrumentation': viewer.page.instrumentation(soup)
}
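# Hedged usage sketch; the URL below is a placeholder and the call requires the
# `connection` and `viewer.page` helpers imported by this module:
# info = general_information('https://imslp.org/wiki/<work page>')
# print(info['Work Title'], info['Key'])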
|
python
|
import fileinput
import re
lists = [['fileListGetter.py', 'fileListGetter', ['directory', '_nsns'], [], 'def fileListGetter(directory, _nsns): """ Function to get list of files and language types Inputs: directory: Stirng containing path to search for files in. Outputs: List of Lists. Lists are of format, [filename, language type] """'], ['fileListGetter.py', 'getLanguageType', ['file_extention'], [], 'def getLanguageType(file_extention): """ Function to assign language type based on file extention. Input: file_extention: String that lists file type. Output: languageType: string listsing identified language type. """'], ['fileListGetter.py', 'printFileLists', ['fileLists'], [], 'def printFileLists(fileLists): """ Function to print out the contents of fileLists Main use is debugging """']]
comments = ""
comment = ""
# Collect each function name (the second element of every entry in `lists`)
functions = []
newitem = ""
for i in lists:
newitem = i[1]
functions.append(newitem)
combined = "(" + "|".join(functions) + ")"
comments = comments + "# Functions\n"
for function in functions:
comments = comments + "\n##"+ (function) + "\n"
with open("readme.md") as openfile:
# search for function name, for each instance
for line in openfile:
comment = ""
# if the function name appears in a comma-separated pair with another function
match = re.search(combined + ", " + "\\b"+re.escape(function)+"\\b",line)
if match==None:
match = re.search("\\b"+re.escape(function)+"\\b" + ", " + combined,line)
if match==None:
# if the function name starts the line
match = re.search("^" + "\\b"+re.escape(function)+"\\b" + "[,\" :-]*[*]*[,\" :-]*(.*)",line)
if match:
comment = match.group(1) + "\n"
else:
# otherwise, match the function name anywhere in the line
match = re.search("\\b"+re.escape(function)+"\\b" + "[,\" :-]*[*]*[,\" :-]*(.*)",line)
if match:
comment = match.group(1) + "\n"
comments = comments + comment
comments = comments + "\n"
output = "externaldocresults.md"
with open(output, "w") as text_file:
text_file.write(comments)
|
python
|
# Generated by Django 3.1.7 on 2021-07-07 17:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('messenger', '0023_auto_20210615_0956'),
]
operations = [
migrations.AlterModelOptions(
name='message',
options={'ordering': ('-date_sent', '-date_created')},
),
migrations.AddField(
model_name='messagelog',
name='sid',
field=models.CharField(blank=True, max_length=255),
),
]
|
python
|
import sys
import numpy as np
import cv2 as cv2
import time
import yolov2tiny
def open_video_with_opencv(in_video_path, out_video_path):
#
# This function takes input and output video path and open them.
#
# Your code from here. You may clear the comments.
#
#raise NotImplementedError('open_video_with_opencv is not implemented yet')
# Open an object of input video using cv2.VideoCapture.
cap = cv2.VideoCapture(in_video_path)
# Open an object of output video using cv2.VideoWriter.
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
videoWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
videoHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
videoFPS = int(cap.get(cv2.CAP_PROP_FPS))
out = cv2.VideoWriter(out_video_path, fourcc, videoFPS, (videoWidth, videoHeight))
# Return the video objects and anything you want for further process.
return cap, out, videoWidth, videoHeight
def resize_input(im):
imsz = cv2.resize(im, (yolov2tiny.in_width, yolov2tiny.in_height))
imsz = imsz / 255.
imsz = imsz[:, :, ::-1]
return np.asarray(imsz, dtype=np.float32)
def video_object_detection(in_video_path, out_video_path, proc="cpu", onnx_path="./y2t_weights.onnx"):
#
# This function runs the inference for each frame and creates the output video.
#
# Your code from here. You may clear the comments.
#
#raise NotImplementedError('video_object_detection is not implemented yet')
# Open video using open_video_with_opencv.
cap, out, video_width, video_height = open_video_with_opencv(in_video_path, out_video_path)
# Check if video is opened. Otherwise, exit.
if not cap.isOpened():
sys.exit()
# Create an instance of the YOLO_V2_TINY class. Pass the dimension of
# the input, a path to weight file, and which device you will use as arguments.
input_dim = [1, yolov2tiny.in_height, yolov2tiny.in_width, 3]
y2t = yolov2tiny.YOLO2_TINY(input_dim, onnx_path, proc)
# Start the main loop. For each frame of the video, the loop must do the followings:
# 1. Do the inference.
# 2. Run postprocessing using the inference result, accumulate them through the video writer object.
# The coordinates from postprocessing are calculated according to resized input; you must adjust
# them to fit into the original video.
# 3. Measure the end-to-end time and the time spent only for inferencing.
# 4. Save the intermediate values for the first layer.
# Note that your input must be adjusted to fit into the algorithm,
# including resizing the frame and changing the dimension.
is_first_frame = True
elapse_end_2_end = 0.
elapse_inference = 0.
elapse_end_2_end_start = time.time()
while (cap.isOpened()):
ret, frame = cap.read()
if ret == True:
frame = resize_input(frame)
expanded_frame = np.expand_dims(frame, 0)
elapse_inference_start = time.time()
nodes, out_tensors = y2t.inference(expanded_frame)
elapse_inference += (time.time() - elapse_inference_start)
frame = yolov2tiny.postprocessing(out_tensors[-1], frame)
frame = np.uint8(frame * 255)
frame = frame[:, :, ::-1]
frame = cv2.resize(frame, (video_width, video_height))
if is_first_frame:
for i, out_tensor in enumerate(out_tensors):
np.save("intermediate/layer_" + str(i) + ".npy", out_tensor)
is_first_frame = False
out.write(frame)
else:
break
elapse_end_2_end += (time.time() - elapse_end_2_end_start)
# Report the inference performance: end-to-end elapsed time and inference-only time,
# plus how many frames were processed per second.
print("end-to-end elapsed time: ", elapse_end_2_end)
print("inference elapsed time: ", elapse_inference)
print("frames processed per second: ", cap.get(cv2.CAP_PROP_FRAME_COUNT) / elapse_end_2_end)
# Release the opened videos.
cap.release()
out.release()
cv2.destroyAllWindows()
def main():
if len(sys.argv) < 3:
print(
"Usage: python3 __init__.py [in_video.mp4] [out_video.mp4] ([cpu|gpu])")
sys.exit()
in_video_path = sys.argv[1]
out_video_path = sys.argv[2]
if len(sys.argv) == 4:
proc = sys.argv[3]
else:
proc = "cpu"
video_object_detection(in_video_path, out_video_path, proc)
if __name__ == "__main__":
main()
|
python
|
"""
Server receiver of the message
"""
import socket
from ..constants import *
class UDPMessageReceiver:
def __init__(self, port=PORT):
self.__port = port
def receive_message(self):
# Create the server socket
# UDP socket
s = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
# Bind the socket to our local address
s.bind((SERVER_HOST, self.__port))
# Receive a single datagram on the bound socket; recvfrom returns (data, address)
received = s.recvfrom(BUFFER_SIZE)[0].decode()
s.close()
return received
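# Minimal usage sketch (assumes SERVER_HOST, PORT, and BUFFER_SIZE come from
# ..constants and that a sender transmits a datagram to this host):
# receiver = UDPMessageReceiver()
# message = receiver.receive_message()  # blocks until a datagram arrives
# print(message)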
|
python
|
#!/usr/bin/env python
# Reference: https://github.com/schedutron/CPAP/blob/master/Chap2/sleep_serv.py
from socket import *
HOST = ''
PORT = 1145
BUFSIZ = 1024
ADDR = (HOST, PORT)
with socket(AF_INET, SOCK_STREAM) as s:
s.bind(ADDR)
s.listen(5)
clnt, addr = s.accept()
print(f'Connected to {addr}.')
with clnt:
while True:
msg = clnt.recv(BUFSIZ)
if not msg: break
sec = msg.decode()
msg = f"sleep({sec})"
tup = addr + (sec, )
print(f"%s:%s 请求睡眠 %s 秒。" % tup)
clnt.send(msg.encode())
print(f"{addr} 断开。")
|
python
|
from .CDx import *
__version__ = '0.0.30'
|
python
|
$ make -C doc/sphinx html
|
python
|
# Create by Packetsss
# Personal use is allowed
# Commercial use is prohibited
import random
from player import Player
from genotype import Genotype
class Population:
def __init__(self, size):
self.players = [Player(box2d[i]) for i in range(size)]
self.generation = 1
self.fitness_sum = 0
self.best_player_no = 0
self.found_winner = False
self.balls_sunk = 0
def update(self):
for i in range(len(self.players)):
self.players[i].update()
def calculate_fitness(self):
for i in range(len(self.players)):
self.players[i].calculate_fitness(self.balls_sunk)
self.fitness_sum = 0
for i in range(len(self.players)):
self.fitness_sum += self.players[i].fitness
self.set_best_player()
def set_fitness_sum(self):
self.fitness_sum = 0
for i in range(len(self.players)):
self.fitness_sum += self.players[i].fitness
def set_best_player(self):
max_fitness = 0
max_index = 0
for i in range(len(self.players)):
if self.players[i].fitness > max_fitness:
max_fitness = self.players[i].fitness
max_index = i
self.best_player_no = max_index
if self.players[max_index].won:
self.found_winner = True
for i in range(len(self.players)):
self.players[i].reset()
self.balls_sunk = self.players[max_index].balls_sunk()
reset_worlds()
for i in range(len(self.players)):
self.players[i] = self.players[max_index].clone(box2d[i])
self.increase_shots()
self.generation += 1
def select_player(self):
# Fitness-proportionate (roulette wheel) selection over the summed fitness
rand = random.uniform(0, self.fitness_sum)
running_sum = 0
for i in range(len(self.players)):
running_sum += self.players[i].fitness
if running_sum > rand:
return self.players[i]
def mutate(self):
for i in range(1, len(self.players)):
self.players[i].DNA.mutate()
def increase_shots(self):
for i in range(1, len(self.players)):
self.players[i].dna.increase_shot_length()
def natural_selection(self):
reset_worlds()
new_players = [Player(box2d[i]) for i in range(len(self.players))]
new_players[0] = self.players[self.best_player_no].clone(box2d[0])
for i in range(1, len(self.players)):
new_players[i] = self.select_player().clone(box2d[i])
new_players[i].DNA.mutate()
self.players = new_players
self.generation += 1
def done(self):
for i in range(1, len(self.players)):
if not self.players[i].game_over or not self.players[i].balls_stopped():
return False
return True
|
python
|