max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---|
tests/modules/vim.py | MrFishFinger/powerline | 11,435 | 82700 |
# vim:fileencoding=utf-8:noet
_log = []
vars = {}
vvars = {'version': 703}
_tabpage = 0
_mode = 'n'
_buf_purge_events = set()
options = {
'paste': 0,
'ambiwidth': 'single',
'columns': 80,
'encoding': 'utf-8',
}
_last_bufnr = 0
_highlights = {}
from collections import defaultdict as _defaultdict
_environ = _defaultdict(lambda: '')
del _defaultdict
_thread_id = None
def _set_thread_id():
global _thread_id
from threading import current_thread
_thread_id = current_thread().ident
# Assuming import is done from the main thread
_set_thread_id()
def _print_log():
for item in _log:
print(item)
_log[:] = ()
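# Helper used to wrap every emulated Vim API function below: it records each
# call in _log and raises if the call comes from a thread other than the one
# that imported this module, mimicking Vim's single-threaded Python API.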
def _vim(func):
from functools import wraps
from threading import current_thread
@wraps(func)
def f(*args, **kwargs):
global _thread_id
if _thread_id != current_thread().ident:
raise RuntimeError('Accessing vim from separate threads is not allowed')
_log.append((func.__name__, args))
return func(*args, **kwargs)
return f
def _unicode(func):
from functools import wraps
import sys
if sys.version_info < (3,):
return func
@wraps(func)
def f(*args, **kwargs):
from powerline.lib.unicode import u
ret = func(*args, **kwargs)
if isinstance(ret, bytes):
ret = u(ret)
return ret
return f
class _Buffers(object):
@_vim
def __init__(self):
self.d = {}
@_vim
def __len__(self):
return len(self.d)
@_vim
def __getitem__(self, item):
return self.d[item]
@_vim
def __setitem__(self, item, value):
self.d[item] = value
@_vim
def __iter__(self):
return iter(self.d.values())
@_vim
def __contains__(self, item):
return item in self.d
@_vim
def _keys(self):
return self.d.keys()
@_vim
def _pop(self, *args, **kwargs):
return self.d.pop(*args, **kwargs)
buffers = _Buffers()
class _ObjList(object):
@_vim
def __init__(self, objtype):
self.l = []
self.objtype = objtype
@_vim
def __getitem__(self, item):
return self.l[item - int(item > 0)]
@_vim
def __len__(self):
return len(self.l)
@_vim
def __iter__(self):
return iter(self.l)
@_vim
def _pop(self, idx):
obj = self.l.pop(idx - 1)
for moved_obj in self.l[idx - 1:]:
moved_obj.number -= 1
return obj
@_vim
def _append(self, *args, **kwargs):
return self.l.append(*args, **kwargs)
@_vim
def _new(self, *args, **kwargs):
number = len(self) + 1
new_obj = self.objtype(number, *args, **kwargs)
self._append(new_obj)
return new_obj
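# Under Python 3, values coming back from Vim's bindeval() are bytes rather
# than str; _construct_result below recursively encodes strings (including the
# contents of lists and dicts) so the mock matches that behaviour.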
def _construct_result(r):
import sys
if sys.version_info < (3,):
return r
else:
if isinstance(r, str):
return r.encode('utf-8')
elif isinstance(r, list):
return [_construct_result(i) for i in r]
elif isinstance(r, dict):
return dict((
(_construct_result(k), _construct_result(v))
for k, v in r.items()
))
return r
def _str_func(func):
from functools import wraps
@wraps(func)
def f(*args, **kwargs):
return _construct_result(func(*args, **kwargs))
return f
def _log_print():
import sys
for entry in _log:
sys.stdout.write(repr(entry) + '\n')
_current_group = None
_on_wipeout = []
@_vim
def command(cmd):
global _current_group
cmd = cmd.lstrip()
if cmd.startswith('let g:'):
import re
varname, value = re.compile(r'^let g:(\w+)\s*=\s*(.*)').match(cmd).groups()
vars[varname] = value
elif cmd.startswith('hi '):
sp = cmd.split()
_highlights[sp[1]] = sp[2:]
elif cmd.startswith('augroup'):
augroup = cmd.partition(' ')[2]
if augroup.upper() == 'END':
_current_group = None
else:
_current_group = augroup
elif cmd.startswith('autocmd'):
rest = cmd.partition(' ')[2]
auevent, rest = rest.partition(' ')[::2]
pattern, aucmd = rest.partition(' ')[::2]
if auevent != 'BufWipeout' or pattern != '*':
raise NotImplementedError
import sys
if sys.version_info < (3,):
if not aucmd.startswith(':python '):
raise NotImplementedError
else:
if not aucmd.startswith(':python3 '):
raise NotImplementedError
_on_wipeout.append(aucmd.partition(' ')[2])
elif cmd.startswith('set '):
if cmd.startswith('set statusline='):
options['statusline'] = cmd[len('set statusline='):]
elif cmd.startswith('set tabline='):
options['tabline'] = cmd[len('set tabline='):]
else:
raise NotImplementedError(cmd)
else:
raise NotImplementedError(cmd)
@_vim
@_unicode
def eval(expr):
if expr.startswith('g:'):
return vars[expr[2:]]
elif expr.startswith('v:'):
return vvars[expr[2:]]
elif expr.startswith('&'):
return options[expr[1:]]
elif expr.startswith('$'):
return _environ[expr[1:]]
elif expr.startswith('PowerlineRegisterCachePurgerEvent'):
_buf_purge_events.add(expr[expr.find('"') + 1:expr.rfind('"') - 1])
return '0'
elif expr.startswith('exists('):
return '0'
elif expr.startswith('getwinvar('):
import re
match = re.match(r'^getwinvar\((\d+), "(\w+)"\)$', expr)
if not match:
raise NotImplementedError(expr)
winnr = int(match.group(1))
varname = match.group(2)
return _emul_getwinvar(winnr, varname)
elif expr.startswith('has_key('):
import re
match = re.match(r'^has_key\(getwinvar\((\d+), ""\), "(\w+)"\)$', expr)
if match:
winnr = int(match.group(1))
varname = match.group(2)
return 0 + (varname in current.tabpage.windows[winnr].vars)
else:
match = re.match(r'^has_key\(gettabwinvar\((\d+), (\d+), ""\), "(\w+)"\)$', expr)
if not match:
raise NotImplementedError(expr)
tabnr = int(match.group(1))
winnr = int(match.group(2))
varname = match.group(3)
return 0 + (varname in tabpages[tabnr].windows[winnr].vars)
elif expr == 'getbufvar("%", "NERDTreeRoot").path.str()':
import os
assert os.path.basename(current.buffer.name).startswith('NERD_tree_')
return '/usr/include'
elif expr.startswith('getbufvar('):
import re
match = re.match(r'^getbufvar\((\d+), ["\'](.+)["\']\)$', expr)
if not match:
raise NotImplementedError(expr)
bufnr = int(match.group(1))
varname = match.group(2)
return _emul_getbufvar(bufnr, varname)
elif expr == 'tabpagenr()':
return current.tabpage.number
elif expr == 'tabpagenr("$")':
return len(tabpages)
elif expr.startswith('tabpagewinnr('):
tabnr = int(expr[len('tabpagewinnr('):-1])
return tabpages[tabnr].window.number
elif expr.startswith('tabpagebuflist('):
import re
match = re.match(r'tabpagebuflist\((\d+)\)\[(\d+)\]', expr)
tabnr = int(match.group(1))
winnr = int(match.group(2)) + 1
return tabpages[tabnr].windows[winnr].buffer.number
elif expr.startswith('gettabwinvar('):
import re
match = re.match(r'gettabwinvar\((\d+), (\d+), "(\w+)"\)', expr)
tabnr = int(match.group(1))
winnr = int(match.group(2))
varname = match.group(3)
return tabpages[tabnr].windows[winnr].vars[varname]
elif expr.startswith('type(function('):
import re
match = re.match(r'^type\(function\("([^"]+)"\)\) == 2$', expr)
if not match:
raise NotImplementedError(expr)
return 0
raise NotImplementedError(expr)
@_vim
def bindeval(expr):
if expr == 'g:':
return vars
elif expr == '{}':
return {}
elif expr == '[]':
return []
import re
match = re.compile(r'^function\("([^"\\]+)"\)$').match(expr)
if match:
return globals()['_emul_' + match.group(1)]
else:
raise NotImplementedError
@_vim
@_str_func
def _emul_mode(*args):
if args and args[0]:
return _mode
else:
return _mode[0]
@_vim
@_str_func
def _emul_getbufvar(bufnr, varname):
import re
if varname[0] == '&':
if bufnr == '%':
bufnr = current.buffer.number
if bufnr not in buffers:
return ''
try:
return buffers[bufnr].options[varname[1:]]
except KeyError:
try:
return options[varname[1:]]
except KeyError:
return ''
elif re.match('^[a-zA-Z_]+$', varname):
if bufnr == '%':
bufnr = current.buffer.number
if bufnr not in buffers:
return ''
return buffers[bufnr].vars[varname]
raise NotImplementedError
@_vim
@_str_func
def _emul_getwinvar(winnr, varname):
return current.tabpage.windows[winnr].vars.get(varname, '')
@_vim
def _emul_setwinvar(winnr, varname, value):
current.tabpage.windows[winnr].vars[varname] = value
@_vim
def _emul_virtcol(expr):
if expr == '.':
return current.window.cursor[1] + 1
if isinstance(expr, list) and len(expr) == 3:
return expr[-2] + expr[-1]
raise NotImplementedError
_v_pos = None
@_vim
def _emul_getpos(expr):
if expr == '.':
return [0, current.window.cursor[0] + 1, current.window.cursor[1] + 1, 0]
if expr == 'v':
return _v_pos or [0, current.window.cursor[0] + 1, current.window.cursor[1] + 1, 0]
raise NotImplementedError
@_vim
@_str_func
def _emul_fnamemodify(path, modstring):
import os
_modifiers = {
'~': lambda path: path.replace(os.environ['HOME'].encode('utf-8'), b'~') if path.startswith(os.environ['HOME'].encode('utf-8')) else path,
'.': lambda path: (lambda tpath: path if tpath[:3] == b'..' + os.sep.encode() else tpath)(os.path.relpath(path)),
't': lambda path: os.path.basename(path),
'h': lambda path: os.path.dirname(path),
}
for mods in modstring.split(':')[1:]:
path = _modifiers[mods](path)
return path
@_vim
@_str_func
def _emul_expand(expr):
global _abuf
if expr == '<abuf>':
return _abuf or current.buffer.number
raise NotImplementedError
@_vim
def _emul_bufnr(expr):
if expr == '$':
return _last_bufnr
raise NotImplementedError
@_vim
def _emul_exists(ident):
if ident.startswith('g:'):
return ident[2:] in vars
elif ident.startswith(':'):
return 0
raise NotImplementedError
@_vim
def _emul_line2byte(line):
buflines = current.buffer._buf_lines
if line == len(buflines) + 1:
return sum((len(s) for s in buflines)) + 1
raise NotImplementedError
@_vim
def _emul_line(expr):
cursorline = current.window.cursor[0] + 1
numlines = len(current.buffer._buf_lines)
if expr == 'w0':
return max(cursorline - 5, 1)
if expr == 'w$':
return min(cursorline + 5, numlines)
raise NotImplementedError
@_vim
@_str_func
def _emul_strtrans(s):
# FIXME Do more replaces
return s.replace(b'\xFF', b'<ff>')
@_vim
@_str_func
def _emul_bufname(bufnr):
try:
return buffers[bufnr]._name or b''
except KeyError:
return b''
_window_id = 0
class _Window(object):
def __init__(self, number, buffer=None, cursor=(1, 0), width=80):
global _window_id
self.cursor = cursor
self.width = width
self.number = number
if buffer:
if type(buffer) is _Buffer:
self.buffer = buffer
else:
self.buffer = _Buffer(**buffer)
else:
self.buffer = _Buffer()
_window_id += 1
self._window_id = _window_id
self.options = {}
self.vars = {
'powerline_window_id': self._window_id,
}
def __repr__(self):
return '<window ' + str(self.number - 1) + '>'
class _Tabpage(object):
def __init__(self, number):
self.windows = _ObjList(_Window)
self.number = number
def _new_window(self, **kwargs):
self.window = self.windows._new(**kwargs)
return self.window
def _close_window(self, winnr, open_window=True):
curwinnr = self.window.number
win = self.windows._pop(winnr)
if self.windows and winnr == curwinnr:
self.window = self.windows[-1]
elif open_window:
current.tabpage._new_window()
return win
def _close(self):
global _tabpage
while self.windows:
self._close_window(1, False)
tabpages._pop(self.number)
_tabpage = len(tabpages)
tabpages = _ObjList(_Tabpage)
_abuf = None
class _Buffer(object):
def __init__(self, name=None):
global _last_bufnr
_last_bufnr += 1
bufnr = _last_bufnr
self.number = bufnr
# FIXME Use unicode() for python-3
self.name = name
self.vars = {'changedtick': 1}
self.options = {
'modified': 0,
'readonly': 0,
'fileformat': 'unix',
'filetype': '',
'buftype': '',
'fileencoding': 'utf-8',
'textwidth': 80,
}
self._buf_lines = ['']
self._undostate = [self._buf_lines[:]]
self._undo_written = len(self._undostate)
buffers[bufnr] = self
@property
def name(self):
import sys
if sys.version_info < (3,):
return self._name
else:
return str(self._name, 'utf-8') if self._name else None
@name.setter
def name(self, name):
if name is None:
self._name = None
else:
import os
if type(name) is not bytes:
name = name.encode('utf-8')
if b':/' in name:
self._name = name
else:
self._name = os.path.abspath(name)
def __getitem__(self, line):
return self._buf_lines[line]
def __setitem__(self, line, value):
self.options['modified'] = 1
self.vars['changedtick'] += 1
self._buf_lines[line] = value
from copy import copy
self._undostate.append(copy(self._buf_lines))
def __setslice__(self, *args):
self.options['modified'] = 1
self.vars['changedtick'] += 1
self._buf_lines.__setslice__(*args)
from copy import copy
self._undostate.append(copy(self._buf_lines))
def __getslice__(self, *args):
return self._buf_lines.__getslice__(*args)
def __len__(self):
return len(self._buf_lines)
def __repr__(self):
return '<buffer ' + str(self.name) + '>'
def __del__(self):
global _abuf
bufnr = self.number
try:
import __main__
except ImportError:
pass
except RuntimeError:
# Module may have already been garbage-collected
pass
else:
if _on_wipeout:
_abuf = bufnr
try:
for event in _on_wipeout:
exec(event, __main__.__dict__)
finally:
_abuf = None
class _Current(object):
@property
def buffer(self):
return self.window.buffer
@property
def window(self):
return self.tabpage.window
@property
def tabpage(self):
return tabpages[_tabpage - 1]
current = _Current()
_dict = None
@_vim
def _init():
global _dict
if _dict:
return _dict
_dict = {}
for varname, value in globals().items():
if varname[0] != '_':
_dict[varname] = value
_tabnew()
return _dict
@_vim
def _get_segment_info():
mode_translations = {
chr(ord('V') - 0x40): '^V',
chr(ord('S') - 0x40): '^S',
}
mode = _mode
mode = mode_translations.get(mode, mode)
window = current.window
buffer = current.buffer
tabpage = current.tabpage
return {
'window': window,
'winnr': window.number,
'buffer': buffer,
'bufnr': buffer.number,
'tabpage': tabpage,
'tabnr': tabpage.number,
'window_id': window._window_id,
'mode': mode,
'encoding': options['encoding'],
}
@_vim
def _launch_event(event):
pass
@_vim
def _start_mode(mode):
global _mode
if mode == 'i':
_launch_event('InsertEnter')
elif _mode == 'i':
_launch_event('InsertLeave')
_mode = mode
@_vim
def _undo():
if len(current.buffer._undostate) == 1:
return
buffer = current.buffer
buffer._undostate.pop(-1)
buffer._buf_lines = buffer._undostate[-1]
if buffer._undo_written == len(buffer._undostate):
buffer.options['modified'] = 0
@_vim
def _edit(name=None):
if current.buffer.name is None:
buffer = current.buffer
buffer.name = name
else:
buffer = _Buffer(name)
current.window.buffer = buffer
@_vim
def _tabnew(name=None):
global windows
global _tabpage
tabpage = tabpages._new()
windows = tabpage.windows
_tabpage = len(tabpages)
_new(name)
return tabpage
@_vim
def _new(name=None):
current.tabpage._new_window(buffer={'name': name})
@_vim
def _split():
current.tabpage._new_window(buffer=current.buffer)
@_vim
def _close(winnr, wipe=True):
win = current.tabpage._close_window(winnr)
if wipe:
for w in current.tabpage.windows:
if w.buffer.number == win.buffer.number:
break
else:
_bw(win.buffer.number)
@_vim
def _bw(bufnr=None):
bufnr = bufnr or current.buffer.number
winnr = 1
for win in current.tabpage.windows:
if win.buffer.number == bufnr:
_close(winnr, wipe=False)
winnr += 1
buffers._pop(bufnr)
if not buffers:
_Buffer()
_b(max(buffers._keys()))
@_vim
def _b(bufnr):
current.window.buffer = buffers[bufnr]
@_vim
def _set_cursor(line, col):
current.window.cursor = (line, col)
if _mode == 'n':
_launch_event('CursorMoved')
elif _mode == 'i':
_launch_event('CursorMovedI')
@_vim
def _get_buffer():
return current.buffer
@_vim
def _set_bufoption(option, value, bufnr=None):
buffers[bufnr or current.buffer.number].options[option] = value
if option == 'filetype':
_launch_event('FileType')
class _WithNewBuffer(object):
def __init__(self, func, *args, **kwargs):
self.call = lambda: func(*args, **kwargs)
def __enter__(self):
self.call()
self.bufnr = current.buffer.number
return _get_segment_info()
def __exit__(self, *args):
_bw(self.bufnr)
@_vim
def _set_dict(d, new, setfunc=None):
if not setfunc:
def setfunc(k, v):
d[k] = v
old = {}
na = []
for k, v in new.items():
try:
old[k] = d[k]
except KeyError:
na.append(k)
setfunc(k, v)
return old, na
class _WithBufOption(object):
def __init__(self, **new):
self.new = new
def __enter__(self):
self.buffer = current.buffer
self.old = _set_dict(self.buffer.options, self.new, _set_bufoption)[0]
def __exit__(self, *args):
self.buffer.options.update(self.old)
class _WithMode(object):
def __init__(self, new):
self.new = new
def __enter__(self):
self.old = _mode
_start_mode(self.new)
return _get_segment_info()
def __exit__(self, *args):
_start_mode(self.old)
class _WithDict(object):
def __init__(self, d, **new):
self.new = new
self.d = d
def __enter__(self):
self.old, self.na = _set_dict(self.d, self.new)
def __exit__(self, *args):
self.d.update(self.old)
for k in self.na:
self.d.pop(k)
class _WithSplit(object):
def __enter__(self):
_split()
def __exit__(self, *args):
_close(2, wipe=False)
class _WithBufName(object):
def __init__(self, new):
self.new = new
def __enter__(self):
import os
buffer = current.buffer
self.buffer = buffer
self.old = buffer.name
buffer.name = self.new
def __exit__(self, *args):
self.buffer.name = self.old
class _WithNewTabPage(object):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def __enter__(self):
self.tab = _tabnew(*self.args, **self.kwargs)
def __exit__(self, *args):
self.tab._close()
class _WithGlobal(object):
def __init__(self, **kwargs):
self.kwargs = kwargs
def __enter__(self):
self.empty = object()
self.old = dict(((key, globals().get(key, self.empty)) for key in self.kwargs))
globals().update(self.kwargs)
def __exit__(self, *args):
for k, v in self.old.items():
if v is self.empty:
globals().pop(k, None)
else:
globals()[k] = v
@_vim
def _with(key, *args, **kwargs):
if key == 'buffer':
return _WithNewBuffer(_edit, *args, **kwargs)
elif key == 'bufname':
return _WithBufName(*args, **kwargs)
elif key == 'mode':
return _WithMode(*args, **kwargs)
elif key == 'bufoptions':
return _WithBufOption(**kwargs)
elif key == 'options':
return _WithDict(options, **kwargs)
elif key == 'globals':
return _WithDict(vars, **kwargs)
elif key == 'wvars':
return _WithDict(current.window.vars, **kwargs)
elif key == 'environ':
return _WithDict(_environ, **kwargs)
elif key == 'split':
return _WithSplit()
elif key == 'tabpage':
return _WithNewTabPage(*args, **kwargs)
elif key == 'vpos':
return _WithGlobal(_v_pos=[0, kwargs['line'], kwargs['col'], kwargs['off']])
class error(Exception):
pass
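# Illustrative only (not part of the original mock): a test that has this file
# importable as `vim_mock` could drive it roughly like
#
#     import vim_mock
#     vim_mock._init()                      # sets up the first tab page, window and buffer
#     with vim_mock._with('mode', 'i') as segment_info:
#         assert segment_info['mode'] == 'i'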
|
database/msg/load.py | atztogo/spglib | 131 | 82702 |
<reponame>atztogo/spglib
from pathlib import Path
import csv
from ruamel.yaml import YAML
def get_msg_numbers():
all_datum = []
with open(Path(__file__).resolve().parent / "msg_numbers.csv", 'r') as f:
reader = csv.reader(f, delimiter=',')
next(reader) # skip header
for row in reader:
if len(row) == 0:
break
litvin_number, bns_number, og_number, uni_number = row
all_datum.append((
int(litvin_number),
bns_number,
og_number,
int(uni_number),
))
assert len(all_datum) == 1651
return all_datum
def get_spg_table():
all_datum = {}
with open(Path("../spg.csv"), 'r') as f:
reader = csv.reader(f, delimiter=',')
for row in reader:
hall_number, choice, number, hall_symbol = int(row[0]), row[2], int(row[4]), row[6]
all_datum[hall_number] = {
'choice': choice,
'number': number,
'hall_symbol': hall_symbol,
}
assert len(all_datum) == 530
return all_datum
def get_msg_table():
# Load MSG for ITA standard settings
with open(Path("./magnetic_hall_symbols.yaml"), 'r') as f:
all_datum = dict(YAML().load(f))
return all_datum
|
sys/debug/struct.py | pj1031999/mimiker | 185 | 82710 |
<filename>sys/debug/struct.py
import gdb
import re
from .utils import cast, relpath
def cstr(val):
try:
return val.string()
except gdb.MemoryError:
return '[bad-ptr 0x%x]' % val.address
def enum(v):
return v.type.target().fields()[int(v)].name
class ProgramCounter():
def __init__(self, pc):
self.pc = cast(pc, 'unsigned long')
def __str__(self):
if self.pc == 0:
return 'null'
line = gdb.execute('info line *0x%x' % self.pc, to_string=True)
m = re.match(r'Line (\d+) of "(.*)"', line)
if m:
lnum, path = m.groups()
return '%s:%s' % (relpath(path), lnum)
else:
return '0x%x' % self.pc
class GdbStructBase():
def __init__(self, obj):
self._obj = obj
def to_string(self):
return str(self)
def dump(self):
res = ['%s = %s' % (field, getattr(self, field))
for field in self._obj.type]
return '\n'.join(res)
def display_hint(self):
return 'map'
class GdbStructMeta(type):
def __new__(cls, name, bases, dct):
t = gdb.lookup_type(dct['__ctype__'])
# for each field of ctype make property getter of the same name
for f in t.fields():
def mkgetter(fname, caster):
if caster is None:
return lambda x: x._obj[fname]
# use cast function if available
def _caster(x):
val = x._obj[fname]
try:
return caster(val)
except gdb.MemoryError:
return '[bad-ptr: 0x%x]' % val.address
return _caster
caster = None
if '__cast__' in dct:
caster = dct['__cast__'].get(f.name, None)
dct[f.name] = property(mkgetter(f.name, caster))
# classes created with GdbStructMeta will inherit from GdbStructBase
return super().__new__(cls, name, (GdbStructBase,) + bases, dct)
class BinTime(metaclass=GdbStructMeta):
__ctype__ = 'struct bintime'
__cast__ = {'sec': int, 'frac': int}
def as_float(self):
return float(self.sec) + float(self.frac) / 2**64
def __str__(self):
return 'bintime{%.6f}' % self.as_float()
class List():
def __init__(self, lst, field):
self.lst = lst
self.field = field
def __iter__(self):
item = self.lst['lh_first']
while item != 0:
item = item.dereference()
yield item
item = item[self.field]['le_next']
class TailQueue():
def __init__(self, tq, field):
self.tq = tq
self.field = field
def __iter__(self):
item = self.tq['tqh_first']
while item != 0:
item = item.dereference()
yield item
item = item[self.field]['tqe_next']
class LinkerSet():
def __init__(self, name, typ):
self.start = gdb.parse_and_eval('(%s **)&__start_set_%s' % (typ, name))
self.stop = gdb.parse_and_eval('(%s **)&__stop_set_%s' % (typ, name))
def __iter__(self):
item = self.start
while item < self.stop:
yield item.dereference().dereference()
item = item + 1
|
scale/queue/migrations/0006_auto_20160316_1625.py | kaydoh/scale | 121 | 82736 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('queue', '0005_queue_node_required'),
]
operations = [
migrations.RemoveField(
model_name='queuedepthbyjobtype',
name='job_type',
),
migrations.DeleteModel(
name='QueueDepthByJobType',
),
migrations.DeleteModel(
name='QueueDepthByPriority',
),
]
|
src/nvis/data.py | 29ayush/simple_dqn | 767 | 82756 |
<filename>src/nvis/data.py
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import h5py
import numpy as np
def convert_rgb_to_bokehrgba(img_data, downsample=1):
"""
Convert RGB image to two-dimensional array of RGBA values (encoded as 32-bit integers)
(required by Bokeh). The functionality is currently not available in Bokeh.
An issue was raised here: https://github.com/bokeh/bokeh/issues/1699 and this function is a
modified version of the suggested solution.
Arguments:
img_data: img (ndarray, shape: [N, M, 3], dtype: uint8): image data
dh: height of image
dw: width of image
Returns:
img (ndarray): 2D image array of RGBA values
"""
if img_data.dtype != np.uint8:
raise NotImplementedError
if img_data.ndim != 3:
raise NotImplementedError
# downsample for render performance, v-flip since plot origin is bottom left
# img_data = np.transpose(img_data, (1,2,0))
img_data = img_data[::-downsample, ::downsample, :]
img_h, img_w, C = img_data.shape
# add an alpha channel to the image and recast from pixels of u8u8u8u8 to u32
#bokeh_img = np.dstack([img_data, 255 * np.ones((img_h, img_w), np.uint8)])
#final_image = bokeh_img.reshape(img_h, img_w * (C+1)).view(np.uint32)
# put last 3 frames into separate color channels and add alpha channel
bokeh_img = np.dstack([img_data[:,:,1], img_data[:,:,2], img_data[:,:,3], 255 * np.ones((img_h, img_w), np.uint8)])
final_image = bokeh_img.reshape(img_h, img_w * 4).view(np.uint32)
return final_image
def h5_deconv_data(f):
"""
Read deconv visualization data from hdf5 file.
Returns:
list of lists. Each inner list represents one layer, and consists of
tuples (fm, deconv_data)
"""
ret = list()
if 'deconv' not in f.keys():
return None
act_data = f['deconv/max_act']
img_data = f['deconv/img']
for layer in act_data.keys():
layer_data = list()
for fm in range(act_data[layer]['vis'].shape[0]):
# to avoid storing entire dataset, imgs are cached as needed, have to look up
batch_ind, img_ind = act_data[layer]['batch_img'][fm]
img_store = img_data['batch_{}'.format(batch_ind)]
img_cache_ofs = img_store.attrs[str(img_ind)]
# have to convert from rgb to rgba and cast as uint32 dtype for bokeh
plot_img = convert_rgb_to_bokehrgba(img_store['HWC_uint8'][:, :, :, img_cache_ofs])
plot_deconv = convert_rgb_to_bokehrgba(act_data[layer]['vis'][fm])
layer_data.append((fm, plot_deconv, plot_img))
ret.append((layer, layer_data))
return ret
|
tinyboot.py | kragen/stoneknifeforth | 322 | 82762 |
<gh_stars>100-1000
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tiny bootstrapping interpreter for the first bootstrap stage.
Implements an extremely minimal Forth-like language, used to write
tinyboot1.tbf1.
The theory is that first we ‘compile’ the program by reading through
it to find compile-time definitions and actions, which sets up the
initial state of memory; then we ‘run’ the program by directly
interpreting its text, given that initial state.
"""
import sys, cgitb
cgitb.enable(format='text')
def debug(text):
sys.stderr.write(text + "\n")
start_address = None
memory = [] # a list of bytes represented as integers
stack = []
rstack = []
### Compile-time actions.
# Note that these should leave program_counter pointing after the last
# byte they consume, i.e. they should eat_byte the last byte they
# consume.
program_counter = 0
def current_byte():
return program[program_counter]
def eat_byte():
global program_counter
current_byte = program[program_counter]
program_counter += 1
return current_byte
def advance_past_whitespace():
while program_counter < len(program) and current_byte() in ' \n':
eat_byte()
def advance_to_whitespace():
while program_counter < len(program) and current_byte() not in ' \n':
eat_byte()
def get_token():
advance_past_whitespace()
# XXX and on EOF?
rv = current_byte()
if rv not in "0123456789'": advance_to_whitespace()
return rv
def eat_comment():
comment_start = program_counter
while eat_byte() != ')': pass
jump_targets[comment_start] = program_counter
def push_dataspace_label(n):
return lambda: stack.append(n)
def define(name, action):
assert name not in run_time_dispatch, name
run_time_dispatch[name] = action
def dataspace_label():
"Define a label in data space."
name = get_token()
define(name, push_dataspace_label(len(memory)))
def call_function(n):
def rv():
global program_counter
rstack.append(program_counter)
program_counter = n
return rv
def define_function():
name = get_token()
define(name, call_function(program_counter))
def read_number():
start = program_counter
while eat_byte() in '0123456789': pass
return int(program[start:program_counter])
def literal_byte():
advance_past_whitespace()
memory.append(read_number())
def as_bytes(num):
"Convert a 32-byte number into a little-endian byte sequence."
return [num & 255, num >> 8 & 255, num >> 16 & 255, num >> 24 & 255]
def literal_word():
"Compile a little-endian literal 32-byte number into data space."
advance_past_whitespace()
memory.extend(as_bytes(read_number()))
def allocate_space():
advance_past_whitespace()
memory.extend([0] * read_number())
def set_start_address():
global start_address
start_address = program_counter
def nop(): pass
def skip_literal_byte():
eat_byte() # skip the '
eat_byte() # skip the character itself
# We have to find the backwards jump targets for loops while scanning
# forward. Otherwise we’d have to find them by scanning backwards,
# and you can’t correctly skip comments that way, since comments don’t
# nest.
jump_targets = {}
def start_conditional():
stack.append(program_counter)
def end_conditional():
jump_targets[stack.pop()] = program_counter
start_loop = start_conditional
def end_loop():
jump_targets[program_counter] = stack.pop()
compile_time_dispatch = {
'(': eat_comment,
'v': dataspace_label,
':': define_function,
'b': literal_byte,
'#': literal_word,
'*': allocate_space,
'^': set_start_address,
'[': start_conditional, ']': end_conditional,
'{': start_loop, '}': end_loop,
' ': eat_byte, '\n': eat_byte,
"'": skip_literal_byte,
}
for digit in '0123456789': compile_time_dispatch[digit] = read_number
def tbfcompile():
while program_counter < len(program):
token = get_token()
if token in compile_time_dispatch:
compile_time_dispatch[token]()
elif token in run_time_dispatch:
pass # ignore things from run-time for now
else:
excerpt_beginning = max(0, program_counter - 30)
assert False, '%r not defined at %r' % \
(token, program[excerpt_beginning:program_counter])
# To ensure the loop test condition hits the EOF instead of
# get_token() choking on it:
advance_past_whitespace()
### Run-time actions.
# Execution should pretty much stay inside of functions, and we
# shouldn't run into any compile-time actions there, right?
def write_out():
"Given an address and a count, write out some memory to stdout."
count = stack.pop()
address = stack.pop()
debug('writing address %d, count %d' % (address, count))
sys.stdout.write(''.join([chr(memory[ii])
for ii in range(address, address+count)]))
def quit():
sys.exit(0)
def subtract():
x = stack.pop()
stack.append((stack.pop() - x) & 0xFfffFfff)
def push_literal():
stack.append(read_number())
def decode(bytes):
rv = bytes[0] | bytes[1] << 8 | bytes[2] << 16 | bytes[3] << 24
if rv > 0x7fffFfff:
rv -= 0x100000000
return rv
def fetch():
addr = stack.pop()
stack.append(decode(memory[addr:addr+4]))
def extend_memory(addr):
# Addresses > 100k are probably just a bug; it’s not practical to
# run large programs with this interpreter anyway.
if len(memory) < addr + 1 and addr < 100000:
memory.extend([0] * (addr + 1 - len(memory)))
def store():
addr = stack.pop()
extend_memory(addr)
memory[addr:addr+4] = as_bytes(stack.pop())
def store_byte():
addr = stack.pop()
extend_memory(addr)
memory[addr] = stack.pop() & 255
def less_than():
b = stack.pop()
a = stack.pop()
if a < b:
stack.append(1)
else:
stack.append(0)
def return_from_function():
global program_counter
program_counter = rstack.pop()
def read_byte():
byte = sys.stdin.read(1)
if byte == '':
stack.append(-1)
else:
stack.append(ord(byte))
def jump():
global program_counter
program_counter = jump_targets[program_counter]
def conditional():
if stack.pop(): return
jump()
def loop():
if not stack.pop(): return
jump()
def literal_byte():
# you put 'A into your program to get 65, or 'B to get 66, etc.
eat_byte() # to skip the '
stack.append(ord(eat_byte()))
run_time_dispatch = {
'(': jump,
'W': write_out,
'G': read_byte,
'Q': quit,
'-': subtract,
'<': less_than,
'@': fetch,
'!': store,
# 'f': fetch_byte, not yet needed
's': store_byte,
';': return_from_function,
'[': conditional, ']': nop,
'{': nop, '}': loop,
' ': nop, '\n': nop,
"'": literal_byte,
}
for digit in '0123456789': run_time_dispatch[digit] = push_literal
def tbfrun():
assert start_address is not None
global program_counter
program_counter = start_address
while True:
run_time_dispatch[get_token()]()
def main(infile):
global program
program = infile.read()
tbfcompile()
tbfrun()
assert False, "tbfrun returned"
if __name__ == '__main__': main(file(sys.argv[1]))
|
tools/can_async_example.py | deafloo/ODrive | 1,068 | 82780 |
import can
bus1 = can.interface.Bus('can0', bustype='virtual')
bus2 = can.interface.Bus('can0', bustype='virtual')
msg1 = can.Message(arbitration_id=0xabcde, data=[1,2,3])
bus1.send(msg1)
msg2 = bus2.recv()
print(hex(msg1.arbitration_id))
print(hex(msg2.arbitration_id))
assert msg1.arbitration_id == msg2.arbitration_id
|
tests/test_functions/http_flask_render_template/main.py | Daniel-Sanche/functions-framework-python | 479 | 82781 |
<gh_stars>100-1000
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Function used in Worker tests of handling HTTP functions."""
from flask import render_template
def function(request):
"""Test HTTP function whose behavior depends on the given mode.
The function returns a success, a failure, or throws an exception, depending
on the given mode.
Args:
request: The HTTP request which triggered this function. Must contain name
of the requested mode in the 'mode' field in JSON document in request
body.
Returns:
Value and status code defined for the given mode.
Raises:
Exception: Thrown when requested in the incoming mode specification.
"""
if request.args and "message" in request.args:
message = request.args.get("message")
elif request.get_json() and "message" in request.get_json():
message = request.get_json()["message"]
else:
message = "Hello World!"
return render_template("hello.html", name=message)
|
File_organiser/clean_folder.py | elawang9/Scripting-and-Web-Scraping | 119 | 82789 |
<filename>File_organiser/clean_folder.py<gh_stars>100-1000
from colorama import Fore
import os
all_paths=[]
dir_name = input( 'Enter the name of directory you want to clear: ')
extension = set()
def source_path(dir_name):
for root in os.walk("/home"):
if dir_name == root[0].split('/')[-1]:
all_paths.append(root[0])
for i in range(len(all_paths)):
print()
print("{}. {}".format(i+1,all_paths[i]))
if len(all_paths) == 0:
print(Fore.LIGHTRED_EX + 'No directory found')
exit()
choice = int(input('\nEnter the option number: '))
if choice < 1 or choice > len(all_paths):
print(Fore.LIGHTRED_EX +'Wrong choice entered')
exit()
else:
path = all_paths[choice-1]
return path
def print_before(path):
print("Cleaning {} located at {}\n".format(path.split('/')[-1],path))
print(Fore.LIGHTBLUE_EX + "Before cleaning\n" + Fore.RESET)
for files in os.listdir(path):
print(files,end='\t')
print()
def destination_path(path):
os.chdir(path)
for f in os.listdir():
name = (os.path.splitext(f))[0]
ext = (os.path.splitext(f))[1]
extension.add(ext[1:])
new_dir = "New" + path.split('/')[-1]
new_dir_path = os.path.join(path,new_dir)
if not os.path.exists(new_dir_path):
os.mkdir(new_dir_path)
return new_dir_path,new_dir
def organise(new_dir_path,new_dir,path):
for ext in extension:
folder = os.path.join(new_dir_path,ext)
if not os.path.exists(folder):
os.mkdir(folder)
if ext !='':
for f in os.listdir():
if os.path.splitext(f)[1].strip('.') == ext:
os.rename(f,os.path.join(folder,f))
else:
for f in os.listdir():
if f!=new_dir and os.path.splitext(f)[1].strip('.') == ext:
print(f)
inner_folder = os.path.join(new_dir_path,f)
if os.path.exists(inner_folder):
os.chdir(os.path.join(path,f))
for file in os.listdir():
new_path = os.path.join(inner_folder,file)
os.rename(file,new_path)
os.rmdir(os.path.join(path,f))
else:
os.rename(f,inner_folder)
def print_after(path):
print(Fore.LIGHTBLUE_EX + "\nAfter cleaning\n" + Fore.RESET)
for files in os.listdir(path):
print(files,end='\t')
print(Fore.LIGHTMAGENTA_EX + "\n\nCLEANED\n" + Fore.RESET)
def file_manage():
path = source_path(dir_name)
print_before(path)
new_dir_path, new_dir = destination_path(path)
organise(new_dir_path, new_dir,path)
print_after(path)
file_manage()
|
exercises/ja/solution_02_10_01.py | Jette16/spacy-course | 2,085 | 82801 |
<reponame>Jette16/spacy-course<filename>exercises/ja/solution_02_10_01.py
import spacy
nlp = spacy.load("ja_core_news_md")
doc1 = nlp("暖かい夏の日です")
doc2 = nlp("外は晴れています")
# doc1とdoc2の類似度を取得
similarity = doc1.similarity(doc2)
print(similarity)
|
quantecon/tests/test_inequality.py | Smit-create/QuantEcon.py | 1,462 | 82803 |
"""
Tests for inequality.py
"""
import numpy as np
from numpy.testing import assert_allclose, assert_raises
from scipy.stats import linregress
from quantecon import lorenz_curve, gini_coefficient, \
shorrocks_index, rank_size
def test_lorenz_curve():
"""
Tests `lorenz` function, which calculates the lorenz curve
An income distribution where everyone has almost the same wealth should
be similar to a straight line
An income distribution where one person has almost the wealth should
be flat and then shoot straight up when it approaches one
"""
n = 3000
# Almost Equal distribution
y = np.repeat(1, n) + np.random.normal(scale=0.0001, size=n)
cum_people, cum_income = lorenz_curve(y)
assert_allclose(cum_people, cum_income, rtol=1e-03)
# Very uneven distribution
y = np.repeat(0.001, n)
y[4] = 100000
pop_cum, income_cum = lorenz_curve(y)
expected_income_cum = np.repeat(0., n + 1)
expected_income_cum[-1] = 1.
assert_allclose(expected_income_cum, income_cum, atol=1e-4)
def test_gini_coeff():
"""
Tests how the function `gini_coefficient` calculates the Gini coefficient
with the Pareto and the Weibull distribution.
Analytically, we know that Pareto with parameter `a` has
G = 1 / (2*a - 1)
Likewise, for the Weibull distribution with parameter `a` we know that
G = 1 - 2**(-1/a)
"""
n = 10000
# Tests Pareto: G = 1 / (2*a - 1)
a = np.random.randint(2, 15)
expected = 1 / (2 * a - 1)
y = (np.random.pareto(a, size=n) + 1) * 2
coeff = gini_coefficient(y)
assert_allclose(expected, coeff, rtol=1e-01)
# Tests Weibull: G = 1 - 2**(-1/a)
a = np.random.randint(2, 15)
expected = 1 - 2 ** (-1 / a)
y = np.random.weibull(a, size=n)
coeff = gini_coefficient(y)
assert_allclose(expected, coeff, rtol=1e-01)
def test_shorrocks_index():
"""
Test Shorrocks mobility index function against the example used in 'Wealth
distribution and social mobility in the US: A quantitative approach'
(Benhabib, <NAME>, 2017).
https://www.econ.nyu.edu/user/bisina/RevisionAugust.pdf
"""
# Construct the mobility matrix from Benhabib et al.
P = [[0.222, 0.222, 0.215, 0.187, 0.081, 0.038, 0.029, 0.006],
[0.221, 0.220, 0.215, 0.188, 0.082, 0.039, 0.029, 0.006],
[0.207, 0.209, 0.210, 0.194, 0.090, 0.046, 0.036, 0.008],
[0.198, 0.201, 0.207, 0.198, 0.095, 0.052, 0.040, 0.009],
[0.175, 0.178, 0.197, 0.207, 0.110, 0.067, 0.054, 0.012],
[0.182, 0.184, 0.200, 0.205, 0.106, 0.062, 0.050, 0.011],
[0.123, 0.125, 0.166, 0.216, 0.141, 0.114, 0.094, 0.021],
[0.084, 0.084, 0.142, 0.228, 0.170, 0.143, 0.121, 0.028]]
expected = 0.98 # result from paper
index = shorrocks_index(P)
assert_allclose(expected, index, rtol=1e-2)
def test_rank_size():
"""
Tests `rank_size` function, which generates rank-size data for
a Pareto distribution.
The rank-size plot for a sample drawn from a Pareto distribution
should be a straight line.
The length of the `rank_data` array should be within (c x 100)%
of the size of the distribution.
"""
np.random.seed(15)
sample_size = 10000
c = 0.74
# Tests Pareto; r_squared ~ 1
pareto_draw = np.exp(np.random.exponential(scale=1.0, size=sample_size))
rank_data, size_data = rank_size(pareto_draw, c=c)
assert len(rank_data) == len(size_data)
assert_allclose(c*sample_size, len(rank_data), rtol=1e-3)
_, _, r_value, _, _ = linregress(np.log(rank_data), np.log(size_data))
r_sqval = r_value**2
assert_allclose(r_sqval, 1, rtol=1e-3)
# Tests Exponential; r_squared < 1
z = np.random.randn(sample_size)
exp_draw = np.exp(z)
rank_data_exp, size_data_exp = rank_size(exp_draw, c=c)
_, _, r_value_exp, _, _ = linregress(np.log(rank_data_exp),
np.log(size_data_exp))
r_sqval_exp = r_value_exp**2
assert_raises(AssertionError, assert_allclose, r_sqval_exp, 1, rtol=1e-3)
|
tools/Sikuli/TypeHome.sikuli/DoCtrlF4.py | marmyshev/vanessa-automation | 296 | 82827 |
<filename>tools/Sikuli/TypeHome.sikuli/DoCtrlF4.py
type(Key.F4, KeyModifier.CTRL)
sleep(1)
exit(0)
|
macgraph/input/text_util.py | Octavian-ai/mac-graph | 116 | 82836 |
<filename>macgraph/input/text_util.py
from collections import Counter
import tensorflow as tf
import numpy as np
from typing import List, Set
import re
import string
from tqdm import tqdm
import logging
logger = logging.getLogger(__name__)
from .util import read_gqa
# --------------------------------------------------------------------------
# Constants
# --------------------------------------------------------------------------
UNK = "<unk>"
SOS = "<sos>"
EOS = "<eos>"
SPACE = "<space>"
CHARS = ["<"+i+">" for i in string.ascii_lowercase] + ["<"+i+">" for i in string.ascii_uppercase]
SPECIAL_TOKENS = [UNK, SOS, EOS, SPACE] #+ CHARS
UNK_ID = SPECIAL_TOKENS.index(UNK)
SOS_ID = SPECIAL_TOKENS.index(SOS)
EOS_ID = SPECIAL_TOKENS.index(EOS)
# --------------------------------------------------------------------------
# Pretokenize
# --------------------------------------------------------------------------
ENGLISH_PUNCTUATION = '!"#$%&()*+,-./:;=?@[\\]^_`{|}~'
# --------------------------------------------------------------------------
def pretokenize_general(text):
text = text.replace("\n", "")
text = re.sub(r'\s*$', '', text)
text = text.replace(" ", f" {SPACE} ")
return text
def detokenize_general(text):
text = text.replace(f" {SPACE} ", " ")
return text
def pretokenize_json(value):
if isinstance(value, str) or isinstance(value, bool) or isinstance(value, int):
return str(value)
raise ValueError("Unsupported json value type")
def pretokenize_english(text):
text = pretokenize_general(text)
for p in ENGLISH_PUNCTUATION:
text = text.replace(p, f" {p} ")
text = re.sub(r'\s*$', '', text)
return text
def detokenize_english(text):
text = detokenize_general(text)
for p in ENGLISH_PUNCTUATION:
text = text.replace(f" {p} ", p)
return text
def bytes_to_string(p):
if len(p) == 0:
return ""
decode_utf8 = np.vectorize(lambda v: v.decode("utf-8"))
p = decode_utf8(p)
s = ''.join(p)
return s
# --------------------------------------------------------------------------
# Vocab
# --------------------------------------------------------------------------
class Vocab(object):
def __init__(self, table:List[str]):
self.table = table
def __contains__(self, value):
return value in self.table
def __iter__(self):
return iter(self.table)
def __len__(self):
return len(self.table)
# -------------------------------------------------------------------------- #
def lookup(self, value):
try:
return self.table.index(value)
except ValueError:
return UNK_ID
def inverse_lookup(self, value):
try:
return self.table[value]
except IndexError:
return UNK
def ids_to_string(self, line, output_as_array=False):
d = [self.inverse_lookup(i) for i in line]
if output_as_array:
return d
else:
return ' '.join(d)
def string_to_ids(self, line):
return [self.lookup(i) for i in line.split(' ')]
def expand_unknowns(self, line):
unknowns = set(line.split(' '))
unknowns -= set(self.table)
unknowns -= set([''])
for t in unknowns:
spaced = ''.join([f"<{c}> " for c in t])
line = line.replace(t, spaced)
return line
def english_to_ids(self, line):
# TODO: Make greedy w.r.t. tokens with spaces in them
line = pretokenize_english(line)
line = self.expand_unknowns(line)
line = self.string_to_ids(line)
return line
def ids_to_english(self, line):
line = self.ids_to_string(line)
line = detokenize_english(line)
return line
def prediction_value_to_string(self, v, output_as_array=False):
"""Rough 'n' ready get me the hell outta here fn.
Tries its best to deal with the mess of datatypes that end up coming out"""
if isinstance(v, np.int64):
s = self.inverse_lookup(v)
elif isinstance(v, np.ndarray):
if v.dtype == np.int64:
s = self.ids_to_string(v, output_as_array)
elif v.dtype == object:
s = bytes_to_string(v)
else:
raise ValueError()
else:
raise ValueError()
return s
def save(self, args):
with tf.gfile.GFile(args["vocab_path"], 'w') as out_file:
for i in self.table:
out_file.write(i + "\n")
# --------------------------------------------------------------------------
# Make me a vocab!
# --------------------------------------------------------------------------
@classmethod
def load(cls, path, size):
tokens = list()
with tf.gfile.GFile(path) as file:
for line in file.readlines():
tokens.append(line.replace("\n", ""))
if len(tokens) == size:
break
assert len(tokens) == len(set(tokens)), f"Duplicate lines in {path}"
return Vocab(tokens)
@classmethod
def load_from_args(cls, args):
return Vocab.load(args["vocab_path"], args["vocab_size"])
@classmethod
def build(cls, args, gqa_to_tokens, limit=None):
hits = Counter()
def add(tokens:List[str]):
for token in tokens:
if token not in ["", " ", "\n"]:
hits[token] += 1
for i in tqdm(read_gqa(args, limit=limit), total=limit):
add(gqa_to_tokens(i))
tokens = list()
tokens.extend(SPECIAL_TOKENS)
for i, c in hits.most_common(args["vocab_size"]):
if len(tokens) == args["vocab_size"]:
break
if i not in tokens:
tokens.append(i)
assert len(tokens) <= args["vocab_size"]
v = Vocab(tokens)
v.save(args)
return v
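# Illustrative round trip with a tiny hand-built vocabulary (not taken from
# the real training data):
#
#     v = Vocab(SPECIAL_TOKENS + ["hello", "world"])
#     v.english_to_ids("hello world")   # -> [4, 3, 5]  (hello, <space>, world)
#     v.ids_to_english([4, 3, 5])       # -> "hello world"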
|
stravalib/tests/functional/test_client_write.py | jsamoocha/stravalib | 599 | 82854 |
<filename>stravalib/tests/functional/test_client_write.py
from __future__ import absolute_import, unicode_literals
import os
from datetime import datetime, timedelta
from io import BytesIO
from stravalib import model, exc, attributes, unithelper as uh
from stravalib.client import Client
from stravalib.tests import RESOURCES_DIR  # assumed location of the sample.tcx fixture used below
from stravalib.tests.functional import FunctionalTestBase
class ClientWriteTest(FunctionalTestBase):
def test_create_activity(self):
"""
Test Client.create_activity simple case.
"""
now = datetime.now().replace(microsecond=0)
a = self.client.create_activity("test_create_activity#simple",
activity_type=model.Activity.RIDE,
start_date_local=now,
elapsed_time=timedelta(hours=3, minutes=4, seconds=5),
distance=uh.miles(15.2))
print(a)
self.assertIsInstance(a, model.Activity)
self.assertEquals("test_create_activity#simple", a.name)
self.assertEquals(now, a.start_date_local)
self.assertEquals(round(float(uh.miles(15.2)), 2), round(float(uh.miles(a.distance)), 2))
self.assertEquals(timedelta(hours=3, minutes=4, seconds=5), a.elapsed_time)
def test_update_activity(self):
"""
Test Client.update_activity simple case.
"""
now = datetime.now().replace(microsecond=0)
a = self.client.create_activity("test_update_activity#create",
activity_type=model.Activity.RIDE,
start_date_local=now,
elapsed_time=timedelta(hours=3, minutes=4, seconds=5),
distance=uh.miles(15.2))
self.assertIsInstance(a, model.Activity)
self.assertEquals("test_update_activity#create", a.name)
update1 = self.client.update_activity(a.id, name="test_update_activivty#update")
self.assertEquals("test_update_activivty#update", update1.name)
self.assertFalse(update1.private)
self.assertFalse(update1.trainer)
self.assertFalse(update1.commute)
update2 = self.client.update_activity(a.id, private=True)
self.assertTrue(update2.private)
update3 = self.client.update_activity(a.id, trainer=True)
self.assertTrue(update3.private)
self.assertTrue(update3.trainer)
def test_upload_activity(self):
"""
Test uploading an activity.
NOTE: This requires clearing out the uploaded activities from configured
writable Strava acct.
"""
with open(os.path.join(RESOURCES_DIR, 'sample.tcx')) as fp:
uploader = self.client.upload_activity(fp, data_type='tcx')
self.assertTrue(uploader.is_processing)
a = uploader.wait()
self.assertTrue(uploader.is_complete)
self.assertIsInstance(a, model.Activity)
self.assertEquals("02/21/2009 Leiden, ZH, The Netherlands", a.name)
# And we'll get an error if we try the same file again
with self.assertRaises(exc.ActivityUploadFailed):
self.client.upload_activity(fp, data_type='tcx')
|
prml/nn/normalization/batch_normalization.py | jinmang2/PRML | 11,017 | 82871 |
<filename>prml/nn/normalization/batch_normalization.py
import numpy as np
from prml.nn.array.ones import ones
from prml.nn.array.zeros import zeros
from prml.nn.config import config
from prml.nn.function import Function
from prml.nn.network import Network
class BatchNormalizationFunction(Function):
def _forward(self, x):
self.mean = x.mean(axis=0)
self.xc = x - self.mean
self.var = np.mean(self.xc ** 2, axis=0)
self.std = np.sqrt(self.var + 1e-7)
return self.xc / self.std
def _backward(self, delta, x):
# dstd = -np.mean((delta * self.xc) / (self.std ** 2), axis=0)
dxc = delta / self.std - self.xc * np.mean((delta * self.xc) / (self.std ** 3), axis=0)
return dxc - np.mean(dxc, axis=0)
# dstd = -np.mean((delta * self.xc) / (self.std ** 2), axis=0)
# dxc = delta / self.std + self.xc * dstd / self.std
# return dxc - np.mean(dxc, axis=0)
# dxn = delta
# dxc = dxn / self.std
# dstd = -np.sum((dxn * self.xc) / (self.std ** 2), axis=0)
# dvar = 0.5 * dstd / self.std
# dxc += 2.0 * self.xc * dvar / delta.shape[0]
# dmu = np.sum(dxc, axis=0)
# dx = dxc - dmu / delta.shape[0]
# return dx
class BatchNormalization(Network):
def __init__(self, ndim, scale=None, bias=None, momentum=0.9):
super().__init__()
self.momentum = momentum
with self.set_parameter():
self.mean = zeros(ndim)
self.var = ones(ndim)
def __call__(self, x):
shape = x.shape
x = x.reshape(-1, x.shape[-1])
if config.is_updating_bn:
func = BatchNormalizationFunction()
out = func.forward(x)
self.mean.value = self.momentum * self.mean.value + (1 - self.momentum) * func.mean
self.var.value = self.momentum * self.var.value + (1 - self.momentum) * func.var
del func.mean
del func.var
else:
xc = x - self.mean
out = xc / np.sqrt(self.var.value + 1e-7)
return out.reshape(*shape)
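# Sketch of intended use (shapes assumed): bn = BatchNormalization(ndim=64)
# normalises inputs whose last axis has size 64; the running mean/var stored
# on the network are only updated while config.is_updating_bn is True.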
|
utils/regression/applications/fruntoctrl/FrunToCtrlExecutor.py | noahsherrill/force-riscv | 111 | 82873 |
#
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from common.path_utils import PathUtils
from common.msg_utils import Msg
from common.sys_utils import SysUtils
from common.errors import *
from executors.app_executor import *
from classes.control_item import ControlItem, CtrlItmKeys
class FrunToCtrlExecutor(AppExecutor):
def __init__(self):
super().__init__()
self.mFrunToCtrlCmd = None
self.log = None
self.elog = None
def load(self, aCtrlItem):
super().load(aCtrlItem)
self.mFrunToCtrlCmd = self.ctrl_item.fruntoctrl.get("path", None)
def skip(self):
if not self.ctrl_item.fruntoctrl.get("run", False):
Msg.user(
"[FrunToCtrlExecutor::skip] skipping - run is not True..."
)
return True
Msg.user("[FrunToCtrlExecutor::skip] not skipping")
return False
def execute(self):
if not PathUtils.check_file("./_def_frun.py"):
Msg.user(
"[FrunToCtrlExecutor::skip] skipping - no _def_frun.py found"
)
return True
my_cmd = self.mFrunToCtrlCmd
if my_cmd is None:
Msg.user("[FrunToCtrlExecutor::skip] skipping - no path was given")
return True
Msg.user("FrunToCtrlCommand = " + str({"frun-to-ctrl-cmd": my_cmd}))
Msg.flush()
self.log = "frun_to_ctrl_result.log"
self.elog = "frun_to_ctrl_result.err"
my_result = SysUtils.exec_process(
my_cmd, self.log, self.elog, self.ctrl_item.timeout, True
)
Msg.user("FrunToCtrlResult = " + str(my_result))
Msg.flush()
return SysUtils.success(int(my_result[0]))
|
app/migrations/0002_experiment_process_processstatus.py | tungpatrick/AutoOut | 101 | 82882 |
# Generated by Django 2.2.1 on 2019-07-19 10:12
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('app', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Process',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='detection', max_length=50)),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
],
options={
'db_table': 'processes',
},
),
migrations.CreateModel(
name='ProcessStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='Not Started', max_length=100)),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
],
options={
'db_table': 'process_statuses',
},
),
migrations.CreateModel(
name='Experiment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('results_path', models.TextField(default='')),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
('dataset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.Dataset')),
('process', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.Process')),
('process_status', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.ProcessStatus')),
],
options={
'db_table': 'experiments',
},
),
]
|
agate/aggregations/deciles.py | andriyor/agate | 663 | 82884 |
#!/usr/bin/env python
from agate.aggregations.base import Aggregation
from agate.aggregations.has_nulls import HasNulls
from agate.aggregations.percentiles import Percentiles
from agate.data_types import Number
from agate.exceptions import DataTypeError
from agate.utils import Quantiles
from agate.warns import warn_null_calculation
class Deciles(Aggregation):
"""
Calculate the deciles of a column based on its percentiles.
Deciles will be equivalent to the 10th, 20th ... 90th percentiles.
"Zeroth" (min value) and "Tenth" (max value) deciles are included for
reference and intuitive indexing.
See :class:`Percentiles` for implementation details.
This aggregation can not be applied to a :class:`.TableSet`.
:param column_name:
The name of a column containing :class:`.Number` data.
"""
def __init__(self, column_name):
self._column_name = column_name
def validate(self, table):
column = table.columns[self._column_name]
if not isinstance(column.data_type, Number):
raise DataTypeError('Deciles can only be applied to columns containing Number data.')
has_nulls = HasNulls(self._column_name).run(table)
if has_nulls:
warn_null_calculation(self, column)
def run(self, table):
"""
:returns:
An instance of :class:`Quantiles`.
"""
percentiles = Percentiles(self._column_name).run(table)
return Quantiles([percentiles[i] for i in range(0, 101, 10)])
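# Example (column name assumed): deciles = table.aggregate(Deciles('salary'))
# returns a Quantiles sequence in which deciles[0] is the minimum, deciles[5]
# the median and deciles[10] the maximum.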
|
iPERCore/tools/human_pose3d_estimators/spin/runner.py | JSssssss/iPERCore | 2,223 | 82937 |
import torch
import numpy as np
from tqdm import tqdm
from typing import Union, List, Tuple, Any, Dict
from easydict import EasyDict
from .dataset import preprocess, InferenceDataset, InferenceDatasetWithKeypoints
from .network import build_spin
from .. import BasePose3dRunner, BasePose3dRefiner, ACTIONS
from iPERCore.tools.human_digitalizer.bodynets import SMPL
from iPERCore.tools.utils.dataloaders import build_inference_loader
from iPERCore.tools.utils.geometry.boxes import cal_head_bbox
from iPERCore.tools.utils.geometry.cam_pose_utils import cam_init2orig, cam_norm
from iPERCore.tools.utils.filesio.persistence import load_toml_file
__all__ = ["SPINRunner"]
class SPINRunner(BasePose3dRunner):
def __init__(self,
cfg_or_path: Union[EasyDict, str],
device=torch.device("cpu")):
"""
Args:
cfg_or_path (EasyDict or str): the configuration EasyDict or the cfg_path with `toml` file.
If it is an EasyDict instance, it must contains the followings,
--ckpt_path (str): the path of the pre-trained checkpoints;
--smpl_path (str): the path of the smpl model;
--smpl_mean_params (str): the path of the mean parameters of SMPL.
Otherwise if it is a `toml` file, an example could be the followings,
ckpt_path = "./assets/pretrains/spin_ckpt.pth"
smpl_path = "./assets/pretrains/smpl_model.pkl"
smpl_mean_params = "./assets/pretrains/smpl_mean_params.npz"
device (torch.device):
"""
self.device = device
# RGB
self.MEAN = torch.as_tensor([0.485, 0.456, 0.406])[None, :, None, None].to(self.device)
self.STD = torch.as_tensor([0.229, 0.224, 0.225])[None, :, None, None].to(self.device)
if isinstance(cfg_or_path, str):
cfg = EasyDict(load_toml_file(cfg_or_path))
else:
cfg = cfg_or_path
self.model = build_spin(pretrained=False)
checkpoint = torch.load(cfg["ckpt_path"])
self.model.load_state_dict(checkpoint, strict=True)
self.model.eval()
self._smpl = SMPL(cfg["smpl_path"]).to(self.device)
self.model = self.model.to(self.device)
def __call__(self, image: np.ndarray,
boxes: Union[np.ndarray, List, Tuple, Any],
action: ACTIONS = ACTIONS.SPLIT) -> Dict[str, Any]:
"""
Args:
image (np.ndarray): (H, W, C), color intensity [0, 255] with BGR color channel;
boxes (np.ndarray or List, or Tuple or None): (N, 4)
action:
-- 0: only return `cams`, `pose` and `shape` of SMPL;
-- 1: return `cams`, `pose`, `shape` and `verts`.
-- 2: return `cams`, `pose`, `shape`, `verts`, `j2d` and `j3d`.
Returns:
result (dict):
"""
image = np.copy(image)
proc_img, proc_info = preprocess(image, boxes)
proc_img = torch.tensor(proc_img).to(device=self.device)[None]
with torch.no_grad():
proc_img = (proc_img - self.MEAN) / self.STD
smpls = self.model(proc_img)
cams_orig = cam_init2orig(smpls[:, 0:3], proc_info["scale"],
torch.tensor(proc_info["start_pt"], device=self.device).float())
cams = cam_norm(cams_orig, proc_info["im_shape"][0])
smpls[:, 0:3] = cams
if action == ACTIONS.SPLIT:
result = self.body_model.split(smpls)
elif action == ACTIONS.SKIN:
result = self.body_model.skinning(smpls)
elif action == ACTIONS.SMPL:
result = {"theta": smpls}
else:
result = self.body_model.get_details(smpls)
result["proc_info"] = proc_info
return result
def run_with_smplify(self, image_paths: List[str], boxes: List[Union[List, Tuple, np.ndarray]],
keypoints_info: Dict, smplify_runner: BasePose3dRefiner,
batch_size: int = 16, num_workers: int = 4,
filter_invalid: bool = True, temporal: bool = True):
"""
Args:
image_paths (list of str): the image paths;
            boxes (list of Union[np.ndarray, list, tuple]): the bounding boxes of each image;
keypoints_info (Dict): the keypoints information of each image;
            smplify_runner (BasePose3dRefiner): the SMPLify instance; it must contain the keypoint_formater;
batch_size (int): the mini-batch size;
num_workers (int): the number of processes;
filter_invalid (bool): the flag to control whether filter invalid frames or not;
temporal (bool): use temporal smooth optimization or not.
Returns:
            smpl_infos (dict): the estimated SMPL information; it contains,
--all_init_smpls (torch.Tensor): (num, 85), the initialized smpls;
--all_opt_smpls (torch.Tensor): (num, 85), the optimized smpls;
--all_valid_ids (torch.Tensor): (num of valid frames,), the valid indexes.
"""
def head_is_valid(head_boxes):
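            # keep only frames whose detected head bounding box covers more than a 10 x 10 pixel area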
return (head_boxes[:, 1] - head_boxes[:, 0]) * (head_boxes[:, 3] - head_boxes[:, 2]) > 10 * 10
dataset = InferenceDatasetWithKeypoints(image_paths, boxes, keypoints_info,
smplify_runner.keypoint_formater, image_size=224, temporal=temporal)
data_loader = build_inference_loader(dataset, batch_size=batch_size, num_workers=num_workers)
"""
sample (dict): the sample information, it contains,
--image (torch.Tensor): (3, 224, 224) is the cropped image range of [0, 1] and normalized
by MEAN and STD, RGB channel;
            --orig_image (torch.Tensor): (3, height, width) is in the range of [0, 1], RGB channel;
--im_shape (torch.Tensor): (height, width)
--keypoints (dict): (num_joints, 3), and num_joints could be [75,].
--center (torch.Tensor): (2,);
--start_pt (torch.Tensor): (2,);
--scale (torch.Tensor): (1,);
--img_path (str): the image path.
"""
all_init_smpls = []
all_opt_smpls = []
all_pose3d_img_ids = []
for sample in tqdm(data_loader):
images = sample["image"].to(self.device)
start_pt = sample["start_pt"].to(self.device)
scale = sample["scale"][:, None].to(self.device).float()
im_shape = sample["im_shape"][:, 0:1].to(self.device)
keypoints_info = sample["keypoints"].to(self.device)
img_ids = sample["img_id"]
with torch.no_grad():
init_smpls = self.model(images)
cams_orig = cam_init2orig(init_smpls[:, 0:3], scale, start_pt)
cams = cam_norm(cams_orig, im_shape)
init_smpls[:, 0:3] = cams
smplify_results = smplify_runner(
keypoints_info, cams, init_smpls[:, -10:], init_smpls[:, 3:-10], proc_kps=False, temporal=temporal
)
opt_smpls = torch.cat([cams, smplify_results["new_opt_pose"], smplify_results["new_opt_betas"]], dim=1)
if filter_invalid:
opt_smpls_info = self.get_details(opt_smpls)
head_boxes = cal_head_bbox(opt_smpls_info["j2d"], image_size=512)
valid = head_is_valid(head_boxes).nonzero(as_tuple=False)
valid.squeeze_(-1)
img_ids = img_ids[valid]
all_init_smpls.append(init_smpls.cpu())
all_opt_smpls.append(opt_smpls.cpu())
all_pose3d_img_ids.append(img_ids.cpu())
all_init_smpls = torch.cat(all_init_smpls, dim=0)
all_opt_smpls = torch.cat(all_opt_smpls, dim=0)
all_valid_ids = torch.cat(all_pose3d_img_ids, dim=0)
smpl_infos = {
"all_init_smpls": all_init_smpls,
"all_opt_smpls": all_opt_smpls,
"all_valid_ids": all_valid_ids
}
return smpl_infos
def run(self, image_paths: List[str], boxes: List[List],
batch_size: int = 16, num_workers: int = 4,
filter_invalid: bool = True, temporal: bool = True):
"""
Args:
image_paths (list of str): the image paths;
boxes (list of list): the bounding boxes of each image;
batch_size (int): the mini-batch size;
num_workers (int): the number of processes;
filter_invalid (bool): the flag to control whether filter invalid frames or not;
temporal (bool): use temporal smooth optimization or not.
Returns:
            smpl_infos (dict): the estimated SMPL information; it contains,
--all_init_smpls (torch.Tensor): (num, 85), the initialized smpls;
--all_opt_smpls (torch.Tensor): None
--all_valid_ids (torch.Tensor): (num of valid frames,), the valid indexes.
"""
def head_is_valid(head_boxes):
return (head_boxes[:, 1] - head_boxes[:, 0]) * (head_boxes[:, 3] - head_boxes[:, 2]) > 10 * 10
dataset = InferenceDataset(image_paths, boxes, image_size=224)
data_loader = build_inference_loader(dataset, batch_size=batch_size, num_workers=num_workers)
"""
sample (dict): the sample information, it contains,
--image (torch.Tensor): (3, 224, 224) is the cropped image range of [0, 1] and normalized
by MEAN and STD, RGB channel;
            --orig_image (torch.Tensor): (3, height, width) is in the range of [0, 1], RGB channel;
--im_shape (torch.Tensor): (height, width)
--keypoints (dict): (num_joints, 3), and num_joints could be [75,].
--center (torch.Tensor): (2,);
--start_pt (torch.Tensor): (2,);
--scale (torch.Tensor): (1,);
--img_path (str): the image path.
"""
all_init_smpls = []
all_pose3d_img_ids = []
for sample in tqdm(data_loader):
images = sample["image"].to(self.device)
start_pt = sample["start_pt"].to(self.device)
scale = sample["scale"][:, None].to(self.device).float()
im_shape = sample["im_shape"][:, 0:1].to(self.device)
img_ids = sample["img_id"]
with torch.no_grad():
init_smpls = self.model(images)
cams_orig = cam_init2orig(init_smpls[:, 0:3], scale, start_pt)
cams = cam_norm(cams_orig, im_shape)
init_smpls[:, 0:3] = cams
if filter_invalid:
init_smpls_info = self.get_details(init_smpls)
head_boxes = cal_head_bbox(init_smpls_info["j2d"], image_size=512)
valid = head_is_valid(head_boxes).nonzero(as_tuple=False)
valid.squeeze_(-1)
img_ids = img_ids[valid]
all_init_smpls.append(init_smpls.cpu())
all_pose3d_img_ids.append(img_ids.cpu())
all_init_smpls = torch.cat(all_init_smpls, dim=0)
all_valid_ids = torch.cat(all_pose3d_img_ids, dim=0)
smpl_infos = {
"all_init_smpls": all_init_smpls,
"all_opt_smpls": None,
"all_valid_ids": all_valid_ids
}
return smpl_infos
def get_details(self, smpls):
return self._smpl.get_details(smpls)
@property
def mean_theta(self):
mean_cam = self.model.init_cam
mean_pose = self.model.init_pose
mean_shape = self.model.init_shape
mean_theta = torch.cat([mean_cam, mean_pose, mean_shape], dim=-1)[0]
return mean_theta
@property
def body_model(self):
return self._smpl
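# --- Hedged usage sketch (comments only; the config path below is an assumption,
# --- not a shipped default) ---
# runner = SPINRunner("./assets/configs/pose3d/spin.toml", device=torch.device("cuda:0"))
# result = runner(bgr_image, boxes, action=ACTIONS.SMPL)
# result["theta"] is an (N, 85) tensor packing cams (3), pose (72) and shape (10) parameters.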
|
tests/bugs/test-200907231924.py
|
eLBati/pyxb
| 123 |
82959
|
# -*- coding: utf-8 -*-
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import pyxb.binding.generate
import pyxb.binding.datatypes as xs
import pyxb.binding.basis
import pyxb.utils.domutils
import os.path
xsd='''<?xml version="1.0" encoding="UTF-8"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:complexType name="tDescription" mixed="true">
<xs:sequence>
<xs:element ref="sub-description" minOccurs="0"/>
</xs:sequence>
</xs:complexType>
<xs:element name="sub-description" type="xs:string"/>
<xs:element name="description" type="tDescription"/>
</xs:schema>'''
code = pyxb.binding.generate.GeneratePython(schema_text=xsd)
#open('code.py', 'w').write(code)
#print code
rv = compile(code, 'test', 'exec')
eval(rv)
from pyxb.exceptions_ import *
import unittest
class TestTrac_200907231924 (unittest.TestCase):
# This verifies that we do not improperly interpret non-element
# content as being the content of a nested element.
def testSub (self):
xml = '<sub-description>Floor</sub-description>'
instance = CreateFromDocument(xml)
self.assertEqual(instance, 'Floor')
def testMain (self):
xml = '<description>Main Office</description>'
instance = CreateFromDocument(xml)
self.assertEqual(1, len(instance.orderedContent()))
self.assertTrue(instance.sub_description is None)
self.assertEqual(instance.orderedContent()[0].value, 'Main Office')
def testMainSub (self):
xml = '<description>Main Office<sub-description>Floor</sub-description>State</description>'
instance = CreateFromDocument(xml)
self.assertTrue(instance.sub_description is not None)
self.assertEqual(instance.sub_description, 'Floor')
self.assertEqual(3, len(instance.orderedContent()))
self.assertEqual(instance.orderedContent()[0].value, 'Main Office')
self.assertEqual(instance.orderedContent()[2].value, 'State')
if __name__ == '__main__':
unittest.main()
|
scripts/tests/ai2_internal/resume_daemon_test.py
|
MSLars/allennlp
| 11,433 |
82969
|
import pytest
import sqlite3
from unittest.mock import call, Mock
from allennlp.common.testing import AllenNlpTestCase
from scripts.ai2_internal.resume_daemon import (
BeakerStatus,
create_table,
handler,
logger,
resume,
start_autoresume,
)
# Don't spam the log in tests.
logger.removeHandler(handler)
class ResumeDaemonTest(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.connection = sqlite3.connect(":memory:")
create_table(self.connection)
def test_create_beaker_status_works(self):
status = BeakerStatus("stopped")
assert status.name == "stopped"
def test_create_beaker_status_throws(self):
with pytest.raises(ValueError):
status = BeakerStatus("garbage")
assert status.name == "garbage"
def test_does_nothing_on_empty_db(self):
beaker = Mock()
resume(self.connection, beaker)
assert not beaker.method_calls
def test_does_not_resume_a_running_experiment(self):
beaker = Mock()
experiment_id = "foo"
start_autoresume(self.connection, experiment_id, 5)
beaker.get_status.return_value = BeakerStatus.running
resume(self.connection, beaker)
beaker.get_status.assert_called()
assert len(beaker.method_calls) == 1
def test_does_not_resume_a_finished_experiment(self):
beaker = Mock()
experiment_id = "foo"
start_autoresume(self.connection, experiment_id, 5)
beaker.get_status.return_value = BeakerStatus.succeeded
resume(self.connection, beaker)
beaker.get_status.assert_called()
assert len(beaker.method_calls) == 1
def test_does_resume_a_preempted_experiment(self):
beaker = Mock()
experiment_id = "foo"
start_autoresume(self.connection, experiment_id, 5)
beaker.get_status.return_value = BeakerStatus.preempted
beaker.resume.return_value = "foo2"
resume(self.connection, beaker)
beaker.get_status.assert_called()
beaker.resume.assert_called()
assert len(beaker.method_calls) == 2
def test_respects_upper_bound_on_resumes(self):
beaker = Mock()
experiment_id = "foo"
start_autoresume(self.connection, experiment_id, 5)
beaker.get_status.return_value = BeakerStatus.preempted
for i in range(10):
beaker.resume.return_value = f"foo{i}"
resume(self.connection, beaker)
calls = [
call.get_status("foo"),
call.resume("foo"),
call.get_status("foo0"),
call.resume("foo0"),
call.get_status("foo1"),
call.resume("foo1"),
call.get_status("foo2"),
call.resume("foo2"),
call.get_status("foo3"),
call.resume("foo3"),
call.get_status("foo4"),
]
beaker.assert_has_calls(calls)
def test_handles_a_realistic_scenario(self):
beaker = Mock()
experiment_id = "foo"
start_autoresume(self.connection, experiment_id, 5)
beaker.get_status.return_value = BeakerStatus.preempted
for i in range(10):
beaker.resume.return_value = f"foo{i}"
if i == 2:
beaker.get_status.return_value = BeakerStatus.succeeded
resume(self.connection, beaker)
calls = [
call.get_status("foo"),
call.resume("foo"),
call.get_status("foo0"),
call.resume("foo0"),
call.get_status("foo1"),
]
beaker.assert_has_calls(calls)
|
networks/graph_cmr/models/geometric_layers.py
|
solomon-ma/PaMIR
| 374 |
82970
|
"""
Useful geometric operations, e.g. Orthographic projection and a differentiable Rodrigues formula
Parts of the code are taken from https://github.com/MandyMo/pytorch_HMR
"""
import torch
def rodrigues(theta):
"""Convert axis-angle representation to rotation matrix.
Args:
theta: size = [B, 3]
Returns:
Rotation matrix corresponding to the quaternion -- size = [B, 3, 3]
"""
l1norm = torch.norm(theta + 1e-8, p = 2, dim = 1)
angle = torch.unsqueeze(l1norm, -1)
normalized = torch.div(theta, angle)
angle = angle * 0.5
v_cos = torch.cos(angle)
v_sin = torch.sin(angle)
quat = torch.cat([v_cos, v_sin * normalized], dim = 1)
return quat2mat(quat)
def quat2mat(quat):
"""Convert quaternion coefficients to rotation matrix.
Args:
quat: size = [B, 4] 4 <===>(w, x, y, z)
Returns:
Rotation matrix corresponding to the quaternion -- size = [B, 3, 3]
"""
norm_quat = quat
norm_quat = norm_quat/norm_quat.norm(p=2, dim=1, keepdim=True)
w, x, y, z = norm_quat[:,0], norm_quat[:,1], norm_quat[:,2], norm_quat[:,3]
B = quat.size(0)
w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)
wx, wy, wz = w*x, w*y, w*z
xy, xz, yz = x*y, x*z, y*z
rotMat = torch.stack([w2 + x2 - y2 - z2, 2*xy - 2*wz, 2*wy + 2*xz,
2*wz + 2*xy, w2 - x2 + y2 - z2, 2*yz - 2*wx,
2*xz - 2*wy, 2*wx + 2*yz, w2 - x2 - y2 + z2], dim=1).view(B, 3, 3)
return rotMat
def orthographic_projection(X, camera):
"""Perform orthographic projection of 3D points X using the camera parameters
Args:
X: size = [B, N, 3]
camera: size = [B, 3]
Returns:
Projected 2D points -- size = [B, N, 2]
"""
camera = camera.view(-1, 1, 3)
X_trans = X[:, :, :2] + camera[:, :, 1:]
shape = X_trans.shape
X_2d = (camera[:, :, 0] * X_trans.view(shape[0], -1)).view(shape)
return X_2d
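# --- Hedged usage sketch (illustrative, not part of the original module); the
# --- axis-angle values and batch size below are arbitrary.
if __name__ == '__main__':
    theta = torch.tensor([[0.0, 0.0, 1.5708],   # roughly a 90 degree rotation about z
                          [0.3, -0.2, 0.1]])
    R = rodrigues(theta)                         # (2, 3, 3) rotation matrices
    # Rotation matrices are orthonormal, so R @ R^T should be close to identity.
    identity = torch.eye(3).expand(2, 3, 3)
    print(torch.allclose(torch.bmm(R, R.transpose(1, 2)), identity, atol=1e-4))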
|
zmq/cl-zmq.py
|
bit0fun/plugins
| 173 |
83024
|
#!/usr/bin/env python3
# Copyright (c) 2019 lightningd
# Distributed under the BSD 3-Clause License, see the accompanying file LICENSE
###############################################################################
# ZeroMQ publishing plugin for lightningd
#
# Using Twisted and txZMQ frameworks, this plugin binds to ZeroMQ endpoints and
# publishes notification of all possible subscriptions that have been opted-in
# for via lightningd launch parameter.
#
# This plugin doesn't interpret any of the content of the data which comes out
# of lightningd, it merely passes the received JSON through as encoded UTF-8,
# with the 'tag' being set to the Notification Type name (also encoded as
# UTF-8). It follows that adding future possible subscriptions *should* be as
# easy as appending it to NOTIFICATION_TYPE_NAMES below.
#
# The user-selectable configuration takes inspiration from the bitcoind ZeroMQ
# integration. The endpoint must be explicitly given as an argument to enable
# it. Also, the high water mark argument for the binding is set as an
# additional launch option.
#
# Due to how the plugins must register via getmanifest, this will opt in to all
# subscriptions and ignore the messages from ones not bound to ZMQ endpoints.
# Hence, there might be a minor performance impact from subscription messages
# that result in no publish action. This can be mitigated by dropping
# notifications that are not of interest to your ZeroMQ subscribers from
# NOTIFICATION_TYPE_NAMES below.
###############################################################################
import json
import functools
from twisted.internet import reactor
from txzmq import ZmqEndpoint, ZmqEndpointType
from txzmq import ZmqFactory
from txzmq import ZmqPubConnection
from pyln.client import Plugin
###############################################################################
NOTIFICATION_TYPE_NAMES = ['channel_opened',
'connect',
'disconnect',
'invoice_payment',
'warning',
'forward_event',
'sendpay_success',
'sendpay_failure']
class NotificationType():
""" Wrapper for notification type string to generate the corresponding
plugin option strings. By convention of lightningd, the cli options
        use dashes rather than underscores or spaces."""
def __init__(self, notification_type_name):
self.notification_type_name = notification_type_name
def __str__(self):
return self.notification_type_name
def endpoint_option(self):
return "zmq-pub-{}".format(str(self).replace("_", "-"))
def hwm_option(self):
return "zmq-pub-{}-hwm".format(str(self).replace("_", "-"))
NOTIFICATION_TYPES = [NotificationType(n) for n in NOTIFICATION_TYPE_NAMES]
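# Hedged illustration (not part of the original plugin): for the 'invoice_payment'
# notification type, the derived option names would be
#   endpoint option: 'zmq-pub-invoice-payment'
#   hwm option:      'zmq-pub-invoice-payment-hwm'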
###############################################################################
class Publisher():
""" Holds the connection state and accepts incoming notifications that
come from the subscription. If there is an associated publishing
endpoint connected, it will encode and pass the contents of the
notification. """
def __init__(self):
self.factory = ZmqFactory()
self.connection_map = {}
def load_setup(self, setup):
for e, s in setup.items():
endpoint = ZmqEndpoint(ZmqEndpointType.bind, e)
ZmqPubConnection.highWaterMark = s['high_water_mark']
connection = ZmqPubConnection(self.factory, endpoint)
for n in s['notification_type_names']:
self.connection_map[n] = connection
def publish_notification(self, notification_type_name, *args, **kwargs):
if notification_type_name not in self.connection_map:
return
tag = notification_type_name.encode("utf8")
message = json.dumps(kwargs).encode("utf8")
connection = self.connection_map[notification_type_name]
connection.publish(message, tag=tag)
publisher = Publisher()
###############################################################################
ZMQ_TRANSPORT_PREFIXES = ['tcp://', "ipc://", 'inproc://', "pgm://", "epgm://"]
class Setup():
""" Does some light validation of the plugin option input and generates a
dictionary to configure the Twisted and ZeroMQ setup """
def _at_least_one_binding(options):
n_bindings = sum(1 for o, v in options.items() if
not o.endswith("-hwm") and v != "null")
return n_bindings > 0
def _iter_endpoints_not_ok(options):
for nt in NOTIFICATION_TYPES:
endpoint_opt = nt.endpoint_option()
endpoint = options[endpoint_opt]
if endpoint != "null":
if len([1 for prefix in ZMQ_TRANSPORT_PREFIXES if
endpoint.startswith(prefix)]) != 0:
continue
yield endpoint
def check_option_warnings(options, plugin):
if not Setup._at_least_one_binding(options):
plugin.log("No zmq publish sockets are bound as per launch args",
level='warn')
for endpoint in Setup._iter_endpoints_not_ok(options):
plugin.log(("Endpoint option {} doesn't appear to be recognized"
).format(endpoint), level='warn')
###########################################################################
def _iter_endpoint_setup(options):
for nt in NOTIFICATION_TYPES:
endpoint_opt = nt.endpoint_option()
if options[endpoint_opt] == "null":
continue
endpoint = options[endpoint_opt]
hwm_opt = nt.hwm_option()
hwm = int(options[hwm_opt])
yield endpoint, nt, hwm
def get_setup_dict(options):
setup = {}
for e, nt, hwm in Setup._iter_endpoint_setup(options):
if e not in setup:
setup[e] = {'notification_type_names': [],
'high_water_mark': hwm}
setup[e]['notification_type_names'].append(str(nt))
# use the lowest high water mark given for the endpoint
setup[e]['high_water_mark'] = min(
setup[e]['high_water_mark'], hwm)
return setup
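    # Hedged illustration (endpoint and values are assumptions): two notification
    # types bound to the same endpoint collapse into one entry, e.g.
    #   {'tcp://127.0.0.1:5555': {'notification_type_names': ['connect', 'disconnect'],
    #                             'high_water_mark': 1000}}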
###########################################################################
def log_setup_dict(setup, plugin):
for e, s in setup.items():
m = ("Endpoint {} will get events from {} subscriptions "
"published with high water mark {}")
m = m.format(e, s['notification_type_names'], s['high_water_mark'])
plugin.log(m)
###############################################################################
plugin = Plugin()
@plugin.init()
def init(options, configuration, plugin, **kwargs):
Setup.check_option_warnings(options, plugin)
setup_dict = Setup.get_setup_dict(options)
Setup.log_setup_dict(setup_dict, plugin)
reactor.callFromThread(publisher.load_setup, setup_dict)
def on_notification(notification_type_name, plugin, *args, **kwargs):
if len(args) != 0:
plugin.log("got unexpected args: {}".format(args), level="warn")
reactor.callFromThread(publisher.publish_notification,
notification_type_name, *args, **kwargs)
DEFAULT_HIGH_WATER_MARK = 1000
for nt in NOTIFICATION_TYPES:
# subscribe to all notifications
on = functools.partial(on_notification, str(nt))
on.__annotations__ = {} # needed to please Plugin._coerce_arguments()
plugin.add_subscription(str(nt), on)
# zmq socket binding option
endpoint_opt = nt.endpoint_option()
endpoint_desc = "Enable publish {} info to ZMQ socket endpoint".format(nt)
plugin.add_option(endpoint_opt, None, endpoint_desc, opt_type='string')
# high water mark option
hwm_opt = nt.hwm_option()
hwm_desc = ("Set publish {} info message high water mark "
"(default: {})".format(nt, DEFAULT_HIGH_WATER_MARK))
plugin.add_option(hwm_opt, DEFAULT_HIGH_WATER_MARK, hwm_desc,
opt_type='int')
###############################################################################
def plugin_thread():
plugin.run()
reactor.callFromThread(reactor.stop)
reactor.callInThread(plugin_thread)
reactor.run()
|
convert_to_knowledge_repo.py
|
certara-ShengnanHuang/machine-learning
| 2,104 |
83032
|
"""
Examples
--------
Convert existing jupyter notebook to an airbnb knowledge repo format
- python convert_to_knowledge_repo.py --ml_repo . --knowledge_repo knowledge-repo
Deploying the webapp
- knowledge_repo --repo knowledge-repo deploy
"""
import os
import re
import json
import subprocess
from dateutil import parser as date_parser
def main(ml_repo, knowledge_repo, inplace):
ml_repo_path = os.path.abspath(ml_repo)
knowledge_repo_path = os.path.abspath(knowledge_repo)
if not os.path.isdir(knowledge_repo_path):
init_knowledge_repo(knowledge_repo_path)
convert_all_posts(ml_repo_path, knowledge_repo_path, inplace)
def init_knowledge_repo(path):
cmd = 'knowledge_repo --repo {} init'.format(path)
subprocess.call(cmd, shell=True)
def convert_all_posts(path, knowledge_repo_path, inplace):
"""Recursive walk down all directory to perform the conversion"""
if os.path.isdir(path):
files = [os.path.join(path, f) for f in os.listdir(path)]
for f in files:
convert_all_posts(f, knowledge_repo_path, inplace)
elif '-converted' not in path:
head, ext = os.path.splitext(path)
if ext == ".ipynb":
try:
converter = IpynbConverter(knowledge_repo_path, inplace)
notebook = converter.convert(path)
converter.add(notebook)
except Exception as e:
print('Skipping: {}'.format(path))
print(e)
class IpynbConverter:
"""
Converts Jupyter notebook to airbnb knowledge repo format [1]_.
Parameters
----------
knowledge_repo_path : str
Path to store the airbnb knowledge repo-ed notebook.
inplace : bool
        Whether to perform the conversion in place or not. If
        false, a new notebook with '-converted' appended to the
        file name is created.
Attributes
----------
date_created_ : str
Input notebook's creation date.
date_updated_ : str
Input notebook's latest updated date.
tags_ : str
        The notebook's filename is used as the tag in this automated
conversion process. e.g. /Users/ethen/machine-learning/trees/decision_tree.ipynb,
we would use 'decision_tree' as the tag.
github_link_ : str
Notebook's original link on github.
title_ : str
        Notebook's title, taken from the first level 1 markdown header that is not
        'Table of Contents' (which newer notebook versions may generate
        automatically). e.g. # Decision Tree (Classification)\n, then
Decision Tree (Classification) would be our title.
References
----------
.. [1] `Airbnb knowledge repo
<https://github.com/airbnb/knowledge-repo>`_
"""
AUTHOR = '<NAME>'
DATE_FORMAT = '%Y-%m-%d'
REPO_NAME = 'machine-learning'
BASE_URL = 'https://github.com/ethen8181/'
def __init__(self, knowledge_repo_path, inplace):
self.inplace = inplace
self.knowledge_repo_path = knowledge_repo_path
def convert(self, path):
"""
Convert the input path's notebook to a knowledge repo. This
will add a mandatory raw cell that contains the yaml information
needed by the knowledge repo and an additional cell that contains
link to the notebook on github.
Parameters
----------
path : str
Path that has the '.ipynb' extension.
Returns
-------
notebook : dict
Updated Jupyter notebook's raw json represented in dictionary format.
Ready to be passed to the .add method to add to the knowledge repo.
"""
self.date_created_ = self._date_created(path)
self.date_updated_ = self._date_updated(path)
self.tags_, self.github_link_ = self._tags_and_github_link(path)
with open(path, encoding='utf-8') as f:
notebook = json.load(f)
self.title_ = self._title(notebook)
# prepend the dictionary header to notebook['cells']
notebook['cells'] = ([self._construct_header()] +
[self._construct_github_link_cell()] +
notebook['cells'])
if not self.inplace:
head, ext = os.path.splitext(path)
head += '-converted'
path = head + ext
self._path = path
return notebook
def _date_created(self, path):
"""Grab the date of creation through git log."""
cmd = 'git log --diff-filter=A --follow --format=%cd -1 -- {}'.format(path)
return self._git_date_cmd(cmd)
def _date_updated(self, path):
"""Grab the last date modified through git log."""
cmd = 'git log --format=%cd -1 -- {}'.format(path)
return self._git_date_cmd(cmd)
def _git_date_cmd(self, cmd):
"""Run bash command to retrieve and format date string."""
date_str = subprocess.check_output(cmd, shell=True)
date_dt = date_parser.parse(date_str)
formatted_date = date_dt.strftime(self.DATE_FORMAT)
return formatted_date
def _tags_and_github_link(self, path):
"""
        Use the file name as the tag, e.g. for /Users/ethen/machine-learning/trees/decision_tree.ipynb
we would use 'decision_tree' as the tag
"""
_, file_path = path.split(self.REPO_NAME)
_, file_name = os.path.split(file_path)
tags, _ = os.path.splitext(file_name)
# /blob/master indicates github master branch
link = self.BASE_URL + self.REPO_NAME + '/blob/master' + file_path
return tags, link
def _title(self, notebook):
"""
A title in the notebook always starts with the '#' indicating a
markdown level 1 header e.g. # Decision Tree (Classification)\n
thus we can just parse all the text in between the '#' and the line break '\n'
"""
# TODO : we could fall back to the file path if it doesn't exist perhaps?
title_pattern = re.compile('# (.*)\n')
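        # e.g. title_pattern.match('# Decision Tree (Classification)\n').group(1)
        #      -> 'Decision Tree (Classification)'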
for cell in notebook['cells']:
if cell['cell_type'] == 'markdown':
# the [0] indicates the # title pattern
# should always appear in the first line
source = cell['source'][0]
matched = title_pattern.match(source)
if matched is not None:
title = matched.group(1)
# newer version of notebooks includes a
# Table of Contents automatically in the first
# cell, skip that and find the next level 1 header
if not title == 'Table of Contents':
break
return title
def _construct_header(self):
"""Create a knowledge repo style header as a dictionary."""
def flatten_list(l):
"""
Although not needed for the current version, we could
have multiple tags and authors, in that case we would
need to flatten them out.
"""
flat = []
for item in l:
if isinstance(item, list):
flat += item
else:
flat.append(item)
return flat
header = {'cell_type': 'raw', 'metadata': {}}
# header text required by the knowledge repo
# a '- ' in front is required for knowledge repo tag
header_text = [
'---',
'title: {}'.format(self.title_),
'authors:',
'- {}'.format(self.AUTHOR),
'tags:',
'- ' + self.tags_,
'created_at: {}'.format(self.date_created_),
'updated_at: {}'.format(self.date_updated_),
'tldr: Nothing for tldr section as of now.',
'---']
header_text = flatten_list(header_text)
header_text = [text + '\n' for text in header_text[:-1]] + [header_text[-1]]
header['source'] = header_text
return header
def _construct_github_link_cell(self):
"""Add a cell that contains link to original notebook on github"""
github_link_cell = {
'cell_type': 'markdown',
'metadata': {},
'source': ['Link to original notebook: {}'.format(self.github_link_)]}
return github_link_cell
def add(self, notebook):
"""
Add the converted notebook to the knowledge repo.
Parameters
----------
notebook : dict
Jupyter notebook's raw json represented in dictionary format.
"""
with open(self._path, 'w', encoding='utf-8') as f:
json.dump(notebook, f)
# create a run knowledge repo command
destination = os.path.join(self.knowledge_repo_path, 'project', self.tags_)
cmd = 'knowledge_repo --repo {} add {} -p {}'.format(
self.knowledge_repo_path, self._path, destination)
# communicate with the shell output to enable
# continuation of the script execution
p = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=True)
p.communicate(input=b'generated by automated airbnb knowledge repo setup')
if not self.inplace:
os.remove(self._path)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='Convert the machine-learning repository to an Airbnb Knowledge Repo.')
parser.add_argument(
'--ml_repo', type=str, help='Path to the root directory of the machine-learning repo.')
parser.add_argument(
'--knowledge_repo', type=str, help='Path to the knowledge repo.')
parser.add_argument(
'--inplace', action='store_true', help='Modify the existing .ipynb in place.')
args = vars(parser.parse_args())
main(**args)
|
neuralnets/ELMoWordEmbeddings.py
|
nirvana0311/Elmo_experiment
| 390 |
83047
|
import urllib.request as urllib2
import urllib.parse as urlparse
from urllib.request import urlretrieve
import logging
import numpy as np
from allennlp.commands.elmo import ElmoEmbedder, DEFAULT_OPTIONS_FILE, DEFAULT_WEIGHT_FILE
import pickle as pkl
import os
import gzip
import sys
class ELMoWordEmbeddings:
def __init__(self, embeddings_path, elmo_options_file=DEFAULT_OPTIONS_FILE, elmo_weight_file=DEFAULT_WEIGHT_FILE, elmo_mode='average', elmo_cuda_device=-1):
self.embeddings_path = embeddings_path
self.embedding_name = os.path.splitext(os.path.basename(embeddings_path))[0] if embeddings_path is not None else 'None'
self.word2Idx = None
self.embeddings = None
self.elmo_options_file = elmo_options_file
self.elmo_weight_file = elmo_weight_file
self.elmo_cuda_device=elmo_cuda_device
self.elmo_mode = elmo_mode
self.elmo = None
self.cache_computed_elmo_embeddings = False
self.cache = {}
self.lazyCacheFiles = []
def getConfig(self):
return {
"embeddings_path": self.embeddings_path,
"elmo_options_file": self.elmo_options_file,
"elmo_weight_file": self.elmo_weight_file,
"elmo_mode": self.elmo_mode,
"elmo_cuda_device": self.elmo_cuda_device
}
def sentenceLookup(self, sentences):
elmo_vectors = None
# :: Elmo ::
if self.elmo_mode is not None:
elmo_vectors = self.getElmoEmbedding(sentences)
# :: Word Embedding ::
tokens_vectors = None
if self.embeddings_path is not None:
if self.word2Idx is None or self.embeddings is None:
self.word2Idx, self.embeddings = self.readEmbeddings(self.embeddings_path)
tokens_vectors = []
for sentence in sentences:
per_token_embedding = []
for token in sentence['tokens']:
vecId = self.word2Idx['UNKNOWN_TOKEN']
if token in self.word2Idx:
vecId = self.word2Idx[token]
elif token.lower() in self.word2Idx:
vecId = self.word2Idx[token.lower()]
per_token_embedding.append(self.embeddings[vecId])
per_token_embedding = np.asarray(per_token_embedding)
tokens_vectors.append(per_token_embedding)
out_vectors = {}
if tokens_vectors is not None:
out_vectors['tokens'] = tokens_vectors
if elmo_vectors is not None:
out_vectors['elmo'] = elmo_vectors
return out_vectors
def batchLookup(self, sentences, feature_name):
if feature_name == 'tokens':
if self.word2Idx is None or self.embeddings is None:
self.word2Idx, self.embeddings = self.readEmbeddings(self.embeddings_path)
tokens_vectors = []
for sentence in sentences:
per_token_embedding = []
for token in sentence['tokens']:
vecId = self.word2Idx['UNKNOWN_TOKEN']
if token in self.word2Idx:
vecId = self.word2Idx[token]
elif token.lower() in self.word2Idx:
vecId = self.word2Idx[token.lower()]
per_token_embedding.append(self.embeddings[vecId])
per_token_embedding = np.asarray(per_token_embedding)
tokens_vectors.append(per_token_embedding)
return np.asarray(tokens_vectors)
elif feature_name == 'elmo':
return np.asarray(self.getElmoEmbedding(sentences))
else:
print("Unknown feature name was passed to singleSentenceLookup")
assert(False)
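    # Hedged note (assumes the default ELMo model, which yields vectors of shape
    # (3, seq_len, 1024)): 'average' -> (seq_len, 1024); 'weighted_average' ->
    # (seq_len, 3, 1024), leaving the layer weighting to the downstream model;
    # 'last' or an integer n -> a single (seq_len, 1024) layer.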
def applyElmoMode(self, elmo_vectors):
if self.elmo_mode == 'average':
return np.average(elmo_vectors, axis=0).astype(np.float32)
elif self.elmo_mode == 'weighted_average':
return np.swapaxes(elmo_vectors,0,1)
elif self.elmo_mode == 'last':
return elmo_vectors[-1, :, :]
elif isinstance(self.elmo_mode, int):
return elmo_vectors[int(self.elmo_mode), :, :]
else:
print("Unknown ELMo mode")
assert (False)
def getElmoEmbedding(self, sentences):
if len(self.lazyCacheFiles) > 0:
self._loadLazyCache()
elmo_embeddings = []
non_cached_sentences = []
non_cached_sentences_indices = []
# :: Lookup cached sentences ::
for sentence in sentences:
tokens = sentence['tokens']
cache_key = tuple(tokens)
if len(self.cache) > 0 and cache_key in self.cache:
elmo_embeddings.append(self.applyElmoMode(self.cache[cache_key]))
else:
non_cached_sentences.append(tokens)
non_cached_sentences_indices.append(len(elmo_embeddings))
elmo_embeddings.append(None)
# :: Compute ELMo on the fly ::
if len(non_cached_sentences) > 0:
if self.elmo is None:
self.loadELMo()
idx = 0
for elmo_vectors in self.elmo.embed_sentences(non_cached_sentences):
                assert elmo_embeddings[non_cached_sentences_indices[idx]] is None
elmo_embeddings[non_cached_sentences_indices[idx]] = self.applyElmoMode(elmo_vectors)
if self.cache_computed_elmo_embeddings:
tokens = non_cached_sentences[idx]
cache_key = tuple(tokens)
self.cache[cache_key] = elmo_vectors
idx += 1
return elmo_embeddings
def getIdentifier(self):
"""Returns a unique identifier for this lookup function"""
return "ELMoWordEmbeddings_" + self.embedding_name + "_" + str(self.elmo_mode)
def loadELMo(self):
self.elmo = ElmoEmbedder(self.elmo_options_file, self.elmo_weight_file, self.elmo_cuda_device)
def loadCache(self, inputPath):
self.lazyCacheFiles.append(inputPath)
def storeCache(self, outputPath):
f = open(outputPath, 'wb')
pkl.dump(self.cache, f, -1)
f.close()
def addToCache(self, sentences):
if self.elmo is None:
self.loadELMo()
idx = 0
for elmoEmbedding in self.elmo.embed_sentences(sentences):
cache_key = tuple(sentences[idx])
self.cache[cache_key] = elmoEmbedding
idx += 1
def _loadLazyCache(self):
while len(self.lazyCacheFiles) > 0:
inputPath = self.lazyCacheFiles.pop()
if not os.path.isfile(inputPath):
print("ELMo cache file not found:", inputPath)
continue
f = open(inputPath, 'rb')
loaded_cache = pkl.load(f)
f.close()
if len(self.cache) == 0:
self.cache = loaded_cache
else:
self.cache.update(loaded_cache)
def readEmbeddings(self, embeddingsPath):
filename = os.path.basename(embeddingsPath)
if not os.path.isfile(embeddingsPath):
if filename in ['komninos_english_embeddings.gz', 'levy_english_dependency_embeddings.gz',
'reimers_german_embeddings.gz']:
self.getEmbeddings(filename, embeddingsPath)
else:
print("The embeddings file %s was not found" % embeddingsPath)
exit()
# :: Read in word embeddings ::
logging.info("Read file: %s" % embeddingsPath)
word2Idx = {}
embeddings = []
embeddingsIn = gzip.open(embeddingsPath, "rt") if embeddingsPath.endswith('.gz') else open(embeddingsPath,
encoding="utf8")
embeddingsDimension = None
for line in embeddingsIn:
split = line.rstrip().split(" ")
word = split[0]
            if embeddingsDimension is None:
                embeddingsDimension = len(split) - 1
            # Assure that all lines in the embeddings file have the same number of dimensions
            if (len(split) - 1) != embeddingsDimension:
print("ERROR: A line in the embeddings file had more or less dimensions than expected. Skip token.")
continue
if len(word2Idx)==0: # Add padding+unknown
word2Idx["PADDING_TOKEN"] = len(word2Idx)
vector = np.zeros(embeddingsDimension)
embeddings.append(vector)
word2Idx["UNKNOWN_TOKEN"] = len(word2Idx)
rndState = np.random.RandomState(
seed=12345) # Fixed rnd seed for unknown token, so that it is always the same
vector = rndState.uniform(-0.25, 0.25, embeddingsDimension) # Alternativ -sqrt(3/dim) ... sqrt(3/dim)
embeddings.append(vector)
vector = np.array([float(num) for num in split[1:]])
embeddings.append(vector)
word2Idx[word] = len(word2Idx)
return word2Idx, embeddings
def getEmbeddings(self, filename, savePath):
if not os.path.isfile(savePath):
self.download("https://public.ukp.informatik.tu-darmstadt.de/reimers/embeddings/" + filename, savePath)
def download(self, url, savePath, silent=False):
filename = os.path.basename(urlparse.urlparse(url).path) or 'downloaded.file'
def get_size():
meta = urllib2.urlopen(url).info()
meta_func = meta.getheaders if hasattr(
meta, 'getheaders') else meta.get_all
meta_length = meta_func('Content-Length')
try:
return int(meta_length[0])
except:
return 0
def kb_to_mb(kb):
return kb / 1024.0 / 1024.0
def callback(blocks, block_size, total_size):
current = blocks * block_size
percent = 100.0 * current / total_size
line = '[{0}{1}]'.format(
'=' * int(percent / 2), ' ' * (50 - int(percent / 2)))
status = '\r{0:3.0f}%{1} {2:3.1f}/{3:3.1f} MB'
sys.stdout.write(
status.format(
percent, line, kb_to_mb(current), kb_to_mb(total_size)))
logging.info(
'Downloading: {0} ({1:3.1f} MB)'.format(url, kb_to_mb(get_size())))
try:
(savePath, headers) = urlretrieve(url, savePath, None if silent else callback)
except:
os.remove(savePath)
raise Exception("Can't download {0}".format(savePath))
else:
print()
logging.info('Downloaded to: {0}'.format(savePath))
return savePath
|
tdda/rexpy/testseq.py
|
jjlee42/tdda
| 232 |
83057
|
from __future__ import print_function
from tdda.rexpy import extract
from tdda.rexpy.seq import common_string_sequence
from tdda.rexpy.relib import re
x = extract(['Roger', 'Coger', 'Doger'], tag=True, as_object=True)
print(x)
patternToExamples = x.pattern_matches()
sequences = []
for j, (pattern, examples) in enumerate(patternToExamples.items()):
N = len(examples)
if N < 1:
print('%s:%s' % (pattern, examples))
else:
eparts = [re.match(x.results.rex[j], e).groups() for e in examples]
nparts = len(eparts[0])
for i in range(nparts):
(L, R) = (eparts[0][i], eparts[1][i])
n = 2
s = common_string_sequence(L, R)
while n < N and s != '':
s = common_string_sequence(s, eparts[n][i])
n += 1
sequences.append(s)
print(sequences)
|
tests/test_query.py
|
adamchainz/django-postgres-extra
| 529 |
83060
|
from django.db import models
from django.db.models import Case, F, Q, Value, When
from psqlextra.expressions import HStoreRef
from psqlextra.fields import HStoreField
from .fake_model import get_fake_model
def test_query_annotate_hstore_key_ref():
"""Tests whether annotating using a :see:HStoreRef expression works
correctly.
This allows you to select an individual hstore key.
"""
model_fk = get_fake_model({"title": HStoreField()})
model = get_fake_model(
{"fk": models.ForeignKey(model_fk, on_delete=models.CASCADE)}
)
fk = model_fk.objects.create(title={"en": "english", "ar": "arabic"})
model.objects.create(fk=fk)
queryset = (
model.objects.annotate(english_title=HStoreRef("fk__title", "en"))
.values("english_title")
.first()
)
assert queryset["english_title"] == "english"
def test_query_annotate_rename():
"""Tests whether field names can be overwritten with a annotated field."""
model = get_fake_model({"title": models.CharField(max_length=12)})
model.objects.create(title="swen")
obj = model.objects.annotate(title=F("title")).first()
assert obj.title == "swen"
def test_query_annotate_rename_chain():
"""Tests whether annotations are behaving correctly after a QuerySet
chain."""
model = get_fake_model(
{
"name": models.CharField(max_length=10),
"value": models.IntegerField(),
}
)
model.objects.create(name="test", value=23)
obj = model.objects.values("name").annotate(value=F("value"))[:1]
assert "value" in obj[0]
assert obj[0]["value"] == 23
def test_query_annotate_rename_order():
"""Tests whether annotation order is preserved after a rename."""
model = get_fake_model(
{
"name": models.CharField(max_length=10),
"value": models.IntegerField(),
}
)
qs = model.objects.annotate(value=F("value"), value_2=F("value"))
assert list(qs.query.annotations.keys()) == ["value", "value_2"]
def test_query_annotate_in_expression():
"""Tests whether annotations can be used in expressions."""
model = get_fake_model({"name": models.CharField(max_length=10)})
model.objects.create(name="henk")
result = model.objects.annotate(
real_name=F("name"),
is_he_henk=Case(
When(Q(real_name="henk"), then=Value("really henk")),
default=Value("definitely not henk"),
output_field=models.CharField(),
),
).first()
assert result.real_name == "henk"
assert result.is_he_henk == "really henk"
def test_query_hstore_value_update_f_ref():
"""Tests whether F(..) expressions can be used in hstore values when
performing update queries."""
model = get_fake_model(
{"name": models.CharField(max_length=255), "name_new": HStoreField()}
)
model.objects.create(name="waqas", name_new=dict(en="swen"))
model.objects.update(name_new=dict(en=models.F("name")))
inst = model.objects.all().first()
assert inst.name_new.get("en") == "waqas"
def test_query_hstore_value_update_cast():
"""Tests whether values in a HStore field are automatically cast to strings
when doing updates."""
model = get_fake_model({"title": HStoreField()})
model.objects.create(title=dict(en="test"))
model.objects.update(title=dict(en=2))
inst = model.objects.all().first()
assert inst.title.get("en") == "2"
def test_query_hstore_value_update_escape():
"""Tests whether values in a HStore field are properly escaped using
prepared statement values."""
model = get_fake_model({"title": HStoreField()})
model.objects.create(title=dict(en="test"))
model.objects.update(title=dict(en="console.log('test')"))
inst = model.objects.all().first()
assert inst.title.get("en") == "console.log('test')"
|
examples/issues/issue372.py
|
tgolsson/appJar
| 666 |
83100
|
import sys
sys.path.append("../../")
from appJar import gui
def showPositions():
for widg in app.getContainer().grid_slaves():
row, column = widg.grid_info()["row"], widg.grid_info()["column"]
print(widg, row, column)
with gui("Grid Demo", "300x300", sticky="news", expand="both") as app:
for x in range(5):
for y in range(5):
app.label(str(x)+str(y), row=x, column=y)
app.button("PRESS", showPositions, colspan=5)
|
qcodes/dataset/descriptions/versioning/converters.py
|
riju-pal/QCoDeS_riju
| 223 |
83103
|
"""
This module contains functions which implement conversion between different
(neighbouring) versions of RunDescriber.
"""
from typing import Dict, List
from ..dependencies import InterDependencies_
from ..param_spec import ParamSpec, ParamSpecBase
from .rundescribertypes import (RunDescriberV0Dict, RunDescriberV1Dict,
RunDescriberV2Dict, RunDescriberV3Dict)
from .v0 import InterDependencies
def old_to_new(idps: InterDependencies) -> InterDependencies_:
"""
Create a new InterDependencies_ object (new style) from an existing
InterDependencies object (old style). Leaves the original object unchanged.
Incidentally, this function can serve as a validator of the original object
"""
namedict: Dict[str, ParamSpec] = {ps.name: ps for ps in idps.paramspecs}
dependencies = {}
inferences = {}
standalones_mut = []
root_paramspecs: List[ParamSpecBase] = []
for ps in idps.paramspecs:
deps = tuple(namedict[n].base_version() for n in ps.depends_on_)
inffs = tuple(namedict[n].base_version() for n in ps.inferred_from_)
if len(deps) > 0:
dependencies.update({ps.base_version(): deps})
root_paramspecs += list(deps)
if len(inffs) > 0:
inferences.update({ps.base_version(): inffs})
root_paramspecs += list(inffs)
if len(deps) == len(inffs) == 0:
standalones_mut.append(ps.base_version())
standalones = tuple(set(standalones_mut).difference(set(root_paramspecs)))
idps_ = InterDependencies_(dependencies=dependencies,
inferences=inferences,
standalones=standalones)
return idps_
def new_to_old(idps: InterDependencies_) -> InterDependencies:
"""
Create a new InterDependencies object (old style) from an existing
InterDependencies_ object (new style). Leaves the original object
unchanged. Only meant to be used for ensuring backwards-compatibility
until we update sqlite module to forget about ParamSpecs
"""
paramspecs: Dict[str, ParamSpec] = {}
# first the independent parameters
for indeps in idps.dependencies.values():
for indep in indeps:
paramspecs.update({indep.name: ParamSpec(name=indep.name,
paramtype=indep.type,
label=indep.label,
unit=indep.unit)})
for inffs in idps.inferences.values():
for inff in inffs:
paramspecs.update({inff.name: ParamSpec(name=inff.name,
paramtype=inff.type,
label=inff.label,
unit=inff.unit)})
for ps_base in idps._paramspec_to_id.keys():
paramspecs.update({ps_base.name: ParamSpec(name=ps_base.name,
paramtype=ps_base.type,
label=ps_base.label,
unit=ps_base.unit)})
for ps, indeps in idps.dependencies.items():
for indep in indeps:
paramspecs[ps.name]._depends_on.append(indep.name)
for ps, inffs in idps.inferences.items():
for inff in inffs:
paramspecs[ps.name]._inferred_from.append(inff.name)
return InterDependencies(*tuple(paramspecs.values()))
def v0_to_v1(old: RunDescriberV0Dict) -> RunDescriberV1Dict:
"""
Convert a v0 RunDescriber Dict to a v1 RunDescriber Dict
"""
old_idps = InterDependencies._from_dict(old["interdependencies"])
new_idps_dict = old_to_new(old_idps)._to_dict()
return RunDescriberV1Dict(version=1, interdependencies=new_idps_dict)
def v1_to_v2(old: RunDescriberV1Dict) -> RunDescriberV2Dict:
"""
Convert a v1 RunDescriber Dict to a v2 RunDescriber Dict
"""
interdeps_dict = old['interdependencies']
interdeps_ = InterDependencies_._from_dict(interdeps_dict)
interdepsdict = new_to_old(interdeps_)._to_dict()
return RunDescriberV2Dict(version=2, interdependencies_=interdeps_dict,
interdependencies=interdepsdict)
def v2_to_v3(old: RunDescriberV2Dict) -> RunDescriberV3Dict:
return RunDescriberV3Dict(version=3,
interdependencies=old['interdependencies'],
interdependencies_=old['interdependencies_'],
shapes=None
)
def v0_to_v2(old: RunDescriberV0Dict) -> RunDescriberV2Dict:
"""
Convert a v0 RunDescriber Dict to a v2 RunDescriber Dict
"""
return v1_to_v2(v0_to_v1(old))
def v0_to_v3(old: RunDescriberV0Dict) -> RunDescriberV3Dict:
return v2_to_v3(v0_to_v2(old))
def v1_to_v3(old: RunDescriberV1Dict) -> RunDescriberV3Dict:
return v2_to_v3(v1_to_v2(old))
def v3_to_v2(new: RunDescriberV3Dict) -> RunDescriberV2Dict:
return RunDescriberV2Dict(version=2,
interdependencies=new['interdependencies'],
interdependencies_=new['interdependencies_'],
)
def v2_to_v1(new: RunDescriberV2Dict) -> RunDescriberV1Dict:
"""
Convert a v2 RunDescriber Dict to a v1 RunDescriber Dict
"""
rundescriberdictv1 = RunDescriberV1Dict(
version=1,
interdependencies=new['interdependencies_']
)
return rundescriberdictv1
def v1_to_v0(new: RunDescriberV1Dict) -> RunDescriberV0Dict:
"""
Convert a v1 RunDescriber Dict to a v0 RunDescriber Dict
"""
interdeps_dict = new['interdependencies']
interdeps_ = InterDependencies_._from_dict(interdeps_dict)
interdepsdict = new_to_old(interdeps_)._to_dict()
rundescriberv0dict = RunDescriberV0Dict(version=0,
interdependencies=interdepsdict)
return rundescriberv0dict
def v3_to_v1(new: RunDescriberV3Dict) -> RunDescriberV1Dict:
return v2_to_v1(v3_to_v2(new))
def v2_to_v0(new: RunDescriberV2Dict) -> RunDescriberV0Dict:
"""
Convert a v2 RunDescriber Dict to a v0 RunDescriber Dict
"""
return v1_to_v0(v2_to_v1(new))
def v3_to_v0(new: RunDescriberV3Dict) -> RunDescriberV0Dict:
return v1_to_v0(v3_to_v1(new))
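# Hedged note (not part of the original module): conversions between non-adjacent
# versions are simply compositions of the neighbouring steps, e.g.
#   v0_to_v3(d) == v2_to_v3(v1_to_v2(v0_to_v1(d)))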
|
utest/test/api/my_lib_args.py
|
hugovk/SeleniumLibrary
| 792 |
83108
|
from SeleniumLibrary.base import LibraryComponent, keyword
class my_lib_args(LibraryComponent):
def __init__(self, ctx, arg1, arg2, *args, **kwargs):
LibraryComponent.__init__(self, ctx)
self.arg1 = arg1
self.arg2 = arg2
self.args = args
self.kwargs = kwargs
@keyword(tags=["MyTag"])
def foo_1(self):
self.info("foo")
@keyword
def bar_2(self, arg):
self.info(arg)
@keyword
def add_cookie(self, foo, bar):
self.info(foo)
self.info(bar)
|
odps/mars_extension/__init__.py
|
wjsi/aliyun-odps-python-sdk
| 412 |
83112
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from .core import create_mars_cluster, to_mars_dataframe, \
persist_mars_dataframe, run_script_in_mars, run_mars_job, \
list_mars_instances, sql_to_mars_dataframe
except ImportError:
create_mars_cluster = None
to_mars_dataframe = None
persist_mars_dataframe = None
run_mars_script = None
run_mars_job = None
list_mars_instances = None
sql_to_mars_dataframe = None
try:
from . import dataframe
except ImportError:
dataframe = None
try:
from . import tensor
except ImportError:
tensor = None
try:
from mars.executor import register
from mars.remote.core import RemoteFunction
from .core import execute_with_odps_context
from .run_script import RunScript
register(RemoteFunction, execute_with_odps_context(RemoteFunction.execute))
register(RunScript, execute_with_odps_context(RunScript.execute))
except ImportError:
pass
# register filesystem
try:
from mars.filesystem import file_systems
from .filesystem import VolumeFileSystem
file_systems['odps'] = VolumeFileSystem
except ImportError:
pass
try:
from mars.lib.filesystem.core import register_filesystem
from .filesystem import VolumeFileSystem
register_filesystem('odps', VolumeFileSystem)
except ImportError:
pass
INTERNAL_PATTERN = r'\/[^\.]+\.[^\.-]+\.[^\.-]+\-[^\.-]+\.'
|
l10n_br_point_of_sale/models/__init__.py
|
kaoecoito/odoo-brasil
| 181 |
83180
|
# -*- coding: utf-8 -*-
# © 2016 <NAME> <<EMAIL>>, Trustcode
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from . import pos_order
from . import pos_session
from . import invoice_eletronic
from . import account_journal
from . import pos_payment_method
|
scripts/remove_after_use/verify_groups_guardian_migration.py
|
gaybro8777/osf.io
| 628 |
83196
|
"""Script to verify permissions have transferred post groups/guardian.
"docker-compose run --rm web python3 -m scripts.remove_after_use.verify_groups_guardian_migration"
"""
import logging
from random import randint
from website.app import setup_django
setup_django()
from django.apps import apps
from django.contrib.auth.models import Permission, Group
from osf.utils.permissions import PERMISSIONS, reduce_permissions
from osf.models import AbstractNode, Contributor, Preprint, Node, Registration, QuickFilesNode
from osf.models.node import NodeGroupObjectPermission
from osf.models.preprint import PreprintGroupObjectPermission
from osf.utils.permissions import READ, WRITE, ADMIN
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def check_expected(expected, actual, error_msg):
if expected != actual:
logger.info('{}. Expected {} rows migrated; received {}.'.format(error_msg, expected, actual))
else:
logger.info('{} rows added.'.format(actual))
def verify_permissions_created():
"""
Expecting three permissions added, read, write, admin perms
"""
expected = len(PERMISSIONS)
actual = Permission.objects.filter(codename__in=PERMISSIONS).count()
    check_expected(expected, actual, 'Discrepancy in Permission table.')
def verify_auth_groups():
"""
Expecting three groups added for every AbstractNode - read/write/admin
"""
expected = AbstractNode.objects.count() * 3
actual = Group.objects.filter(name__icontains='node_').count()
    check_expected(expected, actual, 'Discrepancy in auth_group table.')
def verify_expected_node_group_object_permission_counts():
"""
    For every AbstractNode, three Django groups - admin, write, read - are created.
    The admin group gets admin/write/read perms, write gets write/read, and read gets read.
    So for every node, 6 rows are added to NodeGroupObjectPermission, linking
    these groups with their permissions to the given node.
"""
expected_nodegroupobjperm_count = AbstractNode.objects.count() * 6
actual_nodegroupobjperm_count = NodeGroupObjectPermission.objects.count()
check_expected(expected_nodegroupobjperm_count, actual_nodegroupobjperm_count, 'Discrepancy in NodeGroupObjectPermission table.')
def verify_expected_contributor_migration():
"""
Based on contributor admin/write/read columns, users are migrated to the osfgroupuser table and added to the appropriate Django group.
"""
OSFUserGroup = apps.get_model('osf', 'osfuser_groups')
expected = Contributor.objects.count()
actual = OSFUserGroup.objects.filter(group__name__icontains='node_').count()
check_expected(expected, actual, 'Discrepancy in contributor migration to OSFUserGroup table.')
def verify_preprint_foreign_key_migration():
expected_preprintgroupobjperm_count = Preprint.objects.count() * 6
actual_preprintgroupobjperm_count = PreprintGroupObjectPermission.objects.count()
check_expected(expected_preprintgroupobjperm_count, actual_preprintgroupobjperm_count, 'Discrepancy in PreprintGroupObjectPermission table.')
def verify_random_objects():
resources = [Node, Registration, QuickFilesNode]
for resource in resources:
for i in range(1,10):
random_resource = _get_random_object(resource)
if random_resource:
_verify_contributor_perms(random_resource)
def _verify_contributor_perms(resource):
for user in resource.contributors:
contrib = Contributor.objects.get(node=resource, user=user)
if contrib.admin:
if contrib.permission != ADMIN:
_suspected_contributor_migration_error(contrib)
elif contrib.write:
if contrib.permission != WRITE:
_suspected_contributor_migration_error(contrib)
elif contrib.read:
if contrib.permission != READ:
_suspected_contributor_migration_error(contrib)
def _suspected_contributor_migration_error(contrib):
logger.info('Suspected contributor migration error on {}.'.format(contrib._id))
def _get_random_object(model):
model_count = model.objects.count()
if model_count:
        return model.objects.all()[randint(0, model_count - 1)]
return None
def main():
logger.info('Verifying permissions created...')
verify_permissions_created()
logger.info('Verifying auth groups created...')
verify_auth_groups()
logger.info('Verifying node groups given permissions to their nodes...')
verify_expected_node_group_object_permission_counts()
logger.info('Verifying contributors added to node django groups...')
verify_expected_contributor_migration()
logger.info('Verifying preprint perms migrated to direct foreign key table...')
verify_preprint_foreign_key_migration()
logger.info('Verifying a selection of random contributor permissions...')
verify_random_objects()
logger.info('Done!')
if __name__ == '__main__':
main()
|
test/scenario_test/route_server_malformed_test.py
|
alistairking/gobgp
| 2,938 |
83206
|
# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import time
import unittest
import inspect
import nose
from lib.noseplugin import OptionParser, parser_option
from lib import base
from lib.base import BGP_FSM_ESTABLISHED, local
from lib.gobgp import GoBGPContainer
from lib.exabgp import ExaBGPContainer
counter = 1
_SCENARIOS = {}
def register_scenario(cls):
global counter
_SCENARIOS[counter] = cls
counter += 1
def lookup_scenario(name):
for value in list(_SCENARIOS.values()):
if value.__name__ == name:
return value
return None
def wait_for(f, timeout=120):
interval = 1
count = 0
while True:
if f():
return
time.sleep(interval)
count += interval
if count >= timeout:
raise Exception('timeout')
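# Hedged usage note (illustrative): wait_for(lambda: 'UPDATE message error' in e1.log(), timeout=60)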
@register_scenario
class MalformedMpReachNlri(object):
"""
    No.1 malformed mp-reach-nlri
"""
@staticmethod
def boot(env):
gobgp_ctn_image_name = env.parser_option.gobgp_image
log_level = env.parser_option.gobgp_log_level
g1 = GoBGPContainer(name='g1', asn=65000, router_id='192.168.0.1',
ctn_image_name=gobgp_ctn_image_name,
log_level=log_level)
e1 = ExaBGPContainer(name='e1', asn=65001, router_id='192.168.0.2')
e2 = ExaBGPContainer(name='e2', asn=65001, router_id='192.168.0.2')
ctns = [g1, e1, e2]
initial_wait_time = max(ctn.run() for ctn in ctns)
time.sleep(initial_wait_time)
for q in [e1, e2]:
g1.add_peer(q, is_rs_client=True)
q.add_peer(g1)
env.g1 = g1
env.e1 = e1
env.e2 = e2
@staticmethod
def setup(env):
g1 = env.g1
e1 = env.e1
e2 = env.e2
for c in [e1, e2]:
g1.wait_for(BGP_FSM_ESTABLISHED, c)
# advertise malformed MP_REACH_NLRI
e1.add_route('10.7.0.17/32', attribute='0x0e 0x60 0x11223344')
@staticmethod
def check(env):
g1 = env.g1
e1 = env.e1
e2 = env.e2
def f():
for line in e1.log().split('\n'):
if 'UPDATE message error / Attribute Flags Error / 0x600E0411223344' in line:
return True
return False
wait_for(f)
# check e2 is still established
g1.wait_for(BGP_FSM_ESTABLISHED, e2)
@staticmethod
def executor(env):
lookup_scenario("MalformedMpReachNlri").boot(env)
lookup_scenario("MalformedMpReachNlri").setup(env)
lookup_scenario("MalformedMpReachNlri").check(env)
@register_scenario
class MalformedMpUnReachNlri(object):
"""
    No.2 malformed mp-unreach-nlri
"""
@staticmethod
def boot(env):
lookup_scenario("MalformedMpReachNlri").boot(env)
@staticmethod
def setup(env):
g1 = env.g1
e1 = env.e1
e2 = env.e2
for c in [e1, e2]:
g1.wait_for(BGP_FSM_ESTABLISHED, c)
# advertise malformed MP_UNREACH_NLRI
e1.add_route('10.7.0.17/32', attribute='0x0f 0x60 0x11223344')
@staticmethod
def check(env):
g1 = env.g1
e1 = env.e1
e2 = env.e2
def f():
for line in e1.log().split('\n'):
if 'UPDATE message error / Attribute Flags Error / 0x600F0411223344' in line:
return True
return False
wait_for(f)
# check e2 is still established
g1.wait_for(BGP_FSM_ESTABLISHED, e2)
@staticmethod
def executor(env):
lookup_scenario("MalformedMpUnReachNlri").boot(env)
lookup_scenario("MalformedMpUnReachNlri").setup(env)
lookup_scenario("MalformedMpUnReachNlri").check(env)
@register_scenario
class MalformedAsPath(object):
"""
    No.3 malformed as-path
"""
@staticmethod
def boot(env):
lookup_scenario("MalformedMpReachNlri").boot(env)
@staticmethod
def setup(env):
g1 = env.g1
e1 = env.e1
e2 = env.e2
for c in [e1, e2]:
g1.wait_for(BGP_FSM_ESTABLISHED, c)
# advertise malformed AS_PATH
        # Send an AS_PATH attribute with invalid attribute flags
        # Attribute Type 0x02 (AS_PATH)
        # Attribute Flag 0x60 (optional, transitive) -> # correct value = 0x40 (well-known, transitive)
        # Attribute Value 0x11223344
e1.add_route('10.7.0.17/32', attribute='0x02 0x60 0x11223344')
@staticmethod
def check(env):
g1 = env.g1
e1 = env.e1
e2 = env.e2
def f():
for line in e1.log().split('\n'):
if 'UPDATE message error / Attribute Flags Error / 0x60020411223344' in line:
return True
return False
wait_for(f)
# check e2 is still established
g1.wait_for(BGP_FSM_ESTABLISHED, e2)
@staticmethod
def executor(env):
lookup_scenario("MalformedAsPath").boot(env)
lookup_scenario("MalformedAsPath").setup(env)
lookup_scenario("MalformedAsPath").check(env)
@register_scenario
class MalformedAs4Path(object):
"""
    No.4 malformed as4-path
"""
@staticmethod
def boot(env):
lookup_scenario("MalformedMpReachNlri").boot(env)
@staticmethod
def setup(env):
g1 = env.g1
e1 = env.e1
e2 = env.e2
for c in [e1, e2]:
g1.wait_for(BGP_FSM_ESTABLISHED, c)
# advertise malformed AS4_PATH
e1.add_route('10.7.0.17/32', attribute='0x11 0x60 0x11223344')
@staticmethod
def check(env):
g1 = env.g1
e1 = env.e1
e2 = env.e2
def f():
for line in e1.log().split('\n'):
if 'UPDATE message error / Attribute Flags Error / 0x60110411223344' in line:
return True
return False
wait_for(f)
# check e2 is still established
g1.wait_for(BGP_FSM_ESTABLISHED, e2)
@staticmethod
def executor(env):
lookup_scenario("MalformedAs4Path").boot(env)
lookup_scenario("MalformedAs4Path").setup(env)
lookup_scenario("MalformedAs4Path").check(env)
@register_scenario
class MalformedNexthop(object):
"""
    No.5 malformed nexthop
"""
@staticmethod
def boot(env):
lookup_scenario("MalformedMpReachNlri").boot(env)
@staticmethod
def setup(env):
g1 = env.g1
e1 = env.e1
e2 = env.e2
for c in [e1, e2]:
g1.wait_for(BGP_FSM_ESTABLISHED, c)
# advertise malformed NEXT_HOP
# 0x0e: MP_REACH_NLRI
# 0x60: Optional, Transitive
# 0x01: AFI(IPv4)
# 0x01: SAFI(unicast)
# 0x10: Length of Next Hop Address
# 0xffffff00: Network address of Next Hop
# 0x00: Reserved
e1.add_route('10.7.0.17/32', attribute='0x0e 0x60 0x010110ffffff0000')
@staticmethod
def check(env):
g1 = env.g1
e1 = env.e1
e2 = env.e2
def f():
for line in e1.log().split('\n'):
if 'UPDATE message error / Attribute Flags Error / 0x600E08010110FFFFFF0000' in line:
return True
return False
wait_for(f)
# check e2 is still established
g1.wait_for(BGP_FSM_ESTABLISHED, e2)
@staticmethod
def executor(env):
lookup_scenario("MalformedNexthop").boot(env)
lookup_scenario("MalformedNexthop").setup(env)
lookup_scenario("MalformedNexthop").check(env)
@register_scenario
class MalformedRouteFamily(object):
"""
    No.6 malformed route family
"""
@staticmethod
def boot(env):
lookup_scenario("MalformedMpReachNlri").boot(env)
@staticmethod
def setup(env):
g1 = env.g1
e1 = env.e1
e2 = env.e2
for c in [e1, e2]:
g1.wait_for(BGP_FSM_ESTABLISHED, c)
# advertise malformed ROUTE_FAMILY
# 0x0e: MP_REACH_NLRI
# 0x60: Optional, Transitive
        # 0x0002: AFI(IPv6)
        # 0x01: SAFI(unicast)
        # 0x10: Length of Next Hop Address (16)
        # 0x20010db8...0001: Network address of Next Hop (2001:db8::1)
        # 0x00: Reserved
e1.add_route('10.7.0.17/32', attribute='0x0e 0x60 0x0002011020010db800000000000000000000000100')
@staticmethod
def check(env):
g1 = env.g1
e1 = env.e1
e2 = env.e2
def f():
for line in e1.log().split('\n'):
if 'UPDATE message error / Attribute Flags Error / 0x600E150002011020010DB800000000000000000000000100' in line:
return True
return False
wait_for(f)
# check e2 is still established
g1.wait_for(BGP_FSM_ESTABLISHED, e2)
@staticmethod
def executor(env):
lookup_scenario("MalformedRouteFamily").boot(env)
lookup_scenario("MalformedRouteFamily").setup(env)
lookup_scenario("MalformedRouteFamily").check(env)
@register_scenario
class MalformedAsPathSegmentLengthInvalid(object):
"""
    No.7 malformed aspath segment length invalid
"""
@staticmethod
def boot(env):
lookup_scenario("MalformedMpReachNlri").boot(env)
@staticmethod
def setup(env):
g1 = env.g1
e1 = env.e1
e2 = env.e2
for c in [e1, e2]:
g1.wait_for(BGP_FSM_ESTABLISHED, c)
# advertise malformed AS_PATH SEGMENT LENGTH
        # Send an AS_PATH attribute whose segment length does not match the number of AS numbers it carries
        # Attribute Type 0x02 (AS_PATH)
        # Attribute Flag 0x40 (well-known transitive)
        # Attribute Value 0x0202ffdc (
# segment type = 02
# segment length = 02 -> # correct value = 01
# as number = 65500 )
e1.add_route('10.7.0.17/32', attribute='0x02 0x40 0x0202ffdc')
@staticmethod
def check(env):
g1 = env.g1
e1 = env.e1
e2 = env.e2
def f():
for line in e1.log().split('\n'):
if 'UPDATE message error / Malformed AS_PATH / 0x4002040202FFDC' in line:
return True
return False
wait_for(f)
# check e2 is still established
g1.wait_for(BGP_FSM_ESTABLISHED, e2)
@staticmethod
def executor(env):
lookup_scenario("MalformedAsPathSegmentLengthInvalid").boot(env)
lookup_scenario("MalformedAsPathSegmentLengthInvalid").setup(env)
lookup_scenario("MalformedAsPathSegmentLengthInvalid").check(env)
@register_scenario
class MalformedNexthopLoopbackAddr(object):
"""
    No.8 malformed nexthop loopback addr
"""
@staticmethod
def boot(env):
lookup_scenario("MalformedMpReachNlri").boot(env)
@staticmethod
def setup(env):
g1 = env.g1
e1 = env.e1
e2 = env.e2
for c in [e1, e2]:
g1.wait_for(BGP_FSM_ESTABLISHED, c)
        # Invalid NEXT_HOP Attribute
        # Send an invalid next-hop address
        # next-hop 127.0.0.1 -> # correct value = any address other than loopback or 0.0.0.0
e1.add_route('10.7.0.17/32', nexthop='127.0.0.1')
@staticmethod
def check(env):
g1 = env.g1
e1 = env.e1
e2 = env.e2
def f():
for line in e1.log().split('\n'):
if 'UPDATE message error / Invalid NEXT_HOP Attribute / 0x4003047F000001' in line:
return True
return False
wait_for(f)
# check e2 is still established
g1.wait_for(BGP_FSM_ESTABLISHED, e2)
@staticmethod
def executor(env):
lookup_scenario("MalformedNexthopLoopbackAddr").boot(env)
lookup_scenario("MalformedNexthopLoopbackAddr").setup(env)
lookup_scenario("MalformedNexthopLoopbackAddr").check(env)
@register_scenario
class MalformedOriginType(object):
"""
    No.9 malformed origin type
"""
@staticmethod
def boot(env):
lookup_scenario("MalformedMpReachNlri").boot(env)
@staticmethod
def setup(env):
g1 = env.g1
e1 = env.e1
e2 = env.e2
for c in [e1, e2]:
g1.wait_for(BGP_FSM_ESTABLISHED, c)
# Invalid ORIGIN Attribute
        # Send an ORIGIN attribute with type 4
# Attribute Type 0x01 (Origin)
# Attribute Flag 0x40 (well-known transitive)
# Attribute Value 0x04 (
        #    origin type = 04 -> # correct value = 00 (IGP), 01 (EGP) or 02 (INCOMPLETE) )
e1.add_route('10.7.0.17/32', attribute='0x1 0x40 0x04')
@staticmethod
def check(env):
g1 = env.g1
e1 = env.e1
e2 = env.e2
def f():
for line in e1.log().split('\n'):
if 'UPDATE message error / Invalid ORIGIN Attribute / 0x40010104' in line:
return True
return False
wait_for(f)
# check e2 is still established
g1.wait_for(BGP_FSM_ESTABLISHED, e2)
@staticmethod
def executor(env):
lookup_scenario("MalformedOriginType").boot(env)
lookup_scenario("MalformedOriginType").setup(env)
lookup_scenario("MalformedOriginType").check(env)
class TestGoBGPBase(unittest.TestCase):
wait_per_retry = 5
retry_limit = 10
@classmethod
def setUpClass(cls):
idx = parser_option.test_index
base.TEST_PREFIX = parser_option.test_prefix
cls.parser_option = parser_option
cls.executors = []
if idx == 0:
            print('unset test-index. running all tests sequentially')
for _, v in list(_SCENARIOS.items()):
for k, m in inspect.getmembers(v, inspect.isfunction):
if k == 'executor':
cls.executor = m
cls.executors.append(cls.executor)
elif idx not in _SCENARIOS:
print('invalid test-index. # of scenarios: {0}'.format(len(_SCENARIOS)))
sys.exit(1)
else:
for k, m in inspect.getmembers(_SCENARIOS[idx], inspect.isfunction):
if k == 'executor':
cls.executor = m
cls.executors.append(cls.executor)
def test(self):
for e in self.executors:
yield e
if __name__ == '__main__':
output = local("which docker 2>&1 > /dev/null ; echo $?", capture=True)
if int(output) != 0:
print("docker not found")
sys.exit(1)
nose.main(argv=sys.argv, addplugins=[OptionParser()],
defaultTest=sys.argv[0])
|
Packs/CaseManagement-Generic/Scripts/LinkIncidentsButton/LinkIncidentsButton.py
|
diCagri/content
| 799 |
83208
|
<filename>Packs/CaseManagement-Generic/Scripts/LinkIncidentsButton/LinkIncidentsButton.py<gh_stars>100-1000
import demistomock as demisto
action = demisto.getArg('action')
if action not in ['link', 'unlink']:
action = 'link'
demisto.results(demisto.executeCommand("linkIncidents", {"linkedIncidentIDs": demisto.getArg("linkedIncidentIDs"),
"action": action}))
|
recipes/Python/578874_Tkinter_simultaneous_scrolling/recipe-578874.py
|
tdiprima/code
| 2,023 |
83215
|
<reponame>tdiprima/code<filename>recipes/Python/578874_Tkinter_simultaneous_scrolling/recipe-578874.py
# Author: <NAME>
# Uncomment the next line to see my email
# print "Author's email: ", "61706c69636163696f6e616d656469646140676d61696c2e636f6d".decode("hex")
try:
import Tkinter as tk
import ttk
except ImportError:
import tkinter as tk
from tkinter import ttk
class MouseWheel(object):
def __init__(self, root, factor = 0.5):
self.activeArea = None
self.factor = factor
import platform
os = platform.system()
if os == "Linux" :
root.bind_all('<4>', self.onMouseWheel, add='+')
root.bind_all('<5>', self.onMouseWheel, add='+')
else:
# Windows and MacOS
root.bind_all("<MouseWheel>", self.onMouseWheel, add='+')
def onMouseWheel(self,event):
if self.activeArea:
self.activeArea.onMouseWheel(event.delta)
def mouseWheel_bind(self, widget):
self.activeArea = widget
def mouseWheel_unbind(self):
self.activeArea = None
def add_scrolling(self, scrollingArea, xscrollbar=None, yscrollbar=None):
scrollingArea.bind('<Enter>',lambda event: self.mouseWheel_bind(scrollingArea))
scrollingArea.bind('<Leave>', lambda event: self.mouseWheel_unbind())
if xscrollbar and not hasattr(xscrollbar, 'onMouseWheel'):
setattr(xscrollbar, 'onMouseWheel', lambda delta: scrollingArea.xview("scroll",(-1)*int(delta/(120*self.factor)),"units" ) )
if yscrollbar and not hasattr(yscrollbar, 'onMouseWheel'):
setattr(yscrollbar, 'onMouseWheel', lambda delta: scrollingArea.yview("scroll",(-1)*int(delta/(120*self.factor)),"units" ) )
active_scrollbar_on_mouse_wheel = yscrollbar or xscrollbar
if active_scrollbar_on_mouse_wheel:
setattr(scrollingArea, 'onMouseWheel', active_scrollbar_on_mouse_wheel.onMouseWheel)
for scrollbar in (xscrollbar, yscrollbar):
if scrollbar:
scrollbar.bind('<Enter>', lambda event, scrollbar=scrollbar: self.mouseWheel_bind(scrollbar) )
scrollbar.bind('<Leave>', lambda event: self.mouseWheel_unbind())
class simultaneousScrollbar(ttk.Scrollbar):
def __init__(self, master, factor = 0.5, **kwargs):
self.__scrollableWidgets = []
if 'orient' in kwargs:
if kwargs['orient']== tk.VERTICAL:
self.__orientLabel = 'y'
elif kwargs['orient']== tk.HORIZONTAL:
self.__orientLabel = 'x'
else:
raise Exception("Bad 'orient' argument in scrollbar.")
else:
self.__orientLabel = 'y'
kwargs['command'] = self.onScroll
self.factor = factor
ttk.Scrollbar.__init__(self, master, **kwargs)
def add_ScrollableArea(self, *scrollableWidgets):
for widget in scrollableWidgets:
self.__scrollableWidgets.append(widget)
widget[self.__orientLabel+'scrollcommand']=self.set
def onScroll(self, *args):
for widget in self.__scrollableWidgets:
getattr(widget, self.__orientLabel+'view')(*args)
def onMouseWheel(self, delta):
for widget in self.__scrollableWidgets:
getattr(widget, self.__orientLabel+'view')("scroll",(-1)*int(delta/(120*self.factor)),"units" )
def test():
root = tk.Tk()
scrollbar = simultaneousScrollbar(root, orient=tk.HORIZONTAL)
scrollbar.pack(side=tk.TOP, fill=tk.X)
emptySpace = tk.Frame(root, height=18)
emptySpace.pack()
tk.Label(root, text='First scrolled frame:').pack(anchor=tk.W)
canvas1 = tk.Canvas(root, width=300, height=100)
canvas1.pack(anchor=tk.NW)
frame1= tk.Frame(canvas1)
frame1.pack()
for i in range(20):
tk.Label(frame1, text="Label "+str(i)).pack(side=tk.LEFT)
canvas1.create_window(0, 0, window=frame1, anchor='nw')
canvas1.update_idletasks()
canvas1['scrollregion'] = (0,0,frame1.winfo_reqwidth(), frame1.winfo_reqheight())
tk.Label(root, text='Second scrolled frame:').pack(anchor=tk.W)
canvas2 = tk.Canvas(root,width=300, height=100)
canvas2.pack(anchor=tk.NW)
frame2= tk.Frame(canvas2)
frame2.pack()
for i in range(20):
tk.Label(frame2, text="Label "+str(i)).pack(side=tk.LEFT)
canvas2.create_window(0, 0, window=frame2, anchor='nw')
canvas2.update_idletasks()
canvas2['scrollregion'] = (0,0,frame2.winfo_reqwidth(), frame2.winfo_reqheight())
scrollbar.add_ScrollableArea(canvas1,canvas2)
MouseWheel(root).add_scrolling(canvas1, xscrollbar=scrollbar)
MouseWheel(root).add_scrolling(canvas2, xscrollbar=scrollbar)
root.mainloop()
if __name__== '__main__':
test()
|
benchmark/bench_file_handler.py
|
YoavCohen/logbook
| 771 |
83220
|
"""Benchmarks the file handler"""
from logbook import Logger, FileHandler
from tempfile import NamedTemporaryFile
log = Logger('Test logger')
def run():
f = NamedTemporaryFile()
with FileHandler(f.name) as handler:
        for _ in range(500):  # xrange is Python 2 only; range keeps this benchmark runnable on Python 3
log.warning('this is handled')
|
tests/test_chi_ssa_51.py
|
MAYANK25402/city-scrapers
| 255 |
83245
|
from datetime import datetime
from os.path import dirname, join
import pytest
from city_scrapers_core.constants import COMMISSION, PASSED
from city_scrapers_core.utils import file_response
from freezegun import freeze_time
from city_scrapers.spiders.chi_ssa_51 import ChiSsa51Spider
test_response = file_response(
join(dirname(__file__), "files", "chi_ssa_51.html"),
url="http://www.cbatechworks.org/",
)
spider = ChiSsa51Spider()
freezer = freeze_time("2019-07-19")
freezer.start()
parsed_items = [item for item in spider.parse(test_response)]
freezer.stop()
def test_start():
assert parsed_items[0]["start"] == datetime(2019, 3, 13, 12, 0)
def test_end():
assert parsed_items[0]["end"] == datetime(2019, 3, 13, 13, 0)
def test_id():
assert parsed_items[0]["id"] == "chi_ssa_51/201903131200/x/commission"
def test_status():
assert parsed_items[0]["status"] == PASSED
@pytest.mark.parametrize("item", parsed_items)
def test_all_day(item):
assert item["all_day"] is False
@pytest.mark.parametrize("item", parsed_items)
def test_title(item):
assert item["title"] == "Commission"
@pytest.mark.parametrize("item", parsed_items)
def test_description(item):
assert item["description"] == ""
@pytest.mark.parametrize("item", parsed_items)
def test_time_notes(item):
assert item["time_notes"] == ""
@pytest.mark.parametrize("item", parsed_items)
def test_location(item):
assert item["location"] == {
"address": "806 East 78th Street, Chicago IL 60619",
"name": "<NAME>",
}
@pytest.mark.parametrize("item", parsed_items)
def test_source(item):
assert item["source"] == "http://www.cbatechworks.org/"
@pytest.mark.parametrize("item", parsed_items)
def test_links(item):
assert item["links"] == []
@pytest.mark.parametrize("item", parsed_items)
def test_classification(item):
assert item["classification"] == COMMISSION
|
src/output_parser/SarifHolder.py
|
damian-cs/smartbugs2
| 194 |
83255
|
import attr
import pandas
from sarif_om import *
from src.exception.VulnerabilityNotFoundException import VulnerabilityNotFoundException
VERSION = "2.1.0"
SCHEMA = "https://raw.githubusercontent.com/oasis-tcs/sarif-spec/master/Schemata/sarif-schema-2.1.0.json"
class SarifHolder:
def __init__(self):
self.sarif = SarifLog(runs=[], version=VERSION, schema_uri=SCHEMA)
self.translationDict = dict()
# each analysis is defined by a Run
def addRun(self, newRun):
# Check if already exists an analysis performed by the same tool
for run in self.sarif.runs:
if run.tool.driver.name == newRun.tool.driver.name:
# Append Unique Rules
for rule in newRun.tool.driver.rules:
if isNotDuplicateRule(rule, run.tool.driver.rules):
run.tool.driver.rules.append(rule)
# Append Unique Artifacts
for artifact in newRun.artifacts:
if isNotDuplicateArtifact(artifact, run.artifacts):
run.artifacts.append(artifact)
# Append Unique Logical Locations
if newRun.logical_locations is not None:
for logicalLocation in newRun.logical_locations:
if isNotDuplicateLogicalLocation(logicalLocation, run.logical_locations):
run.logical_locations.append(logicalLocation)
# Append Results
for result in newRun.results:
run.results.append(result)
return
self.sarif.runs.append(newRun)
# to print the analysis from a given tool
def printToolRun(self, tool):
run = -1
for i in range(len(self.sarif.runs)):
if self.sarif.runs[i].tool.driver.name.lower() == tool.lower():
run = i
sarifIndividual = SarifLog(runs=[], version=VERSION, schema_uri=SCHEMA)
if run != -1:
sarifIndividual.runs.append(self.sarif.runs[run])
return self.serializeSarif(sarifIndividual)
# print json formatted the SARIF file
def print(self):
return self.serializeSarif(self.sarif)
# creates dictionary to fix variable names from sarif_om to standard sarif
def serialize(self, inst, field, value):
if field is not None:
self.translationDict[field.name] = field.metadata['schema_property_name']
return value
# filters SARIF keys to discard default values in output
def filterUnusedKeys(self, field, value):
return not (value is None or (field.default == value and field.name != "level") or (
isinstance(field.default, attr.Factory) and field.default.factory() == value))
# returns a dictionary based on the schema_property_name and the values of the SARIF object
def serializeSarif(self, sarifObj):
valuesDict = attr.asdict(sarifObj, filter=self.filterUnusedKeys, value_serializer=self.serialize)
return self.recursiveSarif(valuesDict)
# uses translationDict to fix variable names from sarif_om to standard SARIF
def recursiveSarif(self, serializedSarif):
if isinstance(serializedSarif, (int, str)):
return serializedSarif
if isinstance(serializedSarif, dict):
dic = dict()
for key, value in serializedSarif.items():
dic[self.translationDict[key]] = self.recursiveSarif(value)
return dic
if isinstance(serializedSarif, list):
lis = list()
for item in serializedSarif:
lis.append(self.recursiveSarif(item))
return lis
def parseRule(tool, vulnerability, full_description=None):
vuln_info = findVulnerabilityOnTable(tool, vulnerability)
if full_description is None:
return ReportingDescriptor(id=vuln_info["RuleId"],
short_description=MultiformatMessageString(
vuln_info["Vulnerability"]),
name=vuln_info["Type"] + "Vulnerability")
return ReportingDescriptor(id=vuln_info["RuleId"],
short_description=MultiformatMessageString(
vuln_info["Vulnerability"]),
full_description=MultiformatMessageString(full_description),
name=vuln_info["Type"] + "Vulnerability")
def parseResult(tool, vulnerability, level="warning", uri=None, line=None, end_line=None, column=None, snippet=None,
logicalLocation=None):
vuln_info = findVulnerabilityOnTable(tool, vulnerability)
level = parseLevel(level)
locations = [
Location(physical_location=PhysicalLocation(artifact_location=ArtifactLocation(uri=uri),
region=Region(start_line=line,
end_line=end_line,
start_column=column,
snippet=ArtifactContent(text=snippet))))
]
if logicalLocation is not None:
locations[0].logical_locations = [logicalLocation]
return Result(rule_id=vuln_info["RuleId"],
message=Message(text=vulnerability),
level=level,
locations=locations)
def parseArtifact(uri, source_language="Solidity"):
return Artifact(location=ArtifactLocation(uri=uri), source_language=source_language)
def parseLogicalLocation(name, kind="contract"):
return LogicalLocation(name=name, kind=kind)
# returns the row from the table for a given vulnerability and tool
def findVulnerabilityOnTable(tool, vulnerability_found):
table = pandas.read_csv("src/output_parser/sarif_vulnerability_mapping.csv")
tool_table = table.loc[table["Tool"] == tool]
# Due to messages that have extra information (for example the line where the vulnerability was found) this loop
# will search if the vulnerability expressed on table exist inside vulnerability found
for index, row in tool_table.iterrows():
if row["Vulnerability"] in vulnerability_found or vulnerability_found in row["Vulnerability"]:
return row
raise VulnerabilityNotFoundException(tool=tool, vulnerability=vulnerability_found)
# given a level produced by a tool, returns the level in SARIF format
def parseLevel(level):
if isinstance(level, int):
return "warning"
if level.lower() == "warning" or level.lower() == "warnings" or level.lower() == "medium":
return "warning"
if level.lower() == "error" or level.lower() == "violations" or level.lower() == "high":
return "error"
if level.lower() == "note" or level.lower() == "conflicts" or level.lower() == "informational":
return "note"
if level.lower == "none" or level.lower() == "safe":
return "none"
return "warning"
# Returns True when rule is unique
def isNotDuplicateRule(newRule, rulesList):
for rule in rulesList:
if rule.id == newRule.id:
return False
return True
# Returns True when artifact is unique
def isNotDuplicateArtifact(newArtifact, artifactsList):
for artifact in artifactsList:
if artifact.location.uri == newArtifact.location.uri:
return False
return True
# Returns True when LogicalLocation is unique
def isNotDuplicateLogicalLocation(newLogicalLocation, logicalLocationList):
for logicalLocation in logicalLocationList:
if logicalLocation.name == newLogicalLocation.name:
return False
return True
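
# Illustrative sketch (not part of the original module): a typical caller would
# combine the helpers above roughly as follows. The tool name "sometool" and the
# vulnerability name are hypothetical and must exist in
# sarif_vulnerability_mapping.csv for findVulnerabilityOnTable to resolve them.
#
#   holder = SarifHolder()
#   rule = parseRule(tool="sometool", vulnerability="Integer Overflow")
#   result = parseResult(tool="sometool", vulnerability="Integer Overflow",
#                        uri="contract.sol", line=42)
#   run = Run(tool=Tool(driver=ToolComponent(name="sometool", rules=[rule])),
#             artifacts=[parseArtifact(uri="contract.sol")],
#             results=[result])
#   holder.addRun(run)
#   print(holder.print())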
|
node/tests/k8st/tests/test_simple.py
|
mikestephen/calico
| 3,973 |
83355
|
<reponame>mikestephen/calico
# Copyright (c) 2018 Tigera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import subprocess
import time
from kubernetes import client
from tests.k8st.test_base import TestBase
from tests.k8st.utils.utils import retry_until_success, DiagsCollector, kubectl, node_info, run
_log = logging.getLogger(__name__)
class TestGracefulRestart(TestBase):
def get_restart_node_pod_name(self):
self.restart_pod_name = run("kubectl get po -n kube-system" +
" -l k8s-app=calico-node" +
" --field-selector status.podIP=" + self.restart_node_ip +
" -o jsonpath='{.items[*].metadata.name}'")
if self.restart_pod_name == "":
raise Exception('pod name not found')
def _test_restart_route_churn(self, num_repeats, restart_func, expect_churn):
with DiagsCollector():
# Get 2 worker node names, one to monitor routes and one
# to have its calico-node restarted. The first name
# returned is always the master, so skip that.
nodes, ips, _ = node_info()
self.assertGreater(len(nodes), 2)
monitor_node = nodes[1]
self.restart_node = nodes[2]
self.restart_node_ip = ips[2]
# Start running ip monitor on the monitor node, to monitor
# IPv4 route changes. We use "fd00:10:244" to identify
# and exclude IPv6 workload block routes like
# fd00:10:244:0:1cc0:b1ac:ad47:e7c0/122. These definitely
# _do_ flap when the host of that block restarts, but it
# is not yet clear why this is; specifically it is not yet
# known if it indicates anything wrong with calico/node's
# GR setup. See
# https://marc.info/?l=bird-users&m=158298182509702&w=2
# for the mailing list discussion so far.
run("docker exec -d %s sh -c 'stdbuf -oL ip -ts monitor route | stdbuf -oL grep -v fd00:10:244 > rmon.txt'" %
monitor_node)
# Find the name of the calico-node pod on the restart node.
self.get_restart_node_pod_name()
# Restart the calico-node several times, on the other node.
for i in range(num_repeats):
# Restart it.
_log.info("Iteration %d: restart pod %s", i, self.restart_pod_name)
restart_func(self)
# Kill the ip monitor process.
run("docker exec %s pkill ip" % monitor_node)
# Dump the monitor output.
monitor_output = run("docker exec %s cat rmon.txt" % monitor_node)
if expect_churn:
# Assert that it is not empty.
self.assertNotEqual(monitor_output, "")
else:
# Assert that it is empty.
self.assertEqual(monitor_output, "")
def test_methodology(self):
# Test the methodology here, by verifying that we _do_ observe
# route churn if we kill BIRD with SIGTERM.
def kill_bird(self):
run("docker exec %s pkill bird" % self.restart_node)
def check_bird_running():
run("docker exec %s pgrep bird" % self.restart_node)
retry_until_success(check_bird_running, retries=10, wait_time=1)
time.sleep(5)
# Expect non-GR behaviour, i.e. route churn.
self._test_restart_route_churn(3, kill_bird, True)
def test_graceful_restart(self):
# Test that we do _not_ observe route churn when Kubernetes
# deletes and restarts a pod.
def delete_calico_node_pod(self):
run("kubectl delete po %s -n kube-system" % self.restart_pod_name)
# Wait until a replacement calico-node pod has been created.
retry_until_success(self.get_restart_node_pod_name, retries=10, wait_time=1)
# Wait until it is ready, before returning.
run("kubectl wait po %s -n kube-system --timeout=2m --for=condition=ready" %
self.restart_pod_name)
# Expect GR behaviour, i.e. no route churn.
self._test_restart_route_churn(8, delete_calico_node_pod, False)
class TestAllRunning(TestBase):
def test_kubesystem_pods_running(self):
with DiagsCollector():
self.check_pod_status('kube-system')
def test_default_pods_running(self):
with DiagsCollector():
self.check_pod_status('default')
def test_calico_monitoring_pods_running(self):
with DiagsCollector():
self.check_pod_status('calico-monitoring')
class TestSimplePolicy(TestBase):
def setUp(self):
TestBase.setUp(self)
self.create_namespace("policy-demo")
self.deploy("nginx:1.7.9", "nginx", "policy-demo", 80)
# Create two client pods that live for the duration of the
# test. We will use 'kubectl exec' to try wgets from these at
# particular times.
#
# We do it this way - instead of one-shot pods that are
# created, try wget, and then exit - because it takes a
# relatively long time (7 seconds?) in this test setup for
# Calico routing and policy to be set up correctly for a newly
# created pod. In particular it's possible that connection
# from a just-created pod will fail because that pod's IP has
# not yet propagated to the IP set for the ingress policy on
# the server pod - which can confuse test code that is
# expecting connection failure for some other reason.
kubectl("run access -n policy-demo" +
" --overrides='{\"metadata\": {\"annotations\": {\"cni.projectcalico.org/floatingIPs\":\"[\\\"172.16.31.10\\\", \\\"fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b\\\"]\"}}}' "
" --image busybox --command /bin/sleep -- 3600")
kubectl("run no-access -n policy-demo" +
" --image busybox --command /bin/sleep -- 3600")
kubectl("wait --timeout=2m --for=condition=available" +
" deployment/nginx -n policy-demo")
kubectl("wait --timeout=2m --for=condition=ready" +
" pod/access -n policy-demo")
kubectl("wait --timeout=2m --for=condition=ready" +
" pod/no-access -n policy-demo")
def tearDown(self):
# Delete deployment
kubectl("delete --grace-period 0 pod access -n policy-demo")
kubectl("delete --grace-period 0 pod no-access -n policy-demo")
self.delete_and_confirm("policy-demo", "ns")
def test_simple_policy(self):
with DiagsCollector():
# Check we can talk to service.
retry_until_success(self.can_connect, retries=10, wait_time=1, function_args=["access"])
_log.info("Client 'access' connected to open service")
retry_until_success(self.can_connect, retries=10, wait_time=1, function_args=["no-access"])
_log.info("Client 'no-access' connected to open service")
# Create default-deny policy
policy = client.V1NetworkPolicy(
metadata=client.V1ObjectMeta(
name="default-deny",
namespace="policy-demo"
),
spec={
"podSelector": {
"matchLabels": {},
},
}
)
client.NetworkingV1Api().create_namespaced_network_policy(
body=policy,
namespace="policy-demo",
)
_log.debug("Isolation policy created")
# Check we cannot talk to service
retry_until_success(self.cannot_connect, retries=10, wait_time=1, function_args=["access"])
_log.info("Client 'access' failed to connect to isolated service")
retry_until_success(self.cannot_connect, retries=10, wait_time=1, function_args=["no-access"])
_log.info("Client 'no-access' failed to connect to isolated service")
# Create allow policy
policy = client.V1NetworkPolicy(
metadata=client.V1ObjectMeta(
name="access-nginx",
namespace="policy-demo"
),
spec={
'ingress': [{
'from': [{
'podSelector': {
'matchLabels': {
'run': 'access'
}
}
}]
}],
'podSelector': {
'matchLabels': {
'app': 'nginx'
}
}
}
)
client.NetworkingV1Api().create_namespaced_network_policy(
body=policy,
namespace="policy-demo",
)
_log.debug("Allow policy created.")
# Check we can talk to service as 'access'
retry_until_success(self.can_connect, retries=10, wait_time=1, function_args=["access"])
_log.info("Client 'access' connected to protected service")
# Check we cannot talk to service as 'no-access'
retry_until_success(self.cannot_connect, retries=10, wait_time=1, function_args=["no-access"])
_log.info("Client 'no-access' failed to connect to protected service")
def can_connect(self, name):
if not self.check_connected(name):
_log.warning("'%s' failed to connect, when connection was expected", name)
raise self.ConnectionError
_log.info("'%s' connected, as expected", name)
def cannot_connect(self, name):
if self.check_connected(name):
_log.warning("'%s' unexpectedly connected", name)
raise self.ConnectionError
_log.info("'%s' failed to connect, as expected", name)
@staticmethod
def check_connected(name):
try:
kubectl("exec " + name + " -n policy-demo" +
" -- /bin/wget -O /dev/null -q --timeout=1 nginx")
except subprocess.CalledProcessError:
_log.exception("Failed to wget from nginx service")
return False
_log.debug("Contacted service")
return True
class ConnectionError(Exception):
pass
|
WebMirror/management/rss_parser_funcs/feed_parse_extractIsogashiineetoWordpressCom.py
|
fake-name/ReadableWebProxy
| 193 |
83365
|
<filename>WebMirror/management/rss_parser_funcs/feed_parse_extractIsogashiineetoWordpressCom.py
def extractIsogashiineetoWordpressCom(item):
'''
Parser for 'isogashiineeto.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('NEET Hello Work', 'NEET dakedo Hello Work ni Ittara Isekai ni Tsuretekareta', 'translated'),
('Dark Magician Hero', 'Dark Magician as a Hero', 'translated'),
('Hatena☆Illusion', 'Hatena☆Illusion', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
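
# Illustrative only: extractVolChapterFragmentPostfix and
# buildReleaseMessageWithType are injected by the surrounding WebMirror
# framework, so this parser is not runnable standalone. A hypothetical call
# could look like:
#   extractIsogashiineetoWordpressCom({
#       'title': 'NEET Hello Work chapter 12',
#       'tags': ['NEET Hello Work'],
#   })
# which would be mapped to the 'NEET dakedo Hello Work ni Ittara Isekai ni
# Tsuretekareta' series as a translated release.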
|
python/powerlift/tests/powerlift/bench/test_experiment.py
|
microsoft/interpret
| 2,122 |
83366
|
from powerlift.bench import Experiment, Store
from powerlift.executors.docker import InsecureDocker
from powerlift.executors.localmachine import LocalMachine
from powerlift.executors.azure_ci import AzureContainerInstance
import pytest
import os
def _add(x, y):
return x + y
def _err_handler(e):
raise e
def _trials(task):
if task.problem == "binary" and task.scalar_measure("n_rows") <= 10000:
return ["rf", "svm"]
return []
def _benchmark(trial):
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import CalibratedClassifierCV
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, FunctionTransformer
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
if trial.task.problem == "binary" and trial.task.origin == "openml":
X, y, meta = trial.task.data(["X", "y", "meta"])
# Holdout split
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.3)
# Build preprocessor
is_cat = meta["categorical_mask"]
cat_cols = [idx for idx in range(X.shape[1]) if is_cat[idx]]
num_cols = [idx for idx in range(X.shape[1]) if not is_cat[idx]]
cat_ohe_step = ("ohe", OneHotEncoder(sparse=True, handle_unknown="ignore"))
cat_pipe = Pipeline([cat_ohe_step])
num_pipe = Pipeline([("identity", FunctionTransformer())])
transformers = [("cat", cat_pipe, cat_cols), ("num", num_pipe, num_cols)]
ct = Pipeline(
[
("ct", ColumnTransformer(transformers=transformers)),
(
"missing",
SimpleImputer(add_indicator=True, strategy="most_frequent"),
),
]
)
# Connect preprocessor with target learner
if trial.method.name == "svm":
clf = Pipeline([("ct", ct), ("est", CalibratedClassifierCV(LinearSVC()))])
else:
clf = Pipeline([("ct", ct), ("est", RandomForestClassifier())])
# Train
clf.fit(X_tr, y_tr)
# Predict
predictions = clf.predict_proba(X_te)[:, 1]
# Score
auc = roc_auc_score(y_te, predictions)
trial.log("auc", auc)
def test_multiprocessing():
"""This tests exists to ensure there is no hang in pytest."""
from multiprocessing.pool import Pool
pool = Pool()
results = []
num_tasks = 32
for i in range(num_tasks):
result = pool.apply_async(_add, (i, i), error_callback=_err_handler)
results.append(result)
counter = 0
for i in range(num_tasks):
counter += results[i].get()
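    # each task returns i + i, so the expected total is 2 * sum(range(32)) == 992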
assert counter == 992
pool.close()
# def test_scikit_experiment_aci(populated_azure_store):
@pytest.mark.skip("Remove this when testing ACI.")
def test_scikit_experiment_aci():
"""
As of 2022-06-09:
- Takes roughly 20 seconds to submit 10 tasks.
- Roughly 80 seconds for first runs to return.
- 180 seconds to complete (5 parallel containers).
"""
from dotenv import load_dotenv
load_dotenv()
azure_tenant_id = os.getenv("AZURE_TENANT_ID")
azure_client_id = os.getenv("AZURE_CLIENT_ID")
azure_client_secret = os.getenv("AZURE_CLIENT_SECRET")
subscription_id = os.getenv("AZURE_SUBSCRIPTION_ID")
resource_group = os.getenv("AZURE_RESOURCE_GROUP")
store = Store(os.getenv("AZURE_DB_URL"), force_recreate=False)
# store = populated_azure_store
executor = AzureContainerInstance(
store,
azure_tenant_id,
azure_client_id,
azure_client_secret,
subscription_id,
resource_group,
n_running_containers=5,
num_cores=1,
mem_size_gb=2,
raise_exception=True,
)
experiment = Experiment(store)
executor = experiment.run(_benchmark, _trials, timeout=10, executor=executor)
executor.join()
def test_scikit_experiment_debug(populated_store):
store = populated_store
executor = LocalMachine(store, n_cpus=1, raise_exception=True)
experiment = Experiment(store, name="scikit")
executor = experiment.run(_benchmark, _trials, timeout=10, executor=executor)
executor.join()
def test_scikit_experiment_local(populated_store):
store = populated_store
executor = LocalMachine(store, n_cpus=2)
experiment = Experiment(store, name="scikit")
executor = experiment.run(_benchmark, _trials, timeout=10, executor=executor)
executor.join()
def test_scikit_experiment_docker(populated_store):
from dotenv import load_dotenv
load_dotenv()
uri = os.getenv("DOCKER_DB_URL")
executor = InsecureDocker(
populated_store, n_running_containers=2, docker_db_uri=uri
)
experiment = Experiment(populated_store, name="scikit")
executor = experiment.run(_benchmark, _trials, timeout=10, executor=executor)
executor.join()
|
scale/ingest/scan/scanners/factory.py
|
kaydoh/scale
| 121 |
83388
|
"""Defines the factory for creating monitors"""
from __future__ import unicode_literals
import logging
logger = logging.getLogger(__name__)
_SCANNERS = {}
def add_scanner_type(scanner_class):
"""Registers a scanner class so it can be used for Scale Scans
:param scanner_class: The class definition for a scanner
:type scanner_class: class:`ingest.scan.scanners.scanner.Scanner`
"""
scanner = scanner_class()
if scanner.scanner_type in _SCANNERS:
logger.warning('Duplicate scanner registration: %s', scanner.scanner_type)
_SCANNERS[scanner.scanner_type] = scanner_class
def get_scanner(scanner_type):
"""Returns a scanner of the given type that is set to scan the given workspace
:param scanner_type: The unique identifier of a registered scanner
:type scanner_type: string
:returns: A scanner for storing and retrieving files.
:rtype: :class:`ingest.scan.scanners.scanner.Scanner`
"""
if scanner_type in _SCANNERS:
return _SCANNERS[scanner_type]()
raise KeyError('\'%s\' is an invalid scanner type' % scanner_type)
def get_scanner_types():
"""Returns a list of type identifiers for all registered scanners
:returns: A list of scanner types
:rtype: [string]
"""
return _SCANNERS.keys()
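
# Minimal usage sketch (illustrative, not part of the original module): the
# scanner class below is hypothetical and only shows how add_scanner_type and
# get_scanner are meant to be combined; real scanner classes live in the
# ingest.scan.scanners package.
#
# class DirScanner(object):
#     scanner_type = 'dir'
#
# add_scanner_type(DirScanner)
# scanner = get_scanner('dir')       # returns a new DirScanner instance
# print(get_scanner_types())         # ['dir']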
|
Imaging/Core/Testing/Python/TestLassoStencil.py
|
forestGzh/VTK
| 1,755 |
83404
|
#!/usr/bin/env python
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# A script to test the vtkLassoStencilSource
reader = vtk.vtkPNGReader()
reader.SetDataSpacing(0.8,0.8,1.5)
reader.SetDataOrigin(0.0,0.0,0.0)
reader.SetFileName("" + str(VTK_DATA_ROOT) + "/Data/fullhead15.png")
reader.Update()
shiftScale = vtk.vtkImageShiftScale()
shiftScale.SetInputConnection(reader.GetOutputPort())
shiftScale.SetScale(0.2)
shiftScale.Update()
points1 = vtk.vtkPoints()
points1.InsertNextPoint(80,50,0)
points1.InsertNextPoint(100,90,0)
points1.InsertNextPoint(200,50,0)
points1.InsertNextPoint(230,100,0)
points1.InsertNextPoint(150,170,0)
points1.InsertNextPoint(110,170,0)
points1.InsertNextPoint(80,50,0)
points2 = vtk.vtkPoints()
points2.InsertNextPoint(80,50,0)
points2.InsertNextPoint(100,90,0)
points2.InsertNextPoint(200,50,0)
points2.InsertNextPoint(230,100,0)
points2.InsertNextPoint(150,170,0)
points2.InsertNextPoint(110,170,0)
roiStencil1 = vtk.vtkLassoStencilSource()
roiStencil1.SetShapeToPolygon()
roiStencil1.SetSlicePoints(0,points1)
roiStencil1.SetInformationInput(reader.GetOutput())
roiStencil2 = vtk.vtkLassoStencilSource()
roiStencil2.SetShapeToPolygon()
roiStencil2.SetPoints(points2)
roiStencil2.SetInformationInput(reader.GetOutput())
roiStencil3 = vtk.vtkLassoStencilSource()
roiStencil3.SetShapeToSpline()
roiStencil3.SetPoints(points1)
roiStencil3.SetInformationInput(reader.GetOutput())
roiStencil4 = vtk.vtkLassoStencilSource()
roiStencil4.SetShapeToSpline()
roiStencil4.SetSlicePoints(0,points2)
roiStencil4.SetInformationInput(reader.GetOutput())
roiStencil4.Update()
stencil1 = vtk.vtkImageStencil()
stencil1.SetInputConnection(reader.GetOutputPort())
stencil1.SetBackgroundInputData(shiftScale.GetOutput())
stencil1.SetStencilConnection(roiStencil1.GetOutputPort())
stencil2 = vtk.vtkImageStencil()
stencil2.SetInputConnection(reader.GetOutputPort())
stencil2.SetBackgroundInputData(shiftScale.GetOutput())
stencil2.SetStencilConnection(roiStencil2.GetOutputPort())
stencil3 = vtk.vtkImageStencil()
stencil3.SetInputConnection(reader.GetOutputPort())
stencil3.SetBackgroundInputData(shiftScale.GetOutput())
stencil3.SetStencilConnection(roiStencil3.GetOutputPort())
stencil4 = vtk.vtkImageStencil()
stencil4.SetInputConnection(reader.GetOutputPort())
stencil4.SetBackgroundInputData(shiftScale.GetOutput())
stencil4.SetStencilConnection(roiStencil4.GetOutputPort())
mapper1 = vtk.vtkImageMapper()
mapper1.SetInputConnection(stencil1.GetOutputPort())
mapper1.SetColorWindow(2000)
mapper1.SetColorLevel(1000)
mapper1.SetZSlice(0)
mapper2 = vtk.vtkImageMapper()
mapper2.SetInputConnection(stencil2.GetOutputPort())
mapper2.SetColorWindow(2000)
mapper2.SetColorLevel(1000)
mapper2.SetZSlice(0)
mapper3 = vtk.vtkImageMapper()
mapper3.SetInputConnection(stencil3.GetOutputPort())
mapper3.SetColorWindow(2000)
mapper3.SetColorLevel(1000)
mapper3.SetZSlice(0)
mapper4 = vtk.vtkImageMapper()
mapper4.SetInputConnection(stencil4.GetOutputPort())
mapper4.SetColorWindow(2000)
mapper4.SetColorLevel(1000)
mapper4.SetZSlice(0)
actor1 = vtk.vtkActor2D()
actor1.SetMapper(mapper1)
actor2 = vtk.vtkActor2D()
actor2.SetMapper(mapper2)
actor3 = vtk.vtkActor2D()
actor3.SetMapper(mapper3)
actor4 = vtk.vtkActor2D()
actor4.SetMapper(mapper4)
imager1 = vtk.vtkRenderer()
imager1.AddActor2D(actor1)
imager1.SetViewport(0.5,0.0,1.0,0.5)
imager2 = vtk.vtkRenderer()
imager2.AddActor2D(actor2)
imager2.SetViewport(0.0,0.0,0.5,0.5)
imager3 = vtk.vtkRenderer()
imager3.AddActor2D(actor3)
imager3.SetViewport(0.5,0.5,1.0,1.0)
imager4 = vtk.vtkRenderer()
imager4.AddActor2D(actor4)
imager4.SetViewport(0.0,0.5,0.5,1.0)
imgWin = vtk.vtkRenderWindow()
imgWin.AddRenderer(imager1)
imgWin.AddRenderer(imager2)
imgWin.AddRenderer(imager3)
imgWin.AddRenderer(imager4)
imgWin.SetSize(512,512)
imgWin.Render()
# --- end of script --
|
node_launcher/logging.py
|
ryan-lingle/node-launcher
| 249 |
83448
|
import logging.config
import os
import structlog
from node_launcher.constants import NODE_LAUNCHER_DATA_PATH, OPERATING_SYSTEM
timestamper = structlog.processors.TimeStamper(fmt='%Y-%m-%d %H:%M:%S')
pre_chain = [
# Add the log level and a timestamp to the event_dict if the log entry
# is not from structlog.
structlog.stdlib.add_log_level,
timestamper,
]
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'plain': {
'()': structlog.stdlib.ProcessorFormatter,
'processor': structlog.dev.ConsoleRenderer(colors=False),
'foreign_pre_chain': pre_chain,
},
'colored': {
'()': structlog.stdlib.ProcessorFormatter,
'processor': structlog.dev.ConsoleRenderer(colors=True),
'foreign_pre_chain': pre_chain,
},
},
'handlers': {
'default': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'colored',
},
'file': {
'level': 'DEBUG',
'class': 'logging.handlers.WatchedFileHandler',
'filename': os.path.join(NODE_LAUNCHER_DATA_PATH[OPERATING_SYSTEM],
'debug.log'),
'formatter': 'plain',
},
},
'loggers': {
'': {
'handlers': ['default', 'file'],
'level': 'DEBUG',
'propagate': True,
},
}
})
def dropper(logger, method_name, event_dict):
for key in event_dict[0][0].keys():
if 'rpcpass' in key:
event_dict[0][0][key] = '<PASSWORD>'
return event_dict
structlog.configure(
processors=[
structlog.stdlib.add_log_level,
structlog.stdlib.PositionalArgumentsFormatter(),
timestamper,
structlog.processors.StackInfoRenderer(),
structlog.processors.format_exc_info,
structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
dropper
],
context_class=dict,
logger_factory=structlog.stdlib.LoggerFactory(),
wrapper_class=structlog.stdlib.BoundLogger,
cache_logger_on_first_use=True,
)
log = structlog.get_logger()
|
examples/face_detection/main_video.py
|
Ishticode/kornia
| 418 |
83474
|
import argparse
import cv2
import numpy as np
import torch
import kornia as K
from kornia.contrib import FaceDetector, FaceDetectorResult, FaceKeypoint
def draw_keypoint(img: np.ndarray, det: FaceDetectorResult, kpt_type: FaceKeypoint) -> np.ndarray:
kpt = det.get_keypoint(kpt_type).int().tolist()
return cv2.circle(img, kpt, 2, (255, 0, 0), 2)
def scale_image(img: np.ndarray, size: int) -> np.ndarray:
h, w = img.shape[:2]
scale = 1. * size / w
return cv2.resize(img, (int(w * scale), int(h * scale)))
def my_app():
# select the device
device = torch.device('cpu')
if args.cuda and torch.cuda.is_available():
device = torch.device('cuda:0')
torch.backends.cudnn.benchmark = True
# create the video capture object
cap = cv2.VideoCapture(0)
# compute scale
width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
fps = cap.get(cv2.CAP_PROP_FPS)
print(f"Video: h/w: {height}/{width} fps:{fps}")
scale = 1. * args.image_size / width
w, h = int(width * scale), int(height * scale)
# create the video writer object
fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
out = cv2.VideoWriter(args.video_out, fourcc, fps, (w, h))
# create the detector object
face_detection = FaceDetector().to(device)
cv2.namedWindow('frame', cv2.WINDOW_NORMAL)
draw_keypoints: bool = False
    while True:
# Capture the video frame
# by frame
_, frame = cap.read()
start = cv2.getTickCount()
# preprocess
frame = scale_image(frame, args.image_size)
img = K.image_to_tensor(frame, keepdim=False).to(device)
img = K.color.bgr_to_rgb(img.float())
# detect !
with torch.no_grad():
dets = face_detection(img)
dets = [FaceDetectorResult(o) for o in dets]
fps: float = cv2.getTickFrequency() / (cv2.getTickCount() - start)
# show image
frame_vis = frame.copy()
frame_vis = cv2.putText(
frame_vis, f"FPS: {fps:.1f}", (10, 20), cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255))
for b in dets:
if b.score < args.vis_threshold:
continue
# draw face bounding box
line_thickness = 2
line_length = 10
x1, y1 = b.top_left.int().tolist()
frame_vis = cv2.line(frame_vis, (x1, y1), (x1 + line_length, y1), (0, 255, 0), thickness=line_thickness)
frame_vis = cv2.line(frame_vis, (x1, y1), (x1, y1 + line_length), (0, 255, 0), thickness=line_thickness)
x1, y1 = b.top_right.int().tolist()
frame_vis = cv2.line(frame_vis, (x1, y1), (x1 - line_length, y1), (0, 255, 0), thickness=line_thickness)
frame_vis = cv2.line(frame_vis, (x1, y1), (x1, y1 + line_length), (0, 255, 0), thickness=line_thickness)
x1, y1 = b.bottom_right.int().tolist()
frame_vis = cv2.line(frame_vis, (x1, y1), (x1 - line_length, y1), (0, 255, 0), thickness=line_thickness)
frame_vis = cv2.line(frame_vis, (x1, y1), (x1, y1 - line_length), (0, 255, 0), thickness=line_thickness)
x1, y1 = b.bottom_left.int().tolist()
frame_vis = cv2.line(frame_vis, (x1, y1), (x1 + line_length, y1), (0, 255, 0), thickness=line_thickness)
frame_vis = cv2.line(frame_vis, (x1, y1), (x1, y1 - line_length), (0, 255, 0), thickness=line_thickness)
if draw_keypoints:
# draw facial keypoints
frame_vis = draw_keypoint(frame_vis, b, FaceKeypoint.EYE_LEFT)
frame_vis = draw_keypoint(frame_vis, b, FaceKeypoint.EYE_RIGHT)
frame_vis = draw_keypoint(frame_vis, b, FaceKeypoint.NOSE)
frame_vis = draw_keypoint(frame_vis, b, FaceKeypoint.MOUTH_LEFT)
frame_vis = draw_keypoint(frame_vis, b, FaceKeypoint.MOUTH_RIGHT)
# draw the text score and FPS
pt = b.top_left.int().tolist()
frame_vis = cv2.putText(
frame_vis, f"{b.score:.2f}", (pt[0], pt[1] - 12), cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255))
# write the processed frame
out.write(frame_vis)
# Display the resulting frame
cv2.imshow('frame', frame_vis)
        # read the keyboard once per frame: 's' toggles drawing of the
        # face keypoints, 'q' quits the demo
        key = cv2.waitKey(1)
        if key == ord('s'):
            draw_keypoints = not draw_keypoints
        if key == ord('q'):
            break
# After the loop release the cap and writing objects
cap.release()
out.release()
# Destroy all the windows
cv2.destroyAllWindows()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Face and Landmark Detection')
parser.add_argument('--video_out', required=True, type=str, help='the file path to write the output.')
parser.add_argument('--image_size', default=320, type=int, help='the image size to process.')
parser.add_argument('--vis_threshold', default=0.8, type=float, help='visualization_threshold')
parser.add_argument('--vis_keypoints', dest='vis_keypoints', action='store_true')
parser.add_argument('--cuda', dest='cuda', action='store_true')
args = parser.parse_args()
my_app()
|
tests/oxml/unitdata/text.py
|
revvsales/python-docx-1
| 3,031 |
83481
|
<reponame>revvsales/python-docx-1<gh_stars>1000+
# encoding: utf-8
"""
Test data builders for text XML elements
"""
from ...unitdata import BaseBuilder
from .shared import CT_OnOffBuilder, CT_StringBuilder
class CT_BrBuilder(BaseBuilder):
__tag__ = 'w:br'
__nspfxs__ = ('w',)
__attrs__ = ('w:type', 'w:clear')
class CT_EmptyBuilder(BaseBuilder):
__nspfxs__ = ('w',)
__attrs__ = ()
def __init__(self, tag):
self.__tag__ = tag
super(CT_EmptyBuilder, self).__init__()
class CT_JcBuilder(BaseBuilder):
__tag__ = 'w:jc'
__nspfxs__ = ('w',)
__attrs__ = ('w:val',)
class CT_PBuilder(BaseBuilder):
__tag__ = 'w:p'
__nspfxs__ = ('w',)
__attrs__ = ()
class CT_PPrBuilder(BaseBuilder):
__tag__ = 'w:pPr'
__nspfxs__ = ('w',)
__attrs__ = ()
class CT_RBuilder(BaseBuilder):
__tag__ = 'w:r'
__nspfxs__ = ('w',)
__attrs__ = ()
class CT_RPrBuilder(BaseBuilder):
__tag__ = 'w:rPr'
__nspfxs__ = ('w',)
__attrs__ = ()
class CT_SectPrBuilder(BaseBuilder):
__tag__ = 'w:sectPr'
__nspfxs__ = ('w',)
__attrs__ = ()
class CT_TextBuilder(BaseBuilder):
__tag__ = 'w:t'
__nspfxs__ = ('w',)
__attrs__ = ()
def with_space(self, value):
self._set_xmlattr('xml:space', str(value))
return self
class CT_UnderlineBuilder(BaseBuilder):
__tag__ = 'w:u'
__nspfxs__ = ('w',)
__attrs__ = (
'w:val', 'w:color', 'w:themeColor', 'w:themeTint', 'w:themeShade'
)
def a_b():
return CT_OnOffBuilder('w:b')
def a_bCs():
return CT_OnOffBuilder('w:bCs')
def a_br():
return CT_BrBuilder()
def a_caps():
return CT_OnOffBuilder('w:caps')
def a_cr():
return CT_EmptyBuilder('w:cr')
def a_cs():
return CT_OnOffBuilder('w:cs')
def a_dstrike():
return CT_OnOffBuilder('w:dstrike')
def a_jc():
return CT_JcBuilder()
def a_noProof():
return CT_OnOffBuilder('w:noProof')
def a_shadow():
return CT_OnOffBuilder('w:shadow')
def a_smallCaps():
return CT_OnOffBuilder('w:smallCaps')
def a_snapToGrid():
return CT_OnOffBuilder('w:snapToGrid')
def a_specVanish():
return CT_OnOffBuilder('w:specVanish')
def a_strike():
return CT_OnOffBuilder('w:strike')
def a_tab():
return CT_EmptyBuilder('w:tab')
def a_vanish():
return CT_OnOffBuilder('w:vanish')
def a_webHidden():
return CT_OnOffBuilder('w:webHidden')
def a_p():
return CT_PBuilder()
def a_pPr():
return CT_PPrBuilder()
def a_pStyle():
return CT_StringBuilder('w:pStyle')
def a_sectPr():
return CT_SectPrBuilder()
def a_t():
return CT_TextBuilder()
def a_u():
return CT_UnderlineBuilder()
def an_emboss():
return CT_OnOffBuilder('w:emboss')
def an_i():
return CT_OnOffBuilder('w:i')
def an_iCs():
return CT_OnOffBuilder('w:iCs')
def an_imprint():
return CT_OnOffBuilder('w:imprint')
def an_oMath():
return CT_OnOffBuilder('w:oMath')
def an_outline():
return CT_OnOffBuilder('w:outline')
def an_r():
return CT_RBuilder()
def an_rPr():
return CT_RPrBuilder()
def an_rStyle():
return CT_StringBuilder('w:rStyle')
def an_rtl():
return CT_OnOffBuilder('w:rtl')
|
evm/utils/hexadecimal.py
|
zixuanzh/py-evm
| 137 |
83509
|
from __future__ import unicode_literals
import codecs
def encode_hex(value):
return '0x' + codecs.decode(codecs.encode(value, 'hex'), 'utf8')
def decode_hex(value):
_, _, hex_part = value.rpartition('x')
return codecs.decode(hex_part, 'hex')
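
# Illustrative examples (not part of the original module):
#   encode_hex(b'\x01\x02')  -> '0x0102'
#   decode_hex('0x0102')     -> b'\x01\x02'
# decode_hex also accepts un-prefixed input, since rpartition('x') leaves the
# whole string as the hex part when no 'x' is present:
#   decode_hex('0102')       -> b'\x01\x02'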
|
saleor/graphql/payment/tests/mutations/test_payment_capture.py
|
eanknd/saleor
| 1,392 |
83521
|
<filename>saleor/graphql/payment/tests/mutations/test_payment_capture.py<gh_stars>1000+
from unittest.mock import patch
import graphene
from .....payment import TransactionKind
from .....payment.gateways.dummy_credit_card import (
TOKEN_EXPIRED,
TOKEN_VALIDATION_MAPPING,
)
from .....payment.models import ChargeStatus
from ....tests.utils import get_graphql_content
CAPTURE_QUERY = """
mutation PaymentCapture($paymentId: ID!, $amount: PositiveDecimal) {
paymentCapture(paymentId: $paymentId, amount: $amount) {
payment {
id,
chargeStatus
}
errors {
field
message
code
}
}
}
"""
def test_payment_capture_success(
staff_api_client, permission_manage_orders, payment_txn_preauth
):
payment = payment_txn_preauth
assert payment.charge_status == ChargeStatus.NOT_CHARGED
payment_id = graphene.Node.to_global_id("Payment", payment.pk)
variables = {"paymentId": payment_id, "amount": str(payment_txn_preauth.total)}
response = staff_api_client.post_graphql(
CAPTURE_QUERY, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["paymentCapture"]
assert not data["errors"]
payment_txn_preauth.refresh_from_db()
assert payment.charge_status == ChargeStatus.FULLY_CHARGED
assert payment.transactions.count() == 2
txn = payment.transactions.last()
assert txn.kind == TransactionKind.CAPTURE
def test_payment_capture_with_invalid_argument(
staff_api_client, permission_manage_orders, payment_txn_preauth
):
payment = payment_txn_preauth
assert payment.charge_status == ChargeStatus.NOT_CHARGED
payment_id = graphene.Node.to_global_id("Payment", payment.pk)
variables = {"paymentId": payment_id, "amount": 0}
response = staff_api_client.post_graphql(
CAPTURE_QUERY, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["paymentCapture"]
assert len(data["errors"]) == 1
assert data["errors"][0]["message"] == "Amount should be a positive number."
def test_payment_capture_with_payment_non_authorized_yet(
staff_api_client, permission_manage_orders, payment_dummy
):
"""Ensure capture a payment that is set as authorized is failing with
the proper error message.
"""
payment = payment_dummy
assert payment.charge_status == ChargeStatus.NOT_CHARGED
payment_id = graphene.Node.to_global_id("Payment", payment.pk)
variables = {"paymentId": payment_id, "amount": 1}
response = staff_api_client.post_graphql(
CAPTURE_QUERY, variables, permissions=[permission_manage_orders]
)
content = get_graphql_content(response)
data = content["data"]["paymentCapture"]
assert data["errors"] == [
{
"field": None,
"message": "Cannot find successful auth transaction.",
"code": "PAYMENT_ERROR",
}
]
def test_payment_capture_gateway_error(
staff_api_client, permission_manage_orders, payment_txn_preauth, monkeypatch
):
# given
payment = payment_txn_preauth
assert payment.charge_status == ChargeStatus.NOT_CHARGED
payment_id = graphene.Node.to_global_id("Payment", payment.pk)
variables = {"paymentId": payment_id, "amount": str(payment_txn_preauth.total)}
monkeypatch.setattr("saleor.payment.gateways.dummy.dummy_success", lambda: False)
# when
response = staff_api_client.post_graphql(
CAPTURE_QUERY, variables, permissions=[permission_manage_orders]
)
# then
content = get_graphql_content(response)
data = content["data"]["paymentCapture"]
assert data["errors"] == [
{"field": None, "message": "Unable to process capture", "code": "PAYMENT_ERROR"}
]
payment_txn_preauth.refresh_from_db()
assert payment.charge_status == ChargeStatus.NOT_CHARGED
assert payment.transactions.count() == 2
txn = payment.transactions.last()
assert txn.kind == TransactionKind.CAPTURE
assert not txn.is_success
@patch(
"saleor.payment.gateways.dummy_credit_card.plugin."
"DummyCreditCardGatewayPlugin.DEFAULT_ACTIVE",
True,
)
def test_payment_capture_gateway_dummy_credit_card_error(
staff_api_client, permission_manage_orders, payment_txn_preauth, monkeypatch
):
# given
token = TOKEN_EXPIRED
error = TOKEN_VALIDATION_MAPPING[token]
payment = payment_txn_preauth
payment.gateway = "mirumee.payments.dummy_credit_card"
payment.save()
transaction = payment.transactions.last()
transaction.token = token
transaction.save()
assert payment.charge_status == ChargeStatus.NOT_CHARGED
payment_id = graphene.Node.to_global_id("Payment", payment.pk)
variables = {"paymentId": payment_id, "amount": str(payment_txn_preauth.total)}
monkeypatch.setattr(
"saleor.payment.gateways.dummy_credit_card.dummy_success", lambda: False
)
# when
response = staff_api_client.post_graphql(
CAPTURE_QUERY, variables, permissions=[permission_manage_orders]
)
# then
content = get_graphql_content(response)
data = content["data"]["paymentCapture"]
assert data["errors"] == [
{"field": None, "message": error, "code": "PAYMENT_ERROR"}
]
payment_txn_preauth.refresh_from_db()
assert payment.charge_status == ChargeStatus.NOT_CHARGED
assert payment.transactions.count() == 2
txn = payment.transactions.last()
assert txn.kind == TransactionKind.CAPTURE
assert not txn.is_success
|
robobrowser_examples/get_html_as_bytes.py
|
DazEB2/SimplePyScripts
| 117 |
83527
|
<gh_stars>100-1000
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from robobrowser import RoboBrowser
browser = RoboBrowser(
user_agent='Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:63.0) Gecko/20100101 Firefox/63.0',
parser='html.parser'
)
browser.open('https://github.com')
html = browser.state.response.content
print(html)
|
topic-db/topicdb/util/profiler.py
|
anthcp-infocom/Contextualise
| 184 |
83532
|
# See URL: https://hakibenita.com/fast-load-data-python-postgresql
import time
from functools import wraps
from memory_profiler import memory_usage # type: ignore
def profile(fn):
@wraps(fn)
def inner(*args, **kwargs):
fn_kwargs_str = ", ".join(f"{k}={v}" for k, v in kwargs.items())
print(f"\n{fn.__name__}({fn_kwargs_str})")
# Measure time
t = time.perf_counter()
return_value = fn(*args, **kwargs)
elapsed = time.perf_counter() - t
print(f"Time spent: {elapsed:0.4}")
# Measure memory
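        # Note: memory_usage() calls fn again here with the same args/kwargs,
        # so the wrapped function runs twice per call and return_value is
        # taken from this second run.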
mem, return_value = memory_usage((fn, args, kwargs), retval=True, timeout=200, interval=1e-7)
print(f"Memory used: {max(mem) - min(mem)}")
return return_value
return inner
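# A minimal usage sketch (added for illustration; not part of the original
# module). The function name and workload below are assumptions made only to
# show how the decorator is applied; running this file directly prints the
# runtime and memory delta of the decorated call.
@profile
def _example_workload(n=200_000):
    # Build a throwaway list so the profiler has something measurable.
    return [i * i for i in range(n)]
if __name__ == "__main__":
    _example_workload(n=200_000)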
|
PyOpenGLExample/squares.py
|
DazEB2/SimplePyScripts
| 117 |
83545
|
import random
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
"""
Generating squares
This example will generate 25 squares, each in a randomly chosen gray value.
The gray value is chosen out of 25 different possibilities. Every redraw of the
window will create a new set of squares.
http://www.de-brauwer.be/wiki/wikka.php?wakka=PyOpenGLSquares
"""
def initFun():
glClearColor(1.0, 1.0, 1.0, 0.0)
glColor3f(0.0, 0.0, 0.0)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluOrtho2D(0.0, 640.0, 0.0, 480.0)
def displayFun():
glClear(GL_COLOR_BUFFER_BIT)
for i in range(0, 25):
        gray = random.randint(0, 24) / 25.0  # one of 25 gray levels, as described above
glColor3f(gray, gray, gray)
glRecti(random.randint(0, 640), random.randint(0, 480),
random.randint(0, 640), random.randint(0, 480))
glFlush()
if __name__ == '__main__':
glutInit()
    glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB)
    glutInitWindowSize(640, 480)
    glutCreateWindow(b"DrawSquares")
glutDisplayFunc(displayFun)
initFun()
glutMainLoop()
|
saleor/account/migrations/0023_auto_20180719_0520.py
|
elwoodxblues/saleor
| 15,337 |
83591
|
# Generated by Django 2.0.3 on 2018-07-19 10:20
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("account", "0022_auto_20180718_0956")]
operations = [
migrations.AlterModelOptions(
name="user",
options={
"permissions": (
("manage_users", "Manage customers."),
("manage_staff", "Manage staff."),
("impersonate_users", "Impersonate customers."),
)
},
)
]
|
pylibui/controls/tab.py
|
superzazu/pylibui
| 222 |
83623
|
<reponame>superzazu/pylibui
"""
Python wrapper for libui.
"""
from pylibui import libui
from .control import Control
class Tab(Control):
def __init__(self):
"""
Creates a new tab.
"""
super().__init__()
self.control = libui.uiNewTab()
def append(self, name, control):
"""
Appends a control to the tab.
:param name: str
:param control: uiControl
:return: None
"""
libui.uiTabAppend(self.control, name, control.pointer())
def insertAt(self, name, before, control):
"""
        Inserts a control into the tab before the given position.
:param name: str
:param before: int
:param control: uiControl
:return: None
"""
libui.uiTabInsertAt(self.control, name, before, control.pointer())
def delete(self, index):
"""
Deletes a control from the tab.
:param index: int
:return: None
"""
libui.uiTabDelete(self.control, index)
def setMargined(self, page, margined):
"""
Sets whether the tab's page is margined or not.
:param page: int
:param margined: bool
:return: None
"""
libui.uiTabSetMargined(self.control, page, int(margined))
def getMargined(self, page):
"""
Returns whether the tab's page is margined or not.
:param page: int
:return: bool
"""
return bool(libui.uiTabMargined(self.control, page))
def getNumPages(self):
"""
Returns the number of pages in the tab.
:return: int
"""
return libui.uiTabNumPages(self.control)
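# A hedged usage sketch (added for illustration; not part of the original
# module). It only exercises the Tab methods defined above; `some_control` and
# `other_control` stand in for pylibui Control instances created elsewhere and
# are placeholders, not objects constructed here, so the snippet is kept as a
# comment rather than executable code.
#
#   tab = Tab()
#   tab.append('First page', some_control)
#   tab.insertAt('Inserted page', 0, other_control)
#   tab.setMargined(0, True)
#   assert tab.getNumPages() == 2
#   tab.delete(1)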
|
pytextrank/positionrank.py
|
CaptXiong/pytextrank
| 899 |
83649
|
<reponame>CaptXiong/pytextrank
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# see license https://github.com/DerwenAI/pytextrank#license-and-copyright
"""
Implements the *PositionRank* algorithm.
"""
import typing
from spacy.tokens import Doc # type: ignore # pylint: disable=E0401
from .base import BaseTextRankFactory, BaseTextRank, Lemma
from .util import groupby_apply
class PositionRankFactory (BaseTextRankFactory):
"""
A factory class that provides the document with its instance of
`PositionRank`
"""
def __call__ (
self,
doc: Doc,
) -> Doc:
"""
Set the extension attributes on a `spaCy` [`Doc`](https://spacy.io/api/doc)
document to create a *pipeline component* for `PositionRank` as
a stateful component, invoked when the document gets processed.
See: <https://spacy.io/usage/processing-pipelines#pipelines>
doc:
a document container, providing the annotations produced by earlier stages of the `spaCy` pipeline
"""
Doc.set_extension("textrank", force=True, default=None)
Doc.set_extension("phrases", force=True, default=[])
doc._.textrank = PositionRank(
doc,
edge_weight = self.edge_weight,
pos_kept = self.pos_kept,
token_lookback = self.token_lookback,
scrubber = self.scrubber,
stopwords = self.stopwords,
)
doc._.phrases = doc._.textrank.calc_textrank()
return doc
class PositionRank (BaseTextRank):
"""
Implements the *PositionRank* algorithm described by
[[florescuc17]](https://derwen.ai/docs/ptr/biblio/#florescuc17),
deployed as a `spaCy` pipeline component.
This class does not get called directly; instantiate its factory
instead.
"""
def get_personalization (
self
) -> typing.Optional[typing.Dict[Lemma, float]]:
"""
Get the *node weights* for initializing the use of the
[*Personalized PageRank*](https://derwen.ai/docs/ptr/glossary/#personalized-pagerank)
algorithm.
From the cited reference:
> Specifically, we propose to assign a higher probability to a word
> found on the 2nd position as compared with a word found on the 50th
> position in the same document. The weight of each candidate word is
> equal to its inverse position in the document. If the same word
> appears multiple times in the target document, then we sum all its
> position weights.
> For example, a word v_i occurring in the following positions: 2nd,
> 5th and 10th, has a weight p(v_i) = 1/2 + 1/5 + 1/10 = 4/5 = 0.8
> The weights of words are normalized before they are used in the
> position-biased PageRank.
returns:
Biased restart probabilities to use in the *PageRank* algorithm.
"""
weighted_tokens: typing.List[typing.Tuple[str, float]] = [
(tok, 1 / (i + 1))
for i, tok in enumerate(
token.lemma_ for token in self.doc if token.pos_ in self.pos_kept
)
]
keyfunc = lambda x: x[0]
applyfunc = lambda g: sum(w for text, w in g)
accumulated_weighted_tokens: typing.List[typing.Tuple[str, float]] = groupby_apply(
weighted_tokens,
keyfunc,
applyfunc,
)
accumulated_weighted_tokens = sorted(
accumulated_weighted_tokens, key=lambda x: x[1]
)
norm_weighted_tokens = {
k: w / sum(w_ for _, w_ in accumulated_weighted_tokens)
for k, w in accumulated_weighted_tokens
}
# while the authors assign higher probability to a "word",
# our *lemma graph* vertices are (lemma, pos) tuples,
# therefore we map each `Lemma` weight to all the *lemma
# graph* vertices which contain it
# TODO: # pylint: disable=W0511
# => should this map to (lemma, pos) pairs instead?
weighted_nodes: typing.Dict[Lemma, float] = {
Lemma(token.lemma_, token.pos_): norm_weighted_tokens[token.lemma_]
for token in self.doc
if token.pos_ in self.pos_kept
}
return weighted_nodes
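# A standalone sketch (added for illustration; independent of spaCy and of the
# classes above). It reproduces the position weighting described in the
# `get_personalization` docstring for a plain list of tokens: each occurrence
# contributes 1 / position, repeated occurrences are summed, and the weights
# are normalized at the end.
def _position_weights_sketch(tokens: typing.List[str]) -> typing.Dict[str, float]:
    weights: typing.Dict[str, float] = {}
    for position, token in enumerate(tokens, start=1):
        # accumulate the inverse-position weight for every occurrence
        weights[token] = weights.get(token, 0.0) + 1.0 / position
    total = sum(weights.values())
    return {token: weight / total for token, weight in weights.items()}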
|
language/mentionmemory/utils/metric_utils_test.py
|
urikz/language
| 1,199 |
83670
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for metric utils."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
from language.mentionmemory.utils import metric_utils
import numpy as np
_LARGE_NUMBER = 1e12
class ComputeMetricsTest(absltest.TestCase):
"""Test whether metrics computations produce expected values."""
batch_size = 32
seq_len = 20
vocab_size = 100
def test_logit_values_as_expected(self):
"""Test whether metrics computations produce expected values."""
logits = np.random.rand(self.batch_size, self.seq_len, self.vocab_size)
targets = np.random.randint(
self.vocab_size, size=(self.batch_size, self.seq_len))
dense_targets = jax.nn.one_hot(targets, self.vocab_size)
weights = np.random.randint(2, size=(self.batch_size, self.seq_len))
# Check loss and denominator make sense for random values
loss, denominator = metric_utils.compute_weighted_cross_entropy(
logits,
targets,
weights,
)
expected_loss = -jax.nn.log_softmax(logits, axis=-1) * dense_targets
expected_loss = (expected_loss * np.expand_dims(weights, axis=-1)).sum()
self.assertAlmostEqual(loss, expected_loss, 1)
self.assertAlmostEqual(denominator, weights.sum(), 1)
# Check loss makes sense for uniform and degenerate scores
logits = np.ones(shape=(self.batch_size, self.seq_len, self.vocab_size))
loss, denominator = metric_utils.compute_weighted_cross_entropy(
logits,
targets,
weights,
)
expected_loss = np.log(self.vocab_size)
self.assertAlmostEqual(loss / denominator, expected_loss, 4)
logits = np.zeros(shape=(self.batch_size, self.seq_len, self.vocab_size))
logits = logits + (
_LARGE_NUMBER * dense_targets - _LARGE_NUMBER * (1 - dense_targets))
loss, denominator = metric_utils.compute_weighted_cross_entropy(
logits,
targets,
weights,
)
self.assertAlmostEqual(loss / denominator, 0.0, 4)
def test_prob_values_as_expected(self):
probs = np.random.rand(self.batch_size, self.seq_len, self.vocab_size)
targets = np.random.randint(
self.vocab_size, size=(self.batch_size, self.seq_len))
dense_targets = jax.nn.one_hot(targets, self.vocab_size)
weights = np.random.randint(2, size=(self.batch_size, self.seq_len))
# Check loss and denominator make sense with probs as inputs
loss, denominator = metric_utils.compute_weighted_cross_entropy(
probs,
targets,
weights,
inputs_are_prob=True,
)
expected_loss = -np.log(probs) * dense_targets
expected_loss = (expected_loss * np.expand_dims(weights, axis=-1)).sum()
self.assertAlmostEqual(loss, expected_loss, 1)
self.assertAlmostEqual(denominator, weights.sum(), 1)
# Check loss makes sense for uniform and degenerate probabilities
probs = np.ones(shape=(self.batch_size, self.seq_len, self.vocab_size))
probs = probs / self.vocab_size
loss, denominator = metric_utils.compute_weighted_cross_entropy(
probs,
targets,
weights,
inputs_are_prob=True,
)
expected_loss = np.log(self.vocab_size)
self.assertAlmostEqual(loss / denominator, expected_loss, 4)
probs = np.zeros(shape=(self.batch_size, self.seq_len, self.vocab_size))
probs = probs + dense_targets
loss, denominator = metric_utils.compute_weighted_cross_entropy(
probs,
targets,
weights,
inputs_are_prob=True,
)
self.assertAlmostEqual(loss / denominator, 0.0, 4)
def test_accuracy_as_expected(self):
logits = np.random.rand(self.batch_size, self.seq_len, self.vocab_size)
targets = np.random.randint(
self.vocab_size, size=(self.batch_size, self.seq_len))
dense_targets = jax.nn.one_hot(targets, self.vocab_size)
weights = np.random.randint(2, size=(self.batch_size, self.seq_len))
# Check accuracy and denominator make sense
logits = np.ones((self.batch_size, self.seq_len, self.vocab_size),
dtype=np.float32)
correct = np.random.randint(2, size=(self.batch_size, self.seq_len, 1))
logits = logits + dense_targets * (0.5 * correct - 0.5 * (1 - correct))
acc, denominator = metric_utils.compute_weighted_accuracy(
logits,
targets,
weights,
)
expected_accuracy = (np.squeeze(correct) * weights).sum() / weights.sum()
self.assertAlmostEqual(acc / denominator, expected_accuracy, 1)
self.assertAlmostEqual(denominator, weights.sum(), 1)
class ComputeCrossEntropyTest(parameterized.TestCase):
"""Test whether loss and metrics computations produce expected values."""
@parameterized.parameters(
(0, 1, 29, 31, 31),
# Tests with large score values
(1, 1000000, 29, 31),
(2, 1000000, 29, 31),
# Tests with large number of positive, negatives and neutral classes
(3, 100, 29, 1001),
(4, 100, 323, 31),
# Tests whether lack of positives affects the numerical stability
(5, 1, 29, 31, 1, 31),
(6, 1, 29, 31, 0, 31),
(7, 1, 29, 31, 31, 1),
(8, 1, 29, 31, 31, 0),
(9, 1, 29, 31, 1, 1),
(10, 1, 29, 31, 0, 0),
(11, 1000000, 29, 31, 0, 0),
(12, 100, 29, 1001, 0, 0),
(13, 100, 323, 31, 0, 0),
)
def test_loss_and_metrics_as_expected(self,
seed,
scale,
local_n_mentions,
global_n_mentions,
max_num_positives=None,
max_num_negatives=None):
"""Test whether loss and metrics computation produces expected values."""
np.random.seed(seed)
max_num_negatives = max_num_negatives or global_n_mentions
max_num_positives = max_num_positives or global_n_mentions
shape = (local_n_mentions, global_n_mentions)
scores = np.random.random(shape) * scale
num_positives = np.random.randint(
max_num_positives + 1, size=(local_n_mentions))
num_positives[0] = 0
num_positives[-1] = global_n_mentions
num_negatives = np.random.randint(
max_num_negatives + 1, size=(local_n_mentions))
num_negatives = np.minimum(num_negatives, global_n_mentions - num_positives)
positives = np.zeros(shape, dtype=np.bool_)
negatives = np.zeros(shape, dtype=np.bool_)
for index in range(local_n_mentions):
ids = np.random.choice(
global_n_mentions,
num_positives[index] + num_negatives[index],
replace=False)
positives[index, ids[:num_positives[index]]] = True
negatives[index, ids[num_positives[index]:]] = True
self.assertEqual(np.logical_and(positives, negatives).sum(), 0)
weights = np.logical_and(num_positives > 0, num_negatives > 0)
(actual_loss, actual_metrics, (actual_acc_per_sample,
actual_weights_per_sample)
) = metric_utils.compute_cross_entropy_loss_with_positives_and_negatives_masks(
scores, positives, negatives)
expected_loss, expected_acc, expected_denom = 0, 0, 0
expected_acc_per_sample = []
# Consider every sample independently
for i in range(local_n_mentions):
if not weights[i]:
expected_acc_per_sample.append(0)
continue
# Collect positive and negative scores
positive_scores, negative_scores = [], []
for j in range(global_n_mentions):
if positives[i, j]:
positive_scores.append(scores[i, j])
if negatives[i, j]:
negative_scores.append(scores[i, j])
self.assertNotEmpty(positive_scores)
self.assertNotEmpty(negative_scores)
n_pos = len(positive_scores)
max_negative_scores = max(negative_scores)
current_loss, current_acc = 0, 0
# Consider positive class per sample independently
# and compute loss using a naive softmax op
for pos_index in range(n_pos):
current_scores = np.array([positive_scores[pos_index]] +
negative_scores)
current_scores = jax.nn.log_softmax(current_scores)
current_loss += -current_scores[0]
current_acc += int(positive_scores[pos_index] > max_negative_scores)
expected_loss += current_loss / n_pos
expected_acc += current_acc / n_pos
expected_denom += 1
expected_acc_per_sample.append(current_acc / n_pos)
self.assertAlmostEqual(actual_loss, expected_loss, places=2)
self.assertAlmostEqual(actual_metrics['loss'], expected_loss, places=2)
self.assertAlmostEqual(actual_metrics['acc'], expected_acc, places=4)
self.assertAlmostEqual(
actual_metrics['denominator'], expected_denom, places=4)
self.assertTrue(np.all(weights == actual_weights_per_sample))
self.assertSequenceAlmostEqual(
actual_acc_per_sample, expected_acc_per_sample, places=4)
class ComputeMetricsFromDuplicatesTest(absltest.TestCase):
"""Test whether metrics computation produces expected values."""
batch_size = 32
seq_len = 20
num_items = 100
num_classes = 200
def test_values_as_expected(self):
"""Test whether metrics computation produces expected values."""
probs = np.ones((self.batch_size, self.seq_len, self.num_items),
dtype=np.float32) / self.num_items
classes = np.ones((self.batch_size, self.seq_len, self.num_items),
dtype=np.int32)
targets = np.ones((self.batch_size, self.seq_len), dtype=np.int32)
weights = np.random.randint(2, size=(self.batch_size, self.seq_len))
# Check case where all classes are targets
loss, avg_prob, denominator = metric_utils.compute_loss_and_prob_from_probs_with_duplicates(
probs,
classes,
targets,
weights,
)
self.assertAlmostEqual(loss / denominator, 0.0, 4)
self.assertAlmostEqual(avg_prob / denominator, 1.0, 4)
self.assertAlmostEqual(denominator, weights.sum(), 4)
# Check case where no classes are targets
targets = np.zeros((self.batch_size, self.seq_len), dtype=np.int32)
loss, avg_prob, denominator = metric_utils.compute_loss_and_prob_from_probs_with_duplicates(
probs,
classes,
targets,
weights,
)
self.assertAlmostEqual(avg_prob / denominator, 0.0, 4)
# Check random cases
classes = np.random.randint(
self.num_classes, size=(self.batch_size, self.seq_len, self.num_items))
targets = np.random.randint(
self.num_classes, size=(self.batch_size, self.seq_len))
loss, avg_prob, denominator = metric_utils.compute_loss_and_prob_from_probs_with_duplicates(
probs,
classes,
targets,
weights,
)
correct_probs = (classes == np.expand_dims(targets, axis=-1)) * probs
expected_avg_prob = (
correct_probs * np.expand_dims(weights, axis=-1)).sum() / weights.sum()
self.assertAlmostEqual(avg_prob / denominator, expected_avg_prob, 4)
class ProcessMetricsTest(absltest.TestCase):
"""Test metrics processing."""
def test_values_as_expected(self):
"""Test whether processed dictionaries match expected values."""
metric_dict = {
'cat1': {
'key': 2.0,
'denominator': 1.0
},
'cat2': {
'key': 2.0,
'denominator': 2.0
},
}
processed_metrics = metric_utils.process_metrics(metric_dict)
expected_result = {
'cat1_key': 2.0,
'cat1_denom': 1.0,
'cat2_key': 1.0,
'cat2_denom': 2.0,
}
self.assertEqual(processed_metrics, expected_result)
metric_dict = {
'cat1': {
'key': 2.0,
'denominator': 1.0
},
'cat2': {
'key': 2.0,
'denominator': 2.0
},
}
processed_metrics = metric_utils.process_metrics(metric_dict, prefix='pref')
expected_result = {
'pref/cat1_key': 2.0,
'pref/cat1_denom': 1.0,
'pref/cat2_key': 1.0,
'pref/cat2_denom': 2.0,
}
self.assertEqual(processed_metrics, expected_result)
class UpdateMetricsDTypeTest(absltest.TestCase):
"""Test metrics processing."""
def test_types_as_expected(self):
"""Test whether updated metrics match expected types."""
metric_dict = {
'cat1': {
'key': jnp.asarray([1], dtype=jnp.int32),
'denominator': jnp.asarray([1], dtype=jnp.int16)
},
'cat2': {
'key': 2.0,
'denominator': jnp.asarray([1], dtype=jnp.bfloat16)
},
}
processed_metrics = metric_utils.update_metrics_dtype(metric_dict)
self.assertEqual(processed_metrics['cat1']['key'].dtype, jnp.float32)
self.assertEqual(processed_metrics['cat1']['denominator'].dtype,
jnp.float32)
self.assertIsInstance(processed_metrics['cat2']['key'], float)
self.assertEqual(processed_metrics['cat2']['denominator'].dtype,
jnp.float32)
if __name__ == '__main__':
absltest.main()
|
mephisto/scripts/mturk/cleanup.py
|
padentomasello/Mephisto
| 167 |
83674
|
<gh_stars>100-1000
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Utility script that finds, expires, and disposes of HITs that may not
have been taken down during a run that exited improperly.
"""
from mephisto.abstractions.providers.mturk.mturk_utils import (
get_outstanding_hits,
expire_and_dispose_hits,
)
from mephisto.abstractions.databases.local_database import LocalMephistoDB
from mephisto.abstractions.providers.mturk.mturk_requester import MTurkRequester
from typing import List, Dict, Any, Optional
db = LocalMephistoDB()
all_requesters = db.find_requesters(provider_type="mturk")
all_requesters += db.find_requesters(provider_type="mturk_sandbox")
print("You have the following requesters available for mturk and mturk sandbox:")
r_names = [r.requester_name for r in all_requesters]
print(sorted(r_names))
use_name = input("Enter the name of the requester to clear HITs from:\n>> ")
while use_name not in r_names:
use_name = input(
f"Sorry, {use_name} is not in the requester list. "
f"The following are valid: {r_names}\n"
f"Select one:\n>> "
)
requester = db.find_requesters(requester_name=use_name)[0]
assert isinstance(requester, MTurkRequester)
client = requester._get_client(requester._requester_name)
outstanding_hit_types = get_outstanding_hits(client)
num_hit_types = len(outstanding_hit_types.keys())
sum_hits = sum([len(outstanding_hit_types[x]) for x in outstanding_hit_types.keys()])
all_hits: List[Dict[str, Any]] = []
for hit_type in outstanding_hit_types.keys():
all_hits += outstanding_hit_types[hit_type]
broken_hits = [
h
for h in all_hits
if h["NumberOfAssignmentsCompleted"] == 0 and h["HITStatus"] != "Reviewable"
]
print(
f"The requester {use_name} has {num_hit_types} outstanding HIT "
f"types, with {len(broken_hits)} suspected active or broken HITs.\n"
"This may include tasks that are still in-flight, but also "
"tasks that have already expired but have not been disposed of yet."
)
run_type = input("Would you like to cleanup by (t)itle, or just clean up (a)ll?\n>> ")
use_hits: Optional[List[Dict[str, Any]]] = None
while use_hits is None:
if run_type.lower().startswith("t"):
use_hits = []
for hit_type in outstanding_hit_types.keys():
cur_title = outstanding_hit_types[hit_type][0]["Title"]
print(f"HIT TITLE: {cur_title}")
print(f"HIT COUNT: {len(outstanding_hit_types[hit_type])}")
should_clear = input(
"Should we cleanup this hit type? (y)es for yes, anything else for no: "
"\n>> "
)
if should_clear.lower().startswith("y"):
use_hits += outstanding_hit_types[hit_type]
elif run_type.lower().startswith("a"):
use_hits = all_hits
else:
run_type = input("Options are (t)itle, or (a)ll:\n>> ")
print(f"Disposing {len(use_hits)} HITs.")
remaining_hits = expire_and_dispose_hits(client, use_hits)
if len(remaining_hits) == 0:
print("Disposed!")
else:
print(
f"After disposing, {len(remaining_hits)} could not be disposed.\n"
f"These may not have been reviewed yet, or are being actively worked on.\n"
"They have been expired though, so please try to dispose later."
"The first 20 dispose errors are added below:"
)
print([h["dispose_exception"] for h in remaining_hits[:20]])
|
better-bus-buffers/BBB_Polygons_Step1.py
|
d-wasserman/public-transit-tools
| 130 |
83686
|
<reponame>d-wasserman/public-transit-tools<filename>better-bus-buffers/BBB_Polygons_Step1.py
####################################################
## Tool name: BetterBusBuffers
## Created by: <NAME>, Esri, <EMAIL>
## Last updated: 5 December 2017
####################################################
''' BetterBusBuffers Polygon Tool: Step 1 - Preprocess Buffers
BetterBusBuffers provides a quantitative measure of access to public transit
in your city. It creates buffers around the transit stops and weights them by
the number of trips that pass that stop during the time window you select,
accounting for areas served by more than one stop.
Output can be shown as the total number of trips or the average number of trips
per hour during the time window. You can use the symbology settings of the
resulting feature class to highlight the frequency of service in different
areas of town. Note that the tool tells you nothing about the destination of
the buses that pass by the stops, only how many of them there are.
BetterBusBuffers uses GTFS public transit data and ArcGIS Network Analyst.
Step 1 does the following:
- Creates service areas around your transit stops
- Runs some post-processing on those service areas to prepare them for further
analysis
You should only have to run Step 1 once for the geography and buffer size you
are analyzing. Step 1 will take a while to run for larger transit systems.
'''
################################################################################
'''Copyright 2017 Esri
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
################################################################################
import os, sqlite3
from shutil import copyfile
import arcpy
import BBB_SharedFunctions
def runTool(outDir, outGDB, inSQLDbase, inNetworkDataset, imp, BufferSize, restrictions, TrimSettings):
try:
# ----- Set up the run -----
try:
BBB_SharedFunctions.CheckArcVersion(min_version_pro="1.2")
BBB_SharedFunctions.CheckArcInfoLicense()
BBB_SharedFunctions.CheckOutNALicense()
BBB_SharedFunctions.CheckWorkspace()
# It's okay to overwrite in-memory stuff.
            OverwriteOutput = arcpy.env.overwriteOutput  # Get the original value so we can reset it.
arcpy.env.overwriteOutput = True
# Append .gdb to geodatabase name.
if not outGDB.lower().endswith(".gdb"):
outGDB += ".gdb"
outGDBwPath = os.path.join(outDir, outGDB)
# Create a file geodatabase for the results.
arcpy.management.CreateFileGDB(outDir, outGDB)
# Make a copy of the input SQL file in the Step 1 output so we can modify it.
SQLDbase = os.path.join(outGDBwPath, "Step1_GTFS.sql")
copyfile(inSQLDbase, SQLDbase)
# Connect to or create the SQL file.
conn = sqlite3.connect(SQLDbase)
c = BBB_SharedFunctions.c = conn.cursor()
impedanceAttribute = BBB_SharedFunctions.CleanUpImpedance(imp)
TrimPolys, TrimPolysValue = BBB_SharedFunctions.CleanUpTrimSettings(TrimSettings)
except:
arcpy.AddError("Error setting up run.")
raise
#----- Make a feature class of GTFS stops that we can use for buffers -----
try:
# Create a feature class of transit stops
arcpy.AddMessage("Creating a feature class of GTFS stops...")
StopsLayer, StopIDList = BBB_SharedFunctions.MakeStopsFeatureClass(os.path.join(outGDBwPath, "Step1_Stops"))
except:
arcpy.AddError("Error creating a feature class of GTFS stops.")
raise
#----- Create Service Areas around all stops in the system -----
try:
arcpy.AddMessage("Creating service areas around stops...")
arcpy.AddMessage("(This step will take a while for large networks.)")
polygons = BBB_SharedFunctions.MakeServiceAreasAroundStops(StopsLayer,
inNetworkDataset, impedanceAttribute, BufferSize,
restrictions, TrimPolys, TrimPolysValue)
except:
arcpy.AddError("Error creating service areas around stops.")
raise
#----- Post-process the polygons to prepare for Step 2 -----
try:
arcpy.AddMessage("Reformatting polygons for further analysis...")
arcpy.AddMessage("(This step will take a while for large networks.)")
# ----- Flatten the overlapping service area polygons -----
# Use World Cylindrical Equal Area (WKID 54034) to ensure proper use of cluster tolerance in meters
arcpy.env.outputCoordinateSystem = BBB_SharedFunctions.WorldCylindrical
# Flatten the overlapping polygons. This will ultimately be our output.
# Dummy points to use in FeatureToPolygon to get rid of unnecessary fields.
dummypoints = arcpy.management.CreateFeatureclass("in_memory",
"DummyPoints", "POINT")
# The flattened polygons will be our ultimate output in the end (final
# output of step 2).
FlatPolys = os.path.join(outGDBwPath, "Step1_FlatPolys")
            # FeatureToPolygon flattens overlapping polys.
# Set a large cluster tolerance to eliminate small sliver polygons and to
# keep the output file size down. Boundaries may move up to the distance
# specified in the cluster tolerance, but some amount of movement is
# acceptable, as service area polygons are inexact anyway.
# The large cluster tolerance may cause some geometry issues with the output
# later, but this is the best solution I've found so far that doesn't eat
# up too much analysis time and memory
clusTol = "5 meters"
arcpy.management.FeatureToPolygon(polygons, FlatPolys, clusTol, "", dummypoints)
arcpy.management.Delete(dummypoints)
# Add a field to the output file for number of trips and num trips / hour.
# Also create a polygon id field so we can keep track of them.
arcpy.management.AddField(FlatPolys, "PolyID", "LONG")
arcpy.management.AddField(FlatPolys, "NumTrips", "LONG")
arcpy.management.AddField(FlatPolys, "NumTripsPerHr", "DOUBLE")
arcpy.management.AddField(FlatPolys, "NumStopsInRange", "LONG")
arcpy.management.AddField(FlatPolys, "MaxWaitTime", "DOUBLE")
# ----- Create stacked points, one for each original SA polygon -----
# Create points for use in the Identity tool (one point per poly)
FlattenedPoints = os.path.join(outGDBwPath, "Step1_FlattenedPoints")
arcpy.management.FeatureToPoint(FlatPolys, FlattenedPoints, "INSIDE")
# Use Identity to stack points and keep the stop_ids from the original SAs.
# Results in a points layer with fields ORIG_FID for the IDs of the
# flattened polygons and a stop_id column with the stop ids.
# Points are stacked, and each has only one stop_id.
StackedPoints = os.path.join(outGDBwPath, "Step1_StackedPoints")
arcpy.analysis.Identity(FlattenedPoints, polygons, StackedPoints)
arcpy.management.Delete(FlattenedPoints)
# ----- Read the Stacked Points into an SQL table -----
# Create a SQL table associating the Polygon FID with the stop_ids that serve it.
c.execute("DROP TABLE IF EXISTS StackedPoints;")
schema = "Polygon_FID LONG, stop_id TEXT"
create_stmt = "CREATE TABLE StackedPoints (%s);" % schema
c.execute(create_stmt)
# Add data to the table. Track Polygon IDs with no associated stop_ids so we can delete them.
FIDsToDelete = []
AddToStackedPts = []
with arcpy.da.SearchCursor(StackedPoints, ["ORIG_FID", "stop_id"]) as StackedPtCursor:
for row in StackedPtCursor:
if not row[1]:
FIDsToDelete.append(row[0])
else:
AddToStackedPts.append((row[0], row[1],))
# Add the OD items to the SQL table
c.executemany('''INSERT INTO StackedPoints \
(Polygon_FID, stop_id) \
VALUES (?, ?);''', AddToStackedPts)
conn.commit()
arcpy.management.Delete(StackedPoints)
FIDsToDelete = set(FIDsToDelete)
# ----- Delete polygons not associated with any stop_ids -----
# These were generated by the FeatureToPolygon tool in areas completely
# surrounded by other polygons and aren't associated with any stops.
# Make feature layer containing only the polygons we want to delete.
desc2 = arcpy.Describe(FlatPolys)
OutputOIDName = desc2.OIDFieldName
# Anything with 0 area will just cause problems later.
WhereClause = '"Shape_Area" = 0'
if FIDsToDelete:
WhereClause += ' OR "' + OutputOIDName + '" IN ('
for FID in FIDsToDelete:
WhereClause += str(FID) + ", "
WhereClause = WhereClause[:-2] + ")"
arcpy.management.MakeFeatureLayer(FlatPolys, "FlatPolysLayer", WhereClause)
# Delete the polygons that don't correspond to any stop_ids.
arcpy.management.DeleteFeatures("FlatPolysLayer")
# ----- Populate the PolyID field -----
# Set PolyID equal to the OID.
expression = "!" + OutputOIDName + "!"
arcpy.management.CalculateField(FlatPolys, "PolyID", expression, "PYTHON")
except:
arcpy.AddError("Error post-processing polygons")
raise
arcpy.AddMessage("Done!")
arcpy.AddMessage("Files written to output geodatabase " + outGDBwPath + ":")
arcpy.AddMessage("- Step1_Stops")
arcpy.AddMessage("- Step1_FlatPolys")
arcpy.AddMessage("- Step1_GTFS.sql")
# Tell the tool that this is output. This will add the output to the map.
arcpy.SetParameterAsText(8, os.path.join(outGDBwPath, "Step1_Stops"))
arcpy.SetParameterAsText(9, os.path.join(outGDBwPath, "Step1_FlatPolys"))
arcpy.SetParameterAsText(10, os.path.join(outGDBwPath, "Step1_GTFS.sql"))
except BBB_SharedFunctions.CustomError:
arcpy.AddError("Failed to create BetterBusBuffers polygons.")
pass
except:
arcpy.AddError("Failed to create BetterBusBuffers polygons.")
raise
|
examples/hlapi/v3arch/asyncore/sync/manager/cmdgen/specific-v3-engine-id.py
|
RKinsey/pysnmp
| 492 |
83689
|
"""
Discover SNMPv3 SecurityEngineId
++++++++++++++++++++++++++++++++
Send SNMP GET request using the following scenario and options:
* try to communicate with a SNMPv3 Engine using:
* a non-existing user
* over IPv4/UDP
* to an Agent at demo.snmplabs.com:161
* if remote SNMP Engine ID is discovered, send SNMP GET request:
* with SNMPv3, user 'usr-md5-none', MD5 authentication, no privacy
at discovered securityEngineId
* to the same SNMP Engine ID
* for an OID in text form
"""#
from pysnmp.hlapi import *
snmpEngine = SnmpEngine()
transportTarget = UdpTransportTarget(('demo.snmplabs.com', 161))
#
# To discover the remote SNMP EngineID we will tap into the SNMP engine's inner
# workings by setting up an execution point observer on INTERNAL class PDU processing
#
observerContext = {}
# Register a callback to be invoked at the specified execution point of the
# SNMP Engine; it is passed the local variables in the execution point's scope
snmpEngine.observer.registerObserver(
lambda e, p, v, c: c.update(securityEngineId=v['securityEngineId']),
'rfc3412.prepareDataElements:internal',
cbCtx=observerContext
)
# Send probe SNMP request with invalid credentials
authData = UsmUserData('non-existing-user')
errorIndication, errorStatus, errorIndex, varBinds = next(
getCmd(snmpEngine, authData, transportTarget, ContextData(),
ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr', 0)))
)
# See if our SNMP engine received REPORT PDU containing securityEngineId
if 'securityEngineId' not in observerContext:
print("Can't discover peer EngineID, errorIndication: %s" % errorIndication)
raise Exception()
securityEngineId = observerContext.pop('securityEngineId')
print('Remote securityEngineId = %s' % securityEngineId.prettyPrint())
#
# Query remote SNMP Engine using usmUserTable entry configured for it
#
authData = UsmUserData('usr-md5-none', 'authkey1',
securityEngineId=securityEngineId)
iterator = getCmd(
snmpEngine,
authData,
transportTarget,
ContextData(),
ObjectType(ObjectIdentity('1.3.6.1.2.1.1.1.0'))
)
errorIndication, errorStatus, errorIndex, varBinds = next(iterator)
if errorIndication:
print(errorIndication)
elif errorStatus:
print('%s at %s' % (errorStatus.prettyPrint(),
errorIndex and varBinds[int(errorIndex) - 1][0] or '?'))
else:
for name, val in varBinds:
print('%s = %s' % (name.prettyPrint(), val.prettyPrint()))
|
etl/parsers/etw/Microsoft_Windows_WiFiHotspotService.py
|
IMULMUL/etl-parser
| 104 |
83706
|
<reponame>IMULMUL/etl-parser<gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-WiFiHotspotService
GUID : 814182fe-58f7-11e1-853c-78e7d1ca7337
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("814182fe-58f7-11e1-853c-78e7d1ca7337"), event_id=1003, version=0)
class Microsoft_Windows_WiFiHotspotService_1003_0(Etw):
pattern = Struct(
"uString" / WString,
"Dword" / Int32ul
)
@declare(guid=guid("814182fe-58f7-11e1-853c-78e7d1ca7337"), event_id=1004, version=0)
class Microsoft_Windows_WiFiHotspotService_1004_0(Etw):
pattern = Struct(
"uString" / WString,
"Dword" / Int32ul
)
@declare(guid=guid("814182fe-58f7-11e1-853c-78e7d1ca7337"), event_id=1005, version=0)
class Microsoft_Windows_WiFiHotspotService_1005_0(Etw):
pattern = Struct(
"Ptr1" / Int64ul,
"Ptr2" / Int64ul
)
@declare(guid=guid("814182fe-58f7-11e1-853c-78e7d1ca7337"), event_id=1006, version=0)
class Microsoft_Windows_WiFiHotspotService_1006_0(Etw):
pattern = Struct(
"uString" / WString,
"Dword" / Int32ul
)
@declare(guid=guid("814182fe-58f7-11e1-853c-78e7d1ca7337"), event_id=1007, version=0)
class Microsoft_Windows_WiFiHotspotService_1007_0(Etw):
pattern = Struct(
"uString" / WString,
"Dword" / Int32ul
)
@declare(guid=guid("814182fe-58f7-11e1-853c-78e7d1ca7337"), event_id=1008, version=0)
class Microsoft_Windows_WiFiHotspotService_1008_0(Etw):
pattern = Struct(
"uString" / WString,
"Dword" / Int32ul
)
@declare(guid=guid("814182fe-58f7-11e1-853c-78e7d1ca7337"), event_id=1009, version=0)
class Microsoft_Windows_WiFiHotspotService_1009_0(Etw):
pattern = Struct(
"uString" / WString,
"Dword" / Int32ul
)
@declare(guid=guid("814182fe-58f7-11e1-853c-78e7d1ca7337"), event_id=1010, version=0)
class Microsoft_Windows_WiFiHotspotService_1010_0(Etw):
pattern = Struct(
"uString" / WString,
"Dword" / Int32ul
)
@declare(guid=guid("814182fe-58f7-11e1-853c-78e7d1ca7337"), event_id=1011, version=0)
class Microsoft_Windows_WiFiHotspotService_1011_0(Etw):
pattern = Struct(
"uString" / WString,
"Dword" / Int32ul
)
@declare(guid=guid("814182fe-58f7-11e1-853c-78e7d1ca7337"), event_id=1012, version=0)
class Microsoft_Windows_WiFiHotspotService_1012_0(Etw):
pattern = Struct(
"uString" / WString,
"Dword" / Int32ul
)
@declare(guid=guid("814182fe-58f7-11e1-853c-78e7d1ca7337"), event_id=2000, version=0)
class Microsoft_Windows_WiFiHotspotService_2000_0(Etw):
pattern = Struct(
"uString1" / WString,
"uString2" / WString,
"Dword1" / Int32ul,
"Dword2" / Int32ul,
"Dword3" / Int32ul
)
@declare(guid=guid("814182fe-58f7-11e1-853c-78e7d1ca7337"), event_id=3000, version=0)
class Microsoft_Windows_WiFiHotspotService_3000_0(Etw):
pattern = Struct(
"Dword1" / Int32ul,
"Dword2" / Int32ul
)
@declare(guid=guid("814182fe-58f7-11e1-853c-78e7d1ca7337"), event_id=3001, version=0)
class Microsoft_Windows_WiFiHotspotService_3001_0(Etw):
pattern = Struct(
"Ptr" / Int64ul,
"Dword" / Int32ul
)
@declare(guid=guid("814182fe-58f7-11e1-853c-78e7d1ca7337"), event_id=3002, version=0)
class Microsoft_Windows_WiFiHotspotService_3002_0(Etw):
pattern = Struct(
"Ptr" / Int64ul
)
@declare(guid=guid("814182fe-58f7-11e1-853c-78e7d1ca7337"), event_id=3003, version=0)
class Microsoft_Windows_WiFiHotspotService_3003_0(Etw):
pattern = Struct(
"uString" / WString,
"Dword" / Int32ul
)
@declare(guid=guid("814182fe-58f7-11e1-853c-78e7d1ca7337"), event_id=3004, version=0)
class Microsoft_Windows_WiFiHotspotService_3004_0(Etw):
pattern = Struct(
"uString" / WString,
"Dword" / Int32ul
)
@declare(guid=guid("814182fe-58f7-11e1-853c-78e7d1ca7337"), event_id=4000, version=0)
class Microsoft_Windows_WiFiHotspotService_4000_0(Etw):
pattern = Struct(
"aString" / CString
)
@declare(guid=guid("814182fe-58f7-11e1-853c-78e7d1ca7337"), event_id=4001, version=0)
class Microsoft_Windows_WiFiHotspotService_4001_0(Etw):
pattern = Struct(
"uString" / WString,
"Dword" / Int32ul
)
@declare(guid=guid("814182fe-58f7-11e1-853c-78e7d1ca7337"), event_id=4002, version=0)
class Microsoft_Windows_WiFiHotspotService_4002_0(Etw):
pattern = Struct(
"uString" / WString,
"Dword" / Int32ul
)
@declare(guid=guid("814182fe-58f7-11e1-853c-78e7d1ca7337"), event_id=4003, version=0)
class Microsoft_Windows_WiFiHotspotService_4003_0(Etw):
pattern = Struct(
"Dword1" / Int32ul,
"Dword2" / Int32ul,
"aString1" / CString,
"Dword3" / Int32ul,
"Dword4" / Int32ul,
"Dword5" / Int32ul,
"uString1" / WString
)
@declare(guid=guid("814182fe-58f7-11e1-853c-78e7d1ca7337"), event_id=4004, version=0)
class Microsoft_Windows_WiFiHotspotService_4004_0(Etw):
pattern = Struct(
"uString" / WString,
"Dword" / Int32ul
)
|
tests/unit/utils/test_ssdp.py
|
ifraixedes/saltstack-salt
| 9,425 |
83712
|
<reponame>ifraixedes/saltstack-salt<filename>tests/unit/utils/test_ssdp.py
"""
:codeauthor: :email:`<NAME> <<EMAIL>>`
"""
import datetime
import salt.utils.ssdp as ssdp
import salt.utils.stringutils
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase, skipIf
try:
import pytest
except ImportError:
pytest = None
class Mocks:
def get_socket_mock(self, expected_ip, expected_hostname):
"""
Get a mock of a socket
:return:
"""
sck = MagicMock()
sck.getsockname = MagicMock(return_value=(expected_ip, 123456))
sock_mock = MagicMock()
sock_mock.socket = MagicMock(return_value=sck)
sock_mock.gethostname = MagicMock(return_value=expected_hostname)
sock_mock.gethostbyname = MagicMock(return_value=expected_ip)
return sock_mock
def get_ssdp_factory(self, expected_ip=None, expected_hostname=None, **config):
if expected_ip is None:
expected_ip = "127.0.0.1"
if expected_hostname is None:
expected_hostname = "localhost"
sock_mock = self.get_socket_mock(expected_ip, expected_hostname)
with patch("salt.utils.ssdp.socket", sock_mock):
factory = ssdp.SSDPFactory(**config)
return factory
def get_ssdp_discovery_client(
self, expected_ip=None, expected_hostname=None, **config
):
if expected_ip is None:
expected_ip = "127.0.0.1"
if expected_hostname is None:
expected_hostname = "localhost"
sock_mock = self.get_socket_mock(expected_ip, expected_hostname)
with patch("salt.utils.ssdp.socket", sock_mock):
factory = ssdp.SSDPDiscoveryClient(**config)
return factory
def get_ssdp_discovery_server(
self, expected_ip=None, expected_hostname=None, **config
):
if expected_ip is None:
expected_ip = "127.0.0.1"
if expected_hostname is None:
expected_hostname = "localhost"
sock_mock = self.get_socket_mock(expected_ip, expected_hostname)
with patch("salt.utils.ssdp.socket", sock_mock):
factory = ssdp.SSDPDiscoveryServer(**config)
return factory
@skipIf(pytest is None, "PyTest is missing")
class SSDPBaseTestCase(TestCase, Mocks):
"""
TestCase for SSDP-related parts.
"""
@staticmethod
def exception_generic(*args, **kwargs):
"""
Side effect
:return:
"""
raise Exception("some network error")
@staticmethod
def exception_attr_error(*args, **kwargs):
"""
Side effect
:return:
"""
raise AttributeError("attribute error: {}. {}".format(args, kwargs))
@patch("salt.utils.ssdp._json", None)
@patch("salt.utils.ssdp.asyncio", None)
def test_base_avail(self):
"""
Test SSDP base class availability method.
:return:
"""
base = ssdp.SSDPBase()
assert not base._is_available()
with patch("salt.utils.ssdp._json", True):
assert not base._is_available()
with patch("salt.utils.ssdp.asyncio", True):
assert not base._is_available()
with patch("salt.utils.ssdp._json", True), patch(
"salt.utils.ssdp.asyncio", True
):
assert base._is_available()
def test_base_protocol_settings(self):
"""
Tests default constants data.
:return:
"""
base = ssdp.SSDPBase()
v_keys = ["signature", "answer", "port", "listen_ip", "timeout"]
v_vals = ["__salt_master_service", {}, 4520, "0.0.0.0", 3]
for key in v_keys:
assert key in base.DEFAULTS
for key in base.DEFAULTS:
assert key in v_keys
for key, value in zip(v_keys, v_vals):
assert base.DEFAULTS[key] == value
def test_base_self_ip(self):
"""
Test getting self IP method.
:return:
"""
base = ssdp.SSDPBase()
expected_ip = "192.168.1.10"
expected_host = "oxygen"
sock_mock = self.get_socket_mock(expected_ip, expected_host)
with patch("salt.utils.ssdp.socket", sock_mock):
assert base.get_self_ip() == expected_ip
sock_mock.socket().getsockname.side_effect = SSDPBaseTestCase.exception_generic
with patch("salt.utils.ssdp.socket", sock_mock):
assert base.get_self_ip() == expected_ip
@skipIf(pytest is None, "PyTest is missing")
class SSDPFactoryTestCase(TestCase, Mocks):
"""
Test socket protocol
"""
def test_attr_check(self):
"""
Tests attributes are set to the base class
:return:
"""
config = {
ssdp.SSDPBase.SIGNATURE: "-signature-",
ssdp.SSDPBase.ANSWER: {"this-is": "the-answer"},
}
expected_ip = "10.10.10.10"
factory = self.get_ssdp_factory(expected_ip=expected_ip, **config)
for attr in [ssdp.SSDPBase.SIGNATURE, ssdp.SSDPBase.ANSWER]:
assert hasattr(factory, attr)
assert getattr(factory, attr) == config[attr]
assert not factory.disable_hidden
assert factory.my_ip == expected_ip
def test_transport_sendto_success(self):
"""
Test transport send_to.
:return:
"""
transport = MagicMock()
log = MagicMock()
factory = self.get_ssdp_factory()
with patch.object(factory, "transport", transport), patch.object(
factory, "log", log
):
data = {"some": "data"}
addr = "10.10.10.10"
factory._sendto(data=data, addr=addr)
assert factory.transport.sendto.called
assert factory.transport.sendto.mock_calls[0][1][0]["some"] == "data"
assert factory.transport.sendto.mock_calls[0][2]["addr"] == "10.10.10.10"
assert factory.log.debug.called
assert factory.log.debug.mock_calls[0][1][0] == "Sent successfully"
def test_transport_sendto_retry(self):
"""
Test transport send_to.
:return:
"""
with patch("salt.utils.ssdp.time.sleep", MagicMock()):
transport = MagicMock()
transport.sendto = MagicMock(
side_effect=SSDPBaseTestCase.exception_attr_error
)
log = MagicMock()
factory = self.get_ssdp_factory()
with patch.object(factory, "transport", transport), patch.object(
factory, "log", log
):
data = {"some": "data"}
addr = "10.10.10.10"
factory._sendto(data=data, addr=addr)
assert factory.transport.sendto.called
assert ssdp.time.sleep.called
assert (
ssdp.time.sleep.call_args[0][0] > 0
and ssdp.time.sleep.call_args[0][0] < 0.5
)
assert factory.log.debug.called
assert "Permission error" in factory.log.debug.mock_calls[0][1][0]
def test_datagram_signature_bad(self):
"""
Test datagram_received on bad signature
:return:
"""
factory = self.get_ssdp_factory()
data = "nonsense"
addr = "10.10.10.10", "foo.suse.de"
with patch.object(factory, "log", MagicMock()):
factory.datagram_received(data=data, addr=addr)
assert factory.log.debug.called
assert "Received bad signature from" in factory.log.debug.call_args[0][0]
assert factory.log.debug.call_args[0][1] == addr[0]
assert factory.log.debug.call_args[0][2] == addr[1]
def test_datagram_signature_wrong_timestamp_quiet(self):
"""
Test datagram receives a wrong timestamp (no reply).
:return:
"""
factory = self.get_ssdp_factory()
data = "{}nonsense".format(ssdp.SSDPBase.DEFAULTS[ssdp.SSDPBase.SIGNATURE])
addr = "10.10.10.10", "foo.suse.de"
with patch.object(factory, "log", MagicMock()), patch.object(
factory, "_sendto", MagicMock()
):
factory.datagram_received(data=data, addr=addr)
assert factory.log.debug.called
assert (
"Received invalid timestamp in package"
in factory.log.debug.call_args[0][0]
)
assert not factory._sendto.called
def test_datagram_signature_wrong_timestamp_reply(self):
"""
Test datagram receives a wrong timestamp.
:return:
"""
factory = self.get_ssdp_factory()
factory.disable_hidden = True
signature = ssdp.SSDPBase.DEFAULTS[ssdp.SSDPBase.SIGNATURE]
data = "{}nonsense".format(signature)
addr = "10.10.10.10", "foo.suse.de"
with patch.object(factory, "log", MagicMock()), patch.object(
factory, "_sendto", MagicMock()
):
factory.datagram_received(data=data, addr=addr)
assert factory.log.debug.called
assert (
"Received invalid timestamp in package"
in factory.log.debug.call_args[0][0]
)
assert factory._sendto.called
assert (
"{}:E:Invalid timestamp".format(signature)
== factory._sendto.call_args[0][0]
)
def test_datagram_signature_outdated_timestamp_quiet(self):
"""
        Test if datagram processing reacts to an outdated message (more than 20 seconds). Quiet mode.
:return:
"""
factory = self.get_ssdp_factory()
signature = ssdp.SSDPBase.DEFAULTS[ssdp.SSDPBase.SIGNATURE]
data = "{}{}".format(signature, "1516623820")
addr = "10.10.10.10", "foo.suse.de"
ahead_dt = datetime.datetime.fromtimestamp(1516623841)
curnt_dt = datetime.datetime.fromtimestamp(1516623820)
delta = datetime.timedelta(0, 20)
with patch.object(factory, "log", MagicMock()), patch.object(
factory, "_sendto"
), patch("salt.utils.ssdp.datetime.datetime", MagicMock()), patch(
"salt.utils.ssdp.datetime.datetime.now", MagicMock(return_value=ahead_dt)
), patch(
"salt.utils.ssdp.datetime.datetime.fromtimestamp",
MagicMock(return_value=curnt_dt),
), patch(
"salt.utils.ssdp.datetime.timedelta", MagicMock(return_value=delta)
):
factory.datagram_received(data=data, addr=addr)
assert factory.log.debug.called
assert not factory.disable_hidden
assert not factory._sendto.called
assert "Received outdated package" in factory.log.debug.call_args[0][0]
def test_datagram_signature_outdated_timestamp_reply(self):
"""
        Test if datagram processing reacts to an outdated message (more than 20 seconds). Reply mode.
:return:
"""
factory = self.get_ssdp_factory()
factory.disable_hidden = True
signature = ssdp.SSDPBase.DEFAULTS[ssdp.SSDPBase.SIGNATURE]
data = "{}{}".format(signature, "1516623820")
addr = "10.10.10.10", "foo.suse.de"
ahead_dt = datetime.datetime.fromtimestamp(1516623841)
curnt_dt = datetime.datetime.fromtimestamp(1516623820)
delta = datetime.timedelta(0, 20)
with patch.object(factory, "log", MagicMock()), patch.object(
factory, "_sendto"
), patch("salt.utils.ssdp.datetime.datetime", MagicMock()), patch(
"salt.utils.ssdp.datetime.datetime.now", MagicMock(return_value=ahead_dt)
), patch(
"salt.utils.ssdp.datetime.datetime.fromtimestamp",
MagicMock(return_value=curnt_dt),
), patch(
"salt.utils.ssdp.datetime.timedelta", MagicMock(return_value=delta)
):
factory.datagram_received(data=data, addr=addr)
assert factory.log.debug.called
assert factory.disable_hidden
assert factory._sendto.called
assert factory._sendto.call_args[0][
0
] == "{}:E:Timestamp is too old".format(signature)
assert "Received outdated package" in factory.log.debug.call_args[0][0]
def test_datagram_signature_correct_timestamp_reply(self):
"""
Test if datagram processing sends out correct reply within 20 seconds.
:return:
"""
factory = self.get_ssdp_factory()
factory.disable_hidden = True
signature = ssdp.SSDPBase.DEFAULTS[ssdp.SSDPBase.SIGNATURE]
data = "{}{}".format(signature, "1516623820")
addr = "10.10.10.10", "foo.suse.de"
ahead_dt = datetime.datetime.fromtimestamp(1516623840)
curnt_dt = datetime.datetime.fromtimestamp(1516623820)
delta = datetime.timedelta(0, 20)
with patch.object(factory, "log", MagicMock()), patch.object(
factory, "_sendto"
), patch("salt.utils.ssdp.datetime.datetime", MagicMock()), patch(
"salt.utils.ssdp.datetime.datetime.now", MagicMock(return_value=ahead_dt)
), patch(
"salt.utils.ssdp.datetime.datetime.fromtimestamp",
MagicMock(return_value=curnt_dt),
), patch(
"salt.utils.ssdp.datetime.timedelta", MagicMock(return_value=delta)
):
factory.datagram_received(data=data, addr=addr)
assert factory.log.debug.called
assert factory.disable_hidden
assert factory._sendto.called
assert factory._sendto.call_args[0][0] == salt.utils.stringutils.to_bytes(
"{}:@:{{}}".format(signature)
)
assert 'Received "%s" from %s:%s' in factory.log.debug.call_args[0][0]
@skipIf(pytest is None, "PyTest is missing")
class SSDPServerTestCase(TestCase, Mocks):
"""
Server-related test cases
"""
def test_config_detached(self):
"""
Test if configuration is not a reference.
:return:
"""
old_ip = "10.10.10.10"
new_ip = "20.20.20.20"
config = {"answer": {"master": old_ip}}
with patch(
"salt.utils.ssdp.SSDPDiscoveryServer.get_self_ip",
MagicMock(return_value=new_ip),
):
srv = ssdp.SSDPDiscoveryServer(**config)
assert srv._config["answer"]["master"] == new_ip
assert config["answer"]["master"] == old_ip
def test_run(self):
"""
Test server runner.
:return:
"""
with patch("salt.utils.ssdp.SSDPFactory", MagicMock()):
config = {
"answer": {"master": "10.10.10.10"},
ssdp.SSDPBase.LISTEN_IP: "10.10.10.10",
ssdp.SSDPBase.PORT: 12345,
}
srv = self.get_ssdp_discovery_server(**config)
srv.create_datagram_endpoint = MagicMock()
srv.log = MagicMock()
trnsp = MagicMock()
proto = MagicMock()
loop = MagicMock()
loop.run_until_complete = MagicMock(return_value=(trnsp, proto))
io = MagicMock()
io.ported = False
io.get_event_loop = MagicMock(return_value=loop)
with patch("salt.utils.ssdp.asyncio", io):
srv.run()
cde_args = io.get_event_loop().create_datagram_endpoint.call_args[1]
cfg_ip_addr, cfg_port = cde_args["local_addr"]
assert io.get_event_loop.called
assert io.get_event_loop().run_until_complete.called
assert io.get_event_loop().create_datagram_endpoint.called
assert io.get_event_loop().run_forever.called
assert trnsp.close.called
assert loop.close.called
assert srv.log.info.called
assert (
srv.log.info.call_args[0][0]
== "Stopping service discovery listener."
)
assert "allow_broadcast" in cde_args
assert cde_args["allow_broadcast"]
assert "local_addr" in cde_args
assert (
not cfg_ip_addr == ssdp.SSDPBase.DEFAULTS[ssdp.SSDPBase.LISTEN_IP]
and cfg_ip_addr == "10.10.10.10"
)
assert (
not cfg_port == ssdp.SSDPBase.DEFAULTS[ssdp.SSDPBase.PORT]
and cfg_port == 12345
)
@skipIf(pytest is None, "PyTest is missing")
class SSDPClientTestCase(TestCase, Mocks):
"""
Client-related test cases
"""
class Resource:
"""
Fake network reader
"""
def __init__(self):
self.pool = [
("some", "10.10.10.10"),
("data", "20.20.20.20"),
("data", "10.10.10.10"),
(None, None),
]
def read(self, *args, **kwargs):
return self.pool.pop(0)
def test_config_passed(self):
"""
Test if the configuration is passed.
:return:
"""
config = {
ssdp.SSDPBase.SIGNATURE: "SUSE Enterprise Server",
ssdp.SSDPBase.TIMEOUT: 5,
ssdp.SSDPBase.PORT: 12345,
}
clnt = self.get_ssdp_discovery_client(**config)
assert clnt._config[ssdp.SSDPBase.SIGNATURE] == config[ssdp.SSDPBase.SIGNATURE]
assert clnt._config[ssdp.SSDPBase.PORT] == config[ssdp.SSDPBase.PORT]
assert clnt._config[ssdp.SSDPBase.TIMEOUT] == config[ssdp.SSDPBase.TIMEOUT]
def test_config_detached(self):
"""
Test if the passed configuration is not a reference.
:return:
"""
config = {
ssdp.SSDPBase.SIGNATURE: "SUSE Enterprise Server",
}
clnt = self.get_ssdp_discovery_client(**config)
clnt._config["foo"] = "bar"
assert "foo" in clnt._config
assert "foo" not in config
def test_query(self):
"""
Test if client queries the broadcast
:return:
"""
config = {
ssdp.SSDPBase.SIGNATURE: "SUSE Enterprise Server",
ssdp.SSDPBase.PORT: 4000,
}
f_time = 1111
_socket = MagicMock()
with patch("salt.utils.ssdp.socket", _socket), patch(
"salt.utils.ssdp.time.time", MagicMock(return_value=f_time)
):
clnt = ssdp.SSDPDiscoveryClient(**config)
clnt._query()
assert clnt._socket.sendto.called
message, target = clnt._socket.sendto.call_args[0]
assert message == salt.utils.stringutils.to_bytes(
"{}{}".format(config[ssdp.SSDPBase.SIGNATURE], f_time)
)
assert target[0] == "<broadcast>"
assert target[1] == config[ssdp.SSDPBase.PORT]
def test_get_masters_map(self):
"""
Test getting map of the available masters on the network
:return:
"""
_socket = MagicMock()
response = {}
with patch("salt.utils.ssdp.socket", _socket):
clnt = ssdp.SSDPDiscoveryClient()
clnt._socket.recvfrom = SSDPClientTestCase.Resource().read
clnt.log = MagicMock()
clnt._collect_masters_map(response=response)
assert "10.10.10.10" in response
assert "20.20.20.20" in response
assert response["10.10.10.10"] == ["some", "data"]
assert response["20.20.20.20"] == ["data"]
def test_get_masters_map_error_handling(self):
"""
Test getting map handles timeout network exception
:return:
"""
_socket = MagicMock()
response = {}
error_msg = "fake testing timeout just had happened"
with patch("salt.utils.ssdp.socket", _socket):
clnt = ssdp.SSDPDiscoveryClient()
clnt._socket.recvfrom = MagicMock(side_effect=Exception(error_msg))
clnt.log = MagicMock()
clnt._collect_masters_map(response=response)
assert clnt.log.error.called
assert (
"Discovery master collection failure" in clnt.log.error.call_args[0][0]
)
assert error_msg == str(clnt.log.error.call_args[0][1])
assert not response
def test_discover_no_masters(self):
"""
Test discover available master on the network (none found).
:return:
"""
clnt = self.get_ssdp_discovery_client()
clnt._query = MagicMock()
clnt._collect_masters_map = MagicMock()
clnt.log = MagicMock()
clnt.discover()
assert clnt.log.info.called
assert clnt.log.info.call_args[0][0] == "No master has been discovered."
def test_discover_general_error(self):
"""
Test discover available master on the network (erroneous found)
:return:
"""
_socket = MagicMock()
error = "Admins on strike due to broken coffee machine"
signature = ssdp.SSDPBase.DEFAULTS[ssdp.SSDPBase.SIGNATURE]
fake_resource = SSDPClientTestCase.Resource()
fake_resource.pool = [
("{}:E:{}".format(signature, error), "10.10.10.10"),
(None, None),
]
with patch("salt.utils.ssdp.socket", _socket):
clnt = ssdp.SSDPDiscoveryClient()
clnt._socket.recvfrom = fake_resource.read
clnt._query = MagicMock()
clnt.log = MagicMock()
clnt.discover()
assert len(clnt.log.error.mock_calls) == 1
assert (
"Error response from the service publisher"
in clnt.log.error.call_args[0][0]
)
assert "10.10.10.10" == clnt.log.error.call_args[0][1]
assert clnt.log.error.call_args[1] == {}
assert clnt.log.error.call_args[0][2] == error
def test_discover_timestamp_error(self):
"""
        Test discovering available masters on the network (outdated timestamp)
:return:
"""
_socket = MagicMock()
error = (
"We only support a 1200 bps connection. Routing timestamp problems on"
" neural net."
)
signature = ssdp.SSDPBase.DEFAULTS[ssdp.SSDPBase.SIGNATURE]
fake_resource = SSDPClientTestCase.Resource()
fake_resource.pool = [
("{}:E:{}".format(signature, error), "10.10.10.10"),
(None, None),
]
with patch("salt.utils.ssdp.socket", _socket):
clnt = ssdp.SSDPDiscoveryClient()
clnt._socket.recvfrom = fake_resource.read
clnt._query = MagicMock()
clnt.log = MagicMock()
clnt.discover()
assert len(clnt.log.error.mock_calls) == 2
assert (
"Error response from the service publisher"
in clnt.log.error.mock_calls[0][1][0]
)
assert clnt.log.error.mock_calls[0][1][2] == error
assert clnt.log.error.mock_calls[0][2] == {}
assert (
"Publisher sent shifted timestamp" in clnt.log.error.mock_calls[1][1][0]
)
assert (
clnt.log.error.mock_calls[1][1][1]
== clnt.log.error.mock_calls[0][1][1]
== "10.10.10.10"
)
|
partial_weights.py
|
k5iogura/YOLOv2-chainer
| 387 |
83726
|
import time
import cv2
import numpy as np
from chainer import serializers, Variable
import chainer.functions as F
import argparse
from darknet19 import *
from yolov2 import *
from yolov2_grid_prob import *
from yolov2_bbox import *
n_classes = 10
n_boxes = 5
partial_layer = 18
def copy_conv_layer(src, dst, layers):
for i in layers:
        src_layer = getattr(src, "conv%d" % i)
        dst_layer = getattr(dst, "conv%d" % i)
dst_layer.W = src_layer.W
dst_layer.b = src_layer.b
def copy_bias_layer(src, dst, layers):
for i in layers:
        src_layer = getattr(src, "bias%d" % i)
        dst_layer = getattr(dst, "bias%d" % i)
dst_layer.b = src_layer.b
def copy_bn_layer(src, dst, layers):
for i in layers:
        src_layer = getattr(src, "bn%d" % i)
        dst_layer = getattr(dst, "bn%d" % i)
dst_layer.N = src_layer.N
dst_layer.avg_var = src_layer.avg_var
dst_layer.avg_mean = src_layer.avg_mean
dst_layer.gamma = src_layer.gamma
dst_layer.eps = src_layer.eps
# load model
print("loading original model...")
input_weight_file = "./backup/darknet19_448_final.model"
output_weight_file = "./backup/partial.model"
model = Darknet19Predictor(Darknet19())
serializers.load_hdf5(input_weight_file, model) # load saved model
yolov2 = YOLOv2(n_classes=n_classes, n_boxes=n_boxes)
copy_conv_layer(model.predictor, yolov2, range(1, partial_layer+1))
copy_bias_layer(model.predictor, yolov2, range(1, partial_layer+1))
copy_bn_layer(model.predictor, yolov2, range(1, partial_layer+1))
model = YOLOv2Predictor(yolov2)
print("saving model to %s" % (output_weight_file))
serializers.save_hdf5("%s" % (output_weight_file), model)
|
pl_bolts/models/rl/__init__.py
|
lavoiems/lightning-bolts
| 822 |
83743
|
<reponame>lavoiems/lightning-bolts
from pl_bolts.models.rl.advantage_actor_critic_model import AdvantageActorCritic
from pl_bolts.models.rl.double_dqn_model import DoubleDQN
from pl_bolts.models.rl.dqn_model import DQN
from pl_bolts.models.rl.dueling_dqn_model import DuelingDQN
from pl_bolts.models.rl.noisy_dqn_model import NoisyDQN
from pl_bolts.models.rl.per_dqn_model import PERDQN
from pl_bolts.models.rl.reinforce_model import Reinforce
from pl_bolts.models.rl.sac_model import SAC
from pl_bolts.models.rl.vanilla_policy_gradient_model import VanillaPolicyGradient
__all__ = [
"AdvantageActorCritic",
"DoubleDQN",
"DQN",
"DuelingDQN",
"NoisyDQN",
"PERDQN",
"Reinforce",
"SAC",
"VanillaPolicyGradient",
]
|
telemetry/telemetry/story/story_unittest.py
|
bopopescu/catapult-2
| 925 |
83790
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry import story
from telemetry.story import shared_state
# pylint: disable=abstract-method
class SharedStateBar(shared_state.SharedState):
pass
class StoryFoo(story.Story):
def __init__(self, name='', labels=None):
super(StoryFoo, self).__init__(
SharedStateBar, name, labels)
class StoryTest(unittest.TestCase):
def testStoriesHaveDifferentIds(self):
s0 = story.Story(SharedStateBar, 'foo')
s1 = story.Story(SharedStateBar, 'bar')
self.assertNotEqual(s0.id, s1.id)
def testNamelessStoryDisplayName(self):
s = StoryFoo()
self.assertEquals('StoryFoo', s.display_name)
def testNamedStoryDisplayName(self):
s = StoryFoo('Bar')
self.assertEquals('Bar', s.display_name)
def testStoryFileSafeName(self):
s = StoryFoo('Foo Bar:Baz~0')
self.assertEquals('Foo_Bar_Baz_0', s.file_safe_name)
def testNamelessStoryAsDict(self):
s = story.Story(SharedStateBar)
s_dict = s.AsDict()
self.assertEquals(s_dict['id'], s.id)
self.assertNotIn('name', s_dict)
def testNamedStoryAsDict(self):
s = story.Story(SharedStateBar, 'Foo')
s_dict = s.AsDict()
self.assertEquals(s_dict['id'], s.id)
self.assertEquals('Foo', s_dict['name'])
def testMakeJavaScriptDeterministic(self):
s = story.Story(SharedStateBar)
self.assertTrue(s.make_javascript_deterministic)
s = story.Story(SharedStateBar, make_javascript_deterministic=False)
self.assertFalse(s.make_javascript_deterministic)
s = story.Story(SharedStateBar, make_javascript_deterministic=True)
self.assertTrue(s.make_javascript_deterministic)
|
Nimbus/settings/secret.sample.py
|
cgreencode/Nimbus
| 213 |
83797
|
<reponame>cgreencode/Nimbus
"""
Rename this file to 'secret.py' once all settings are defined
"""
SECRET_KEY = "..."
HOSTNAME = "example.com"
DATABASE_URL = "mysql://<user>:<password>@<host>/<database>"
AWS_ACCESS_KEY_ID = "12345"
AWS_SECRET_ACCESS_KEY = "12345"
|
funk_svd/dataset.py
|
grofers/funk-svd
| 151 |
83805
|
import datetime
import numpy as np
import os
import pandas as pd
import shutil
import urllib.request
import zipfile
__all__ = [
'fetch_ml_ratings',
]
VARIANTS = {
'100k': {'filename': 'u.data', 'sep': '\t'},
'1m': {'filename': 'ratings.dat', 'sep': r'::'},
'10m': {'filename': 'ratings.dat', 'sep': r'::'},
'20m': {'filename': 'ratings.csv', 'sep': ','}
}
def fetch_ml_ratings(data_dir_path=None, variant='20m', verbose=False):
"""Fetches MovieLens ratings dataset.
Parameters
----------
data_dir_path : str, default=None
Explicit data directory path to MovieLens ratings file.
variant : {'100k', '1m', '10m', '20m'}, default='20m'
Movie lens dataset variant.
verbose : bool, default=False
        Whether or not to be verbose when downloading and unzipping the dataset.
Returns
-------
df : pandas.DataFrame
The MovieLens ratings dataset.
"""
if data_dir_path is None:
data_dir_path = _get_data_dir_path(data_dir_path)
dirname = 'ml-' + variant
filename = VARIANTS[variant]['filename']
csv_path = os.path.join(data_dir_path, dirname, filename)
zip_path = os.path.join(data_dir_path, dirname) + '.zip'
url = 'http://files.grouplens.org/datasets/movielens/ml-' + variant + \
'.zip'
else:
csv_path = data_dir_path
if os.path.exists(csv_path):
# Return data loaded into a DataFrame
df = _ml_ratings_csv_to_df(csv_path, variant)
return df
elif os.path.exists(zip_path):
# Unzip file before calling back itself
if verbose:
print('Unzipping data...')
with zipfile.ZipFile(zip_path, 'r') as zf:
zf.extractall(data_dir_path)
if variant == '10m':
os.rename(os.path.join(data_dir_path, 'ml-10M100K'),
os.path.join(data_dir_path, dirname))
os.remove(zip_path)
return fetch_ml_ratings(variant=variant, verbose=verbose)
else:
# Download the ZIP file before calling back itself
if verbose:
print('Downloading data...')
with urllib.request.urlopen(url) as r, open(zip_path, 'wb') as f:
shutil.copyfileobj(r, f)
return fetch_ml_ratings(variant=variant, verbose=verbose)
def _get_data_dir_path(data_dir_path=None):
"""Returns the path of the funk-svd data directory.
This folder is used to store large datasets to avoid downloading them
several times.
By default the data dir is set to a folder named 'funk_svd_data' in the
user home folder. Alternatively, it can be set by the `FUNK_SVD_DATA`
environment variable or programmatically by giving an explicit
`data_dir_path`.
If the folder does not already exist, it is automatically created.
Parameters
----------
data_dir_path : str, default=None
Explicit data directory path for large datasets.
Returns
-------
data_dir_path: str
Explicit data directory path for large datasets.
"""
if data_dir_path is None:
default = os.path.join('~', 'funk_svd_data')
data_dir_path = os.environ.get('FUNK_SVD_DATA', default=default)
data_dir_path = os.path.expanduser(data_dir_path)
if not os.path.exists(data_dir_path):
os.makedirs(data_dir_path)
return data_dir_path
def _ml_ratings_csv_to_df(csv_path, variant):
names = ['u_id', 'i_id', 'rating', 'timestamp']
dtype = {'u_id': np.uint32, 'i_id': np.uint32, 'rating': np.float64}
def date_parser(time):
return datetime.datetime.fromtimestamp(float(time))
df = pd.read_csv(csv_path, names=names, dtype=dtype, header=0,
sep=VARIANTS[variant]['sep'], parse_dates=['timestamp'],
date_parser=date_parser, engine='python')
df.sort_values(by='timestamp', inplace=True)
df.reset_index(drop=True, inplace=True)
return df
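# Hedged usage sketch (not part of the original module): shows how the public
# fetch_ml_ratings API documented above could be called. The '100k' variant and
# the column names come from VARIANTS and _ml_ratings_csv_to_df; running this
# downloads the dataset into ~/funk_svd_data (or $FUNK_SVD_DATA) if needed.
if __name__ == '__main__':
    ratings = fetch_ml_ratings(variant='100k', verbose=True)
    print(ratings[['u_id', 'i_id', 'rating', 'timestamp']].head())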
|
resources/code/translation.py
|
GCBallesteros/imreg_dft
| 167 |
83832
|
import os
import scipy as sp
import scipy.misc
import imreg_dft as ird
basedir = os.path.join('..', 'examples')
# the TEMPLATE
im0 = sp.misc.imread(os.path.join(basedir, "sample1.png"), True)
# the image to be transformed
im1 = sp.misc.imread(os.path.join(basedir, "sample2.png"), True)
result = ird.translation(im0, im1)
tvec = result["tvec"].round(4)
# the Transformed IMaGe.
timg = ird.transform_img(im1, tvec=tvec)
# Maybe we don't want to show plots all the time
if os.environ.get("IMSHOW", "yes") == "yes":
import matplotlib.pyplot as plt
ird.imshow(im0, im1, timg)
plt.show()
print("Translation is {}, success rate {:.4g}"
.format(tuple(tvec), result["success"]))
|
pyexfil/Comm/DNSoTLS/constants.py
|
goffinet/PyExfil
| 603 |
83853
|
<filename>pyexfil/Comm/DNSoTLS/constants.py
import os
DNS_OVER_TLS_PORT = 853
CHUNK_SIZE = 128
CHECK_CERT = True # We recommend using valid certificates. An invalid certificate (self-signed) might trigger alerts on some systems.
LOCAL_HOST = 'localhost'
MAX_BUFFER = 4096
MAX_CLIENTS = 5
if os.getcwd() == 'DNSoTLS':
CERT_FILE = 'cert.ccc'
elif os.getcwd() == 'PyExfil':
CERT_FILE = 'pyexfil/Comm/DNSoTLS/cert.ccc'
else:
CERT_FILE = 'pyexfil/Comm/DNSoTLS/cert.ccc'
|
questions/number-of-digit-one/Solution.py
|
marcus-aurelianus/leetcode-solutions
| 141 |
83856
|
<filename>questions/number-of-digit-one/Solution.py
"""
Given an integer n, count the total number of digit 1 appearing in all non-negative integers less than or equal to n.
Example:
Input: 13
Output: 6
Explanation: Digit 1 occurred in the following numbers: 1, 10, 11, 12, 13.
"""
class Solution:
def countDigitOne(self, n: int) -> int:
cnt = 0
mark = 1
while n >= mark:
c, r = divmod(n, (mark * 10))
cnt += c * mark
if r >= mark:
cnt += min(r - mark + 1, mark)
mark *= 10
return cnt
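# Hedged usage sketch (not part of the original solution): verifies the example
# from the problem statement above (the numbers up to 13 contain digit 1 six times).
if __name__ == "__main__":
    assert Solution().countDigitOne(13) == 6
    print(Solution().countDigitOne(13))  # -> 6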
|
modelchimp/migrations/0046_experimentasset.py
|
samzer/modelchimp-server
| 134 |
83857
|
<gh_stars>100-1000
# Generated by Django 2.1.7 on 2019-02-21 15:50
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('modelchimp', '0045_remove_machinelearningmodel_deep_learning_parameters'),
]
operations = [
migrations.CreateModel(
name='ExperimentAsset',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('meta_dict', django.contrib.postgres.fields.jsonb.JSONField(null=True)),
('asset', models.FileField(null=True, upload_to='asset/')),
('custom_file_name', models.CharField(blank=True, default='', max_length=200, null=True)),
('date_created', models.DateTimeField(auto_now_add=True)),
('date_modified', models.DateTimeField(auto_now=True)),
('ml_model', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='asset_experiment', to='modelchimp.MachineLearningModel')),
('project', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='asset_project', to='modelchimp.Project')),
],
),
]
|
fastapi/psql_minimum/dev.py
|
zacfrulloni/Rust-Full-Stack
| 1,091 |
83870
|
<filename>fastapi/psql_minimum/dev.py
# source ./bin/activate
import subprocess as cmd
response = input("[d]ev, [t]est?\n")
if response.startswith("t"):
    cp = cmd.run("pytest", check=True, shell=True)
else:
    cp = cmd.run("uvicorn main:app --reload", check=True, shell=True)
|
tools/build_defs/fb_native_wrapper.bzl
|
pdlanl/react-native
| 118,175 |
83871
|
<reponame>pdlanl/react-native
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
fb_native = struct(
android_aar = native.android_aar,
android_app_modularity = native.android_app_modularity,
android_binary = native.android_binary,
android_build_config = native.android_build_config,
android_bundle = native.android_bundle,
android_instrumentation_apk = native.android_instrumentation_apk,
android_instrumentation_test = native.android_instrumentation_test,
android_library = native.android_library,
android_manifest = native.android_manifest,
android_prebuilt_aar = native.android_prebuilt_aar,
android_resource = native.android_resource,
apk_genrule = native.apk_genrule,
apple_asset_catalog = native.apple_asset_catalog,
apple_binary = native.apple_binary,
apple_bundle = native.apple_bundle,
apple_library = native.apple_library,
apple_package = native.apple_package,
apple_resource = native.apple_resource,
apple_test = native.apple_test,
cgo_library = native.cgo_library,
command_alias = native.command_alias,
config_setting = native.config_setting,
constraint_setting = native.constraint_setting,
constraint_value = native.constraint_value,
core_data_model = native.core_data_model,
csharp_library = native.csharp_library,
cxx_binary = native.cxx_binary,
cxx_genrule = native.cxx_genrule,
cxx_library = native.cxx_library,
cxx_lua_extension = native.cxx_lua_extension,
cxx_precompiled_header = native.cxx_precompiled_header,
cxx_python_extension = native.cxx_python_extension,
cxx_test = native.cxx_test,
d_binary = native.d_binary,
d_library = native.d_library,
d_test = native.d_test,
export_file = native.export_file,
filegroup = native.filegroup,
gen_aidl = native.gen_aidl,
genrule = native.genrule,
go_binary = native.go_binary,
go_library = native.go_library,
go_test = native.go_test,
groovy_library = native.groovy_library,
groovy_test = native.groovy_test,
gwt_binary = native.gwt_binary,
halide_library = native.halide_library,
haskell_binary = native.haskell_binary,
haskell_ghci = native.haskell_ghci,
haskell_haddock = native.haskell_haddock,
haskell_library = native.haskell_library,
haskell_prebuilt_library = native.haskell_prebuilt_library,
http_archive = native.http_archive,
http_file = native.http_file,
jar_genrule = native.jar_genrule,
java_annotation_processor = native.java_annotation_processor,
java_binary = native.java_binary,
java_library = native.java_library,
java_test = native.java_test,
js_bundle = native.js_bundle,
js_bundle_genrule = native.js_bundle_genrule,
js_library = native.js_library,
keystore = native.keystore,
kotlin_library = native.kotlin_library,
kotlin_test = native.kotlin_test,
lua_binary = native.lua_binary,
lua_library = native.lua_library,
ndk_library = native.ndk_library,
ocaml_binary = native.ocaml_binary,
ocaml_library = native.ocaml_library,
platform = native.platform,
prebuilt_apple_framework = native.prebuilt_apple_framework,
prebuilt_cxx_library = native.prebuilt_cxx_library,
prebuilt_cxx_library_group = native.prebuilt_cxx_library_group,
prebuilt_dotnet_library = native.prebuilt_dotnet_library,
prebuilt_go_library = native.prebuilt_go_library,
prebuilt_jar = native.prebuilt_jar,
prebuilt_native_library = native.prebuilt_native_library,
prebuilt_ocaml_library = native.prebuilt_ocaml_library,
prebuilt_python_library = native.prebuilt_python_library,
prebuilt_rust_library = native.prebuilt_rust_library,
python_binary = native.python_binary,
python_library = native.python_library,
python_test = native.python_test,
remote_file = native.remote_file,
robolectric_test = native.robolectric_test,
rust_binary = native.rust_binary,
rust_library = native.rust_library,
rust_test = native.rust_test,
scala_library = native.scala_library,
scala_test = native.scala_test,
scene_kit_assets = native.scene_kit_assets,
sh_binary = native.sh_binary,
sh_test = native.sh_test,
swift_library = native.swift_library,
test_suite = native.test_suite,
versioned_alias = native.versioned_alias,
worker_tool = native.worker_tool,
xcode_postbuild_script = native.xcode_postbuild_script,
xcode_prebuild_script = native.xcode_prebuild_script,
xcode_workspace_config = native.xcode_workspace_config,
zip_file = native.zip_file,
)
|
pyctuator/impl/spring_boot_admin_registration.py
|
SolarEdgeTech/pyctuator
| 118 |
83891
|
import http.client
import json
import logging
import os
import ssl
import threading
import urllib.parse
from base64 import b64encode
from datetime import datetime
from http.client import HTTPConnection, HTTPResponse
from typing import Optional, Dict
from pyctuator.auth import Auth, BasicAuth
# pylint: disable=too-many-instance-attributes
class BootAdminRegistrationHandler:
def __init__(
self,
registration_url: str,
registration_auth: Optional[Auth],
application_name: str,
pyctuator_base_url: str,
start_time: datetime,
service_url: str,
registration_interval_sec: float,
application_metadata: Optional[dict] = None,
ssl_context: Optional[ssl.SSLContext] = None,
) -> None:
self.registration_url = registration_url
self.registration_auth = registration_auth
self.application_name = application_name
self.pyctuator_base_url = pyctuator_base_url
self.start_time = start_time
self.service_url = service_url if service_url.endswith("/") else service_url + "/"
self.registration_interval_sec = registration_interval_sec
self.instance_id = None
self.application_metadata = application_metadata if application_metadata else {}
self.ssl_context = ssl_context
self.should_continue_registration_schedule: bool = False
self.disable_certificate_validation_for_https_registration: bool = \
os.getenv("PYCTUATOR_REGISTRATION_NO_CERT") is not None
def _schedule_next_registration(self, registration_interval_sec: float) -> None:
timer = threading.Timer(
registration_interval_sec,
self._register_with_admin_server,
[]
)
timer.setDaemon(True)
timer.start()
def _register_with_admin_server(self) -> None:
# When waking up, make sure registration is still needed
if not self.should_continue_registration_schedule:
return
registration_data = {
"name": self.application_name,
"managementUrl": self.pyctuator_base_url,
"healthUrl": f"{self.pyctuator_base_url}/health",
"serviceUrl": self.service_url,
"metadata": {
"startup": self.start_time.isoformat(),
**self.application_metadata
}
}
logging.debug("Trying to post registration data to %s: %s", self.registration_url, registration_data)
conn: Optional[HTTPConnection] = None
try:
headers = {"Content-type": "application/json"}
self.authenticate(headers)
response = self._http_request(self.registration_url, "POST", headers, json.dumps(registration_data))
if response.status < 200 or response.status >= 300:
logging.warning("Failed registering with boot-admin, got %s - %s", response.status, response.read())
else:
self.instance_id = json.loads(response.read().decode('utf-8'))["id"]
except Exception as e: # pylint: disable=broad-except
logging.warning("Failed registering with boot-admin, %s (%s)", e, type(e))
finally:
if conn:
conn.close()
# Schedule the next registration unless asked to abort
if self.should_continue_registration_schedule:
self._schedule_next_registration(self.registration_interval_sec)
def deregister_from_admin_server(self) -> None:
if self.instance_id is None:
return
headers = {}
self.authenticate(headers)
deregistration_url = f"{self.registration_url}/{self.instance_id}"
logging.info("Deregistering from %s", deregistration_url)
conn: Optional[HTTPConnection] = None
try:
response = self._http_request(deregistration_url, "DELETE", headers)
if response.status < 200 or response.status >= 300:
logging.warning("Failed deregistering from boot-admin, got %s - %s", response.status, response.read())
except Exception as e: # pylint: disable=broad-except
logging.warning("Failed deregistering from boot-admin, %s (%s)", e, type(e))
finally:
if conn:
conn.close()
def authenticate(self, headers: Dict) -> None:
if isinstance(self.registration_auth, BasicAuth):
password = self.registration_auth.password if self.registration_auth.password else ""
authorization_string = self.registration_auth.username + ":" + password
encoded_authorization: str = b64encode(bytes(authorization_string, "utf-8")).decode("ascii")
headers["Authorization"] = f"Basic {encoded_authorization}"
    def start(self, initial_delay_sec: Optional[float] = None) -> None:
logging.info("Starting recurring registration of %s with %s",
self.pyctuator_base_url, self.registration_url)
self.should_continue_registration_schedule = True
self._schedule_next_registration(initial_delay_sec or self.registration_interval_sec)
def stop(self) -> None:
logging.info("Stopping recurring registration")
self.should_continue_registration_schedule = False
def _http_request(self, url: str, method: str, headers: Dict[str, str], body: Optional[str] = None) -> HTTPResponse:
url_parts = urllib.parse.urlsplit(url)
if url_parts.scheme == "http":
conn = http.client.HTTPConnection(url_parts.hostname, url_parts.port)
elif url_parts.scheme == "https":
context = self.ssl_context
if not context and self.disable_certificate_validation_for_https_registration:
context = ssl.SSLContext()
context.verify_mode = ssl.CERT_NONE
conn = http.client.HTTPSConnection(url_parts.hostname, url_parts.port, context=context)
else:
raise ValueError(f"Unknown scheme in {url}")
conn.request(
method,
url_parts.path,
body=body,
headers=headers,
)
return conn.getresponse()
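# Hedged usage sketch (not part of the original module): wires the handler up
# with the constructor arguments defined above. The URLs are placeholders, and
# BasicAuth("user", "pass") assumes the username/password fields read by
# authenticate(); pass registration_auth=None to skip authentication.
if __name__ == "__main__":
    handler = BootAdminRegistrationHandler(
        registration_url="http://localhost:8080/instances",
        registration_auth=BasicAuth("user", "pass"),
        application_name="demo-app",
        pyctuator_base_url="http://localhost:8000/pyctuator",
        start_time=datetime.now(),
        service_url="http://localhost:8000",
        registration_interval_sec=10.0,
    )
    handler.start(initial_delay_sec=1.0)  # schedules the first registration attempt
    handler.stop()                        # stops the recurring schedule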
|
solaris/nets/zoo/multiclass_segmentation.py
|
rbavery/solaris
| 367 |
83893
|
<gh_stars>100-1000
import torch
from torch import nn
from torchvision.models import vgg11, vgg16, resnet34
""" Code heavily adapted from ternaus robot-surgery-segmentation
https://github.com/ternaus/robot-surgery-segmentation """
class MultiClass_Resnet34(nn.Module):
def __init__(self, num_classes=1, num_filters=32, pretrained=True, is_deconv=False):
super().__init__()
self.num_classes = num_classes
self.pool = nn.MaxPool2d(2, 2)
self.encoder = resnet34(pretrained=pretrained)
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Sequential(self.encoder.conv1,
self.encoder.bn1,
self.encoder.relu,
self.pool)
self.conv2 = self.encoder.layer1
self.conv3 = self.encoder.layer2
self.conv4 = self.encoder.layer3
self.conv5 = self.encoder.layer4
self.center = MultiClass_DecoderBlock(512, num_filters * 8 * 2, num_filters * 8, is_deconv)
self.dec5 = MultiClass_DecoderBlock(512 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv)
self.dec4 = MultiClass_DecoderBlock(256 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv)
self.dec3 = MultiClass_DecoderBlock(128 + num_filters * 8, num_filters * 4 * 2, num_filters * 2, is_deconv)
self.dec2 = MultiClass_DecoderBlock(64 + num_filters * 2, num_filters * 2 * 2, num_filters * 2 * 2, is_deconv)
self.dec1 = MultiClass_DecoderBlock(num_filters * 2 * 2, num_filters * 2 * 2, num_filters, is_deconv)
self.dec0 = MultiClass_ConvRelu(num_filters, num_filters)
self.final = nn.Conv2d(num_filters, num_classes, kernel_size=1)
def forward(self, x):
conv1 = self.conv1(x)
conv2 = self.conv2(conv1)
conv3 = self.conv3(conv2)
conv4 = self.conv4(conv3)
conv5 = self.conv5(conv4)
center = self.center(self.pool(conv5))
dec5 = self.dec5(torch.cat([center, conv5], 1))
dec4 = self.dec4(torch.cat([dec5, conv4], 1))
dec3 = self.dec3(torch.cat([dec4, conv3], 1))
dec2 = self.dec2(torch.cat([dec3, conv2], 1))
dec1 = self.dec1(dec2)
dec0 = self.dec0(dec1)
x_out = self.final(dec0)
return x_out
class MultiClass_UNet_VGG16(nn.Module):
def __init__(self, num_classes=1, num_filters=32, pretrained=True):
super().__init__()
self.num_classes = num_classes
self.encoder = vgg16(pretrained=pretrained).features
self.pool = nn.MaxPool2d(2, 2)
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Sequential(self.encoder[0], self.relu, self.encoder[2], self.relu)
self.conv2 = nn.Sequential(self.encoder[5], self.relu, self.encoder[7], self.relu)
self.conv3 = nn.Sequential(self.encoder[10], self.relu, self.encoder[12], self.relu,
self.encoder[14], self.relu)
self.conv4 = nn.Sequential(self.encoder[17], self.relu, self.encoder[19], self.relu,
self.encoder[21], self.relu)
self.conv5 = nn.Sequential(self.encoder[24], self.relu, self.encoder[26], self.relu,
self.encoder[28], self.relu)
self.center = MultiClass_DecoderBlock(512, num_filters * 8 * 2,
num_filters * 8)
self.dec5 = MultiClass_DecoderBlock(
512 + num_filters * 8, num_filters * 8 * 2, num_filters * 8)
self.dec4 = MultiClass_DecoderBlock(
512 + num_filters * 8, num_filters * 8 * 2, num_filters * 8)
self.dec3 = MultiClass_DecoderBlock(
256 + num_filters * 8, num_filters * 4 * 2, num_filters * 2)
self.dec2 = MultiClass_DecoderBlock(
128 + num_filters * 2, num_filters * 2 * 2, num_filters)
self.dec1 = MultiClass_ConvRelu(64 + num_filters, num_filters)
self.final = nn.Conv2d(num_filters, num_classes, kernel_size=1)
def forward(self, x):
conv1 = self.conv1(x)
conv2 = self.conv2(self.pool(conv1))
conv3 = self.conv3(self.pool(conv2))
conv4 = self.conv4(self.pool(conv3))
conv5 = self.conv5(self.pool(conv4))
center = self.center(self.pool(conv5))
dec5 = self.dec5(torch.cat([center, conv5], 1))
dec4 = self.dec4(torch.cat([dec5, conv4], 1))
dec3 = self.dec3(torch.cat([dec4, conv3], 1))
dec2 = self.dec2(torch.cat([dec3, conv2], 1))
dec1 = self.dec1(torch.cat([dec2, conv1], 1))
x_out = self.final(dec1)
return x_out
class MultiClass_UNet_VGG11(nn.Module):
def __init__(self, num_classes=1, num_filters=32, pretrained=True):
super().__init__()
self.num_classes = num_classes
self.pool = nn.MaxPool2d(2, 2)
self.encoder = vgg11(pretrained=pretrained).features
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Sequential(self.encoder[0], self.relu)
self.conv2 = nn.Sequential(self.encoder[3], self.relu)
self.conv3 = nn.Sequential(
self.encoder[6],
self.relu,
self.encoder[8],
self.relu,
)
self.conv4 = nn.Sequential(
self.encoder[11],
self.relu,
self.encoder[13],
self.relu,
)
self.conv5 = nn.Sequential(
self.encoder[16],
self.relu,
self.encoder[18],
self.relu,
)
self.center = MultiClass_DecoderBlock(256 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv=True)
self.dec5 = MultiClass_DecoderBlock(512 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv=True)
self.dec4 = MultiClass_DecoderBlock(512 + num_filters * 8, num_filters * 8 * 2, num_filters * 4, is_deconv=True)
self.dec3 = MultiClass_DecoderBlock(256 + num_filters * 4, num_filters * 4 * 2, num_filters * 2, is_deconv=True)
self.dec2 = MultiClass_DecoderBlock(128 + num_filters * 2, num_filters * 2 * 2, num_filters, is_deconv=True)
self.dec1 = MultiClass_ConvRelu(64 + num_filters, num_filters)
self.final = nn.Conv2d(num_filters, num_classes, kernel_size=1)
def forward(self, x):
conv1 = self.conv1(x)
conv2 = self.conv2(self.pool(conv1))
conv3 = self.conv3(self.pool(conv2))
conv4 = self.conv4(self.pool(conv3))
conv5 = self.conv5(self.pool(conv4))
center = self.center(self.pool(conv5))
dec5 = self.dec5(torch.cat([center, conv5], 1))
dec4 = self.dec4(torch.cat([dec5, conv4], 1))
dec3 = self.dec3(torch.cat([dec4, conv3], 1))
dec2 = self.dec2(torch.cat([dec3, conv2], 1))
dec1 = self.dec1(torch.cat([dec2, conv1], 1))
x_out = self.final(dec1)
return x_out
class MultiClass_LinkNet34(nn.Module):
def __init__(self, num_classes=1, num_channels=3, pretrained=True):
super().__init__()
assert num_channels == 3
self.num_classes = num_classes
filters = [64, 128, 256, 512]
resnet = resnet34(pretrained=pretrained)
self.firstconv = resnet.conv1
self.firstbn = resnet.bn1
self.firstrelu = resnet.relu
self.firstmaxpool = resnet.maxpool
self.encoder1 = resnet.layer1
self.encoder2 = resnet.layer2
self.encoder3 = resnet.layer3
self.encoder4 = resnet.layer4
# Decoder
self.decoder4 = DecoderBlockLinkNet(filters[3], filters[2])
self.decoder3 = DecoderBlockLinkNet(filters[2], filters[1])
self.decoder2 = DecoderBlockLinkNet(filters[1], filters[0])
self.decoder1 = DecoderBlockLinkNet(filters[0], filters[0])
# Final Classifier
self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 3, stride=2)
self.finalrelu1 = nn.ReLU(inplace=True)
self.finalconv2 = nn.Conv2d(32, 32, 3)
self.finalrelu2 = nn.ReLU(inplace=True)
self.finalconv3 = nn.Conv2d(32, num_classes, 2, padding=1)
# noinspection PyCallingNonCallable
def forward(self, x):
# Encoder
x = self.firstconv(x)
x = self.firstbn(x)
x = self.firstrelu(x)
x = self.firstmaxpool(x)
e1 = self.encoder1(x)
e2 = self.encoder2(e1)
e3 = self.encoder3(e2)
e4 = self.encoder4(e3)
# Decoder with Skip Connections
d4 = self.decoder4(e4) + e3
d3 = self.decoder3(d4) + e2
d2 = self.decoder2(d3) + e1
d1 = self.decoder1(d2)
# Final Classification
f1 = self.finaldeconv1(d1)
f2 = self.finalrelu1(f1)
f3 = self.finalconv2(f2)
f4 = self.finalrelu2(f3)
f5 = self.finalconv3(f4)
x_out = f5
return x_out
class MultiClass_ConvRelu(nn.Module):
def __init__(self, in_, out):
super().__init__()
self.conv = nn.Conv2d(in_, out, 3, padding=1)
self.activation = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.activation(x)
return x
class MultiClass_DecoderBlock(nn.Module):
def __init__(self, in_channels, middle_channels, out_channels, is_deconv=True):
super().__init__()
self.in_channels = in_channels
if is_deconv:
self.block = nn.Sequential(
MultiClass_ConvRelu(in_channels, middle_channels),
nn.ConvTranspose2d(middle_channels, out_channels, kernel_size=4, stride=2,
padding=1),
nn.ReLU(inplace=True)
)
else:
self.block = nn.Sequential(
nn.Upsample(scale_factor=2, mode='bilinear'),
MultiClass_ConvRelu(in_channels, middle_channels),
MultiClass_ConvRelu(middle_channels, out_channels),
)
def forward(self, x):
return self.block(x)
class DecoderBlockLinkNet(nn.Module):
def __init__(self, in_channels, n_filters):
super().__init__()
self.relu = nn.ReLU(inplace=True)
# B, C, H, W -> B, C/4, H, W
self.conv1 = nn.Conv2d(in_channels, in_channels // 4, 1)
self.norm1 = nn.BatchNorm2d(in_channels // 4)
# B, C/4, H, W -> B, C/4, 2 * H, 2 * W
self.deconv2 = nn.ConvTranspose2d(in_channels // 4, in_channels // 4, kernel_size=4,
stride=2, padding=1, output_padding=0)
self.norm2 = nn.BatchNorm2d(in_channels // 4)
# B, C/4, H, W -> B, C, H, W
self.conv3 = nn.Conv2d(in_channels // 4, n_filters, 1)
self.norm3 = nn.BatchNorm2d(n_filters)
def forward(self, x):
x = self.conv1(x)
x = self.norm1(x)
x = self.relu(x)
x = self.deconv2(x)
x = self.norm2(x)
x = self.relu(x)
x = self.conv3(x)
x = self.norm3(x)
x = self.relu(x)
return x
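# Hedged usage sketch (not part of the original module): a quick forward-pass
# shape check of one decoder defined above. pretrained=False avoids downloading
# ImageNet weights; 256x256 is chosen so every pool/upsample stage divides evenly.
if __name__ == "__main__":
    net = MultiClass_Resnet34(num_classes=3, pretrained=False)
    dummy = torch.randn(1, 3, 256, 256)
    out = net(dummy)
    print(out.shape)  # should be torch.Size([1, 3, 256, 256]) for this configuration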
|
shadowlands/sl_dapp/network_connection.py
|
kayagoban/shadowlands
| 140 |
83910
|
from shadowlands.sl_dapp import SLDapp, SLFrame
import pyperclip, os
import schedule
from shadowlands.tui.debug import debug
import pdb
class NetworkConnection(SLDapp):
def initialize(self):
self.add_sl_frame( NetworkStrategies(self, 10, 26, title="Network Options"))
self.connection_strategy = None
def attempt_connection(self):
fn = self._interface.node.__getattribute__(self.conn_fn)
self._interface.node.thread_shutdown = True
self._interface.node.heartbeat_thread.join()
self._interface.node.thread_shutdown = False
try:
if len(self.args) > 0:
return fn(self.args)
else:
return fn()
except StaleBlockchain:
self._scene.add_effect( MessageDialog(self._screen, "Stale blockchain on selected Node"))
return
self._interface.node.start_heartbeat_thread()
class NetworkStrategies(SLFrame):
def initialize(self):
options = [
('Local node', 'connect_w3_local'),
('Custom infura', 'connect_w3_custom_infura'),
('Custom http', 'connect_w3_custom_http'),
('Custom websocket', 'connect_w3_custom_websocket'),
('Custom ipc', 'connect_w3_custom_ipc'),
]
self.listbox_value = self.add_listbox(
5, options, on_select=self._select
#default_value=self.dapp.config.connection_strategy
)
self.add_button(self.close, "Cancel")
def _select(self):
connect_fn = self.listbox_value()
self.dapp.connection_strategy = connect_fn
if connect_fn == 'connect_w3_custom_http':
self.dapp.add_sl_frame(CustomHttpUri(self.dapp, 5, 30, title="Custom Http URI"))
elif connect_fn == 'connect_w3_custom_ipc':
self.dapp.add_sl_frame(CustomIpc(self.dapp, 5, 30, title="Custom IPC path"))
elif connect_fn == 'connect_w3_custom_websocket':
self.dapp.add_sl_frame(CustomWebsocket(self.dapp, 5, 30, title="Custom Websocket URI"))
elif connect_fn == 'connect_w3_custom_infura':
self.dapp.add_sl_frame(CustomInfura(self.dapp, 12, 45, title="Custom Infura Credentials"))
self.close()
class CustomInfura(SLFrame):
def initialize(self):
self.add_divider()
self.add_label(" WEB3_INFURA_PROJECT_ID")
self.id_value = self.add_textbox(
'',
default_value=os.environ.get('WEB3_INFURA_PROJECT_ID')
)
self.add_label(" WEB3_INFURA_API_SECRET")
self.secret_value = self.add_textbox(
'',
default_value=os.environ.get('WEB3_INFURA_API_SECRET')
)
self.add_button_row(
[
("Connect", self._connect, 0),
("Cancel", self.close, 3)
]
)
def _connect(self):
id_value = self.id_value()
secret_value = self.secret_value()
self.dapp.config.connection_args = (self.id_value(), self.secret_value())
self.dapp.config.connection_strategy = self.dapp.connection_strategy
#debug(); pdb.set_trace()
schedule.once().do(self.dapp.node.poll)
self.close()
class CustomHttpUri(SLFrame):
def initialize(self):
self.add_label("Ex: http://192.168.1.150:8545")
self.text_value = self.add_textbox()
self.add_button(self.close,"Cancel")
class CustomIpc(SLFrame):
def initialize(self):
self.add_label("Ex: http://192.168.1.150:8545")
self.text_value = self.add_textbox()
self.add_button(self.close,"Cancel")
class CustomWebsocket(SLFrame):
def initialize(self):
self.add_label("Ex: http://192.168.1.150:8545")
self.text_value = self.add_textbox()
self.add_button(self.close,"Cancel")
|
src/contrib/hod/hodlib/Common/util.py
|
hoppinghippo/HadoopMapReduce
| 194 |
83932
|
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import errno, sys, os, time, traceback, stat, socket, re, warnings, signal
from hodlib.Common.tcp import tcpSocket, tcpError
from hodlib.Common.threads import simpleCommand
setUGV = { 'S_ISUID' : 2, 'S_ISGID' : 1, 'S_ISVTX' : 0 }
reEscapeSeq = r"\\(.)?"
reEscapeSeq = re.compile(reEscapeSeq)
HOD_INTERRUPTED_CODE = 127
HOD_INTERRUPTED_MESG = "Hod interrupted. Cleaning up and exiting"
TORQUE_USER_LIMITS_COMMENT_FIELD = "User-limits exceeded. " + \
"Requested:([0-9]*) Used:([0-9]*) MaxLimit:([0-9]*)"
TORQUE_USER_LIMITS_EXCEEDED_MSG = "Requested number of nodes exceeded " + \
"maximum user limits. "
class AlarmException(Exception):
def __init__(self, msg=''):
self.message = msg
Exception.__init__(self, msg)
def __repr__(self):
return self.message
def isProcessRunning(pid):
'''Check if a process is running, by sending it a 0 signal, and checking for errors'''
# This method is documented in some email threads on the python mailing list.
# For e.g.: http://mail.python.org/pipermail/python-list/2002-May/144522.html
try:
os.kill(pid, 0)
return True
except OSError, err:
return err.errno == errno.EPERM
def untar(file, targetDir):
status = False
command = 'tar -C %s -zxf %s' % (targetDir, file)
commandObj = simpleCommand('untar', command)
commandObj.start()
commandObj.wait()
commandObj.join()
if commandObj.exit_code() == 0:
status = True
return status
def tar(tarFile, tarDirectory, tarList):
currentDir = os.getcwd()
os.chdir(tarDirectory)
status = False
command = 'tar -czf %s ' % (tarFile)
for file in tarList:
command = "%s%s " % (command, file)
commandObj = simpleCommand('tar', command)
commandObj.start()
commandObj.wait()
commandObj.join()
if commandObj.exit_code() == 0:
status = True
else:
status = commandObj.exit_status_string()
os.chdir(currentDir)
return status
def to_http_url(list):
"""convert [hostname, port] to a http url"""
str = ''
str = "http://%s:%s" % (list[0], list[1])
return str
def get_exception_string():
(type, value, tb) = sys.exc_info()
exceptList = traceback.format_exception(type, value, tb)
exceptString = ''
for line in exceptList:
exceptString = "%s%s" % (exceptString, line)
return exceptString
def get_exception_error_string():
(type, value, tb) = sys.exc_info()
if value:
exceptString = "%s %s" % (type, value)
else:
exceptString = type
return exceptString
def check_timestamp(timeStamp):
""" Checks the validity of a timeStamp.
timeStamp - (YYYY-MM-DD HH:MM:SS in UTC)
returns True or False
"""
isValid = True
try:
timeStruct = time.strptime(timeStamp, "%Y-%m-%d %H:%M:%S")
except:
isValid = False
return isValid
def sig_wrapper(sigNum, handler, *args):
if args:
handler(args)
else:
handler()
def get_perms(filename):
mode = stat.S_IMODE(os.stat(filename)[stat.ST_MODE])
permsString = ''
permSet = 0
place = 2
for who in "USR", "GRP", "OTH":
for what in "R", "W", "X":
if mode & getattr(stat,"S_I"+what+who):
permSet = permSet + 2**place
place = place - 1
permsString = "%s%s" % (permsString, permSet)
permSet = 0
place = 2
permSet = 0
for permFlag in setUGV.keys():
if mode & getattr(stat, permFlag):
permSet = permSet + 2**setUGV[permFlag]
permsString = "%s%s" % (permSet, permsString)
return permsString
def local_fqdn():
"""Return a system's true FQDN rather than any aliases, which are
occasionally returned by socket.gethostname."""
fqdn = None
me = os.uname()[1]
nameInfo=socket.gethostbyname_ex(me)
nameInfo[1].append(nameInfo[0])
for name in nameInfo[1]:
if name.count(".") and name.startswith(me):
fqdn = name
if fqdn == None:
fqdn = me
return(fqdn)
def need_to_allocate(allocated, config, command):
status = True
if allocated.isSet():
status = False
elif re.search("\s*dfs.*$", command) and \
config['gridservice-hdfs']['external']:
status = False
elif config['gridservice-mapred']['external']:
status = False
return status
def filter_warnings():
warnings.filterwarnings('ignore',
message=".*?'with' will become a reserved keyword.*")
def args_to_string(list):
"""return a string argument space seperated"""
arg = ''
for item in list:
arg = "%s%s " % (arg, item)
return arg[:-1]
def replace_escapes(object):
""" replace any escaped character. e.g \, with , \= with = and so on """
# here object is either a config object or a options object
for section in object._mySections:
for option in object._configDef[section].keys():
if object[section].has_key(option):
if object._configDef[section][option]['type'] == 'keyval':
keyValDict = object[section][option]
object[section][option] = {}
for (key,value) in keyValDict.iteritems():
match = reEscapeSeq.search(value)
if match:
value = reEscapeSeq.sub(r"\1", value)
object[section][option][key] = value
def hadoopVersion(hadoopDir, java_home, log):
# Determine the version of hadoop being used by executing the
# hadoop version command. Code earlier in idleTracker.py
hadoopVersion = { 'major' : None, 'minor' : None }
hadoopPath = os.path.join(hadoopDir, 'bin', 'hadoop')
cmd = "%s version" % hadoopPath
log.debug('Executing command %s to find hadoop version' % cmd)
env = os.environ
env['JAVA_HOME'] = java_home
hadoopVerCmd = simpleCommand('HadoopVersion', cmd, env)
hadoopVerCmd.start()
hadoopVerCmd.wait()
hadoopVerCmd.join()
if hadoopVerCmd.exit_code() == 0:
verLine = hadoopVerCmd.output()[0]
log.debug('Version from hadoop command: %s' % verLine)
hadoopVerRegExp = re.compile("Hadoop ([0-9]+)\.([0-9]+).*")
verMatch = hadoopVerRegExp.match(verLine)
if verMatch != None:
hadoopVersion['major'] = verMatch.group(1)
hadoopVersion['minor'] = verMatch.group(2)
return hadoopVersion
def get_cluster_status(hdfsAddress, mapredAddress):
"""Determine the status of the cluster based on socket availability
of HDFS and Map/Reduce."""
status = 0
mapredSocket = tcpSocket(mapredAddress)
try:
mapredSocket.open()
mapredSocket.close()
except tcpError:
status = 14
hdfsSocket = tcpSocket(hdfsAddress)
try:
hdfsSocket.open()
hdfsSocket.close()
except tcpError:
if status > 0:
status = 10
else:
status = 13
return status
def parseEquals(list):
# takes in a list of keyval pairs e.g ['a=b','c=d'] and returns a
# dict e.g {'a'='b','c'='d'}. Used in GridService/{mapred.py/hdfs.py} and
# HodRing/hodring.py. No need for specially treating escaped =. as in \=,
# since all keys are generated by hod and don't contain such anomalies
dict = {}
for elems in list:
splits = elems.split('=')
dict[splits[0]] = splits[1]
return dict
def getMapredSystemDirectory(mrSysDirRoot, userid, jobid):
return os.path.join(mrSysDirRoot, userid, 'mapredsystem', jobid)
class HodInterrupt:
def __init__(self):
self.HodInterruptFlag = False
self.log = None
def set_log(self, log):
self.log = log
def init_signals(self):
def sigStop(sigNum, handler):
sig_wrapper(sigNum, self.setFlag)
signal.signal(signal.SIGTERM, sigStop) # 15 : software termination signal
signal.signal(signal.SIGQUIT, sigStop) # 3 : Quit program
signal.signal(signal.SIGINT, sigStop) # 2 ^C : Interrupt program
def sig_wrapper(sigNum, handler, *args):
self.log.critical("Caught signal %s." % sigNum )
if args:
handler(args)
else:
handler()
def setFlag(self, val = True):
self.HodInterruptFlag = val
def isSet(self):
return self.HodInterruptFlag
class HodInterruptException(Exception):
def __init__(self, value = ""):
self.value = value
def __str__(self):
return repr(self.value)
hodInterrupt = HodInterrupt()
|
jmetal/problem/multiobjective/zdt.py
|
12yuens2/jMetalPy
| 335 |
83936
|
from math import sqrt, pow, sin, pi, cos
from jmetal.core.problem import FloatProblem
from jmetal.core.solution import FloatSolution
"""
.. module:: ZDT
:platform: Unix, Windows
:synopsis: ZDT problem family of multi-objective problems.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
class ZDT1(FloatProblem):
""" Problem ZDT1.
.. note:: Bi-objective unconstrained problem. The default number of variables is 30.
.. note:: Continuous problem having a convex Pareto front
"""
def __init__(self, number_of_variables: int=30):
""" :param number_of_variables: Number of decision variables of the problem.
"""
super(ZDT1, self).__init__()
self.number_of_variables = number_of_variables
self.number_of_objectives = 2
self.number_of_constraints = 0
self.obj_directions = [self.MINIMIZE, self.MINIMIZE]
self.obj_labels = ['x', 'y']
self.lower_bound = self.number_of_variables * [0.0]
self.upper_bound = self.number_of_variables * [1.0]
def evaluate(self, solution: FloatSolution) -> FloatSolution:
g = self.eval_g(solution)
h = self.eval_h(solution.variables[0], g)
solution.objectives[0] = solution.variables[0]
solution.objectives[1] = h * g
return solution
def eval_g(self, solution: FloatSolution):
g = sum(solution.variables) - solution.variables[0]
constant = 9.0 / (solution.number_of_variables - 1)
return constant * g + 1.0
def eval_h(self, f: float, g: float) -> float:
return 1.0 - sqrt(f / g)
def get_name(self):
return 'ZDT1'
class ZDT1Modified(ZDT1):
""" Problem ZDT1Modified.
.. note:: Version including a loop for increasing the computing time of the evaluation functions.
"""
def __init__(self, number_of_variables = 30):
super(ZDT1Modified, self).__init__(number_of_variables)
def evaluate(self, solution:FloatSolution) -> FloatSolution:
s: float = 0.0
for i in range(1000):
for j in range(10000):
s += i * 0.235 / 1.234 + 1.23525 * j
return super().evaluate(solution)
class ZDT2(ZDT1):
""" Problem ZDT2.
.. note:: Bi-objective unconstrained problem. The default number of variables is 30.
.. note:: Continuous problem having a non-convex Pareto front
"""
def eval_h(self, f: float, g: float) -> float:
return 1.0 - pow(f / g, 2.0)
def get_name(self):
return 'ZDT2'
class ZDT3(ZDT1):
""" Problem ZDT3.
.. note:: Bi-objective unconstrained problem. The default number of variables is 30.
.. note:: Continuous problem having a partitioned Pareto front
"""
def eval_h(self, f: float, g: float) -> float:
return 1.0 - sqrt(f / g) - (f / g) * sin(10.0 * f * pi)
def get_name(self):
return 'ZDT3'
class ZDT4(ZDT1):
""" Problem ZDT4.
.. note:: Bi-objective unconstrained problem. The default number of variables is 10.
.. note:: Continuous multi-modal problem having a convex Pareto front
"""
def __init__(self, number_of_variables: int=10):
""" :param number_of_variables: Number of decision variables of the problem.
"""
super(ZDT4, self).__init__(number_of_variables=number_of_variables)
self.lower_bound = self.number_of_variables * [-5.0]
self.upper_bound = self.number_of_variables * [5.0]
self.lower_bound[0] = 0.0
self.upper_bound[0] = 1.0
def eval_g(self, solution: FloatSolution):
g = 0.0
for i in range(1, solution.number_of_variables):
g += pow(solution.variables[i], 2.0) - 10.0 * cos(4.0 * pi * solution.variables[i])
g += 1.0 + 10.0 * (solution.number_of_variables - 1)
return g
def eval_h(self, f: float, g: float) -> float:
return 1.0 - sqrt(f / g)
def get_name(self):
return 'ZDT4'
class ZDT6(ZDT1):
""" Problem ZDT6.
.. note:: Bi-objective unconstrained problem. The default number of variables is 10.
.. note:: Continuous problem having a non-convex Pareto front
"""
def __init__(self, number_of_variables: int=10):
""" :param number_of_variables: Number of decision variables of the problem.
"""
super(ZDT6, self).__init__(number_of_variables=number_of_variables)
def eval_g(self, solution: FloatSolution):
g = sum(solution.variables) - solution.variables[0]
g = g / (solution.number_of_variables - 1)
g = pow(g, 0.25)
g = 9.0 * g
g = 1.0 + g
return g
def eval_h(self, f: float, g: float) -> float:
return 1.0 - pow(f / g, 2.0)
def get_name(self):
return 'ZDT6'
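# Hedged sketch (not part of the original module): recomputes the ZDT1
# objectives for one point with the same formulas as eval_g/eval_h above,
# without constructing a jMetalPy FloatSolution.
if __name__ == '__main__':
    variables = [0.5] + [0.0] * 29               # x1 = 0.5, remaining 29 variables at 0
    f1 = variables[0]
    g = 1.0 + 9.0 * (sum(variables) - variables[0]) / (len(variables) - 1)
    f2 = g * (1.0 - sqrt(f1 / g))
    print(f1, f2)                                # expected: 0.5 and 1 - sqrt(0.5) ~ 0.2929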
|
nequip/scripts/train.py
|
mir-group/nequip
| 153 |
83951
|
""" Train a network."""
import logging
import argparse
# This is a weird hack to avoid Intel MKL issues on the cluster when this is called as a subprocess of a process that has itself initialized PyTorch.
# Since numpy gets imported later anyway for dataset stuff, this shouldn't affect performance.
import numpy as np # noqa: F401
from os.path import isdir
from pathlib import Path
import torch
import e3nn
import e3nn.util.jit
from nequip.model import model_from_config
from nequip.utils import Config
from nequip.data import dataset_from_config
from nequip.utils.test import assert_AtomicData_equivariant, set_irreps_debug
from nequip.utils import load_file, dtype_from_name
from nequip.scripts.logger import set_up_script_logger
default_config = dict(
root="./",
run_name="NequIP",
wandb=False,
wandb_project="NequIP",
compile_model=False,
model_builders=[
"EnergyModel",
"PerSpeciesRescale",
"ForceOutput",
"RescaleEnergyEtc",
],
dataset_statistics_stride=1,
default_dtype="float32",
allow_tf32=False, # TODO: until we understand equivar issues
verbose="INFO",
model_debug_mode=False,
equivariance_test=False,
grad_anomaly_mode=False,
append=False,
_jit_bailout_depth=2, # avoid 20 iters of pain, see https://github.com/pytorch/pytorch/issues/52286
)
def main(args=None, running_as_script: bool = True):
config = parse_command_line(args)
if running_as_script:
set_up_script_logger(config.get("log", None), config.verbose)
found_restart_file = isdir(f"{config.root}/{config.run_name}")
if found_restart_file and not config.append:
raise RuntimeError(
f"Training instance exists at {config.root}/{config.run_name}; "
"either set append to True or use a different root or runname"
)
# for fresh new train
if not found_restart_file:
trainer = fresh_start(config)
else:
trainer = restart(config)
# Train
trainer.save()
trainer.train()
return
def parse_command_line(args=None):
parser = argparse.ArgumentParser(description="Train a NequIP model.")
parser.add_argument("config", help="configuration file")
parser.add_argument(
"--equivariance-test",
help="test the model's equivariance before training on n (default 1) random frames from the dataset",
const=1,
type=int,
nargs="?",
)
parser.add_argument(
"--model-debug-mode",
help="enable model debug mode, which can sometimes give much more useful error messages at the cost of some speed. Do not use for production training!",
action="store_true",
)
parser.add_argument(
"--grad-anomaly-mode",
help="enable PyTorch autograd anomaly mode to debug NaN gradients. Do not use for production training!",
action="store_true",
)
parser.add_argument(
"--log",
help="log file to store all the screen logging",
type=Path,
default=None,
)
args = parser.parse_args(args=args)
config = Config.from_file(args.config, defaults=default_config)
for flag in ("model_debug_mode", "equivariance_test", "grad_anomaly_mode"):
config[flag] = getattr(args, flag) or config[flag]
return config
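# Hedged usage example (not part of the original script): a typical invocation of
# this entry point, assuming a YAML config file; the flag names come from the
# argparse definition above.
#
#   python -m nequip.scripts.train my_config.yaml --equivariance-test --log train.log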
def _set_global_options(config):
"""Configure global options of libraries like `torch` and `e3nn` based on `config`."""
# Set TF32 support
# See https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
if torch.cuda.is_available():
if torch.torch.backends.cuda.matmul.allow_tf32 and not config.allow_tf32:
# it is enabled, and we dont want it to, so disable:
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False
# For avoiding 20 steps of painfully slow JIT recompilation
# See https://github.com/pytorch/pytorch/issues/52286
torch._C._jit_set_bailout_depth(config["_jit_bailout_depth"])
if config.model_debug_mode:
set_irreps_debug(enabled=True)
torch.set_default_dtype(dtype_from_name(config.default_dtype))
if config.grad_anomaly_mode:
torch.autograd.set_detect_anomaly(True)
e3nn.set_optimization_defaults(**config.get("e3nn_optimization_defaults", {}))
def fresh_start(config):
_set_global_options(config)
# = Make the trainer =
if config.wandb:
import wandb # noqa: F401
from nequip.train.trainer_wandb import TrainerWandB
# download parameters from wandb in case of sweeping
from nequip.utils.wandb import init_n_update
config = init_n_update(config)
trainer = TrainerWandB(model=None, **dict(config))
else:
from nequip.train.trainer import Trainer
trainer = Trainer(model=None, **dict(config))
# what is this
# to update wandb data?
config.update(trainer.params)
# = Load the dataset =
dataset = dataset_from_config(config, prefix="dataset")
logging.info(f"Successfully loaded the data set of type {dataset}...")
try:
validation_dataset = dataset_from_config(config, prefix="validation_dataset")
logging.info(
f"Successfully loaded the validation data set of type {validation_dataset}..."
)
except KeyError:
# It couldn't be found
validation_dataset = None
# = Train/test split =
trainer.set_dataset(dataset, validation_dataset)
# = Build model =
final_model = model_from_config(
config=config, initialize=True, dataset=trainer.dataset_train
)
logging.info("Successfully built the network...")
if config.compile_model:
final_model = e3nn.util.jit.script(final_model)
logging.info("Successfully compiled model...")
# Equivar test
if config.equivariance_test > 0:
n_train: int = len(trainer.dataset_train)
assert config.equivariance_test <= n_train
final_model.eval()
indexes = torch.randperm(n_train)[: config.equivariance_test]
errstr = assert_AtomicData_equivariant(
final_model, [trainer.dataset_train[i] for i in indexes]
)
final_model.train()
logging.info(
"Equivariance test passed; equivariance errors:\n"
" Errors are in real units, where relevant.\n"
" Please note that the large scale of the typical\n"
" shifts to the (atomic) energy can cause\n"
" catastrophic cancellation and give incorrectly\n"
" the equivariance error as zero for those fields.\n"
f"{errstr}"
)
del errstr, indexes, n_train
# Set the trainer
trainer.model = final_model
# Store any updated config information in the trainer
trainer.update_kwargs(config)
return trainer
def restart(config):
# load the dictionary
restart_file = f"{config.root}/{config.run_name}/trainer.pth"
dictionary = load_file(
supported_formats=dict(torch=["pt", "pth"]),
filename=restart_file,
enforced_format="torch",
)
# compare dictionary to config and update stop condition related arguments
for k in config.keys():
if config[k] != dictionary.get(k, ""):
if k == "max_epochs":
dictionary[k] = config[k]
logging.info(f'Update "{k}" to {dictionary[k]}')
elif k.startswith("early_stop"):
dictionary[k] = config[k]
logging.info(f'Update "{k}" to {dictionary[k]}')
elif isinstance(config[k], type(dictionary.get(k, ""))):
raise ValueError(
f'Key "{k}" is different in config and the result trainer.pth file. Please double check'
)
# recursive loop, if same type but different value
# raise error
config = Config(dictionary, exclude_keys=["state_dict", "progress"])
# dtype, etc.
_set_global_options(config)
if config.wandb:
from nequip.train.trainer_wandb import TrainerWandB
from nequip.utils.wandb import resume
resume(config)
trainer = TrainerWandB.from_dict(dictionary)
else:
from nequip.train.trainer import Trainer
trainer = Trainer.from_dict(dictionary)
# = Load the dataset =
dataset = dataset_from_config(config, prefix="dataset")
logging.info(f"Successfully re-loaded the data set of type {dataset}...")
try:
validation_dataset = dataset_from_config(config, prefix="validation_dataset")
logging.info(
f"Successfully re-loaded the validation data set of type {validation_dataset}..."
)
except KeyError:
# It couldn't be found
validation_dataset = None
trainer.set_dataset(dataset, validation_dataset)
return trainer
if __name__ == "__main__":
main(running_as_script=True)
|
alibi_detect/cd/tabular.py
|
sugatoray/alibi-detect
| 1,227 |
83984
|
<filename>alibi_detect/cd/tabular.py
import numpy as np
from scipy.stats import chi2_contingency, ks_2samp
from typing import Callable, Dict, List, Optional, Tuple, Union
from alibi_detect.cd.base import BaseUnivariateDrift
class TabularDrift(BaseUnivariateDrift):
def __init__(
self,
x_ref: Union[np.ndarray, list],
p_val: float = .05,
categories_per_feature: Dict[int, Optional[int]] = None,
preprocess_x_ref: bool = True,
update_x_ref: Optional[Dict[str, int]] = None,
preprocess_fn: Optional[Callable] = None,
correction: str = 'bonferroni',
alternative: str = 'two-sided',
n_features: Optional[int] = None,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
"""
Mixed-type tabular data drift detector with Bonferroni or False Discovery Rate (FDR)
correction for multivariate data. Kolmogorov-Smirnov (K-S) univariate tests are applied to
continuous numerical data and Chi-Squared (Chi2) univariate tests to categorical data.
Parameters
----------
x_ref
Data used as reference distribution.
p_val
p-value used for significance of the K-S and Chi2 test for each feature.
If the FDR correction method is used, this corresponds to the acceptable q-value.
categories_per_feature
Dictionary with as keys the column indices of the categorical features and optionally as values
the number of possible categorical values for that feature or a list with the possible values.
If you know which features are categorical and simply want to infer the possible values of the
categorical feature from the reference data you can pass a Dict[int, NoneType] such as
{0: None, 3: None} if features 0 and 3 are categorical. If you also know how many categories are
present for a given feature you could pass this in the `categories_per_feature` dict in the
Dict[int, int] format, e.g. *{0: 3, 3: 2}*. If you pass N categories this will assume the possible
values for the feature are [0, ..., N-1]. You can also explicitly pass the possible categories in the
Dict[int, List[int]] format, e.g. {0: [0, 1, 2], 3: [0, 55]}. Note that the categories can be
arbitrary int values.
preprocess_x_ref
Whether to already preprocess and infer categories and frequencies for categorical reference data.
update_x_ref
Reference data can optionally be updated to the last n instances seen by the detector
or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
for reservoir sampling {'reservoir_sampling': n} is passed.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
Typically a dimensionality reduction technique.
correction
Correction type for multivariate data. Either 'bonferroni' or 'fdr' (False Discovery Rate).
alternative
Defines the alternative hypothesis for the K-S tests. Options are 'two-sided', 'less' or 'greater'.
n_features
Number of features used in the combined K-S/Chi-Squared tests. No need to pass it if
no preprocessing takes place. In case of a preprocessing step, this can also be inferred
automatically but could be more expensive to compute.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__(
x_ref=x_ref,
p_val=p_val,
preprocess_x_ref=preprocess_x_ref,
update_x_ref=update_x_ref,
preprocess_fn=preprocess_fn,
correction=correction,
n_features=n_features,
input_shape=input_shape,
data_type=data_type
)
self.alternative = alternative
self.x_ref_categories, self.cat_vars = {}, [] # no categorical features assumed present
if isinstance(categories_per_feature, dict):
vals = list(categories_per_feature.values())
int_types = (int, np.int16, np.int32, np.int64)
if all(v is None for v in vals): # categories_per_feature = Dict[int, NoneType]
x_flat = self.x_ref.reshape(self.x_ref.shape[0], -1)
categories_per_feature = {f: list(np.unique(x_flat[:, f])) # type: ignore
for f in categories_per_feature.keys()}
elif all(isinstance(v, int_types) for v in vals):
# categories_per_feature = Dict[int, int]
categories_per_feature = {f: list(np.arange(v)) # type: ignore
for f, v in categories_per_feature.items()}
            elif not (all(isinstance(v, list) for v in vals) and
                      all(isinstance(v, int_types) for val in vals for v in val)):  # type: ignore
raise ValueError('categories_per_feature needs to be None or one of '
'Dict[int, NoneType], Dict[int, int], Dict[int, List[int]]')
self.x_ref_categories = categories_per_feature
self.cat_vars = list(self.x_ref_categories.keys())
def feature_score(self, x_ref: np.ndarray, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
Compute K-S or Chi-Squared test statistics and p-values per feature.
Parameters
----------
x_ref
Reference instances to compare distribution with.
x
Batch of instances.
Returns
-------
Feature level p-values and K-S or Chi-Squared statistics.
"""
x_ref = x_ref.reshape(x_ref.shape[0], -1)
x = x.reshape(x.shape[0], -1)
# apply counts on union of categories per variable in both the reference and test data
if self.cat_vars:
x_categories = {f: list(np.unique(x[:, f])) for f in self.cat_vars}
all_categories = {f: list(set().union(self.x_ref_categories[f], x_categories[f])) # type: ignore
for f in self.cat_vars}
x_ref_count = self._get_counts(x_ref, all_categories)
x_count = self._get_counts(x, all_categories)
p_val = np.zeros(self.n_features, dtype=np.float32)
dist = np.zeros_like(p_val)
for f in range(self.n_features):
if f in self.cat_vars:
contingency_table = np.vstack((x_ref_count[f], x_count[f]))
dist[f], p_val[f], _, _ = chi2_contingency(contingency_table)
else:
dist[f], p_val[f] = ks_2samp(x_ref[:, f], x[:, f], alternative=self.alternative, mode='asymp')
return p_val, dist
def _get_counts(self, x: np.ndarray, categories: Dict[int, List[int]]) -> Dict[int, List[int]]:
"""
Utility method for getting the counts of categories for each categorical variable.
"""
return {f: [(x[:, f] == v).sum() for v in vals] for f, vals in categories.items()}
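if __name__ == '__main__':
    # Hedged usage sketch (added for this write-up, not part of the original module).
    # It exercises the `categories_per_feature` formats described in the class docstring
    # and assumes the `predict` API inherited from BaseUnivariateDrift returns the usual
    # {'data': {...}, 'meta': {...}} dictionary.
    rng = np.random.default_rng(0)
    x_ref = np.concatenate(
        [rng.integers(0, 3, size=(100, 1)),    # categorical feature 0
         rng.normal(size=(100, 2))], axis=1)   # continuous features 1 and 2
    # Infer the categories of feature 0 from the reference data;
    # {0: 3} or {0: [0, 1, 2]} would be equivalent explicit forms.
    cd = TabularDrift(x_ref, p_val=.05, categories_per_feature={0: None})
    x_test = np.concatenate(
        [rng.integers(0, 3, size=(50, 1)), rng.normal(size=(50, 2))], axis=1)
    preds = cd.predict(x_test)  # Chi2 on feature 0, K-S on features 1 and 2
    print(preds['data']['is_drift'], preds['data']['p_val'])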
|
idaes/generic_models/properties/core/reactions/tests/test_rate_forms.py
|
carldlaird/idaes-pse
| 112 |
83989
|
<gh_stars>100-1000
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Tests for rate forms
"""
import pytest
from pyomo.environ import Block, ConcreteModel, Var, units as pyunits
from idaes.generic_models.properties.core.generic.generic_reaction import \
GenericReactionParameterBlock, ConcentrationForm
from idaes.generic_models.properties.core.reactions.rate_forms import *
from idaes.core.util.testing import PhysicalParameterTestBlock
from idaes.core.util.misc import add_object_reference
@pytest.mark.unit
def test_power_law_rate_no_order():
m = ConcreteModel()
# # Add a test thermo package for validation
m.pparams = PhysicalParameterTestBlock()
m.thermo = m.pparams.build_state_block([1])
m.rparams = GenericReactionParameterBlock(default={
"property_package": m.pparams,
"base_units": {"time": pyunits.s,
"mass": pyunits.kg,
"amount": pyunits.mol,
"length": pyunits.m,
"temperature": pyunits.K},
"rate_reactions": {
"r1": {"stoichiometry": {("p1", "c1"): -1,
("p1", "c2"): 2},
"rate_form": power_law_rate,
"concentration_form": ConcentrationForm.moleFraction}}})
# Create a dummy state block
m.rxn = Block([1])
add_object_reference(
m.rxn[1], "phase_component_set", m.pparams._phase_component_set)
add_object_reference(m.rxn[1], "params", m.rparams)
add_object_reference(m.rxn[1], "state_ref", m.thermo[1])
m.rxn[1].k_rxn = Var(["r1"], initialize=1)
power_law_rate.build_parameters(
m.rparams.reaction_r1, m.rparams.config.rate_reactions["r1"])
# Check parameter construction
assert isinstance(m.rparams.reaction_r1.reaction_order, Var)
assert len(m.rparams.reaction_r1.reaction_order) == 4
for i, v in m.rparams.reaction_r1.reaction_order.items():
try:
stoic = m.rparams.config.rate_reactions.r1.stoichiometry[i]
except KeyError:
stoic = 0
if stoic < 1:
assert v.value == -stoic
else:
assert v.value == 0
# Check reaction form
rform = power_law_rate.return_expression(
m.rxn[1], m.rparams.reaction_r1, "r1", 300)
assert str(rform) == str(
m.rxn[1].k_rxn["r1"] *
m.thermo[1].mole_frac_phase_comp["p1", "c1"] **
m.rparams.reaction_r1.reaction_order["p1", "c1"])
@pytest.mark.unit
def test_power_law_rate_with_order():
m = ConcreteModel()
# # Add a test thermo package for validation
m.pparams = PhysicalParameterTestBlock()
m.thermo = m.pparams.build_state_block([1])
m.rparams = GenericReactionParameterBlock(default={
"property_package": m.pparams,
"base_units": {"time": pyunits.s,
"mass": pyunits.kg,
"amount": pyunits.mol,
"length": pyunits.m,
"temperature": pyunits.K},
"rate_reactions": {
"r1": {"stoichiometry": {("p1", "c1"): -1,
("p1", "c2"): 2},
"rate_form": power_law_rate,
"concentration_form": ConcentrationForm.moleFraction,
"parameter_data": {
"reaction_order": {("p1", "c1"): 1,
("p1", "c2"): 2,
("p2", "c1"): 3,
("p2", "c2"): 4}}}}})
# Create a dummy state block
m.rxn = Block([1])
add_object_reference(
m.rxn[1], "phase_component_set", m.pparams._phase_component_set)
add_object_reference(m.rxn[1], "params", m.rparams)
add_object_reference(m.rxn[1], "state_ref", m.thermo[1])
m.rxn[1].k_rxn = Var(["r1"], initialize=1)
power_law_rate.build_parameters(
m.rparams.reaction_r1, m.rparams.config.rate_reactions["r1"])
# Check parameter construction
assert isinstance(m.rparams.reaction_r1.reaction_order, Var)
assert len(m.rparams.reaction_r1.reaction_order) == 4
assert m.rparams.reaction_r1.reaction_order["p1", "c1"].value == 1
assert m.rparams.reaction_r1.reaction_order["p1", "c2"].value == 2
assert m.rparams.reaction_r1.reaction_order["p2", "c1"].value == 3
assert m.rparams.reaction_r1.reaction_order["p2", "c2"].value == 4
# Check reaction form
rform = power_law_rate.return_expression(
m.rxn[1], m.rparams.reaction_r1, "r1", 300)
assert str(rform) == str(
m.rxn[1].k_rxn["r1"] * (
m.thermo[1].mole_frac_phase_comp["p1", "c1"] **
m.rparams.reaction_r1.reaction_order["p1", "c1"] *
m.thermo[1].mole_frac_phase_comp["p1", "c2"] **
m.rparams.reaction_r1.reaction_order["p1", "c2"] *
m.thermo[1].mole_frac_phase_comp["p2", "c1"] **
m.rparams.reaction_r1.reaction_order["p2", "c1"] *
m.thermo[1].mole_frac_phase_comp["p2", "c2"] **
m.rparams.reaction_r1.reaction_order["p2", "c2"]))
|
tracardi/service/network.py
|
bytepl/tracardi
| 153 |
84000
|
<gh_stars>100-1000
import logging
import socket
from typing import Optional
from tracardi.config import tracardi
logger = logging.getLogger('utils.network')
logger.setLevel(tracardi.logging_level)
def get_local_ip() -> Optional[str]:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
except OSError as e:
logger.error(str(e))
return None
local_ip = get_local_ip()
|
chapter7/authentication/authentication.py
|
GoodMonsters/Building-Data-Science-Applications-with-FastAPI
| 107 |
84013
|
<reponame>GoodMonsters/Building-Data-Science-Applications-with-FastAPI<filename>chapter7/authentication/authentication.py
from typing import Optional
from tortoise.exceptions import DoesNotExist
from chapter7.authentication.models import (
AccessToken,
AccessTokenTortoise,
UserDB,
UserTortoise,
)
from chapter7.authentication.password import verify_password
async def authenticate(email: str, password: str) -> Optional[UserDB]:
try:
user = await UserTortoise.get(email=email)
except DoesNotExist:
return None
if not verify_password(password, user.hashed_password):
return None
return UserDB.from_orm(user)
async def create_access_token(user: UserDB) -> AccessToken:
access_token = AccessToken(user_id=user.id)
access_token_tortoise = await AccessTokenTortoise.create(**access_token.dict())
return AccessToken.from_orm(access_token_tortoise)
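# Hedged usage sketch (comment only, added for this write-up; not part of the original
# chapter code). It shows how these helpers are typically wired into a FastAPI login
# route; the route path, the OAuth2 form handling and the `access_token` attribute name
# are assumptions for illustration, not guarantees about the book's app module.
#
#     from fastapi import Depends, FastAPI, HTTPException, status
#     from fastapi.security import OAuth2PasswordRequestForm
#
#     app = FastAPI()
#
#     @app.post("/token")
#     async def create_token(form_data: OAuth2PasswordRequestForm = Depends()):
#         user = await authenticate(form_data.username, form_data.password)
#         if user is None:
#             raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
#         token = await create_access_token(user)
#         return {"access_token": token.access_token, "token_type": "bearer"}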
|
mcd/__init__.py
|
zwlanpishu/MCD
| 158 |
84025
|
"""Mel cepstral distortion (MCD) computations in python."""
# Copyright 2014, 2015, 2016, 2017 <NAME>
# This file is part of mcd.
# See `License` for details of license and warranty.
__version__ = '0.5.dev1'
|
application/controllers/admin/order/logistic.py
|
mutalisk999/bibi
| 1,037 |
84049
|
<filename>application/controllers/admin/order/logistic.py
# -*- coding: utf-8 -*-
import json
import os
import datetime
import time
import random
from copy import deepcopy
from bson import ObjectId, json_util
from itertools import chain
from flask_admin import BaseView, expose
from flask_babel import gettext as _
from flask_login import current_user
from mongoengine.queryset import Q
from flask import request, current_app, redirect, jsonify, Response, \
Markup, flash, url_for, make_response
import application.models as Models
from application.controllers.admin import AdminView
from application.extensions import admin
import application.services.jobs as Jobs
from application.utils import Pagination, format_date
from configs.config import TEMPLATE_DIR
num_per_page = 50
delay_status_by_date = {
'PAYMENT_RECEIVED':3,
'PROCESSING': 1,
'SHIPPING': 5,
'PORT_ARRIVED': 4,
}
def to_json(lo):
dt = {}
dt['id'] = str(lo.id)
dt['is_closed'] = lo.is_closed
dt['close_reason'] = lo.close_reason
dt['created_at'] = lo.created_at
dt['detail'] = lo.detail.to_mongo()
dt['detail']['partner'] = (lambda p: p and p.name)(lo.detail.partner)
dt['address'] = lo.order.address.to_json()
dt['order_id'] = lo.order.short_id
dt['logistic_provider'] = lo.order.logistic_provider
dt['entries'] = [entry_to_json(entry) for entry in lo.entries]
dt['estimated_weight'] = lo.estimated_weight
dt['returned_entries'] = [entry_to_json(entry) for entry in lo.returned_entries]
return dt
def entry_to_json(entry):
dt = {}
dt['id'] = str(entry.id)
dt['item'] = entry.item_snapshot.to_mongo()
dt['spec'] = entry.item_spec_snapshot.to_mongo()
try:
dt['item']['weight'] = entry.item_snapshot.weight
except:
pass
try:
dt['item']['title_en'] = entry.item_snapshot.title_en
except:
pass
dt['amount_usd'] = entry.amount_usd
dt['amount'] = entry.amount
dt['quantity'] = entry.quantity
dt['unit_price'] = entry.unit_price
dt['created_at'] = entry.created_at
dt['remark'] = entry.remark
dt['shipping_info'] = entry.shipping_info
return dt
def restruct_query(data):
format_date = lambda d: datetime.datetime.strptime(d, '%Y-%m-%dT%H:%M:%S.%fZ')
status = data.get('status')
query = {}
for k,v in data.items():
if v in [None, u"None", "", "null"]: continue
if k[-3:] == '_no':
query.update({'detail__%s'%k: v})
elif k in ['status']:
query.update({'detail__%s'%k: v})
elif k == 'start':
if status:
date_field = Models.LogisticDetail.attr_by_log_stat[status]
query.update({'detail__%s__gte' % date_field: format_date(v)})
else:
query.update({'created_at__gte': format_date(v)})
elif k == 'end':
if status:
date_field = Models.LogisticDetail.attr_by_log_stat[status]
query.update({'detail__%s__lt' % date_field: format_date(v)})
else:
query.update({'created_at__lt': format_date(v)})
elif k == 'query':
if v.startswith('MB'):
query.update({'detail__partner_tracking_no': v})
elif ObjectId.is_valid(v):
query.update({'id': v})
else:
query.update({'tracking_no': v})
elif k == 'partner':
partner = Models.Partner.objects(name=v).first()
query.update({'detail__partner': partner})
elif k == 'channel':
query.update({'detail__channel': v})
else:
query.update({'%s'%k: v})
return query
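# Hedged illustration (comment only, added for this write-up): request arguments such as
#   {'status': 'SHIPPING', 'start': '2020-01-01T00:00:00.000Z', 'partner': 'DHL'}
# are rewritten by restruct_query into a mongoengine filter roughly like
#   {'detail__status': 'SHIPPING',
#    'detail__shipping_date__gte': datetime.datetime(2020, 1, 1),
#    'detail__partner': <Partner: DHL>}
# where the date field name comes from Models.LogisticDetail.attr_by_log_stat['SHIPPING'];
# the 'DHL' partner and the exact field name are assumptions used only for this example.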
class N(AdminView):
_permission = 'logistic'
@expose('/', methods = ['GET', 'POST', 'DELETE', 'PATCH'])
def index(self, status="ALL"):
def render_tpml(status):
return make_response(open(os.path.join(
TEMPLATE_DIR, 'admin/logistic/index.html')).read())
def render_json(lid):
return jsonify(message="OK")
return request.is_xhr and {
'GET': lambda f: render_json(f.get('id')),
}[request.method](request.form) or render_tpml(status)
@expose("/logistics", methods=["GET"])
def logistics(self):
items_range = request.headers.get('Range', "0-9")
start, end = items_range.split('-')
per_page = int(end)-int(start)+1
query = restruct_query(request.args)
tracking_no = query.pop("tracking_no", "")
include_closed = query.get('include_closed') and query.pop('include_closed')
try:
if include_closed:
los = Models.Logistic.objects(**query)
else:
los = Models.Logistic.objects(is_closed=False, **query)
if tracking_no:
los = los.filter(Q(detail__us_tracking_no=tracking_no) | Q(detail__cn_tracking_no=tracking_no))
if request.args.get('status'):
los = los.order_by('detail__%s' %
Models.LogisticDetail.attr_by_log_stat[request.args.get('status')])
except:
pass
if query.get('receiver'):
addrs = Models.Address.objects(receiver=query.get('receiver')).distinct('id')
orders = Models.Order.commodities(address__in=addrs)
los = list(chain.from_iterable(order.logistics for order in orders))
if query.get('order_id'):
orders = Models.Order.commodities(short_id=int(query.get('order_id')))
los = list(chain.from_iterable(order.logistics for order in orders))
try:
los_size = los.count()
except:
los_size = len(los)
data = los[int(start): int(end)]
data = [to_json(l) for l in data]
resp = make_response(json_util.dumps(data), 200)
resp.headers['Accept-Range'] = 'items'
resp.headers['Content-Range'] = '%s-%s/%s'% (start, end, los_size)
resp.headers['Content-Type'] = 'application/json'
return resp
@expose("/logistics_delay/<status>/<delay_type>", methods=["GET"])
@expose("/logistics_delay/<status>/", methods=["GET"])
@expose("/logistics_delay/", methods=["GET"])
def logistics_delay(self, status=None, delay_type=None):
utcnow = datetime.datetime.utcnow()
if status:
items_range = request.headers.get('Range', "0-9")
start, end = items_range.split('-')
per_page = int(end)-int(start)+1
query = restruct_query(request.args)
tracking_no = query.pop("tracking_no", "")
date_field = Models.LogisticDetail.attr_by_log_stat[status]
delay_days = datetime.timedelta(days=delay_status_by_date[status])
query.update({
'detail__%s__lt' % date_field: utcnow - delay_days,
'detail__status': status,
})
los = Models.Logistic.objects(is_closed=False, **query).order_by('detail__%s' %
date_field)
if tracking_no:
los = los.filter(Q(detail__us_tracking_no=tracking_no) | Q(detail__cn_tracking_no=tracking_no))
if delay_type:
los = los.filter(detail__delay_details__reason__contains=delay_type)
data = los[int(start): int(end)]
data = [to_json(l) for l in data]
resp = make_response(json_util.dumps(data), 200)
resp.headers['Accept-Range'] = 'items'
resp.headers['Content-Range'] = '%s-%s/%s'% (start, end, los.count())
resp.headers['Content-Type'] = 'application/json'
return resp
data = {}
for status in ["PAYMENT_RECEIVED", 'PROCESSING', 'SHIPPING', "PORT_ARRIVED"]:
los = Models.Logistic.objects(is_closed=False)
date_field = Models.LogisticDetail.attr_by_log_stat[status]
delay_days = datetime.timedelta(days=delay_status_by_date[status])
query = {
'detail__%s__lt' % date_field: utcnow - delay_days,
'detail__status': status,
}
count = los.filter(**query).count()
data.update({status: count})
return jsonify(results=data)
@expose("/logistics_irregular/<process_status>/<irr_type>", methods=["GET"])
@expose("/logistics_irregular/<process_status>/", methods=["GET"])
@expose("/logistics_irregular", methods=["GET"])
def logistics_irregular(self, process_status=None, irr_type=None):
utcnow = datetime.datetime.utcnow()
if process_status:
items_range = request.headers.get('Range', "0-9")
start, end = items_range.split('-')
query = restruct_query(request.args)
tracking_no = query.pop('tracking_no', '')
los = Models.Logistic.objects(
detail__irregular_details__process_status=process_status,
**query).order_by('-detail.irregular_details.created_at')
if irr_type:
los = los.filter(detail__irregular_details__irr_type=irr_type).order_by('-detail.irregular_details.created_at')
if tracking_no:
los = los.filter(Q(detail__us_tracking_no=tracking_no) | Q(detail__cn_tracking_no=tracking_no))
data = los[int(start): int(end)]
data = [to_json(l) for l in data]
resp = make_response(json_util.dumps(data), 200)
resp.headers['Accept-Range'] = 'items'
resp.headers['Content-Range'] = '%s-%s/%s'% (start, end, los.count())
resp.headers['Content-Type'] = 'application/json'
return resp
data = {}
for status in ["WAITING_PROCESS", "PROCESSING", "PROCESSED"]:
los = Models.Logistic.objects(detail__irregular_details__process_status=status)
data.update({status: los.count()})
return jsonify(results=data)
@expose("/update", methods=["PUT"])
def update(self):
query = request.get_json()
dt = {}
for k,v in query.items():
if v in [None, u"None", "", "null"]: continue
if 'date' in k:
val = datetime.datetime.strptime(v, '%Y-%m-%d')
elif k.startswith('real'):
val = float(v)
elif k == 'partner':
val = Models.Partner.objects(name=v).first()
elif k == 'irregularity':
val = Models.LogisticIrregular(irr_at_status=v.get('status'),
irr_type=v.get('type'),
reason=v.get('reason'),
desc=v.get('desc'))
else:
val = v.strip()
dt.update({k:val})
try:
lo = Models.Logistic.objects.get(id=dt.pop('lid'))
lo.update_logistic(dt)
return jsonify(message="OK",
remarks=lo.detail.remarks,
delays=lo.detail.delay_details,
irregularities=lo.detail.irregular_details)
except Exception as e:
return jsonify(message="Failed", desc=e.message)
@expose("/update_delay", methods=["PUT"])
def update_delay(self):
query = request.get_json()
try:
lo = Models.Logistic.objects.get(id=query['lid'])
delays = lo.detail.delay_details.filter(status=query['status'])
delays.update(is_done=query['is_done'])
lo.save()
return jsonify(message="OK")
except Exception as e:
return jsonify(message="Failed", desc=e.message)
@expose("/update_irr_step", methods=["PUT"])
def update_irr_step(self):
query = request.get_json()
dt = {}
for k,v in query.items():
dt.update({k:v})
try:
lo = Models.Logistic.objects.get(id=dt['lid'])
irregular = lo.detail.irregular_details.filter(irr_type=dt['irr_type']).first()
irregular.steps = dt['solutions']
lo.save()
return jsonify(message="OK", irr_detail=irregular)
except Exception as e:
return jsonify(message="Failed", desc=e.message)
@expose("/set_irr_done", methods=["PUT"])
def set_irr_done(self):
query = request.get_json()
dt = {}
for k,v in query.items():
dt.update({k:v})
try:
lo = Models.Logistic.objects.get(id=dt['lid'])
irregular = lo.detail.irregular_details.filter(irr_type=dt['irr_type']).first()
irregular.process_status = dt['process_status']
lo.save()
return jsonify(message="OK", irr_detail=irregular)
except Exception as e:
return jsonify(message="Failed", desc=e.message)
@expose("/update_irr_remark", methods=["PUT"])
def update_irr_remark(self):
query = request.get_json()
dt = {}
for k,v in query.items():
dt.update({k:v})
try:
lo = Models.Logistic.objects.get(id=dt['lid'])
irregular = lo.detail.irregular_details.filter(irr_type=dt['irr_type']).first()
remark = Models.LogisticRemark(content=dt['irr_remark'], creator=current_user.name)
irregular.remarks.append(remark)
lo.save()
return jsonify(message="OK", irr_detail=irregular)
except Exception as e:
return jsonify(message="Failed", desc=e.message)
@expose("/merge", methods=["POST"])
def merge(self):
lids = request.json.get('lids')
if not lids:
return jsonify(message="Failed", desc="error~~~")
los = [Models.Logistic.objects(id=lid).first() for lid in lids]
        if len(los) < 2:
            return jsonify(message="Failed", desc="please select at least 2 logistics")
start = 0
for index in range(len(los)-1):
if los[index+1].detail.cn_tracking_no != \
los[start].detail.cn_tracking_no or \
los[index+1].order != los[0].order:
return jsonify(message="Failed", desc="CTN and OrderID should be the same")
for index in range(len(los)-1):
map(
lambda e: los[index+1].entries.append(e),
los[index].entries
)
los[index].entries = []
los[index].save()
los[index].close(
'merged with %s' %
los[index+1].id, datetime.datetime.utcnow()
)
los[index+1].save()
if index+1 == len(los)-1:
comment = Models.LogisticRemark(
content=u"合并单", creator=current_user.name
)
los[index+1].detail.remarks.append(comment)
los[index+1].save()
return jsonify(message="OK", lid=str(los[index+1].id))
@expose("/split_entries", methods=["POST"])
def split_entries(self):
entries = request.json.get('selected')
if not entries:
return jsonify(message="Failed", desc="Please select entries!")
lids = []
entry_ids = []
for l in entries:
c = l.split(':')
lids.append(c[1])
entry_ids.append(c[0])
los = [Models.Logistic.objects(id=lid).first() for lid in set(lids)]
e_lst = []
for i in entry_ids:
e = Models.OrderEntry.objects(id=str(i)).first()
e_lst.append(e)
entries_groups = map(lambda lo: filter(lambda e: e in lo.entries, e_lst),
los)
for lo, lst in zip(los, entries_groups):
lo.fork_by_entries([e.id for e in lst])
return jsonify(message="OK", oid=lo.order.short_id)
@expose('/split_quantity', methods=['POST'])
def split_quantity(self):
lid = request.json.get('lid')
eid = request.json.get('eid')
quantity = request.json.get('quantity')
lo = Models.Logistic.objects(id=lid).first()
entry = Models.OrderEntry.objects(id=eid).first()
if entry.quantity > 1 and entry.quantity - int(quantity) >=1 and entry and lo:
entry.quantity -= int(quantity)
entry.update_snapshot()
entry.update_amount()
new_entry = deepcopy(entry)
new_entry.__class__ = Models.OrderEntry
new_entry.id = None
new_entry.quantity = int(quantity)
new_entry.update_snapshot()
new_entry.update_amount()
new_entry.save()
lo.entries.append(new_entry)
lo.save()
order = lo.order
order.entries.append(new_entry)
order.save()
else:
return jsonify(message="Failed", desc="quantity error~~~~~~")
return jsonify(message="OK", entries=[json.loads(json_util.dumps(entry_to_json(entry))) for entry in lo.entries])
@expose('/download', methods=["GET"])
def download(self):
FIELDS = [u"包裹ID", u'IMG No', u'CTN', u"下单日期", u"订单ID",u'订单短号', u'收件人', u'手机号', u'合作物流商', u'remark',u"下单备注", u"估重", u"渠道"]
now = datetime.datetime.now()
status = request.args.get('status')
query = restruct_query(request.args)
delay_export = query.get('delay_export') and query.pop('delay_export')
delay_type = query.get('delay_type') and query.pop('delay_type')
try:
los = Models.Logistic.objects(is_closed=False, **query)
if status:
los = los.order_by('detail__%s' %
Models.LogisticDetail.attr_by_log_stat[status])
except:
pass
if delay_export:
date_field = Models.LogisticDetail.attr_by_log_stat[status]
delay_days = datetime.timedelta(days=delay_status_by_date[status])
query = {
'detail__%s__lt' % date_field: datetime.datetime.utcnow() - delay_days,
'detail__status': status,
}
los = los.filter(**query).order_by('detail__%s' %
date_field)
if delay_type:
los = los.filter(detail__delay_details__reason__contains=delay_type)
if query.get('receiver'):
addrs = Models.Address.objects(receiver=query.get('receiver')).distinct('id')
orders = Models.Order.commodities(address__in=addrs)
los = list(chain.from_iterable(order.logistics for order in orders))
if query.get('order_id'):
orders = Models.Order.commodities(short_id=int(query.get('order_id')))
los = list(chain.from_iterable(order.logistics for order in orders))
def generate():
yield ','.join(st for st in FIELDS) + '\n'
for log in los:
yield ','.join([
str(log.id),
log.detail.partner_tracking_no,
log.detail.carrier_tracking_no,
log.detail.cn_tracking_no,
log.detail.cn_logistic_name,
format_date(log.detail.payment_received_date),
str(log.order.id),
str(log.order.short_id),
log.order.address.receiver,
log.order.address.mobile_number,
format_date(log.detail.processing_date),
format_date(log.detail.shipping_date),
format_date(log.detail.port_arrived_date),
format_date(log.detail.received_date),
format_date(log.detail.modified),
log.detail.partner.name if log.detail.partner else '',
'; '.join([r.content for r in log.detail.remarks]),
log.detail.extra or '',
str(log.estimated_weight),
log.detail.channel,
]) + '\n'
return Response(generate(),
mimetype="text/csv",
headers={
"Content-Disposition":
"attachment;filename=%s %s.csv" % (format_date(now,'%Y-%m-%d'),'dumps_file')
}
)
@expose('/partner', methods=["GET"])
def partner(self):
partners = Models.Partner.objects().distinct('name')
return jsonify(results=partners, message="OK")
@expose('/close/<lid>', methods=['GET'])
def close(self, lid):
lo = Models.Logistic.objects(id=lid).first()
lo.close("Closed By %s" % current_user.name)
return jsonify(message="OK")
@expose('/logs/<ltype>/<lid>', methods=['GET'])
def logs(self, ltype, lid):
if ltype == 'express':
logs = Models.Logistic.objects(id=lid).first().express_tracking
return self.render('admin/logistic/express.html', logs=logs)
elif ltype == 'logistic':
logs = Models.LogisticLog.objects(logistic_id=lid, log_type__ne='API')
user = lambda i: getattr(Models.User.objects(id=i).first(), 'name', '') if i and i != 'system' else i
return self.render('admin/logistic/logs.html', logs=logs, user=user)
elif ltype == 'print':
lo = Models.Logistic.objects(id=lid).first()
if lo.is_closed:
return Response('this logistics id has been closed.')
return self.render('admin/logistic/print_page.html', lo=lo)
@expose('/refresh/<company>/<number>', methods=['GET'])
def refresh(self, company, number):
Jobs.express.kuaidi_request(company, number)
return jsonify(message="OK")
@expose('/back_status', methods=['GET'])
def back_status(self):
lid = request.args.get('lid')
status = request.args.get('status')
l = Models.Logistic.objects(id=lid).first()
l.detail.status = status
setattr(l.detail, Models.LogisticDetail.attr_by_log_stat[status],
datetime.datetime.utcnow())
l.save()
order = l.order
order.update_logistic_status()
return jsonify(message="OK")
admin.add_view(N(name=_('Logistics Backend'), category='Logistics', menu_icon_type="fa", menu_icon_value="truck"))
|
recipes/Python/578132_How_to_Mutate_a_Float/recipe-578132.py
|
tdiprima/code
| 2,023 |
84053
|
<gh_stars>1000+
import sys, struct, ctypes
def set_float(obj, value):
assert isinstance(obj, float), 'Object must be a float!'
assert isinstance(value, float), 'Value must be a float!'
stop = sys.getsizeof(obj)
start = stop - struct.calcsize('d')
array = ctypes.cast(id(obj), ctypes.POINTER(ctypes.c_ubyte))
for args in zip(range(start, stop), struct.pack('d', value)):
array.__setitem__(*args)
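if __name__ == '__main__':
    # Hedged demonstration (added for this write-up, not part of the original recipe):
    # mutate a float object in place and observe that every name bound to it changes.
    # This pokes at CPython's object layout via ctypes and is unsafe outside a demo.
    x = 1000.5
    alias = x                 # another reference to the same float object
    set_float(x, 2.25)        # overwrite the object's value in memory
    print(x, alias)           # both print 2.25 -- the object itself was changed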
|
functional_tests/mp/04-pool.py
|
borisgrafx/client
| 3,968 |
84070
|
<filename>functional_tests/mp/04-pool.py
#!/usr/bin/env python
"""pool with finish."""
import multiprocessing as mp
import wandb
import yea
def do_run(num):
run = wandb.init()
run.config.id = num
run.log(dict(s=num))
run.finish()
return num
def main():
wandb.require("service")
wandb.setup()
num_proc = 4
pool = mp.Pool(processes=num_proc)
result = pool.map_async(do_run, range(num_proc))
data = result.get(60)
print(f"DEBUG: {data}")
assert len(data) == 4
if __name__ == "__main__":
yea.setup() # Use ":yea:start_method:" to set mp.set_start_method()
main()
|
prompt_tuning/train/models.py
|
google-research/prompt-tuning
| 108 |
84075
|
# Copyright 2022 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""T5X model subclasses for prompt tuning."""
import functools
from typing import Optional, Mapping, MutableMapping, Any, Tuple
import jax
import jax.numpy as jnp
from t5x import models
from flaxformer.types import Array
PyTreeDef = type(jax.tree_structure(None))
class PromptDecoderOnlyModel(models.DecoderOnlyModel):
"""A prompted DecoderOnly Model that uses the prefill cache for prompting."""
def __init__(self,
prompt_length: int,
*args,
**kwargs):
super().__init__(*args, **kwargs)
self.prompt_length = prompt_length
def _compute_logits(self,
params: Mapping[str, Array],
batch: Mapping[str, jnp.ndarray],
dropout_rng: Optional[jnp.ndarray] = None) -> jnp.ndarray:
"""Hack off the prompt when calculating logits."""
logits = super()._compute_logits(params, batch, dropout_rng)
return logits[:, self.prompt_length:]
def predict_batch_with_aux(
self,
params: Mapping[str, Array],
batch: Mapping[str, jnp.ndarray],
rng: Optional[jax.random.KeyArray] = None,
*,
return_all_decodes: bool = False,
num_decodes: int = 1,
decoder_params: Optional[MutableMapping[str, Any]] = None,
) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:
# Get the maximum shape of an example, [B, input_tokens + target_tokens]
target_shape = batch['decoder_input_tokens'].shape
target_type = batch['decoder_input_tokens'].dtype
# We need this to be the full cache (including the prompt) length so masks
# and biases match when used with single slices.
max_decode_length = target_shape[1] + self.prompt_length
# Initialize a zero'd out auto-regressive cache. The shape for the keys and
# values are [B, ..., max_decode_length], the cache_index is a scalar, and
# the relpos_bias cache is
# [1, num_heads, max_decode_length, max_decode_length].
#
# We need to call this with `decode=True` so that the relpos cache is
# created, otherwise it would get created inside of the `lax.while_loop` and
# cause an error. However, when the `layers.PromptDecoderOnly` is called
# with `decode=True` the prompt is not added to the input because the input
# is expected to have a single step in the sequence dimension (The fact that
# this input has more than 1 step is okay because the single step check in
# the dense attention class is after the `is_initialized` check. When this
# is called with more than one step, the cache is created but the
# `is_initialized` check fails so we never get to the shape check).
#
# We can't set `prefill=True` which would normally be used to make sure the
# prompt is applied to the inputs, because then both `prefill` and `decode`
# are true and this causes an error (eventually `prefill` should be
# refactored to be a state of `decode` but that is too much to change right
    # now). This means that our cache would only have the input shape; there
# wouldn't be room for the prompt. So we update the shape of the dummy data
# to include the prompt.
#
# Now we can call apply with `decode=True` (so all parts of the cache are
# initialized) and the shapes will be correct (they will be longer to fit
# the prompts).
prompted_shape = target_shape[:1] + (max_decode_length,)
_, variables_with_cache = self.module.apply(
{'params': params},
jnp.ones(prompted_shape, target_type),
jnp.ones(prompted_shape, target_type),
enable_dropout=False,
decode=True,
prefill=False,
mutable=['cache'])
cache = variables_with_cache['cache']
# Calculate the size of the inputs for each example by summing their
    # causal attention mask. This mask has one more 1 token than the number of
    # input tokens, so subtract off 1.
inputs_lengths = jnp.sum(batch['decoder_causal_attention'], axis=1) - 1
# Since decoder_input_tokens is shifted to the right AND
    # `decoder_causal_attention` has one more 1 than the number of input
    # tokens, this masks out the targets portion of the decoder_input_tokens.
inputs = batch['decoder_input_tokens'] * batch['decoder_causal_attention']
# We can prefill the cache with both the prompt representations and the
# input representations so our prefill length is the inputs plus the prompt
# length.
prefill_lengths = inputs_lengths + self.prompt_length
# If `self._inputs_bidirectional_attention = False`, we should not pass
# batch['decoder_causal_attention'] to `module.apply` during cache prefill
# and pass None instead.
maybe_decoder_causal_attention = self._get_decoder_causal_attention(batch)
# This prefills the cache. Because prefill=True, the
# layers.PromptDecoderOnly will add the prompt to the input.
_, variables_with_cache = self.module.apply(
{
'params': params,
'cache': cache,
},
inputs,
# Use the `decoder_causal_attention` as the targets so the decoder
# attention mask will correctly cover the whole input, otherwise the
# first input token (the 0 for BOS) will not be included in the mask.
# This also restricts the mask to not include any target positions like
# it would if you used `decoder_target_tokens`.
batch['decoder_causal_attention'],
decoder_causal_attention=maybe_decoder_causal_attention,
mutable=['cache'],
enable_dropout=False,
prefill=True,
prefill_lengths=prefill_lengths,
)
# The cache index will now be converted to a vector of `[B]`
prefilled_cache = variables_with_cache['cache']
tokens_ids_to_logits = functools.partial(
self._compute_logits_from_slice,
params=params,
max_decode_length=max_decode_length)
# Make sure that `decoder_params` can be unpacked with `**decoder_params`
# below.
if decoder_params is None:
decoder_params = {}
if rng is not None:
if decoder_params.get('decode_rng') is not None:
raise ValueError(
f'Got RNG both from the `rng` argument ({rng}) and '
f"`decoder_params['decode_rng']` ({decoder_params['decode_rng']}). "
'Please specify one or the other.')
decoder_params['decode_rng'] = rng
# When we run the actual decode function we will be indexing into the input
    # array to extract the next token (or write it when generating). To make
    # sure this index (which was created from prefilling and includes the prompt
    # length) points at the correct location, we need to pad the input as if the
    # prompts had been added.
# Note: Adding the prompt to the end isn't well-formed. If the prompt
# (which doesn't have a token) is the last position, what should the last
# input token (which is fed into the decode function first) be? To insert
# a prompt at the "end" of the input we should actually insert it just
# before EOS. Decoding will begin part way though the inputs, the actual
# prompt positions will never be used as token inputs so their value does
# not matter (although, we use 2 just to avoid any complications multiple
# EOS tokens might bring), additionally, because the cached keys and value
# are used, we don't need the prompt to be inserted in the real location, we
# just need the shift in the location of the EOS to change appropriately.
# Thus we can use this padding with experiments where the prompt is not
# prepended.
prompt_pad = jnp.full((inputs.shape[0], self.prompt_length), 2,
dtype=inputs.dtype)
inputs_with_fake_prompts = jnp.concatenate([prompt_pad, inputs], axis=1)
# Using the above-defined single-step decoder function, run a decoding
# function that will produces a [batch, 1, max_decode_length] output.
decoded_sequences, scores = self._decode_fn(
inputs=inputs_with_fake_prompts,
cache=prefilled_cache,
tokens_to_logits=tokens_ids_to_logits,
eos_id=self.output_vocabulary.eos_id,
num_decodes=num_decodes,
initial_index=prefill_lengths,
**decoder_params)
if not return_all_decodes:
# Search returns [n_batch, n_beam/decodes, n_length] with the beam/decode
# dimension sorted in increasing order of log-probability.
# `scores` is [batch, beam/decode_size]
# We take the highest scoring sequence (-1) and its score
decoded_sequences = decoded_sequences[:, -1, :]
# Beam search returns []
aux = {'scores': scores[:, -1]}
else:
# We return all samples and scores, rather than just the top ones.
aux = {'scores': scores}
# Remove the prompt + input tokens from each sequence and shuffle them
# to the back of the sequence.
sequences = models.remove_prefix(decoded_sequences, prefill_lengths)
# Remove the extra space the prompt took up (these are all shifted to the
# end and zero'd out). Use ... so we don't need to know if there this was
# all decodes or just one.
trimmed_sequences = sequences[..., :-self.prompt_length]
return trimmed_sequences, aux
|
algorithms/strange-advertising.py
|
gajubadge11/HackerRank-1
| 340 |
84092
|
#!/bin/python3
import sys
def calc_pattern():
max_n = 50
ads = 5
output = []
for i in range(max_n):
output.append(ads//2)
ads = (ads//2)*3
return output
def viralAdvertising(n, pattern):
return sum(pattern[:n])
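# Hedged worked example (comment added for this write-up): starting from 5 ads,
# day 1 gets floor(5/2) = 2 likes, day 2 shares 2*3 = 6 ads for floor(6/2) = 3 likes,
# day 3 shares 3*3 = 9 ads for floor(9/2) = 4 likes, so viralAdvertising(3, pattern)
# returns 2 + 3 + 4 = 9.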
if __name__ == "__main__":
n = int(input().strip())
pattern = calc_pattern()
result = viralAdvertising(n, pattern)
print(result)
|
spinup_utils/plot.py
|
kaixindelele/DRLib
| 226 |
84111
|
"""
Compared with the original plot.py, this script adds the following features:
1. It can be run directly from PyCharm or VS Code, or driven from the command line;
2. Runs are sorted by exp_name instead of by timestamp;
3. Each exp_name keeps a fixed color;
4. The curve line width is adjustable, which makes the curves easier to inspect;
5. Figures are saved to disk, convenient for plotting over a remote SSH session;
6. The figure window is maximized automatically;
7. The figure resizes itself to fit;
8. For readers who find colors hard to distinguish, each legend entry can show the performance value and its rank;
9. Legend entries are sorted by performance from high to low, which makes comparisons easier;
10. A clip_xaxis value is provided to truncate all runs at the same point, which keeps the plot tidy.
Tested with seaborn 0.8.1.
"""
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import json
import os
import os.path as osp
import numpy as np
DIV_LINE_WIDTH = 50
# Global vars for tracking and labeling data at load time.
exp_idx = 0
units = dict()
def plot_data(data, xaxis='Epoch', value="TestEpRet",
condition="Condition1", smooth=1,
linewidth=4,
rank=True,
performance=True,
**kwargs):
performance_rank_dict = {}
condition2_list = []
if smooth > 1:
"""
smooth data with moving window average.
that is,
smoothed_y[t] = average(y[t-k], y[t-k+1], ..., y[t+k-1], y[t+k])
where the "smooth" param is width of that window (2k+1)
"""
y = np.ones(smooth)
for datum in data:
condition2_list.append(datum["Condition2"].values[0])
x = np.asarray(datum[value])
z = np.ones(len(x))
smoothed_x = np.convolve(x, y, 'same') / np.convolve(z, y, 'same')
datum[value] = smoothed_x
# add mean performance to performance_rank{dict}
print("rank-add:", datum[condition].values[0])
if datum[condition].values[0] not in performance_rank_dict.keys():
performance_rank_dict[datum[condition].values[0]] = np.mean(smoothed_x[-len(smoothed_x)//10:])
else:
performance_rank_dict[datum[condition].values[0]] += np.mean(smoothed_x[-len(smoothed_x)//10:])
        # account for runs with multiple seeds:
for key in performance_rank_dict.keys():
seed_num = sum([1 for cond in condition2_list if key in cond])
performance_rank_dict[key] /= seed_num
    # collect the performance values and compute their rank order
performance_list = []
performance_rank_keys = []
for key, val in performance_rank_dict.items():
print(key, val)
performance_list.append(val)
performance_rank_keys.append(key)
    # get each entry's rank; argsort has to be applied twice to obtain ranks
performance_rank_list = np.argsort(np.argsort(-np.array(performance_list)))
performance_rank_sort_dict = {performance_rank_keys[index]: performance_rank_list[index]
for index in range(len(performance_rank_list))}
print("performance_rank_list:", performance_rank_list)
    # rename data[condition] so that performance and rank show up in the legend
for index, datum in enumerate(data):
origin_key = datum[condition].values[0]
if performance:
p = performance_rank_dict[origin_key]
datum[condition] = 'P-' + str(np.round(p, 3)) + "-" + datum[condition]
if rank:
rank_value = performance_rank_sort_dict[origin_key]
datum[condition] = 'Rank-' + str(rank_value) + "-" + datum[condition]
if isinstance(data, list):
data = pd.concat(data, ignore_index=True)
sns.set(style="darkgrid", font_scale=1.75, )
    # sort data by legend (Condition1) so curves are drawn in a stable order
    data = data.sort_values(by='Condition1', axis=0)
sns.tsplot(data=data,
time=xaxis,
value=value,
unit="Unit",
condition=condition,
ci='sd',
linewidth=linewidth,
color=sns.color_palette("Paired", len(data)),
# palette=sns.color_palette("hls", 8),
**kwargs)
"""
If you upgrade to any version of Seaborn greater than 0.8.1, switch from
tsplot to lineplot replacing L29 with:
sns.lineplot(data=data, x=xaxis, y=value, hue=condition, ci='sd', **kwargs)
Changes the colorscheme and the default legend style, though.
plt.legend()
    loc: legend location, one of ('best', 'upper right', 'upper left', 'lower left', 'lower right',
    'right', 'center left', 'center right', 'lower center', 'upper center', 'center');
    it is ignored when bbox_to_anchor is given
    fontsize: int, float or {'xx-small', 'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large'}, legend font size;
    frameon: whether to draw a frame around the legend,
    ncol: number of columns in the legend, default 1,
    title: add a title to the legend
    shadow: whether to draw a shadow behind the legend frame,
    markerfirst: True puts the label to the right of the handle, False the opposite,
    markerscale: size of the legend markers relative to those in the plot,
    numpoints: number of marker points on each legend handle, usually set to 1,
    fancybox: whether to round the corners of the legend frame
    framealpha: transparency of the legend frame
    borderpad: padding inside the legend frame
    labelspacing: vertical spacing between legend entries
    handlelength: length of the legend handles
    bbox_to_anchor: (x towards the right, y towards the bottom); use it to position the legend
    manually or to draw it outside the axes, e.g. bbox_to_anchor=(1.4, 0.8), usually together
    with ax.get_position() and set_position([box.x0, box.y0, box.width*0.8, box.height])
"""
    # sort the legend entries as well, so the plot is easier to read at a glance
handles, labels = plt.gca().get_legend_handles_labels()
sorted_handles = []
sorted_labels = []
for index in range(len(handles)):
order_index = list(performance_rank_list).index(index)
sorted_handles.append(handles[order_index])
sorted_labels.append(labels[order_index])
plt.legend(sorted_handles, sorted_labels, loc='upper center', labelspacing=0.25,
ncol=1,
handlelength=6,
mode="expand",
borderaxespad=0.,
)
# plt.legend(loc='upper center',
# ncol=1,
# handlelength=6,
# mode="expand",
# borderaxespad=0.,
# )
"""
For the version of the legend used in the Spinning Up benchmarking page,
swap L38 with:
plt.legend(loc='upper center', ncol=6, handlelength=1,
mode="expand", borderaxespad=0., prop={'size': 13})
"""
xscale = np.max(np.asarray(data[xaxis])) > 5e3
if xscale:
# Just some formatting niceness: x-axis scale in scientific notation if max x is large
plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
plt.tight_layout(pad=0.5)
def get_datasets(logdir, condition=None):
"""
Recursively look through logdir for output files produced by
spinup.logx.Logger.
Assumes that any file "progress.txt" is a valid hit.
"""
global exp_idx
global units
datasets = []
roots = []
exp_names = []
for root, _, files in os.walk(logdir):
if 'progress.txt' in files:
exp_name = None
try:
config_path = open(os.path.join(root, 'config.json'))
config = json.load(config_path)
if 'exp_name' in config:
exp_name = config['exp_name']
exp_names.append(exp_name)
roots.append(root)
except Exception as e:
print("e:", e)
print('No file named config.json')
# just leave one seed:
# roots_names_dict = {exp_names[index]: roots[index] for index in range(len(exp_names))}
# exp_name(str) --> roots(list) with diff seeds
roots_names_dict = {exp_names[index]: roots for index in range(len(exp_names))}
for key, value in roots_names_dict.items():
print(key, value)
    # sort by experiment name
roots_names_list = sorted(roots_names_dict.items(), key=lambda x: x[0])
print("roots_names_list:", roots_names_list)
roots_names_dict = {tup[0]: tup[1] for tup in roots_names_list}
print("roots_names_dict:", roots_names_dict)
for exp_name, roots in roots_names_dict.items():
for root in roots:
condition1 = condition or exp_name or 'exp'
condition2 = condition1 + '-' + str(exp_idx)
exp_idx += 1
if condition1 not in units:
units[condition1] = 0
unit = units[condition1]
units[condition1] += 1
            # x-axis cut-off value. Defaults to None; when set to a concrete value, every run is
            # truncated at that point, which keeps the plot tidy. Pick it manually based on the
            # current x-axis unit (steps are on the order of 1e6, epochs on the order of 500);
            # for example, to truncate at epoch 300 simply set clip_xaxis = 300.
clip_xaxis = None
try:
exp_data = pd.read_table(os.path.join(root, 'progress.txt'))
if clip_xaxis is not None:
exp_data = exp_data[:clip_xaxis]
line_num = len(exp_data)
print('line num:{}, read from {}'.format(line_num,
os.path.join(root, 'progress.txt')))
except:
print('Could not read from %s' % os.path.join(root, 'progress.txt'))
continue
performance = 'TestEpRet' if 'TestEpRet' in exp_data else 'AverageTestEpRet'
exp_data.insert(len(exp_data.columns), 'Unit', unit)
exp_data.insert(len(exp_data.columns), 'Condition1', condition1)
exp_data.insert(len(exp_data.columns), 'Condition2', condition2)
exp_data.insert(len(exp_data.columns), 'Performance', exp_data[performance])
datasets.append(exp_data)
    # # original behaviour, kept for reference: walk the folders in time order
# print("-"*10, 'sorted by time', '-'*10)
# for root, _, files in os.walk(logdir):
# if 'progress.txt' in files:
# exp_name = None
# try:
# config_path = open(os.path.join(root, 'config.json'))
# config = json.load(config_path)
# if 'exp_name' in config:
# exp_name = config['exp_name']
# except:
# print('No file named config.json')
# condition1 = condition or exp_name or 'exp'
# condition2 = condition1 + '-' + str(exp_idx)
# exp_idx += 1
# if condition1 not in units:
# units[condition1] = 0
# unit = units[condition1]
# units[condition1] += 1
#
# try:
# exp_data = pd.read_table(os.path.join(root, 'progress.txt'))
# line_num = len(exp_data)
# print('line num:{}, read from {}'.format(line_num,
# os.path.join(root, 'progress.txt')))
# except:
# print('Could not read from %s' % os.path.join(root, 'progress.txt'))
# continue
# # performance = 'AverageTestEpRet' if 'AverageTestEpRet' in exp_data else 'TestEpRet'
# # performance = 'AverageEpRet' if 'AverageTestEpRet' in exp_data else 'AverageEpRet'
# performance = 'TestSuccess' if 'TestSuccess' in exp_data else 'AverageEpRet'
# exp_data.insert(len(exp_data.columns),'Unit',unit)
# exp_data.insert(len(exp_data.columns),'Condition1',condition1)
# exp_data.insert(len(exp_data.columns),'Condition2',condition2)
# exp_data.insert(len(exp_data.columns),'Performance',exp_data[performance])
# datasets.append(exp_data)
return datasets
def get_all_datasets(all_logdirs, legend=None, select=None, exclude=None):
"""
For every entry in all_logdirs,
1) check if the entry is a real directory and if it is,
pull data from it;
2) if not, check to see if the entry is a prefix for a
real directory, and pull data from that.
"""
logdirs = []
for logdir in all_logdirs:
if osp.isdir(logdir) and logdir[-1] == os.sep:
logdirs += [logdir]
else:
basedir = osp.dirname(logdir)
fulldir = lambda x: osp.join(basedir, x)
prefix = logdir.split(os.sep)[-1]
print("basedir:", basedir)
listdir = os.listdir(basedir)
logdirs += sorted([fulldir(x) for x in listdir if prefix in x])
"""
Enforce selection rules, which check logdirs for certain substrings.
Makes it easier to look at graphs from particular ablations, if you
launch many jobs at once with similar names.
"""
if select is not None:
logdirs = [log for log in logdirs if all(x in log for x in select)]
if exclude is not None:
logdirs = [log for log in logdirs if all(not (x in log) for x in exclude)]
# Verify logdirs
print('Plotting from...\n' + '=' * DIV_LINE_WIDTH + '\n')
for logdir in logdirs:
print(logdir)
print('\n' + '=' * DIV_LINE_WIDTH)
# Make sure the legend is compatible with the logdirs
assert not (legend) or (len(legend) == len(logdirs)), \
"Must give a legend title for each set of experiments."
# Load data from logdirs
data = []
if legend:
for log, leg in zip(logdirs, legend):
data += get_datasets(log, leg)
else:
for log in logdirs:
data += get_datasets(log)
return data
def make_plots(all_logdirs, legend=None,
xaxis=None, values=None,
count=False,
font_scale=1.5, smooth=1,
linewidth=4,
select=None, exclude=None,
estimator='mean',
rank=True,
performance=True,
):
data = get_all_datasets(all_logdirs, legend, select, exclude)
values = values if isinstance(values, list) else [values]
condition = 'Condition2' if count else 'Condition1'
estimator = getattr(np, estimator) # choose what to show on main curve: mean? max? min?
for value in values:
plt.figure()
plot_data(data, xaxis=xaxis, value=value,
condition=condition, smooth=smooth, estimator=estimator,
linewidth=linewidth, rank=rank, performance=performance)
        # maximize the figure window by default
manager = plt.get_current_fig_manager()
try:
# matplotlib3.3.4 work
manager.resize(*manager.window.maxsize())
except:
# matplotlib3.2.1//2.2.3 work
manager.window.showMaximized()
fig = plt.gcf()
fig.set_size_inches((16, 9), forward=False)
select_str = ''
exclude_str = ''
print("select:", select)
print("select_str:", select_str)
if select is not None and type(select) is list:
for s_str in select:
select_str += s_str
if exclude is not None and type(exclude) is list:
for s_str in exclude:
exclude_str += s_str
print("select_str:", select_str)
try:
        # show the figure when not running on a remote / headless session
plt.show()
except:
pass
fig.savefig(all_logdirs[0] + 'ep_reward_'+select_str+exclude_str+'.png',
bbox_inches='tight',
dpi=300)
# plt.savefig(all_logdirs[0] + 'ep_reward.png',
# bbox_inches='tight',
# dpi=300,
# )
def main():
import argparse
parser = argparse.ArgumentParser()
import sys
    # when launched from the command line, the branch below is used and a data path must be supplied!
if len(sys.argv) > 1:
print("run in command: \n argv:", sys.argv, '\n', '-' * 30)
parser.add_argument('logdir', nargs='*')
# other nargs
parser.add_argument('--select', nargs='*',
                            help='keep only runs under the given path whose names contain these keywords '
                                 '(keywords only, not sub-folder paths); in an IDE pass them as a list, in a '
                                 'terminal no quotes are needed and multiple keywords are separated by spaces')
        parser.add_argument('--exclude', nargs='*',
                            help='same as --select, but excludes the matching runs')
else:
        # when launched from an IDE (for debugging), add your data paths to the default list below!
        print("run in pycharm\n", '-' * 30)
        parser.add_argument('--logdir', '-r', type=list,
                            default=[
                                # Windows path example: keep a prefix of the sub-folder names; e.g. the
                                # sub-folders are named "2020-reach-*", so "plot_demo_files\" alone is not enough
                                r"plot_demo_files\2020",
                                # Ubuntu path example:
# "plot_demo_files/2020",
])
# other nargs
parser.add_argument('--select', default=[], )
parser.add_argument('--exclude', default=[], )
parser.add_argument('--legend', '-l', nargs='*')
    parser.add_argument('--xaxis', '-x', default='TotalEnvInteracts',
                        help='which column to use as the x-axis, default TotalEnvInteracts')
    parser.add_argument('--value', '-y', default='Performance', nargs='*',
                        help='which column(s) to plot as the performance metric, default Performance')
    parser.add_argument('--count', action='store_true',
                        help='whether to plot every random seed separately; pass --count to enable')
    # parser.add_argument('--count', default="False")
    parser.add_argument('--smooth', '-s', type=int, default=20,
                        help='moving-average window; 20 makes the curves noticeably smoother')
    parser.add_argument('--linewidth', '-lw', type=float, default=4,
                        help='curve line width; thicker lines are easier to tell apart')
    parser.add_argument('--rank', type=bool, default=True,
                        help='whether to show the performance rank in the legend')
    parser.add_argument('--performance', type=bool, default=True,
                        help='whether to show the performance value in the legend')
parser.add_argument('--est', default='mean')
args = parser.parse_args()
print("args:", args)
make_plots(args.logdir, args.legend, args.xaxis, args.value, args.count,
smooth=args.smooth, select=args.select, exclude=args.exclude,
estimator=args.est,
linewidth=args.linewidth,
rank=args.rank,
performance=args.performance)
if __name__ == "__main__":
main()
|
reblur_package/reblur_package/utils.py
|
adityakrishnavamsy/SelfDeblur
| 104 |
84137
|
<reponame>adityakrishnavamsy/SelfDeblur
import torch
import meshzoo
def generate_2D_mesh(H, W):
_, faces = meshzoo.rectangle(
xmin = -1., xmax = 1.,
ymin = -1., ymax = 1.,
nx = W, ny = H,
zigzag=True)
x = torch.arange(0, W, 1).float().cuda()
y = torch.arange(0, H, 1).float().cuda()
xx = x.repeat(H, 1)
yy = y.view(H, 1).repeat(1, W)
grid = torch.stack([xx, yy], dim=0)
return grid, faces
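if __name__ == '__main__':
    # Hedged usage sketch (added for this write-up, not part of the original module):
    # build the pixel-coordinate grid and triangle faces for a small image. A CUDA
    # device is required because the grid tensors above are created with .cuda().
    grid, faces = generate_2D_mesh(H=4, W=6)
    print(grid.shape)    # torch.Size([2, 4, 6]) -- per-pixel (x, y) coordinates
    print(faces.shape)   # (n_triangles, 3) vertex indices from meshzoo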
|
backend/src/baserow/contrib/database/api/views/grid/serializers.py
|
cjh0613/baserow
| 839 |
84139
|
<gh_stars>100-1000
from rest_framework import serializers
from baserow.contrib.database.views.models import GridViewFieldOptions
class GridViewFieldOptionsSerializer(serializers.ModelSerializer):
class Meta:
model = GridViewFieldOptions
fields = ("width", "hidden", "order")
class GridViewFilterSerializer(serializers.Serializer):
field_ids = serializers.ListField(
allow_empty=False,
required=False,
default=None,
child=serializers.IntegerField(),
help_text="Only the fields related to the provided ids are added to the "
"response. If None are provided all fields will be returned.",
)
row_ids = serializers.ListField(
allow_empty=False,
child=serializers.IntegerField(),
help_text="Only rows related to the provided ids are added to the response.",
)
|
output/examples/example6/view.py
|
cy15196/FastCAE
| 117 |
84140
|
<filename>output/examples/example6/view.py<gh_stars>100-1000
MainWindow.clearData()
MainWindow.openPost3D()
PostProcess.script_openFile(-1,"Post3D","%examplesPath%/water.vtk")
PostProcess.script_openFile(-1,"Post3D","%examplesPath%/platform.vtk")
PostProcess.script_applyClicked(-1,"Post3D")
PostProcess.script_Properties_streamline_integration_direction(-1,"Post3D",3,2)
PostProcess.script_Properties_streamline_integration_type(-1,"Post3D",3,1)
PostProcess.script_Properties_streamline_integration_stepUnit(-1,"Post3D",3,2)
PostProcess.script_Properties_streamline_seeds_num_points(-1,"Post3D",3,100)
PostProcess.script_FilterStreamLine(-1,"Post3D",1)
PostProcess.script_applyClicked(-1,"Post3D")
|
Engine/libs/freeType/src/tools/docmaker/sources.py
|
lampardwade/tetris_lua_playgroundOSS
| 175 |
84148
|
<filename>Engine/libs/freeType/src/tools/docmaker/sources.py<gh_stars>100-1000
# Sources (c) 2002-2004, 2006-2009, 2012
# <NAME> <<EMAIL>>
#
#
# this file contains definitions of classes needed to decompose
# C sources files into a series of multi-line "blocks". There are
# two kinds of blocks:
#
# - normal blocks, which contain source code or ordinary comments
#
# - documentation blocks, which have restricted formatting, and
# whose text always start with a documentation markup tag like
# "<Function>", "<Type>", etc..
#
# the routines used to process the content of documentation blocks
# are not contained here, but in "content.py"
#
# the classes and methods found here only deal with text parsing
# and basic documentation block extraction
#
import fileinput, re, sys, os, string
################################################################
##
## BLOCK FORMAT PATTERN
##
## A simple class containing compiled regular expressions used
## to detect potential documentation format block comments within
## C source code
##
## note that the 'column' pattern must contain a group that will
## be used to "unbox" the content of documentation comment blocks
##
class SourceBlockFormat:
def __init__( self, id, start, column, end ):
"""create a block pattern, used to recognize special documentation blocks"""
self.id = id
self.start = re.compile( start, re.VERBOSE )
self.column = re.compile( column, re.VERBOSE )
self.end = re.compile( end, re.VERBOSE )
#
# format 1 documentation comment blocks look like the following:
#
# /************************************/
# /* */
# /* */
# /* */
# /************************************/
#
# we define a few regular expressions here to detect them
#
start = r'''
\s* # any number of whitespace
/\*{2,}/ # followed by '/' and at least two asterisks then '/'
\s*$ # probably followed by whitespace
'''
column = r'''
\s* # any number of whitespace
/\*{1} # followed by '/' and precisely one asterisk
([^*].*) # followed by anything (group 1)
\*{1}/ # followed by one asterisk and a '/'
\s*$ # probably followed by whitespace
'''
re_source_block_format1 = SourceBlockFormat( 1, start, column, start )
#
# format 2 documentation comment blocks look like the following:
#
# /************************************ (at least 2 asterisks)
# *
# *
# *
# *
# **/ (1 or more asterisks at the end)
#
# we define a few regular expressions here to detect them
#
start = r'''
\s* # any number of whitespace
/\*{2,} # followed by '/' and at least two asterisks
\s*$ # probably followed by whitespace
'''
column = r'''
\s* # any number of whitespace
\*{1}(?!/) # followed by precisely one asterisk not followed by `/'
(.*) # then anything (group1)
'''
end = r'''
\s* # any number of whitespace
\*+/ # followed by at least one asterisk, then '/'
'''
re_source_block_format2 = SourceBlockFormat( 2, start, column, end )
#
# the list of supported documentation block formats, we could add new ones
# relatively easily
#
re_source_block_formats = [re_source_block_format1, re_source_block_format2]
#
# the following regular expressions corresponds to markup tags
# within the documentation comment blocks. they're equivalent
# despite their different syntax
#
# notice how each markup tag _must_ begin a new line
#
re_markup_tag1 = re.compile( r'''\s*<((?:\w|-)*)>''' ) # <xxxx> format
re_markup_tag2 = re.compile( r'''\s*@((?:\w|-)*):''' ) # @xxxx: format
#
# the list of supported markup tags, we could add new ones relatively
# easily
#
re_markup_tags = [re_markup_tag1, re_markup_tag2]
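#
# editorial addition (not in the original file): both markup styles are
# recognized, and the tag name is captured in group 1
#
assert re_markup_tag1.match( "  <Function>" ).group( 1 ) == "Function"
assert re_markup_tag2.match( "  @Function:" ).group( 1 ) == "Function"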
#
# used to detect a cross-reference, after markup tags have been stripped
#
re_crossref = re.compile( r'@((?:\w|-)*)(.*)' )
#
# used to detect italic and bold styles in paragraph text
#
re_italic = re.compile( r"_(\w(\w|')*)_(.*)" ) # _italic_
re_bold = re.compile( r"\*(\w(\w|')*)\*(.*)" ) # *bold*
#
# used to detect the end of commented source lines
#
re_source_sep = re.compile( r'\s*/\*\s*\*/' )
#
# used to perform cross-reference within source output
#
re_source_crossref = re.compile( r'(\W*)(\w*)' )
#
# a list of reserved source keywords
#
re_source_keywords = re.compile( '''\\b ( typedef |
struct |
enum |
union |
const |
char |
int |
short |
long |
void |
signed |
unsigned |
\#include |
\#define |
\#undef |
\#if |
\#ifdef |
\#ifndef |
\#else |
\#endif ) \\b''', re.VERBOSE )
################################################################
##
## SOURCE BLOCK CLASS
##
## A SourceProcessor is in charge of reading a C source file
## and decomposing it into a series of different "SourceBlocks".
## each one of these blocks can be made of the following data:
##
## - A documentation comment block that starts with "/**" and
## whose exact format will be discussed later
##
## - normal source lines, including comments
##
## the important fields in a text block are the following ones:
##
## self.lines : a list of text lines for the corresponding block
##
## self.content : for documentation comment blocks only, this is the
## block content that has been "unboxed" from its
## decoration. This is None for all other blocks
## (i.e. sources or ordinary comments with no starting
## markup tag)
##
class SourceBlock:
def __init__( self, processor, filename, lineno, lines ):
self.processor = processor
self.filename = filename
self.lineno = lineno
self.lines = lines[:]
self.format = processor.format
self.content = []
if self.format == None:
return
words = []
# extract comment lines
lines = []
for line0 in self.lines:
m = self.format.column.match( line0 )
if m:
lines.append( m.group( 1 ) )
# now, look for a markup tag
for l in lines:
l = string.strip( l )
if len( l ) > 0:
for tag in re_markup_tags:
if tag.match( l ):
self.content = lines
return
def location( self ):
return "(" + self.filename + ":" + repr( self.lineno ) + ")"
# debugging only - not used in normal operations
def dump( self ):
if self.content:
print "{{{content start---"
for l in self.content:
print l
print "---content end}}}"
return
fmt = ""
if self.format:
fmt = repr( self.format.id ) + " "
for line in self.lines:
print line
################################################################
##
## SOURCE PROCESSOR CLASS
##
## The SourceProcessor is in charge of reading a C source file
## and decomposing it into a series of different "SourceBlock"
## objects.
##
## each one of these blocks can be made of the following data:
##
## - A documentation comment block that starts with "/**" and
## whose exact format will be discussed later
##
## - normal source lines, including comments
##
##
class SourceProcessor:
def __init__( self ):
"""initialize a source processor"""
self.blocks = []
self.filename = None
self.format = None
self.lines = []
def reset( self ):
"""reset a block processor, clean all its blocks"""
self.blocks = []
self.format = None
def parse_file( self, filename ):
"""parse a C source file, and add its blocks to the processor's list"""
self.reset()
self.filename = filename
fileinput.close()
self.format = None
self.lineno = 0
self.lines = []
for line in fileinput.input( filename ):
# strip trailing newlines, important on Windows machines!
if line[-1] == '\012':
line = line[0:-1]
if self.format == None:
self.process_normal_line( line )
else:
if self.format.end.match( line ):
# that's a normal block end, add it to 'lines' and
# create a new block
self.lines.append( line )
self.add_block_lines()
elif self.format.column.match( line ):
# that's a normal column line, add it to 'lines'
self.lines.append( line )
else:
# humm.. this is an unexpected block end,
# create a new block, but don't process the line
self.add_block_lines()
# we need to process the line again
self.process_normal_line( line )
# record the last lines
self.add_block_lines()
def process_normal_line( self, line ):
"""process a normal line and check whether it is the start of a new block"""
for f in re_source_block_formats:
if f.start.match( line ):
self.add_block_lines()
self.format = f
self.lineno = fileinput.filelineno()
self.lines.append( line )
def add_block_lines( self ):
"""add the current accumulated lines and create a new block"""
if self.lines != []:
block = SourceBlock( self, self.filename, self.lineno, self.lines )
self.blocks.append( block )
self.format = None
self.lines = []
# debugging only, not used in normal operations
def dump( self ):
"""print all blocks in a processor"""
for b in self.blocks:
b.dump()
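# illustrative usage (editorial addition, not part of the original FreeType
# file): parse a tiny C snippet and report how many blocks were found.  note
# that, like the rest of this module, it needs Python 2 to run.
if __name__ == '__main__':
    import tempfile, os
    tmp = tempfile.NamedTemporaryFile( suffix = ".c", delete = False )
    tmp.write( "/*************************************\n"
               " *\n"
               " * <Function>\n"
               " *    demo\n"
               " *\n"
               " **/\n"
               "int  demo( void );\n" )
    tmp.close()
    processor = SourceProcessor()
    processor.parse_file( tmp.name )
    print( "%d blocks found in %s" % ( len( processor.blocks ), tmp.name ) )
    os.unlink( tmp.name )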
# eof
|
IOPool/Common/test/testEdmConfigDump_cfg.py
|
ckamtsikis/cmssw
| 852 |
84156
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("PROD1")
process.source = cms.Source("IntSource")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(3)
)
process.out = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('testEdmProvDump.root'),
outputCommands = cms.untracked.vstring(
'keep *',
'drop *_intProducerA_*_*'
)
)
process.a1 = cms.EDAnalyzer("TestFindProduct",
inputTags = cms.untracked.VInputTag( cms.InputTag("source") ),
expectedSum = cms.untracked.int32(12),
inputTagsNotFound = cms.untracked.VInputTag(
cms.InputTag("source", processName=cms.InputTag.skipCurrentProcess()),
cms.InputTag("intProducer", processName=cms.InputTag.skipCurrentProcess()),
cms.InputTag("intProducerU", processName=cms.InputTag.skipCurrentProcess())
)
)
process.a2 = cms.EDAnalyzer("TestFindProduct",
inputTags = cms.untracked.VInputTag( cms.InputTag("intProducerA") ),
expectedSum = cms.untracked.int32(300)
)
process.a3 = cms.EDAnalyzer("TestFindProduct",
inputTags = cms.untracked.VInputTag( cms.InputTag("aliasForInt") ),
expectedSum = cms.untracked.int32(300)
)
process.intProducer = cms.EDProducer("IntProducer", ivalue = cms.int32(1))
process.intProducerU = cms.EDProducer("IntProducer", ivalue = cms.int32(10))
process.intProducerA = cms.EDProducer("IntProducer", ivalue = cms.int32(100))
process.aliasForInt = cms.EDAlias(
intProducerA = cms.VPSet(
cms.PSet(type = cms.string('edmtestIntProduct')
)
)
)
process.intVectorProducer = cms.EDProducer("IntVectorProducer",
count = cms.int32(9),
ivalue = cms.int32(11)
)
process.t = cms.Task(process.intProducerU, process.intProducerA, process.intVectorProducer)
process.p = cms.Path(process.intProducer * process.a1 * process.a2 * process.a3, process.t)
process.e = cms.EndPath(process.out)
|
raiden/tests/unit/transfer/mediated_transfer/test_mediation_fee.py
|
tirkarthi/raiden
| 2,101 |
84183
|
<reponame>tirkarthi/raiden
from fractions import Fraction
from typing import Tuple
import pytest
from hypothesis import HealthCheck, assume, example, given, settings
from hypothesis.strategies import integers
from raiden.tests.unit.transfer.test_channel import make_hash_time_lock_state
from raiden.tests.utils import factories
from raiden.tests.utils.factories import (
NettingChannelEndStateProperties,
NettingChannelStateProperties,
)
from raiden.tests.utils.mediation_fees import (
get_amount_with_fees,
get_initial_amount_for_amount_after_fees,
)
from raiden.transfer.mediated_transfer.initiator import calculate_safe_amount_with_fee
from raiden.transfer.mediated_transfer.mediation_fee import (
NUM_DISCRETISATION_POINTS,
FeeScheduleState,
Interpolate,
calculate_imbalance_fees,
linspace,
)
from raiden.transfer.mediated_transfer.mediator import get_amount_without_fees
from raiden.transfer.state import NettingChannelState
from raiden.utils.mediation_fees import ppm_fee_per_channel
from raiden.utils.typing import (
Balance,
FeeAmount,
PaymentAmount,
PaymentWithFeeAmount,
ProportionalFeeAmount,
TokenAmount,
)
def test_interpolation():
interp = Interpolate((0, 100), (0, 100))
for i in range(101):
assert interp(i) == i
interp = Interpolate((0, 50, 100), (0, 100, 200))
for i in range(101):
assert interp(i) == 2 * i
interp = Interpolate((0, 50, 100), (0, -50, 50))
assert interp(40) == -40
assert interp(60) == -30
assert interp(90) == 30
assert interp(99) == 48
interp = Interpolate((0, 100), (Fraction("12.35"), Fraction("67.2")))
assert interp(0) == Fraction("12.35")
assert interp(50) == pytest.approx((12.35 + 67.2) / 2)
assert interp(100) == Fraction("67.2")
def test_imbalance_penalty():
r"""Test an imbalance penalty by moving back and forth
The imbalance fee looks like
20 | /
| /
10 |\. /
| \. /
0 | \/
---------------
0 50 100
For each input, we first assume the channel is used to forward tokens to a
    payee, which moves the capacity from x1 to x2. Then we assume the same
    amount is mediated in the opposite direction (moving from x2 to x1) and
check that the calculated fee is the same as before just with the opposite
sign.
"""
v_schedule = FeeScheduleState(
imbalance_penalty=[
(TokenAmount(0), FeeAmount(10)),
(TokenAmount(50), FeeAmount(0)),
(TokenAmount(100), FeeAmount(20)),
]
)
reverse_schedule = FeeScheduleState(
imbalance_penalty=[
(TokenAmount(0), FeeAmount(20)),
(TokenAmount(50), FeeAmount(0)),
(TokenAmount(100), FeeAmount(10)),
]
)
for cap_fees, x1, amount, expected_fee_in, expected_fee_out in [
# Uncapped fees
(False, 0, 50, -8, -10),
(False, 50, 30, 20, 12),
(False, 0, 10, -2, -2),
(False, 10, 10, -2, -2),
(False, 0, 20, -3, -4),
(False, 40, 15, 0, 0),
(False, 50, 31, None, 12),
(False, 100, 1, None, None),
# Capped fees
(True, 0, 50, 0, 0),
(True, 50, 30, 20, 12),
(True, 0, 10, 0, 0),
(True, 10, 10, 0, 0),
(True, 0, 20, 0, 0),
(True, 40, 15, 0, 0),
]:
v_schedule.cap_fees = cap_fees
amount_with_fees = get_amount_with_fees(
amount_without_fees=PaymentWithFeeAmount(amount),
balance_in=Balance(x1),
balance_out=Balance(100),
schedule_in=v_schedule,
schedule_out=FeeScheduleState(cap_fees=cap_fees),
receivable_amount=TokenAmount(100 - x1),
)
if expected_fee_in is None:
assert amount_with_fees is None
else:
assert amount_with_fees is not None
assert amount_with_fees - amount == FeeAmount(expected_fee_in)
reverse_schedule.cap_fees = cap_fees
amount_with_fees = get_amount_with_fees(
amount_without_fees=PaymentWithFeeAmount(amount),
balance_in=Balance(0),
balance_out=Balance(100 - x1),
schedule_in=FeeScheduleState(cap_fees=cap_fees),
schedule_out=reverse_schedule,
receivable_amount=TokenAmount(100),
)
if expected_fee_out is None:
assert amount_with_fees is None
else:
assert amount_with_fees is not None
assert amount_with_fees - amount == FeeAmount(expected_fee_out)
def test_fee_capping():
r""" Test the capping when one section of the fee function crossed from the
positive into negative fees. Here, our fee curve looks like:
Fee
|
5 +
|\
| \
0 +--+-----+-> incoming_amount
| 25\ 100
| \
| \
| \
| \
-15 + \
0
    When capping it, we need to insert the intersection point of (25, 0) into
    our piecewise linear function before capping all negative y values to zero.
Otherwise we would just interpolate between (0, 5) and (100, 0).
"""
schedule = FeeScheduleState(
imbalance_penalty=[(TokenAmount(0), FeeAmount(0)), (TokenAmount(100), FeeAmount(20))],
flat=FeeAmount(5),
)
fee_func = FeeScheduleState.mediation_fee_func(
schedule_in=FeeScheduleState(),
schedule_out=schedule,
balance_in=Balance(0),
balance_out=Balance(100),
receivable=TokenAmount(100),
amount_with_fees=PaymentWithFeeAmount(5),
cap_fees=True,
)
assert fee_func(30) == 0 # 5 - 6, capped
assert fee_func(20) == 5 - 4
def test_linspace():
assert linspace(TokenAmount(0), TokenAmount(4), 5) == [0, 1, 2, 3, 4]
assert linspace(TokenAmount(0), TokenAmount(4), 4) == [0, 1, 3, 4]
assert linspace(TokenAmount(0), TokenAmount(4), 3) == [0, 2, 4]
assert linspace(TokenAmount(0), TokenAmount(4), 2) == [0, 4]
assert linspace(TokenAmount(0), TokenAmount(0), 3) == [0, 0, 0]
with pytest.raises(AssertionError):
assert linspace(TokenAmount(0), TokenAmount(4), 1)
with pytest.raises(AssertionError):
assert linspace(TokenAmount(4), TokenAmount(0), 2)
def test_rebalancing_fee_calculation():
sample = calculate_imbalance_fees(TokenAmount(200), ProportionalFeeAmount(50_000)) # 5%
assert sample is not None
assert len(sample) == NUM_DISCRETISATION_POINTS
assert all(0 <= x <= 200 for x, _ in sample)
assert max(x for x, _ in sample) == 200
assert all(0 <= y <= 10 for _, y in sample)
assert max(y for _, y in sample) == 10 # 5% of the 200 TokenAmount capacity
sample = calculate_imbalance_fees(TokenAmount(100), ProportionalFeeAmount(20_000)) # 2%
assert sample is not None
assert len(sample) == NUM_DISCRETISATION_POINTS
assert all(0 <= x <= 100 for x, _ in sample)
assert max(x for x, _ in sample) == 100
assert all(0 <= y <= 2 for _, y in sample)
assert max(y for _, y in sample) == 2 # 2% of the 100 TokenAmount capacity
sample = calculate_imbalance_fees(TokenAmount(15), ProportionalFeeAmount(50_000)) # 5%
assert sample is not None
assert len(sample) == 16
assert all(0 <= x <= 16 for x, _ in sample)
assert max(x for x, _ in sample) == 15
assert all(0 <= y <= 1 for _, y in sample)
    assert max(y for _, y in sample) == 1  # 5% of the 15 TokenAmount capacity, rounded up
# test rounding of the max_balance_fee calculation
sample = calculate_imbalance_fees(TokenAmount(1000), ProportionalFeeAmount(5_490)) # 0.549%
assert sample is not None
assert len(sample) == NUM_DISCRETISATION_POINTS
assert all(0 <= x <= 1000 for x, _ in sample)
assert max(x for x, _ in sample) == 1000
assert all(0 <= y <= 5 for _, y in sample)
assert max(y for _, y in sample) == 5 # 5.49 is rounded to 5
sample = calculate_imbalance_fees(TokenAmount(1000), ProportionalFeeAmount(5_500)) # 0.55%
assert sample is not None
assert len(sample) == NUM_DISCRETISATION_POINTS
assert all(0 <= x <= 1000 for x, _ in sample)
assert max(x for x, _ in sample) == 1000
assert all(0 <= y <= 6 for _, y in sample)
assert max(y for _, y in sample) == 6 # 5.5 is rounded to 6
# test cases where no imbalance fee is created
assert calculate_imbalance_fees(TokenAmount(0), ProportionalFeeAmount(1)) is None
assert calculate_imbalance_fees(TokenAmount(10), ProportionalFeeAmount(0)) is None
@pytest.mark.parametrize(
"flat_fee, prop_fee, initial_amount, expected_amount",
[
# pure flat fee
(50, 0, 1000, 1000 - 50 - 50),
# proportional fee
(0, 1_000_000, 2000, 1000), # 100% per hop mediation fee
(0, 100_000, 1100, 1000), # 10% per hop mediation fee
(0, 50_000, 1050, 1000), # 5% per hop mediation fee
(0, 10_000, 1010, 1000), # 1% per hop mediation fee
(0, 10_000, 101, 100), # 1% per hop mediation fee
        (0, 4_990, 100, 100),  # 0.499% per hop mediation fee gets rounded away
# mixed tests
(1, 500_000, 1000 + 500 + 2, 1000),
(10, 500_000, 1000 + 500 + 20, 997),
(100, 500_000, 1000 + 500 + 200, 967),
# -
(1, 100_000, 1000 + 100 + 2, 1000),
(10, 100_000, 1000 + 100 + 20, 999),
(100, 100_000, 1000 + 100 + 200, 991),
# -
(1, 10_000, 1000 + 10 + 2, 1000),
(10, 10_000, 1000 + 10 + 20, 1000),
(100, 10_000, 1000 + 10 + 200, 999),
# -
(100, 500_000, 1000 + 750, 1000),
# - values found in run_test_mediated_transfer_with_fees
(0, 200_000, 47 + 9, 47),
(0, 200_000, 39 + 8, 39),
],
)
def test_get_lock_amount_after_fees(flat_fee, prop_fee, initial_amount, expected_amount):
"""Tests mediation fee deduction."""
prop_fee_per_channel = ppm_fee_per_channel(ProportionalFeeAmount(prop_fee))
lock = make_hash_time_lock_state(amount=initial_amount)
channel_in = factories.create(
NettingChannelStateProperties(
partner_state=NettingChannelEndStateProperties(balance=TokenAmount(2000)),
fee_schedule=FeeScheduleState(flat=flat_fee, proportional=prop_fee_per_channel),
)
)
channel_out = factories.create(
NettingChannelStateProperties(
our_state=NettingChannelEndStateProperties(balance=TokenAmount(2000)),
fee_schedule=FeeScheduleState(flat=flat_fee, proportional=prop_fee_per_channel),
)
)
locked_after_fees = get_amount_without_fees(
amount_with_fees=lock.amount, channel_in=channel_in, channel_out=channel_out
)
assert locked_after_fees == expected_amount
@pytest.mark.parametrize(
"cap_fees, flat_fee, prop_fee, imbalance_fee, initial_amount, expected_amount",
[
# No capping of the mediation fees
# The higher the imbalance fee, the stronger the impact of the fee iteration
(False, 0, 0, 10_000, 50_000, 50_000 + 2_000),
(False, 0, 0, 20_000, 50_000, 50_000 + 3_995),
(False, 0, 0, 30_000, 50_000, 50_000 + 5_910),
(False, 0, 0, 40_000, 50_000, 50_000 + 7_613),
(False, 0, 0, 50_000, 50_000, 50_000 + 9_091),
# Capping of mediation fees
(True, 0, 0, 10_000, 50_000, 50_000),
(True, 0, 0, 20_000, 50_000, 50_000),
(True, 0, 0, 30_000, 50_000, 50_000),
(True, 0, 0, 40_000, 50_000, 50_000),
(True, 0, 0, 50_000, 50_000, 50_000),
],
)
def test_get_lock_amount_after_fees_imbalanced_channel(
cap_fees, flat_fee, prop_fee, imbalance_fee, initial_amount, expected_amount
):
"""Tests mediation fee deduction."""
balance = TokenAmount(100_000)
prop_fee_per_channel = ppm_fee_per_channel(ProportionalFeeAmount(prop_fee))
imbalance_fee = calculate_imbalance_fees(
channel_capacity=balance, proportional_imbalance_fee=ProportionalFeeAmount(imbalance_fee)
)
lock = make_hash_time_lock_state(amount=initial_amount)
channel_in = factories.create(
NettingChannelStateProperties(
our_state=NettingChannelEndStateProperties(balance=TokenAmount(0)),
partner_state=NettingChannelEndStateProperties(balance=balance),
fee_schedule=FeeScheduleState(
cap_fees=cap_fees,
flat=FeeAmount(flat_fee),
proportional=prop_fee_per_channel,
imbalance_penalty=imbalance_fee,
),
)
)
channel_out = factories.create(
NettingChannelStateProperties(
our_state=NettingChannelEndStateProperties(balance=balance),
partner_state=NettingChannelEndStateProperties(balance=TokenAmount(0)),
fee_schedule=FeeScheduleState(
cap_fees=cap_fees,
flat=FeeAmount(flat_fee),
proportional=prop_fee_per_channel,
imbalance_penalty=imbalance_fee,
),
)
)
locked_after_fees = get_amount_without_fees(
amount_with_fees=lock.amount, channel_in=channel_in, channel_out=channel_out
)
assert locked_after_fees == expected_amount
@given(
integers(min_value=0, max_value=100),
integers(min_value=0, max_value=10_000),
integers(min_value=0, max_value=50_000),
integers(min_value=1, max_value=90_000_000_000_000_000),
integers(min_value=1, max_value=100_000_000_000_000_000),
integers(min_value=1, max_value=100_000_000_000_000_000),
)
@settings(suppress_health_check=[HealthCheck.filter_too_much])
def test_fee_round_trip(flat_fee, prop_fee, imbalance_fee, amount, balance1, balance2):
"""Tests mediation fee deduction.
First we're doing a PFS-like calculation going backwards from the target
amount to get the amount that the initiator has to send. Then we calculate
the fees from a mediator's point of view and check if `amount_with_fees -
fees = amount`.
"""
# Find examples where there is a reasonable chance of succeeding
amount = int(min(amount, balance1 * 0.95 - 1, balance2 * 0.95 - 1))
assume(amount > 0)
total_balance = TokenAmount(100_000_000_000_000_000_000)
prop_fee_per_channel = ppm_fee_per_channel(ProportionalFeeAmount(prop_fee))
imbalance_fee = calculate_imbalance_fees(
channel_capacity=total_balance,
proportional_imbalance_fee=ProportionalFeeAmount(imbalance_fee),
)
channel_in = factories.create(
NettingChannelStateProperties(
our_state=NettingChannelEndStateProperties(balance=total_balance - balance1),
partner_state=NettingChannelEndStateProperties(balance=balance1),
fee_schedule=FeeScheduleState(
cap_fees=False,
flat=FeeAmount(flat_fee),
proportional=prop_fee_per_channel,
imbalance_penalty=imbalance_fee,
),
)
)
channel_out = factories.create(
NettingChannelStateProperties(
our_state=NettingChannelEndStateProperties(balance=balance2),
partner_state=NettingChannelEndStateProperties(balance=total_balance - balance2),
fee_schedule=FeeScheduleState(
cap_fees=False,
flat=FeeAmount(flat_fee),
proportional=prop_fee_per_channel,
imbalance_penalty=imbalance_fee,
),
)
)
# How much do we need to send so that the target receives `amount`? PFS-like calculation.
fee_calculation = get_initial_amount_for_amount_after_fees(
amount_after_fees=PaymentAmount(amount), channels=[(channel_in, channel_out)]
)
assume(fee_calculation) # There is not enough capacity for the payment in all cases
assert fee_calculation
# How much would a mediator send to the target? Ideally exactly `amount`.
amount_without_margin_after_fees = get_amount_without_fees(
amount_with_fees=fee_calculation.total_amount,
channel_in=channel_in,
channel_out=channel_out,
)
assume(amount_without_margin_after_fees) # We might lack capacity for the payment
assert abs(amount - amount_without_margin_after_fees) <= 1 # Equal except for rounding errors
# If we add the fee margin, the mediator must always send at least `amount` to the target!
amount_with_fee_and_margin = calculate_safe_amount_with_fee(
fee_calculation.amount_without_fees, FeeAmount(sum(fee_calculation.mediation_fees))
)
amount_with_margin_after_fees = get_amount_without_fees(
amount_with_fees=amount_with_fee_and_margin, channel_in=channel_in, channel_out=channel_out
)
assume(amount_with_margin_after_fees) # We might lack capacity to add margins
assert amount_with_margin_after_fees >= amount
@example(flat_fee=0, prop_fee=0, imbalance_fee=1277, amount=1, balance1=33, balance2=481)
@given(
integers(min_value=0, max_value=100),
integers(min_value=0, max_value=10_000),
integers(min_value=0, max_value=50_000),
integers(min_value=1, max_value=90_000_000_000_000_000_000),
integers(min_value=1, max_value=100_000_000_000_000_000_000),
integers(min_value=1, max_value=100_000_000_000_000_000_000),
)
@settings(suppress_health_check=[HealthCheck.filter_too_much])
def test_fee_add_remove_invariant(flat_fee, prop_fee, imbalance_fee, amount, balance1, balance2):
"""First adding and then removing fees must yield the original value"""
total_balance = TokenAmount(100_000_000_000_000_000_000)
prop_fee_per_channel = ppm_fee_per_channel(ProportionalFeeAmount(prop_fee))
imbalance_fee = calculate_imbalance_fees(
channel_capacity=total_balance,
proportional_imbalance_fee=ProportionalFeeAmount(imbalance_fee),
)
fee_schedule = FeeScheduleState(
cap_fees=False,
flat=FeeAmount(flat_fee),
proportional=prop_fee_per_channel,
imbalance_penalty=imbalance_fee,
)
channel_in = factories.create(
NettingChannelStateProperties(
our_state=NettingChannelEndStateProperties(balance=total_balance - balance1),
partner_state=NettingChannelEndStateProperties(balance=balance1),
fee_schedule=fee_schedule,
)
)
channel_out = factories.create(
NettingChannelStateProperties(
our_state=NettingChannelEndStateProperties(balance=balance2),
partner_state=NettingChannelEndStateProperties(balance=total_balance - balance2),
fee_schedule=fee_schedule,
)
)
amount_with_fees = get_amount_with_fees(
amount_without_fees=amount,
schedule_in=channel_in.fee_schedule,
schedule_out=channel_out.fee_schedule,
receivable_amount=balance1,
balance_in=total_balance - balance1,
balance_out=balance2,
)
assume(amount_with_fees)
assert amount_with_fees
amount_without_fees = get_amount_without_fees(
amount_with_fees=amount_with_fees, channel_in=channel_in, channel_out=channel_out
)
assume(amount_without_fees)
assert amount - 1 <= amount_without_fees <= amount + 1
def running_sum(a):
total = 0
for item in a:
total += item
yield total
def make_channel_pair(
fee_schedule: FeeScheduleState, balance1: int = 0, balance2: int = 0
) -> Tuple[NettingChannelState, NettingChannelState]:
balance1 = TokenAmount(balance1)
balance2 = TokenAmount(balance2)
return (
factories.create(
NettingChannelStateProperties(
our_state=NettingChannelEndStateProperties(balance=balance2),
partner_state=NettingChannelEndStateProperties(balance=balance1),
fee_schedule=fee_schedule,
)
),
factories.create(
NettingChannelStateProperties(
our_state=NettingChannelEndStateProperties(balance=balance1),
partner_state=NettingChannelEndStateProperties(balance=balance2),
fee_schedule=fee_schedule,
)
),
)
def test_mfee1():
"""Unit test for the fee calculation in the mfee1_flat_fee scenario"""
amount = 10_000
deposit = 100_000
flat_fee = 100 // 2
fee_schedule = FeeScheduleState(flat=FeeAmount(flat_fee))
channels = make_channel_pair(fee_schedule, deposit)
# How much do we need to send so that the target receives `amount`? PFS-like calculation.
fee_calculation = get_initial_amount_for_amount_after_fees(
amount_after_fees=PaymentAmount(amount), channels=[channels, channels]
)
assert fee_calculation
amount_with_margin = calculate_safe_amount_with_fee(
fee_calculation.amount_without_fees, FeeAmount(sum(fee_calculation.mediation_fees))
)
assert amount_with_margin == 10_211
# print values for scenario
print(deposit - amount_with_margin, amount_with_margin)
for med_fee in running_sum(fee_calculation.mediation_fees):
print(deposit - amount_with_margin + med_fee, amount_with_margin - med_fee)
def test_mfee2():
"""Unit test for the fee calculation in the mfee2_proportional_fees scenario"""
amount = 10_000
deposit = 100_000
prop_fee = ppm_fee_per_channel(ProportionalFeeAmount(10_000))
fee_schedule = FeeScheduleState(proportional=ProportionalFeeAmount(prop_fee))
channels = make_channel_pair(fee_schedule, deposit)
# How much do we need to send so that the target receives `amount`? PFS-like calculation.
fee_calculation = get_initial_amount_for_amount_after_fees(
amount_after_fees=PaymentAmount(amount), channels=[channels, channels]
)
assert fee_calculation
amount_with_margin = calculate_safe_amount_with_fee(
fee_calculation.amount_without_fees, FeeAmount(sum(fee_calculation.mediation_fees))
)
assert amount_with_margin == 10_213
# print values for scenario
print(deposit - amount_with_margin, amount_with_margin)
for med_fee in running_sum(fee_calculation.mediation_fees):
print(deposit - amount_with_margin + med_fee, amount_with_margin - med_fee)
def test_mfee3():
"""Unit test for the fee calculation in the mfee3_only_imbalance_fees scenario"""
amount = 500_000_000_000_000_000
deposit = TokenAmount(1_000_000_000_000_000_000)
imbalance_penalty = calculate_imbalance_fees(deposit, ProportionalFeeAmount(10_000))
fee_schedule = FeeScheduleState(imbalance_penalty=imbalance_penalty, cap_fees=False)
channels = make_channel_pair(fee_schedule, deposit)
# How much do we need to send so that the target receives `amount`? PFS-like calculation.
fee_calculation = get_initial_amount_for_amount_after_fees(
amount_after_fees=PaymentAmount(amount), channels=[channels]
)
assert fee_calculation
amount_with_margin = calculate_safe_amount_with_fee(
fee_calculation.amount_without_fees, FeeAmount(sum(fee_calculation.mediation_fees))
)
assert amount_with_margin == 480_850_038_799_922_400
# print values for scenario
print("{:_} {:_}".format(deposit - amount_with_margin, amount_with_margin))
for med_fee in running_sum(fee_calculation.mediation_fees):
print(
"{:_} {:_}".format(
deposit - amount_with_margin + med_fee, amount_with_margin - med_fee
)
)
def test_mfee4():
"""Unit test for the fee calculation in the mfee4_combined_fees scenario"""
amount = PaymentAmount(500_000_000_000_000_000)
deposit = 1_000_000_000_000_000_000
prop_fee = ppm_fee_per_channel(ProportionalFeeAmount(10_000))
imbalance_penalty = calculate_imbalance_fees(
TokenAmount(deposit * 2), ProportionalFeeAmount(20_000)
)
fee_schedule = FeeScheduleState(
flat=FeeAmount(100 // 2),
proportional=prop_fee,
imbalance_penalty=imbalance_penalty,
cap_fees=False,
)
channels = make_channel_pair(fee_schedule, deposit, deposit)
# How much do we need to send so that the target receives `amount`? PFS-like calculation.
fee_calculation = get_initial_amount_for_amount_after_fees(
amount_after_fees=PaymentAmount(amount), channels=[channels, channels]
)
assert fee_calculation
amount_with_margin = calculate_safe_amount_with_fee(
amount, FeeAmount(sum(fee_calculation.mediation_fees))
)
# Calculate mediation fees for both mediators
med_fees = []
incoming_amount = amount_with_margin
for _ in range(2):
outgoing_amount = get_amount_without_fees(
amount_with_fees=incoming_amount, channel_in=channels[0], channel_out=channels[1]
)
assert outgoing_amount
med_fees.append(incoming_amount - outgoing_amount)
incoming_amount = outgoing_amount
assert amount_with_margin == 543_503_066_141_505_551
# print values for scenario
print("{:_} {:_}".format(deposit - amount_with_margin, deposit + amount_with_margin))
for med_fee in running_sum(med_fees):
print(
"{:_} {:_}".format(
deposit - amount_with_margin + med_fee, deposit + amount_with_margin - med_fee
)
)
|
rlbench/tasks/put_money_in_safe.py
|
Nicolinho/RLBench
| 619 |
84201
|
from typing import List, Tuple
import numpy as np
from pyrep.objects.shape import Shape
from pyrep.objects.dummy import Dummy
from pyrep.objects.proximity_sensor import ProximitySensor
from rlbench.backend.task import Task
from rlbench.backend.conditions import DetectedCondition, NothingGrasped
from rlbench.backend.spawn_boundary import SpawnBoundary
NUM_SHELVES_IN_SAFE = 3
class PutMoneyInSafe(Task):
def init_task(self) -> None:
self.index_dic = {0: 'bottom', 1: 'middle', 2: 'top'}
self.money = Shape('dollar_stack')
self.money_boundary = Shape('dollar_stack_boundary')
self.register_graspable_objects([self.money])
self.success_conditions = [NothingGrasped(self.robot.gripper)]
self.w1_rel_pos = [-2.7287 * 10 ** (-4), -2.3246 * 10 ** (-6),
+4.5627 * 10 ** (-2)]
self.w1_rel_ori = [-3.1416, 7.2824 * 10 ** (-1), -2.1265 * 10 ** (-2)]
def init_episode(self, index: int) -> List[str]:
self.target_shelf = index
w4 = Dummy('waypoint4')
target_dummy_name = 'dummy_shelf' + str(self.target_shelf)
target_pos_dummy = Dummy(target_dummy_name)
target_pos = target_pos_dummy.get_position()
w4.set_position(target_pos, reset_dynamics=False)
self.success_detector = ProximitySensor(
('success_detector' + str(self.target_shelf))
)
while len(self.success_conditions) > 1:
self.success_conditions.pop()
self.success_conditions.append(
DetectedCondition(self.money, self.success_detector)
)
self.register_success_conditions(self.success_conditions)
b = SpawnBoundary([self.money_boundary])
b.sample(self.money,
min_rotation=(0.00, 0.00, 0.00),
max_rotation=(0.00, 0.00, +0.5 * np.pi))
return ['put the money away in the safe on the %s shelf'
% self.index_dic[index],
'leave the money on the %s shelf on the safe'
% self.index_dic[index],
'place the stack of bank notes on the %s shelf of the safe'
% self.index_dic[index]]
def variation_count(self) -> int:
return NUM_SHELVES_IN_SAFE
def base_rotation_bounds(self) -> Tuple[List[float], List[float]]:
return [0.0, 0.0, 0.0], [0.0, 0.0, +0.5 * np.pi]
|
aerosandbox/aerodynamics/aero_3D/avl.py
|
msberk/AeroSandbox
| 322 |
84219
|
from aerosandbox.common import ExplicitAnalysis
import aerosandbox.numpy as np
import subprocess
from pathlib import Path
from aerosandbox.geometry import Airplane
from aerosandbox.performance import OperatingPoint
from typing import Union, List, Dict
import tempfile
import warnings
class AVL(ExplicitAnalysis):
"""
An interface to AVL, a 3D vortex lattice aerodynamics code developed by <NAME> at MIT.
Requires AVL to be on your computer; AVL is available here: https://web.mit.edu/drela/Public/web/avl/
It is recommended (but not required) that you add AVL to your system PATH environment variable such that it can
be called with the command `avl`. If this is not the case, you need to specify the path to your AVL
executable using the `avl_command` argument of the constructor.
Usage example:
>>>avl = asb.AVL(
>>> airplane=my_airplane,
>>> op_point=asb.OperatingPoint(
>>> velocity=100, # m/s
>>> alpha=5, # deg
>>> beta=4, # deg
>>> p=0.01, # rad/sec
>>> q=0.02, # rad/sec
>>> r=0.03, # rad/sec
>>> )
>>>)
>>>outputs = avl.run()
"""
def __init__(self,
airplane: Airplane,
op_point: OperatingPoint = OperatingPoint(),
avl_command: str = "avl",
verbose: bool = False,
working_directory: str = None,
):
"""
Interface to AVL.
Args:
airplane: The airplane object you wish to analyze.
op_point: The operating point you wish to analyze at.
avl_command: The command-line argument to call AVL.
* If AVL is on your system PATH, then you can just leave this as "avl".
                * If AVL is not on your system PATH, then you should provide a filepath to the AVL executable.
Note that AVL is not on your PATH by default. To tell if AVL is on your system PATH, open up a
terminal and type "avl".
* If the AVL menu appears, it's on your PATH.
* If you get something like "'avl' is not recognized as an internal or external command..." or
"Command 'avl' not found, did you mean...", then it is not on your PATH and you'll need to
specify the location of your AVL executable as a string.
To add AVL to your path, modify your system's environment variables. (Google how to do this for your OS.)
verbose:
working_directory:
"""
self.airplane = airplane
self.op_point = op_point
self.avl_command = avl_command
self.verbose = verbose
self.working_directory = working_directory
def run(self) -> Dict:
return self._run_avl()
def _default_keystroke_file_contents(self) -> List[str]:
run_file_contents = []
# Disable graphics
run_file_contents += [
"plop",
"g",
"",
]
# Enter oper mode
run_file_contents += [
"oper",
]
# Set parameters
run_file_contents += [
"m"
f"mn {self.op_point.mach()}",
f"v {self.op_point.velocity}",
f"d {self.op_point.atmosphere.density()}",
"g 9.81",
""
]
# Set analysis state
p_bar = self.op_point.p * self.airplane.b_ref / (2 * self.op_point.velocity)
q_bar = self.op_point.q * self.airplane.c_ref / (2 * self.op_point.velocity)
r_bar = self.op_point.r * self.airplane.b_ref / (2 * self.op_point.velocity)
run_file_contents += [
f"a a {self.op_point.alpha}",
f"b b {self.op_point.beta}",
f"r r {p_bar}",
f"p p {q_bar}",
f"y y {r_bar}"
]
return run_file_contents
def _run_avl(self,
run_command: str = None,
) -> Dict[str, np.ndarray]:
"""
Private function to run AVL.
Args: run_command: A string with any AVL keystroke inputs that you'd like. By default, you start off within the OPER
menu. All of the inputs indicated in the constructor have been set already, but you can override them here (
for this run only) if you want.
Returns: A dictionary containing all of your results.
"""
with tempfile.TemporaryDirectory() as directory:
directory = Path(directory)
### Alternatively, work in another directory:
if self.working_directory is not None:
directory = Path(self.working_directory) # For debugging
# Designate an intermediate file for file I/O
output_filename = "output.txt"
with open(directory / output_filename, "w+") as f:
pass
# Handle the airplane file
airplane_file = "airplane.avl"
self.airplane.write_avl(directory / airplane_file)
# Handle the run file
keystroke_file_contents = self._default_keystroke_file_contents()
if run_command is not None:
keystroke_file_contents += [run_command]
keystroke_file_contents += [
"x",
"st",
f"{output_filename}",
"o",
"",
"",
"quit"
]
keystroke_file = "keystroke_file.txt"
with open(directory / keystroke_file, "w+") as f:
f.write(
"\n".join(keystroke_file_contents)
)
command = f'{self.avl_command} {airplane_file} < {keystroke_file}'
### Execute
subprocess.call(
command,
shell=True,
cwd=directory,
stdout=None if self.verbose else subprocess.DEVNULL
)
##### Parse the output file
# Read the file
with open(directory / output_filename, "r") as f:
output_data = f.read()
# Trim off the first few lines that contain name, # of panels, etc.
output_data = "\n".join(output_data.split("\n")[8:])
### Iterate through the string to find all the numeric values, based on where "=" appears.
values = []
index = output_data.find("=")
while index != -1:
output_data = output_data[index + 1:]
number = output_data[:12].split("\n")[0]
number = float(number)
values.append(number)
index = output_data.find("=")
### Record the keys associated with those values:
keys = [
"Sref",
"Cref",
"Bref",
"Xref",
"Yref",
"Zref",
"alpha",
"pb/2V",
"p'b/2V",
"beta",
"qc/2V",
"mach",
"rb/2V",
"r'b/2V",
"CX", # Note: these refer to "CXtot", etc. in AVL, but the "tot" is redundant.
"Cl",
"Cl'",
"CY",
"Cm",
"CZ",
"Cn",
"Cn'",
"CL",
"CD",
"CDvis",
"CDind",
"CLff",
"CDff",
"Cyff",
"e",
"CLa",
"CLb",
"CYa",
"CYb",
"Cla",
"Clb",
"Cma",
"Cmb",
"Cna",
"Cnb",
"CLp",
"CLq",
"CLr",
"CYp",
"CYq",
"CYr",
"Clp",
"Clq",
"Clr",
"Cmp",
"Cmq",
"Cmr",
"Cnp",
"Cnq",
"Cnr",
"Xnp",
"Clb Cnr / Clr Cnb"
]
if len(values) != 57 and len(values) != 56: # Sometimes the spiral mode term is inexplicably not displayed by AVL
raise RuntimeError(
"AVL could not run for some reason!\n"
"Investigate by turning on the `verbose` flag and looking at the output.\n"
"(Common culprit: angular rates too high.)"
)
res = {
k: v
for k, v in zip(
keys, values
)
}
##### Add a few more outputs for ease of use
res["p"] = res["pb/2V"] * (2 * self.op_point.velocity / self.airplane.b_ref)
res["q"] = res["qc/2V"] * (2 * self.op_point.velocity / self.airplane.c_ref)
res["r"] = res["rb/2V"] * (2 * self.op_point.velocity / self.airplane.b_ref)
return res
if __name__ == '__main__':
### Import Vanilla Airplane
import aerosandbox as asb
from pathlib import Path
geometry_folder = Path(asb.__file__).parent.parent / "tutorial" / "04 - Geometry" / "example_geometry"
import sys
sys.path.insert(0, str(geometry_folder))
from vanilla import airplane as vanilla
### Do the AVL run
avl = AVL(
airplane=vanilla,
op_point=OperatingPoint(
atmosphere=asb.Atmosphere(altitude=0),
velocity=1,
alpha=0.433476,
beta=0,
p=0,
q=0,
r=0,
),
)
res = avl.run()
for k, v in res.items():
print(f"{str(k).rjust(10)} : {v}")
|
nasbench301/surrogate_models/bananas/bananas_src/bo/acq/__init__.py
|
Basvanstein/nasbench301
| 167 |
84225
|
<reponame>Basvanstein/nasbench301
"""
Code for acquisition strategies.
"""
|
pycrc/expr.py
|
jbosboom/pycrc
| 118 |
84233
|
# pycrc -- parameterisable CRC calculation utility and C source code generator
#
# Copyright (c) 2017 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
This modules simplifies an expression.
import pycrc.expr as exp
my_expr = exp.Xor('var', exp.Parenthesis(exp.And('0x700', 4)))
print('"{}" -> "{}"'.format(my_expr, my_expr.simplify()))
"""
def _classify(val):
"""
Creates a Terminal object if the parameter is a string or an integer.
"""
if type(val) is int:
return Terminal(val)
if type(val) is str:
if val.isdigit():
return Terminal(int(val), val)
if val[:2].lower() == '0x':
return Terminal(int(val, 16), val)
return Terminal(val)
return val
class Expression(object):
"""
Base class for all expressions.
"""
def is_int(self, val = None):
return False
class Terminal(Expression):
"""
A terminal object.
"""
def __init__(self, val, pretty = None):
"""
Construct a Terminal.
The val variable is usually a string or an integer. Integers may also
be passed as strings. The pretty-printer will use the string when
formatting the expression.
"""
self.val = val
self.pretty = pretty
def __str__(self):
"""
Return the string expression of this object.
"""
if self.pretty is None:
return str(self.val)
return self.pretty
def simplify(self):
"""
Return a simplified version of this sub-expression.
"""
return self
def is_int(self, val = None):
"""
Return True if the value of this Terminal is an integer.
"""
if type(self.val) is int:
return val is None or self.val == val
return False
class FunctionCall(Expression):
"""
Represent a function call
"""
def __init__(self, name, args):
"""
Construct a function call object.
"""
self.name = _classify(name)
self.args = [_classify(arg) for arg in args]
def __str__(self):
"""
Return the string expression of this object.
"""
return str(self.name) + '(' + ', '.join([str(arg) for arg in self.args]) + ')'
def simplify(self):
"""
Return a simplified version of this sub-expression.
"""
args = [arg.simplify() for arg in self.args]
return FunctionCall(self.name, args)
class Parenthesis(Expression):
"""
Represent a pair of round brackets.
"""
def __init__(self, val):
"""
Construct a parenthesis object.
"""
self.val = _classify(val)
def simplify(self):
"""
Return a simplified version of this sub-expression.
"""
val = self.val.simplify()
if type(val) is Terminal:
return val
return Parenthesis(val)
def __str__(self):
"""
Return the string expression of this object.
"""
return '(' + str(self.val) + ')'
class Add(Expression):
"""
Represent an addition of operands.
"""
def __init__(self, lhs, rhs):
"""
Construct an addition object.
"""
self.lhs = _classify(lhs)
self.rhs = _classify(rhs)
def simplify(self):
"""
Return a simplified version of this sub-expression.
"""
lhs = self.lhs.simplify()
rhs = self.rhs.simplify()
if lhs.is_int() and rhs.is_int():
return Terminal(lhs.val + rhs.val)
if lhs.is_int(0):
return rhs
if rhs.is_int(0):
return lhs
return Add(lhs, rhs)
def __str__(self):
"""
Return the string expression of this object.
"""
return str(self.lhs) + ' + ' + str(self.rhs)
class Sub(Expression):
"""
Represent a subtraction of operands.
"""
def __init__(self, lhs, rhs):
"""
Construct subtraction object.
"""
self.lhs = _classify(lhs)
self.rhs = _classify(rhs)
def simplify(self):
"""
Return a simplified version of this sub-expression.
"""
lhs = self.lhs.simplify()
rhs = self.rhs.simplify()
if lhs.is_int() and rhs.is_int():
return Terminal(lhs.val - rhs.val)
        # no shortcut for a zero left-hand side, since 0 - rhs is not rhs
if rhs.is_int(0):
return lhs
return Sub(lhs, rhs)
def __str__(self):
"""
Return the string expression of this object.
"""
return str(self.lhs) + ' - ' + str(self.rhs)
class Mul(Expression):
"""
Represent the multiplication of operands.
"""
def __init__(self, lhs, rhs):
"""
Construct a multiplication object.
"""
self.lhs = _classify(lhs)
self.rhs = _classify(rhs)
def simplify(self):
"""
Return a simplified version of this sub-expression.
"""
lhs = self.lhs.simplify()
rhs = self.rhs.simplify()
if lhs.is_int() and rhs.is_int():
return Terminal(lhs.val * rhs.val)
if lhs.is_int(0) or rhs.is_int(0):
return Terminal(0)
if lhs.is_int(1):
return rhs
if rhs.is_int(1):
return lhs
return Mul(lhs, rhs)
def __str__(self):
"""
Return the string expression of this object.
"""
return str(self.lhs) + ' * ' + str(self.rhs)
class Shl(Expression):
"""
Shift left operation.
"""
def __init__(self, lhs, rhs):
"""
Construct a shift left object.
"""
self.lhs = _classify(lhs)
self.rhs = _classify(rhs)
def simplify(self):
"""
Return a simplified version of this sub-expression.
"""
lhs = self.lhs.simplify()
rhs = self.rhs.simplify()
if lhs.is_int() and rhs.is_int():
return Terminal(lhs.val << rhs.val)
if lhs.is_int(0):
return Terminal(0)
if rhs.is_int(0):
return lhs
return Shl(lhs, rhs)
def __str__(self):
"""
Return the string expression of this object.
"""
return str(self.lhs) + ' << ' + str(self.rhs)
class Shr(Expression):
"""
Shift right operation.
"""
def __init__(self, lhs, rhs):
"""
Construct a shift right object.
"""
self.lhs = _classify(lhs)
self.rhs = _classify(rhs)
def simplify(self):
"""
Return a simplified version of this sub-expression.
"""
lhs = self.lhs.simplify()
rhs = self.rhs.simplify()
if lhs.is_int() and rhs.is_int():
return Terminal(lhs.val >> rhs.val)
if lhs.is_int(0):
return Terminal(0)
if rhs.is_int(0):
return lhs
return Shr(lhs, rhs)
def __str__(self):
"""
Return the string expression of this object.
"""
return str(self.lhs) + ' >> ' + str(self.rhs)
class Or(Expression):
"""
Logical or operation.
"""
def __init__(self, lhs, rhs):
"""
        Construct a logical or object.
"""
self.lhs = _classify(lhs)
self.rhs = _classify(rhs)
def simplify(self):
"""
Return a simplified version of this sub-expression.
"""
lhs = self.lhs.simplify()
rhs = self.rhs.simplify()
if lhs.is_int() and rhs.is_int():
return Terminal(lhs.val | rhs.val)
if lhs.is_int(0):
return rhs
if rhs.is_int(0):
return lhs
return Or(lhs, rhs)
def __str__(self):
"""
Return the string expression of this object.
"""
return str(self.lhs) + ' | ' + str(self.rhs)
class And(Expression):
"""
Logical and operation.
"""
def __init__(self, lhs, rhs):
"""
Construct a logical and object.
"""
self.lhs = _classify(lhs)
self.rhs = _classify(rhs)
def simplify(self):
"""
Return a simplified version of this sub-expression.
"""
lhs = self.lhs.simplify()
rhs = self.rhs.simplify()
if lhs.is_int() and rhs.is_int():
return Terminal(lhs.val & rhs.val)
if lhs.is_int(0) or rhs.is_int(0):
return Terminal(0)
return And(lhs, rhs)
def __str__(self):
"""
Return the string expression of this object.
"""
return str(self.lhs) + ' & ' + str(self.rhs)
class Xor(Expression):
"""
Logical xor operation.
"""
def __init__(self, lhs, rhs):
"""
Construct a logical xor object.
"""
self.lhs = _classify(lhs)
self.rhs = _classify(rhs)
def simplify(self):
"""
Return a simplified version of this sub-expression.
"""
lhs = self.lhs.simplify()
rhs = self.rhs.simplify()
if lhs.is_int() and rhs.is_int():
return Terminal(lhs.val ^ rhs.val)
if lhs.is_int(0):
return rhs
if rhs.is_int(0):
return lhs
return Xor(lhs, rhs)
def __str__(self):
"""
Return the string expression of this object.
"""
return str(self.lhs) + ' ^ ' + str(self.rhs)
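# Illustrative only (editorial addition, not part of the original pycrc
# module): build a small expression tree and show how constant
# sub-expressions collapse when simplified.
if __name__ == '__main__':
    demo_expr = Xor('crc', Parenthesis(And('0x0f', 0)))
    # prints: "crc ^ (0x0f & 0)" -> "crc"
    print('"{}" -> "{}"'.format(demo_expr, demo_expr.simplify()))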
|
labml_nn/transformers/hour_glass/__init__.py
|
techthiyanes/annotated_deep_learning_paper_implementations
| 3,714 |
84307
|
<filename>labml_nn/transformers/hour_glass/__init__.py
"""
---
title: Hierarchical Transformers Are More Efficient Language Models
summary: >
This is an annotated implementation/tutorial of hourglass model in PyTorch.
---
# Hierarchical Transformers Are More Efficient Language Models
This is a [PyTorch](https://pytorch.org) implementation of the paper
[Hierarchical Transformers Are More Efficient Language Models](https://papers.labml.ai/paper/2110.13711).
This paper introduces a hierarchical transformer architecture to handle long sequences
efficiently. The first half of the transformer layers down-sample tokens and the second
half up-samples with direct skip connections between layers of the same resolution.
This is a little similar to [U-Net](../../diffusion/ddpm/unet.html) for vision tasks.
They try different up-sampling and down-sampling techniques and build a model
with the best performing up and down-sampling techniques which they call the
hourglass model.
Here we have implemented the simplest up-sampling and down-sampling techniques for simplicity.
We will consider adding more complex (and better performing) implementations later.
Here is [the training code](experiment.html) for the hourglass model.
[](https://app.labml.ai/run/855b82363e4911ec9ae4a5b9c69d5061)
"""
from typing import List
import torch
from torch import nn
from labml_helpers.module import Module
from labml_nn.transformers import MultiHeadAttention, TransformerLayer
from labml_nn.transformers.feed_forward import FeedForward
from labml_nn.transformers.utils import subsequent_mask
class HourGlass(Module):
"""
## Hourglass model
This model recursively adds layers to the middle while shortening the sequence by down-sampling.
The shortened sequence processed by another hourglass model is sandwiched between two normal transformer
layers. (A transformer layer has a [self-attention layer](../mha.html)
and a [position-wise feed-forward layer](../feed_forward.html)).
"""
def __init__(self, n_heads: int, d_model: int, dropout: float, d_ff: int, shortening_factors: List[int]):
"""
* `n_heads` is the number of heads in [multi-head attention layers](../mha.html)
* `d_model` is the size of the token embeddings
* `dropout` is the dropout probability
* `d_ff` is the dimensionality of the hidden layer in [position-wise feed-forward layers](../feed_forward.html)
* `shortening_factors` is the list of shortening factors
"""
super().__init__()
# The transformer layer before down-sampling
self.pre = TransformerLayer(d_model=d_model,
# [Multi-head attention layer](../mha.html)
self_attn=MultiHeadAttention(n_heads, d_model, dropout),
                                    # [Position wise feed-forward layers](../feed_forward.html)
feed_forward=FeedForward(d_model, d_ff, dropout),
#
dropout_prob=dropout)
# Auto-regressive mask
self.mask = AutoregressiveMask()
# The shortening factor $k$ (or the down-sampling rate)
k = shortening_factors[0]
# We shift the tokens to the right by $k - 1$ steps to make sure
# information doesn't leak from the future tokens to past tokens
# as a result of down-sampling and up-sampling
self.shift_right = ShiftRight(k - 1)
# Shortening or the down-sampling layer. We use the simplest form - average pooling.
# The paper shows that attention based down sampling works best, which we haven't implemented yet.
self.shortening = AvgPoolShortening(k)
# If there are no more shortening (middle of the hourglass)
if len(shortening_factors) == 1:
# The center layer is another transformer layer
self.shortened = TransformerLayer(d_model=d_model,
self_attn=MultiHeadAttention(n_heads, d_model, dropout),
feed_forward=FeedForward(d_model, d_ff, dropout),
dropout_prob=dropout)
# Autoregressive mask
self.mask_short = AutoregressiveMask()
self.hour_glass = None
else:
# Insert another hourglass model recursively
self.hour_glass = HourGlass(n_heads, d_model, dropout, d_ff, shortening_factors[1:])
# Up-sampling layer. We use naive up-sampling for simplicity and the paper shows attention based up sampling
# works better.
self.up_sampling = NaiveUpSampling(k)
# The final transformer layer after up-sampling
self.post = TransformerLayer(d_model=d_model,
self_attn=MultiHeadAttention(n_heads, d_model, dropout),
feed_forward=FeedForward(d_model, d_ff, dropout),
dropout_prob=dropout)
def forward(self, x: torch.Tensor):
# Initial transformer layer
# $$x \leftarrow PreVanillaLayers(x)$$
x = self.pre(x=x, mask=self.mask(x))
# Shifting and shortening
# $$x' \leftarrow Shortening(ShiftRight(x,k−1),k)$$
x_short = self.shortening(self.shift_right(x))
# If we are at the center of the hourglass,
# $$\textbf{\small if } \text{\small E\scriptsize MPTY}(shorten\_factors) \textbf{\small then}$$
if self.hour_glass is None:
# Center transformer layer
# $$x' \leftarrow ShortenedLayers(x')$$
x_short = self.shortened(x=x_short, mask=self.mask_short(x_short))
# $$\textbf{else}$$
else:
# $$x' \leftarrow \text{\small H\scriptsize OURGLASS}(x, shorten\_factors)$$
x_short = self.hour_glass(x_short)
# Up-sample the shortened sequence and add a skip connection
# $$x \leftarrow x + Upsampling(x, x', k)$$
x = x + self.up_sampling(x, x_short)
# Final transformer layer
# $$x \leftarrow PostVanillaLayers(x)$$
x = self.post(x=x, mask=self.mask(x))
#
return x
class ShiftRight(Module):
"""
### Shift right operation
This shifts the sequence to the right by the given number of steps
"""
def __init__(self, shift: int):
"""
* `shift` is the number of steps to shift by
"""
super().__init__()
# cannot be negative
assert shift >= 0
#
self.shift = shift
def forward(self, x: torch.Tensor):
"""
* `x` is a tensor of shape `[seq_len, ...]`
"""
# If the shift is $0$ return the original
if self.shift == 0:
return x
# Zeros to be appended to the left
prefix = x.new_zeros([self.shift, *x.shape[1:]])
# Concatenate the zeros and truncate the right
return torch.cat([prefix, x[:-self.shift]])
class AvgPoolShortening(Module):
"""
### Average pool shortening
This down-samples by a given factor with average pooling
"""
def __init__(self, k: int):
"""
* `k` is the shortening factor
"""
super().__init__()
# Average pooling layer
self.pool = nn.AvgPool1d(k, ceil_mode=True)
def forward(self, x: torch.Tensor):
"""
* `x` is of shape `[seq_len, batch_size, d_model]`
"""
# Pooling layer accepts shape `[batch_size, d_model, seq_len]` so we
# permute axes.
return self.pool(x.permute(1, 2, 0)).permute(2, 0, 1)
class NaiveUpSampling(Module):
"""
### Naive up-sampling
This up-samples by repeating
"""
def __init__(self, k: int):
"""
* `k` is the shortening factor
"""
super().__init__()
self.k = k
def forward(self, x: torch.Tensor, x_short: torch.Tensor):
"""
* `x` is the tensor with embeddings before down-sampling
* `x_short` is the tensor of higher density (to be up-sampled) representations
"""
# Repeat across the sequence dimension
expanded = torch.repeat_interleave(x_short, self.k, dim=0)
# Truncate the extra embeddings at the end
expanded = expanded[:x.shape[0]]
#
return expanded
class AutoregressiveMask(Module):
"""
### Generate auto-regressive mask
"""
def __init__(self):
super().__init__()
self.mask = None
def forward(self, x: torch.Tensor):
# Create a mask if we haven't created or sizes have changed
if self.mask is None or self.mask.size(0) != len(x):
# [Subsequent mask](../utils.html), will mask out tokens from seeing future tokens
self.mask = subsequent_mask(len(x)).to(x.device)
#
return self.mask
class LinearPoolingShortening(Module):
"""
### 🚧 Linear pooling for down-sampling
    This concatenates the consecutive token embeddings that need to be merged and does a linear
    transformation to map it to the size of a single token embedding.
"""
def __init__(self):
super().__init__()
raise NotImplementedError
class AttentionBasedShortening(Module):
"""
### 🚧 Down-sampling with attention
\begin{align}
x' &= S(x) + Attention \Big(Q=S(x),K = x, V =x \Big) \\
x' &= x' + FFN(x')
\end{align}
where $S(x)$ is average pooling or linear pooling.
"""
def __init__(self):
super().__init__()
raise NotImplementedError
class LinearUpSampling(Module):
"""
### 🚧 Linear projection for up-sampling
Make a linear projection of dense token embeddings to a size of $d_{\text{model}} k$.
"""
def __init__(self):
super().__init__()
raise NotImplementedError
class AttentionBasedUpSampling(Module):
"""
### 🚧 Attention based up-sampling
\begin{align}
x &= U(x,x') + Attention \Big(Q=U(x,x'),K = x', V = x' \Big) \\
x &= x + FFN(x)
\end{align}
where $U(x,x') = x + LinearUpsampling(x')$
"""
def __init__(self):
super().__init__()
raise NotImplementedError
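def _demo():
    """
    ### Editorial usage sketch

    This is an illustrative addition (not part of the original `labml_nn`
    file): it pushes a random batch through a two-level hourglass and checks
    that the sequence shape is preserved. It assumes the `labml_nn` package
    this module belongs to is installed.
    """
    # `[seq_len, batch_size, d_model]`
    x = torch.randn(32, 4, 64)
    # Two levels of shortening, each by a factor of $2$
    model = HourGlass(n_heads=4, d_model=64, dropout=0.1, d_ff=128, shortening_factors=[2, 2])
    # Down-sampling followed by up-sampling keeps the overall sequence shape
    assert model(x).shape == x.shape


if __name__ == '__main__':
    _demo()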
|