max_stars_repo_path (string, 4–245 chars) | max_stars_repo_name (string, 7–115 chars) | max_stars_count (int64, 101–368k) | id (string, 2–8 chars) | content (string, 6–1.03M chars)
---|---|---|---|---
tests/dbshell/fake_client.py | jpmallarino/django | 61,676 | 47688
import sys
sys.exit(1)
---|---|---|---|---
pythonjs/runtime/builtins.py | bpmbank/PythonJS | 319 | 47689
# PythonJS builtins
# by <NAME> and <NAME> - copyright 2013
# License: "New BSD"
pythonjs.configure( runtime_exceptions=False )
pythonjs.configure( direct_operator='+' )
pythonjs.configure( direct_operator='*' )
pythonjs.configure( direct_keys=True )
_PythonJS_UID = 0
inline('IndexError = function(msg) {this.message = msg || "";}; IndexError.prototype = Object.create(Error.prototype); IndexError.prototype.name = "IndexError";')
inline('KeyError = function(msg) {this.message = msg || "";}; KeyError.prototype = Object.create(Error.prototype); KeyError.prototype.name = "KeyError";')
inline('ValueError = function(msg) {this.message = msg || "";}; ValueError.prototype = Object.create(Error.prototype); ValueError.prototype.name = "ValueError";')
inline('AttributeError = function(msg) {this.message = msg || "";}; AttributeError.prototype = Object.create(Error.prototype);AttributeError.prototype.name = "AttributeError";')
inline('RuntimeError = function(msg) {this.message = msg || "";}; RuntimeError.prototype = Object.create(Error.prototype);RuntimeError.prototype.name = "RuntimeError";')
with lowlevel:
def __getfast__(ob, attr):
v = ob[ attr ]
if v is undefined:
raise AttributeError(attr)
else:
return v
with javascript:
def __wrap_function__(f):
f.is_wrapper = True
return f
def __gpu_object(cls, struct_name, data_name):
cls.prototype.__struct_name__ = struct_name
cls.prototype.__struct_data__ = data_name
with lowlevel:
gpu = {
'object' : __gpu_object
}
def glsljit_runtime(header):
return new( GLSLJITRuntime(header) )
class GLSLJITRuntime:
def __init__(self, header):
self.header = header
self.shader = []
self.object_packagers = []
self.struct_types = {}
self.glsltypes = ['vec2', 'vec3', 'vec4', 'mat4']
self.matrices = []
def compile_header(self):
a = [] ## insert structs at top of header
for sname in self.struct_types:
if sname in self.glsltypes:
pass
else:
a.push( self.struct_types[sname]['code'] )
## calls get_global_id, see WebCLGL API docs. ##
a.push('int matrix_index() { return int(get_global_id().y*%s.0); }' %self.matrices.length)
a.push('int matrix_row() { return int(get_global_id().x*4.0); }') ## returns: 0, 1, 2, 3
## first class array error, can not return an array, even when the size is known ##
#a.push('float[3] floatN( float a, float b, float c) { float f[3]; f[0]=a; f[1]=b; f[2]=b; return f; }')
## these could be generated for each array size to reduce the mess in main,
## TODO it would be better to upload them as uniforms.
#a.push('void floatN( float f[3], float a, float b, float c) { f[0]=a; f[1]=b; f[2]=b; }')
## the array can be declared in the header, but not filled with data here.
#a.push('float XXX[3];')
#a.push('floatN( XXX, 1.1, 2.2, 3.3 );')
#a.push('XXX[0]=1.1;')
a = '\n'.join(a)
## code in header could be methods that reference the struct types above.
b = "\n".join(self.header)
return '\n'.join([a,b])
def compile_main(self):
return '\n'.join(self.shader)
def push(self, s):
self.shader.push(s)
def define_structure(self, ob):
struct_name = None
#if Object.hasOwnProperty.call(ob,'__struct_name__'):
if ob.__struct_name__:
struct_name = ob.__struct_name__
if struct_name in self.struct_types:
return struct_name
arrays = []
floats = []
integers = []
structs = []
struct_type = [] ## fallback for javascript objects
if struct_name and struct_name in self.glsltypes:
return struct_name
#for key in ob.keys():
for key in dir( ob ):
if key.length==1 and key in '0123456789':
raise RuntimeError(key)
t = typeof( ob[key] )
if t=='object' and instanceof(ob[key], Array) and ob[key].length and typeof(ob[key][0])=='number':
struct_type.push( 'ARY_'+key )
arrays.push(key)
elif t=='number':
struct_type.push( 'NUM_'+key)
floats.push(key)
elif instanceof(ob[key], Int16Array):
struct_type.push( 'INT_'+key)
if ob[key].length == 1:
integers.push(key)
else:
pass ## TODO int16array
elif t=='object' and ob[key].__struct_name__:
struct_type.push( 'S_'+key)
structs.push( key )
if ob[key].__struct_name__ not in self.struct_types:
if ob[key].__struct_name__ in self.glsltypes:
pass
else:
self.define_structure( ob[key] )
if struct_name is None:
#print('DEBUG: new struct name', ob.__struct_name__)
#print(ob)
struct_name = ''.join( struct_type )
ob.__struct_name__ = struct_name
if struct_name not in self.struct_types:
member_list = []
for key in integers:
member_list.append('int '+key+';')
for key in floats:
member_list.append('float '+key+';')
for key in arrays:
arr = ob[key]
member_list.append('float '+key+'['+arr.length+'];')
for key in structs:
subtype = ob[key].__struct_name__
member_list.append( subtype+' '+key+';')
if len(member_list)==0:
raise RuntimeError(struct_name)
members = ''.join(member_list)
code = 'struct ' +struct_name+ ' {' +members+ '};'
#print('-------struct glsl code-------')
#print(code)
#print('------------------------------')
self.struct_types[ struct_name ] = {
'arrays' : arrays,
'floats' : floats,
'integers': integers,
'structs' : structs,
'code' : code
}
return struct_name
def structure(self, ob, name):
wrapper = None
if instanceof(ob, Object):
pass
elif ob.__class__ is dict:
wrapper = ob
ob = ob[...]
sname = self.define_structure(ob)
if wrapper:
wrapper.__struct_name__ = sname
args = []
stype = self.struct_types[ sname ]
# if stype is None: ## TODO fix me
if sname not in self.struct_types:
if sname in self.glsltypes:
if sname == 'mat4':
if ob.__struct_data__:
o = ob[ ob.__struct_data__ ]
else:
o = ob
for i in range(o.length):
value = o[i] +''
if '.' not in value: value += '.0'
args.push( value )
else:
raise RuntimeError('no method to pack structure: ' +sname)
has_arrays = False
if stype:
if stype['arrays'].length > 0:
has_arrays = True
for key in stype['integers']:
args.push( ob[key][0]+'' )
for key in stype['floats']:
value = ob[key] + ''
if '.' not in value:
value += '.0'
args.push( value )
for key in stype['arrays']:
#args.push( '{'+ob[key].toString()+ '}') ## this will not work
## arrays need to be assigned to a local variable before passing
## it to the struct constructor.
aname = '_'+key+name
self.array(ob[key], aname)
args.push( aname )
for key in stype['structs']:
aname = '_'+key+name
self.structure(ob[key], aname)
args.push( aname )
args = ','.join(args)
if has_arrays:
self.shader.push( sname + ' ' +name+ '=' +sname+ '(' +args+ ');' )
else:
self.header.push( 'const ' + sname + ' ' +name+ '=' +sname+ '(' +args+ ');' )
return stype
def int16array(self, ob, name):
a = ['int ' + name + '[' + ob.length + ']']
i = 0
while i < ob.length:
a.push(';'+name+'['+i+']='+ob[i])
i += 1
self.shader.push( ''.join(a) )
def array(self, ob, name):
if instanceof(ob[0], Array):
a = [] #'float ' + name + '[' + ob.length + ']']
i = 0
while i < ob.length:
subarr = ob[i]
subname = '%s_%s'%(name,i)
if a.length==0:
a.append('float ' + subname + '[' + subarr.length + ']')
else:
a.append(';float ' + subname + '[' + subarr.length + ']')
j = 0
while j < subarr.length:
v = subarr[j] + ''
if '.' not in v:
v += '.0'
a.push(';'+subname+'['+j+']='+v)
j += 1
i += 1
self.shader.push( ''.join(a) )
elif instanceof(ob[0], Object) or ob[0].__class__ is dict:
i = 0
while i < ob.length:
self.structure( ob[i], name+'_'+i)
i += 1
else:
a = ['float ' + name + '[' + ob.length + '];']
i = 0
while i < ob.length:
a.push(name+'['+i+']='+ob[i] + ';')
i += 1
self.shader.push( ''.join(a) )
def object(self, ob, name):
for p in self.object_packagers:
cls, func = p
if instanceof(ob, cls):
return func(ob)
def unpack_array2d(self, arr, dims):
if typeof(dims)=='number':
return arr
w,h = dims
row = []
rows = [row]
for value in arr:
row.append(value)
if row.length >= w:
row = []
rows.append(row)
rows.pop()
if rows.length != h:
print('ERROR: __unpack_array2d, invalid height.')
return rows
def unpack_vec4(self, arr, dims):
if typeof(dims)=='number':
w = dims
h = 1
else:
w,h = dims
rows = []
i=0
for y in range(h):
row = []
rows.append( row )
for x in range(w):
vec = []
for j in range(4):
vec.append( arr[i])
i += 1
row.append( vec )
if rows.length != h:
print('ERROR: __unpack_vec4, invalid height.')
return rows
def unpack_mat4(self, arr):
i = 0
for mat in self.matrices:
for j in range(16):
mat[j] = arr[i]
i += 1
return self.matrices
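## --- illustrative sketch (plain Python, not part of this runtime; names are hypothetical) ---
## What unpack_array2d above does: reshape the flat array of w*h values that
## comes back from the GPU into h rows of w values each.
def __unpack_array2d_sketch(arr, dims):
    if isinstance(dims, (int, float)):
        return arr
    w, h = dims
    return [arr[y * w:(y + 1) * w] for y in range(int(h))]
## __unpack_array2d_sketch([1, 2, 3, 4, 5, 6], (3, 2))  ->  [[1, 2, 3], [4, 5, 6]]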
with lowlevel:
def __getattr__(ob, a ):
if ob.__getattr__:
return JS("ob.__getattr__(a)")
#else:
# raise AttributeError(a)
def __test_if_true__( ob ):
if ob is True:
return True
elif ob is False:
return False
elif typeof(ob) == 'string':
return ob.length != 0
elif not ob:
return False
elif instanceof(ob, Array):
return ob.length != 0
elif typeof(ob) == 'function':
return True
elif ob.__class__ and ob.__class__ is dict: #isinstance(ob, dict):
return Object.keys( ob[...] ).length != 0
elif instanceof(ob, Object):
return Object.keys(ob).length != 0
else:
return True
def __replace_method(ob, a, b):
## this is required because string.replace in javascript only replaces the first occurrence
if typeof(ob) == 'string':
return ob.split(a).join(b)
else:
return ob.replace(a,b)
def __split_method( ob, delim ):
## special case: calling string.split() without args is not the same as in Python,
## and we do not want to touch the default string.split implementation.
if typeof(ob) == 'string':
if delim is undefined:
return ob.split(' ')
else:
return ob.split( delim )
else:
if delim is undefined:
return ob.split()
else:
return ob.split( delim )
with javascript:
__dom_array_types__ = []
if typeof(NodeList) == 'function': ## NodeList is only available in browsers
## minimal dom array types common to all browsers ##
__dom_array_types__ = [ NodeList, FileList, DOMStringList, HTMLCollection, SVGNumberList, SVGTransformList]
## extra dom array types ##
if typeof(DataTransferItemList) == 'function': ## missing in NodeWebkit
__dom_array_types__.push( DataTransferItemList )
if typeof(HTMLAllCollection) == 'function': ## missing in Firefox
__dom_array_types__.push( HTMLAllCollection )
if typeof(SVGElementInstanceList) == 'function':## missing in Firefox
__dom_array_types__.push( SVGElementInstanceList )
if typeof(ClientRectList) == 'function': ## missing in Firefox-trunk
__dom_array_types__.push( ClientRectList )
def __is_some_array( ob ):
if __dom_array_types__.length > 0:
for t in __dom_array_types__:
if instanceof(ob, t):
return True
return False
def __is_typed_array( ob ):
if instanceof( ob, Int8Array ) or instanceof( ob, Uint8Array ):
return True
elif instanceof( ob, Int16Array ) or instanceof( ob, Uint16Array ):
return True
elif instanceof( ob, Int32Array ) or instanceof( ob, Uint32Array ):
return True
elif instanceof( ob, Float32Array ) or instanceof( ob, Float64Array ):
return True
else:
return False
def __js_typed_array( t, a ):
if t == 'i':
arr = new( Int32Array(a.length) )
arr.set( a )
return arr
def __contains__( ob, a ):
t = typeof(ob)
if t == 'string':
if ob.indexOf(a) == -1: return False
else: return True
elif t == 'number':
raise TypeError
elif __is_typed_array(ob):
for x in ob:
if x == a:
return True
return False
elif ob.__contains__:
return ob.__contains__(a)
elif instanceof(ob, Object) and Object.hasOwnProperty.call(ob, a):
return True
else:
return False
def __add_op(a, b):
## 'number' is already checked before this gets called (ternary op)
## but it can still appear here when called from an inlined lambda
t = typeof(a)
if t == 'string' or t == 'number':
return JS("a+b")
elif instanceof(a, Array):
c = []
c.extend(a)
c.extend(b)
return c
elif a.__add__:
return a.__add__(b)
else:
raise TypeError('invalid objects for addition')
def __mul_op(a, b):
t = typeof(a)
if t == 'number':
return JS("a * b")
elif t == 'string':
arr = []
for i in range(b):
arr.append(a)
return ''.join(arr)
elif instanceof(a, Array):
c = []
for i in range(b):
c.extend(a)
return c
elif a.__mul__:
return a.__mul__(b)
else:
raise TypeError('invalid objects for multiplication')
def __jsdict( items ):
d = JS("{}")
for item in items:
key = item[0]
if instanceof(key, Array):
key = JSON.stringify(key)
elif key.__uid__:
key = key.__uid__
d[ key ] = item[1]
return d
def __jsdict_get(ob, key, default_value):
if instanceof(ob, Object):
if instanceof(key, Array):
key = JSON.stringify(key)
if JS("key in ob"): return ob[key]
return default_value
else: ## PythonJS object instance ##
## this works because instances from PythonJS are created using Object.create(null) ##
if default_value is not undefined:
return JS("ob.get(key, default_value)")
else:
return JS("ob.get(key)")
def __jsdict_set(ob, key, value):
if instanceof(ob, Object):
if instanceof(key, Array):
key = JSON.stringify(key)
ob[ key ] = value
else: ## PythonJS object instance ##
## this works because instances from PythonJS are created using Object.create(null) ##
JS("ob.set(key,value)")
def __jsdict_keys(ob):
if instanceof(ob, Object):
## in the case of tuple keys this would return stringified JSON instead of the original arrays,
## TODO, should this loop over the keys and convert the json strings back to objects?
## but then how would we know if a given string was json... special prefix character?
return JS("Object.keys( ob )")
else: ## PythonJS object instance ##
## this works because instances from PythonJS are created using Object.create(null) ##
return JS("ob.keys()")
def __jsdict_values(ob):
if instanceof(ob, Object):
arr = []
for key in ob:
if ob.hasOwnProperty(key):
value = ob[key]
arr.push( value )
return arr
else: ## PythonJS object instance ##
## this works because instances from PythonJS are created using Object.create(null) ##
return JS("ob.values()")
def __jsdict_items(ob):
## `ob.items is undefined` is for: "self.__dict__.items()" because self.__dict__ is not actually a dict
if instanceof(ob, Object) or ob.items is undefined: ## in javascript-mode missing attributes do not raise AttributeError
arr = []
for key in ob:
if Object.hasOwnProperty.call(ob, key):
value = ob[key]
arr.push( [key,value] )
return arr
else: ## PythonJS object instance ##
return JS("ob.items()")
def __jsdict_pop(ob, key, _default=None):
if instanceof(ob, Array):
if ob.length:
## note: javascript array.pop only pops the end of an array
if key is undefined:
return inline("ob.pop()")
else:
return ob.splice( key, 1 )[0]
else:
raise IndexError(key)
elif instanceof(ob, Object):
if JS("key in ob"):
v = ob[key]
JS("delete ob[key]")
return v
elif _default is undefined:
raise KeyError(key)
else:
return _default
else: ## PythonJS object instance ##
## this works because instances from PythonJS are created using Object.create(null) ##
return JS("ob.pop(key, _default)")
def dir(ob):
if instanceof(ob, Object):
return JS("Object.keys( ob )")
else:
return __object_keys__(ob)
def __object_keys__(ob):
'''
notes:
. Object.keys(ob) will not work because we create PythonJS objects using `Object.create(null)`
. this is different from Object.keys because it traverses the prototype chain.
'''
arr = []
JS('for (var key in ob) { arr.push(key) }')
return arr
def __bind_property_descriptors__(o, klass):
for name in klass.__properties__:
desc = {"enumerable":True}
prop = klass.__properties__[ name ]
if prop['get']:
desc['get'] = __generate_getter__(klass, o, name)
if prop['set']:
desc['set'] = __generate_setter__(klass, o, name)
Object.defineProperty( o, name, desc )
for base in klass.__bases__:
__bind_property_descriptors__(o, base)
def __generate_getter__(klass, o, n):
return lambda : klass.__properties__[ n ]['get']([o],{})
def __generate_setter__(klass, o, n):
return lambda v: klass.__properties__[ n ]['set']([o,v],{})
def __sprintf(fmt, args):
## note: '%sXXX%s'.split('%s').length != args.length
## because `%s` at the start or end will split to empty chunks ##
if instanceof(args, Array):
chunks = fmt.split('%s')
arr = []
for i,txt in enumerate(chunks):
arr.append( txt )
if i >= args.length:
break
item = args[i]
if typeof(item) == 'string':
arr.append( item )
elif typeof(item) == 'number':
arr.append( ''+item )
else:
arr.append( Object.prototype.toString.call(item) )
return ''.join(arr)
else:
return fmt.replace('%s', args)
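## --- illustrative sketch (plain Python, not part of this runtime; names are hypothetical) ---
## The same interleave-and-stop strategy __sprintf uses above, assuming `args`
## is a list: emit a format chunk, then the matching argument, and stop once
## the arguments run out.
def __sprintf_sketch(fmt, args):
    chunks = fmt.split('%s')
    out = []
    for i, txt in enumerate(chunks):
        out.append(txt)
        if i >= len(args):
            break
        out.append(str(args[i]))
    return ''.join(out)
## __sprintf_sketch('%s + %s = %s', [1, 2, 3])  ->  '1 + 2 = 3'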
def __create_class__(class_name, parents, attrs, props):
"""Create a PythonScript class"""
#if attrs.__metaclass__:
# metaclass = attrs.__metaclass__
# attrs.__metaclass__ = None
# return metaclass([class_name, parents, attrs])
klass = Object.create(null)
klass.__bases__ = parents
klass.__name__ = class_name
#klass.__dict__ = attrs
klass.__unbound_methods__ = Object.create(null)
klass.__all_method_names__ = []
klass.__properties__ = props
klass.__attributes__ = attrs
for key in attrs:
if typeof( attrs[key] ) == 'function':
klass.__all_method_names__.push( key )
f = attrs[key]
if hasattr(f, 'is_classmethod') and f.is_classmethod:
pass
elif hasattr(f, 'is_staticmethod') and f.is_staticmethod:
pass
else:
klass.__unbound_methods__[key] = attrs[key]
if key == '__getattribute__': continue
klass[key] = attrs[key]
## this is needed for fast lookup of property names in __set__ ##
klass.__setters__ = []
klass.__getters__ = []
for name in klass.__properties__:
prop = klass.__properties__[name]
klass.__getters__.push( name )
if prop['set']:
klass.__setters__.push( name )
for base in klass.__bases__:
Array.prototype.push.apply( klass.__getters__, base.__getters__ )
Array.prototype.push.apply( klass.__setters__, base.__setters__ )
Array.prototype.push.apply( klass.__all_method_names__, base.__all_method_names__ )
def __call__():
"""Create a PythonJS object"""
object = Object.create(null) ## this makes pythonjs object not compatible with things like: Object.hasOwnProperty
object.__class__ = klass
object.__dict__ = object
## we need __dict__ so that __setattr__ can still set attributes using `old-style`: self.__dict__[n]=x
#Object.defineProperty(
# object,
# '__dict__',
# {enumerable:False, value:object, writeable:False, configurable:False}
#)
has_getattribute = False
has_getattr = False
for name in klass.__all_method_names__:
if name == '__getattribute__':
has_getattribute = True
elif name == '__getattr__':
has_getattr = True
else:
wrapper = __get__(object, name)
if not wrapper.is_wrapper:
print 'RUNTIME ERROR: failed to get wrapper for:',name
## to be safe the getters come after other methods are cached ##
if has_getattr:
__get__(object, '__getattr__')
if has_getattribute:
__get__(object, '__getattribute__')
__bind_property_descriptors__(object, klass)
if object.__init__:
object.__init__.apply(this, arguments)
#object.__init__.call(this,args, kwargs)
return object
__call__.is_wrapper = True
klass.__call__ = __call__
return klass
def type(ob_or_class_name, bases=None, class_dict=None):
'''
type(object) -> the object's type
type(name, bases, dict) -> a new type ## broken? - TODO test
'''
with javascript:
if bases is None and class_dict is None:
return ob_or_class_name.__class__
else:
return create_class(ob_or_class_name, bases, class_dict) ## TODO rename create_class to _pyjs_create_class
def hasattr(ob, attr):
## TODO check parent classes for attr
with javascript:
return Object.hasOwnProperty.call(ob, attr)
def getattr(ob, attr, property=False):
with javascript:
if property:
prop = _get_upstream_property( ob.__class__, attr )
if prop and prop['get']:
return prop['get']( [ob], {} )
else:
print "ERROR: getattr property error", prop
else:
return __get__(ob, attr)
def setattr(ob, attr, value, property=False):
with javascript:
if property:
prop = _get_upstream_property( ob.__class__, attr )
if prop and prop['set']:
prop['set']( [ob, value], {} )
else:
print "ERROR: setattr property error", prop
else:
__set__(ob, attr, value)
def issubclass(C, B):
if C is B:
return True
with javascript: bases = C.__bases__ ## js-array
i = 0
while i < bases.length:
if issubclass( bases[i], B ):
return True
i += 1
return False
def isinstance( ob, klass):
with javascript:
if ob is undefined or ob is null:
return False
elif instanceof(ob, Array) and klass is list:
return True
#elif klass is dict and instanceof(ob, Object): ## this is safe because instances created with Object.create(null) are not instances-of Object
# if instanceof(ob, Array):
# return False
# elif ob.__class__:
# return False
# else:
# return True
elif not Object.hasOwnProperty.call(ob, '__class__'):
return False
ob_class = ob.__class__
if ob_class is undefined:
return False
else:
return issubclass( ob_class, klass )
def int(a):
with javascript:
a = Math.round(a)
if isNaN(a):
raise ValueError('not a number')
return a
with javascript:
def int16(a): ## used by glsljit when packing structs.
arr = new(Int16Array(1))
arr[0]=a
return arr
def float(a):
with javascript:
if typeof(a)=='string':
if a.lower()=='nan':
return NaN
elif a.lower()=='inf':
return Infinity
b = Number(a)
if isNaN(b):
## invalid strings also convert to NaN, throw error ##
raise ValueError('can not convert to float: '+a)
return b
def round(a, places=0):
with javascript:
b = '' + a
if b.indexOf('.') == -1:
return a
else:
## this could return NaN with large numbers and large places,
## TODO check for NaN and instead fallback to `a.toFixed(places)`
p = Math.pow(10, places)
return Math.round(a * p) / p
def str(s):
return ''+s
def _setup_str_prototype():
'''
Extend JavaScript String.prototype with methods that implement the Python str API.
The decorator @String.prototype.[name] assigns the function to the prototype,
and ensures that the special 'this' variable will work.
'''
with javascript:
@String.prototype.__contains__
def func(a):
if this.indexOf(a) == -1: return False
else: return True
@String.prototype.get
def func(index):
if index < 0:
return this[ this.length + index ]
else:
return this[ index ]
@String.prototype.__iter__
def func(self):
with python:
return Iterator(this, 0)
@String.prototype.__getitem__
def func(idx):
if idx < 0:
return this[ this.length + idx ]
else:
return this[ idx ]
@String.prototype.__len__
def func():
return this.length
@String.prototype.__getslice__
def func(start, stop, step):
if start is undefined and stop is undefined and step == -1:
return this.split('').reverse().join('')
else:
if stop < 0:
stop = this.length + stop
return this.substring(start, stop)
@String.prototype.splitlines
def func():
return this.split('\n')
@String.prototype.strip
def func():
return this.trim() ## missing in IE8
@String.prototype.startswith
def func(a):
if this.substring(0, a.length) == a:
return True
else:
return False
@String.prototype.endswith
def func(a):
if this.substring(this.length-a.length, this.length) == a:
return True
else:
return False
@String.prototype.join
def func(a):
out = ''
if instanceof(a, Array):
arr = a
else:
arr = a[...]
i = 0
for value in arr:
out += value
i += 1
if i < arr.length:
out += this
return out
@String.prototype.upper
def func():
return this.toUpperCase()
@String.prototype.lower
def func():
return this.toLowerCase()
@String.prototype.index
def func(a):
i = this.indexOf(a)
if i == -1:
raise ValueError(a + ' - not in string')
return i
@String.prototype.find
def func(a):
return this.indexOf(a)
@String.prototype.isdigit
def func():
digits = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
for char in this:
if char in digits: pass
else: return False
return True
@String.prototype.isnumber
def func():
digits = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.']
for char in this:
if char in digits: pass
else: return False
return True
## TODO - for now these are just dummy functions.
@String.prototype.decode
def func(encoding):
return this
@String.prototype.encode
def func(encoding):
return this
@String.prototype.format
def func(fmt):
r = this
keys = Object.keys(fmt)
for key in keys:
r = r.split(key).join(fmt[key])
r = r.split('{').join('').split('}').join('')
return r
_setup_str_prototype()
## note Arrays in javascript by default sort by string order, even if the elements are numbers.
with javascript:
def __sort_method(ob):
if instanceof(ob, Array):
def f(a,b):
if a < b:
return -1
elif a > b:
return 1
else:
return 0
return JS("ob.sort( f )")
else:
return JS("ob.sort()")
def _setup_array_prototype():
with javascript:
@Array.prototype.jsify
def func():
i = 0
while i < this.length:
item = this[ i ]
if typeof(item) == 'object':
if item.jsify:
this[ i ] = item.jsify()
i += 1
return this
@Array.prototype.__contains__
def func(a):
if this.indexOf(a) == -1: return False
else: return True
@Array.prototype.__len__
def func():
return this.length
@Array.prototype.get
def func(index):
return this[ index ]
@Array.prototype.__getitem__
def __getitem__(index):
if index < 0: index = this.length + index
return this[index]
@Array.prototype.__setitem__
def __setitem__(index, value):
if index < 0: index = this.length + index
this[ index ] = value
@Array.prototype.__iter__
def func():
with python:
return Iterator(this, 0)
@Array.prototype.__getslice__
def func(start, stop, step):
arr = []
start = start | 0
if stop is undefined:
stop = this.length
if start < 0:
start = this.length + start
if stop < 0:
stop = this.length + stop
#reverse = step < 0 ## in javascript `null<0` and `undefined<0` are false
#reverse = False
if typeof(step)=='number':
#reverse = step < 0
#if reverse:
if step < 0:
#step = Math.abs(step)
i = start
while i >= 0:
arr.push( this[i] )
i += step
return arr
else:
i = start
n = stop
while i < n:
arr.push( this[i] )
i += step
return arr
else:
i = start
n = stop
while i < n:
#arr[ i ] = this[i] ## slower in chrome
arr.push( this[i] )
i += 1 ## this gets optimized to i++
return arr
#if reverse:
# arr.reverse()
#if step == 1:
# arr = new(Array(this.length))
# i = 0
# while i < this.length:
# arr[ i ] = this[i]
# i += 1 ## this gets optimized to i++
#else:
# arr = []
# i = 0
# while i < this.length:
# arr.push( this[i] )
# i += step
#if start is undefined and stop is undefined:
# if reverse: arr.reverse()
#elif reverse:
# arr = arr.slice(stop, start+1)
# arr.reverse()
#else:
# #if stop < 0: ## mozilla spec says negative indices are supported
# # stop = arr.length + stop
# arr = arr.slice(start, stop)
#return arr
@Array.prototype.__setslice__
def func(start, stop, step, items):
if start is undefined: start = 0
if stop is undefined: stop = this.length
arr = [start, stop-start]
for item in items: arr.push( item )
this.splice.apply(this, arr )
@Array.prototype.append
def func(item):
this.push( item )
return this
@Array.prototype.extend
def extend(other):
for obj in other:
this.push(obj)
return this
@Array.prototype.remove
def func(item):
index = this.indexOf( item )
this.splice(index, 1)
@Array.prototype.insert
def insert(index, obj):
if index < 0: index = this.length + index
this.splice(index, 0, obj)
@Array.prototype.index
def index(obj):
return this.indexOf(obj)
@Array.prototype.count
def count(obj):
a = 0
for item in this:
if item is obj: ## note that `==` will not work here, `===` is required for objects
a += 1
return a
## set-like features ##
@Array.prototype.bisect
def func(x, low, high):
if low is undefined: low = 0
if high is undefined: high = this.length
while low < high:
a = low+high
mid = Math.floor(a/2)
if x < this[mid]:
high = mid
else:
low = mid + 1
return low
## `-` operator
@Array.prototype.difference
def func(other):
f = lambda i: other.indexOf(i)==-1
return this.filter( f )
## `&` operator
@Array.prototype.intersection
def func(other):
f = lambda i: other.indexOf(i)!=-1
return this.filter( f )
## `<=` operator
@Array.prototype.issubset
def func(other):
for item in this:
if other.indexOf(item) == -1:
return False
return True
## non-standard utils ##
@Array.prototype.copy
def func():
arr = []
i = 0
while i < this.length:
arr.push( this[i] )
i += 1
return arr
_setup_array_prototype()
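## --- illustrative sketch (plain Python, not part of this runtime; names are hypothetical) ---
## The insertion-point search performed by Array.prototype.bisect above; it
## follows the same contract as Python's bisect.bisect_right on a sorted list.
def __bisect_sketch(arr, x, low=0, high=None):
    if high is None:
        high = len(arr)
    while low < high:
        mid = (low + high) // 2
        if x < arr[mid]:
            high = mid
        else:
            low = mid + 1
    return low
## __bisect_sketch([1, 3, 5, 7], 4)  ->  2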
def _setup_nodelist_prototype():
with javascript:
@NodeList.prototype.__contains__
def func(a):
if this.indexOf(a) == -1: return False
else: return True
@NodeList.prototype.__len__
def func():
return this.length
@NodeList.prototype.get
def func(index):
return this[ index ]
@NodeList.prototype.__getitem__
def __getitem__(index):
if index < 0: index = this.length + index
return this[index]
@NodeList.prototype.__setitem__
def __setitem__(index, value):
if index < 0: index = this.length + index
this[ index ] = value
@NodeList.prototype.__iter__
def func():
with python:
return Iterator(this, 0)
@NodeList.prototype.index
def index(obj):
return this.indexOf(obj)
if __NODEJS__ == False and __WEBWORKER__ == False:
_setup_nodelist_prototype()
def bisect(a, x, low=None, high=None):
## bisect function from bisect module of the stdlib
with javascript:
return a.bisect(x, low, high)
def range(num, stop, step):
"""Emulates Python's range function"""
if stop is not undefined:
i = num
num = stop
else:
i = 0
if step is undefined:
step = 1
with javascript:
arr = []
while i < num:
arr.push(i)
i += step
return arr
def xrange(num, stop, step):
return range(num, stop, step)
def sum( arr ):
a = 0
for b in arr:
a += b
return a
class StopIteration: ## DEPRECATED
pass
def len(ob):
with javascript:
if instanceof(ob, Array):
return ob.length
elif __is_typed_array(ob):
return ob.length
elif instanceof(ob, ArrayBuffer):
return ob.byteLength
elif ob.__len__:
return ob.__len__()
else: #elif instanceof(ob, Object):
return Object.keys(ob).length
def next(obj):
return obj.next()
def map(func, objs):
with javascript: arr = []
for ob in objs:
v = func(ob)
with javascript:
arr.push( v )
return arr
def filter(func, objs):
with javascript: arr = []
for ob in objs:
if func( ob ):
with javascript:
arr.push( ob )
return arr
def min( lst ):
a = None
for value in lst:
if a is None: a = value
elif value < a: a = value
return a
def max( lst ):
a = None
for value in lst:
if a is None: a = value
elif value > a: a = value
return a
def abs( num ):
return JS('Math.abs(num)')
def ord( char ):
return JS('char.charCodeAt(0)')
def chr( num ):
return JS('String.fromCharCode(num)')
with javascript:
class __ArrayIterator:
def __init__(self, arr, index):
self.arr = arr
self.index = index
self.length = arr.length
def next(self):
index = self.index
self.index += 1
arr = self.arr
return JS('arr[index]')
class Iterator:
## rather than throwing an exception, it could be more optimal to have the iterator set a done flag;
## another downside is that the try/catch around this makes errors in the loop go silent.
def __init__(self, obj, index):
self.obj = obj
self.index = index
self.length = len(obj)
self.obj_get = obj.get ## cache this for speed
def next(self):
with javascript:
index = self.index
self.index += 1
return self.obj_get( [index], {} )
def tuple(a):
## TODO tuple needs a solution for dict keys
with javascript:
if Object.keys(arguments).length == 0: #arguments.length == 0:
return []
elif instanceof(a, Array):
return a.slice()
elif typeof(a) == 'string':
return a.split('')
else:
print a
print arguments
raise TypeError
def list(a):
with javascript:
if Object.keys(arguments).length == 0: #arguments.length == 0:
return []
elif instanceof(a, Array):
return a.slice()
elif typeof(a) == 'string':
return a.split('')
else:
print a
print arguments
raise TypeError
with javascript:
def __tuple_key__(arr):
r = []
i = 0
while i < arr.length:
item = arr[i]
t = typeof(item)
if t=='string':
r.append( "'"+item+"'")
elif instanceof(item, Array):
r.append( __tuple_key__(item) )
elif t=='object':
if item.__uid__ is undefined:
raise KeyError(item)
r.append( item.__uid__ )
else:
r.append( item )
i += 1
return r.join(',')
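## --- illustrative sketch (plain Python, not part of this runtime; names are hypothetical) ---
## The key flattening performed by __tuple_key__ above, assuming only plain
## strings, numbers and nested lists (real objects contribute their __uid__).
def __tuple_key_sketch(arr):
    parts = []
    for item in arr:
        if isinstance(item, str):
            parts.append("'" + item + "'")
        elif isinstance(item, (list, tuple)):
            parts.append(__tuple_key_sketch(item))
        else:
            parts.append(str(item))
    return ','.join(parts)
## __tuple_key_sketch([1, 'a', [2, 3]])  ->  "1,'a',2,3"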
class dict:
# http://stackoverflow.com/questions/10892322/javascript-hashtable-use-object-key
# using a function as a key is allowed, but would waste memory because it gets converted to a string
# http://stackoverflow.com/questions/10858632/are-functions-valid-keys-for-javascript-object-properties
def __init__(self, js_object=None, pointer=None):
with javascript:
self[...] = {}
if pointer is not None:
self[...] = pointer
elif js_object:
ob = js_object
if instanceof(ob, Array):
for o in ob:
with lowlevel:
if instanceof(o, Array):
k= o[0]; v= o[1]
else:
k= o['key']; v= o['value']
try:
self.__setitem__( k,v )
except KeyError:
raise KeyError('error in dict init, bad key')
elif isinstance(ob, dict):
for key in ob.keys():
value = ob[ key ]
self.__setitem__( key, value )
else:
print 'ERROR init dict from:', js_object
raise TypeError
def jsify(self):
#keys = Object.keys( self[...] ) ## TODO check how this got broken, this should always be a low-level object?
keys = __object_keys__( self[...] )
for key in keys:
value = self[...][key]
if typeof(value) == 'object':
if hasattr(value, 'jsify'):
self[...][key] = value.jsify()
elif typeof(value) == 'function':
raise RuntimeError("can not jsify function")
return self[...]
def copy(self):
return dict( self )
def clear(self):
with javascript:
self[...] = {}
def has_key(self, key):
__dict = self[...]
if JS("typeof(key) === 'object' || typeof(key) === 'function'"):
# Test undefined because it can be in the dict
key = key.__uid__
if JS("key in __dict"):
return True
else:
return False
def update(self, other):
for key in other:
self.__setitem__( key, other[key] )
def items(self):
arr = []
for key in self.keys():
arr.append( [key, self[key]] )
return arr
def get(self, key, _default=None):
try:
return self[key]
except:
return _default
def set(self, key, value):
self.__setitem__(key, value)
def __len__(self):
__dict = self[...]
return JS('Object.keys(__dict).length')
def __getitem__(self, key):
'''
note: `"4"` and `4` are the same key in javascript, is there a sane way to workaround this,
that can remain compatible with external javascript?
'''
with javascript:
__dict = self[...]
err = False
if instanceof(key, Array):
#key = JSON.stringify( key ) ## fails on objects with circular references ##
key = __tuple_key__(key)
elif JS("typeof(key) === 'object' || typeof(key) === 'function'"):
# Test undefined because it can be in the dict
if JS("key.__uid__ && key.__uid__ in __dict"):
return JS('__dict[key.__uid__]')
else:
err = True
if __dict and JS("key in __dict"):
return JS('__dict[key]')
else:
err = True
if err:
msg = "missing key: %s -\n" %key
raise KeyError(__dict.keys())
def __setitem__(self, key, value):
with javascript:
if key is undefined:
raise KeyError('undefined is invalid key type')
if key is null:
raise KeyError('null is invalid key type')
__dict = self[...]
if instanceof(key, Array):
#key = JSON.stringify( key ) ## fails on objects with circular references ##
key = __tuple_key__(key)
if key is undefined:
raise KeyError('undefined is invalid key type (tuple)')
inline( '__dict[key] = value')
elif JS("typeof(key) === 'object' || typeof(key) === 'function'"):
if JS("key.__uid__ === undefined"):
# "" is needed so that integers can also be used as keys #
JS(u"key.__uid__ = '' + _PythonJS_UID++")
JS('__dict[key.__uid__] = value')
else:
JS('__dict[key] = value')
def keys(self):
with lowlevel:
return Object.keys( self[...] )
def pop(self, key, d=None):
v = self.get(key, None)
if v is None:
return d
else:
js_object = self[...]
JS("delete js_object[key]")
return v
def values(self):
with javascript:
keys = Object.keys( self[...] )
out = []
for key in keys:
out.push( self[...][key] )
return out
def __contains__(self, value):
try:
self[value]
return True
except:
return False
def __iter__(self):
return Iterator(self.keys(), 0)
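## --- illustrative sketch (plain Python, not part of this runtime; names are hypothetical) ---
## The object-key scheme used by dict.__setitem__/__getitem__ above: a key that
## is not a plain value is branded with a unique string __uid__ on first use,
## and that string is what the backing JavaScript object actually stores.
__uid_counter_sketch = 0
def __uid_key_sketch(store, key, value):
    global __uid_counter_sketch
    if not isinstance(key, (str, int, float, bool)):
        if not hasattr(key, '__uid__'):
            key.__uid__ = str(__uid_counter_sketch)
            __uid_counter_sketch += 1
        key = key.__uid__
    store[key] = value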
def set(a):
'''
This returns an array that is a minimal implementation of set.
Often sets are used simply to remove duplicate entries from a list
and then get converted back to a list; it is safe to use fastset for this.
The array prototype is overloaded with basic set functions:
	difference
	intersection
	issubset
Note: sets in Python are not subscriptable, but they can be iterated over.
The Python docs say that sets are unordered; some programs may rely on this
disorder for randomness. For sets of integers we emulate the disorder only
upon initialization of the set, by masking each value with bits-1. Python
implements sets starting with an array of length 8 and a mask of 7; if the
set length grows to 6 (3/4ths full), it allocates a new array of length 32
with a mask of 31. This is only emulated for arrays of integers up to an
array length of 1536.
'''
with javascript:
hashtable = null
if a.length <= 1536:
hashtable = {}
keys = []
if a.length < 6: ## hash array length 8
mask = 7
elif a.length < 22: ## 32
mask = 31
elif a.length < 86: ## 128
mask = 127
elif a.length < 342: ## 512
mask = 511
else: ## 2048
mask = 2047
fallback = False
if hashtable:
for b in a:
if typeof(b)=='number' and b is (b|0): ## set if integer
key = b & mask
hashtable[ key ] = b
keys.push( key )
else:
fallback = True
break
else:
fallback = True
s = []
if fallback:
for item in a:
if s.indexOf(item) == -1:
s.push( item )
else:
keys.sort()
for key in keys:
s.push( hashtable[key] )
return s
def frozenset(a):
return set(a)
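## --- illustrative sketch (plain Python, not part of this runtime; names are hypothetical) ---
## What the fallback branch of set() above computes: deduplication that keeps
## first-seen order.  The hashtable branch instead orders small integer sets by
## their `value & mask` bucket, emulating the apparent disorder of CPython sets.
def __set_sketch(a):
    out = []
    for item in a:
        if item not in out:
            out.append(item)
    return out
## __set_sketch([3, 1, 2, 1])  ->  [3, 1, 2]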
class array:
## note that class-level dicts can only be used after the dict class has been defined above,
## however, we can still not rely on using a dict here because dict creation relies on get_attribute,
## and get_attribute relies on __NODEJS__ global variable to be set to False when inside NodeJS,
## to be safe this is changed to use JSObjects
with javascript:
typecodes = {
'c': 1, # char
'b': 1, # signed char
'B': 1, # unsigned char
'u': 2, # unicode
'h': 2, # signed short
'H': 2, # unsigned short
'i': 4, # signed int
'I': 4, # unsigned int
'l': 4, # signed long
'L': 4, # unsigned long
'f': 4, # float
'd': 8, # double
'float32':4,
'float16':2,
'float8' :1,
'int32' :4,
'uint32' :4,
'int16' :2,
'uint16' :2,
'int8' :1,
'uint8' :1,
}
typecode_names = {
'c': 'Int8',
'b': 'Int8',
'B': 'Uint8',
'u': 'Uint16',
'h': 'Int16',
'H': 'Uint16',
'i': 'Int32',
'I': 'Uint32',
#'l': 'TODO',
#'L': 'TODO',
'f': 'Float32',
'd': 'Float64',
'float32': 'Float32',
'float16': 'Int16',
'float8' : 'Int8',
'int32' : 'Int32',
'uint32' : 'Uint32',
'int16' : 'Int16',
'uint16' : 'Uint16',
'int8' : 'Int8',
'uint8' : 'Uint8',
}
def __init__(self, typecode, initializer=None, little_endian=False):
self.typecode = typecode
self.itemsize = self.typecodes[ typecode ]
self.little_endian = little_endian
if initializer:
self.length = len(initializer)
self.bytes = self.length * self.itemsize
if self.typecode == 'float8':
self._scale = max( [abs(min(initializer)), max(initializer)] )
self._norm_get = self._scale / 127 ## half 8bits-1
self._norm_set = 1.0 / self._norm_get
elif self.typecode == 'float16':
self._scale = max( [abs(min(initializer)), max(initializer)] )
self._norm_get = self._scale / 32767 ## half 16bits-1
self._norm_set = 1.0 / self._norm_get
else:
self.length = 0
self.bytes = 0
size = self.bytes
buff = JS('new ArrayBuffer(size)')
self.dataview = JS('new DataView(buff)')
self.buffer = buff
self.fromlist( initializer )
def __len__(self):
return self.length
def __contains__(self, value):
#lst = self.to_list()
#return value in lst ## this old style is deprecated
arr = self.to_array()
with javascript:
if arr.indexOf(value) == -1: return False
else: return True
def __getitem__(self, index):
step = self.itemsize
offset = step * index
dataview = self.dataview
func_name = 'get'+self.typecode_names[ self.typecode ]
func = JS('dataview[func_name].bind(dataview)')
if offset < self.bytes:
value = JS('func(offset)')
if self.typecode == 'float8':
value = value * self._norm_get
elif self.typecode == 'float16':
value = value * self._norm_get
return value
else:
raise IndexError(index)
def __setitem__(self, index, value):
step = self.itemsize
if index < 0: index = self.length + index -1 ## TODO fixme
offset = step * index
dataview = self.dataview
func_name = 'set'+self.typecode_names[ self.typecode ]
func = JS('dataview[func_name].bind(dataview)')
if offset < self.bytes:
if self.typecode == 'float8':
value = value * self._norm_set
elif self.typecode == 'float16':
value = value * self._norm_set
JS('func(offset, value)')
else:
raise IndexError(index)
def __iter__(self):
return Iterator(self, 0)
def get(self, index):
return self[ index ]
def fromlist(self, lst):
length = len(lst)
step = self.itemsize
typecode = self.typecode
size = length * step
dataview = self.dataview
func_name = 'set'+self.typecode_names[ typecode ]
func = JS('dataview[func_name].bind(dataview)')
if size <= self.bytes:
i = 0; offset = 0
while i < length:
item = lst[i]
if typecode == 'float8':
item *= self._norm_set
elif typecode == 'float16':
item *= self._norm_set
JS('func(offset,item)')
offset += step
i += 1
else:
raise TypeError
def resize(self, length):
buff = self.buffer
source = JS('new Uint8Array(buff)')
new_size = length * self.itemsize
new_buff = JS('new ArrayBuffer(new_size)')
target = JS('new Uint8Array(new_buff)')
JS('target.set(source)')
self.length = length
self.bytes = new_size
self.buffer = new_buff
self.dataview = JS('new DataView(new_buff)')
def append(self, value):
length = self.length
self.resize( self.length + 1 )
self[ length ] = value
def extend(self, lst): ## TODO optimize
for value in lst:
self.append( value )
def to_array(self):
arr = JSArray()
i = 0
while i < self.length:
item = self[i]
JS('arr.push( item )')
i += 1
return arr
def to_list(self):
return self.to_array()
def to_ascii(self):
string = ''
arr = self.to_array()
i = 0; length = arr.length
while i < length:
JS('var num = arr[i]')
JS('var char = String.fromCharCode(num)')
string += char
i += 1
return string
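## --- illustrative sketch (plain Python, not part of this runtime; names are hypothetical) ---
## The 'float8' quantization used by the array class above: values are scaled
## into the signed 8-bit range on write and rescaled on read, so precision is
## relative to the largest magnitude in the initializer (assumed nonzero here;
## rounding stands in for DataView's integer conversion).
def __quantize_float8_sketch(values):
    scale = max(abs(min(values)), max(values))
    norm_get = scale / 127.0                                ## one step of the 8-bit grid
    stored = [int(round(v / norm_get)) for v in values]     ## what lands in the buffer
    return [s * norm_get for s in stored]                   ## what __getitem__ yields
## __quantize_float8_sketch([0.0, 0.5, 1.0])  ->  values snapped to multiples of 1/127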
## file IO ##
class file:
'''
TODO, support multiple read/writes. Currently this just reads all data,
and writes all data.
'''
def __init__(self, path, flags):
self.path = path
if flags == 'rb':
self.flags = 'r'
self.binary = True
elif flags == 'wb':
self.flags = 'w'
self.binary = True
else:
self.flags = flags
self.binary = False
self.flags = flags
def read(self, binary=False):
_fs = require('fs')
path = self.path
with javascript:
if binary or self.binary:
return _fs.readFileSync( path, encoding=None )
else:
return _fs.readFileSync( path, {'encoding':'utf8'} )
def write(self, data, binary=False):
_fs = require('fs')
path = self.path
with javascript:
if binary or self.binary:
binary = binary or self.binary
if binary == 'base64': ## TODO: fixme, something bad in this if test
#print('write base64 data')
buff = new( Buffer(data, 'base64') )
_fs.writeFileSync( path, buff, {'encoding':None})
else:
#print('write binary data')
#print(binary)
_fs.writeFileSync( path, data, {'encoding':None})
else:
#print('write utf8 data')
_fs.writeFileSync( path, data, {'encoding':'utf8'} )
def close(self):
pass
def __open__( path, mode=None): ## this can not be named `open` because it replaces `window.open`
return file( path, mode )
with javascript:
## mini json library ##
json = {
'loads': lambda s: JSON.parse(s),
'dumps': lambda o: JSON.stringify(o)
}
def __get_other_workers_with_shared_arg( worker, ob ):
a = []
for b in threading.workers:
other = b['worker']
args = b['args']
if other is not worker:
for arg in args:
if arg is ob:
if other not in a:
a.append( other )
return a
threading = {'workers': [], '_blocking_callback':None }
def __start_new_thread(f, args):
worker = new(Worker(f))
worker.__uid__ = len( threading.workers )
threading.workers.append( {'worker':worker,'args':args} )
def func(event):
#print('got signal from thread')
#print(event.data)
if event.data.type == 'terminate':
worker.terminate()
elif event.data.type == 'call':
res = __module__[ event.data.function ].apply(null, event.data.args)
if res is not None and res is not undefined:
worker.postMessage({'type':'return_to_blocking_callback', 'result':res})
elif event.data.type == 'append':
#print('got append event')
a = args[ event.data.argindex ]
a.push( event.data.value )
for other in __get_other_workers_with_shared_arg(worker, a):
other.postMessage( {'type':'append', 'argindex':event.data.argindex, 'value':event.data.value} )
elif event.data.type == '__setitem__':
#print('got __setitem__ event')
a = args[ event.data.argindex ]
value = event.data.value
if a.__setitem__:
a.__setitem__(event.data.index, value)
else:
a[event.data.index] = value
for other in __get_other_workers_with_shared_arg(worker, a):
#print('relay __setitem__')
other.postMessage( {'type':'__setitem__', 'argindex':event.data.argindex, 'key':event.data.index, 'value':event.data.value} )
else:
raise RuntimeError('unknown event')
worker.onmessage = func
jsargs = []
for i,arg in enumerate(args):
if arg.jsify:
jsargs.append( arg.jsify() )
else:
jsargs.append( arg )
if instanceof(arg, Array):
__gen_worker_append(worker, arg, i)
worker.postMessage( {'type':'execute', 'args':jsargs} )
return worker
def __gen_worker_append(worker, ob, index):
def append(item):
#print('posting to thread - append')
worker.postMessage( {'type':'append', 'argindex':index, 'value':item} )
ob.push( item )
Object.defineProperty(ob, "append", {'enumerable':False, 'value':append, 'writeable':True, 'configurable':True})
######## webworker client #########
def __webworker_wrap(ob, argindex):
if instanceof(ob, Array):
#ob.__argindex__ = argindex
def func(index, item):
#print('posting to parent setitem')
postMessage({'type':'__setitem__', 'index':index, 'value':item, 'argindex':argindex})
Array.prototype.__setitem__.call(ob, index, item)
## this can raise RangeError recursive overflow if the worker entry point is a recursive function
Object.defineProperty(ob, "__setitem__", {"enumerable":False, "value":func, "writeable":True, "configurable":True})
#ob.__setitem__ =func
def func(item):
#print('posting to parent append')
postMessage({'type':'append', 'value':item, 'argindex':argindex})
Array.prototype.push.call(ob, item)
Object.defineProperty(ob, "append", {"enumerable":False, "value":func, "writeable":True, "configurable":True})
#ob.append = func
elif typeof(ob) == 'object':
def func(key, item):
#print('posting to parent setitem object')
postMessage({'type':'__setitem__', 'index':key, 'value':item, 'argindex':argindex})
ob[ key ] = item
#ob.__setitem__ = func
Object.defineProperty(ob, "__setitem__", {"enumerable":False, "value":func, "writeable":True, "configurable":True})
return ob
######### simple RPC API #########
def __rpc__( url, func, args):
req = new( XMLHttpRequest() )
req.open('POST', url, False) ## false is sync
req.setRequestHeader("Content-Type", "application/json;charset=UTF-8")
req.send( JSON.stringify({'call':func, 'args':args}) )
return JSON.parse( req.responseText )
def __rpc_iter__( url, attr):
req = new( XMLHttpRequest() )
req.open('POST', url, False) ## false is sync
req.setRequestHeader("Content-Type", "application/json;charset=UTF-8")
req.send( JSON.stringify({'iter':attr}) )
return JSON.parse( req.responseText )
def __rpc_set__( url, attr, value):
req = new( XMLHttpRequest() )
req.open('POST', url, False) ## false is sync
req.setRequestHeader("Content-Type", "application/json;charset=UTF-8")
req.send( JSON.stringify({'set':attr, 'value':value}) )
def __rpc_get__( url, attr):
req = new( XMLHttpRequest() )
req.open('POST', url, False) ## false is sync
req.setRequestHeader("Content-Type", "application/json;charset=UTF-8")
req.send( JSON.stringify({'get':attr}) )
return JSON.parse( req.responseText )
---|---|---|---|---
tools/perf/contrib/cluster_telemetry/rasterize_and_record_micro_ct.py | zealoussnow/chromium | 14,668 | 47716
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from contrib.cluster_telemetry import ct_benchmarks_util
from contrib.cluster_telemetry import page_set
from contrib.cluster_telemetry import repaint_helpers
from benchmarks import rasterize_and_record_micro
# pylint: disable=protected-access
class RasterizeAndRecordMicroCT(
rasterize_and_record_micro._RasterizeAndRecordMicro):
"""Measures rasterize and record performance for Cluster Telemetry."""
@classmethod
def Name(cls):
return 'rasterize_and_record_micro_ct'
@classmethod
def AddBenchmarkCommandLineArgs(cls, parser):
(rasterize_and_record_micro._RasterizeAndRecordMicro.
AddBenchmarkCommandLineArgs(parser))
ct_benchmarks_util.AddBenchmarkCommandLineArgs(parser)
@classmethod
def ProcessCommandLineArgs(cls, parser, args):
ct_benchmarks_util.ValidateCommandLineArgs(parser, args)
def CreateStorySet(self, options):
return page_set.CTPageSet(
options.urls_list, options.user_agent, options.archive_data_file,
run_page_interaction_callback=repaint_helpers.WaitThenRepaint)
---|---|---|---|---
tool/rst2html_pygments.py | Tonhoko/cmemo | 138 | 47722
#!/usr/bin/python
# :Author: <NAME>, the Pygments team, <NAME>
# :Date: $Date: $
# :Copyright: This module has been placed in the public domain.
# This is a merge of the docutils_ `rst2html` front end with an extension
# suggestion taken from the pygments_ documentation.
"""
A front end to docutils, producing HTML with syntax colouring using pygments
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates (X)HTML documents from standalone reStructuredText '
'sources. Uses `pygments` to colorize the content of '
'"code-block" directives. Needs an adapted stylesheet. '
+ default_description)
# Define a new directive `code-block` that uses the `pygments` source
# highlighter to render code in color.
#
# Code from the `pygments`_ documentation for `Using Pygments in ReST
# documents`_.
from docutils import nodes
from docutils.parsers.rst import directives
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import HtmlFormatter
pygments_formatter = HtmlFormatter()
def pygments_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
try:
lexer = get_lexer_by_name(arguments[0])
except ValueError:
# no lexer found - use the text one instead of an exception
lexer = get_lexer_by_name('text')
parsed = highlight(u'\n'.join(content), lexer, pygments_formatter)
return [nodes.raw('', parsed, format='html')]
pygments_directive.arguments = (1, 0, 1)
pygments_directive.content = 1
directives.register_directive('code-block', pygments_directive)
# Call the docutils publisher to render the input as html::
publish_cmdline(writer_name='html', description=description)
# .. _docutils: http://docutils.sf.net/
# .. _pygments: http://pygments.org/
# .. _Using Pygments in ReST documents: http://pygments.org/docs/rstdirective/
---|---|---|---|---
src/sites/nefilim.py | LAC-Japan/ransomwatch | 244 | 47732
from datetime import datetime
import logging
from bs4 import BeautifulSoup
from db.models import Victim
from net.proxy import Proxy
from .sitecrawler import SiteCrawler
import time
class Nefilim(SiteCrawler):
actor = "Nefilim"
def _handle_page(self, soup):
victim_list = soup.find_all("header", class_="entry-header")
for victim in victim_list:
victim_title = victim.find("h2", class_="entry-title").text.strip()
victim_name = victim_title[0:victim_title.find(". Part")]
meta = victim.find("div", class_="entry-meta")
published = meta.find("time", class_="entry-date").attrs["datetime"]
published_dt = datetime.strptime(
published.strip()[:-6], "%Y-%m-%dT%H:%M:%S")
victim_leak_site = meta.find("span", class_="posted-on").find("a").attrs["href"]
q = self.session.query(Victim).filter_by(
url=victim_leak_site, site=self.site)
if q.count() == 0:
# new victim
v = Victim(name=victim_name, url=victim_leak_site, published=published_dt,
first_seen=datetime.utcnow(), last_seen=datetime.utcnow(), site=self.site)
self.session.add(v)
self.new_victims.append(v)
else:
# already seen, update last_seen
v = q.first()
v.last_seen = datetime.utcnow()
self.current_victims.append(v)
self.session.commit()
# the server was timing out, so slow it down a bit
time.sleep(1.0)
def scrape_victims(self):
with Proxy() as p:
r = p.get(f"{self.url}", headers=self.headers)
soup = BeautifulSoup(r.content.decode(), "html.parser")
page_count = 0
while True:
page_nav = soup.find("div", class_="nav-previous")
if page_nav is None:
break
url = page_nav.find("a").attrs["href"]
r = p.get(f"{url}", headers=self.headers)
soup = BeautifulSoup(r.content.decode(), "html.parser")
self._handle_page(soup)
---|---|---|---|---
packages/pyright-internal/src/tests/samples/properties3.py | Jasha10/pyright | 3,934 | 47744
# This sample tests the type checker's ability to handle
# custom subclasses of property.
from typing import Any, Callable
class custom_property1(property):
pass
class Custom1(object):
@custom_property1
def x(self) -> int:
return 3
@custom_property1
def y(self) -> float:
return 3.5
@y.setter
def y(self, val: float):
pass
@y.deleter
def y(self):
pass
m1 = Custom1()
a1: int = m1.x
# This should generate an error because m.x is
# an int and cannot be assigned to str.
b1: str = m1.x
c1: float = m1.y
# This should generate an error because m.y is
# a float and cannot be assigned to int.
d1: int = m1.y
# This should generate an error because there
# is no setter for x.
m1.x = 4
m1.y = 4
# This should generate an error because there is
# no deleter for x.
del m1.x
del m1.y
class custom_property2(property):
_custom_func: Callable[..., Any] | None
def custom_function(self, _custom_func: Callable[..., Any]):
self._custom_func = _custom_func
return self
class Custom2(object):
@custom_property2
def x(self) -> int:
return 3
@custom_property2
def y(self) -> float:
return 3.5
@y.setter
def y(self, val: float):
pass
@y.deleter
def y(self):
pass
@y.custom_function
def y(self):
pass
m2 = Custom2()
a2 = m2.y
reveal_type(a2, expected_text="float")
m2.y = 4
del m2.y
---|---|---|---|---
.modules/.recon-ng/modules/reporting/pushpin.py | termux-one/EasY_HaCk | 1,103 | 47770
from recon.core.module import BaseModule
import codecs
import os
import re
import time
import webbrowser
class Module(BaseModule):
meta = {
'name': 'PushPin Report Generator',
'author': '<NAME> (@LaNMaSteR53)',
'description': 'Creates HTML media and map reports for all of the PushPins stored in the database.',
'options': (
('latitude', None, True, 'latitude of the epicenter'),
('longitude', None, True, 'longitude of the epicenter'),
('radius', None, True, 'radius from the epicenter in kilometers'),
('map_filename', os.path.join(BaseModule.workspace, 'pushpin_map.html'), True, 'path and filename for pushpin map report'),
('media_filename', os.path.join(BaseModule.workspace, 'pushpin_media.html'), True, 'path and filename for pushpin media report'),
),
}
def remove_nl(self, x, repl=''):
return re.sub('[\r\n]+', repl, self.html_escape(x))
def build_content(self, sources):
icons = {
'flickr': 'http://maps.google.com/mapfiles/ms/icons/orange-dot.png',
'instagram': 'http://maps.google.com/mapfiles/ms/icons/pink-dot.png',
'picasa': 'http://maps.google.com/mapfiles/ms/icons/purple-dot.png',
'shodan': 'http://maps.google.com/mapfiles/ms/icons/yellow-dot.png',
'twitter': 'http://maps.google.com/mapfiles/ms/icons/blue-dot.png',
'youtube': 'http://maps.google.com/mapfiles/ms/icons/red-dot.png',
}
media_content = ''
map_content = ''
map_arrays = ''
map_checkboxes = ''
for source in sources:
count = source[0]
source = source[1]
map_arrays += 'var %s = [];\n' % (source.lower())
map_checkboxes += '<input type="checkbox" id="%s" onchange="toggleMarkers(\'%s\');" checked="checked"/>%s<br />\n' % (source.lower(), source.lower(), source)
media_content += '<div class="media_column %s">\n<div class="media_header"><div class="media_summary">%s</div>%s</div>\n' % (source.lower(), count, source.capitalize())
items = self.query('SELECT * FROM pushpins WHERE source=?', (source,))
items.sort(key=lambda x: x[9], reverse=True)
for item in items:
item = [self.to_unicode_str(x) if x != None else u'' for x in item]
media_content += '<div class="media_row"><div class="prof_cell"><a href="%s" target="_blank"><img class="prof_img rounded" src="%s" /></a></div><div class="data_cell"><div class="trigger" id="trigger" lat="%s" lon="%s">[<a href="%s" target="_blank">%s</a>] %s<br /><span class="time">%s</span></div></div></div>\n' % (item[4], item[5], item[7], item[8], item[3], item[2], self.remove_nl(item[6], '<br />'), item[9])
map_details = "<table><tr><td class='prof_cell'><a href='%s' target='_blank'><img class='prof_img rounded' src='%s' /></a></td><td class='data_cell'>[<a href='%s' target='_blank'>%s</a>] %s<br /><span class='time'>%s</span></td></tr></table>" % (item[4], item[5], item[3], self.remove_nl(item[2]), self.remove_nl(item[6], '<br />'), item[9])
map_content += 'add_marker({position: new google.maps.LatLng(%s,%s),title:"%s",icon:"%s",map:map},{details:"%s"}, "%s");\n' % (item[7], item[8], self.remove_nl(item[2]), icons[source.lower()], map_details, source.lower())
media_content += '</div>\n'
return (media_content,), (map_content, map_arrays, map_checkboxes)
def write_markup(self, template, filename, content):
temp_content = open(template).read()
page = temp_content % content
with codecs.open(filename, 'wb', 'utf-8') as fp:
fp.write(page)
def module_run(self):
sources = self.query('SELECT COUNT(source), source FROM pushpins GROUP BY source')
media_content, map_content = self.build_content(sources)
meta_content = (self.options['latitude'], self.options['longitude'], self.options['radius'])
# create the media report
media_content = meta_content + media_content
media_filename = self.options['media_filename']
self.write_markup(os.path.join(self.data_path, 'template_media.html'), media_filename, media_content)
self.output('Media data written to \'%s\'' % (media_filename))
# order the map_content tuple
map_content = meta_content + map_content
order=[4,0,1,2,3,5]
map_content = tuple([map_content[i] for i in order])
# create the map report
map_filename = self.options['map_filename']
self.write_markup(os.path.join(self.data_path, 'template_map.html'), map_filename, map_content)
self.output('Mapping data written to \'%s\'' % (map_filename))
# open the reports in a browser
w = webbrowser.get()
w.open(media_filename)
time.sleep(2)
w.open(map_filename)
|
PyFunceble/checker/syntax/domain_base.py
|
Centaurioun/PyFunceble
| 213 |
47784
|
<reponame>Centaurioun/PyFunceble
"""
The tool to check the availability or syntax of domain, IP or URL.
::
██████╗ ██╗ ██╗███████╗██╗ ██╗███╗ ██╗ ██████╗███████╗██████╗ ██╗ ███████╗
██╔══██╗╚██╗ ██╔╝██╔════╝██║ ██║████╗ ██║██╔════╝██╔════╝██╔══██╗██║ ██╔════╝
██████╔╝ ╚████╔╝ █████╗ ██║ ██║██╔██╗ ██║██║ █████╗ ██████╔╝██║ █████╗
██╔═══╝ ╚██╔╝ ██╔══╝ ██║ ██║██║╚██╗██║██║ ██╔══╝ ██╔══██╗██║ ██╔══╝
██║ ██║ ██║ ╚██████╔╝██║ ╚████║╚██████╗███████╗██████╔╝███████╗███████╗
╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝╚══════╝╚═════╝ ╚══════╝╚══════╝
Provides the base of all domain syntax checkers.
Author:
<NAME>, @funilrys, contactTATAfunilrysTODTODcom
Special thanks:
https://pyfunceble.github.io/#/special-thanks
Contributors:
https://pyfunceble.github.io/#/contributors
Project link:
https://github.com/funilrys/PyFunceble
Project documentation:
https://pyfunceble.readthedocs.io/en/dev/
Project homepage:
https://pyfunceble.github.io/
License:
::
Copyright 2017, 2018, 2019, 2020, 2021 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import functools
from typing import Optional, Tuple
from PyFunceble.checker.base import CheckerBase
from PyFunceble.dataset.iana import IanaDataset
from PyFunceble.dataset.public_suffix import PublicSuffixDataset
class DomainSyntaxCheckerBase(CheckerBase):
"""
    Provides an interface to check the syntax of a second-level domain.
:param str subject:
Optional, The subject to work with.
"""
# pylint: disable=line-too-long
SPECIAL_USE_DOMAIN_NAMES_EXTENSIONS = ["onion"]
"""
    Specifies the extensions which are specified as "Special-Use Domain Names"
and supported by our project.
:type: list
.. seealso::
* `RFC6761`_
* `IANA Special-Use Domain Names`_ assignments.
* `RFC7686`_
.. _RFC6761: https://tools.ietf.org/html/rfc6761
    .. _RFC7686: https://tools.ietf.org/html/rfc7686
.. _IANA Special-Use Domain Names: https://www.iana.org/assignments/special-use-domain-names/special-use-domain-names.txt
"""
last_point_index: Optional[int] = None
"""
Saves the index of the last point.
"""
iana_dataset: Optional[IanaDataset] = None
public_suffix_dataset: Optional[PublicSuffixDataset] = None
def __init__(self, subject: Optional[str] = None) -> None:
self.iana_dataset = IanaDataset()
self.public_suffix_dataset = PublicSuffixDataset()
super().__init__(subject)
def reset_last_point_index(func): # pylint: disable=no-self-argument
"""
Resets the last point index before executing the decorated method.
"""
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
self.last_point_index = None
return func(self, *args, **kwargs) # pylint: disable=not-callable
return wrapper
def find_last_point_index(func): # pylint: disable=no-self-argument
"""
        Tries to find the index of the last point after the execution of the
decorated method.
"""
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
result = func(self, *args, **kwargs) # pylint: disable=not-callable
self.last_point_index = self.get_last_point_index(self.idna_subject)
return result
return wrapper
@CheckerBase.subject.setter
@reset_last_point_index
@find_last_point_index
def subject(self, value: str):
"""
Sets the subject to work with.
:param value:
The subject to set.
:raise TypeError:
When the given :code:`value` is not a :py:class:`str`.
:raise ValueError:
When the given :code:`value` is empty.
"""
# pylint: disable=no-member
super(DomainSyntaxCheckerBase, self.__class__).subject.fset(self, value)
@staticmethod
def get_last_point_index(subject: str) -> Optional[int]:
"""
Provides the index of the last point of the given subject.
"""
try:
if subject.endswith("."):
return subject[:-1].rfind(".")
return subject.rindex(".")
except ValueError:
return None
def get_subject_without_suffix(
self, subject: str, extension: str
) -> Optional[Tuple[Optional[int], Optional[str]]]:
"""
Provides the given subject without the suffix.
:param subject:
The subject to work with.
:param extension:
The extension previously extracted.
"""
if extension in self.public_suffix_dataset:
for suffix in self.public_suffix_dataset.get_available_suffix(extension):
try:
return subject[: subject.rindex(f".{suffix}")], suffix
except ValueError:
continue
return None, None
@CheckerBase.ensure_subject_is_given
def get_extension(self) -> Optional[str]:
"""
        Provides the extension to work with (if it exists).
"""
if self.last_point_index is None:
return None
# Plus one is for the leading point.
extension = self.idna_subject[self.last_point_index + 1 :]
if extension.endswith("."):
extension = extension[:-1]
return extension
def is_valid(self) -> bool:
"""
Validate the given subject.
"""
raise NotImplementedError()
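# Illustrative usage sketch (not part of this module): exercising the static
# helper defined above; the sample hostnames are arbitrary assumptions.
if __name__ == "__main__":
    print(DomainSyntaxCheckerBase.get_last_point_index("example.org"))   # 7
    print(DomainSyntaxCheckerBase.get_last_point_index("example.org."))  # 7
    print(DomainSyntaxCheckerBase.get_last_point_index("localhost"))     # None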
|
envs/hns/mujoco-worldgen/mujoco_worldgen/util/path.py
|
jiayu-ch15/curriculum
| 424 |
47801
|
from os.path import abspath, dirname, join
WORLDGEN_ROOT_PATH = abspath(join(dirname(__file__), '..', '..'))
def worldgen_path(*args):
"""
Returns an absolute path from a path relative to the mujoco_worldgen repository
root directory.
"""
return join(WORLDGEN_ROOT_PATH, *args)
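# Illustrative usage (the relative path below is an assumption, shown only to
# demonstrate how arguments are joined onto the repository root):
if __name__ == "__main__":
    print(WORLDGEN_ROOT_PATH)
    print(worldgen_path("mujoco_worldgen", "util", "path.py"))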
|
tests/assets/sample_tasks/sample.py
|
MarcoJHB/ploomber
| 2,141 |
47815
|
<gh_stars>1000+
# + tags=["parameters"]
1 + 1
|
codesamples/apps.py
|
Manny27nyc/pythondotorg
| 911 |
47838
|
<reponame>Manny27nyc/pythondotorg<filename>codesamples/apps.py
from django.apps import AppConfig
class CodesamplesAppConfig(AppConfig):
name = 'codesamples'
|
desktop/core/ext-py/nose-1.3.7/unit_tests/test_issue_786.py
|
kokosing/hue
| 5,079 |
47846
|
def test_evens():
yield check_even_cls
class Test(object):
def test_evens(self):
yield check_even_cls
class Check(object):
def __call__(self):
pass
check_even_cls = Check()
|
botfw/bybit/order.py
|
lzpel/btc_bot_framework
| 115 |
47858
|
import time
from ..base import order as od
from .api import BybitApi
class BybitOrderManager(od.OrderManagerBase):
def __init__(self, api, ws=None, retention=60):
super().__init__(api, ws, retention)
self.ws.subscribe('execution', self.__on_events, True)
self.ws.subscribe('position', self.__on_events, True)
self.ws.subscribe('order', self.__on_events, True)
def _generate_order_object(self, e):
info = e.info
if e.type != od.EVENT_OPEN:
self.log.warning(f'event for unknown order: {e}')
return None
api = BybitApi.ccxt_instance()
symbol = api.markets_by_id[info['symbol']]['symbol']
return od.Order(
symbol, info['order_type'].lower(), info['side'].lower(),
info['qty'], float(info['price']))
def __on_events(self, msg):
topic = msg['topic']
for e in msg['data']:
oe = od.OrderEvent()
oe.info = e
oe.ts = time.time()
if topic == 'order':
oe.id = e['order_id']
st = e['order_status']
if st == 'New':
oe.type = od.EVENT_OPEN
elif st == 'Filled':
oe.type = od.EVENT_CLOSE
elif st in ['Cancelled', 'Rejected']:
oe.type = od.EVENT_CANCEL
else: # ignore(PartiallyFilled, Created, PendingCancel)
continue
elif topic == 'execution':
oe.type = od.EVENT_EXECUTION
oe.id = e['order_id']
oe.price = float(e['price'])
size = e['exec_qty']
oe.size = -size if e['side'] == 'Sell' else size
oe.fee = float(e['exec_fee']) * size
elif topic == 'position':
break
else:
assert False
self._handle_order_event(oe)
class BybitPositionGroup(od.PositionGroupBase):
INVERSE = True
class BybitOrderGroup(od.OrderGroupBase):
PositionGroup = BybitPositionGroup
class BybitOrderGroupManager(od.OrderGroupManagerBase):
OrderGroup = BybitOrderGroup
# Future
class BybitUsdtOrderManager(BybitOrderManager):
pass
class BybitUsdtPositionGroup(BybitPositionGroup):
INVERSE = False
class BybitUsdtOrderGroup(BybitOrderGroup):
PositionGroup = BybitUsdtPositionGroup
class BybitUsdtOrderGroupManager(BybitOrderGroupManager):
OrderGroup = BybitUsdtOrderGroup
|
river/optim/base.py
|
online-ml/creme
| 1,105 |
47902
|
import abc
import numbers
from typing import Union
import numpy as np
from river import base, optim, utils
VectorLike = Union[utils.VectorDict, np.ndarray]
__all__ = ["Initializer", "Scheduler", "Optimizer", "Loss"]
class Initializer(base.Base, abc.ABC):
"""An initializer is used to set initial weights in a model."""
@abc.abstractmethod
def __call__(self, shape=1):
"""Returns a fresh set of weights.
Parameters
----------
shape
Indicates how many weights to return. If `1`, then a single scalar value will be
returned.
"""
class Scheduler(base.Base, abc.ABC):
"""Can be used to program the learning rate schedule of an `optim.base.Optimizer`."""
@abc.abstractmethod
def get(self, t: int) -> float:
"""Returns the learning rate at a given iteration.
Parameters
----------
t
The iteration number.
"""
def __repr__(self):
return f"{self.__class__.__name__}({vars(self)})"
class Optimizer(base.Base):
"""Optimizer interface.
Every optimizer inherits from this base interface.
Parameters
----------
lr
Attributes
----------
learning_rate : float
Returns the current learning rate value.
"""
def __init__(self, lr: Union[Scheduler, float]):
if isinstance(lr, numbers.Number):
lr = optim.schedulers.Constant(lr)
self.lr = lr
self.n_iterations = 0
@property
def learning_rate(self) -> float:
return self.lr.get(self.n_iterations)
def look_ahead(self, w: dict) -> dict:
"""Updates a weight vector before a prediction is made.
        Parameters
        ----------
        w
            A dictionary of weight parameters. The weights are modified in-place.
        Returns
        -------
        The updated weights.
"""
return w
def _step_with_dict(self, w: dict, g: dict) -> dict:
raise NotImplementedError
def _step_with_vector(self, w: VectorLike, g: VectorLike) -> VectorLike:
raise NotImplementedError
def step(
self, w: Union[dict, VectorLike], g: Union[dict, VectorLike]
) -> Union[dict, VectorLike]:
"""Updates a weight vector given a gradient.
Parameters
----------
w
A vector-like object containing weights. The weights are modified in-place.
g
A vector-like object of gradients.
Returns
-------
The updated weights.
"""
if isinstance(w, VectorLike.__args__) and isinstance(g, VectorLike.__args__):
try:
w = self._step_with_vector(w, g)
self.n_iterations += 1
return w
except NotImplementedError:
pass
w = self._step_with_dict(w, g)
self.n_iterations += 1
return w
def __repr__(self):
return f"{self.__class__.__name__}({vars(self)})"
class Loss(base.Base, abc.ABC):
"""Base class for all loss functions."""
def __repr__(self):
return f"{self.__class__.__name__}({vars(self)})"
@abc.abstractmethod
def __call__(self, y_true, y_pred):
"""Returns the loss.
Parameters
----------
y_true
Ground truth(s).
y_pred
Prediction(s).
Returns
-------
The loss(es).
"""
@abc.abstractmethod
def gradient(self, y_true, y_pred):
"""Return the gradient with respect to y_pred.
Parameters
----------
y_true
Ground truth(s).
y_pred
Prediction(s).
Returns
-------
The gradient(s).
"""
@abc.abstractmethod
def mean_func(self, y_pred):
"""Mean function.
This is the inverse of the link function. Typically, a loss function takes as input the raw
output of a model. In the case of classification, the raw output would be logits. The mean
function can be used to convert the raw output into a value that makes sense to the user,
such as a probability.
Parameters
----------
y_pred
Raw prediction(s).
Returns
-------
The adjusted prediction(s).
References
----------
[^1]: [Wikipedia section on link and mean function](https://www.wikiwand.com/en/Generalized_linear_model#/Link_function)
"""
|
Voice Analysis/Python/SVM/AudioSignal.py
|
lokesh9460/Realtime-Interview-Emotion-Analysis
| 574 |
47920
|
import os
import numpy
from pydub import AudioSegment
from scipy.fftpack import fft
class AudioSignal(object):
def __init__(self, sample_rate, signal=None, filename=None):
# Set sample rate
self._sample_rate = sample_rate
if signal is None:
# Get file name and file extension
file, file_extension = os.path.splitext(filename)
            # Check if file extension is an audio format
if file_extension in ['.mp3', '.wav']:
# Read audio file
self._signal = self.read_audio_file(filename)
            # Check if file extension is a video format
            elif file_extension in ['.mp4', '.mkv', '.avi']:
# Extract audio from video
new_filename = self.extract_audio_from_video(filename)
# read audio file from extracted audio file
self._signal = self.read_audio_file(new_filename)
# Case file extension is not supported
else:
print("Error: file not found or file extension not supported.")
elif filename is None:
# Cast signal to array
self._signal = signal
else:
print("Error : argument missing in AudioSignal() constructor.")
'''
Function to extract audio from a video
'''
def extract_audio_from_video(self, filename):
# Get video file name and extension
file, file_extension = os.path.splitext(filename)
# Extract audio (.wav) from video
os.system('ffmpeg -i ' + file + file_extension + ' ' + '-ar ' + str(self._sample_rate) + ' ' + file + '.wav')
print("Sucessfully converted {} into audio!".format(filename))
# Return audio file name created
return file + '.wav'
'''
Function to read audio file and to return audio samples of a specified WAV file
'''
def read_audio_file(self, filename):
# Get audio signal
audio_file = AudioSegment.from_file(filename)
# Resample audio signal
audio_file = audio_file.set_frame_rate(self._sample_rate)
# Cast to integer
if audio_file.sample_width == 2:
            data = numpy.frombuffer(audio_file._data, numpy.int16)
        elif audio_file.sample_width == 4:
            data = numpy.frombuffer(audio_file._data, numpy.int32)
# Merge audio channels
audio_signal = []
for chn in list(range(audio_file.channels)):
audio_signal.append(data[chn::audio_file.channels])
audio_signal = numpy.array(audio_signal).T
# Flat signals
if audio_signal.ndim == 2:
if audio_signal.shape[1] == 1:
audio_signal = audio_signal.flatten()
# Convert stereo to mono
audio_signal = self.stereo_to_mono(audio_signal)
# Return sample rate and audio signal
return audio_signal
'''
Function to convert an input signal from stereo to mono
'''
@staticmethod
def stereo_to_mono(audio_signal):
# Check if signal is stereo and convert to mono
if isinstance(audio_signal, int):
return -1
if audio_signal.ndim == 1:
return audio_signal
elif audio_signal.ndim == 2:
if audio_signal.shape[1] == 1:
return audio_signal.flatten()
else:
if audio_signal.shape[1] == 2:
return (audio_signal[:, 1] / 2) + (audio_signal[:, 0] / 2)
else:
return -1
'''
Function to split the input signal into windows of same size
'''
def framing(self, size, step, hamming=False):
# Rescale windows step and size
win_size = int(size * self._sample_rate)
win_step = int(step * self._sample_rate)
# Number of frames
nb_frames = 1 + int((len(self._signal) - win_size) / win_step)
# Build Hamming function
if hamming is True:
ham = numpy.hamming(win_size)
else:
ham = numpy.ones(win_size)
# Split signals (and multiply each windows signals by Hamming functions)
frames = []
for t in range(nb_frames):
sub_signal = AudioSignal(self._sample_rate, signal=self._signal[(t * win_step): (t * win_step + win_size)] * ham)
frames.append(sub_signal)
return frames
'''
Function to compute the magnitude of the Discrete Fourier Transform coefficient
'''
def dft(self, norm=False):
        # Compute the magnitude of the spectrum (and normalize by the number of samples)
if norm is True:
dft = abs(fft(self._signal)) / len(self._signal)
else:
dft = abs(fft(self._signal))
return dft
'''
Function to apply pre-emphasis filter on signal
'''
    def pre_emphasis(self, alpha=0.97):
# Emphasized signal
emphasized_signal = numpy.append(self._signal[0], self._signal[1:] - alpha * self._signal[:-1])
return emphasized_signal
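'''
Illustrative usage (not part of the original file; "speech.wav", the sample
rate and the framing parameters below are assumptions):
    signal = AudioSignal(16000, filename="speech.wav")
    frames = signal.framing(size=0.025, step=0.010, hamming=True)
    spectrum = frames[0].dft(norm=True)
    emphasized = signal.pre_emphasis(alpha=0.97)
'''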
|
bricks/ev3dev/modules/pybricks/tools.py
|
ZPhilo/pybricks-micropython
| 115 |
47922
|
# SPDX-License-Identifier: MIT
# Copyright (c) 2018-2020 The Pybricks Authors
# Expose method and class written in C
from _pybricks.tools import wait, StopWatch
# Imports for DataLog implementation
from utime import localtime, ticks_us
class DataLog:
def __init__(self, *headers, name="log", timestamp=True, extension="csv", append=False):
# Make timestamp of the form yyyy_mm_dd_hh_mm_ss_uuuuuu
if timestamp:
y, mo, d, h, mi, s = localtime()[0:6]
u = ticks_us() % 1000000
stamp = "_{0}_{1:02d}_{2:02d}_{3:02d}_{4:02d}_{5:02d}_{6:06d}".format(
y, mo, d, h, mi, s, u
)
else:
stamp = ""
# File write mode
mode = "a+" if append else "w+"
# Append extension and open
self.file = open("{0}{1}.{2}".format(name, stamp, extension), mode)
# Get length of existing contents
self.file.seek(0, 2)
length = self.file.tell()
# If column headers were given and we are at the start of the file, print headers as first line
if len(headers) > 0 and length == 0:
print(*headers, sep=", ", file=self.file)
def log(self, *values):
print(*values, sep=", ", file=self.file)
def __repr__(self):
self.file.seek(0, 0)
return self.file.read()
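# Illustrative usage (assumes a Pybricks EV3 program; the header names and the
# logged values are arbitrary):
#
#     data = DataLog("time_ms", "reading", name="run")
#     watch = StopWatch()
#     data.log(watch.time(), 42)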
|
tree_math/integration_test.py
|
cgarciae/tree-math
| 108 |
47952
|
<reponame>cgarciae/tree-math
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for tree_math."""
import functools
from absl.testing import absltest
import jax
from jax import lax
import tree_math as tm
from tree_math._src import test_util
import tree_math.numpy as tnp
# pylint: disable=g-complex-comprehension
class TreeMathTest(test_util.TestCase):
def test_norm(self):
@tm.wrap
def norm1(x, y):
return ((x - y) ** 2).sum() ** 0.5
@tm.wrap
def norm2(x, y):
d = x - y
return (d @ d) ** 0.5
x = {"a": 1, "b": 1}
y = {"a": 1 + 3, "b": 1 + 4}
expected = 5.0
actual = norm1(x, y)
self.assertAllClose(actual, expected)
actual = norm2(x, y)
self.assertAllClose(actual, expected)
def test_cg(self):
# an integration test to verify non-trivial examples work
# pylint: disable=invalid-name
@functools.partial(tm.wrap, vector_argnames=["b", "x0"])
def cg(A, b, x0, M=lambda x: x, maxiter=5, tol=1e-5, atol=0.0):
"""jax.scipy.sparse.linalg.cg, written with tree_math."""
A = tm.unwrap(A)
M = tm.unwrap(M)
atol2 = tnp.maximum(tol**2 * (b @ b), atol**2)
def cond_fun(value):
x, r, gamma, p, k = value # pylint: disable=unused-variable
return (r @ r > atol2) & (k < maxiter)
def body_fun(value):
x, r, gamma, p, k = value
Ap = A(p)
alpha = gamma / (p.conj() @ Ap)
x_ = x + alpha * p
r_ = r - alpha * Ap
z_ = M(r_)
gamma_ = r_.conj() @ z_
beta_ = gamma_ / gamma
p_ = z_ + beta_ * p
return x_, r_, gamma_, p_, k + 1
r0 = b - A(x0)
p0 = z0 = M(r0)
gamma0 = r0 @ z0
initial_value = (x0, r0, gamma0, p0, 0)
x_final, *_ = lax.while_loop(cond_fun, body_fun, initial_value)
return x_final
A = lambda x: {"a": x["a"] + 0.5 * x["b"], "b": 0.5 * x["a"] + x["b"]}
b = {"a": 1.0, "b": -1.0}
x0 = {"a": 0.0, "b": 0.0}
actual = cg(A, b, x0)
expected = jax.device_put({"a": 2.0, "b": -2.0})
self.assertTreeAllClose(actual, expected, check_dtypes=True)
if __name__ == "__main__":
absltest.main()
|
tests/python/contrib/test_ethosu/test_remove_concatenates.py
|
jwfromm/relax
| 2,084 |
48009
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import tvm
import tvm.script
from tvm.script import tir
from tvm import relay
from tvm.relay.testing import run_opt_pass
from tvm.relay.backend.contrib.ethosu.tir.compiler import lower_to_tir
from .infra import make_ethosu_conv2d
# fmt: off
@tvm.script.ir_module
class ReferenceModule:
@tir.prim_func
def main(placeholder: tir.handle, placeholder_1: tir.handle, placeholder_2: tir.handle, placeholder_3: tir.handle, placeholder_4: tir.handle, placeholder_5: tir.handle, placeholder_6: tir.handle, placeholder_7: tir.handle, placeholder_8: tir.handle, placeholder_9: tir.handle, T_concat: tir.handle) -> None:
# function attr dict
tir.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer = tir.match_buffer(placeholder_2, [2992], dtype="uint8", elem_offset=0, align=128, offset_factor=1)
buffer_1 = tir.match_buffer(placeholder_4, [2992], dtype="uint8", elem_offset=0, align=128, offset_factor=1)
placeholder_10 = tir.match_buffer(placeholder_1, [1, 8, 10, 16], dtype="int8", elem_offset=0, align=128, offset_factor=1)
buffer_2 = tir.match_buffer(placeholder_9, [160], dtype="uint8", elem_offset=0, align=128, offset_factor=1)
buffer_3 = tir.match_buffer(placeholder_8, [2992], dtype="uint8", elem_offset=0, align=128, offset_factor=1)
buffer_4 = tir.match_buffer(placeholder_5, [160], dtype="uint8", elem_offset=0, align=128, offset_factor=1)
buffer_5 = tir.match_buffer(placeholder_6, [2992], dtype="uint8", elem_offset=0, align=128, offset_factor=1)
T_concat_1 = tir.match_buffer(T_concat, [1, 8, 32, 16], dtype="int8", elem_offset=0, align=128, offset_factor=1)
placeholder_11 = tir.match_buffer(placeholder, [1, 8, 12, 16], dtype="int8", elem_offset=0, align=128, offset_factor=1)
buffer_6 = tir.match_buffer(placeholder_7, [160], dtype="uint8", elem_offset=0, align=128, offset_factor=1)
buffer_7 = tir.match_buffer(placeholder_3, [160], dtype="uint8", elem_offset=0, align=128, offset_factor=1)
# body
T_concat_2 = tir.allocate([2816], "int8", "global", annotations={"disable_lower_builtin": True})
tir.evaluate(tir.call_extern("ethosu_conv2d", "int8", 8, 10, 16, 8, 0, 10, tir.load("int8", placeholder_10.data, 0), 0, 0, 0, tir.float32(0.5), 10, "NHWC", 160, 16, 1, "int8", 8, 10, 16, 8, 0, 10, tir.load("int8", T_concat_2, 192), 0, 0, 0, tir.float32(0.25), 14, "NHWC", 352, 16, 1, 3, 3, 1, 1, 1, 1, tir.load("uint8", buffer_1.data, 0), 2992, 12, tir.load("uint8", buffer_4.data, 0), 160, 1, 1, 1, 1, "NONE", 0, 0, "TFL", "NONE", dtype="handle"))
tir.evaluate(tir.call_extern("ethosu_conv2d", "int8", 8, 10, 16, 8, 0, 10, tir.load("int8", T_concat_2, 192), 0, 0, 0, tir.float32(0.5), 10, "NHWC", 352, 16, 1, "int8", 8, 10, 16, 8, 0, 10, tir.load("int8", T_concat_1.data, 352), 0, 0, 0, tir.float32(0.25), 14, "NHWC", 512, 16, 1, 3, 3, 1, 1, 1, 1, tir.load("uint8", buffer_3.data, 0), 2992, 12, tir.load("uint8", buffer_2.data, 0), 160, 1, 1, 1, 1, "NONE", 0, 0, "TFL", "NONE", dtype="handle"))
tir.evaluate(tir.call_extern("ethosu_conv2d", "int8", 8, 12, 16, 8, 0, 12, tir.load("int8", placeholder_11.data, 0), 0, 0, 0, tir.float32(0.5), 10, "NHWC", 192, 16, 1, "int8", 8, 12, 16, 8, 0, 12, tir.load("int8", T_concat_2, 0), 0, 0, 0, tir.float32(0.25), 14, "NHWC", 352, 16, 1, 3, 3, 1, 1, 1, 1, tir.load("uint8", buffer.data, 0), 2992, 12, tir.load("uint8", buffer_7.data, 0), 160, 1, 1, 1, 1, "NONE", 0, 0, "TFL", "NONE", dtype="handle"))
tir.evaluate(tir.call_extern("ethosu_conv2d", "int8", 8, 22, 16, 8, 0, 22, tir.load("int8", T_concat_2, 0), 0, 0, 0, tir.float32(0.5), 10, "NHWC", 352, 16, 1, "int8", 8, 22, 16, 8, 0, 22, tir.load("int8", T_concat_1.data, 0), 0, 0, 0, tir.float32(0.25), 14, "NHWC", 512, 16, 1, 3, 3, 1, 1, 1, 1, tir.load("uint8", buffer_5.data, 0), 2992, 12, tir.load("uint8", buffer_6.data, 0), 160, 1, 1, 1, 1, "NONE", 0, 0, "TFL", "NONE", dtype="handle"))
__tvm_meta__ = None
# fmt: on
def test_concat():
def _get_func():
ifm1 = relay.var("ifm1", shape=(1, 8, 12, 16), dtype="int8")
ifm2 = relay.var("ifm2", shape=(1, 8, 10, 16), dtype="int8")
conv1 = make_ethosu_conv2d(ifm1, 16, 16, (3, 3), (1, 1), (1, 1), (1, 1))
conv2 = make_ethosu_conv2d(ifm2, 16, 16, (3, 3), (1, 1), (1, 1), (1, 1))
conc1 = relay.concatenate((conv1, conv2), axis=2)
conv3 = make_ethosu_conv2d(conc1, 16, 16, (3, 3), (1, 1), (1, 1), (1, 1))
conv4 = make_ethosu_conv2d(conv2, 16, 16, (3, 3), (1, 1), (1, 1), (1, 1))
conc2 = relay.concatenate((conv3, conv4), axis=2)
func = relay.Function(relay.analysis.free_vars(conc2), conc2)
func = run_opt_pass(func, relay.transform.InferType())
return func
func = _get_func()
mod, _ = lower_to_tir(func)
script = mod.script(show_meta=True)
test_mod = tvm.script.from_source(script)
reference_mod = ReferenceModule
tvm.ir.assert_structural_equal(test_mod["main"], reference_mod["main"], True)
if __name__ == "__main__":
pytest.main([__file__])
|
examples/simultaneous_translation/utils/monotonic_attention.py
|
Shiguang-Guo/fairseq
| 16,259 |
48021
|
<gh_stars>1000+
from typing import Optional
import torch
from torch import Tensor
from examples.simultaneous_translation.utils.functions import (
exclusive_cumprod,
prob_check,
moving_sum,
)
def expected_alignment_from_p_choose(
p_choose: Tensor,
padding_mask: Optional[Tensor] = None,
eps: float = 1e-6
):
"""
    Calculating expected alignment from stepwise probability
Reference:
Online and Linear-Time Attention by Enforcing Monotonic Alignments
https://arxiv.org/pdf/1704.00784.pdf
    q_{i,j} = (1 − p_{i,j−1}) q_{i,j−1} + a_{i−1,j}
    a_{i,j} = p_{i,j} q_{i,j}
Parallel solution:
    a_i = p_i * cumprod(1 − p_i) * cumsum(a_{i−1} / cumprod(1 − p_i))
============================================================
Expected input size
p_choose: bsz, tgt_len, src_len
"""
prob_check(p_choose)
# p_choose: bsz, tgt_len, src_len
bsz, tgt_len, src_len = p_choose.size()
dtype = p_choose.dtype
p_choose = p_choose.float()
if padding_mask is not None:
p_choose = p_choose.masked_fill(padding_mask.unsqueeze(1), 0.0)
if p_choose.is_cuda:
p_choose = p_choose.contiguous()
from alignment_train_cuda_binding import alignment_train_cuda as alignment_train
else:
from alignment_train_cpu_binding import alignment_train_cpu as alignment_train
alpha = p_choose.new_zeros([bsz, tgt_len, src_len])
alignment_train(p_choose, alpha, eps)
# Mix precision to prevent overflow for fp16
alpha = alpha.type(dtype)
prob_check(alpha)
return alpha
def expected_soft_attention(
alpha: Tensor,
soft_energy: Tensor,
padding_mask: Optional[Tensor] = None,
chunk_size: Optional[int] = None,
eps: float = 1e-10
):
"""
Function to compute expected soft attention for
monotonic infinite lookback attention from
expected alignment and soft energy.
Reference:
Monotonic Chunkwise Attention
https://arxiv.org/abs/1712.05382
Monotonic Infinite Lookback Attention for Simultaneous Machine Translation
https://arxiv.org/abs/1906.05218
alpha: bsz, tgt_len, src_len
soft_energy: bsz, tgt_len, src_len
padding_mask: bsz, src_len
left_padding: bool
"""
if padding_mask is not None:
alpha = alpha.masked_fill(padding_mask.unsqueeze(1), 0.0)
soft_energy = soft_energy.masked_fill(
padding_mask.unsqueeze(1), -float("inf")
)
prob_check(alpha)
dtype = alpha.dtype
alpha = alpha.float()
soft_energy = soft_energy.float()
soft_energy = soft_energy - soft_energy.max(dim=2, keepdim=True)[0]
exp_soft_energy = torch.exp(soft_energy) + eps
if chunk_size is not None:
# Chunkwise
beta = (
exp_soft_energy
* moving_sum(
alpha / (eps + moving_sum(exp_soft_energy, chunk_size, 1)),
1, chunk_size
)
)
else:
# Infinite lookback
# Notice that infinite lookback is a special case of chunkwise
# where chunksize = inf
inner_items = alpha / (eps + torch.cumsum(exp_soft_energy, dim=2))
beta = (
exp_soft_energy
* torch.cumsum(inner_items.flip(dims=[2]), dim=2)
.flip(dims=[2])
)
if padding_mask is not None:
beta = beta.masked_fill(
padding_mask.unsqueeze(1).to(torch.bool), 0.0)
# Mix precision to prevent overflow for fp16
beta = beta.type(dtype)
beta = beta.clamp(0, 1)
prob_check(beta)
return beta
def mass_preservation(
alpha: Tensor,
padding_mask: Optional[Tensor] = None,
left_padding: bool = False
):
"""
    Function to compute the mass preservation for alpha.
This means that the residual weights of alpha will be assigned
to the last token.
Reference:
Monotonic Infinite Lookback Attention for Simultaneous Machine Translation
https://arxiv.org/abs/1906.05218
alpha: bsz, tgt_len, src_len
padding_mask: bsz, src_len
left_padding: bool
"""
prob_check(alpha)
if padding_mask is not None:
if not left_padding:
assert not padding_mask[:, 0].any(), (
"Find padding on the beginning of the sequence."
)
alpha = alpha.masked_fill(padding_mask.unsqueeze(1), 0.0)
if left_padding or padding_mask is None:
residuals = 1 - alpha[:, :, :-1].sum(dim=-1).clamp(0, 1)
alpha[:, :, -1] = residuals
else:
# right padding
_, tgt_len, src_len = alpha.size()
residuals = 1 - alpha.sum(dim=-1, keepdim=True).clamp(0, 1)
src_lens = src_len - padding_mask.sum(dim=1, keepdim=True)
src_lens = src_lens.expand(-1, tgt_len).contiguous()
# add back the last value
residuals += alpha.gather(2, src_lens.unsqueeze(2) - 1)
alpha = alpha.scatter(2, src_lens.unsqueeze(2) - 1, residuals)
prob_check(alpha)
return alpha
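# Illustrative call sequence (not part of the original module; tensor shapes,
# the energy tensor and the chunk size are assumptions):
#
#     p_choose = torch.rand(bsz, tgt_len, src_len)        # stepwise probabilities
#     alpha = expected_alignment_from_p_choose(p_choose)  # expected alignment
#     beta = expected_soft_attention(alpha, soft_energy, chunk_size=8)
#     alpha = mass_preservation(alpha, padding_mask)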
|
Tuchart3.0 -beta/main.py
|
Seedarchangel/TuChart
| 801 |
48038
|
#-*- coding:utf-8 -*-
from __future__ import print_function
import os,sys,sip,time
from datetime import datetime,timedelta
from qtpy.QtWidgets import QTreeWidgetItem,QMenu,QApplication,QAction,QMainWindow
from qtpy import QtGui,QtWidgets
from qtpy.QtCore import Qt,QUrl,QDate
from Graph import graphpage
from layout import Ui_MainWindow
from pandas import DataFrame as df
import pandas as pd
import tushare as ts
import pickle
import numpy as np
list1 = []
class MyUi(QMainWindow):
def __init__(self):
super(MyUi, self).__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
cwd = os.getcwd()
cwd = str(cwd)
if os.path.isfile(cwd+"/time"):
with open("time","rb") as outfile:#reads current time
history = pickle.load(outfile)
                if (datetime.now()-history).total_seconds()<43200: # use the cached pickle if less than 12 hours have elapsed
print("Less than 12 hours. Loading previously saved Pickle...")
else:
print("More than 12 hours. Updating Pickle...")
data = ts.get_industry_classified()
with open("class","wb+") as outfile:
pickle.dump(data,outfile)
now = datetime.now()
with open("time", "wb+") as outfile: #update time
pickle.dump(now, outfile)
else:
print("No Pickle found!") #If this is first time using tuchart in this directory
data = df()
data = ts.get_industry_classified()
with open('class', 'wb+') as outfile: #records pickle
pickle.dump(data, outfile)
now = datetime.now()
with open("time", "wb+") as outfile:
pickle.dump(now,outfile)
with open("class", "rb") as infile: # reads current time
series = pickle.load(infile)
#series = pd.read_json(cwd + "\\class.json")
#series = ts.get_industry_classified()
series = pd.DataFrame(series)
curdate = time.strftime("%Y/%m/%d") # gets current time to put into dateedit
curdateQ = QDate.fromString(curdate,"yyyy/MM/dd")
dateobj = datetime.strptime(curdate, "%Y/%m/%d")#converts to datetime object
past = dateobj - timedelta(days = 7) #minus a week to start date
pasttime = datetime.strftime(past, "%Y/%m/%d")
pastQ = QDate.fromString(pasttime,"yyyy/MM/dd") #convert to qtime so that widget accepts the values
pastL = dateobj - timedelta(days=30) # minus a month to start date
pasttimeL = datetime.strftime(pastL, "%Y/%m/%d")
pastQL = QDate.fromString(pasttimeL, "yyyy/MM/dd")
np_indexes = np.array([['sh', '上证指数', '大盘指数'],
['sz', '深证成指', '大盘指数'],
['hs300', '沪深300指数', '大盘指数'],
['sz50', '上证50', '大盘指数'],
['zxb', '中小板', '大盘指数'],
['cyb', '创业板', '大盘指数']])
indexes = df(data=np_indexes,
index=range(5000, 5006),
columns=["code", "name", "c_name"])
series = indexes.append(series)
list1_bfr = series["c_name"].tolist() #Get industry categories. Filters out redundant ones
list1 = list(set(list1_bfr))
list1.sort(key=list1_bfr.index)
#w = database()
#zsparent = QTreeWidgetItem(self.ui.treeWidget)
#zsparent.setText(0,"股票指数")
#zsnames =["上证指数-sh","深圳成指-sz","沪深300指数-hs300","上证50-"]
self.init_treeWidget(list1,series)
self.ui.treeWidget.setContextMenuPolicy(Qt.CustomContextMenu)
self.ui.treeWidget.customContextMenuRequested.connect(self.openMenu)
#self.ui.webView.setGeometry(QtCore.QRect(0, 30,1550, 861))
file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "render.html")) #path to read html file
local_url = QUrl.fromLocalFile(file_path)
self.ui.webView.load(local_url)
#self.ui.commandLinkButton.setFixedSize(50, 50)
self.ui.search_btn.clicked.connect(lambda: self.search_comp(series))
self.ui.init_code_btn.clicked.connect(lambda: self.code_sort_tree(series))
self.ui.init_category_btn.clicked.connect(lambda: self.init_treeWidget(list1, series))
self.ui.commandLinkButton.clicked.connect(self.classify) #when the arrow button is clicked, trigger events
#self.ui.commandLinkButton.clicked.connect(lambda action: self.classify(action, self.ui.treewidget))
# QSizePolicy
try:
retain_size = self.ui.dateEdit_2.sizePolicy()
retain_size.setRetainSizeWhenHidden(True)
self.ui.dateEdit_2.setSizePolicy(retain_size)
retain_size = self.ui.comboBox.sizePolicy()
retain_size.setRetainSizeWhenHidden(True)
self.ui.comboBox.setSizePolicy(retain_size)
retain_size = self.ui.label_2.sizePolicy()
retain_size.setRetainSizeWhenHidden(True)
self.ui.label_2.setSizePolicy(retain_size)
except AttributeError:
print("No PYQT5 Binding! Widgets might be deformed")
self.ui.dateEdit.setDate(pastQL)
self.ui.dateEdit_2.setDate(curdateQ)#populate widgets
self.ui.dateEdit.setCalendarPopup(True)
self.ui.dateEdit_2.setCalendarPopup(True)
self.ui.comboBox.addItems(["D", "W", "M", "5", "15", "30", "60"])
self.ui.treeWidget_2.setDragDropMode(self.ui.treeWidget_2.InternalMove)
self.ui.treeWidget_2.setContextMenuPolicy(Qt.CustomContextMenu)
self.ui.treeWidget_2.customContextMenuRequested.connect(self.openWidgetMenu)
#self.ui.toolbutton.clicked.connect(lambda action: self.graphmerge(action, CombineKeyword))
self.ui.combobox.currentIndexChanged.connect(lambda: self.modifycombo(pastQL,pastQ))
def init_treeWidget(self, list1, series):
self.ui.treeWidget.clear()
for j in list1:
parent = QTreeWidgetItem(self.ui.treeWidget) #populate treewidget with names
parent.setText(0,j)
var = series.loc[series["c_name"] == j]
list2 = var["code"].tolist()
name = var["name"].tolist()
#var = showcollection(i) #Display database items
for idx,val in enumerate(list2):
child = QTreeWidgetItem(parent)
child.setText(0, name[idx]+"-"+val)
#for i in Drag:
#grandson = QTreeWidgetItem(child) #Commented out because increases program response time
#grandson.setText(0, i)
#self.ui.treeWidget.itemDoubleClicked.connect(self.onClickItem) #Display Collection items
def code_sort_tree(self, companies):
self.ui.treeWidget.clear()
sorted_comps = companies.sort_values(["code"])
code_list = sorted_comps["code"].tolist()
name_list = sorted_comps["name"].tolist()
shares_parent = QTreeWidgetItem(self.ui.treeWidget)
shares_parent.setText(0, "个股行情")
for idx, val in enumerate(code_list):
child = QTreeWidgetItem(shares_parent)
child.setText(0, name_list[idx] + "-" + str(val))
self.ui.treeWidget.expandToDepth(0)
def search_comp(self, companies):
self.ui.treeWidget.clear()
text = self.ui.search_lineEdit.text()
filtered_codes = companies[companies['code'].str.contains(text)]
filtered_names = companies[companies['name'].str.contains(text)]
filtered_comps = filtered_codes.append(filtered_names)
code_list = filtered_comps["code"].tolist()
name_list = filtered_comps["name"].tolist()
parent = QTreeWidgetItem(self.ui.treeWidget)
parent.setText(0, "搜索结果")
for idx, val in enumerate(code_list):
child = QTreeWidgetItem(parent)
child.setText(0, name_list[idx] + "-" + str(val))
self.ui.treeWidget.expandToDepth(0)
def modifycombo(self,pastQL,pastQ):
if self.ui.combobox.currentText()=="复权": #if 复权 is selected, clear all existing queries to avoid value conflict
self.ui.label_2.show()
self.ui.dateEdit_2.show()
self.ui.dateEdit.setDate(pastQL)
self.ui.interval_label.show()
self.ui.comboBox.show()
self.ui.comboBox.clear()
self.ui.comboBox.addItems(["hfq", "qfq"])
self.ui.treeWidget_2.clear()
if self.ui.combobox.currentText()=="K线":
self.ui.label_2.show()
self.ui.dateEdit_2.show()
self.ui.dateEdit.setDate(pastQL)
self.ui.interval_label.show()
self.ui.comboBox.show()
self.ui.comboBox.clear()
self.ui.comboBox.addItems(["D", "W", "M", "5", "15", "30", "60"])#same as above
self.ui.treeWidget_2.clear()
if self.ui.combobox.currentText()=="分笔数据":
self.ui.interval_label.hide()
self.ui.comboBox.hide()
self.ui.label_2.hide()
self.ui.dateEdit_2.hide()
self.ui.dateEdit.setDate(pastQ)
self.ui.treeWidget_2.clear()
if self.ui.combobox.currentText()=="历史分钟":
self.ui.interval_label.hide()
self.ui.comboBox.show()
self.ui.comboBox.clear()
self.ui.comboBox.addItems(["1min","5min","15min","30min","60min"])
self.ui.label_2.hide()
self.ui.dateEdit_2.hide()
self.ui.dateEdit.setDate(pastQ)
self.ui.treeWidget_2.clear()
if self.ui.combobox.currentText()==u"十大股东":
self.ui.interval_label.hide()
self.ui.comboBox.hide()
self.ui.label_2.hide()
self.ui.dateEdit_2.hide()
self.ui.treeWidget_2.clear()
def openMenu(self,position):
indexes = self.ui.treeWidget.selectedIndexes()
item = self.ui.treeWidget.itemAt(position)
db_origin = ""
#if item.parent():
# db_origin = item.parent().text(0)
collec = item.text(0)
if len(indexes) > 0:
level = 0
index = indexes[0]
while index.parent().isValid():
index = index.parent()
level = level + 1
menu = QMenu()
#print((collec, db_origin))
if level ==0:
pass
else:
#keyarray = GetKeys(collec, db_origin)
#if "Open" in keyarray:
if self.ui.combobox.currentText()==u"K线":
menu.addAction(QAction("Kline", menu, checkable=True))
menu.addAction(QAction("Open", menu, checkable=True))
menu.addAction(QAction("Close", menu, checkable=True))#open up different menu with different kind of graphs
menu.addAction(QAction("High", menu, checkable=True))
menu.addAction(QAction("Low", menu, checkable=True))
menu.addAction(QAction("Volume", menu, checkable=True))
#menu.addAction(QAction("P_change", menu, checkable=True))
#menu.addAction(QAction("Turnover",menu,checkable=True))
if self.ui.combobox.currentText()==u"复权":
menu.addAction(QAction("Kline", menu, checkable=True))
menu.addAction(QAction("Open", menu, checkable=True))
menu.addAction(QAction("Close", menu, checkable=True))
menu.addAction(QAction("High", menu, checkable=True))
menu.addAction(QAction("Low", menu, checkable=True))
menu.addAction(QAction("Volume", menu, checkable=True))
menu.addAction(QAction("Amount", menu, checkable=True))
if self.ui.combobox.currentText()==u"分笔数据":
menu.addAction(QAction("分笔", menu, checkable=True))
if self.ui.combobox.currentText()==u"历史分钟":
menu.addAction(QAction("Kline", menu, checkable=True))
menu.addAction(QAction("Open", menu, checkable=True))
menu.addAction(QAction("Close", menu, checkable=True))
menu.addAction(QAction("High", menu, checkable=True))
menu.addAction(QAction("Low", menu, checkable=True))
menu.addAction(QAction("Volume", menu, checkable=True))
menu.addAction(QAction("Amount", menu, checkable=True))
if self.ui.combobox.currentText()==u"十大股东":
menu.addAction(QAction("季度饼图", menu, checkable=True))
#menu.addAction(QAction("持股比例", menu, checkable=True))
#for g in keyarray:
#menu.addAction(QAction(g, menu, checkable=True))
menu.triggered.connect(lambda action: self.methodSelected(action, collec))
menu.exec_(self.ui.treeWidget.viewport().mapToGlobal(position))
def methodSelected(self, action, collec):
# print(action.text()) #Choice
# if (self.ui.treewidget.count() == 5):
# self.ui.label.setText("Maximum number of queries")
# return
# self.ui.label.setText("")
Choice = action.text()
Stock = collec
# print(collec) #Stock Name
# print(db_origin) #DataBase name
# list1 = [self.tr(Stock+"-"+Choice+"-"+db_origin)]
# self.ui.treewidget.addItems(list1)
parent = QTreeWidgetItem(self.ui.treeWidget_2)
parent.setText(0, Stock+ "-" + Choice)
def openWidgetMenu(self,position):
indexes = self.ui.treeWidget_2.selectedIndexes()
item = self.ui.treeWidget_2.itemAt(position)
        if item is None:
return
#item = self.ui.listWidget.itemAt(position)
if len(indexes) > 0:
menu = QMenu()
menu.addAction(QAction("Delete", menu,checkable = True))#This function is perhaps useless
#menu.triggered.connect(self.eraseItem)
item = self.ui.treeWidget_2.itemAt(position)
#collec = str(item.text())
menu.triggered.connect(lambda action: self.ListMethodSelected(action, item))
menu.exec_(self.ui.treeWidget_2.viewport().mapToGlobal(position))
def ListMethodSelected(self, action, item):
if action.text() == "Delete":
self.eraseItem()
if action.text() == "Combine":
global CombineKeyword
collec = str(item.text())
CombineKeyword.append(collec)#Useless function(maybe?)
list1 = [self.tr(collec)]
self.ui.listwidget.addItems(list1)
self.eraseItem()
def eraseItem(self):
        for x in self.ui.treeWidget_2.selectedItems():  # delete via the right-click menu
#item = self.ui.treewidget.takeItem(self.ui.treewidget.currentRow())
sip.delete(x)
#item.delete
def classify(self, folder):
startdate = self.ui.dateEdit.date()
startdate = startdate.toPyDate()
startdate = startdate.strftime("%Y/%m/%d")#converts date from dateedit to tushare readable date
enddate = self.ui.dateEdit_2.date()
enddate = enddate.toPyDate()
enddate = enddate.strftime("%Y/%m/%d")
option = self.ui.comboBox.currentText()
option = str(option)
#if (self.ui.treewidget) == 0:
#self.ui.label.setText("Need to select at least one query")
#return
root = self.ui.treeWidget_2.invisibleRootItem()# This is for iterating child items
child_count = root.childCount()
texts = []
if child_count==0:
return
for i in range(child_count):
item = root.child(i)
text = item.text(0)#with 3 part'stock_name'+'-'+'code'+'-'+action
texts.append(text)
labels = [k for k in texts]
#items = ([x.encode("utf-8") for x in labels])
width = self.ui.webView.width()#give width and height of user's screen so that graphs can be generated with dynamic size
height = self.ui.webView.height()
mode_combo = self.ui.combobox.currentText()
        graphpage(labels,mode_combo, startdate,enddate,option,width, height)  # labels: 复权 / K线 / 分笔; option: hfq, qfq or 15, 30, D, etc.
self.ui.webView.reload()#refreshes webengine
self.ui.webView.repaint()
self.ui.webView.update()
def graphmerge(self, combineKeyword):
sth = ""
for i in combineKeyword:
if sth == "":
sth = sth + i
else :
sth = sth + "\n" + "&"+ "-"+i
list1 = sth
return sth
global CombineKeyword
CombineKeyword = []
self.ui.listwidget.clear() #combine stuff so that different graphs can be drawn together
app = QApplication(sys.argv)
w = MyUi()
w.show()
sys.exit(app.exec_())
|
mmpose/models/necks/__init__.py
|
jlgzb/mmpose
| 367 |
48040
|
<filename>mmpose/models/necks/__init__.py
from .gap_neck import GlobalAveragePooling
__all__ = ['GlobalAveragePooling']
|
metrics/recall/recall.py
|
leondz/datasets
| 3,395 |
48042
|
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Recall metric."""
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
_KWARGS_DESCRIPTION = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.
- **zero_division** (`string` or `int`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
_CITATION = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={<NAME> <NAME> <NAME> <NAME> <NAME>.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Recall(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32")),
"references": datasets.Sequence(datasets.Value("int32")),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32"),
"references": datasets.Value("int32"),
}
),
reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
)
def _compute(
self,
predictions,
references,
labels=None,
pos_label=1,
average="binary",
sample_weight=None,
zero_division="warn",
):
score = recall_score(
references,
predictions,
labels=labels,
pos_label=pos_label,
average=average,
sample_weight=sample_weight,
zero_division=zero_division,
)
return {"recall": float(score) if score.size == 1 else score}
|
configs/selfsup/simmim/simmim_swin-base_16xb128-coslr-100e_in1k-192.py
|
mitming/mmselfsup
| 355 |
48071
|
_base_ = [
'../_base_/models/simmim_swin-base.py',
'../_base_/datasets/imagenet_simmim.py',
'../_base_/schedules/adamw_coslr-200e_in1k.py',
'../_base_/default_runtime.py',
]
# data
data = dict(samples_per_gpu=128)
# optimizer
optimizer = dict(
lr=2e-4 * 2048 / 512,
betas=(0.9, 0.999),
eps=1e-8,
paramwise_options={
'norm': dict(weight_decay=0.),
'bias': dict(weight_decay=0.),
'absolute_pos_embed': dict(weight_decay=0.),
'relative_position_bias_table': dict(weight_decay=0.0)
})
# clip gradient
optimizer_config = dict(grad_clip=dict(max_norm=5.0))
# learning policy
lr_config = dict(
policy='CosineAnnealing',
min_lr=1e-5 * 2048 / 512,
warmup='linear',
warmup_iters=10,
warmup_ratio=1e-6 / 2e-4,
warmup_by_epoch=True,
by_epoch=False)
# mixed precision
fp16 = dict(loss_scale='dynamic')
# schedule
runner = dict(max_epochs=100)
# runtime
checkpoint_config = dict(interval=1, max_keep_ckpts=3, out_dir='')
persistent_workers = True
log_config = dict(
interval=100, hooks=[
dict(type='TextLoggerHook'),
])
|
tests/cases/doc/test_parametrize_alt.py
|
broglep-work/python-pytest-cases
| 213 |
48112
|
# Authors: <NAME> <<EMAIL>>
# + All contributors to <https://github.com/smarie/python-pytest-cases>
#
# License: 3-clause BSD, <https://github.com/smarie/python-pytest-cases/blob/master/LICENSE>
import pytest
from pytest_cases import parametrize_with_cases
def case_sum_one_plus_two():
a = 1
b = 2
c = 3
return a, b, c
@parametrize_with_cases(argnames=["a", "b", "c"], cases=".")
def test_argnames_as_list(a, b, c):
assert a + b == c
@parametrize_with_cases(argnames=("a", "b", "c"), cases=".")
def test_argnames_as_tuple(a, b, c):
assert a + b == c
def test_argnames_from_invalid_type():
with pytest.raises(
TypeError, match="^argnames should be a string, list or a tuple$"
):
parametrize_with_cases(argnames=42, cases=".")(lambda _: None)
def test_argnames_element_from_invalid_type():
with pytest.raises(
TypeError, match="^all argnames should be strings$"
):
parametrize_with_cases(argnames=["a", 2, "c"], cases=".")(lambda _: None)
|
pytest-verbose-parametrize/tests/integration/parametrize_ids/tests/unit/test_duplicates.py
|
RaiVaibhav/pytest-plugins
| 282 |
48117
|
import pytest
@pytest.mark.parametrize(('x', 'y', ), [(0, [1]), (0, [1]), (str(0), str([1]))])
def test_foo(x, y):
assert str([int(x) + 1]) == y
|
recipes/Python/580741_ctypes_CDLL_automatic_errno/recipe-580741.py
|
tdiprima/code
| 2,023 |
48134
|
import ctypes
class CDLL_errno(ctypes.CDLL):
class _FuncPtr(ctypes._CFuncPtr):
_flags_ = ctypes._FUNCFLAG_CDECL | ctypes._FUNCFLAG_USE_ERRNO
_restype_ = ctypes.c_int
def __call__(self, *args):
ctypes.set_errno(0)
try:
return ctypes._CFuncPtr.__call__(self, *args)
finally:
errno = ctypes.get_errno()
if errno:
import os
raise IOError(errno, os.strerror(errno))
def __init__(self, *args, **kw):
ctypes.CDLL.__init__(self, *args, **kw)
del self._FuncPtr
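# Usage sketch (illustrative; assumes a POSIX system where ctypes.util can
# locate the C library, e.g. Linux or macOS):
if __name__ == "__main__":
    import ctypes.util
    libc = CDLL_errno(ctypes.util.find_library("c"))
    try:
        libc.chdir(b"/no/such/directory")
    except IOError as exc:
        print(exc)  # e.g. [Errno 2] No such file or directory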
|
mixly_arduino/sample/mixpy/人工智能Py/09词法分析-2.py
|
wecake/Mixly_Arduino
| 118 |
48137
|
<filename>mixly_arduino/sample/mixpy/人工智能Py/09词法分析-2.py<gh_stars>100-1000
import aip
client = aip.AipNlp("Enter Your APP_ID", "Enter Your API_KEY", "Enter Your SECRET_KEY")
word= {"r":"代词", "v":"动词", "nr":"名词"}
s = ""
for i in client.lexer("我爱米思齐", options={})["items"]:
s = s + i["item"]
s = s + "【"
s = s + word[i["pos"]]
s = s + "】"
print(s)
|
tests/h/services/flag_test.py
|
pombredanne/h
| 2,103 |
48146
|
<reponame>pombredanne/h
import pytest
from h import models
from h.services import flag
class TestFlagServiceFlagged:
def test_it_returns_true_when_flag_exists(self, svc, flag):
assert svc.flagged(flag.user, flag.annotation) is True
def test_it_returns_false_when_flag_does_not_exist(self, svc, user, annotation):
assert not svc.flagged(user, annotation)
def test_it_handles_missing_values(self, svc, user, annotation):
assert not svc.flagged(None, annotation)
assert not svc.flagged(user, None)
def test_it_uses_the_cache_if_possible(self, svc, user, annotation):
assert not svc.flagged(user, annotation)
svc._flagged_cache[ # pylint:disable=protected-access
(user.id, annotation.id)
] = True
assert svc.flagged(user, annotation)
def test_it_lists_flagged_ids(self, svc, user, flag, noise):
annotation_ids = [flag.annotation_id for flag in noise]
annotation_ids.append(flag.annotation_id)
all_flagged = svc.all_flagged(user, annotation_ids)
assert all_flagged == {flag.annotation_id}
assert svc._flagged_cache == { # pylint:disable=protected-access
(user.id, noise[0].annotation_id): False,
(user.id, noise[1].annotation_id): False,
(user.id, flag.annotation_id): True,
}
def test_it_handles_all_flagged_with_no_ids(self, svc, user):
assert svc.all_flagged(user, []) == set()
def test_it_handles_all_flagged_with_no_user(self, svc, annotation):
assert svc.all_flagged(None, [annotation.id]) == set()
@pytest.fixture
def flag(self, factories, user, annotation):
return factories.Flag(user=user, annotation=annotation)
@pytest.fixture
def user(self, factories):
return factories.User()
@pytest.fixture
def annotation(self, factories):
return factories.Annotation()
@pytest.fixture(autouse=True)
def noise(self, factories):
return factories.Flag.create_batch(2)
class TestFlagServiceCreate:
def test_it_creates_flag(self, svc, db_session, factories):
user = factories.User()
annotation = factories.Annotation(userid=user.userid)
svc.create(user, annotation)
flag = (
db_session.query(models.Flag)
.filter_by(user_id=user.id, annotation_id=annotation.id)
.first()
)
assert flag is not None
def test_it_skips_creating_flag_when_already_exists(
self, svc, db_session, factories
):
existing = factories.Flag()
svc.create(existing.user, existing.annotation)
assert (
db_session.query(models.Flag)
.filter_by(user_id=existing.user.id, annotation_id=existing.annotation.id)
.count()
== 1
)
class TestFlagServiceCount:
def test_flag_count_returns_zero_for_unflagged_annotation(self, svc, unflagged):
assert not svc.flag_count(unflagged)
def test_flag_count_returns_zero_for_None(self, svc):
assert not svc.flag_count(None)
def test_flag_count_returns_flag_count_for_flagged_annotation(self, svc, flagged):
assert svc.flag_count(flagged) == 2
def test_flag_count_uses_the_cache(self, svc, flagged):
svc._flag_count_cache[flagged.id] = 99999 # pylint:disable=protected-access
assert svc.flag_count(flagged) == 99999
def test_flag_counts(self, svc, flagged, unflagged):
ann_ids = [flagged.id, unflagged.id]
flag_counts = svc.flag_counts(ann_ids)
assert ( # pylint:disable=protected-access
flag_counts == svc._flag_count_cache == {flagged.id: 2, unflagged.id: 0}
)
def test_flag_counts_returns_empty_dict_for_no_ids(self, svc):
assert svc.flag_counts([]) == {}
def test_flag_counts_returns_zero_for_unflagged_annotation(self, svc, unflagged):
flag_counts = svc.flag_counts([unflagged.id])
assert not flag_counts[unflagged.id]
@pytest.fixture
def unflagged(self, factories):
return factories.Annotation()
@pytest.fixture
def flagged(self, factories):
annotation = factories.Annotation()
factories.Flag.create_batch(2, annotation=annotation)
return annotation
class TestFlagServiceFactory:
def test_it_returns_flag_service(self, pyramid_request):
svc = flag.flag_service_factory(None, pyramid_request)
assert isinstance(svc, flag.FlagService)
@pytest.fixture
def svc(db_session):
return flag.FlagService(db_session)
|
PhysicsTools/RecoAlgos/python/allSuperClusterCandidates_cfi.py
|
ckamtsikis/cmssw
| 852 |
48158
|
import FWCore.ParameterSet.Config as cms
allSuperClusterCandidates = cms.EDProducer("ConcreteEcalCandidateProducer",
src = cms.InputTag("hybridSuperClusters"),
particleType = cms.string('gamma')
)
|
src/Classes/Install.py
|
TheBossProSniper/electric-windows
| 210 |
48167
|
######################################################################
# INSTALL #
######################################################################
from Classes.Metadata import Metadata
class Install:
"""
Stores data about an installation for usage
"""
def __init__(self, json_name: str, display_name: str, path: str, install_switches, download_type: str, directory: str, custom_install_switch, install_exit_codes, uninstall_exit_codes, metadata: Metadata, version):
self.display_name = display_name
self.json_name = json_name
self.path = path
self.install_switches = install_switches
self.download_type = download_type
self.directory = directory
self.custom_install_switch = custom_install_switch
self.metadata = metadata
self.install_exit_codes = install_exit_codes
self.uninstall_exit_codes = uninstall_exit_codes
self.version = version
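# A hypothetical instantiation sketch: the argument values below are
# illustrative only, and None stands in for a real Classes.Metadata.Metadata
# instance that a caller would normally supply.
if __name__ == "__main__":
    example = Install(
        json_name="7zip",
        display_name="7-Zip",
        path="C:\\Temp\\7zip.exe",
        install_switches=["/S"],
        download_type=".exe",
        directory="C:\\Temp",
        custom_install_switch=None,
        install_exit_codes=[0],
        uninstall_exit_codes=[0],
        metadata=None,
        version="latest",
    )
    print(example.display_name, example.path)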
|
lib/subdomains.py
|
bbhunter/ODIN
| 533 |
48168
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
This module contains everything needed to hunt for subdomains, including collecting certificate
data from Censys.io and crt.sh for a given domain name.
The original crt.sh code is from PaulSec's unofficial crt.sh API. That project can be
found here:
https://github.com/PaulSec/crt.sh
"""
import re
import json
import base64
from time import sleep
import click
import requests
import censys.certificates
from bs4 import BeautifulSoup
from . import helpers
class CertSearcher(object):
"""Class for searching crt.sh and Censys.io for certificates and parsing the results."""
# Set a timeout, in seconds, for the web requests
requests_timeout = 10
# The user-agent and endpoint URIs used for the web requests
crtsh_base_uri = "https://crt.sh/?q={}&output=json"
user_agent = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36"
def __init__(self):
"""Everything that should be initiated with a new object goes here."""
try:
censys_api_id = helpers.config_section_map("Censys")["api_id"]
censys_api_secret = helpers.config_section_map("Censys")["api_secret"]
self.censys_cert_search = censys.certificates.CensysCertificates(api_id=censys_api_id,api_secret=censys_api_secret)
except censys.base.CensysUnauthorizedException:
self.censys_cert_search = None
click.secho("[!] Censys reported your API information is invalid, so Censys searches will be skipped.",fg="yellow")
click.secho("L.. You provided ID %s & Secret %s" % (censys_api_id,censys_api_secret),fg="yellow")
except Exception as error:
self.censys_cert_search = None
click.secho("[!] Did not find a Censys API ID/secret.",fg="yellow")
click.secho("L.. Details: {}".format(error),fg="yellow")
def search_crtsh(self,domain,wildcard=True):
"""Collect certificate information from crt.sh for the target domain name. This returns
a JSON containing certificate information that includes the issuer, issuer and expiration
dates, and the name.
Parameters:
domain Domain to search for on crt.sh
wildcard Whether or not to prepend a wildcard to the domain (default: True)
Return a list of objects, like so:
{
"issuer_ca_id": 16418,
"issuer_name": "C=US, O=Let's Encrypt, CN=Let's Encrypt Authority X3",
"name_value": "hatch.uber.com",
"min_cert_id": 325717795,
"min_entry_timestamp": "2018-02-08T16:47:39.089",
"not_before": "2018-02-08T15:47:39"
}
"""
headers = {"User-Agent":self.user_agent}
if wildcard:
domain = "%25.{}".format(domain)
try:
req = requests.get(self.crtsh_base_uri.format(domain),headers=headers,timeout=self.requests_timeout)
if req.ok:
try:
content = req.content.decode("utf-8")
data = json.loads("[{}]".format(content.replace('}{','},{')))
return data
except:
pass
except requests.exceptions.Timeout:
click.secho("\n[!] The connection to crt.sh timed out!",fg="red")
except requests.exceptions.TooManyRedirects:
click.secho("\n[!] The connection to crt.sh encountered too many redirects!",fg="red")
except requests.exceptions.RequestException as error:
click.secho("\n[!] The connection to crt.sh encountered an error!",fg="red")
click.secho("L.. Details: {}".format(error),fg="red")
return None
def search_censys_certificates(self,target):
"""Collect certificate information from Censys for the target domain name. This returns
a dictionary of certificate information that includes the issuer, subject, and a hash
Censys uses for the /view/ API calls to fetch additional information.
A Censys API key is required.
Parameters
        target The domain name, e.g. apple.com, to be looked up on Censys.
"""
if self.censys_cert_search is None:
pass
else:
try:
# Use the `parsed.names` filter to avoid unwanted domains
query = "parsed.names: %s" % target
results = self.censys_cert_search.search(query,fields=['parsed.names',
'parsed.signature_algorithm.name','parsed.signature.self_signed',
'parsed.validity.start','parsed.validity.end','parsed.fingerprint_sha256',
'parsed.subject_dn','parsed.issuer_dn'])
return results
except censys.base.CensysRateLimitExceededException:
click.secho("\n[!] Censys reports your account has run out of API credits.",fg="red")
return None
except Exception as error:
click.secho("\n[!] Error collecting Censys certificate data for {}.".format(target),fg="red")
click.secho("L.. Details: {}".format(error),fg="red")
return None
def parse_cert_subdomain(self,subject_dn):
"""Accepts the Censys certificate data and parses the individual certificate's domain.
Parameters:
subject_dn Accepts the subject_dn field from a Censys search result.
"""
if "," in subject_dn:
pos = subject_dn.find('CN=')+3
else:
pos = 3
tmp = subject_dn[pos:]
if "," in tmp:
pos = tmp.find(",")
tmp = tmp[:pos]
return tmp
def filter_subdomains(self,domain,subdomains):
"""Filter out uninteresting domains that may be returned from certificates. These are
domains unrelated to the true target. For example, a search for blizzard.com on Censys
can return iran-blizzard.ir, an unwanted and unrelated domain.
Credit to christophetd for this nice bit of code:
https://github.com/christophetd/censys-subdomain-finder/blob/master/censys_subdomain_finder.py#L31
Parameters:
domain The base domain to be used for filtering subdomains, e.g. apple.com
subdomains A list of collected subdomains to filter
"""
return [ subdomain for subdomain in subdomains if '*' not in subdomain and subdomain.endswith(domain) ]
class SubdomainCollector(object):
"""Class for scraping DNS Dumpster and NetCraft to discover subdomains."""
# Set a timeout, in seconds, for the web requests
requests_timeout = 10
# The user-agent and endpoint URIs used for the web requests
dnsdumpster_uri = "https://dnsdumpster.com/"
findsubdomains_uri = "https://findsubdomains.com/subdomains-of/{}"
netcraft_uri = "http://searchdns.netcraft.com/?host={}"
netcraft_history_uri = "http://toolbar.netcraft.com/site_report?url={}"
user_agent = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36"
def __init__(self,webdriver=None):
"""Everything that should be initiated with a new object goes here.
Parameters:
webdriver A selenium webdriver object to be used for automated web browsing
"""
self.browser = webdriver
self.browser.set_page_load_timeout(10)
def check_dns_dumpster(self,domain):
"""Collect subdomains known to DNS Dumpster for the provided domain. This is based on
PaulSec's unofficial DNS Dumpster API available on GitHub.
Parameters:
domain The domain to search for on DNS Dumpster
"""
results = {}
cookies = {}
# Disable SSL warnings and create a session for web browsing
requests.packages.urllib3.disable_warnings()
session = requests.session()
# Try connecting to DNS Dumpster
        # This is all in one try/except because request 1 must succeed before request 2 can be made
try:
# Make a request to stash the CSRF token and setup cookies and headers for the next request
request = session.get(self.dnsdumpster_uri,verify=False,timeout=self.requests_timeout)
csrf_token = session.cookies['csrftoken']
cookies['csrftoken'] = session.cookies['csrftoken']
headers = {"Referer": self.dnsdumpster_uri}
data = {"csrfmiddlewaretoken": csrf_token,"targetip":domain}
# Now make a POST to DNS Dumpster with the new cookies and headers to perform the search
request = session.post(self.dnsdumpster_uri,cookies=cookies,data=data,headers=headers,timeout=self.requests_timeout)
# Check if a 200 OK was returned
if request.ok:
soup = BeautifulSoup(request.content,"lxml")
tables = soup.findAll("table")
results['domain'] = domain
results['dns_records'] = {}
results['dns_records']['dns'] = self._retrieve_results(tables[0])
results['dns_records']['mx'] = self._retrieve_results(tables[1])
results['dns_records']['txt'] = self._retrieve_txt_record(tables[2])
results['dns_records']['host'] = self._retrieve_results(tables[3])
# Try to fetch the network mapping image
try:
val = soup.find('img',attrs={'class': 'img-responsive'})['src']
tmp_url = "{}{}".format(self.dnsdumpster_uri,val)
image_data = base64.b64encode(requests.get(tmp_url,timeout=self.requests_timeout).content)
except Exception:
image_data = None
finally:
results['image_data'] = image_data
else:
click.secho("\n[!] The DNS Dumpster request returned a {} status code!".format(request.status_code),fg="red")
except requests.exceptions.Timeout:
click.secho("\n[!] The connection to crt.sh timed out!",fg="red")
except requests.exceptions.TooManyRedirects:
click.secho("\n[!] The connection to crt.sh encountered too many redirects!",fg="red")
except requests.exceptions.RequestException as error:
click.secho("\n[!] The connection to crt.sh encountered an error!",fg="red")
click.secho("L.. Details: {}".format(error),fg="red")
return results
def _retrieve_results(self,table):
"""Used by check_dns_dumpster() to extract the results from the HTML.
Parameters:
table The HTML table pulled from DNS Dumpster results
"""
results = []
trs = table.findAll('tr')
for tr in trs:
tds = tr.findAll('td')
pattern_ip = r'([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})'
ip = re.findall(pattern_ip,tds[1].text)[0]
domain = tds[0].text.replace('\n','').split(' ')[0]
header = ' '.join(tds[0].text.replace('\n','').split(' ')[1:])
reverse_dns = tds[1].find('span',attrs={}).text
additional_info = tds[2].text
country = tds[2].find('span',attrs={}).text
autonomous_system = additional_info.split(' ')[0]
provider = ' '.join(additional_info.split(' ')[1:])
provider = provider.replace(country,'')
data = {'domain':domain,
'ip':ip,
'reverse_dns':reverse_dns,
'as':autonomous_system,
'provider':provider,
'country':country,
'header':header}
results.append(data)
return results
def _retrieve_txt_record(self,table):
"""Used by check_dns_dumpster() to extracts the domain's DNS TXT records.
Parameters:
table The HTML table pulled from DNS Dumpster results
"""
results = []
for td in table.findAll('td'):
results.append(td.text)
return results
def check_netcraft(self,domain):
"""Collect subdomains known to NetCraft for the provided domain. NetCraft blocks scripted
        requests by requiring cookies and JavaScript for all browsers, so Selenium is required.
This is based on code from the DataSploit project, but updated to work with today's
NetCraft and Python 3.
Parameters:
domain The domain to look-up on NetCraft
"""
results = []
target_dom_name = domain.split(".")
self.browser.get(self.netcraft_uri.format(domain))
link_regx = re.compile(r'<a href="http://toolbar.netcraft.com/site_report\?url=(.*)">')
links_list = link_regx.findall(self.browser.page_source)
for x in links_list:
dom_name = x.split("/")[2].split(".")
if (dom_name[len(dom_name) - 1] == target_dom_name[1]) and \
(dom_name[len(dom_name) - 2] == target_dom_name[0]):
results.append(x.split("/")[2])
num_regex = re.compile('Found (.*) site')
num_subdomains = num_regex.findall(self.browser.page_source)
if not num_subdomains:
num_regex = re.compile('First (.*) sites returned')
num_subdomains = num_regex.findall(self.browser.page_source)
if num_subdomains:
if num_subdomains[0] != str(0):
num_pages = int(num_subdomains[0]) // 20 + 1
if num_pages > 1:
last_regex = re.compile(
'<td align="left">%s.</td><td align="left">\n<a href="(.*)" rel="nofollow">' % (20))
last_item = last_regex.findall(self.browser.page_source)[0].split("/")[2]
next_page = 21
for x in range(2,num_pages):
url = "http://searchdns.netcraft.com/?host=%s&last=%s&from=%s&restriction=/site%%20contains" % (domain,last_item,next_page)
self.browser.get(url)
link_regx = re.compile(
r'<a href="http://toolbar.netcraft.com/site_report\?url=(.*)">')
links_list = link_regx.findall(self.browser.page_source)
for y in links_list:
dom_name1 = y.split("/")[2].split(".")
if (dom_name1[len(dom_name1) - 1] == target_dom_name[1]) and \
(dom_name1[len(dom_name1) - 2] == target_dom_name[0]):
results.append(y.split("/")[2])
last_item = links_list[len(links_list) - 1].split("/")[2]
next_page = 20 * x + 1
else:
pass
return results
def fetch_netcraft_domain_history(self,domain):
"""Fetch a domain's IP address history from NetCraft.
Parameters:
domain The domain to look-up on NetCraft
"""
# TODO: See if the "Last Seen" and other data can be easily collected for here
ip_history = []
sleep(1)
self.browser.get(self.netcraft_history_uri.format(domain))
soup = BeautifulSoup(self.browser.page_source,'html.parser')
urls_parsed = soup.findAll('a',href=re.compile(r".*netblock\?q.*"))
for url in urls_parsed:
if urls_parsed.index(url) != 0:
result = [str(url).split('=')[2].split(">")[1].split("<")[0],\
str(url.parent.findNext('td')).strip("<td>").strip("</td>")]
ip_history.append(result)
return ip_history
def query_subdomainof(self,domain):
"""Look-up the given domain on findsubdomains.com and parse the results to get a list of
subdomains.
Parameters:
domain The base domain for the subdomains query
"""
subdomains = []
headers = { 'User-Agent': self.user_agent }
request = requests.get(self.findsubdomains_uri.format(domain),headers=headers,timeout=self.requests_timeout)
soup = BeautifulSoup(request.content,"lxml")
subdomain_links = soup.findAll('a',{'class': 'aggregated-link'})
for subdomain in subdomain_links:
if not subdomain.string.strip() == domain:
subdomains.append(subdomain.string.strip())
unique_subdomains = list(set(subdomains))
return unique_subdomains
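# A minimal usage sketch of the two pure helpers above (illustrative values).
# It assumes ODIN's third-party dependencies (censys, click, requests, bs4) are
# importable and that the module is executed from within the package; the
# CertSearcher constructor only warns if the Censys keys cannot be loaded.
if __name__ == "__main__":
    searcher = CertSearcher()
    # Pull the common name out of a certificate subject DN
    print(searcher.parse_cert_subdomain("C=US, O=Example Org, CN=api.example.com"))
    # Drop wildcards and unrelated domains from a candidate list
    print(searcher.filter_subdomains("example.com",
                                     ["api.example.com", "*.example.com", "unrelated.ir"]))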
|
pyti/double_exponential_moving_average.py
|
dibyajyotidash/https-github.com-kylejusticemagnuson-pyti
| 635 |
48173
|
from __future__ import absolute_import
from pyti import catch_errors
from pyti.exponential_moving_average import (
exponential_moving_average as ema
)
def double_exponential_moving_average(data, period):
"""
Double Exponential Moving Average.
Formula:
DEMA = 2*EMA - EMA(EMA)
"""
catch_errors.check_for_period_error(data, period)
dema = (2 * ema(data, period)) - ema(ema(data, period), period)
return dema
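# A minimal usage sketch (illustrative prices). The first period-1 entries of
# the result are typically NaN, since pyti marks them as non-computable.
if __name__ == "__main__":
    prices = [10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
    print(double_exponential_moving_average(prices, 4))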
|
prml/nn/math/product.py
|
brunomaga/PRML
| 11,017 |
48233
|
import numpy as np
from prml.nn.function import Function
class Product(Function):
def __init__(self, axis=None, keepdims=False):
if isinstance(axis, int):
axis = (axis,)
elif isinstance(axis, tuple):
axis = tuple(sorted(axis))
self.axis = axis
self.keepdims = keepdims
def _forward(self, x):
self.output = np.prod(x, axis=self.axis, keepdims=True)
if not self.keepdims:
return np.squeeze(self.output)
else:
return self.output
def backward(self, delta, x):
if not self.keepdims and self.axis is not None:
for ax in self.axis:
delta = np.expand_dims(delta, ax)
dx = delta * self.output / x
return dx
def prod(x, axis=None, keepdims=False):
"""
    product of all elements in the array
Parameters
----------
x : tensor_like
input array
axis : int, tuple of ints
axis or axes along which a product is performed
keepdims : bool
keep dimensionality or not
Returns
-------
product : tensor_like
        product of all elements
"""
return Product(axis=axis, keepdims=keepdims).forward(x)
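# A standalone numerical check (plain NumPy, independent of the Function
# machinery above) of the gradient rule used in backward():
# d(prod(x))/dx_i = prod(x) / x_i for nonzero x_i.
if __name__ == "__main__":
    x = np.array([2.0, 3.0, 4.0])
    output = np.prod(x)              # 24.0
    analytic = output / x            # [12., 8., 6.]
    eps = 1e-6
    numeric = (np.prod(np.array([2.0 + eps, 3.0, 4.0])) - output) / eps
    print(analytic[0], numeric)      # both approximately 12.0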
|
src/api/comment.py
|
piwaniuk/critic
| 216 |
48251
|
# -*- mode: python; encoding: utf-8 -*-
#
# Copyright 2017 the Critic contributors, Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import api
class CommentError(api.APIError):
pass
class InvalidCommentId(CommentError):
"""Raised when an invalid comment id is used."""
def __init__(self, comment_id):
"""Constructor"""
super(InvalidCommentId, self).__init__(
"Invalid comment id: %d" % comment_id)
self.comment_id = comment_id
class InvalidCommentIds(CommentError):
"""Raised by fetchMany() when invalid comment ids are used."""
def __init__(self, comment_ids):
"""Constructor"""
super(InvalidCommentIds, self).__init__(
"Invalid comment ids: %s" % ", ".join(map(str, comment_ids)))
self.comment_ids = comment_ids
class InvalidLocation(CommentError):
"""Raised when attempting to specify an invalid comment location"""
pass
class Comment(api.APIObject):
TYPE_VALUES = frozenset(["issue", "note"])
@property
def id(self):
"""The comment's unique id"""
return self._impl.id
@property
def type(self):
"""The comment's type
The type is one of "issue" and "note"."""
pass
@property
def is_draft(self):
"""True if the comment is not yet published
Unpublished comments are not displayed to other users."""
return self._impl.is_draft
@property
def review(self):
"""The review to which the comment belongs
The review is returned as an api.review.Review object."""
return self._impl.getReview(self.critic)
@property
def author(self):
"""The comment's author
The author is returned as an api.user.User object."""
return self._impl.getAuthor(self.critic)
@property
def timestamp(self):
"""The comment's timestamp
The return value is a datetime.datetime object."""
return self._impl.timestamp
@property
def location(self):
"""The location of the comment, or None
If the comment was made against lines in a commit message, the return
value is a api.comment.CommitMessageLocation object. If the comment
was made against lines in a file version, the return value is
api.comment.FileVersionLocation object. Otherwise, the return value
is None."""
return self._impl.getLocation(self.critic)
@property
def text(self):
"""The comment's text"""
return self._impl.text
@property
def replies(self):
"""The replies to the comment
The replies are returned as a list of api.reply.Reply objects."""
return self._impl.getReplies(self.critic)
class DraftChanges(object):
"""Draft changes to the comment"""
def __init__(self, author, is_draft, reply, new_type):
self.__author = author
self.__is_draft = is_draft
self.__reply = reply
self.__new_type = new_type
@property
def author(self):
"""The author of these draft changes
The author is returned as an api.user.User object."""
return self.__author
@property
def is_draft(self):
"""True if the comment itself is a draft (not published)"""
return self.__is_draft
@property
def reply(self):
"""The current unpublished reply
The reply is returned as an api.reply.Reply object, or None if
there is no current unpublished reply."""
return self.__reply
@property
def new_type(self):
"""The new type of an unpublished type change
The type is returned as a string. Comment.TYPE_VALUES defines the
set of possible return values."""
return self.__new_type
@property
def draft_changes(self):
"""The comment's current draft changes
The draft changes are returned as a Comment.DraftChanges object, or
None if the current user has no unpublished changes to this comment.
If the comment is currently an issue, or the current user has an
unpublished change of the comment's type to issue, the returned
object will be an Issue.DraftChanges instead."""
return self._impl.getDraftChanges(self.critic)
class Issue(Comment):
STATE_VALUES = frozenset(["open", "addressed", "resolved"])
@property
def type(self):
return "issue"
@property
def state(self):
"""The issue's state
The state is one of the strings "open", "addressed" or "resolved"."""
return self._impl.state
@property
def addressed_by(self):
"""The commit that addressed the issue, or None
The value is an api.commit.Commit object, or None if the issue's
state is not "addressed"."""
return self._impl.getAddressedBy(self.critic)
@property
def resolved_by(self):
"""The user that resolved the issue, or None
The value is an api.user.User object, or None if the issue's state is
not "resolved"."""
return self._impl.getResolvedBy(self.critic)
class DraftChanges(Comment.DraftChanges):
"""Draft changes to the issue"""
def __init__(self, author, is_draft, reply, new_type, new_state,
new_location):
super(Issue.DraftChanges, self).__init__(
author, is_draft, reply, new_type)
self.__new_state = new_state
self.__new_location = new_location
@property
def new_state(self):
"""The issue's new state
The new state is returned as a string, or None if the current
user has not resolved or reopened the issue. Issue.STATE_VALUES
defines the set of possible return values."""
return self.__new_state
@property
def new_location(self):
"""The issue's new location
The new location is returned as a FileVersionLocation objects, or
None if the issue has not been reopened, or if it was manually
resolved rather than addressed and did not need to be relocated
when being reopened.
Since only issues in file version locations can be addressed,
that is the only possible type of new location."""
return self.__new_location
class Note(Comment):
@property
def type(self):
return "note"
class Location(api.APIObject):
TYPE_VALUES = frozenset(["general", "commit-message", "file-version"])
def __len__(self):
"""Return the the length of the location, in lines"""
return (self.last_line - self.first_line) + 1
@property
def type(self):
"""The location's type
The type is one of "commit-message" and "file-version"."""
pass
@property
def first_line(self):
"""The line number of the first commented line
Note that line numbers are one-based."""
return self._impl.first_line
@property
def last_line(self):
"""The line number of the last commented line
Note that line numbers are one-based."""
return self._impl.last_line
class CommitMessageLocation(Location):
@property
def type(self):
return "commit-message"
@property
def commit(self):
"""The commit whose message was commented"""
return self._impl.getCommit(self.critic)
@staticmethod
def make(critic, first_line, last_line, commit):
return api.impl.comment.makeCommitMessageLocation(
critic, first_line, last_line, commit)
class FileVersionLocation(Location):
@property
def type(self):
return "file-version"
@property
def changeset(self):
"""The changeset containing the comment
The changeset is returned as an api.changeset.Changeset object.
If the comment was created while looking at a diff, this will
initially be that changeset. As additional commits are added to the
review, this changeset may be "extended" to contain those added
commits.
This is the ideal changeset to use to display the comment, unless it
is an issue that has been addressed, in which case a better changeset
would be the diff of the commit returned by Issue.addressed_by.
If the user did not make the comment while looking at a diff but
rather while looking at a single version of the file, then this
attribute returns None.
If this is an object returned by translateTo() called with a
changeset argument, then this will be that changeset."""
return self._impl.getChangeset(self.critic)
@property
def side(self):
"""The commented side ("old" or "new") of the changeset
If the user did not make the comment while looking at a changeset
(i.e. a diff) but rather while looking at a single version of the
file, then this attribute returns None."""
return self._impl.side
@property
def commit(self):
"""The commit whose version of the file this location references
The commit is returned as an api.commit.Commit object.
If this is an object returned by translateTo() called with a commit
argument, then this is the commit that was given as an argument to
it. If this is the primary location of the comment (returned from
Comment.location) then this is the commit whose version of the file
the comment was originally made against, or None if the comment was
made while looking at a diff."""
return self._impl.getCommit(self.critic)
@property
def file(self):
"""The commented file"""
return self._impl.getFile(self.critic)
@property
def is_translated(self):
"""True if this is a location returned by |translateTo()|"""
return self._impl.is_translated
def translateTo(self, changeset=None, commit=None):
"""Return a translated file version location, or None
The location is translated to the version of the file in a certain
commit. If |changeset| is not None, that commit is the changeset's
|to_commit|, unless the comment is not present there, and otherwise
the changeset's |from_commit|. If |commit| is not None, that's the
commit.
If the comment is not present in the commit, None is returned.
The returned object's |is_translated| will be True.
If the |changeset| argument is not None, then the returned object's
|changeset| will be that changeset, and its |side| will reflect which
of its |from_commit| and |to_commit| ended up being used. The
returned object's |commit| will be None.
If the |commit| argument is not None, the returned object's |commit|
will be that commit, and its |changeset| and |side| will be None."""
assert changeset is None \
or isinstance(changeset, api.changeset.Changeset)
assert commit is None or isinstance(commit, api.commit.Commit)
assert (changeset is None) != (commit is None)
return self._impl.translateTo(self.critic, changeset, commit)
@staticmethod
def make(critic, first_line, last_line, file, changeset=None, side=None,
commit=None):
# File is required.
assert isinstance(file, api.file.File)
# Changeset and side go together.
assert (changeset is None) == (side is None)
assert (changeset is None) \
or isinstance(changeset, api.changeset.Changeset)
# Commit conflicts with changeset, but one is required.
assert (commit is None) != (changeset is None)
assert (commit is None) or isinstance(commit, api.commit.Commit)
return api.impl.comment.makeFileVersionLocation(
critic, first_line, last_line, file, changeset, side, commit)
def fetch(critic, comment_id):
"""Fetch the Comment object with the given id"""
import api.impl
assert isinstance(critic, api.critic.Critic)
assert isinstance(comment_id, int)
return api.impl.comment.fetch(critic, comment_id)
def fetchMany(critic, comment_ids):
"""Fetch multiple Comment objects with the given ids"""
import api.impl
assert isinstance(critic, api.critic.Critic)
comment_ids = list(comment_ids)
assert all(isinstance(comment_id, int) for comment_id in comment_ids)
return api.impl.comment.fetchMany(critic, comment_ids)
def fetchAll(critic, review=None, author=None, comment_type=None, state=None,
location_type=None, changeset=None, commit=None):
"""Fetch all Comment objects
If |review| is not None, only comments in the specified review are
returned.
If |author| is not None, only comments created by the specified user are
returned.
If |comment_type| is not None, only comments of the specified type are
returned.
If |state| is not None, only issues in the specified state are returned.
This implies type="issue".
If |location_type| is not None, only issues in the specified type of
location are returned.
If |changeset| is not None, only comments against file versions that are
referenced by the specified changeset are returned. Must be combined with
|review|, and can not be combined with |commit|.
If |commit| is not None, only comments against the commit's message or
file versions referenced by the commit are returned. Must be combined
with |review|, and can not be combined with |changeset|."""
import api.impl
assert isinstance(critic, api.critic.Critic)
assert review is None or isinstance(review, api.review.Review)
assert author is None or isinstance(author, api.user.User)
assert comment_type is None or comment_type in Comment.TYPE_VALUES
assert state is None or state in Issue.STATE_VALUES
assert state is None or comment_type in (None, "issue")
assert location_type is None or location_type in Location.TYPE_VALUES
assert changeset is None or isinstance(changeset, api.changeset.Changeset)
assert changeset is None or review is not None
assert commit is None or isinstance(commit, api.commit.Commit)
assert commit is None or review is not None
assert changeset is None or commit is None
return api.impl.comment.fetchAll(critic, review, author, comment_type,
state, location_type, changeset, commit)
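# A hypothetical usage sketch: `critic` would be an api.critic.Critic session
# and `review` an api.review.Review object obtained elsewhere in the API.
#
#     open_issues = fetchAll(critic, review=review,
#                            comment_type="issue", state="open")
#     for issue in open_issues:
#         print(issue.author, issue.state, issue.text)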
|
server/www/teleport/webroot/app/controller/rpc.py
|
zhoulhb/teleport
| 640 |
48299
|
# -*- coding: utf-8 -*-
import json
import urllib.parse
import tornado.gen
from app.const import *
from app.base.configs import tp_cfg
from app.base.session import tp_session
from app.base.core_server import core_service_async_post_http
from app.model import record
from app.base.stats import tp_stats
from app.base.logger import *
from app.base.controller import TPBaseJsonHandler
class RpcHandler(TPBaseJsonHandler):
@tornado.gen.coroutine
def get(self):
_uri = self.request.uri.split('?', 1)
if len(_uri) != 2:
return self.write_json(TPE_PARAM)
yield self._dispatch(urllib.parse.unquote(_uri[1]))
@tornado.gen.coroutine
def post(self):
req = self.request.body.decode('utf-8')
if req == '':
return self.write_json(TPE_PARAM)
yield self._dispatch(req)
@tornado.gen.coroutine
def _dispatch(self, req):
try:
_req = json.loads(req)
if 'method' not in _req or 'param' not in _req:
return self.write_json(TPE_PARAM)
except:
return self.write_json(TPE_JSON_FORMAT)
# log.d('WEB-JSON-RPC, method=`{}`\n'.format(_req['method']))
if 'get_conn_info' == _req['method']:
return self._get_conn_info(_req['param'])
elif 'session_begin' == _req['method']:
return self._session_begin(_req['param'])
elif 'session_update' == _req['method']:
return self._session_update(_req['param'])
elif 'session_end' == _req['method']:
return self._session_end(_req['param'])
elif 'register_core' == _req['method']:
return self._register_core(_req['param'])
elif 'exit' == _req['method']:
return self._exit()
else:
log.e('WEB-JSON-RPC got unknown method: `{}`.\n'.format(_req['method']))
return self.write_json(TPE_UNKNOWN_CMD)
def _get_conn_info(self, param):
if 'conn_id' not in param:
return self.write_json(TPE_PARAM)
conn_id = param['conn_id']
x = tp_session().taken('tmp-conn-info-{}'.format(conn_id), None)
if x is None:
return self.write_json(TPE_NOT_EXISTS)
else:
return self.write_json(TPE_OK, data=x)
def _session_begin(self, param):
try:
_sid = param['sid']
_user_id = param['user_id']
_host_id = param['host_id']
_account_id = param['acc_id']
_user_name = param['user_username']
_acc_name = param['acc_username']
_host_ip = param['host_ip']
_conn_ip = param['conn_ip']
_conn_port = param['conn_port']
_client_ip = param['client_ip']
_auth_type = param['auth_type']
_protocol_type = param['protocol_type']
_protocol_sub_type = param['protocol_sub_type']
        except KeyError:
return self.write_json(TPE_PARAM)
err, record_id = record.session_begin(_sid, _user_id, _host_id, _account_id, _user_name, _acc_name, _host_ip, _conn_ip, _conn_port, _client_ip, _auth_type, _protocol_type, _protocol_sub_type)
if err != TPE_OK:
return self.write_json(err, message='can not write database.')
else:
tp_stats().conn_counter_change(1)
return self.write_json(TPE_OK, data={'rid': record_id})
def _session_update(self, param):
try:
rid = param['rid']
protocol_sub_type = param['protocol_sub_type']
code = param['code']
except:
return self.write_json(TPE_PARAM)
if 'rid' not in param or 'code' not in param:
return self.write_json(TPE_PARAM)
if not record.session_update(rid, protocol_sub_type, code):
return self.write_json(TPE_DATABASE, 'can not write database.')
else:
return self.write_json(TPE_OK)
def _session_end(self, param):
if 'rid' not in param or 'code' not in param:
return self.write_json(TPE_PARAM, message='invalid request.')
if not record.session_end(param['rid'], param['code']):
return self.write_json(TPE_DATABASE, 'can not write database.')
else:
tp_stats().conn_counter_change(-1)
return self.write_json(TPE_OK)
def _register_core(self, param):
        # The core service has (re)started (it may have terminated abnormally before),
        # so repair any stale session states recorded in the database.
record.session_fix()
if 'rpc' not in param:
return self.write_json(TPE_PARAM, 'invalid param.')
tp_cfg().common.core_server_rpc = param['rpc']
        # Fetch the core service's configuration.
req = {'method': 'get_config', 'param': []}
_yr = core_service_async_post_http(req)
code, ret_data = yield _yr
if code != TPE_OK:
return self.write_json(code, 'get config from core-service failed.')
log.d('update base server config info.\n')
tp_cfg().update_core(ret_data)
        # Push the runtime configuration to the core service.
req = {'method': 'set_config', 'param': {'noop_timeout': tp_cfg().sys.session.noop_timeout}}
_yr = core_service_async_post_http(req)
code, ret_data = yield _yr
if code != TPE_OK:
return self.write_json(code, 'set runtime-config to core-service failed.')
return self.write_json(TPE_OK)
def _exit(self):
# set exit flag.
return self.write_json(TPE_OK)
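# A sketch of the JSON-RPC payload shape that _dispatch() accepts: both the
# 'method' and 'param' keys are required, e.g. (illustrative values only)
#
#     {"method": "session_end", "param": {"rid": 1, "code": 0}}
#
# sent either as the POST body or URL-encoded after '?' in a GET request.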
|
pysnmp/smi/mibs/instances/__SNMP-TARGET-MIB.py
|
RKinsey/pysnmp
| 492 |
48358
|
#
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2019, <NAME> <<EMAIL>>
# License: http://snmplabs.com/pysnmp/license.html
#
# This file instantiates some of the MIB managed objects for SNMP engine use
#
if 'mibBuilder' not in globals():
import sys
sys.stderr.write(__doc__)
sys.exit(1)
MibScalarInstance, = mibBuilder.importSymbols(
'SNMPv2-SMI',
'MibScalarInstance'
)
(snmpTargetSpinLock,
snmpUnavailableContexts,
snmpUnknownContexts) = mibBuilder.importSymbols(
'SNMP-TARGET-MIB',
'snmpTargetSpinLock',
'snmpUnavailableContexts',
'snmpUnknownContexts'
)
_snmpTargetSpinLock = MibScalarInstance(
snmpTargetSpinLock.name, (0,),
snmpTargetSpinLock.syntax.clone(0)
)
_snmpUnavailableContexts = MibScalarInstance(
snmpUnavailableContexts.name, (0,),
snmpUnavailableContexts.syntax.clone(0)
)
_snmpUnknownContexts = MibScalarInstance(
snmpUnknownContexts.name, (0,),
snmpUnknownContexts.syntax.clone(0)
)
mibBuilder.exportSymbols(
'__SNMP-TARGET-MIB',
snmpTargetSpinLock=_snmpTargetSpinLock,
snmpUnavailableContexts=_snmpUnavailableContexts,
snmpUnknownContexts=_snmpUnknownContexts
)
|
calamari_ocr/test/test_data_pagexml.py
|
jacektl/calamari
| 922 |
48385
|
import os
import unittest
this_dir = os.path.dirname(os.path.realpath(__file__))
class TestPageXML(unittest.TestCase):
def run_dataset_viewer(self, add_args):
from calamari_ocr.scripts.dataset_viewer import main
main(add_args + ["--no_plot"])
def test_cut_modes(self):
images = os.path.join(this_dir, "data", "avicanon_pagexml", "*.nrm.png")
self.run_dataset_viewer(["--gen", "PageXML", "--gen.images", images, "--gen.cut_mode", "BOX"])
self.run_dataset_viewer(["--gen", "PageXML", "--gen.images", images, "--gen.cut_mode", "MBR"])
|
interleaving/optimized.py
|
mpkato/interleaving
| 107 |
48427
|
from .ranking import CreditRanking
from .interleaving_method import InterleavingMethod
import numpy as np
from scipy.optimize import linprog
class Optimized(InterleavingMethod):
'''
Optimized Interleaving
Args:
lists: lists of document IDs
max_length: the maximum length of resultant interleaving.
If this is None (default), it is set to the minimum length
of the given lists.
sample_num: If this is None (default), an interleaved ranking is
generated every time when `interleave` is called.
Otherwise, `sample_num` rankings are sampled in the
initialization, one of which is returned when `interleave`
is called.
credit_func: either 'inverse' (1/rank) or 'negative' (-rank)
'''
def __init__(self, lists, max_length=None, sample_num=None,
credit_func='inverse', secure_sampling=False):
'''
lists: lists of document IDs
max_length: the maximum length of resultant interleaving.
If this is None (default), it is set to the minimum length
of the given lists.
sample_num: If this is None (default), an interleaved ranking is
generated every time when `interleave` is called.
Otherwise, `sample_num` rankings are sampled in the
initialization, one of which is returned when `interleave`
is called.
credit_func: either 'inverse' (1/rank) or 'negative' (-rank)
'''
if sample_num is None:
raise ValueError('sample_num cannot be None, '
+ 'i.e. the initial sampling is necessary')
if credit_func == 'inverse':
self._credit_func = lambda x: 1.0 / x
elif credit_func == 'negative':
self._credit_func = lambda x: -x
else:
raise ValueError('credit_func should be either inverse or negative')
self._secure_sampling = secure_sampling
super(Optimized, self).__init__(lists,
max_length=max_length, sample_num=sample_num)
# self._rankings (sampled rankings) is obtained here
res = self._compute_probabilities(lists, self._rankings)
is_success, self._probabilities, _ = res
self._probabilities /= np.sum(self._probabilities)
if not is_success:
raise ValueError('Optimization failed')
def _sample_rankings(self):
'''
Sample `sample_num` rankings
'''
distribution = {}
if self._secure_sampling:
rankings = set()
for _ in range(self.sample_num):
rankings.add(self._sample(self.max_length, self.lists))
for ranking in rankings:
distribution[ranking] = 1.0 / len(rankings)
else:
while len(distribution) < self.sample_num:
ranking = self._sample(self.max_length, self.lists)
distribution[ranking] = 1.0 / self.sample_num
self._rankings, self._probabilities = zip(*distribution.items())
def _sample(self, max_length, lists):
'''
Prefix constraint sampling
(Multileaved Comparisons for Fast Online Evaluation, CIKM'14)
max_length: the maximum length of resultant interleaving
lists: lists of document IDs
Return an instance of Ranking
'''
num_rankers = len(lists)
result = CreditRanking(num_rankers)
teams = set(range(num_rankers))
while len(result) < max_length:
if len(teams) == 0:
break
selected_team = np.random.choice(list(teams))
docs = [x for x in lists[selected_team] if not x in result]
if len(docs) > 0:
selected_doc = docs[0]
result.append(selected_doc)
else:
teams.remove(selected_team)
# assign credits
for docid in result:
for team in result.credits:
if docid in lists[team]:
rank = lists[team].index(docid) + 1
else:
rank = len(lists[team]) + 1
result.credits[team][docid] = self._credit_func(rank)
return result
def _compute_probabilities(self, lists, rankings):
'''
Solve the optimization problem in
(Multileaved Comparisons for Fast Online Evaluation, CIKM'14)
lists: lists of document IDs
rankings: a list of Ranking instances
Return a list of probabilities for input rankings
'''
# probability constraints
A_p_sum = np.array([1]*len(rankings))
# unbiasedness constraints
ub_cons = self._unbiasedness_constraints(lists, rankings)
# sensitivity
sensitivity = self._sensitivity(lists, rankings)
# constraints
A_eq = np.vstack((A_p_sum, ub_cons))
b_eq = np.array([1.0] + [0.0]*ub_cons.shape[0])
# solving the optimization problem
res = linprog(sensitivity, # objective function
A_eq=A_eq, b_eq=b_eq, # constraints
bounds=[(0, 1)]*len(rankings) # 0 <= p <= 1
)
return res.success, res.x, res.fun
def _unbiasedness_constraints(self, lists, rankings):
'''
for each k and team x, for a certain c_k:
sum_{L_i} {p_i} * sum^k_{j=1} ranking.credits[x][d_j] = c_k
In other words,
sum_{L_i} {p_i} * sum^k_{j=1}
(ranking.credits[x][d_j] - ranking.credits[x+1][d_j]) = 0
'''
result = []
credits = np.zeros((self.max_length, len(rankings), len(lists)))
for rid, ranking in enumerate(rankings):
for idx, docid in enumerate(ranking):
for team in ranking.credits:
credits[idx, rid, team] = ranking.credits[team][docid]
if idx > 0:
credits[idx, rid, team] += credits[idx-1, rid, team]
for i in range(len(lists) - 1):
result.append(credits[:, :, i] - credits[:, :, i+1])
result = np.vstack(result)
return result
def _sensitivity(self, lists, rankings):
'''
Expected variance
'''
# compute the mean of each ranking
mu = np.zeros(len(rankings))
for rid, ranking in enumerate(rankings):
for idx, docid in enumerate(ranking):
click_prob = 1.0 / (idx + 1)
credit = np.sum(
[ranking.credits[x][docid] for x in ranking.credits])
mu[rid] += click_prob * credit
mu /= len(lists)
# compute the variance
var = np.zeros(len(rankings))
for rid, ranking in enumerate(rankings):
for x in ranking.credits:
v = 0.0
for idx, docid in enumerate(ranking):
click_prob = 1.0 / (idx + 1)
if docid in ranking.credits[x]:
v += click_prob * ranking.credits[x][docid]
v -= mu[rid]
var[rid] += v ** 2
return var
@classmethod
def compute_scores(cls, ranking, clicks):
'''
ranking: an instance of Ranking
clicks: a list of indices clicked by a user
Return a list of scores of each ranker.
'''
return {i: sum([ranking.credits[i][ranking[c]] for c in clicks])
for i in ranking.credits}
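# A minimal usage sketch (illustrative values). `interleave()` is provided by
# the InterleavingMethod base class and is assumed to return one of the
# pre-sampled rankings according to the optimized probabilities; the
# constructor raises ValueError if the linear program cannot be solved.
#
#     lists = [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]]
#     method = Optimized(lists, sample_num=100)
#     ranking = method.interleave()
#     scores = Optimized.compute_scores(ranking, clicks=[0, 1])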
|
amla/common/schedule.py
|
tremblerz/amla
| 118 |
48467
|
#Copyright 2018 Cisco Systems All Rights Reserved
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
#http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
"""Task Schedule
"""
import collections
import operator
import json
class Schedule():
"""
Base class for schedule
TODO: Make ABC
    A task is in one of four states: init, running, waiting, or complete
"""
def __init__(self):
return
def get_next_task(self, tasks):
"""
        Scheduling algorithm
Find oldest task where state is "init"
or "waiting" and "waitingfor" are completed
TODO: Topological sort of task graph
"""
if len(tasks) == 0:
return None
else:
            schedulable = []
for task in tasks:
if task["state"] == "init":
schedulable.append(task)
elif task["state"] == "waiting":
schedule = True
for task_id in task["waiting_for"]:
#Search for task in schedule
#TODO: Create an index
for t in tasks:
if task_id == t["task_id"]:
if t["state"] != "complete":
schedule = False
break
if schedule == True:
schedulable.append(task)
#After the tasks complete, then the iteration needs to be
#increased before this task is resumed
#TODO: Specific to generate tasks
#Move elsewhere?
task['iteration']+=1
task['state'] = 'running'
task['waiting_for'] = [];
task = None
#TODO: Sort schedulable based on task id
if len(schedulable) > 0:
schedulable.sort(key=operator.itemgetter('task_id'))
task = schedulable[0]
task['state'] = 'running'
task['waiting_for'] = [];
return task
class ScheduleMem(Schedule):
"""
Implements a schedule of tasks stored in memory
Can be used only when AMLA is used in single host mode
Currently a FIFO queue, stored in memory implemented using python's deque
Note: Not safe for concurrent access
TODO:
"""
def __init__(self):
#TODO: Assert in single host mode
self.tasks = collections.deque()
self.nexttask_id = 0
return
def add(self, t):
""" Add a task to the schedule
"""
task_id = self.nexttask_id
task = {'task_id': task_id, 'config': t['config'], 'state': 'init'}
if 'iteration' in t:
task['iteration'] = t['iteration']
else:
task['iteration'] = 0
self.tasks.append(task)
self.nexttask_id += 1
return task
def update(self, task):
for elem in self.tasks:
if elem['task_id'] == task['task_id']:
for key in task:
elem[key] = task[key]
return
def delete(self, task):
if not task:
return -1
        for elem in self.tasks:
            if elem['task_id'] == task['task_id']:
                self.tasks.remove(elem)
                break
return task['task_id']
def get(self, task):
elem = None
for elem in self.tasks:
if elem['task_id'] == task['task_id']:
break
return elem
def get_next(self):
"""Get the next task to be scheduled
Currently uses a FIFO queue
"""
if len(self.tasks) == 0:
return None
else:
task = self.get_next_task(self.tasks)
return task
def get_all(self):
return list(self.tasks)
class ScheduleDB(Schedule):
"""
Implements a schedule of tasks stored in a DB
Currently uses mysql, with transactions to support
concurrent schedulers
"""
def __init__(self, sys_config):
import MySQLdb
host = sys_config["database"]["host"]
user = sys_config["database"]["user"]
passwd = sys_config["database"]["password"]
db = sys_config["database"]["db"]
self.db = MySQLdb.connect(host=host,
user=user,
                                  passwd=passwd,
db=db)
self.cur = self.db.cursor()
query = "CREATE TABLE IF NOT EXISTS schedule ( \
task_id INT(11) NOT NULL AUTO_INCREMENT, \
config VARCHAR(1024) DEFAULT NULL, \
state VARCHAR(32) DEFAULT 'init', \
steps INT(11) DEFAULT 0, \
iteration INT(11) DEFAULT 0, \
waiting_for VARCHAR(1024) DEFAULT NULL, \
PRIMARY KEY(task_id)) ENGINE=InnoDB;"
self.cur.execute(query)
self.db.commit()
return
def __del__(self):
self.db.close()
def add(self, task):
""" Add a task to the schedule
"""
        #The task_id is assigned by the database (AUTO_INCREMENT): insert the
        #new task and read back its id via lastrowid. Must be atomic.
iteration = 0
if 'iteration' in task:
iteration = task['iteration']
query = "INSERT INTO schedule (config, iteration, state, waiting_for) VALUES \
('"+task['config']+"', "+str(iteration)+", 'init', '[]');"
self.cur.execute(query)
self.db.commit()
task_id = self.cur.lastrowid
task['task_id'] = task_id
return task
def update(self, task):
#TODO
if 'waiting_for' not in task:
task['waiting_for'] = []
query = "UPDATE schedule set state= '"+task['state']+"', waiting_for='"\
+json.dumps(task['waiting_for'])+"' WHERE task_id = "+str(task['task_id'])+";"
self.cur.execute(query)
self.db.commit()
return
def delete(self, task):
query = "DELETE FROM schedule WHERE task_id = "+str(task['task_id'])+";"
self.cur.execute(query)
self.db.commit()
return task['task_id']
def get(self, task):
query = "SELECT task_id, config, state FROM schedule WHERE task_id = "+str(task['task_id'])+";"
self.cur.execute(query)
row = self.cur.fetchone()
task = {"task_id": row[0], "config": row[1], "state": row[2]}
return task
def get_next(self):
"""Get the next task to be scheduled
Gets the task with the least task_id (oldest task) whose state is 'init'
"""
self.db.autocommit(False)
self.cur.execute("START TRANSACTION;")
task = None
try:
query = "SELECT task_id, config, state, iteration, waiting_for FROM schedule \
WHERE state='init' OR state='waiting';"
self.cur.execute(query)
rows = self.cur.fetchall()
if len(rows) == 0:
#No tasks to schedule
return None
tasks = []
for row in rows:
task = {"task_id": row[0], "config": row[1], "state": row[2],\
"iteration": int(row[3]), "waiting_for": json.loads(row[4])}
tasks.append(task)
task = self.get_next_task(tasks)
if task == None:
self.db.rollback()
return None
query = "UPDATE schedule set state = 'running', waiting_for='[]', \
iteration='"+str(task['iteration'])+"' WHERE task_id = "+str(task['task_id'])+";"
self.cur.execute(query)
self.db.commit()
except:
print("Error: Could not commit transaction. Rolling back")
self.db.rollback()
return task
def get_all(self):
query = "SELECT task_id, config, state FROM schedule;"
self.cur.execute(query)
rows = self.cur.fetchall()
tasks = []
for row in rows:
task = {"task_id": row[0], "config": row[1], "state": row[2]}
tasks.append(task)
return tasks
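# A minimal usage sketch of the in-memory schedule (single-host mode);
# 'configs/train.json' and 'configs/evaluate.json' are illustrative paths.
if __name__ == "__main__":
    schedule = ScheduleMem()
    schedule.add({'config': 'configs/train.json'})
    schedule.add({'config': 'configs/evaluate.json'})
    running = schedule.get_next()                 # oldest 'init' task, now 'running'
    schedule.update({'task_id': running['task_id'], 'state': 'complete'})
    print([t['state'] for t in schedule.get_all()])   # ['complete', 'init']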
|
pygame/platform-moving-up-down/main-2-class.py
|
whitmans-max/python-examples
| 140 |
48471
|
#!/usr/bin/env python3
# date: 2020.01.23
# https://stackoverflow.com/questions/59870590/collision-detection-ball-landing-on-platform
# Press SPACE to change player_gravity when it falls
import pygame
# --- constants --- (UPPER_CASE_NAMES)
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
FPS = 25 # for more than 220 it has no time to update screen
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
# --- classes --- (CamelCaseNames)
class Player():
def __init__(self):
self.rect = pygame.Rect(0,0,50,50)
self.color = (0,255,0)
self.rect.centerx = screen_rect.centerx
self.rect.bottom = screen_rect.bottom-25
self.gravity = 5 # try 5
self.speed_y = 0
def draw(self, screen):
pygame.draw.rect(screen, self.color, self.rect)
def update(self):
if self.speed_y < 0:
self.speed_y += 1
# move player down - to move down with platform
self.rect.y += self.gravity + self.speed_y
def handle_event(self, event):
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
self.speed_y = -20
class Platform():
def __init__(self, x, y, w, h, min_y, max_y):
self.rect = pygame.Rect(x, y, w, h)
self.color = (255,0,0)
        self.min_y = min_y
        self.max_y = max_y
self.speed = 5
self.direction = 'top'
def draw(self, screen):
pygame.draw.rect(screen, self.color, self.rect)
def update(self):
self.rect.y -= self.speed
if self.direction == 'top':
if self.rect.top <= self.min_y:
self.direction = 'bottom'
self.speed = -self.speed
else:
if self.rect.bottom >= self.max_y:
self.direction = 'top'
self.speed = -self.speed
# --- functions --- (lower_case_names)
# --- main ---
pygame.init()
screen = pygame.display.set_mode( (SCREEN_WIDTH, SCREEN_HEIGHT) )
screen_rect = screen.get_rect()
player = Player()
platforms = [
Platform( 0, 500-25, 200, 25, 100, 600),
Platform(300, 600-25, 200, 25, 100, 600),
Platform(600, 400-25, 200, 25, 100, 600),
]
# --- mainloop ---
clock = pygame.time.Clock()
running = True
while running:
# --- events ---
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
elif event.type == pygame.KEYUP:
if event.key == pygame.K_ESCAPE:
running = False
player.handle_event(event)
# --- changes/moves/updates ---
player.update()
for p in platforms:
p.update()
# move player up with platform
for p in platforms:
if player.rect.colliderect(p.rect):
player.rect.bottom = p.rect.top
# --- draws ---
screen.fill(BLACK)
player.draw(screen)
for p in platforms:
p.draw(screen)
pygame.display.flip()
# --- FPS ---
ms = clock.tick(FPS)
# --- end ---
pygame.quit()
|
system/dynamic_connection_creation.py
|
airflow-plugins/example_dags
| 297 |
48509
|
"""
Dynamic Connection Creation from a Variable
This file contains one ongoing DAG that executes every 15 minutes.
This DAG makes use of one custom operator:
- CreateConnectionsFromVariable
https://github.com/airflow-plugins/variable_connection_plugin/blob/master/operator/variable_connection_operator.py#L36
If using encrypted tokens in the Variable (recommended), it is necessary
to create a separate "Fernet Key Connection" with the relevant Fernet Key
kept in the password field. This Conn ID can then be specified in the
operator below.
"""
from datetime import datetime
from airflow import DAG
from airflow.operators import CreateConnectionsFromVariable
FERNET_KEY_CONN_ID = None
CONFIG_VARIABLE_KEY = ''
args = {
'owner': 'airflow',
'start_date': datetime(2018, 2, 22, 0, 0),
'provide_context': True,
'email': [],
'email_on_failure': True
}
dag = DAG(
'__VARIABLE_CONNECTION_CREATION__',
schedule_interval="*/15 * * * *",
default_args=args,
catchup=False
)
create_airflow_connections = CreateConnectionsFromVariable(
task_id='create_airflow_connections',
fernet_key_conn_id=FERNET_KEY_CONN_ID,
config_variable_key=CONFIG_VARIABLE_KEY,
dag=dag)
create_airflow_connections
|
pyjswidgets/pyjamas/ui/DropWidget.py
|
takipsizad/pyjs
| 739 |
48529
|
# Copyright (C) 2010 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyjamas import Factory
from pyjamas import DOM
from pyjamas.dnd.DNDHelper import dndHelper
from pyjamas.ui.Widget import Widget
from pyjamas.ui.DropHandler import DropHandler
import pyjd
class DropWidget(object):
"""
Mix-in class for a drop-target widget
"""
pass
class Html5DropWidget(Widget, DropHandler):
def __init__(self, **kw):
if (not hasattr(self, 'attached')) or kw:
Widget.__init__(self, **kw)
DropHandler.__init__(self)
self.addDropListener(self)
class EmulatedDropWidget(Html5DropWidget):
def __init__(self, **kw):
Html5DropWidget.__init__(self, **kw)
dndHelper.registerTarget(self)
def init(is_native=None):
global DropWidget
if is_native is None:
html5_dnd = hasattr(DOM.createElement('span'), 'draggable')
else:
html5_dnd = is_native
if html5_dnd:
DropWidget = Html5DropWidget
else:
DropWidget = EmulatedDropWidget
if pyjd.is_desktop:
init(pyjd.native_dnd)
else:
init(None)
Factory.registerClass('pyjamas.ui.DropWidget', 'DropWidget', DropWidget)
|
alipay/aop/api/response/AlipayPayAppSmartwearStatusQueryResponse.py
|
antopen/alipay-sdk-python-all
| 213 |
48532
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayPayAppSmartwearStatusQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayPayAppSmartwearStatusQueryResponse, self).__init__()
self._device_model = None
self._product_type = None
self._security_solution = None
self._status = None
@property
def device_model(self):
return self._device_model
@device_model.setter
def device_model(self, value):
self._device_model = value
@property
def product_type(self):
return self._product_type
@product_type.setter
def product_type(self, value):
self._product_type = value
@property
def security_solution(self):
return self._security_solution
@security_solution.setter
def security_solution(self, value):
self._security_solution = value
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
def parse_response_content(self, response_content):
response = super(AlipayPayAppSmartwearStatusQueryResponse, self).parse_response_content(response_content)
if 'device_model' in response:
self.device_model = response['device_model']
if 'product_type' in response:
self.product_type = response['product_type']
if 'security_solution' in response:
self.security_solution = response['security_solution']
if 'status' in response:
self.status = response['status']
|
saleor/graphql/payment/tests/queries/test_payments_filter.py
|
victor-abz/saleor
| 1,392 |
48535
|
import graphene
from .....payment.models import Payment
from ....tests.utils import get_graphql_content
PAYMENT_QUERY = """ query Payments($filter: PaymentFilterInput){
payments(first: 20, filter: $filter) {
edges {
node {
id
gateway
capturedAmount {
amount
currency
}
total {
amount
currency
}
actions
chargeStatus
transactions {
error
gatewayResponse
amount {
currency
amount
}
}
}
}
}
}
"""
def test_query_payments_filter_by_checkout(
payment_dummy, checkouts_list, permission_manage_orders, staff_api_client
):
# given
payment1 = payment_dummy
payment1.checkout = checkouts_list[0]
payment1.save()
payment2 = Payment.objects.get(id=payment1.id)
payment2.id = None
payment2.checkout = checkouts_list[1]
payment2.save()
payment3 = Payment.objects.get(id=payment1.id)
payment3.id = None
payment3.checkout = checkouts_list[2]
payment3.save()
variables = {
"filter": {
"checkouts": [
graphene.Node.to_global_id("Checkout", checkout.pk)
for checkout in checkouts_list[1:4]
]
}
}
# when
response = staff_api_client.post_graphql(
PAYMENT_QUERY, variables, permissions=[permission_manage_orders]
)
# then
content = get_graphql_content(response)
edges = content["data"]["payments"]["edges"]
payment_ids = {edge["node"]["id"] for edge in edges}
assert payment_ids == {
graphene.Node.to_global_id("Payment", payment.pk)
for payment in [payment2, payment3]
}
|
alipay/aop/api/domain/MemberCardOperator.py
|
Anning01/alipay-sdk-python-all
| 213 |
48578
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class MemberCardOperator(object):
def __init__(self):
self._op_id = None
self._op_type = None
@property
def op_id(self):
return self._op_id
@op_id.setter
def op_id(self, value):
self._op_id = value
@property
def op_type(self):
return self._op_type
@op_type.setter
def op_type(self, value):
self._op_type = value
def to_alipay_dict(self):
params = dict()
if self.op_id:
if hasattr(self.op_id, 'to_alipay_dict'):
params['op_id'] = self.op_id.to_alipay_dict()
else:
params['op_id'] = self.op_id
if self.op_type:
if hasattr(self.op_type, 'to_alipay_dict'):
params['op_type'] = self.op_type.to_alipay_dict()
else:
params['op_type'] = self.op_type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = MemberCardOperator()
if 'op_id' in d:
o.op_id = d['op_id']
if 'op_type' in d:
o.op_type = d['op_type']
return o
|
tests/catalyst/metrics/test_segmentation.py
|
tadejsv/catalyst
| 2,693 |
48596
|
<gh_stars>1000+
# flake8: noqa
from typing import Dict, List, Union
import pytest
import torch
from catalyst.metrics import DiceMetric, IOUMetric, TrevskyMetric
base_outputs = torch.tensor([[0.8, 0.1, 0], [0, 0.4, 0.3], [0, 0, 1]])
base_targets = torch.tensor([[1.0, 0, 0], [0, 1, 0], [1, 1, 0]])
base_outputs = torch.stack([base_outputs, base_targets])[None, :, :, :]
base_targets = torch.stack([base_targets, base_targets])[None, :, :, :]
base_outputs_2 = torch.tensor([[0.8, 0.1, 0.4], [0.1, 0.4, 0.3], [0, 1, 1]])
base_targets_2 = torch.tensor([[1.0, 0.1, 0], [0, 0.5, 0], [0, 1, 1]])
base_outputs_2 = torch.stack([base_outputs_2, base_targets_2])[None, :, :, :]
base_targets_2 = torch.stack([base_targets_2, base_targets_2])[None, :, :, :]
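# Shape note (added for clarity): after torch.stack and the [None, :, :, :] indexing,
# each of the four tensors above has shape [batch=1, classes=2, height=3, width=3].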
EPS = 1e-5
@pytest.mark.parametrize(
"outputs, targets, weights, class_names, batch_answers, total_answers",
(
(
[base_outputs, base_outputs_2],
[base_targets, base_targets_2],
[0.2, 0.8],
["class_name_00", "class_name_01"],
[
{
"dice/class_name_00": 0.3636363446712494,
"dice/class_name_01": 1.0,
"dice": 0.6818182,
"dice/_weighted": 0.8727272748947144,
},
{
"dice/class_name_00": 0.781818151473999,
"dice/class_name_01": 0.9055555462837219,
"dice": 0.8436868190765381,
"dice/_weighted": 0.8808081150054932,
},
],
[
{
"dice/class_name_00": 0.3636363446712494,
"dice/class_name_01": 1.0,
"dice": 0.6818181872367859,
"dice/_micro": 0.7123287916183472,
"dice/_weighted": 0.8727272748947144,
},
{
"dice/class_name_00": 0.5888112187385559,
"dice/class_name_01": 0.9552631378173828,
"dice/_micro": 0.7776271104812622,
"dice": 0.772037148475647,
"dice/_macro": 0.772037148475647,
"dice/_weighted": 0.8819727897644043,
},
],
),
),
)
def test_dice_metric(
outputs: List[torch.Tensor],
targets: List[torch.Tensor],
weights: List[float],
class_names: List[str],
batch_answers: List[Dict[str, float]],
total_answers: List[Dict[str, float]],
):
"""Docs."""
metric = DiceMetric(weights=weights, class_names=class_names)
for output, target, batch_answer, total_answer in zip(
outputs, targets, batch_answers, total_answers
):
batch_score = metric.update_key_value(output, target)
total_score = metric.compute_key_value()
for key, value in batch_answer.items():
assert key in batch_score
assert abs(batch_score[key] - batch_answer[key]) < EPS
for key, value in total_answer.items():
assert key in total_score
assert abs(total_score[key] - total_answer[key]) < EPS
@pytest.mark.parametrize(
"outputs, targets, weights, class_names, batch_answers, total_answers",
(
(
[base_outputs, base_outputs_2],
[base_targets, base_targets_2],
[0.2, 0.8],
["class_name_00", "class_name_01"],
[[0.3636363446712494, 1.0], [0.781818151473999, 0.9055555462837219]],
[
[
[0.3636363446712494, 1.0],
0.7123287916183472,
0.6818181872367859,
0.8727272748947144,
],
[
[0.5888112187385559, 0.9552631378173828],
0.7776271104812622,
0.772037148475647,
0.8819727897644043,
],
],
),
),
)
def test_dice_metric_compute(
outputs: List[torch.Tensor],
targets: List[torch.Tensor],
weights: List[float],
class_names: List[str],
batch_answers: List[List[float]],
total_answers: List[List[Union[List[float], float]]],
):
"""Docs."""
metric = DiceMetric(weights=weights, class_names=class_names)
for output, target, batch_answer, total_answer in zip(
outputs, targets, batch_answers, total_answers
):
batch_score = metric.update(output, target)
total_score = metric.compute()
assert len(batch_answer) == len(batch_score)
for pred, answer in zip(batch_score, batch_answer):
assert abs(pred - answer) < EPS
        assert len(total_score) == len(total_answer)
        for pred, answer in zip(total_score, total_answer):
if isinstance(pred, list):
for pred_sample, answer_sample in zip(pred, answer):
assert abs(pred_sample - answer_sample) < EPS
else:
assert abs(pred - answer) < EPS
@pytest.mark.parametrize(
"outputs, targets, weights, class_names, batch_answers, total_answers",
(
(
[base_outputs, base_outputs_2],
[base_targets, base_targets_2],
[0.2, 0.8],
["class_name_00", "class_name_01"],
[
{
"iou/class_name_00": 0.2222222536802292,
"iou/class_name_01": 1.0,
"iou": 0.6111111,
"iou/_weighted": 0.8444444537162781,
},
{
"iou/class_name_00": 0.641791045665741,
"iou/class_name_01": 0.8274111747741699,
"iou": 0.7346011400222778,
"iou/_weighted": 0.7902871370315552,
},
],
[
{
"iou/class_name_00": 0.2222222536802292,
"iou/class_name_01": 1.0,
"iou": 0.6111111044883728,
"iou/_micro": 0.5531914830207825,
"iou/_weighted": 0.8444444537162781,
},
{
"iou/class_name_00": 0.4172447919845581,
"iou/class_name_01": 0.9143576622009277,
"iou/_micro": 0.6361619234085083,
"iou": 0.6658012270927429,
"iou/_macro": 0.6658012270927429,
"iou/_weighted": 0.8149350881576538,
},
],
),
),
)
def test_iou_metric(
outputs: List[torch.Tensor],
targets: List[torch.Tensor],
weights: List[float],
class_names: List[str],
batch_answers: List[Dict[str, float]],
total_answers: List[Dict[str, float]],
):
"""Docs."""
metric = IOUMetric(weights=weights, class_names=class_names)
for output, target, batch_answer, total_answer in zip(
outputs, targets, batch_answers, total_answers
):
batch_score = metric.update_key_value(output, target)
total_score = metric.compute_key_value()
for key, value in batch_answer.items():
assert key in batch_score
assert abs(batch_score[key] - batch_answer[key]) < EPS
for key, value in total_answer.items():
assert key in total_score
assert abs(total_score[key] - total_answer[key]) < EPS
@pytest.mark.parametrize(
"outputs, targets, weights, class_names, batch_answers, total_answers",
(
(
[base_outputs, base_outputs_2],
[base_targets, base_targets_2],
[0.2, 0.8],
["class_name_00", "class_name_01"],
[[0.2222222536802292, 1.0], [0.641791045665741, 0.8274111747741699]],
[
[
[0.2222222536802292, 1.0],
0.5531914830207825,
0.6111111044883728,
0.8444444537162781,
],
[
[0.4172447919845581, 0.9143576622009277],
0.6361619234085083,
0.6658012270927429,
0.8149350881576538,
],
],
),
),
)
def test_iou_metric_compute(
outputs: List[torch.Tensor],
targets: List[torch.Tensor],
weights: List[float],
class_names: List[str],
batch_answers: List[List[float]],
total_answers: List[List[Union[List[float], float]]],
):
"""IOU update, compute test"""
metric = IOUMetric(weights=weights, class_names=class_names)
for output, target, batch_answer, total_answer in zip(
outputs, targets, batch_answers, total_answers
):
batch_score = metric.update(output, target)
total_score = metric.compute()
assert len(batch_answer) == len(batch_score)
for pred, answer in zip(batch_score, batch_answer):
assert abs(pred - answer) < EPS
        assert len(total_score) == len(total_answer)
        for pred, answer in zip(total_score, total_answer):
if isinstance(pred, list):
for pred_sample, answer_sample in zip(pred, answer):
assert abs(pred_sample - answer_sample) < EPS
else:
assert abs(pred - answer) < EPS
@pytest.mark.parametrize(
"outputs, targets, alpha, weights, class_names, batch_answers, total_answers",
(
(
[base_outputs, base_outputs_2],
[base_targets, base_targets_2],
0.2,
[0.2, 0.8],
["class_name_00", "class_name_01"],
[
{
"trevsky/class_name_00": 0.4166666567325592,
"trevsky/class_name_01": 1.0,
"trevsky": 0.7083333134651184,
"trevsky/_weighted": 0.8833333253860474,
},
{
"trevsky/class_name_00": 0.7524999976158142,
"trevsky/class_name_01": 0.9055555462837219,
"trevsky": 0.8290277719497681,
"trevsky/_weighted": 0.8749444484710693,
},
],
[
{
"trevsky/class_name_00": 0.4166666567325592,
"trevsky/class_name_01": 1.0,
"trevsky": 0.7083333134651184,
"trevsky/_micro": 0.7558139562606812,
"trevsky/_weighted": 0.8833333253860474,
},
{
"trevsky/class_name_00": 0.6119186282157898,
"trevsky/class_name_01": 0.9552631974220276,
"trevsky/_micro": 0.7921270728111267,
"trevsky": 0.7835909128189087,
"trevsky/_macro": 0.7835909128189087,
"trevsky/_weighted": 0.886594295501709,
},
],
),
),
)
def test_trevsky_metric(
outputs: List[torch.Tensor],
targets: List[torch.Tensor],
alpha: float,
weights: List[float],
class_names: List[str],
batch_answers: List[Dict[str, float]],
total_answers: List[Dict[str, float]],
):
    """Trevsky update_key_value, compute_key_value test"""
    metric = TrevskyMetric(alpha=alpha, weights=weights, class_names=class_names)
for output, target, batch_answer, total_answer in zip(
outputs, targets, batch_answers, total_answers
):
batch_score = metric.update_key_value(output, target)
total_score = metric.compute_key_value()
for key, value in batch_answer.items():
assert key in batch_score
assert abs(batch_score[key] - batch_answer[key]) < EPS
for key, value in total_answer.items():
assert key in total_score
assert abs(total_score[key] - total_answer[key]) < EPS
@pytest.mark.parametrize(
"outputs, targets, alpha, weights, class_names, batch_answers, total_answers",
(
(
[base_outputs, base_outputs_2],
[base_targets, base_targets_2],
0.2,
[0.2, 0.8],
["class_name_00", "class_name_01"],
[[0.4166666567325592, 1.0], [0.7524999976158142, 0.9055555462837219]],
[
[
[0.4166666567325592, 1.0],
0.7558139562606812,
0.7083333134651184,
0.8833333253860474,
],
[
[0.6119186282157898, 0.9552631974220276],
0.7921270728111267,
0.7835909128189087,
0.886594295501709,
],
],
),
),
)
def test_trevsky_metric_compute(
outputs: List[torch.Tensor],
targets: List[torch.Tensor],
alpha: float,
weights: List[float],
class_names: List[str],
batch_answers: List[List[float]],
total_answers: List[List[Union[List[float], float]]],
):
"""Trevsky update, compute test"""
metric = TrevskyMetric(alpha=alpha, weights=weights, class_names=class_names)
for output, target, batch_answer, total_answer in zip(
outputs, targets, batch_answers, total_answers
):
batch_score = metric.update(output, target)
total_score = metric.compute()
assert len(batch_answer) == len(batch_score)
for pred, answer in zip(batch_score, batch_answer):
assert abs(pred - answer) < EPS
        assert len(total_score) == len(total_answer)
        for pred, answer in zip(total_score, total_answer):
if isinstance(pred, list):
for pred_sample, answer_sample in zip(pred, answer):
assert abs(pred_sample - answer_sample) < EPS
else:
assert abs(pred - answer) < EPS
|
src/utils/encryption_utils.py
|
tomgilbertson/script-server-v1
| 833 |
48614
|
<reponame>tomgilbertson/script-server-v1<gh_stars>100-1000
import base64
import hashlib
from utils.apr1 import hash_apr1
def md5_apr1(salt, text):
return hash_apr1(salt, text)
def sha1(text):
result = hashlib.sha1(text.encode('utf8'))
return base64.b64encode(result.digest()).decode('utf8')
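# Note (added): sha1() returns base64 of the raw 20-byte digest rather than the usual
# hex digest, which appears to match the htpasswd "{SHA}" password format.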
|
tools/build_rules/test_rules_private.bzl
|
jobechoi/bazel
| 16,989 |
48640
|
# Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bash runfiles library init code for test_rules.bzl."""
# Init code to load the runfiles.bash file.
# The runfiles library itself defines rlocation which you would need to look
# up the library's runtime location, thus we have a chicken-and-egg problem.
INIT_BASH_RUNFILES = [
"# --- begin runfiles.bash initialization ---",
"# Copy-pasted from Bazel Bash runfiles library (tools/bash/runfiles/runfiles.bash).",
"set -euo pipefail",
'if [[ ! -d "${RUNFILES_DIR:-/dev/null}" && ! -f "${RUNFILES_MANIFEST_FILE:-/dev/null}" ]]; then',
' if [[ -f "$0.runfiles_manifest" ]]; then',
' export RUNFILES_MANIFEST_FILE="$0.runfiles_manifest"',
' elif [[ -f "$0.runfiles/MANIFEST" ]]; then',
' export RUNFILES_MANIFEST_FILE="$0.runfiles/MANIFEST"',
' elif [[ -f "$0.runfiles/bazel_tools/tools/bash/runfiles/runfiles.bash" ]]; then',
' export RUNFILES_DIR="$0.runfiles"',
" fi",
"fi",
'if [[ -f "${RUNFILES_DIR:-/dev/null}/bazel_tools/tools/bash/runfiles/runfiles.bash" ]]; then',
' source "${RUNFILES_DIR}/bazel_tools/tools/bash/runfiles/runfiles.bash"',
'elif [[ -f "${RUNFILES_MANIFEST_FILE:-/dev/null}" ]]; then',
' source "$(grep -m1 "^bazel_tools/tools/bash/runfiles/runfiles.bash " \\',
' "$RUNFILES_MANIFEST_FILE" | cut -d " " -f 2-)"',
"else",
' echo >&2 "ERROR: cannot find @bazel_tools//tools/bash/runfiles:runfiles.bash"',
" exit 1",
"fi",
"# --- end runfiles.bash initialization ---",
]
# Label of the runfiles library.
BASH_RUNFILES_DEP = "@bazel_tools//tools/bash/runfiles"
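# Hedged usage sketch (illustrative only, not used by any rule in this file): a
# generated test script would typically start with these init lines, followed by the
# test body, and the target would depend on BASH_RUNFILES_DEP in its runfiles.
def _example_test_script(body_lines):
    return "\n".join(INIT_BASH_RUNFILES + body_lines)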
|
guillotina/db/strategies/simple.py
|
rboixaderg/guillotina
| 173 |
48654
|
<gh_stars>100-1000
from guillotina import configure
from guillotina import glogging
from guillotina.db.interfaces import IDBTransactionStrategy
from guillotina.db.interfaces import ITransaction
from guillotina.db.strategies.base import BaseStrategy
logger = glogging.getLogger("guillotina")
@configure.adapter(for_=ITransaction, provides=IDBTransactionStrategy, name="simple")
class SimpleStrategy(BaseStrategy):
async def tpc_begin(self):
await self.retrieve_tid()
if self._transaction._db_txn is None:
await self._storage.start_transaction(self._transaction)
async def tpc_finish(self):
# do actual db commit
if self.writable_transaction:
await self._storage.commit(self._transaction)
|
app/grandchallenge/products/migrations/0007_projectairfiles.py
|
nlessmann/grand-challenge.org
| 101 |
48656
|
# Generated by Django 3.1.11 on 2021-07-01 20:18
from django.db import migrations, models
import grandchallenge.core.storage
class Migration(migrations.Migration):
dependencies = [
("products", "0006_product_ce_under"),
]
operations = [
migrations.CreateModel(
name="ProjectAirFiles",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("title", models.CharField(max_length=150)),
(
"study_file",
models.FileField(
upload_to=grandchallenge.core.storage.get_pdf_path
),
),
],
),
]
|
archive/extras/cat_dog_estimator/main.py
|
Pandinosaurus/tensorflow-workshop
| 796 |
48675
|
"""
A very simplified introduction to TensorFlow using Estimator API for training a
cat vs. dog classifier from the CIFAR-10 dataset. This version is intentionally
simplified and has a lot of room for improvement, in speed and accuracy.
Usage:
python main.py [train|predict] [predict file]
"""
import sys
import numpy as np
from PIL import Image
import tensorflow as tf
# Data file saved by extract_cats_dogs.py
DATA_FILE = 'catdog_data.npy'
NUM_IMAGES = 10000
# Model checkpoints and logs are saved here. If you want to train from scratch,
# be sure to delete everything in MODEL_DIR/ or change the directory.
MODEL_DIR = 'models'
# Some of the tunable hyperparameters are set here
LEARNING_RATE = 0.01
MOMENTUM = 0.9
TRAIN_EPOCHS = 20
BATCH_SIZE = 32
def model_fn(features, labels, mode):
"""Defines the CNN model that runs on the data.
The model we run is 3 convolutional layers followed by 1 fully connected
layer before the output. This is much simpler than most CNN models and is
designed to run decently on CPU. With a GPU, it is possible to scale to
more layers and more filters per layer.
Args:
features: batch_size x 32 x 32 x 3 uint8 images
labels: batch_size x 1 uint8 labels (0 or 1)
mode: TRAIN, EVAL, or PREDICT
Returns:
EstimatorSpec which defines the model to run
"""
# Preprocess the features by converting to floats in [-0.5, 0.5]
features = tf.cast(features, tf.float32)
features = (features / 255.0) - 1.0
# Define the CNN network
# conv1: 32 x 32 x 3 -> 32 x 32 x 16
net = tf.layers.conv2d(
inputs=features,
filters=16, # 16 channels after conv
kernel_size=3, # 3x3 conv kernel
padding='same', # Output tensor is same shape
activation=tf.nn.relu) # ReLU activation
# pool1: 32 x 32 x 16 -> 16 x 16 x 16
net = tf.layers.max_pooling2d(
inputs=net,
pool_size=2,
strides=2) # Downsample 2x
# conv2: 16 x 16 x 16 -> 16 x 16 x 32
net = tf.layers.conv2d(
inputs=net,
filters=32,
kernel_size=3,
padding='same',
activation=tf.nn.relu)
# pool2: 16 x 16 x 32 -> 8 x 8 x 32
net = tf.layers.max_pooling2d(
inputs=net,
pool_size=2,
strides=2)
# conv3: 8 x 8 x 32 -> 8 x 8 x 64
net = tf.layers.conv2d(
inputs=net,
filters=64,
kernel_size=3,
padding='same',
activation=tf.nn.relu)
# flat: 8 x 8 x 64 -> 4096
net = tf.contrib.layers.flatten(net)
# fc4: 4096 -> 1000
net = tf.layers.dense(
inputs=net,
units=1000,
activation=tf.nn.relu)
# output: 1000 -> 2
logits = tf.layers.dense(
inputs=net,
units=2)
# Softmax for probabilities
probabilities = tf.nn.softmax(logits)
predictions = tf.argmax(
input=logits,
axis=1,
output_type=tf.int32)
# Return maximum prediction if we're running PREDICT
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions={
'prediction': predictions,
'probability': probabilities})
# Loss function and optimizer for training
loss = tf.losses.softmax_cross_entropy(
onehot_labels=tf.one_hot(labels, depth=2),
logits=logits)
train_op = tf.train.MomentumOptimizer(
LEARNING_RATE, MOMENTUM).minimize(
loss=loss,
global_step=tf.train.get_global_step())
# Accuracy for evaluation
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels,
predictions=predictions)}
# EVAL uses loss and eval_metric_ops, TRAIN uses loss and train_op
return tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops)
def input_fn_wrapper(is_training):
"""Input function wrapper for training and eval.
    A wrapper function is used because we want to have slightly different
behavior for the dataset during training (shuffle and loop data) and
evaluation (don't shuffle and run exactly once).
Args:
is_training: bool for if the model is training
Returns:
function with signature () -> features, labels
where features and labels are the same shapes expected by model_fn
"""
def input_fn():
data = np.load(DATA_FILE).item()
np_input_fn = tf.estimator.inputs.numpy_input_fn(
x={'x': data['images']},
y=data['labels'],
batch_size=BATCH_SIZE,
shuffle=is_training,
num_epochs=None if is_training else 1)
features_dict, labels = np_input_fn()
# Since the only feature is the image itself, return the image directly
# instead of the features dict
return features_dict['x'], labels
return input_fn
def process_image(image_file):
"""Convert PIL Image to a format that the network can accept.
Operations performed:
- Load image file
- Central crop square
- Resize to 32 x 32
- Convert to numpy array
Args:
image_file: str file name of image
Returns:
        numpy.array image with shape [1, 32, 32, 3]
Assumes that image is RGB and at least 32 x 32.
"""
image = Image.open(image_file)
width, height = image.size
min_dim = min(width, height)
left = (width - min_dim) / 2
top = (height - min_dim) / 2
right = (width + min_dim) / 2
bottom = (height + min_dim) / 2
image = image.crop((left, top, right, bottom))
image = image.resize((32, 32), resample=Image.BILINEAR)
image = np.asarray(image, dtype=np.uint8)
image = np.reshape(image, [1, 32, 32, 3])
return image
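# Note (added): the reshape above assumes a 3-channel RGB input, as stated in the
# docstring; RGBA or grayscale files would need image.convert('RGB') first to keep
# the [1, 32, 32, 3] shape valid.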
def main():
if len(sys.argv) < 2 or sys.argv[1] not in ['train', 'predict']:
print 'Usage: python main.py [train|predict] [predict file]'
sys.exit()
tf.logging.set_verbosity(tf.logging.INFO)
# Create the estimator object that is used by train, evaluate, and predict
# Note that model_fn is not called until the first usage of the model.
estimator = tf.estimator.Estimator(
model_fn=model_fn,
config=tf.estimator.RunConfig().replace(
model_dir=MODEL_DIR))
if sys.argv[1] == 'train':
steps_per_epoch = NUM_IMAGES / BATCH_SIZE
for epoch in xrange(TRAIN_EPOCHS):
estimator.train(
input_fn=input_fn_wrapper(True),
steps=steps_per_epoch)
# Evaluating on the same dataset as training for simplicity, normally
# this is a very bad idea since you are not testing how well your
# model generalizes to unseen data.
estimator.evaluate(input_fn=input_fn_wrapper(False))
else: # sys.argv[1] == 'predict'
if len(sys.argv) < 3:
print 'Usage: python main.py predict [predict file]'
sys.exit()
image = process_image(sys.argv[2])
# Define a new input function for prediction which outputs a single image
def predict_input_fn():
np_input_fn = tf.estimator.inputs.numpy_input_fn(
x={'x': image},
num_epochs=1,
shuffle=False)
features_dict = np_input_fn()
return features_dict['x']
pred_dict = estimator.predict(
input_fn=predict_input_fn).next()
print 'Probability of cat: %.5f\tProbability of dog: %.5f' % (
pred_dict['probability'][1], pred_dict['probability'][0])
print 'Prediction %s' % ('CAT' if pred_dict['prediction'] == 1 else 'DOG')
if __name__ == '__main__':
main()
|
homeassistant/components/fixer/__init__.py
|
domwillcode/home-assistant
| 30,023 |
48705
|
<filename>homeassistant/components/fixer/__init__.py
"""The fixer component."""
|
rlgraph/components/layers/preprocessing/image_crop.py
|
RLGraph/RLGraph
| 290 |
48732
|
<reponame>RLGraph/RLGraph
# Copyright 2018/2019 The RLgraph authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from rlgraph import get_backend
from rlgraph.components.layers.preprocessing.preprocess_layer import PreprocessLayer
from rlgraph.utils.decorators import rlgraph_api
from rlgraph.utils.ops import flatten_op, unflatten_op
if get_backend() == "tf":
import tensorflow as tf
elif get_backend() == "pytorch":
import torch
class ImageCrop(PreprocessLayer):
"""
Crops one or more images to a new size without touching the color channel.
"""
def __init__(self, x=0, y=0, width=0, height=0, scope="image-crop", **kwargs):
"""
Args:
x (int): Start x coordinate.
y (int): Start y coordinate.
width (int): Width of resulting image.
height (int): Height of resulting image.
"""
super(ImageCrop, self).__init__(scope=scope, **kwargs)
self.x = x
self.y = y
self.width = width
self.height = height
assert self.x >= 0
assert self.y >= 0
assert self.width > 0
assert self.height > 0
# The output spaces after preprocessing (per flat-key).
self.output_spaces = dict()
def get_preprocessed_space(self, space):
ret = dict()
for key, value in space.flatten().items():
# Do some sanity checking.
rank = value.rank
assert rank == 2 or rank == 3, \
"ERROR: Given image's rank (which is {}{}, not counting batch rank) must be either 2 or 3!".\
format(rank, ("" if key == "" else " for key '{}'".format(key)))
# Determine the output shape.
shape = list(value.shape)
shape[0] = self.width
shape[1] = self.height
ret[key] = value.__class__(shape=tuple(shape), add_batch_rank=value.has_batch_rank)
return unflatten_op(ret)
def create_variables(self, input_spaces, action_space=None):
in_space = input_spaces["inputs"]
self.output_spaces = flatten_op(self.get_preprocessed_space(in_space))
@rlgraph_api(flatten_ops=True, split_ops=True, add_auto_key_as_first_param=True)
def _graph_fn_call(self, key, inputs):
"""
Images come in with either a batch dimension or not.
"""
if self.backend == "python" or get_backend() == "python":
if isinstance(inputs, list):
inputs = np.asarray(inputs)
# Preserve batch dimension.
if self.output_spaces[key].has_batch_rank is True:
return inputs[:, self.y:self.y + self.height, self.x:self.x + self.width]
else:
return inputs[self.y:self.y + self.height, self.x:self.x + self.width]
elif get_backend() == "pytorch":
if isinstance(inputs, list):
inputs = torch.tensor(inputs)
            # TODO: this key check exists because this can be called during the meta-graph build,
            # when the output spaces do not exist yet -> needs a better solution.
# Preserve batch dimension.
if key in self.output_spaces and self.output_spaces[key].has_batch_rank is True:
return inputs[:, self.y:self.y + self.height, self.x:self.x + self.width]
else:
return inputs[self.y:self.y + self.height, self.x:self.x + self.width]
elif get_backend() == "tf":
return tf.image.crop_to_bounding_box(
image=inputs,
offset_height=self.y,
offset_width=self.x,
target_height=self.height,
target_width=self.width
)
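# Hedged sketch (illustrative, not part of RLgraph's public API): the python-backend
# branch of _graph_fn_call above is equivalent to plain slicing on a [H, W, C] image.
def _example_numpy_crop(image, x=0, y=0, width=1, height=1):
    # color channel is untouched; only the spatial dims are cropped
    return image[y:y + height, x:x + width]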
|
pythonforandroid/recipes/pydantic/__init__.py
|
syrykh/python-for-android
| 6,278 |
48739
|
<reponame>syrykh/python-for-android<gh_stars>1000+
from pythonforandroid.recipe import PythonRecipe
class PydanticRecipe(PythonRecipe):
version = '1.8.2'
url = 'https://github.com/samuelcolvin/pydantic/archive/refs/tags/v{version}.zip'
depends = ['setuptools']
python_depends = ['Cython', 'devtools', 'email-validator', 'dataclasses', 'typing-extensions', 'python-dotenv']
call_hostpython_via_targetpython = False
recipe = PydanticRecipe()
|
maskrcnn_benchmark/nas/rl/genotypes.py
|
DominickZhang/NAS-FCOS
| 187 |
48759
|
from collections import namedtuple
Genotype = namedtuple('Genotype', 'backbone rpn')
OP_NAMES = [
'sep_conv_3x3',
'sep_conv_3x3_dil3',
'sep_conv_5x5_dil6',
'skip_connect',
'def_conv_3x3',
]
AGG_NAMES = [
'psum',
'cat'
]
HEAD_OP_NAMES = [
'conv1x1',
'conv3x3',
'sep_conv_3x3',
'sep_conv_3x3_dil3',
'skip_connect',
'def_conv_3x3',
]
HEAD_AGG_NAMES = [
'psum',
'cat'
]
|
tests/test_json_schema.py
|
riddopic/opta
| 595 |
48773
|
<filename>tests/test_json_schema.py<gh_stars>100-1000
from opta.json_schema import check_schemas
def test_returns_without_error() -> None:
check_schemas()
|
tests/test_util.py
|
LivingTrades83/fast_arrow
| 143 |
48796
|
import vcr
from fast_arrow import Client
def gen_vcr():
return vcr.VCR(
cassette_library_dir='tests/fixtures_vcr',
record_mode='none',
match_on=['method', 'scheme', 'host', 'port', 'path', 'query'],
)
def gen_client():
auth_data = gen_auth_data()
client = Client(auth_data)
return client
def gen_auth_data():
auth_data = {
"account_id": 123,
"access_token": "<PASSWORD>",
"refresh_token": "<PASSWORD>",
"device_token": "<PASSWORD>",
}
return auth_data
|
example/aqi_curve_onecity.py
|
solider245/OpenData
| 1,179 |
48888
|
<reponame>solider245/OpenData<filename>example/aqi_curve_onecity.py<gh_stars>1000+
# encoding: utf-8
from opendatatools import aqi
from pyecharts import Line
import pandas as pd
if __name__ == '__main__':
df_aqi = aqi.get_daily_aqi_onecity('北京市')
df_aqi.set_index('date', inplace=True)
df_aqi.sort_index(ascending=True, inplace=True)
df_aqi = df_aqi[df_aqi.index >= "2018-01-01"]
axis_x = df_aqi.index
axis_y = df_aqi['aqi']
line = Line("北京AQI趋势图")
line.add("aqi curve for beijing", axis_x, axis_y, mark_point=["average"])
line.render("aqi_bj_curve.html")
|
src/guestconfig/azext_guestconfig/vendored_sdks/guestconfig/models/_models_py3.py
|
Mannan2812/azure-cli-extensions
| 207 |
48925
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._guest_configuration_client_enums import *
class AssignmentInfo(msrest.serialization.Model):
"""Information about the guest configuration assignment.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: Name of the guest configuration assignment.
:vartype name: str
:param configuration: Information about the configuration.
:type configuration: ~guest_configuration_client.models.ConfigurationInfo
"""
_validation = {
'name': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'configuration': {'key': 'configuration', 'type': 'ConfigurationInfo'},
}
def __init__(
self,
*,
configuration: Optional["ConfigurationInfo"] = None,
**kwargs
):
super(AssignmentInfo, self).__init__(**kwargs)
self.name = None
self.configuration = configuration
class AssignmentReportDetails(msrest.serialization.Model):
"""Details of the guest configuration assignment report.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar compliance_status: A value indicating compliance status of the machine for the assigned
guest configuration. Possible values include: "Compliant", "NonCompliant", "Pending".
:vartype compliance_status: str or ~guest_configuration_client.models.ComplianceStatus
:ivar start_time: Start date and time of the guest configuration assignment compliance status
check.
:vartype start_time: ~datetime.datetime
:ivar end_time: End date and time of the guest configuration assignment compliance status
check.
:vartype end_time: ~datetime.datetime
:ivar job_id: GUID of the report.
:vartype job_id: str
:ivar operation_type: Type of report, Consistency or Initial. Possible values include:
"Consistency", "Initial".
:vartype operation_type: str or ~guest_configuration_client.models.Type
:param resources: The list of resources for which guest configuration assignment compliance is
checked.
:type resources: list[~guest_configuration_client.models.AssignmentReportResource]
"""
_validation = {
'compliance_status': {'readonly': True},
'start_time': {'readonly': True},
'end_time': {'readonly': True},
'job_id': {'readonly': True},
'operation_type': {'readonly': True},
}
_attribute_map = {
'compliance_status': {'key': 'complianceStatus', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'job_id': {'key': 'jobId', 'type': 'str'},
'operation_type': {'key': 'operationType', 'type': 'str'},
'resources': {'key': 'resources', 'type': '[AssignmentReportResource]'},
}
def __init__(
self,
*,
resources: Optional[List["AssignmentReportResource"]] = None,
**kwargs
):
super(AssignmentReportDetails, self).__init__(**kwargs)
self.compliance_status = None
self.start_time = None
self.end_time = None
self.job_id = None
self.operation_type = None
self.resources = resources
class AssignmentReportResource(msrest.serialization.Model):
"""The guest configuration assignment resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar compliance_status: A value indicating compliance status of the machine for the assigned
guest configuration. Possible values include: "Compliant", "NonCompliant", "Pending".
:vartype compliance_status: str or ~guest_configuration_client.models.ComplianceStatus
:param reasons: Compliance reason and reason code for a resource.
:type reasons:
list[~guest_configuration_client.models.AssignmentReportResourceComplianceReason]
:ivar properties: Properties of a guest configuration assignment resource.
:vartype properties: object
"""
_validation = {
'compliance_status': {'readonly': True},
'properties': {'readonly': True},
}
_attribute_map = {
'compliance_status': {'key': 'complianceStatus', 'type': 'str'},
'reasons': {'key': 'reasons', 'type': '[AssignmentReportResourceComplianceReason]'},
'properties': {'key': 'properties', 'type': 'object'},
}
def __init__(
self,
*,
reasons: Optional[List["AssignmentReportResourceComplianceReason"]] = None,
**kwargs
):
super(AssignmentReportResource, self).__init__(**kwargs)
self.compliance_status = None
self.reasons = reasons
self.properties = None
class AssignmentReportResourceComplianceReason(msrest.serialization.Model):
"""Reason and code for the compliance of the guest configuration assignment resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar phrase: Reason for the compliance of the guest configuration assignment resource.
:vartype phrase: str
:ivar code: Code for the compliance of the guest configuration assignment resource.
:vartype code: str
"""
_validation = {
'phrase': {'readonly': True},
'code': {'readonly': True},
}
_attribute_map = {
'phrase': {'key': 'phrase', 'type': 'str'},
'code': {'key': 'code', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AssignmentReportResourceComplianceReason, self).__init__(**kwargs)
self.phrase = None
self.code = None
class ConfigurationInfo(msrest.serialization.Model):
"""Information about the configuration.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: Name of the configuration.
:vartype name: str
:ivar version: Version of the configuration.
:vartype version: str
"""
_validation = {
'name': {'readonly': True},
'version': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ConfigurationInfo, self).__init__(**kwargs)
self.name = None
self.version = None
class ConfigurationParameter(msrest.serialization.Model):
"""Represents a configuration parameter.
:param name: Name of the configuration parameter.
:type name: str
:param value: Value of the configuration parameter.
:type value: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
value: Optional[str] = None,
**kwargs
):
super(ConfigurationParameter, self).__init__(**kwargs)
self.name = name
self.value = value
class ConfigurationSetting(msrest.serialization.Model):
"""Configuration setting of LCM (Local Configuration Manager).
:param configuration_mode: Specifies how the LCM(Local Configuration Manager) actually applies
the configuration to the target nodes. Possible values are ApplyOnly, ApplyAndMonitor, and
ApplyAndAutoCorrect. Possible values include: "ApplyOnly", "ApplyAndMonitor",
"ApplyAndAutoCorrect".
:type configuration_mode: str or ~guest_configuration_client.models.ConfigurationMode
:param allow_module_overwrite: If true - new configurations downloaded from the pull service
are allowed to overwrite the old ones on the target node. Otherwise, false. Possible values
include: "True", "False".
:type allow_module_overwrite: str or ~guest_configuration_client.models.AllowModuleOverwrite
:param action_after_reboot: Specifies what happens after a reboot during the application of a
configuration. The possible values are ContinueConfiguration and StopConfiguration. Possible
values include: "ContinueConfiguration", "StopConfiguration".
:type action_after_reboot: str or ~guest_configuration_client.models.ActionAfterReboot
:param refresh_frequency_mins: The time interval, in minutes, at which the LCM checks a pull
service to get updated configurations. This value is ignored if the LCM is not configured in
pull mode. The default value is 30.
:type refresh_frequency_mins: float
:param reboot_if_needed: Set this to true to automatically reboot the node after a
configuration that requires reboot is applied. Otherwise, you will have to manually reboot the
node for any configuration that requires it. The default value is false. To use this setting
when a reboot condition is enacted by something other than DSC (such as Windows Installer),
combine this setting with the xPendingReboot module. Possible values include: "True", "False".
Default value: "False".
:type reboot_if_needed: str or ~guest_configuration_client.models.RebootIfNeeded
:param configuration_mode_frequency_mins: How often, in minutes, the current configuration is
checked and applied. This property is ignored if the ConfigurationMode property is set to
ApplyOnly. The default value is 15.
:type configuration_mode_frequency_mins: float
"""
_attribute_map = {
'configuration_mode': {'key': 'configurationMode', 'type': 'str'},
'allow_module_overwrite': {'key': 'allowModuleOverwrite', 'type': 'str'},
'action_after_reboot': {'key': 'actionAfterReboot', 'type': 'str'},
'refresh_frequency_mins': {'key': 'refreshFrequencyMins', 'type': 'float'},
'reboot_if_needed': {'key': 'rebootIfNeeded', 'type': 'str'},
'configuration_mode_frequency_mins': {'key': 'configurationModeFrequencyMins', 'type': 'float'},
}
def __init__(
self,
*,
configuration_mode: Optional[Union[str, "ConfigurationMode"]] = None,
allow_module_overwrite: Optional[Union[str, "AllowModuleOverwrite"]] = None,
action_after_reboot: Optional[Union[str, "ActionAfterReboot"]] = None,
refresh_frequency_mins: Optional[float] = 30,
reboot_if_needed: Optional[Union[str, "RebootIfNeeded"]] = "False",
configuration_mode_frequency_mins: Optional[float] = 15,
**kwargs
):
super(ConfigurationSetting, self).__init__(**kwargs)
self.configuration_mode = configuration_mode
self.allow_module_overwrite = allow_module_overwrite
self.action_after_reboot = action_after_reboot
self.refresh_frequency_mins = refresh_frequency_mins
self.reboot_if_needed = reboot_if_needed
self.configuration_mode_frequency_mins = configuration_mode_frequency_mins
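# Hedged usage sketch (assumption, not part of the generated client): a caller could
# build an explicit LCM setting like this; the chosen values are illustrative only.
def _example_configuration_setting():
    return ConfigurationSetting(
        configuration_mode="ApplyAndMonitor",
        reboot_if_needed="True",
        configuration_mode_frequency_mins=30,
    )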
class ErrorResponse(msrest.serialization.Model):
"""Error response of an operation failure.
:param error:
:type error: ~guest_configuration_client.models.ErrorResponseError
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ErrorResponseError'},
}
def __init__(
self,
*,
error: Optional["ErrorResponseError"] = None,
**kwargs
):
super(ErrorResponse, self).__init__(**kwargs)
self.error = error
class ErrorResponseError(msrest.serialization.Model):
"""ErrorResponseError.
:param code: Error code.
:type code: str
:param message: Detail error message indicating why the operation failed.
:type message: str
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
*,
code: Optional[str] = None,
message: Optional[str] = None,
**kwargs
):
super(ErrorResponseError, self).__init__(**kwargs)
self.code = code
self.message = message
class Resource(msrest.serialization.Model):
"""The core properties of ARM resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: ARM resource id of the guest configuration assignment.
:vartype id: str
:param name: Name of the guest configuration assignment.
:type name: str
:param location: Region where the VM is located.
:type location: str
:ivar type: The type of the resource.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
location: Optional[str] = None,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = name
self.location = location
self.type = None
class GuestConfigurationAssignment(Resource):
"""Guest configuration assignment is an association between a machine and guest configuration.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: ARM resource id of the guest configuration assignment.
:vartype id: str
:param name: Name of the guest configuration assignment.
:type name: str
:param location: Region where the VM is located.
:type location: str
:ivar type: The type of the resource.
:vartype type: str
:ivar target_resource_id: VM resource Id.
:vartype target_resource_id: str
:ivar compliance_status_properties_compliance_status: A value indicating compliance status of
the machine for the assigned guest configuration. Possible values include: "Compliant",
"NonCompliant", "Pending".
:vartype compliance_status_properties_compliance_status: str or
~guest_configuration_client.models.ComplianceStatus
:ivar last_compliance_status_checked: Date and time when last compliance status was checked.
:vartype last_compliance_status_checked: ~datetime.datetime
:ivar latest_report_id: Id of the latest report for the guest configuration assignment.
:vartype latest_report_id: str
:param context: The source which initiated the guest configuration assignment. Ex: Azure
Policy.
:type context: str
:ivar assignment_hash: Combined hash of the configuration package and parameters.
:vartype assignment_hash: str
:ivar provisioning_state: The provisioning state, which only appears in the response. Possible
values include: "Succeeded", "Failed", "Canceled", "Created".
:vartype provisioning_state: str or ~guest_configuration_client.models.ProvisioningState
:ivar id_properties_latest_assignment_report_id: ARM resource id of the report for the guest
configuration assignment.
:vartype id_properties_latest_assignment_report_id: str
:ivar report_id: GUID that identifies the guest configuration assignment report under a
subscription, resource group.
:vartype report_id: str
:param assignment: Configuration details of the guest configuration assignment.
:type assignment: ~guest_configuration_client.models.AssignmentInfo
:param vm: Information about the VM.
:type vm: ~guest_configuration_client.models.VmInfo
:ivar start_time: Start date and time of the guest configuration assignment compliance status
check.
:vartype start_time: ~datetime.datetime
:ivar end_time: End date and time of the guest configuration assignment compliance status
check.
:vartype end_time: ~datetime.datetime
:ivar compliance_status_properties_latest_assignment_report_compliance_status: A value
indicating compliance status of the machine for the assigned guest configuration. Possible
values include: "Compliant", "NonCompliant", "Pending".
:vartype compliance_status_properties_latest_assignment_report_compliance_status: str or
~guest_configuration_client.models.ComplianceStatus
:ivar operation_type: Type of report, Consistency or Initial. Possible values include:
"Consistency", "Initial".
:vartype operation_type: str or ~guest_configuration_client.models.Type
:param resources: The list of resources for which guest configuration assignment compliance is
checked.
:type resources: list[~guest_configuration_client.models.AssignmentReportResource]
:ivar kind: Kind of the guest configuration. For example:DSC. Default value: "DSC".
:vartype kind: str
:param name_properties_guest_configuration_name: Name of the guest configuration.
:type name_properties_guest_configuration_name: str
:param version: Version of the guest configuration.
:type version: str
:ivar content_uri: Uri of the storage where guest configuration package is uploaded.
:vartype content_uri: str
:ivar content_hash: Combined hash of the guest configuration package and configuration
parameters.
:vartype content_hash: str
:param configuration_parameter: The configuration parameters for the guest configuration.
:type configuration_parameter: list[~guest_configuration_client.models.ConfigurationParameter]
:param configuration_setting: The configuration setting for the guest configuration.
:type configuration_setting: ~guest_configuration_client.models.ConfigurationSetting
"""
_validation = {
'id': {'readonly': True},
'type': {'readonly': True},
'target_resource_id': {'readonly': True},
'compliance_status_properties_compliance_status': {'readonly': True},
'last_compliance_status_checked': {'readonly': True},
'latest_report_id': {'readonly': True},
'assignment_hash': {'readonly': True},
'provisioning_state': {'readonly': True},
'id_properties_latest_assignment_report_id': {'readonly': True},
'report_id': {'readonly': True},
'start_time': {'readonly': True},
'end_time': {'readonly': True},
'compliance_status_properties_latest_assignment_report_compliance_status': {'readonly': True},
'operation_type': {'readonly': True},
'kind': {'constant': True},
'content_uri': {'readonly': True},
'content_hash': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'target_resource_id': {'key': 'properties.targetResourceId', 'type': 'str'},
'compliance_status_properties_compliance_status': {'key': 'properties.complianceStatus', 'type': 'str'},
'last_compliance_status_checked': {'key': 'properties.lastComplianceStatusChecked', 'type': 'iso-8601'},
'latest_report_id': {'key': 'properties.latestReportId', 'type': 'str'},
'context': {'key': 'properties.context', 'type': 'str'},
'assignment_hash': {'key': 'properties.assignmentHash', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'id_properties_latest_assignment_report_id': {'key': 'properties.latestAssignmentReport.id', 'type': 'str'},
'report_id': {'key': 'properties.latestAssignmentReport.reportId', 'type': 'str'},
'assignment': {'key': 'properties.latestAssignmentReport.assignment', 'type': 'AssignmentInfo'},
'vm': {'key': 'properties.latestAssignmentReport.vm', 'type': 'VmInfo'},
'start_time': {'key': 'properties.latestAssignmentReport.startTime', 'type': 'iso-8601'},
'end_time': {'key': 'properties.latestAssignmentReport.endTime', 'type': 'iso-8601'},
'compliance_status_properties_latest_assignment_report_compliance_status': {'key': 'properties.latestAssignmentReport.complianceStatus', 'type': 'str'},
'operation_type': {'key': 'properties.latestAssignmentReport.operationType', 'type': 'str'},
'resources': {'key': 'properties.latestAssignmentReport.resources', 'type': '[AssignmentReportResource]'},
'kind': {'key': 'properties.guestConfiguration.kind', 'type': 'str'},
'name_properties_guest_configuration_name': {'key': 'properties.guestConfiguration.name', 'type': 'str'},
'version': {'key': 'properties.guestConfiguration.version', 'type': 'str'},
'content_uri': {'key': 'properties.guestConfiguration.contentUri', 'type': 'str'},
'content_hash': {'key': 'properties.guestConfiguration.contentHash', 'type': 'str'},
'configuration_parameter': {'key': 'properties.guestConfiguration.configurationParameter', 'type': '[ConfigurationParameter]'},
'configuration_setting': {'key': 'properties.guestConfiguration.configurationSetting', 'type': 'ConfigurationSetting'},
}
kind = "DSC"
def __init__(
self,
*,
name: Optional[str] = None,
location: Optional[str] = None,
context: Optional[str] = None,
assignment: Optional["AssignmentInfo"] = None,
vm: Optional["VmInfo"] = None,
resources: Optional[List["AssignmentReportResource"]] = None,
name_properties_guest_configuration_name: Optional[str] = None,
version: Optional[str] = None,
configuration_parameter: Optional[List["ConfigurationParameter"]] = None,
configuration_setting: Optional["ConfigurationSetting"] = None,
**kwargs
):
super(GuestConfigurationAssignment, self).__init__(name=name, location=location, **kwargs)
self.target_resource_id = None
self.compliance_status_properties_compliance_status = None
self.last_compliance_status_checked = None
self.latest_report_id = None
self.context = context
self.assignment_hash = None
self.provisioning_state = None
self.id_properties_latest_assignment_report_id = None
self.report_id = None
self.assignment = assignment
self.vm = vm
self.start_time = None
self.end_time = None
self.compliance_status_properties_latest_assignment_report_compliance_status = None
self.operation_type = None
self.resources = resources
self.name_properties_guest_configuration_name = name_properties_guest_configuration_name
self.version = version
self.content_uri = None
self.content_hash = None
self.configuration_parameter = configuration_parameter
self.configuration_setting = configuration_setting
class GuestConfigurationAssignmentList(msrest.serialization.Model):
"""The response of the list guest configuration assignment operation.
:param value: Result of the list guest configuration assignment operation.
:type value: list[~guest_configuration_client.models.GuestConfigurationAssignment]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[GuestConfigurationAssignment]'},
}
def __init__(
self,
*,
value: Optional[List["GuestConfigurationAssignment"]] = None,
**kwargs
):
super(GuestConfigurationAssignmentList, self).__init__(**kwargs)
self.value = value
class GuestConfigurationAssignmentReport(msrest.serialization.Model):
"""Report for the guest configuration assignment. Report contains information such as compliance status, reason, and more.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: ARM resource id of the report for the guest configuration assignment.
:vartype id: str
:ivar name: GUID that identifies the guest configuration assignment report under a
subscription, resource group.
:vartype name: str
:param properties: Properties of the guest configuration report.
:type properties:
~guest_configuration_client.models.GuestConfigurationAssignmentReportProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'GuestConfigurationAssignmentReportProperties'},
}
def __init__(
self,
*,
properties: Optional["GuestConfigurationAssignmentReportProperties"] = None,
**kwargs
):
super(GuestConfigurationAssignmentReport, self).__init__(**kwargs)
self.id = None
self.name = None
self.properties = properties
class GuestConfigurationAssignmentReportList(msrest.serialization.Model):
"""List of guest configuration assignment reports.
:param value: List of reports for the guest configuration. Report contains information such as
compliance status, reason and more.
:type value: list[~guest_configuration_client.models.GuestConfigurationAssignmentReport]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[GuestConfigurationAssignmentReport]'},
}
def __init__(
self,
*,
value: Optional[List["GuestConfigurationAssignmentReport"]] = None,
**kwargs
):
super(GuestConfigurationAssignmentReportList, self).__init__(**kwargs)
self.value = value
class GuestConfigurationAssignmentReportProperties(msrest.serialization.Model):
"""Report for the guest configuration assignment. Report contains information such as compliance status, reason, and more.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar compliance_status: A value indicating compliance status of the machine for the assigned
guest configuration. Possible values include: "Compliant", "NonCompliant", "Pending".
:vartype compliance_status: str or ~guest_configuration_client.models.ComplianceStatus
:ivar report_id: GUID that identifies the guest configuration assignment report under a
subscription, resource group.
:vartype report_id: str
:param assignment: Configuration details of the guest configuration assignment.
:type assignment: ~guest_configuration_client.models.AssignmentInfo
:param vm: Information about the VM.
:type vm: ~guest_configuration_client.models.VmInfo
:ivar start_time: Start date and time of the guest configuration assignment compliance status
check.
:vartype start_time: ~datetime.datetime
:ivar end_time: End date and time of the guest configuration assignment compliance status
check.
:vartype end_time: ~datetime.datetime
:param details: Details of the assignment report.
:type details: ~guest_configuration_client.models.AssignmentReportDetails
"""
_validation = {
'compliance_status': {'readonly': True},
'report_id': {'readonly': True},
'start_time': {'readonly': True},
'end_time': {'readonly': True},
}
_attribute_map = {
'compliance_status': {'key': 'complianceStatus', 'type': 'str'},
'report_id': {'key': 'reportId', 'type': 'str'},
'assignment': {'key': 'assignment', 'type': 'AssignmentInfo'},
'vm': {'key': 'vm', 'type': 'VmInfo'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'details': {'key': 'details', 'type': 'AssignmentReportDetails'},
}
def __init__(
self,
*,
assignment: Optional["AssignmentInfo"] = None,
vm: Optional["VmInfo"] = None,
details: Optional["AssignmentReportDetails"] = None,
**kwargs
):
super(GuestConfigurationAssignmentReportProperties, self).__init__(**kwargs)
self.compliance_status = None
self.report_id = None
self.assignment = assignment
self.vm = vm
self.start_time = None
self.end_time = None
self.details = details
class Operation(msrest.serialization.Model):
"""GuestConfiguration REST API operation.
:param name: Operation name: For ex.
providers/Microsoft.GuestConfiguration/guestConfigurationAssignments/write or read.
:type name: str
:param display: Provider, Resource, Operation and description values.
:type display: ~guest_configuration_client.models.OperationDisplay
:param status_code: Service provider: Microsoft.GuestConfiguration.
:type status_code: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
'status_code': {'key': 'properties.statusCode', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
display: Optional["OperationDisplay"] = None,
status_code: Optional[str] = None,
**kwargs
):
super(Operation, self).__init__(**kwargs)
self.name = name
self.display = display
self.status_code = status_code
class OperationDisplay(msrest.serialization.Model):
"""Provider, Resource, Operation and description values.
:param provider: Service provider: Microsoft.GuestConfiguration.
:type provider: str
:param resource: Resource on which the operation is performed: For ex.
:type resource: str
:param operation: Operation type: Read, write, delete, etc.
:type operation: str
:param description: Description about operation.
:type description: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
*,
provider: Optional[str] = None,
resource: Optional[str] = None,
operation: Optional[str] = None,
description: Optional[str] = None,
**kwargs
):
super(OperationDisplay, self).__init__(**kwargs)
self.provider = provider
self.resource = resource
self.operation = operation
self.description = description
class OperationList(msrest.serialization.Model):
"""The response model for the list of Automation operations.
:param value: List of Automation operations supported by the Automation resource provider.
:type value: list[~guest_configuration_client.models.Operation]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Operation]'},
}
def __init__(
self,
*,
value: Optional[List["Operation"]] = None,
**kwargs
):
super(OperationList, self).__init__(**kwargs)
self.value = value
class ProxyResource(Resource):
"""ARM proxy resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: ARM resource id of the guest configuration assignment.
:vartype id: str
:param name: Name of the guest configuration assignment.
:type name: str
:param location: Region where the VM is located.
:type location: str
:ivar type: The type of the resource.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
location: Optional[str] = None,
**kwargs
):
super(ProxyResource, self).__init__(name=name, location=location, **kwargs)
class VmInfo(msrest.serialization.Model):
"""Information about the VM.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Azure resource Id of the VM.
:vartype id: str
:ivar uuid: UUID(Universally Unique Identifier) of the VM.
:vartype uuid: str
"""
_validation = {
'id': {'readonly': True},
'uuid': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'uuid': {'key': 'uuid', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VmInfo, self).__init__(**kwargs)
self.id = None
self.uuid = None
|
tests/unitary/RewardStream/test_notify_reward_amount.py
|
AqualisDAO/curve-dao-contracts
| 217 |
48940
|
import math
import brownie
from brownie import chain
def test_only_distributor_allowed(alice, stream):
with brownie.reverts("dev: only distributor"):
stream.notify_reward_amount(10 ** 18, {"from": alice})
def test_retrieves_reward_token(bob, stream, reward_token):
stream.notify_reward_amount(10 ** 18, {"from": bob})
post_notify = reward_token.balanceOf(stream)
assert post_notify == 10 ** 18
def test_reward_rate_updates(bob, stream):
stream.notify_reward_amount(10 ** 18, {"from": bob})
post_notify = stream.reward_rate()
assert post_notify > 0
assert post_notify == 10 ** 18 / (86400 * 10)
def test_reward_rate_updates_mid_duration(bob, stream):
stream.notify_reward_amount(10 ** 18, {"from": bob})
chain.sleep(86400 * 5) # half of the duration
# top up the balance to be 10 ** 18 again
stream.notify_reward_amount(10 ** 18 / 2, {"from": bob})
post_notify = stream.reward_rate()
    # the rates should be relatively close; a rel_tol of 0.00001 seems like a reasonable heuristic
assert math.isclose(post_notify, 10 ** 18 / (86400 * 10), rel_tol=0.00001)
def test_period_finish_updates(bob, stream):
tx = stream.notify_reward_amount(10 ** 18, {"from": bob})
assert stream.period_finish() == tx.timestamp + 86400 * 10
def test_update_last_update_time(bob, stream):
tx = stream.notify_reward_amount(10 ** 18, {"from": bob})
assert stream.last_update_time() == tx.timestamp
|
test/test_vehicle.py
|
pchevallier/bimmer_connected
| 141 |
48956
|
<gh_stars>100-1000
"""Tests for ConnectedDriveVehicle."""
import unittest
from unittest import mock
from test import load_response_json, BackendMock, TEST_USERNAME, TEST_PASSWORD, TEST_REGION, \
G31_VIN, F48_VIN, I01_VIN, I01_NOREX_VIN, F15_VIN, F45_VIN, F31_VIN, TEST_VEHICLE_DATA, \
ATTRIBUTE_MAPPING, MISSING_ATTRIBUTES, ADDITIONAL_ATTRIBUTES, G30_PHEV_OS7_VIN, AVAILABLE_STATES_MAPPING
from bimmer_connected.vehicle import ConnectedDriveVehicle, DriveTrainType
from bimmer_connected.account import ConnectedDriveAccount
_VEHICLES = load_response_json('vehicles.json')['vehicles']
G31_VEHICLE = _VEHICLES[0]
class TestVehicle(unittest.TestCase):
"""Tests for ConnectedDriveVehicle."""
def test_drive_train(self):
"""Tests around drive_train attribute."""
vehicle = ConnectedDriveVehicle(None, G31_VEHICLE)
self.assertEqual(DriveTrainType.CONVENTIONAL, vehicle.drive_train)
def test_parsing_attributes(self):
"""Test parsing different attributes of the vehicle."""
backend_mock = BackendMock()
with mock.patch('bimmer_connected.account.requests', new=backend_mock):
account = ConnectedDriveAccount(TEST_USERNAME, TEST_PASSWORD, TEST_REGION)
for vehicle in account.vehicles:
print(vehicle.name)
self.assertIsNotNone(vehicle.drive_train)
self.assertIsNotNone(vehicle.name)
self.assertIsNotNone(vehicle.has_internal_combustion_engine)
self.assertIsNotNone(vehicle.has_hv_battery)
self.assertIsNotNone(vehicle.drive_train_attributes)
self.assertIsNotNone(vehicle.has_statistics_service)
self.assertIsNotNone(vehicle.has_weekly_planner_service)
self.assertIsNotNone(vehicle.has_destination_service)
self.assertIsNotNone(vehicle.has_rangemap_service)
def test_drive_train_attributes(self):
"""Test parsing different attributes of the vehicle."""
backend_mock = BackendMock()
with mock.patch('bimmer_connected.account.requests', new=backend_mock):
account = ConnectedDriveAccount(TEST_USERNAME, TEST_PASSWORD, TEST_REGION)
for vehicle in account.vehicles:
self.assertEqual(vehicle.vin in [G31_VIN, F48_VIN, F15_VIN, F45_VIN, F31_VIN, G30_PHEV_OS7_VIN],
vehicle.has_internal_combustion_engine)
self.assertEqual(vehicle.vin in [I01_VIN, I01_NOREX_VIN, G30_PHEV_OS7_VIN],
vehicle.has_hv_battery)
self.assertEqual(vehicle.vin in [I01_VIN],
vehicle.has_range_extender)
def test_parsing_of_lsc_type(self):
"""Test parsing the lsc type field."""
backend_mock = BackendMock()
with mock.patch('bimmer_connected.account.requests', new=backend_mock):
account = ConnectedDriveAccount(TEST_USERNAME, TEST_PASSWORD, TEST_REGION)
for vehicle in account.vehicles:
self.assertIsNotNone(vehicle.lsc_type)
def test_available_attributes(self):
"""Check that available_attributes returns exactly the arguments we have in our test data."""
backend_mock = BackendMock()
with mock.patch('bimmer_connected.account.requests', new=backend_mock):
account = ConnectedDriveAccount(TEST_USERNAME, TEST_PASSWORD, TEST_REGION)
for vin, dirname in TEST_VEHICLE_DATA.items():
vehicle = account.get_vehicle(vin)
print(vehicle.name)
status_data = load_response_json('{}/status.json'.format(dirname))
existing_attributes = status_data['vehicleStatus'].keys()
existing_attributes = sorted([ATTRIBUTE_MAPPING.get(a, a) for a in existing_attributes
if a not in MISSING_ATTRIBUTES])
expected_attributes = sorted([a for a in vehicle.available_attributes if a not in ADDITIONAL_ATTRIBUTES])
self.assertListEqual(existing_attributes, expected_attributes)
def test_available_state_services(self):
"""Check that available_attributes returns exactly the arguments we have in our test data."""
backend_mock = BackendMock()
with mock.patch('bimmer_connected.account.requests', new=backend_mock):
account = ConnectedDriveAccount(TEST_USERNAME, TEST_PASSWORD, TEST_REGION)
vehicles = load_response_json('vehicles.json')
for test_vehicle in vehicles['vehicles']:
vehicle = account.get_vehicle(test_vehicle['vin'])
print(vehicle.name)
services_to_check = {
k: v
for k, v in test_vehicle.items()
if k in list(AVAILABLE_STATES_MAPPING)
}
available_services = ['STATUS']
for key, value in services_to_check.items():
if AVAILABLE_STATES_MAPPING[key].get(value):
available_services += AVAILABLE_STATES_MAPPING[key][value]
if vehicle.drive_train != DriveTrainType.CONVENTIONAL:
available_services += ['EFFICIENCY', 'NAVIGATION']
self.assertListEqual(sorted(vehicle.available_state_services), sorted(available_services))
|
forking_paths_dataset/code/batch_plot_traj_carla.py
|
ziyan0302/Multiverse
| 190 |
48959
|
# coding=utf-8
"""Batch convert the world traj in actev to carla traj."""
import argparse
import os
from glob import glob
from tqdm import tqdm
import sys
if sys.version_info > (3, 0):
import subprocess as commands
else:
import commands
parser = argparse.ArgumentParser()
parser.add_argument("traj_world_path")
parser.add_argument("--traj_vehicle_world_path", default=None)
parser.add_argument("save_carla_traj_path")
parser.add_argument("--save_carla_vehicle_path", default=None)
calibrations = {
"0000": {
"world_rotate": 320,
"carla_rotate": 130,
"scale": 1.0,
"origin": [3.5, -48.0, 0.3]
},
"0400": {
"world_rotate": 100,
"carla_rotate": 153,
"scale": 1.0,
"origin": [-10.0, 58.0, 0.5]
},
"0401": {
"world_rotate": 120,
"carla_rotate": 135,
"scale": 1.0,
"origin": [-48.0, 24.0, 0.5]
},
"0500": {
"world_rotate": 90,
"carla_rotate": 179,
"scale": 1.0,
"origin": [-65.5, -75.5, 0.1]
},
}
# Zara
calibration = {
"world_rotate": 270,
"carla_rotate": -3.04,
"scale": 1.2,
"origin": [-44.0511921243, -79.6225002047, 0.],
}
def get_scene(videoname_):
"""ActEV scene extractor from videoname."""
s = videoname_.split("_S_")[-1]
s = s.split("_")[0]
return s[:4]
if __name__ == "__main__":
args = parser.parse_args()
# all files
ped_traj_files = glob(os.path.join(args.traj_world_path, "*.txt"))
if args.traj_vehicle_world_path is not None:
assert args.save_carla_vehicle_path is not None
if not os.path.exists(args.save_carla_vehicle_path):
os.makedirs(args.save_carla_vehicle_path)
if not os.path.exists(args.save_carla_traj_path):
os.makedirs(args.save_carla_traj_path)
script_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "plot_traj_carla.py")
assert os.path.exists(script_path), script_path
for ped_traj_file in tqdm(ped_traj_files):
filename = os.path.splitext(os.path.basename(ped_traj_file))[0]
target_ped_file = os.path.join(
args.save_carla_traj_path, "%s.txt" % filename)
if args.traj_vehicle_world_path is None:
output = commands.getoutput("python3 %s %s 0 %f %f %f %f --world_rotate"
" %f --scale %f --save_carla_traj_file %s" % (
script_path, ped_traj_file,
calibration["origin"][0],
calibration["origin"][1],
calibration["origin"][2],
calibration["carla_rotate"],
calibration["world_rotate"],
calibration["scale"],
target_ped_file))
else:
scene = get_scene(filename)
if scene == "0002":
continue
vehicle_traj_file = os.path.join(args.traj_vehicle_world_path,
"%s.txt" % filename)
target_vehicle_file = os.path.join(args.save_carla_vehicle_path,
"%s.txt" % filename)
cmd = "python3 %s %s 0 %f %f %f %f --world_rotate" \
" %f --scale %f --save_carla_traj_file %s" \
" --vehicle_world_traj_file %s" \
" --save_vehicle_carla_traj_file %s" % (
script_path, ped_traj_file,
calibrations[scene]["origin"][0],
calibrations[scene]["origin"][1],
calibrations[scene]["origin"][2],
calibrations[scene]["carla_rotate"],
calibrations[scene]["world_rotate"],
calibrations[scene]["scale"],
target_ped_file,
vehicle_traj_file,
target_vehicle_file)
output = commands.getoutput("python3 %s %s 0 %f %f %f %f --world_rotate"
" %f --scale %f --save_carla_traj_file %s"
" --vehicle_world_traj_file %s --is_actev"
" --save_vehicle_carla_traj_file %s" % (
script_path, ped_traj_file,
calibrations[scene]["origin"][0],
calibrations[scene]["origin"][1],
calibrations[scene]["origin"][2],
calibrations[scene]["carla_rotate"],
calibrations[scene]["world_rotate"],
calibrations[scene]["scale"],
target_ped_file,
vehicle_traj_file,
target_vehicle_file))
|
desktop/core/ext-py/django-celery-beat-1.4.0/django_celery_beat/migrations/0005_add_solarschedule_events_choices_squashed_0009_merge_20181012_1416.py
|
maulikjs/hue
| 5,079 |
48973
|
# Generated by Django 2.1.2 on 2018-10-12 14:18
from __future__ import absolute_import, unicode_literals
from django.db import migrations, models
import django_celery_beat.validators
import timezone_field.fields
class Migration(migrations.Migration):
replaces = [
('django_celery_beat', '0005_add_solarschedule_events_choices'),
('django_celery_beat', '0006_auto_20180210_1226'),
('django_celery_beat', '0006_auto_20180322_0932'),
('django_celery_beat', '0007_auto_20180521_0826'),
('django_celery_beat', '0008_auto_20180914_1922'),
]
dependencies = [
('django_celery_beat', '0004_auto_20170221_0000'),
]
operations = [
migrations.AlterField(
model_name='solarschedule',
name='event',
field=models.CharField(
choices=[('dawn_astronomical', 'dawn_astronomical'),
('dawn_civil', 'dawn_civil'),
('dawn_nautical', 'dawn_nautical'),
('dusk_astronomical', 'dusk_astronomical'),
('dusk_civil', 'dusk_civil'),
('dusk_nautical', 'dusk_nautical'),
('solar_noon', 'solar_noon'), ('sunrise', 'sunrise'),
('sunset', 'sunset')], max_length=24,
verbose_name='event'),
),
migrations.AlterModelOptions(
name='crontabschedule',
options={
'ordering': ['month_of_year', 'day_of_month', 'day_of_week',
'hour', 'minute', 'timezone'],
'verbose_name': 'crontab', 'verbose_name_plural': 'crontabs'},
),
migrations.AlterModelOptions(
name='crontabschedule',
options={
'ordering': ['month_of_year', 'day_of_month', 'day_of_week',
'hour', 'minute', 'timezone'],
'verbose_name': 'crontab', 'verbose_name_plural': 'crontabs'},
),
migrations.AddField(
model_name='crontabschedule',
name='timezone',
field=timezone_field.fields.TimeZoneField(default='UTC'),
),
migrations.AddField(
model_name='periodictask',
name='one_off',
field=models.BooleanField(default=False,
verbose_name='one-off task'),
),
migrations.AddField(
model_name='periodictask',
name='start_time',
field=models.DateTimeField(blank=True, null=True,
verbose_name='start_time'),
),
migrations.AlterField(
model_name='crontabschedule',
name='day_of_month',
field=models.CharField(default='*', max_length=124, validators=[
django_celery_beat.validators.day_of_month_validator],
verbose_name='day of month'),
),
migrations.AlterField(
model_name='crontabschedule',
name='day_of_week',
field=models.CharField(default='*', max_length=64, validators=[
django_celery_beat.validators.day_of_week_validator],
verbose_name='day of week'),
),
migrations.AlterField(
model_name='crontabschedule',
name='hour',
field=models.CharField(default='*', max_length=96, validators=[
django_celery_beat.validators.hour_validator],
verbose_name='hour'),
),
migrations.AlterField(
model_name='crontabschedule',
name='minute',
field=models.CharField(default='*', max_length=240, validators=[
django_celery_beat.validators.minute_validator],
verbose_name='minute'),
),
migrations.AlterField(
model_name='crontabschedule',
name='month_of_year',
field=models.CharField(default='*', max_length=64, validators=[
django_celery_beat.validators.month_of_year_validator],
verbose_name='month of year'),
),
]
|
InvenTree/stock/__init__.py
|
ArakniD/InvenTree
| 656 |
48992
|
<reponame>ArakniD/InvenTree
"""
The Stock module is responsible for Stock management.
It includes models for:
- StockLocation
- StockItem
- StockItemTracking
"""
|
ansible/plugins/action/hashivault_write_from_file.py
|
ayav09/ansible-modules-hashivault
| 402 |
48993
|
########################################################################
#
# Developed for AT&T by <NAME>, August 2017
#
# Action plugin for hashivault_write_from_file module.
#
# Reads file from remote host using slurp module. (base64 encoded)
# Stores file/secret to Vault using hashivault_write module on localhost.
#
########################################################################
from ansible.plugins.action import ActionBase
from ansible.utils.vars import merge_hash
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
if task_vars is None:
task_vars = dict()
results = super(ActionModule, self).run(tmp, task_vars)
args = self._task.args.copy()
key = args.pop('key', None)
path = args.pop('path', None)
new_module_args = {
'src': path
}
self._update_module_args('slurp', new_module_args, task_vars)
results = merge_hash(
results,
# executes slurp module on remote host
self._execute_module(module_name='slurp', tmp=tmp, task_vars=task_vars, module_args=new_module_args)
)
if 'failed' in results and results['failed'] is True:
return results
# already base64 encoded from slurp
content = results.pop('content', None)
self._play_context.become = False
self._play_context.become_method = None
self._connection = self._shared_loader_obj.connection_loader.get('local', self._play_context,
self._connection._new_stdin)
args['data'] = {key: content}
if 'update' not in args:
args['update'] = True
results = merge_hash(
results,
# executes hashivault_write module on localhost
self._execute_module(module_name='hashivault_write', tmp=tmp, task_vars=task_vars, module_args=args)
)
results['invocation']['module_args']['data'] = 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
return results
|
tests/validation/test_single_field_subscriptions.py
|
closeio/graphql-core
| 590 |
49074
|
<gh_stars>100-1000
from functools import partial
from graphql.utilities import build_schema
from graphql.validation import SingleFieldSubscriptionsRule
from .harness import assert_validation_errors
schema = build_schema(
"""
type Message {
body: String
sender: String
}
type SubscriptionRoot {
importantEmails: [String]
notImportantEmails: [String]
moreImportantEmails: [String]
spamEmails: [String]
deletedEmails: [String]
newMessage: Message
}
type QueryRoot {
dummy: String
}
schema {
query: QueryRoot
subscription: SubscriptionRoot
}
"""
)
assert_errors = partial(
assert_validation_errors, SingleFieldSubscriptionsRule, schema=schema
)
assert_valid = partial(assert_errors, errors=[])
def describe_validate_subscriptions_with_single_field():
def valid_subscription():
assert_valid(
"""
subscription ImportantEmails {
importantEmails
}
"""
)
def valid_subscription_with_fragment():
assert_valid(
"""
subscription sub {
...newMessageFields
}
fragment newMessageFields on SubscriptionRoot {
newMessage {
body
sender
}
}
"""
)
def valid_subscription_with_fragment_and_field():
assert_valid(
"""
subscription sub {
newMessage {
body
}
...newMessageFields
}
fragment newMessageFields on SubscriptionRoot {
newMessage {
body
sender
}
}
"""
)
def fails_with_more_than_one_root_field():
assert_errors(
"""
subscription ImportantEmails {
importantEmails
notImportantEmails
}
""",
[
{
"message": "Subscription 'ImportantEmails'"
" must select only one top level field.",
"locations": [(4, 15)],
}
],
)
def fails_with_more_than_one_root_field_including_introspection():
assert_errors(
"""
subscription ImportantEmails {
importantEmails
__typename
}
""",
[
{
"message": "Subscription 'ImportantEmails'"
" must select only one top level field.",
"locations": [(4, 15)],
},
{
"message": "Subscription 'ImportantEmails'"
" must not select an introspection top level field.",
"locations": [(4, 15)],
},
],
)
def fails_with_more_than_one_root_field_including_aliased_introspection():
assert_errors(
"""
subscription ImportantEmails {
importantEmails
...Introspection
}
fragment Introspection on SubscriptionRoot {
typename: __typename
}
""",
[
{
"message": "Subscription 'ImportantEmails'"
" must select only one top level field.",
"locations": [(7, 15)],
},
{
"message": "Subscription 'ImportantEmails'"
" must not select an introspection top level field.",
"locations": [(7, 15)],
},
],
)
def fails_with_many_more_than_one_root_field():
assert_errors(
"""
subscription ImportantEmails {
importantEmails
notImportantEmails
spamEmails
}
""",
[
{
"message": "Subscription 'ImportantEmails'"
" must select only one top level field.",
"locations": [(4, 15), (5, 15)],
}
],
)
def fails_with_more_than_one_root_field_via_fragments():
assert_errors(
"""
subscription ImportantEmails {
importantEmails
... {
more: moreImportantEmails
}
...NotImportantEmails
}
fragment NotImportantEmails on SubscriptionRoot {
notImportantEmails
deleted: deletedEmails
...SpamEmails
}
fragment SpamEmails on SubscriptionRoot {
spamEmails
}
""",
[
{
"message": "Subscription 'ImportantEmails'"
" must select only one top level field.",
"locations": [(5, 17), (10, 15), (11, 15), (15, 15)],
},
],
)
def does_not_infinite_loop_on_recursive_fragments():
assert_errors(
"""
subscription NoInfiniteLoop {
...A
}
fragment A on SubscriptionRoot {
...A
}
""",
[],
)
def fails_with_more_than_one_root_field_via_fragments_anonymous():
assert_errors(
"""
subscription {
importantEmails
... {
more: moreImportantEmails
...NotImportantEmails
}
...NotImportantEmails
}
fragment NotImportantEmails on SubscriptionRoot {
notImportantEmails
deleted: deletedEmails
... {
... {
archivedEmails
}
}
...SpamEmails
}
fragment SpamEmails on SubscriptionRoot {
spamEmails
...NonExistentFragment
}
""",
[
{
"message": "Anonymous Subscription"
" must select only one top level field.",
"locations": [(5, 17), (11, 15), (12, 15), (15, 19), (21, 15)],
},
],
)
def fails_with_more_than_one_root_field_in_anonymous_subscriptions():
assert_errors(
"""
subscription {
importantEmails
notImportantEmails
}
""",
[
{
"message": "Anonymous Subscription"
" must select only one top level field.",
"locations": [(4, 15)],
}
],
)
def fails_with_introspection_field():
assert_errors(
"""
subscription ImportantEmails {
__typename
}
""",
[
{
"message": "Subscription 'ImportantEmails' must not"
" select an introspection top level field.",
"locations": [(3, 15)],
}
],
)
def fails_with_introspection_field_in_anonymous_subscription():
assert_errors(
"""
subscription {
__typename
}
""",
[
{
"message": "Anonymous Subscription must not"
" select an introspection top level field.",
"locations": [(3, 15)],
}
],
)
def skips_if_not_subscription_type():
empty_schema = build_schema(
"""
type Query {
dummy: String
}
"""
)
assert_errors(
"""
subscription {
__typename
}
""",
[],
schema=empty_schema,
)
|
desktop/core/ext-py/chardet-3.0.4/setup.py
|
kokosing/hue
| 1,511 |
49080
|
<reponame>kokosing/hue<filename>desktop/core/ext-py/chardet-3.0.4/setup.py<gh_stars>1000+
#!/usr/bin/env python
import re
import sys
from setuptools import find_packages, setup
needs_pytest = set(['pytest', 'test', 'ptr']).intersection(sys.argv)
pytest_runner = ['pytest-runner'] if needs_pytest else []
# Get version without importing, which avoids dependency issues
def get_version():
with open('chardet/version.py') as version_file:
return re.search(r"""__version__\s+=\s+(['"])(?P<version>.+?)\1""",
version_file.read()).group('version')
def readme():
with open('README.rst') as f:
return f.read()
setup(name='chardet',
version=get_version(),
description='Universal encoding detector for Python 2 and 3',
long_description=readme(),
author='<NAME>',
author_email='<EMAIL>',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
url='https://github.com/chardet/chardet',
license="LGPL",
keywords=['encoding', 'i18n', 'xml'],
classifiers=["Development Status :: 4 - Beta",
"Intended Audience :: Developers",
("License :: OSI Approved :: GNU Library or Lesser General"
" Public License (LGPL)"),
"Operating System :: OS Independent",
"Programming Language :: Python",
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
("Topic :: Software Development :: Libraries :: Python "
"Modules"),
"Topic :: Text Processing :: Linguistic"],
packages=find_packages(),
setup_requires=pytest_runner,
tests_require=['pytest', 'hypothesis'],
entry_points={'console_scripts':
['chardetect = chardet.cli.chardetect:main']})
|
draw fractal/Fern/Fern__PIL.py
|
DazEB2/SimplePyScripts
| 117 |
49114
|
<reponame>DazEB2/SimplePyScripts
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
"""
Папоротник / Fern
"""
# Original: http://www.cyberforum.ru/pascalabc/thread994987.html
# uses GraphABC,Utils;
#
# const
# n=255;
# max=10;
#
# var
# x,y,x1,y1,cx,cy: real;
# i,ix,iy: integer;
# // z=z^2+c
# begin
# SetWindowCaption('Фракталы: папоротник');
# SetWindowSize(300,300);
# cx:=0.251;
# cy:=0.95;
# for ix:=0 to WindowWidth-1 do
# for iy:=0 to WindowHeight-1 do
# begin
# x:=0.001*(ix-200);
# y:=0.001*(iy-150);
# for i:=1 to n do
# begin
# x1:=0.5*x*x-0.88*y*y+cx;
# y1:=x*y+cy;
# if (x1>max) or (y1>max) then break;
# x:=x1;
# y:=y1;
# end;
# if i>=n then SetPixel(ix,iy,clGreen)
# else SetPixel(ix,iy,RGB(255-i,255,255-i));
# end;
# writeln('Время расчета = ',Milliseconds/1000,' с');
# end.
def draw_fern(draw_by_image, width, height):
n = 255
cx = 0.251
cy = 0.95
for ix in range(width):
for iy in range(height):
x = 0.001 * (ix - 200)
y = 0.001 * (iy - 150)
for i in range(n):
x1 = 0.5 * x * x - 0.88 * y * y + cx
y1 = x * y + cy
if x1 > 10 or y1 > 10:
break
x = x1
y = y1
            # range(n) ends with i == n - 1, so compare against n - 1 to detect a completed loop
            color = "green" if i >= n - 1 else (255 - i, 255, 255 - i)
draw_by_image.point((ix, iy), color)
if __name__ == '__main__':
from PIL import Image, ImageDraw
img = Image.new("RGB", (300, 300), "white")
draw_fern(ImageDraw.Draw(img), img.width, img.height)
img.save('img.png')
|
saleor/core/migrations/0005_alter_eventdelivery_webhook.py
|
eanknd/saleor
| 1,392 |
49142
|
# Generated by Django 3.2.12 on 2022-04-08 12:37
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("webhook", "0008_webhook_subscription_query"),
("core", "0004_delete_delivery_without_webhook"),
]
operations = [
migrations.AlterField(
model_name="eventdelivery",
name="webhook",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="webhook.webhook"
),
),
]
|
22爬虫提高/day04/basic01.py
|
HaoZhang95/PythonAndMachineLearning
| 937 |
49150
|
<gh_stars>100-1000
import json
import time
import requests
from PIL import Image
from pytesseract import pytesseract
from selenium import webdriver
"""
selenium和xpath的使用区别
selenium使用不需要自己写headers,只需要导入webdriver.Chrome().get(url)就会打开浏览器,使用find_xxx_by_xpath
写入自己的xpath语句即可
传统的xpath使用,需要导入etree.Html(url),然后写入自己的html.xpath(‘xxx’)
"""
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.75 Safari/537.36"
}
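# Illustrative sketch (not part of the original script): the "traditional xpath" approach
# described in the docstring above. It assumes the third-party `lxml` package is installed;
# the function name and the xpath expression are made up for illustration, and nothing below
# ever calls this function.
def plain_xpath_demo(url):
    from lxml import etree  # imported lazily so the module still loads without lxml
    # With plain xpath we have to fetch the page ourselves and pass headers manually
    response = requests.get(url, headers=headers)
    # Parse the HTML text and evaluate a hand-written xpath expression against it
    html = etree.HTML(response.text)
    return html.xpath('//h3/text()')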
def test01():
"""斗鱼平台获取直播房间和的信息
1、爬取房间名称、类型、房主、关注人数、封面信息等
2、使用selenium进行爬取
"""
class Douyu(object):
def __init__(self):
self.url = 'https://www.douyu.com/directory/all'
            # Instantiate the browser object
self.driver = webdriver.Chrome()
            # Create the output file
self.file = open('douyu.json','w')
        # Parse the data
def parse_data(self):
            # Extract the room list; find_elements (plural) is required here
            node_list = self.driver.find_elements_by_xpath('//*[@id="listAll"]/section[2]/div[2]/ul/li')
            # Debug check of the node list
            # print(len(node_list))
            # Container for the scraped data
data_list = []
            # Iterate over the node list
for node in node_list:
temp = {}
                # Extract the room title / category / owner / follower count / cover
temp['title'] = node.find_element_by_xpath('./div/div/h3').text
temp['category'] = node.find_element_by_xpath('./div/div/span').text
temp['owner'] = node.find_element_by_xpath('./div/p/span[1]').text
temp['num'] = node.find_element_by_xpath('./div/p/span[2]').text
temp['cover'] = node.find_element_by_xpath('./span/img').get_attribute('data-original')
# temp['link'] = node.get_attribute('href')
data_list.append(temp)
# print(temp)
            # Return the data
return data_list
        # Save the data
def save_data(self,data_list):
            # Iterate over the list; each element is a dict
for data in data_list:
str_data = json.dumps(data,ensure_ascii=False) + ',\n'
self.file.write(str_data)
def __del__(self):
            # Close the browser object
self.driver.close()
self.file.close()
def run(self):
            # Build the url
            # Build the webdriver browser object
            # Send the request
self.driver.get(self.url)
while True:
                # Parse the data and get back a list
data_list = self.parse_data()
self.save_data(data_list)
                # Find the next-page link and simulate a click
try:
ele_next_url = self.driver.find_element_by_xpath('//*[@class="shark-pager-next"]')
ele_next_url.click()
time.sleep(3)
except:
break
    # Run the spider (parsing and saving happen inside run())
    Douyu().run()
def test02():
from PIL import Image
import pytesseract
"""谷歌图片识别的包:tesseract"""
# 使用pil加载一张图片到内存中,返回图片对象
img = Image.open('test.jpg')
# 调用tesseract进行识别,返回一个data
data = pytesseract.image_to_string(img)
# 输出结果
print(data)
def test03():
"""图片识别验证码进行豆瓣登陆"""
# 创建浏览器对象
driver = webdriver.Chrome()
    # Send the request
driver.get('https://accounts.douban.com/login')
    # Locate the element: account/email field
ele_email = driver.find_element_by_id('email')
    # Send the account to the form
ele_email.send_keys('<KEY>')
    # Locate the element: password field
ele_pswd = driver.find_element_by_id('password')
    # Send the password to the form
ele_pswd.send_keys('123456shengjun')
    # # 1. Manually enter the image captcha
    # Locate the captcha input element
# ele_captcha = driver.find_element_by_id('captcha_field')
    # data = input('Please enter the image captcha: ')
# ele_captcha.send_keys(data)
    # 2. Use the OCR system to recognize the captcha
ele_image_captcha = driver.find_element_by_id('captcha_image')
image_url = ele_image_captcha.get_attribute('src')
print(image_url)
    # Fetch the captcha image file
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'
}
    # Send a GET request to fetch the image bytes
# data = requests.get(image_url,headers=headers).content
    # Save the image file
# with open('douban.jpg','wb') as f:
# f.write(data)
# time.sleep(3)
    # Run the OCR system
img = Image.open('douban.jpg')
image_str = pytesseract.image_to_string(img)
print('-------',image_str,'-------')
    # Locate the login button
ele_submit = driver.find_element_by_name('login')
    # Simulate a click
ele_submit.click()
def main():
# test01()
# test02()
test03()
if __name__ == '__main__':
main()
|
office365/directory/applications/spa_application.py
|
rikeshtailor/Office365-REST-Python-Client
| 544 |
49152
|
from office365.runtime.client_value import ClientValue
class SpaApplication(ClientValue):
pass
|
Python/OsFileSystem/list_files.py
|
Gjacquenot/training-material
| 115 |
49161
|
#!/usr/bin/env python
from argparse import ArgumentParser
import os
import sys
if __name__ == '__main__':
arg_parser = ArgumentParser(description='list all files with given '
'extension in directory')
arg_parser.add_argument('--dir', default='.',
help='directory to search')
arg_parser.add_argument('ext', help='extension to use')
arg_parser.add_argument('--verbose', action='store_true',
help='show progress info')
options = arg_parser.parse_args()
for directory, _, files in os.walk(options.dir):
if options.verbose:
print("### checking directory '{}'".format(directory),
file=sys.stderr)
for file_name in files:
_, ext = os.path.splitext(file_name)
if ext == options.ext:
print(os.path.join(directory, file_name))
|
spacenetutilities/labeltools/geojsonPrepTools.py
|
Pandinosaurus/utilities
| 251 |
49193
|
from spacenetutilities.labeltools import coreLabelTools
import json
import glob
import argparse
from datetime import datetime
import os
def modifyTimeField(geoJson, geoJsonNew, featureItemsToAdd=['ingest_tim', 'ingest_time', 'edit_date'], featureKeyListToRemove=[]):
now = datetime.today()
with open(geoJson) as json_data:
d = json.load(json_data)
featureList = d['features']
newFeatureList = []
for feature in featureList:
tmpFeature = dict(feature)
for featureKey in featureKeyListToRemove:
if featureKey in tmpFeature['properties']:
del tmpFeature['properties'][featureKey]
for featureKey in featureItemsToAdd:
if not (featureKey in tmpFeature['properties']):
print('inserting missing field')
print(now.isoformat())
tmpFeature['properties'][featureKey] = now.isoformat()
else:
if not tmpFeature['properties'][featureKey]:
print('filling empty field')
tmpFeature['properties'][featureKey] = now.isoformat()
newFeatureList.append(tmpFeature)
d['features']=newFeatureList
if os.path.exists(geoJsonNew):
os.remove(geoJsonNew)
with open(geoJsonNew, 'w') as json_data:
json.dump(d, json_data)
def removeIdFieldFromJsonEntries(geoJson, geoJsonNew, featureKeyListToRemove=['Id', 'id'], featureItemsToAdd={}):
with open(geoJson) as json_data:
d = json.load(json_data)
featureList = d['features']
newFeatureList = []
for feature in featureList:
tmpFeature = dict(feature)
for featureKey in featureKeyListToRemove:
if featureKey in tmpFeature['properties']:
del tmpFeature['properties'][featureKey]
tmpFeature.update(featureItemsToAdd)
newFeatureList.append(tmpFeature)
d['features']=newFeatureList
if os.path.exists(geoJsonNew):
os.remove(geoJsonNew)
with open(geoJsonNew, 'w') as json_data:
json.dump(d, json_data)
def removeIdinGeoJSONFolder(folder, modifier='noid'):
geoJsonList = glob.glob(os.path.join(folder, '*.geojson'))
for geojsonName in geoJsonList:
removeIdFieldFromJsonEntries(geojsonName, geojsonName.replace('.geojson', '{}.geojson'.format(modifier)))
|
playground/auto_sharding_solver/test_sharding_spec.py
|
yf225/alpa
| 114 |
49224
|
from hlo import ShardingSpec, ShardingSpecType
from cluster_env import ClusterEnvironment
from common import compute_bytes
def test_tile():
cluster_env = ClusterEnvironment([[0, 1, 2], [3, 4, 5]], [1,1], [1,1], None)
sharding = ShardingSpec.tile((12, 12), [0, 1], [0, 1], cluster_env)
assert sharding.tile_assignment_dimensions == (2, 3)
assert sharding.tile_assignment_devices == (0, 1, 2, 3, 4, 5)
assert sharding.replicate_on_last_tile_dim == False
sharding = ShardingSpec.tile((12, 12), [1, 0], [1, 0], cluster_env)
assert sharding.tile_assignment_dimensions == (2, 3)
assert sharding.tile_assignment_devices == (0, 1, 2, 3, 4, 5)
assert sharding.replicate_on_last_tile_dim == False
sharding = ShardingSpec.tile((12, 12), [0, 1], [1, 0], cluster_env)
assert sharding.tile_assignment_dimensions == (3, 2)
assert sharding.tile_assignment_devices == (0, 3, 1, 4, 2, 5)
assert sharding.replicate_on_last_tile_dim == False
sharding = ShardingSpec.tile((12, 12), [0], [0], cluster_env)
assert sharding.tile_assignment_dimensions == (2, 1, 3)
assert sharding.tile_assignment_devices == (0, 1, 2, 3, 4, 5)
assert sharding.replicate_on_last_tile_dim == True
sharding = ShardingSpec.tile((12, 12), [0], [1], cluster_env)
assert sharding.tile_assignment_dimensions == (3, 1, 2)
assert sharding.tile_assignment_devices == (0, 3, 1, 4, 2, 5)
assert sharding.replicate_on_last_tile_dim == True
sharding = ShardingSpec.tile((12, 12), [1], [1], cluster_env)
assert sharding.tile_assignment_dimensions == (1, 3, 2)
assert sharding.tile_assignment_devices == (0, 3, 1, 4, 2, 5)
assert sharding.replicate_on_last_tile_dim == True
sharding = ShardingSpec.tile((12, 12), [1], [0], cluster_env)
assert sharding.tile_assignment_dimensions == (1, 2, 3)
assert sharding.tile_assignment_devices == (0, 1, 2, 3, 4, 5)
assert sharding.replicate_on_last_tile_dim == True
sharding = ShardingSpec.tile((12, 12, 12), [0, 1], [0, 1], cluster_env)
assert sharding.tile_assignment_dimensions == (2, 3, 1)
assert sharding.tile_assignment_devices == (0, 1, 2, 3, 4, 5)
assert sharding.replicate_on_last_tile_dim == False
sharding = ShardingSpec.tile((12, 12, 12), [0, 1], [1, 0], cluster_env)
assert sharding.tile_assignment_dimensions == (3, 2, 1)
assert sharding.tile_assignment_devices == (0, 3, 1, 4, 2, 5)
assert sharding.replicate_on_last_tile_dim == False
sharding = ShardingSpec.tile((12, 12, 12), [1], [0], cluster_env)
assert sharding.tile_assignment_dimensions == (1, 2, 1, 3)
assert sharding.tile_assignment_devices == (0, 1, 2, 3, 4, 5)
assert sharding.replicate_on_last_tile_dim == True
def test_tile2():
cluster_env = ClusterEnvironment([[0, 1, 2, 3]], [1,1], [1,1], None)
sharding = ShardingSpec.tile((12, 12), [1], [1], cluster_env)
assert sharding.tile_assignment_dimensions == (1, 4)
assert sharding.tile_assignment_devices == (0, 1, 2, 3)
assert sharding.replicate_on_last_tile_dim == False
sharding = ShardingSpec.tile((12, 12), [1], [0], cluster_env)
assert sharding.type == ShardingSpecType.REPLICATED
cluster_env = ClusterEnvironment([[0], [1], [2], [3]], [1,1], [1,1], None)
sharding = ShardingSpec.tile((12, 12), [1], [0], cluster_env)
assert sharding.tile_assignment_dimensions == (1, 4)
assert sharding.tile_assignment_devices == (0, 1, 2, 3)
assert sharding.replicate_on_last_tile_dim == False
sharding = ShardingSpec.tile((12, 12), [1], [1], cluster_env)
assert sharding.type == ShardingSpecType.REPLICATED
def test_tile3():
cluster_env = ClusterEnvironment([[0, 1], [2, 3]], [1,1], [1,1], None)
shape = (12, 12)
src = ShardingSpec.split(shape, 1, cluster_env)
dst = ShardingSpec.tile(shape, [0], [0], cluster_env)
print(src)
print(dst)
cost = cluster_env.resharding_cost(shape, src, dst)
print(cost)
def assert_allclose(x, y):
assert abs((x - y) / (y + 1e-8)) < 0.01
def test_resharding_cost():
cluster_env = ClusterEnvironment([[0, 1, 2], [3, 4, 5]], [1, 1], [1, 1], None)
shape = (128, 128)
src = ShardingSpec.tile(shape, [0], [0], cluster_env)
dst = ShardingSpec.tile(shape, [0], [0], cluster_env)
cost = cluster_env.resharding_cost(shape, src, dst)
assert_allclose(cost, 0)
src = ShardingSpec.tile(shape, [0, 1], [0, 1], cluster_env)
dst = ShardingSpec.tile(shape, [1, 0], [1, 0], cluster_env)
cost = cluster_env.resharding_cost(shape, src, dst)
assert_allclose(cost, 0)
src = ShardingSpec.tile(shape, [0], [0], cluster_env)
dst = ShardingSpec.tile(shape, [0, 1], [0, 1], cluster_env)
cost = cluster_env.resharding_cost(shape, src, dst)
assert_allclose(cost, 0)
src = ShardingSpec.tile(shape, [0], [0], cluster_env)
dst = ShardingSpec.tile(shape, [0, 1], [0, 1], cluster_env)
cost = cluster_env.resharding_cost(shape, src, dst)
assert_allclose(cost, 0)
src = ShardingSpec.tile(shape, [0, 1], [0, 1], cluster_env)
dst = ShardingSpec.tile(shape, [0], [0], cluster_env)
cost = cluster_env.resharding_cost(shape, src, dst)
assert_allclose(cost, cluster_env.all_gather_cost(compute_bytes(shape), 1))
src = ShardingSpec.tile(shape, [0, 1], [0, 1], cluster_env)
dst = ShardingSpec.replicated(cluster_env)
cost = cluster_env.resharding_cost(shape, src, dst)
assert_allclose(cost, cluster_env.all_gather_cost(compute_bytes(shape), 0)
+ cluster_env.all_gather_cost(compute_bytes(shape), 1))
def test_resharding_cost2():
cluster_env = ClusterEnvironment([[0], [1], [2], [3]], [1,1], [1,1], None)
shape = (128, 128)
src = ShardingSpec.tile(shape, [0, 1], [0, 1], cluster_env)
dst = ShardingSpec.tile(shape, [0], [0], cluster_env)
cost = cluster_env.resharding_cost(shape, src, dst)
assert_allclose(cost, 0)
if __name__ == "__main__":
test_tile()
test_tile2()
#test_tile3()
test_resharding_cost()
test_resharding_cost2()
|
scripts/run_jobs.py
|
Darklanx/rl-baselines3-zoo
| 591 |
49241
|
<reponame>Darklanx/rl-baselines3-zoo
"""
Run multiple experiments on a single machine.
"""
import subprocess
import numpy as np
ALGOS = ["sac"]
ENVS = ["MountainCarContinuous-v0"]
N_SEEDS = 10
EVAL_FREQ = 5000
N_EVAL_EPISODES = 10
LOG_STD_INIT = [-6, -5, -4, -3, -2, -1, 0, 1]
for algo in ALGOS:
for env_id in ENVS:
for log_std_init in LOG_STD_INIT:
log_folder = f"logs_std_{np.exp(log_std_init):.4f}"
for _ in range(N_SEEDS):
args = [
"--algo",
algo,
"--env",
env_id,
"--hyperparams",
f"policy_kwargs:dict(log_std_init={log_std_init}, net_arch=[64, 64])",
"--eval-episodes",
N_EVAL_EPISODES,
"--eval-freq",
EVAL_FREQ,
"-f",
log_folder,
]
args = list(map(str, args))
ok = subprocess.call(["python", "train.py"] + args)
|
examples/miniapps/flask-blueprints/githubnavigator/blueprints/__init__.py
|
whysage/python-dependency-injector
| 1,997 |
49244
|
"""Blueprints package."""
|
ambiente_virtual/Lib/site-packages/alembic/util/editor.py
|
PI-UNIVESP-Penapolis/PRODEA
| 1,324 |
49276
|
import os
from os.path import exists
from os.path import join
from os.path import splitext
from subprocess import check_call
from typing import Dict
from typing import List
from typing import Mapping
from typing import Optional
from .compat import is_posix
from .exc import CommandError
def open_in_editor(
filename: str, environ: Optional[Dict[str, str]] = None
) -> None:
"""
Opens the given file in a text editor. If the environment variable
``EDITOR`` is set, this is taken as preference.
Otherwise, a list of commonly installed editors is tried.
If no editor matches, an :py:exc:`OSError` is raised.
:param filename: The filename to open. Will be passed verbatim to the
editor command.
:param environ: An optional drop-in replacement for ``os.environ``. Used
mainly for testing.
"""
env = os.environ if environ is None else environ
try:
editor = _find_editor(env)
check_call([editor, filename])
except Exception as exc:
raise CommandError("Error executing editor (%s)" % (exc,)) from exc
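# Hedged usage sketch (not part of alembic itself; the filename below is hypothetical):
# passing a drop-in ``environ`` forces a specific editor without touching os.environ.
#
#     open_in_editor("versions/1234_add_table.py", environ={"EDITOR": "nano"})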
def _find_editor(environ: Mapping[str, str]) -> str:
candidates = _default_editors()
for i, var in enumerate(("EDITOR", "VISUAL")):
if var in environ:
user_choice = environ[var]
if exists(user_choice):
return user_choice
if os.sep not in user_choice:
candidates.insert(i, user_choice)
for candidate in candidates:
path = _find_executable(candidate, environ)
if path is not None:
return path
raise OSError(
"No suitable editor found. Please set the "
'"EDITOR" or "VISUAL" environment variables'
)
def _find_executable(
candidate: str, environ: Mapping[str, str]
) -> Optional[str]:
    # Assuming this is on the PATH, we need to determine its absolute
    # location. Otherwise, ``check_call`` will fail
if not is_posix and splitext(candidate)[1] != ".exe":
candidate += ".exe"
for path in environ.get("PATH", "").split(os.pathsep):
value = join(path, candidate)
if exists(value):
return value
return None
def _default_editors() -> List[str]:
# Look for an editor. Prefer the user's choice by env-var, fall back to
# most commonly installed editor (nano/vim)
if is_posix:
return ["sensible-editor", "editor", "nano", "vim", "code"]
else:
return ["code.exe", "notepad++.exe", "notepad.exe"]
|
conftest.py
|
d34dm8/chime
| 149 |
49312
|
<filename>conftest.py<gh_stars>100-1000
import importlib
import pathlib
import tempfile
import _pytest.monkeypatch
import pytest
import chime
@pytest.fixture(scope='function', autouse=True)
def reload_chime():
importlib.reload(chime)
@pytest.fixture(scope='function', autouse=True)
def mock_pathlib_home(monkeypatch: _pytest.monkeypatch.MonkeyPatch):
with tempfile.TemporaryDirectory() as home_dir:
home_dir_path = pathlib.Path(home_dir)
monkeypatch.setattr(pathlib.Path, name='home', value=lambda: home_dir_path)
monkeypatch.setenv('APPDATA', value=str(home_dir_path))
|
mode/examples/Basics/Data/VariableScope/VariableScope.pyde
|
timgates42/processing.py
| 1,224 |
49316
|
<filename>mode/examples/Basics/Data/VariableScope/VariableScope.pyde<gh_stars>1000+
"""
Variable Scope.
Variables have a global or local "scope".
For example, variables declared within either the
setup() or draw() functions may be only used in these
functions. Global variables, variables declared outside
of setup() and draw(), may be used anywhere within the program.
If a local variable is declared with the same name as a
global variable, the program will use the local variable to make
its calculations within the current scope. Variables are localized
within each block.
"""
a = 80 # Create a global variable "a"
def setup():
size(640, 360)
background(0)
stroke(255)
noLoop()
def draw():
# Draw a line using the global variable "a".
line(a, 0, a, height)
# Create a variable "b" local to the draw() function.
b = 100
# Create a global variable "c".
global c
    c = 320 # Since "c" is global, it is available to other functions.
# Make a call to the custom function drawGreenLine()
drawGreenLine()
# Draw a line using the local variable "b".
line(b, 0, b, height) # Note that "b" remains set to 100.
def drawGreenLine():
# Since "b" was defined as a variable local to the draw() function,
# this code inside this if statement will not run.
if('b' in locals() or 'b' in globals()):
background(255) # This won't run
else:
with pushStyle():
stroke(0, 255, 0)
b = 320 # Create a variable "b" local to drawGreenLine().
# Use the local variable "b" and the global variable "c" to draw a line.
line(b, 0, c, height)
|
pipeline/Serverless/04_stream_processor/stream_processor.py
|
Rkauff/Klayers
| 1,096 |
49324
|
import json
import os
from datetime import datetime
import boto3
from aws_lambda_powertools.logging import Logger
logger = Logger()
@logger.inject_lambda_context
def main(event, context):
records = event.get("Records", [])
entries = []
stream_label = os.environ["STREAM_LABEL"]
logger.info(
{"record_count": len(records), "stream": stream_label,}
)
for record in records:
keys = record.get("dynamodb").get("Keys")
pk = keys["pk"]["S"]
sk = keys["sk"]["S"]
# pk and sk are prefixed with <type>#, every char before the '#' describes the attribute type
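        # Illustrative example (the key values here are hypothetical, not taken from this repo's
        # data model): a pk of "lyr#python3.9" gives pk_type == "lyr", an sk of "bld#42" gives "bld"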
pk_type = pk[: pk.find("#")]
sk_type = sk[: sk.find("#")]
event_name = record["eventName"]
logger.info(
{
"pk": pk,
"pk_type": pk_type,
"sk": sk,
"sk_type": sk_type,
"event_name": event_name,
}
)
entry = {
"Source": f"{stream_label}",
"Resources": [],
"DetailType": event_name,
"Detail": json.dumps(
{"pk_type": pk_type, "sk_type": sk_type, "record": record}
),
"EventBusName": "default",
}
entries.append(entry)
client = boto3.client("events")
response = client.put_events(Entries=entries)
logger.debug(entries)
logger.info(
{"num_entries": len(records), "failed_entries": response["FailedEntryCount"],}
)
return
|
models/__init__.py
|
MilesQLi/Theano-Lights
| 313 |
49327
|
<filename>models/__init__.py
__all__ = [
"ffn",
"rbfn",
"ffn_bn",
"ffn_ace",
"ffn_lae",
"ffn_bn_vat",
"ffn_vat",
"cnn",
"vae1",
"cvae",
"draw_at_lstm1",
"draw_at_lstm2",
"draw_lstm1",
"draw_sgru1",
"lm_lstm",
"lm_lstm_bn",
"lm_gru",
"lm_draw"
]
|
tools/third_party/importlib_metadata/prepare/example/example/__init__.py
|
meyerweb/wpt
| 2,479 |
49330
|
<gh_stars>1000+
def main():
return 'example'
|
plugins/modules/files_attributes.py
|
manala/ansible-roles
| 138 |
49333
|
<reponame>manala/ansible-roles
#!/usr/bin/python
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# This is a virtual module that is entirely implemented as an action plugin and runs on the controller
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: files_attributes
author: Manala (@manala)
short_description: Manage files attributes
description:
- Manage files attributes
'''
EXAMPLES = r'''
- name: Touch file
manala.roles.files_attributes:
path: /tmp/touch
state: touch
'''
|
tests/ui/menus/test_opmenu.py
|
Hengle/Houdini-Toolbox
| 136 |
49335
|
<gh_stars>100-1000
"""Tests for ht.ui.menus.opmenu module."""
# =============================================================================
# IMPORTS
# =============================================================================
# Houdini Toolbox
import ht.ui.menus.opmenu
# Houdini
import hou
# =============================================================================
# TESTS
# =============================================================================
def test_create_absolute_reference_copy(mocker):
"""Test ht.ui.menus.opmenu.create_absolute_reference_copy."""
mock_node = mocker.MagicMock(spec=hou.Node)
scriptargs = {"node": mock_node}
ht.ui.menus.opmenu.create_absolute_reference_copy(scriptargs)
mock_node.parent.return_value.copyItems.assert_called_with(
[mock_node], channel_reference_originals=True, relative_references=False
)
def test_save_item_to_file(mocker):
"""Test ht.ui.menus.opmenu.save_item_to_file."""
mock_copy = mocker.patch("ht.ui.menus.opmenu.copy_item")
mock_node = mocker.MagicMock(spec=hou.Node)
scriptargs = {"node": mock_node}
ht.ui.menus.opmenu.save_item_to_file(scriptargs)
mock_copy.assert_called_with(mock_node)
|
bgp/apps.py
|
maznu/peering-manager
| 127 |
49338
|
<reponame>maznu/peering-manager<filename>bgp/apps.py
from django.apps import AppConfig
class BgpConfig(AppConfig):
name = "bgp"
verbose_name = "BGP"
|
safe_control_gym/envs/env_wrappers/vectorized_env/subproc_vec_env.py
|
catgloss/safe-control-gym
| 120 |
49388
|
<reponame>catgloss/safe-control-gym
"""Subprocess vectorized environments.
See also:
* https://github.com/openai/baselines/blob/master/baselines/common/vec_env/subproc_vec_env.py
* https://github.com/DLR-RM/stable-baselines3/blob/master/stable_baselines3/common/vec_env/subproc_vec_env.py
"""
import copy
import numpy as np
import multiprocessing as mp
from safe_control_gym.utils.utils import get_random_state, set_random_state
from safe_control_gym.envs.env_wrappers.vectorized_env.vec_env import VecEnv
from safe_control_gym.envs.env_wrappers.vectorized_env.vec_env_utils import _flatten_list, _flatten_obs, CloudpickleWrapper, clear_mpi_env_vars
class SubprocVecEnv(VecEnv):
"""Multiprocess envs.
"""
def __init__(self, env_fns, spaces=None, context="spawn", n_workers=1):
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.n_workers = n_workers
assert nenvs % n_workers == 0, "Number of envs must be divisible by number of workers to run in series"
env_fns = np.array_split(env_fns, self.n_workers)
# Context is necessary for multiprocessing with CUDA, see pytorch.org/docs/stable/notes/multiprocessing.html
ctx = mp.get_context(context)
self.remotes, self.work_remotes = zip(
*[ctx.Pipe() for _ in range(self.n_workers)])
self.ps = [
ctx.Process(target=worker,
args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote,
env_fn) in zip(self.work_remotes, self.remotes, env_fns)
]
for p in self.ps:
p.daemon = True # If the main process crashes, we should not cause things to hang.
with clear_mpi_env_vars():
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces_spec', None))
observation_space, action_space = self.remotes[0].recv().x
self.viewer = None
VecEnv.__init__(self, nenvs, observation_space, action_space)
def step_async(self, actions):
self._assert_not_closed()
actions = np.array_split(actions, self.n_workers)
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
self._assert_not_closed()
results = [remote.recv() for remote in self.remotes]
results = _flatten_list(results)
self.waiting = False
obs, rews, dones, infos = zip(*results)
return _flatten_obs(obs), np.stack(rews), np.stack(dones), {"n": infos}
def reset(self):
self._assert_not_closed()
for remote in self.remotes:
remote.send(('reset', None))
results = [remote.recv() for remote in self.remotes]
results = _flatten_list(results)
obs, infos = zip(*results)
return _flatten_obs(obs), {"n": infos}
def get_images(self):
"""Called by parent `render` to support tiling images.
"""
self._assert_not_closed()
for pipe in self.remotes:
pipe.send(('render', None))
imgs = [pipe.recv() for pipe in self.remotes]
imgs = _flatten_list(imgs)
return imgs
def close(self):
if self.closed:
return
if self.viewer is not None:
self.viewer.close()
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def _assert_not_closed(self):
assert not self.closed, "Trying to operate on a SubprocVecEnv after calling close()"
def get_env_random_state(self):
for remote in self.remotes:
remote.send(('get_random_state', None))
worker_random_states = [remote.recv().x for remote in self.remotes]
return worker_random_states
def set_env_random_state(self, worker_random_states):
for remote, random_state in zip(self.remotes, worker_random_states):
remote.send(('set_random_state', random_state))
res = [remote.recv() for remote in self.remotes]
def get_attr(self, attr_name, indices=None):
"""Return attribute from vectorized environment (see base class).
"""
        # _get_target_envs returns three values; the splits are not needed here
        target_remotes, remote_env_indices, _ = self._get_target_envs(indices)
for remote, env_indices in zip(target_remotes, remote_env_indices):
remote.send(("get_attr", (env_indices, attr_name)))
return _flatten_list([remote.recv() for remote in target_remotes])
def set_attr(self, attr_name, values, indices=None):
"""Set attribute inside vectorized environments (see base class).
"""
target_remotes, remote_env_indices, splits = self._get_target_envs(
indices)
value_splits = []
for i in range(len(splits) - 1):
start, end = splits[i], splits[i + 1]
value_splits.append(values[start:end])
for remote, env_indices, value_split in zip(target_remotes,
remote_env_indices,
value_splits):
remote.send(("set_attr", (env_indices, attr_name, value_split)))
for remote in target_remotes:
remote.recv()
def env_method(self,
method_name,
method_args=None,
method_kwargs=None,
indices=None):
"""Call instance methods of vectorized environments.
"""
target_remotes, remote_env_indices, splits = self._get_target_envs(indices)
method_arg_splits, method_kwarg_splits = [], []
for i in range(len(splits) - 1):
start, end = splits[i], splits[i + 1]
            if method_args is None:
                # (end - start) is the number of envs in this split; cast to int since the
                # split boundaries are numpy integers and list repetition needs a plain int
                method_arg_splits.append([[]] * int(end - start))
            else:
                method_arg_splits.append(method_args[start:end])
            if method_kwargs is None:
                method_kwarg_splits.append([{}] * int(end - start))
            else:
                method_kwarg_splits.append(method_kwargs[start:end])
for remote, env_indices, method_arg_split, method_kwarg_split in zip(
target_remotes, remote_env_indices, method_arg_splits,
method_kwarg_splits):
remote.send(("env_method", (env_indices, method_name,
method_arg_split, method_kwarg_split)))
return _flatten_list([remote.recv() for remote in target_remotes])
def _get_target_envs(self, indices):
"""
Example:
n_workers: 3
current envs: [0,1,2,3,4,5]
remote_envs: [0,1], [2,3], [4,5]
target_envs: [1,1,3,4]
remote_indices: [0,0,1,1] -> [0,1]
splits: [0,2] -> [0,2,4]
remote_env_indices: [1,1,0,1] -> [1,1], [0,1]
"""
assert indices is None or sorted(
indices) == indices, "Indices must be ordered"
indices = self._get_indices(indices)
remote_indices = [idx // self.n_workers for idx in indices]
remote_env_indices = [idx % self.n_workers for idx in indices]
remote_indices, splits = np.unique(np.array(remote_indices), return_index=True)
target_remotes = [self.remotes[idx] for idx in remote_indices]
remote_env_indices = np.split(np.array(remote_env_indices), splits[1:])
        # np.split returns a plain Python list of sub-arrays, so convert each one individually
        remote_env_indices = [part.tolist() for part in remote_env_indices]
splits = np.append(splits, [len(indices)])
return target_remotes, remote_env_indices, splits
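# Hedged usage sketch (not part of the original module; `make_env` is a hypothetical
# environment factory): four envs run across two worker processes, two per worker.
#
#     import functools
#     env_fns = [functools.partial(make_env, seed=i) for i in range(4)]
#     vec_env = SubprocVecEnv(env_fns, n_workers=2)
#     obs, infos = vec_env.reset()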
def worker(remote, parent_remote, env_fn_wrappers):
"""Worker func to execute vec_env commands.
"""
def step_env(env, action):
ob, reward, done, info = env.step(action)
if done:
end_obs = copy.deepcopy(ob)
end_info = copy.deepcopy(info)
ob, info = env.reset()
info["terminal_observation"] = end_obs
info["terminal_info"] = end_info
return ob, reward, done, info
parent_remote.close()
envs = [env_fn_wrapper() for env_fn_wrapper in env_fn_wrappers.x]
try:
while True:
cmd, data = remote.recv()
# Branch out for requests.
if cmd == 'step':
remote.send(
[step_env(env, action) for env, action in zip(envs, data)])
elif cmd == 'reset':
remote.send([env.reset() for env in envs])
elif cmd == 'render':
remote.send([env.render(mode='rgb_array') for env in envs])
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces_spec':
remote.send(
CloudpickleWrapper(
(envs[0].observation_space, envs[0].action_space)))
elif cmd == "get_random_state":
remote.send(CloudpickleWrapper(get_random_state()))
elif cmd == "set_random_state":
set_random_state(data)
# Placeholder for the return.
remote.send(True)
elif cmd == "get_attr":
env_indices, attr_name = data
target_envs = [envs[idx] for idx in env_indices]
remote.send([getattr(env, attr_name) for env in target_envs])
elif cmd == "set_attr":
env_indices, attr_name, values = data
target_envs = [envs[idx] for idx in env_indices]
remote.send([
setattr(env, attr_name, value)
for env, value in zip(target_envs, values)
])
elif cmd == "env_method":
env_indices, name, args_list, kwargs_list = data
target_envs = [envs[idx] for idx in env_indices]
methods = [getattr(env, name) for env in target_envs]
remote.send([
method(*args, **kwargs) for method, args, kwargs in zip(
methods, args_list, kwargs_list)
])
else:
raise NotImplementedError
except KeyboardInterrupt:
print('SubprocVecEnv worker: got KeyboardInterrupt')
except Exception as e:
print('Environment runner process failed...')
print(str(e))
finally:
for env in envs:
env.close()
|
AdminServer/appscale/admin/stop_services.py
|
loftwah/appscale
| 790 |
49398
|
<gh_stars>100-1000
""" Tries to stop all services until they are stopped. """
import argparse
import logging
import time
from appscale.common import service_helper
from appscale.common.constants import LOG_FORMAT
from appscale.common.retrying import retry
logger = logging.getLogger(__name__)
def start_service():
""" Starts a service. """
parser = argparse.ArgumentParser()
parser.add_argument('service', help='The service to start')
args = parser.parse_args()
service_helper.start(args.service)
def stop_service():
""" Stops a service. """
parser = argparse.ArgumentParser()
parser.add_argument('service', help='The service to stop')
args = parser.parse_args()
service_helper.stop(args.service)
def stop_services():
""" Tries to stop all appscale services until they are stopped. """
@retry(max_retries=3)
def stop_with_retries():
logger.debug('Stopping AppScale services')
service_helper.start('appscale-down.target', enable=False)
logger.info('Waiting for services to stop')
stop_requested = False
original_services_count = None
stopped_count = 0
while True:
services = service_helper.list()
if original_services_count is None:
original_services_count = len(services)
running = {service: state for service, state in services.items()
               if state not in ('stopped',)}
if not running:
logger.info('Finished stopping services')
break
if original_services_count - len(running) != stopped_count:
stopped_count = original_services_count - len(running)
logger.info(
'Stopped {}/{} services'.format(stopped_count, original_services_count))
if not stop_requested:
stop_with_retries()
stop_requested = True
time.sleep(min(0.3 * len(running), 5))
def main():
""" Main function which terminates all appscale processes. """
logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)
# Parse command line arguments
parser = argparse.ArgumentParser(description='A stop services command')
parser.add_argument('--verbose', action='store_true',
help='Output debug-level logging')
args = parser.parse_args()
if args.verbose:
logging.getLogger('appscale').setLevel(logging.DEBUG)
stop_services()
|
repoxplorer/controllers/tags.py
|
Priya-100/repoxplorer
| 107 |
49426
|
# Copyright 2017, <NAME>
# Copyright 2017, Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pecan import expose
from repoxplorer.controllers import utils
from repoxplorer import index
from repoxplorer.index.projects import Projects
from repoxplorer.index.tags import Tags
from repoxplorer.index.contributors import Contributors
class TagsController(object):
@expose('json')
def tags(self, pid=None, tid=None,
dfrom=None, dto=None, inc_repos=None):
t = Tags(index.Connector(index_suffix='tags'))
projects_index = Projects()
idents = Contributors()
query_kwargs = utils.resolv_filters(
projects_index, idents, pid, tid, None, None,
dfrom, dto, inc_repos, None, None, None, None)
p_filter = [":".join(r.split(':')[:-1]) for r in query_kwargs['repos']]
dfrom = query_kwargs['fromdate']
dto = query_kwargs['todate']
ret = [r['_source'] for r in t.get_tags(p_filter, dfrom, dto)]
# TODO: if tid is given we can include user defined releases
# for repo tagged with tid.
if not pid:
return ret
# now append user defined releases
ur = {}
project = projects_index.get(pid, source=['refs', 'releases'])
for release in project.get('releases', []):
ur[release['name']] = release
for ref in project['refs']:
for release in ref.get('releases', []):
ur[release['name']] = release
for rel in ur.values():
ret.append(rel)
return ret
|
unit6/spiders/p5_downloader_middleware_handson/p5_downloader_middleware_handson/settings.py
|
nulearn3296/scrapy-training
| 182 |
49440
|
BOT_NAME = 'p5_downloader_middleware_handson'
SPIDER_MODULES = ['p5_downloader_middleware_handson.spiders']
NEWSPIDER_MODULE = 'p5_downloader_middleware_handson.spiders'
ROBOTSTXT_OBEY = True
DOWNLOADER_MIDDLEWARES = {
'p5_downloader_middleware_handson.middlewares.SeleniumDownloaderMiddleware': 543,
}
SELENIUM_ENABLED = True
|
ws4py/_asyncio_compat.py
|
diveyez/WebSocket-for-Python
| 733 |
49453
|
"""Provide compatibility over different versions of asyncio."""
import asyncio
if hasattr(asyncio, "async"):
    # Fall back to the old name; ensure_future was only added in Python 3.4.4
ensure_future = getattr(asyncio, "async")
else:
ensure_future = asyncio.ensure_future
|
pretrain.py
|
ku21fan/STR-Fewer-Labels
| 105 |
49470
|
<reponame>ku21fan/STR-Fewer-Labels<gh_stars>100-1000
import os
import sys
import time
import random
import string
import argparse
import torch
import torch.backends.cudnn as cudnn
import torch.nn.init as init
import torch.optim as optim
import torch.utils.data
import numpy as np
from tqdm import tqdm
from utils import Averager, adjust_learning_rate, AttnLabelConverter
from dataset import hierarchical_dataset, AlignCollate_SelfSL, Batch_Balanced_Dataset
from model import Model
from modules.self_supervised import MoCoLoss
from test import validation
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def train(opt, log):
if opt.self == "MoCo":
opt.batch_size = 256
""" dataset preparation """
if opt.select_data == "unlabel":
select_data = ["U1.Book32", "U2.TextVQA", "U3.STVQA"]
batch_ratio = [round(1 / len(select_data), 3)] * len(select_data)
else:
select_data = opt.select_data.split("-")
batch_ratio = opt.batch_ratio.split("-")
train_loader = Batch_Balanced_Dataset(
opt, opt.train_data, select_data, batch_ratio, log, learn_type="self"
)
AlignCollate_valid = AlignCollate_SelfSL(opt)
valid_dataset, valid_dataset_log = hierarchical_dataset(
root=opt.valid_data, opt=opt, data_type="unlabel"
)
valid_loader = torch.utils.data.DataLoader(
valid_dataset,
batch_size=opt.batch_size,
shuffle=True, # 'True' to check training progress with validation function.
num_workers=int(opt.workers),
collate_fn=AlignCollate_valid,
pin_memory=False,
)
log.write(valid_dataset_log)
print("-" * 80)
log.write("-" * 80 + "\n")
""" model configuration """
if opt.self == "RotNet":
model = Model(opt, SelfSL_layer=opt.SelfSL_layer)
# weight initialization
for name, param in model.named_parameters():
if "localization_fc2" in name:
print(f"Skip {name} as it is already initialized")
continue
try:
if "bias" in name:
init.constant_(param, 0.0)
elif "weight" in name:
init.kaiming_normal_(param)
except Exception as e: # for batchnorm.
if "weight" in name:
param.data.fill_(1)
continue
elif opt.self == "MoCo":
model = MoCoLoss(
opt, dim=opt.moco_dim, K=opt.moco_k, m=opt.moco_m, T=opt.moco_t
)
# data parallel for multi-GPU
model = torch.nn.DataParallel(model).to(device)
model.train()
if opt.saved_model != "":
print(f"loading pretrained model from {opt.saved_model}")
if opt.FT:
model.load_state_dict(torch.load(opt.saved_model), strict=False)
else:
model.load_state_dict(torch.load(opt.saved_model))
print("Model:")
print(model)
log.write(repr(model) + "\n")
""" setup loss """
criterion = torch.nn.CrossEntropyLoss(ignore_index=-1).to(device)
# loss averager
train_loss_avg = Averager()
valid_loss_avg = Averager()
# filter that only require gradient descent
filtered_parameters = []
params_num = []
for p in filter(lambda p: p.requires_grad, model.parameters()):
filtered_parameters.append(p)
params_num.append(np.prod(p.size()))
print(f"Trainable params num: {sum(params_num)}")
log.write(f"Trainable params num: {sum(params_num)}\n")
# [print(name, p.numel()) for name, p in filter(lambda p: p[1].requires_grad, model.named_parameters())]
# setup optimizer
if opt.optimizer == "adam":
optimizer = torch.optim.Adam(filtered_parameters, lr=opt.lr)
elif opt.self == "MoCo":
optimizer = torch.optim.SGD(
filtered_parameters,
lr=opt.moco_lr,
momentum=opt.moco_SGD_m,
weight_decay=opt.moco_wd,
)
opt.schedule = opt.moco_schedule
opt.lr = opt.moco_lr
opt.lr_drop_rate = opt.moco_lr_drop_rate
else:
optimizer = torch.optim.SGD(
filtered_parameters,
lr=opt.lr,
momentum=opt.momentum,
weight_decay=opt.weight_decay,
)
print("Optimizer:")
print(optimizer)
log.write(repr(optimizer) + "\n")
if "super" in opt.schedule:
if opt.optimizer == "sgd":
cycle_momentum = True
else:
cycle_momentum = False
scheduler = torch.optim.lr_scheduler.OneCycleLR(
optimizer,
max_lr=opt.lr,
cycle_momentum=cycle_momentum,
div_factor=20,
final_div_factor=1000,
total_steps=opt.num_iter,
)
print("Scheduler:")
print(scheduler)
log.write(repr(scheduler) + "\n")
""" final options """
# print(opt)
opt_log = "------------ Options -------------\n"
args = vars(opt)
for k, v in args.items():
opt_log += f"{str(k)}: {str(v)}\n"
opt_log += "---------------------------------------\n"
print(opt_log)
log.write(opt_log)
log.close()
""" start training """
start_iter = 0
if opt.saved_model != "":
try:
start_iter = int(opt.saved_model.split("_")[-1].split(".")[0])
print(f"continue to train, start_iter: {start_iter}")
except:
pass
start_time = time.time()
iteration = start_iter
best_score = -1
# training loop
for iteration in tqdm(
range(start_iter + 1, opt.num_iter + 1),
total=opt.num_iter,
position=0,
leave=True,
):
# train part
if opt.self == "RotNet":
image, Self_label = train_loader.get_batch()
image = image.to(device)
preds = model(image, SelfSL_layer=opt.SelfSL_layer)
target = torch.LongTensor(Self_label).to(device)
elif opt.self == "MoCo":
q, k = train_loader.get_batch_two_images()
q = q.to(device)
k = k.to(device)
preds, target = model(im_q=q, im_k=k)
loss = criterion(preds, target)
train_loss_avg.add(loss)
model.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(
model.parameters(), opt.grad_clip
) # gradient clipping with 5 (Default)
optimizer.step()
if "super" in opt.schedule:
scheduler.step()
else:
adjust_learning_rate(optimizer, iteration, opt)
# validation part.
# To see training progress, we also conduct validation when 'iteration == 1'
if iteration % opt.val_interval == 0 or iteration == 1:
# for validation log
with open(f"./saved_models/{opt.exp_name}/log_train.txt", "a") as log:
model.eval()
with torch.no_grad():
length_of_data = 0
infer_time = 0
n_correct = 0
for i, (image_valid, Self_label_valid) in tqdm(
enumerate(valid_loader),
total=len(valid_loader),
position=1,
leave=False,
):
if opt.self == "RotNet":
batch_size = image_valid.size(0)
start_infer_time = time.time()
preds = model(
image_valid.to(device), SelfSL_layer=opt.SelfSL_layer
)
forward_time = time.time() - start_infer_time
target = torch.LongTensor(Self_label_valid).to(device)
elif opt.self == "MoCo":
batch_size = image_valid.size(0)
q_valid = image_valid.to(device)
k_valid = Self_label_valid.to(device)
start_infer_time = time.time()
preds, target = model(im_q=q_valid, im_k=k_valid)
forward_time = time.time() - start_infer_time
loss = criterion(preds, target)
valid_loss_avg.add(loss)
infer_time += forward_time
_, preds_index = preds.max(1)
n_correct += (preds_index == target).sum().item()
length_of_data = length_of_data + batch_size
current_score = n_correct / length_of_data * 100
model.train()
# keep best score (accuracy) model on valid dataset
if current_score > best_score:
best_score = current_score
torch.save(
model.state_dict(),
f"./saved_models/{opt.exp_name}/best_score.pth",
)
# validation log: loss, lr, score, time.
lr = optimizer.param_groups[0]["lr"]
elapsed_time = time.time() - start_time
valid_log = f"\n[{iteration}/{opt.num_iter}] Train loss: {train_loss_avg.val():0.5f}, Valid loss: {valid_loss_avg.val():0.5f}, lr: {lr:0.7f}\n"
valid_log += f"Best_score: {best_score:0.2f}, Current_score: {current_score:0.2f}, "
valid_log += (
f"Infer_time: {infer_time:0.1f}, Elapsed_time: {elapsed_time:0.1f}"
)
train_loss_avg.reset()
valid_loss_avg.reset()
# show some predicted results
dashed_line = "-" * 80
if opt.self == "RotNet":
head = f"GT:0 vs Pred | GT:90 vs Pred | GT:180 vs Pred | GT:270 vs Pred"
preds_index = preds_index[:20]
gts = Self_label_valid[:20]
elif opt.self == "MoCo":
head = f"GT:0 vs Pred | GT:0 vs Pred | GT:0 vs Pred | GT:0 vs Pred"
preds_index = preds_index[:8]
gts = torch.zeros(preds_index.shape[0], dtype=torch.long)
predicted_result_log = f"{dashed_line}\n{head}\n{dashed_line}\n"
for i, (gt, pred) in enumerate(zip(gts, preds_index)):
if opt.self == "RotNet":
gt, pred = gt * 90, pred * 90
if i % 4 != 3:
predicted_result_log += f"{gt} vs {pred} | "
else:
predicted_result_log += f"{gt} vs {pred} \n"
predicted_result_log += f"{dashed_line}"
valid_log = f"{valid_log}\n{predicted_result_log}"
print(valid_log)
log.write(valid_log + "\n")
print(
f'finished the experiment: {opt.exp_name}, "CUDA_VISIBLE_DEVICES" was {opt.CUDA_VISIBLE_DEVICES}'
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--train_data",
default="data_CVPR2021/training/unlabel/",
help="path to training dataset",
)
parser.add_argument(
"--valid_data",
default="data_CVPR2021/validation/",
help="path to validation dataset",
)
parser.add_argument(
"--workers", type=int, default=4, help="number of data loading workers"
)
parser.add_argument("--batch_size", type=int, default=128, help="input batch size")
parser.add_argument(
"--num_iter", type=int, default=200000, help="number of iterations to train for"
)
parser.add_argument(
"--val_interval",
type=int,
default=2000,
help="Interval between each validation",
)
parser.add_argument(
"--FT", type=str, default="init", help="whether to do fine-tuning |init|freeze|"
)
parser.add_argument(
"--optimizer", type=str, default="sgd", help="optimizer |sgd|adadelta|adam|"
)
parser.add_argument(
"--lr", type=float, default=0.1, help="learning rate. default for RotNet"
)
parser.add_argument(
"--momentum",
type=float,
default=0.9,
help="momentum for SGD. default for RotNet",
)
parser.add_argument(
"--weight_decay",
type=float,
default=5e-4,
help="weight_decay for SGD. default for RotNet",
)
parser.add_argument(
"--schedule",
default=[0.3, 0.6, 0.8],
nargs="*",
help="learning rate schedule (when to drop lr by lr_drop_rate) default for RotNet",
)
parser.add_argument(
"--lr_drop_rate",
type=float,
default=0.2,
help="lr_drop_rate. default for RotNet",
)
parser.add_argument(
"--grad_clip", type=float, default=5, help="gradient clipping value. default=5"
)
""" Data processing """
parser.add_argument(
"--select_data",
type=str,
default="unlabel",
help="select training data default is `unlabel` which means 11 real labeled datasets",
)
parser.add_argument(
"--batch_ratio",
type=str,
default="1",
help="assign ratio for each selected data in the batch",
)
parser.add_argument(
"--total_data_usage_ratio",
type=str,
default="1.0",
help="total data usage ratio, this ratio is multiplied to total number of data.",
)
parser.add_argument(
"--batch_max_length", type=int, default=25, help="maximum-label-length"
)
parser.add_argument(
"--imgH", type=int, default=32, help="the height of the input image"
)
parser.add_argument(
"--imgW", type=int, default=100, help="the width of the input image"
)
parser.add_argument(
"--Aug",
type=str,
default="None",
help="whether to use augmentation |None|mixup|manifold|cutmix|",
)
""" Model Architecture """
parser.add_argument("--model_name", type=str, required=True, help="CRNN|TRBA")
parser.add_argument(
"--num_fiducial",
type=int,
default=20,
help="number of fiducial points of TPS-STN",
)
parser.add_argument(
"--input_channel",
type=int,
default=3,
help="the number of input channel of Feature extractor",
)
parser.add_argument(
"--output_channel",
type=int,
default=512,
help="the number of output channel of Feature extractor",
)
parser.add_argument(
"--hidden_size", type=int, default=256, help="the size of the LSTM hidden state"
)
""" Self supervised learning """
parser.add_argument(
"--self",
type=str,
default="RotNet",
help="whether to use self-supervised learning |RotNet|MoCo|",
)
parser.add_argument(
"--SelfSL_layer", type=str, default="CNN", help="for SelfSL_layer"
)
# moco specific configs:
parser.add_argument(
"--moco_dim", default=128, type=int, help="feature dimension (default: 128)"
)
parser.add_argument(
"--moco_k",
default=65536,
type=int,
help="queue size; number of negative keys (default: 65536)",
)
parser.add_argument(
"--moco_m",
default=0.999,
type=float,
help="moco momentum of updating key encoder (default: 0.999)",
)
parser.add_argument(
"--moco_t", default=0.07, type=float, help="softmax temperature (default: 0.07)"
)
parser.add_argument("--moco_lr", default=0.03, type=float, help="SGD lr for moco")
parser.add_argument(
"--moco_wd", default=0.0001, type=float, help="SGD weight_decay for moco"
)
parser.add_argument(
"--moco_SGD_m", default=0.9, type=float, help="SGD momentum for moco"
)
parser.add_argument(
"--moco_schedule", default=[0.6, 0.8], type=float, help="SGD momentum for moco"
)
parser.add_argument(
"--moco_lr_drop_rate", type=float, default=0.1, help="moco lr_drop_rate"
)
""" exp_name and etc """
parser.add_argument("--exp_name", help="Where to store logs and models")
parser.add_argument(
"--manual_seed", type=int, default=111, help="for random seed setting"
)
parser.add_argument(
"--saved_model", default="", help="path to model to continue training"
)
opt = parser.parse_args()
opt.gpu_name = "_".join(torch.cuda.get_device_name().split())
# Use 'NV' for CRNN, 'NR' or 'TR' for TRBA.
if opt.model_name[0] == "N":
opt.Transformation = "None"
elif opt.model_name[0] == "T":
opt.Transformation = "TPS"
else:
        raise ValueError(f"Unsupported model_name {opt.model_name}: the first character must be 'N' (None) or 'T' (TPS)")
if opt.model_name[1] == "V":
opt.FeatureExtraction = "VGG"
elif opt.model_name[1] == "R":
opt.FeatureExtraction = "ResNet"
else:
        raise ValueError(f"Unsupported model_name {opt.model_name}: the second character must be 'V' (VGG) or 'R' (ResNet)")
opt.SequenceModeling = "None"
opt.Prediction = "None"
if not opt.exp_name:
opt.exp_name = (
f"pretrain-{opt.model_name}-{opt.self}-{opt.SelfSL_layer}-{opt.gpu_name}"
)
opt.exp_name += f"-Seed{opt.manual_seed}"
os.makedirs(f"./saved_models/{opt.exp_name}", exist_ok=True)
log = open(f"./saved_models/{opt.exp_name}/log_train.txt", "a")
command_line_input = " ".join(sys.argv)
log.write(f"Command line input: {command_line_input}\n")
""" Seed and GPU setting """
random.seed(opt.manual_seed)
np.random.seed(opt.manual_seed)
torch.manual_seed(opt.manual_seed)
torch.cuda.manual_seed_all(opt.manual_seed) # if you are using multi-GPU.
torch.cuda.manual_seed(opt.manual_seed)
    cudnn.benchmark = True  # It speeds up training.
cudnn.deterministic = True
opt.gpu_name = "_".join(torch.cuda.get_device_name().split())
if sys.platform == "linux":
opt.CUDA_VISIBLE_DEVICES = os.environ["CUDA_VISIBLE_DEVICES"]
else:
opt.CUDA_VISIBLE_DEVICES = 0 # for convenience
opt.num_gpu = torch.cuda.device_count()
if opt.num_gpu > 1:
print(
"We recommend to use 1 GPU, check your GPU number, you would miss CUDA_VISIBLE_DEVICES=0 or typo"
)
print("To use multi-gpu setting, remove or comment out these lines")
sys.exit()
if sys.platform == "win32":
opt.workers = 0
train(opt, log)
|
sampyl/tests/test_samplers.py
|
wilsonify/sampyl
| 308 |
49494
|
<gh_stars>100-1000
from ..core import np
from ..exceptions import *
from .logps import *
import sampyl as smp
import pytest
#TODO: Make tests to check correctness of samplers
np_source = np.__package__
n_samples = 100
def test_logp_with_grad():
logp = poisson_with_grad
start = {'lam1':1., 'lam2': 1.}
nuts = smp.NUTS(logp, start, grad_logp=True)
chain = nuts.sample(n_samples)
assert(len(chain)==n_samples)
def test_parallel_lin_model():
logp = linear_model_logp
start = {'b':np.zeros(5), 'sig': 1.}
metro = smp.Metropolis(logp, start)
nuts = smp.NUTS(logp, start)
metro_chains = metro.sample(n_samples, n_chains=2)
nuts_chains = nuts.sample(n_samples, n_chains=2)
assert(len(metro_chains) == 2)
assert(len(nuts_chains) == 2)
def test_parallel_2D():
start = {'lam1': 1., 'lam2': 1.}
metro = smp.Metropolis(poisson_logp, start)
nuts = smp.NUTS(poisson_logp, start)
metro_chains = metro.sample(n_samples, n_chains=2)
nuts_chains = nuts.sample(n_samples, n_chains=2)
assert(len(metro_chains) == 2)
assert(len(nuts_chains) == 2)
def test_sample_chain():
start = {'lam1': 1., 'lam2': 1.}
step1 = smp.Metropolis(poisson_logp, start, condition=['lam2'])
step2 = smp.NUTS(poisson_logp, start, condition=['lam1'])
chain = smp.Chain([step1, step2], start)
trace = chain.sample(n_samples)
assert(trace.shape == (n_samples,))
def test_conditional_chain():
logp = poisson_logp
start = {'lam1': 1., 'lam2': 2.}
metro = smp.Metropolis(logp, start, condition=['lam2'])
nuts = smp.NUTS(logp, start, condition=['lam1'])
state = metro._conditional_step()
assert(state['lam2'] == 2.)
nuts.state.update(state)
state = nuts._conditional_step()
assert(len(state) == 2)
def test_conditional():
logp = poisson_logp
start = {'lam1': 1., 'lam2': 2.}
metro = smp.Metropolis(logp, start, condition=['lam2'])
state = metro._conditional_step()
assert(len(state) == 2)
assert(state['lam2'] == 2.)
def test_metropolis_linear_model():
logp = linear_model_logp
start = {'b':np.zeros(5), 'sig': 1.}
metro = smp.Metropolis(logp, start)
trace = metro.sample(n_samples)
assert(trace.shape == (n_samples,))
def test_hamiltonian_linear_model():
logp = linear_model_logp
start = {'b': np.zeros(5), 'sig': 1.}
hmc = smp.Hamiltonian(logp, start)
trace = hmc.sample(n_samples)
assert(trace.shape == (n_samples,))
def test_nuts_linear_model():
logp = linear_model_logp
start = {'b': np.zeros(5), 'sig': 1.}
nuts = smp.NUTS(logp, start)
trace = nuts.sample(n_samples)
assert(trace.shape == (n_samples,))
def test_metropolis():
logp = normal_1D_logp
start = {'x': 1.}
metro = smp.Metropolis(logp, start)
trace = metro.sample(n_samples)
assert(trace.shape == (n_samples,))
def test_hmc_autograd():
logp = normal_1D_logp
start = {'x': 1.}
if np_source == 'autograd.numpy':
hmc = smp.Hamiltonian(logp, start)
trace = hmc.sample(n_samples)
assert(trace.shape == (n_samples,))
elif np_source == 'numpy':
with pytest.raises(AutogradError):
hmc = smp.Hamiltonian(logp, start)
def test_hmc_pass_grad_logp():
logp, grad_logp = normal_1D_logp, normal_1D_grad_logp
start = {'x': 1.}
hmc = smp.Hamiltonian(logp, start, grad_logp=grad_logp)
trace = hmc.sample(n_samples)
assert(trace.shape == (n_samples,))
def test_NUTS_autograd():
logp = normal_1D_logp
start = {'x': 1.}
if np_source == 'autograd.numpy':
nuts = smp.NUTS(logp, start)
trace = nuts.sample(n_samples)
assert(trace.shape == (n_samples,))
elif np_source == 'numpy':
with pytest.raises(AutogradError):
nuts = smp.NUTS(logp, start)
def test_NUTS_pass_grad_logp():
logp, grad_logp = normal_1D_logp, normal_1D_grad_logp
start = {'x': 1.}
nuts = smp.NUTS(logp, start, grad_logp=grad_logp)
trace = nuts.sample(n_samples)
assert(trace.shape == (n_samples,))
def test_sampler_num_logp():
logp = 1.
start = {'x': None}
with pytest.raises(TypeError):
metro = smp.Metropolis(logp, start)
def test_sampler_no_args_logp():
def logp():
return x
start = {'x': None}
with pytest.raises(ValueError):
metro = smp.Metropolis(logp, start)
def test_metropolis_two_vars():
logp = poisson_logp
start = {'lam1':1., 'lam2':1.}
metro = smp.Metropolis(logp, start)
trace = metro.sample(n_samples)
assert(trace.shape == (n_samples,))
def test_metropolis_two_vars_start():
logp = poisson_logp
start = {'lam1':1., 'lam2':1.}
metro = smp.Metropolis(logp, start)
trace = metro.sample(n_samples)
assert(trace.shape == (n_samples,))
def test_slice():
logp = normal_1D_logp
start = {'x': 1.}
slice = smp.Slice(logp, start)
trace = slice.sample(n_samples)
assert(trace.shape == (n_samples,))
def test_slice_two_vars():
logp = poisson_logp
start = {'lam1': 1., 'lam2': 1.}
slice = smp.Slice(logp, start)
trace = slice.sample(n_samples)
assert(trace.shape == (n_samples,))
|
tests/ut/python/dataset/test_flanger.py
|
PowerOlive/mindspore
| 3,200 |
49517
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import pytest
import mindspore.dataset as ds
import mindspore.dataset.audio.transforms as audio
from mindspore import log as logger
from mindspore.dataset.audio.utils import Modulation, Interpolation
def count_unequal_element(data_expected, data_me, rtol, atol):
assert data_expected.shape == data_me.shape
total_count = len(data_expected.flatten())
error = np.abs(data_expected - data_me)
greater = np.greater(error, atol + np.abs(data_expected) * rtol)
loss_count = np.count_nonzero(greater)
assert (loss_count / total_count) < rtol, "\ndata_expected_std:{0}\ndata_me_error:{1}\nloss:{2}".format(
data_expected[greater], data_me[greater], error[greater])
def test_flanger_eager_sinusoidal_linear_float64():
""" mindspore eager mode normal testcase:flanger op"""
# Original waveform
waveform = np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float64)
# Expect waveform
expect_waveform = np.array([[0.10000000000, 0.19999999536, 0.29999998145],
[0.23391812865, 0.29239766081, 0.35087719298]], dtype=np.float64)
flanger_op = audio.Flanger(44100, 0.0, 2.0, 0.0, 71.0, 0.5, 25.0, Modulation.SINUSOIDAL, Interpolation.LINEAR)
# Filtered waveform by flanger
output = flanger_op(waveform)
count_unequal_element(expect_waveform, output, 0.0001, 0.0001)
def test_flanger_eager_triangular_linear_float32():
""" mindspore eager mode normal testcase:flanger op"""
# Original waveform
waveform = np.array([[-1.2, 2, -3.6], [1, 2.4, 3.7]], dtype=np.float32)
# Expect waveform
expect_waveform = np.array([[-1.0000000000, 1.0000000000, -1.0000000000],
[0.58479529619, 1.0000000000, 1.0000000000]], dtype=np.float32)
flanger_op = audio.Flanger(44100, 0.0, 2.0, 0.0, 71.0, 0.5, 25.0, Modulation.TRIANGULAR, Interpolation.LINEAR)
# Filtered waveform by flanger
output = flanger_op(waveform)
count_unequal_element(expect_waveform, output, 0.0001, 0.0001)
def test_flanger_eager_triangular_linear_int():
""" mindspore eager mode normal testcase:flanger op"""
# Original waveform
waveform = np.array([[-2, -3, 0], [2, 2, 3]], dtype=np.int)
# Expect waveform
expect_waveform = np.array([[-1, -1, 0],
[1, 1, 1]], dtype=np.int)
flanger_op = audio.Flanger(44100, 0.0, 2.0, 0.0, 71.0, 0.5, 25.0, Modulation.TRIANGULAR, Interpolation.LINEAR)
# Filtered waveform by flanger
output = flanger_op(waveform)
count_unequal_element(expect_waveform, output, 0.0001, 0.0001)
def test_flanger_shape_221():
""" mindspore eager mode normal testcase:flanger op"""
# Original waveform
waveform = np.array([[[1], [1.1]], [[0.9], [0.6]]], dtype=np.float64)
# Expect waveform
expect_waveform = np.array([[[1.00000000],
[0.64327485]],
[[0.90000000],
[0.35087719]]], dtype=np.float64)
flanger_op = audio.Flanger(44100)
# Filtered waveform by flanger
output = flanger_op(waveform)
count_unequal_element(expect_waveform, output, 0.0001, 0.0001)
def test_flanger_shape_11211():
""" mindspore eager mode normal testcase:flanger op"""
# Original waveform
waveform = np.array([[[[[0.44]], [[0.55]]]]], dtype=np.float64)
# Expect waveform
expect_waveform = np.array([[[[[0.44000000]], [[0.55000000]]]]], dtype=np.float64)
flanger_op = audio.Flanger(44100)
# Filtered waveform by flanger
output = flanger_op(waveform)
count_unequal_element(expect_waveform, output, 0.0001, 0.0001)
def test_flanger_pipeline():
""" mindspore pipeline mode normal testcase:flanger op"""
# Original waveform
waveform = np.array([[[1.1, 1.2, 1.3], [1.4, 1.5, 1.6]]], dtype=np.float64)
# Expect waveform
expect_waveform = np.array([[[1.00000000000, 1.00000000000, 1.00000000000],
[0.81871345029, 0.87719298245, 0.93567251461]]], dtype=np.float64)
data = (waveform, np.random.sample((1, 2, 1)))
dataset = ds.NumpySlicesDataset(data, ["channel", "sample"], shuffle=False)
flanger_op = audio.Flanger(44100)
# Filtered waveform by flanger
dataset = dataset.map(
input_columns=["channel"], operations=flanger_op, num_parallel_workers=1)
i = 0
for item in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
count_unequal_element(expect_waveform[i, :],
item['channel'], 0.0001, 0.0001)
i += 1
def test_invalid_flanger_input():
def test_invalid_input(test_name, sample_rate, delay, depth, regen, width, speed, phase, modulation, interpolation,
error, error_msg):
logger.info("Test Flanger with bad input: {0}".format(test_name))
with pytest.raises(error) as error_info:
audio.Flanger(sample_rate, delay, depth, regen, width, speed, phase, modulation, interpolation)
assert error_msg in str(error_info.value)
test_invalid_input("invalid sample_rate parameter value", 0, 0.0, 2.0, 0.0, 71.0, 0.5, 25.0,
Modulation.SINUSOIDAL, Interpolation.LINEAR, ValueError,
"Input sample_rate is not within the required interval of [-2147483648, 0) and (0, 2147483647].")
test_invalid_input("invalid sample_rate parameter type as a float", 44100.5, 0.0, 2.0, 0.0, 71.0, 0.5, 25.0,
Modulation.SINUSOIDAL, Interpolation.LINEAR, TypeError,
"Argument sample_rate with value 44100.5 is not of "
"type [<class 'int'>], but got <class 'float'>.")
test_invalid_input("invalid sample_rate parameter type as a String", "44100", 0.0, 2.0, 0.0, 71.0, 0.5, 25.0,
Modulation.SINUSOIDAL, Interpolation.LINEAR, TypeError,
"Argument sample_rate with value 44100 is not of "
"type [<class 'int'>], but got <class 'str'>.")
test_invalid_input("invalid delay parameter type as a String", 44100, "0.0", 2.0, 0.0, 71.0, 0.5, 25.0,
Modulation.SINUSOIDAL, Interpolation.LINEAR, TypeError,
"Argument delay with value 0.0 is not of type [<class 'float'>, <class 'int'>],"
" but got <class 'str'>.")
test_invalid_input("invalid delay parameter value", 44100, 50, 2.0, 0.0, 71.0, 0.5, 25.0,
Modulation.SINUSOIDAL, Interpolation.LINEAR, ValueError,
"Input delay is not within the required interval of [0, 30].")
test_invalid_input("invalid depth parameter type as a String", 44100, 0.0, "2.0", 0.0, 71.0, 0.5, 25.0,
Modulation.SINUSOIDAL, Interpolation.LINEAR, TypeError,
"Argument depth with value 2.0 is not of type [<class 'float'>, <class 'int'>],"
" but got <class 'str'>.")
test_invalid_input("invalid depth parameter value", 44100, 0.0, 50.0, 0.0, 71.0, 0.5, 25.0,
Modulation.SINUSOIDAL, Interpolation.LINEAR, ValueError,
"Input depth is not within the required interval of [0, 10].")
test_invalid_input("invalid regen parameter type as a String", 44100, 0.0, 2.0, "0.0", 71.0, 0.5, 25.0,
Modulation.SINUSOIDAL, Interpolation.LINEAR, TypeError,
"Argument regen with value 0.0 is not of type [<class 'float'>, <class 'int'>],"
" but got <class 'str'>.")
test_invalid_input("invalid regen parameter value", 44100, 0.0, 2.0, 100.0, 71.0, 0.5, 25.0,
Modulation.SINUSOIDAL, Interpolation.LINEAR, ValueError,
"Input regen is not within the required interval of [-95, 95].")
test_invalid_input("invalid width parameter type as a String", 44100, 0.0, 2.0, 0.0, "71.0", 0.5, 25.0,
Modulation.SINUSOIDAL, Interpolation.LINEAR, TypeError,
"Argument width with value 71.0 is not of type [<class 'float'>, <class 'int'>],"
" but got <class 'str'>.")
test_invalid_input("invalid width parameter value", 44100, 0.0, 2.0, 0.0, 150.0, 0.5, 25.0,
Modulation.SINUSOIDAL, Interpolation.LINEAR, ValueError,
"Input width is not within the required interval of [0, 100].")
test_invalid_input("invalid speed parameter type as a String", 44100, 0.0, 2.0, 0.0, 71.0, "0.5", 25.0,
Modulation.SINUSOIDAL, Interpolation.LINEAR, TypeError,
"Argument speed with value 0.5 is not of type [<class 'float'>, <class 'int'>],"
" but got <class 'str'>.")
test_invalid_input("invalid speed parameter value", 44100, 0.0, 2.0, 0.0, 71.0, 50, 25.0,
Modulation.SINUSOIDAL, Interpolation.LINEAR, ValueError,
"Input speed is not within the required interval of [0.1, 10].")
test_invalid_input("invalid phase parameter type as a String", 44100, 0.0, 2.0, 0.0, 71.0, 0.5, "25.0",
Modulation.SINUSOIDAL, Interpolation.LINEAR, TypeError,
"Argument phase with value 25.0 is not of type [<class 'float'>, <class 'int'>],"
" but got <class 'str'>.")
test_invalid_input("invalid phase parameter value", 44100, 0.0, 2.0, 0.0, 71.0, 0.5, 150.0,
Modulation.SINUSOIDAL, Interpolation.LINEAR, ValueError,
"Input phase is not within the required interval of [0, 100].")
test_invalid_input("invalid modulation parameter value", 44100, 0.0, 2.0, 0.0, 71.0, 0.5, 25.0, "test",
Interpolation.LINEAR, TypeError,
"Argument modulation with value test is not of type [<enum 'Modulation'>], "
"but got <class 'str'>.")
test_invalid_input("invalid modulation parameter value", 44100, 0.0, 2.0, 0.0, 71.0, 0.5, 25.0,
Modulation.SINUSOIDAL, "test", TypeError,
"Argument interpolation with value test is not of type [<enum 'Interpolation'>], "
"but got <class 'str'>.")
if __name__ == '__main__':
test_flanger_eager_sinusoidal_linear_float64()
test_flanger_eager_triangular_linear_float32()
test_flanger_eager_triangular_linear_int()
test_flanger_shape_221()
test_flanger_shape_11211()
test_flanger_pipeline()
test_invalid_flanger_input()
|
quant/state.py
|
vincent87lee/alphahunter
| 149 |
49534
|
# -*- coding:utf-8 -*-
"""
State information
Project: alphahunter
Author: HJQuant
Description: Asynchronous driven quantitative trading framework
"""
class State:
    STATE_CODE_PARAM_MISS = 1        # Missing parameters when initializing the trading interface
    STATE_CODE_CONNECT_SUCCESS = 2   # Trading interface connected successfully
    STATE_CODE_CONNECT_FAILED = 3    # Trading interface connection failed
    STATE_CODE_DISCONNECT = 4        # Trading interface disconnected
    STATE_CODE_RECONNECTING = 5      # Trading interface reconnecting
    STATE_CODE_READY = 6             # Trading interface ready
    STATE_CODE_GENERAL_ERROR = 7     # General trading interface error
    STATE_CODE_DB_SUCCESS = 8        # Database connection succeeded
    STATE_CODE_DB_ERROR = 9          # Database connection failed
def __init__(self, platform, account, msg, code = STATE_CODE_PARAM_MISS):
self._platform = platform
self._account = account
self._msg = msg
self._code = code
@property
def platform(self):
return self._platform
@property
def account(self):
return self._account
@property
def msg(self):
return self._msg
@property
def code(self):
return self._code
def __str__(self):
return "platform:{} account:{} msg:{}".format(self._platform, self._account, self._msg)
def __repr__(self):
return str(self)
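# A minimal usage sketch (editor's addition, not part of the original module):
# construct a State for a successful connection and print it. The platform and
# account values below are illustrative.
if __name__ == "__main__":
    s = State("binance", "demo_account", "websocket connected",
              State.STATE_CODE_CONNECT_SUCCESS)
    print(s, "code:", s.code)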
|
docs/examples/tutorial/clibraries/queue3.py
|
johannes-mueller/cython
| 6,663 |
49574
|
<gh_stars>1000+
from cython.cimports import cqueue
from cython import cast
@cython.cclass
class Queue:
"""A queue class for C integer values.
>>> q = Queue()
>>> q.append(5)
>>> q.peek()
5
>>> q.pop()
5
"""
_c_queue = cython.declare(cython.pointer(cqueue.Queue))
def __cinit__(self):
self._c_queue = cqueue.queue_new()
if self._c_queue is cython.NULL:
raise MemoryError()
def __dealloc__(self):
if self._c_queue is not cython.NULL:
cqueue.queue_free(self._c_queue)
@cython.ccall
def append(self, value: cython.int):
if not cqueue.queue_push_tail(self._c_queue,
cast(cython.p_void, cast(cython.Py_ssize_t, value))):
raise MemoryError()
# The `cpdef` feature is obviously not available for the original "extend()"
# method, as the method signature is incompatible with Python argument
# types (Python does not have pointers). However, we can rename
# the C-ish "extend()" method to e.g. "extend_ints()", and write
# a new "extend()" method that provides a suitable Python interface by
# accepting an arbitrary Python iterable.
@cython.ccall
def extend(self, values):
for value in values:
self.append(value)
@cython.cfunc
def extend_ints(self, values: cython.p_int, count: cython.size_t):
value: cython.int
for value in values[:count]: # Slicing pointer to limit the iteration boundaries.
self.append(value)
@cython.ccall
@cython.exceptval(-1, check=True)
def peek(self) -> cython.int:
value: cython.int = cast(cython.Py_ssize_t, cqueue.queue_peek_head(self._c_queue))
if value == 0:
# this may mean that the queue is empty,
# or that it happens to contain a 0 value
if cqueue.queue_is_empty(self._c_queue):
raise IndexError("Queue is empty")
return value
@cython.ccall
@cython.exceptval(-1, check=True)
def pop(self) -> cython.int:
if cqueue.queue_is_empty(self._c_queue):
raise IndexError("Queue is empty")
return cast(cython.Py_ssize_t, cqueue.queue_pop_head(self._c_queue))
def __bool__(self):
return not cqueue.queue_is_empty(self._c_queue)
|
venv/Lib/site-packages/json_tricks/nonp.py
|
mintzer/pupillometry-rf-back
| 145 |
49597
|
<reponame>mintzer/pupillometry-rf-back
import warnings
from json import loads as json_loads
from os import fsync
from sys import exc_info
from json_tricks.utils import is_py3, dict_default, gzip_compress, gzip_decompress, JsonTricksDeprecation
from .utils import str_type, NoNumpyException # keep 'unused' imports
from .comment import strip_comments # keep 'unused' imports
#TODO @mark: imports removed?
from .encoders import TricksEncoder, json_date_time_encode, \
class_instance_encode, json_complex_encode, json_set_encode, numeric_types_encode, numpy_encode, \
nonumpy_encode, nopandas_encode, pandas_encode, noenum_instance_encode, \
enum_instance_encode, pathlib_encode # keep 'unused' imports
from .decoders import TricksPairHook, \
json_date_time_hook, ClassInstanceHook, \
json_complex_hook, json_set_hook, numeric_types_hook, json_numpy_obj_hook, \
json_nonumpy_obj_hook, \
nopandas_hook, pandas_hook, EnumInstanceHook, \
noenum_hook, pathlib_hook, nopathlib_hook # keep 'unused' imports
ENCODING = 'UTF-8'
_cih_instance = ClassInstanceHook()
_eih_instance = EnumInstanceHook()
DEFAULT_ENCODERS = [json_date_time_encode, json_complex_encode, json_set_encode,
numeric_types_encode, class_instance_encode, ]
DEFAULT_HOOKS = [json_date_time_hook, json_complex_hook, json_set_hook,
numeric_types_hook, _cih_instance, ]
#TODO @mark: add properties to all built-in encoders (for speed - but it should keep working without)
try:
import enum
except ImportError:
DEFAULT_ENCODERS = [noenum_instance_encode,] + DEFAULT_ENCODERS
DEFAULT_HOOKS = [noenum_hook,] + DEFAULT_HOOKS
else:
DEFAULT_ENCODERS = [enum_instance_encode,] + DEFAULT_ENCODERS
DEFAULT_HOOKS = [_eih_instance,] + DEFAULT_HOOKS
try:
import numpy
except ImportError:
DEFAULT_ENCODERS = [nonumpy_encode,] + DEFAULT_ENCODERS
DEFAULT_HOOKS = [json_nonumpy_obj_hook,] + DEFAULT_HOOKS
else:
# numpy encode needs to be before complex
DEFAULT_ENCODERS = [numpy_encode,] + DEFAULT_ENCODERS
DEFAULT_HOOKS = [json_numpy_obj_hook,] + DEFAULT_HOOKS
try:
import pandas
except ImportError:
DEFAULT_ENCODERS = [nopandas_encode,] + DEFAULT_ENCODERS
DEFAULT_HOOKS = [nopandas_hook,] + DEFAULT_HOOKS
else:
DEFAULT_ENCODERS = [pandas_encode,] + DEFAULT_ENCODERS
DEFAULT_HOOKS = [pandas_hook,] + DEFAULT_HOOKS
try:
import pathlib
except:
# No need to include a "nopathlib_encode" hook since we would not encounter
# the Path object if pathlib isn't available. However, we *could* encounter
# a serialized Path object (produced by a version of Python with pathlib).
DEFAULT_HOOKS = [nopathlib_hook,] + DEFAULT_HOOKS
else:
DEFAULT_ENCODERS = [pathlib_encode,] + DEFAULT_ENCODERS
DEFAULT_HOOKS = [pathlib_hook,] + DEFAULT_HOOKS
DEFAULT_NONP_ENCODERS = [nonumpy_encode,] + DEFAULT_ENCODERS # DEPRECATED
DEFAULT_NONP_HOOKS = [json_nonumpy_obj_hook,] + DEFAULT_HOOKS # DEPRECATED
def dumps(obj, sort_keys=None, cls=None, obj_encoders=DEFAULT_ENCODERS, extra_obj_encoders=(),
primitives=False, compression=None, allow_nan=False, conv_str_byte=False, fallback_encoders=(),
properties=None, **jsonkwargs):
"""
Convert a nested data structure to a json string.
:param obj: The Python object to convert.
:param sort_keys: Keep this False if you want order to be preserved.
:param cls: The json encoder class to use, defaults to NoNumpyEncoder which gives a warning for numpy arrays.
	:param obj_encoders: Iterable of encoders to use to convert arbitrary objects into json-able primitives.
:param extra_obj_encoders: Like `obj_encoders` but on top of them: use this to add encoders without replacing defaults. Since v3.5 these happen before default encoders.
:param fallback_encoders: These are extra `obj_encoders` that 1) are ran after all others and 2) only run if the object hasn't yet been changed.
:param allow_nan: Allow NaN and Infinity values, which is a (useful) violation of the JSON standard (default False).
:param conv_str_byte: Try to automatically convert between strings and bytes (assuming utf-8) (default False).
:param properties: A dictionary of properties that is passed to each encoder that will accept it.
:return: The string containing the json-encoded version of obj.
Other arguments are passed on to `cls`. Note that `sort_keys` should be false if you want to preserve order.
"""
if not hasattr(extra_obj_encoders, '__iter__'):
raise TypeError('`extra_obj_encoders` should be a tuple in `json_tricks.dump(s)`')
encoders = tuple(extra_obj_encoders) + tuple(obj_encoders)
properties = properties or {}
dict_default(properties, 'primitives', primitives)
dict_default(properties, 'compression', compression)
dict_default(properties, 'allow_nan', allow_nan)
if cls is None:
cls = TricksEncoder
txt = cls(sort_keys=sort_keys, obj_encoders=encoders, allow_nan=allow_nan,
primitives=primitives, fallback_encoders=fallback_encoders,
properties=properties, **jsonkwargs).encode(obj)
if not is_py3 and isinstance(txt, str):
txt = unicode(txt, ENCODING)
if not compression:
return txt
if compression is True:
compression = 5
txt = txt.encode(ENCODING)
gzstring = gzip_compress(txt, compresslevel=compression)
return gzstring
def dump(obj, fp, sort_keys=None, cls=None, obj_encoders=DEFAULT_ENCODERS, extra_obj_encoders=(),
primitives=False, compression=None, force_flush=False, allow_nan=False, conv_str_byte=False,
fallback_encoders=(), properties=None, **jsonkwargs):
"""
	Convert a nested data structure to a json string and write it to the given file.
:param fp: File handle or path to write to.
:param compression: The gzip compression level, or None for no compression.
	:param force_flush: If True, flush the file handle after writing and, where possible, ask the operating system to sync it to disk (default False).
The other arguments are identical to `dumps`.
"""
if (isinstance(obj, str_type) or hasattr(obj, 'write')) and isinstance(fp, (list, dict)):
raise ValueError('json-tricks dump arguments are in the wrong order: provide the data to be serialized before file handle')
txt = dumps(obj, sort_keys=sort_keys, cls=cls, obj_encoders=obj_encoders, extra_obj_encoders=extra_obj_encoders,
primitives=primitives, compression=compression, allow_nan=allow_nan, conv_str_byte=conv_str_byte,
fallback_encoders=fallback_encoders, properties=properties, **jsonkwargs)
if isinstance(fp, str_type):
if compression:
fh = open(fp, 'wb+')
else:
fh = open(fp, 'w+')
else:
fh = fp
if conv_str_byte:
try:
fh.write(b'')
except TypeError:
pass
# if not isinstance(txt, str_type):
# # Cannot write bytes, so must be in text mode, but we didn't get a text
# if not compression:
# txt = txt.decode(ENCODING)
else:
try:
fh.write(u'')
except TypeError:
if isinstance(txt, str_type):
txt = txt.encode(ENCODING)
try:
if compression and 'b' not in getattr(fh, 'mode', 'b?') and not isinstance(txt, str_type):
raise IOError('If compression is enabled, the file must be opened in binary mode.')
try:
fh.write(txt)
except TypeError as err:
err.args = (err.args[0] + '. A possible reason is that the file is not opened in binary mode; '
'be sure to set file mode to something like "wb".',)
raise
finally:
if force_flush:
fh.flush()
try:
if fh.fileno() is not None:
fsync(fh.fileno())
except (ValueError,):
pass
if isinstance(fp, str_type):
fh.close()
return txt
def loads(string, preserve_order=True, ignore_comments=None, decompression=None, obj_pairs_hooks=DEFAULT_HOOKS,
extra_obj_pairs_hooks=(), cls_lookup_map=None, allow_duplicates=True, conv_str_byte=False,
properties=None, **jsonkwargs):
"""
	Convert a json string into a nested data structure.
:param string: The string containing a json encoded data structure.
	:param decode_cls_instances: True to attempt to decode class instances (requires the environment to be similar to the encoding one).
:param preserve_order: Whether to preserve order by using OrderedDicts or not.
:param ignore_comments: Remove comments (starting with # or //).
:param decompression: True to use gzip decompression, False to use raw data, None to automatically determine (default). Assumes utf-8 encoding!
:param obj_pairs_hooks: A list of dictionary hooks to apply.
:param extra_obj_pairs_hooks: Like `obj_pairs_hooks` but on top of them: use this to add hooks without replacing defaults. Since v3.5 these happen before default hooks.
:param cls_lookup_map: If set to a dict, for example ``globals()``, then classes encoded from __main__ are looked up this dict.
:param allow_duplicates: If set to False, an error will be raised when loading a json-map that contains duplicate keys.
	:param parse_float: A function used to parse float strings into numbers (e.g. Decimal). There is also `parse_int`.
:param conv_str_byte: Try to automatically convert between strings and bytes (assuming utf-8) (default False).
	:return: The decoded data structure.
Other arguments are passed on to json_func.
"""
if not hasattr(extra_obj_pairs_hooks, '__iter__'):
raise TypeError('`extra_obj_pairs_hooks` should be a tuple in `json_tricks.load(s)`')
if decompression is None:
decompression = isinstance(string, bytes) and string[:2] == b'\x1f\x8b'
if decompression:
string = gzip_decompress(string).decode(ENCODING)
if not isinstance(string, str_type):
if conv_str_byte:
string = string.decode(ENCODING)
else:
			raise TypeError(('The input was of non-string type "{0:}" in `json_tricks.load(s)`. '
				'Bytes cannot be automatically decoded since the encoding is not known. The recommended '
				'way is to decode the bytes to a string and pass that string to `load(s)`, '
				'for example bytevar.decode("utf-8") if utf-8 is the encoding. Alternatively you can '
				'force an attempt by passing conv_str_byte=True, but this may cause decoding issues.')
				.format(type(string)))
if ignore_comments or ignore_comments is None:
new_string = strip_comments(string)
if ignore_comments is None and not getattr(loads, '_ignore_comments_warned', False) and string != new_string:
warnings.warn('`json_tricks.load(s)` stripped some comments, but `ignore_comments` was '
'not passed; in the next major release, the behaviour when `ignore_comments` is not '
'passed will change; it is recommended to explicitly pass `ignore_comments=True` if '
'you want to strip comments; see https://github.com/mverleg/pyjson_tricks/issues/74',
JsonTricksDeprecation)
loads._ignore_comments_warned = True
string = new_string
properties = properties or {}
dict_default(properties, 'preserve_order', preserve_order)
dict_default(properties, 'ignore_comments', ignore_comments)
dict_default(properties, 'decompression', decompression)
dict_default(properties, 'cls_lookup_map', cls_lookup_map)
dict_default(properties, 'allow_duplicates', allow_duplicates)
hooks = tuple(extra_obj_pairs_hooks) + tuple(obj_pairs_hooks)
hook = TricksPairHook(ordered=preserve_order, obj_pairs_hooks=hooks, allow_duplicates=allow_duplicates, properties=properties)
return json_loads(string, object_pairs_hook=hook, **jsonkwargs)
def load(fp, preserve_order=True, ignore_comments=None, decompression=None, obj_pairs_hooks=DEFAULT_HOOKS,
extra_obj_pairs_hooks=(), cls_lookup_map=None, allow_duplicates=True, conv_str_byte=False,
properties=None, **jsonkwargs):
"""
	Read json from a file handle or path and convert it to a nested data structure.
:param fp: File handle or path to load from.
The other arguments are identical to loads.
"""
try:
if isinstance(fp, str_type):
if decompression is not None:
open_binary = bool(decompression)
else:
with open(fp, 'rb') as fh:
# This attempts to detect gzip mode; gzip should always
# have this header, and text json can't have it.
open_binary = (fh.read(2) == b'\x1f\x8b')
with open(fp, 'rb' if open_binary else 'r') as fh:
string = fh.read()
else:
string = fp.read()
except UnicodeDecodeError as err:
# todo: not covered in tests, is it relevant?
raise Exception('There was a problem decoding the file content. A possible reason is that the file is not ' +
'opened in binary mode; be sure to set file mode to something like "rb".').with_traceback(exc_info()[2])
return loads(string, preserve_order=preserve_order, ignore_comments=ignore_comments, decompression=decompression,
obj_pairs_hooks=obj_pairs_hooks, extra_obj_pairs_hooks=extra_obj_pairs_hooks, cls_lookup_map=cls_lookup_map,
allow_duplicates=allow_duplicates, conv_str_byte=conv_str_byte, properties=properties, **jsonkwargs)
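# A minimal round-trip sketch (editor's addition, not part of the original
# module): `dumps` serializes a dict containing a set and a complex number via
# the default encoders, and `loads` restores it through the matching hooks.
# The payload values are illustrative.
if __name__ == "__main__":
	payload = {'z': 1 + 2j, 'tags': {'a', 'b'}}
	text = dumps(payload)
	restored = loads(text)
	assert restored['z'] == 1 + 2j and restored['tags'] == {'a', 'b'}
	print(text)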
|
numba/misc/inspection.py
|
luk-f-a/numba
| 6,620 |
49625
|
"""Miscellaneous inspection tools
"""
from tempfile import NamedTemporaryFile
def disassemble_elf_to_cfg(elf):
"""
Gets the CFG of the disassembly of an ELF object, elf, and renders it
appropriately depending on the execution environment (terminal/notebook).
"""
try:
import r2pipe
except ImportError:
raise RuntimeError("r2pipe package needed for disasm CFG")
def get_rendering(cmd=None):
if cmd is None:
raise ValueError("No command given")
with NamedTemporaryFile(delete=False) as f:
f.write(elf)
f.flush() # force write, radare2 needs a binary blob on disk
# catch if r2pipe can actually talk to radare2
try:
flags = ['-e io.cache=true', # fix relocations in disassembly
'-e scr.color=1', # 16 bit ANSI colour terminal
]
r = r2pipe.open(f.name, flags=flags)
data = r.cmd('af;%s' % cmd)
r.quit()
except Exception as e:
if "radare2 in PATH" in str(e):
msg = ("This feature requires 'radare2' to be "
"installed and available on the system see: "
"https://github.com/radareorg/radare2. "
"Cannot find 'radare2' in $PATH.")
raise RuntimeError(msg)
else:
raise e
return data
class DisasmCFG(object):
def _repr_svg_(self):
try:
import graphviz
except ImportError:
raise RuntimeError("graphviz package needed for disasm CFG")
jupyter_rendering = get_rendering(cmd='agfd')
# this just makes it read slightly better in jupyter notebooks
jupyter_rendering.replace('fontname="Courier",',
'fontname="Courier",fontsize=6,')
src = graphviz.Source(jupyter_rendering)
return src.pipe('svg').decode('UTF-8')
def __repr__(self):
return get_rendering(cmd='agf')
return DisasmCFG()
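# A minimal usage sketch (editor's addition, not part of the original module):
# feed raw ELF bytes, e.g. read from an object file on disk. The file name is
# hypothetical, and radare2 plus the r2pipe package must be installed.
if __name__ == "__main__":
    with open("compiled_kernel.o", "rb") as fh:  # hypothetical ELF object file
        print(disassemble_elf_to_cfg(fh.read()))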
|
crits/exploits/urls.py
|
dutrow/crits
| 738 |
49629
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^add/$', views.add_exploit, name='crits-exploits-views-add_exploit'),
url(r'^edit/cve/$', views.edit_exploit_cve, name='crits-exploits-views-edit_exploit_cve'),
url(r'^edit/name/(?P<id_>\S+)/$', views.edit_exploit_name, name='crits-exploits-views-edit_exploit_name'),
url(r'^details/(?P<id_>\S+)/$', views.exploit_detail, name='crits-exploits-views-exploit_detail'),
url(r'^remove/(?P<id_>\S+)/$', views.remove_exploit, name='crits-exploits-views-remove_exploit'),
url(r'^list/$', views.exploits_listing, name='crits-exploits-views-exploits_listing'),
url(r'^list/(?P<option>\S+)/$', views.exploits_listing, name='crits-exploits-views-exploits_listing'),
]
|
dglib/modules/sampler.py
|
billzhonggz/Transfer-Learning-Library
| 1,474 |
49634
|
"""
@author: <NAME>
@contact: <EMAIL>
"""
import random
import copy
import numpy as np
from torch.utils.data.dataset import ConcatDataset
from torch.utils.data.sampler import Sampler
class DefaultSampler(Sampler):
r"""Traverse all :math:`N` domains, randomly select :math:`K` samples in each domain to form a mini-batch of size
:math:`N\times K`.
Args:
data_source (ConcatDataset): dataset that contains data from multiple domains
batch_size (int): mini-batch size (:math:`N\times K` here)
"""
def __init__(self, data_source: ConcatDataset, batch_size: int):
super(Sampler, self).__init__()
self.num_all_domains = len(data_source.cumulative_sizes)
self.sample_idxes_per_domain = []
start = 0
for end in data_source.cumulative_sizes:
idxes = [idx for idx in range(start, end)]
self.sample_idxes_per_domain.append(idxes)
start = end
assert batch_size % self.num_all_domains == 0
self.batch_size_per_domain = batch_size // self.num_all_domains
self.length = len(list(self.__iter__()))
def __iter__(self):
sample_idxes_per_domain = copy.deepcopy(self.sample_idxes_per_domain)
final_idxes = []
stop_flag = False
while not stop_flag:
for domain in range(self.num_all_domains):
sample_idxes = sample_idxes_per_domain[domain]
selected_idxes = random.sample(sample_idxes, self.batch_size_per_domain)
final_idxes.extend(selected_idxes)
for idx in selected_idxes:
sample_idxes_per_domain[domain].remove(idx)
remaining_size = len(sample_idxes_per_domain[domain])
if remaining_size < self.batch_size_per_domain:
stop_flag = True
return iter(final_idxes)
def __len__(self):
return self.length
class RandomDomainSampler(Sampler):
r"""Randomly sample :math:`N` domains, then randomly select :math:`K` samples in each domain to form a mini-batch of
size :math:`N\times K`.
Args:
data_source (ConcatDataset): dataset that contains data from multiple domains
batch_size (int): mini-batch size (:math:`N\times K` here)
n_domains_per_batch (int): number of domains to select in a single mini-batch (:math:`N` here)
"""
def __init__(self, data_source: ConcatDataset, batch_size: int, n_domains_per_batch: int):
super(Sampler, self).__init__()
self.n_domains_in_dataset = len(data_source.cumulative_sizes)
self.n_domains_per_batch = n_domains_per_batch
assert self.n_domains_in_dataset >= self.n_domains_per_batch
self.sample_idxes_per_domain = []
start = 0
for end in data_source.cumulative_sizes:
idxes = [idx for idx in range(start, end)]
self.sample_idxes_per_domain.append(idxes)
start = end
assert batch_size % n_domains_per_batch == 0
self.batch_size_per_domain = batch_size // n_domains_per_batch
self.length = len(list(self.__iter__()))
def __iter__(self):
sample_idxes_per_domain = copy.deepcopy(self.sample_idxes_per_domain)
domain_idxes = [idx for idx in range(self.n_domains_in_dataset)]
final_idxes = []
stop_flag = False
while not stop_flag:
selected_domains = random.sample(domain_idxes, self.n_domains_per_batch)
for domain in selected_domains:
sample_idxes = sample_idxes_per_domain[domain]
if len(sample_idxes) < self.batch_size_per_domain:
selected_idxes = np.random.choice(sample_idxes, self.batch_size_per_domain, replace=True)
else:
selected_idxes = random.sample(sample_idxes, self.batch_size_per_domain)
final_idxes.extend(selected_idxes)
for idx in selected_idxes:
if idx in sample_idxes_per_domain[domain]:
sample_idxes_per_domain[domain].remove(idx)
remaining_size = len(sample_idxes_per_domain[domain])
if remaining_size < self.batch_size_per_domain:
stop_flag = True
return iter(final_idxes)
def __len__(self):
return self.length
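# A minimal usage sketch (editor's addition, not part of the original module):
# two toy TensorDatasets stand in for two domains; RandomDomainSampler then
# yields indices so that each mini-batch mixes N domains x K samples. All
# tensor shapes and sizes are illustrative.
if __name__ == "__main__":
    import torch
    from torch.utils.data import DataLoader, TensorDataset

    domain_a = TensorDataset(torch.zeros(10, 3), torch.zeros(10, dtype=torch.long))
    domain_b = TensorDataset(torch.ones(12, 3), torch.ones(12, dtype=torch.long))
    multi_domain = ConcatDataset([domain_a, domain_b])

    sampler = RandomDomainSampler(multi_domain, batch_size=4, n_domains_per_batch=2)
    loader = DataLoader(multi_domain, batch_size=4, sampler=sampler, drop_last=True)
    for features, labels in loader:
        print(features.shape, labels.tolist())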
|
alipay/aop/api/domain/AntMerchantExpandIndirectTiansuoBindModel.py
|
snowxmas/alipay-sdk-python-all
| 213 |
49654
|
<reponame>snowxmas/alipay-sdk-python-all<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.TiansuoIsvBindVO import TiansuoIsvBindVO
class AntMerchantExpandIndirectTiansuoBindModel(object):
def __init__(self):
self._tiansuo_isv_bind_list = None
@property
def tiansuo_isv_bind_list(self):
return self._tiansuo_isv_bind_list
@tiansuo_isv_bind_list.setter
def tiansuo_isv_bind_list(self, value):
if isinstance(value, list):
self._tiansuo_isv_bind_list = list()
for i in value:
if isinstance(i, TiansuoIsvBindVO):
self._tiansuo_isv_bind_list.append(i)
else:
self._tiansuo_isv_bind_list.append(TiansuoIsvBindVO.from_alipay_dict(i))
def to_alipay_dict(self):
params = dict()
if self.tiansuo_isv_bind_list:
if isinstance(self.tiansuo_isv_bind_list, list):
for i in range(0, len(self.tiansuo_isv_bind_list)):
element = self.tiansuo_isv_bind_list[i]
if hasattr(element, 'to_alipay_dict'):
self.tiansuo_isv_bind_list[i] = element.to_alipay_dict()
if hasattr(self.tiansuo_isv_bind_list, 'to_alipay_dict'):
params['tiansuo_isv_bind_list'] = self.tiansuo_isv_bind_list.to_alipay_dict()
else:
params['tiansuo_isv_bind_list'] = self.tiansuo_isv_bind_list
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AntMerchantExpandIndirectTiansuoBindModel()
if 'tiansuo_isv_bind_list' in d:
o.tiansuo_isv_bind_list = d['tiansuo_isv_bind_list']
return o
|