| file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–39k) | suffix (large_string, lengths 0–36.1k) | middle (large_string, lengths 0–29.4k) | fim_type (large_string, 4 classes) |
|---|---|---|---|---|
msi.py | """SCons.Tool.packaging.msi
The msi packager.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/packaging/msi.py 2014/07/05 09:42:21 garyo"
import os
import SCons
from SCons.Action import Action
from SCons.Builder import Builder
from xml.dom.minidom import *
from xml.sax.saxutils import escape
from SCons.Tool.packaging import stripinstallbuilder
#
# Utility functions
#
def convert_to_id(s, id_set):
""" Some parts of .wxs need an Id attribute (for example: The File and
Directory directives. The charset is limited to A-Z, a-z, digits,
underscores, periods. Each Id must begin with a letter or with a
underscore. Google for "CNDL0015" for information about this.
Requirements:
* the string created must only contain chars from the target charset.
* the string created must have a minimal editing distance from the
original string.
* the string created must be unique for the whole .wxs file.
Observation:
* There are 62 chars in the charset.
Idea:
* filter out forbidden characters. Check for a collision with the help
of the id_set. Add the number of the number of the collision at the
end of the created string. Furthermore care for a correct start of
the string.
"""
charset = 'ABCDEFGHIJKLMNOPQRSTUVWXYabcdefghijklmnopqrstuvwxyz0123456789_.'
if s[0] in '0123456789.':
s += '_'+s
id = [c for c in s if c in charset]
# did we already generate an id for this file?
try:
return id_set[id][s]
except KeyError:
# no we did not so initialize with the id
if id not in id_set: id_set[id] = { s : id }
# there is a collision, generate an id which is unique by appending
# the collision number
else: id_set[id][s] = id + str(len(id_set[id]))
return id_set[id][s]
def is_dos_short_file_name(file):
""" examine if the given file is in the 8.3 form.
"""
fname, ext = os.path.splitext(file)
proper_ext = len(ext) == 0 or (2 <= len(ext) <= 4) # the ext contains the dot
proper_fname = file.isupper() and len(fname) <= 8
return proper_ext and proper_fname
def gen_dos_short_file_name(file, filename_set):
""" see http://support.microsoft.com/default.aspx?scid=kb;en-us;Q142982
These are no complete 8.3 dos short names. The ~ char is missing and
replaced with one character from the filename. WiX warns about such
filenames, since a collision might occur. Google for "CNDL1014" for
more information.
"""
# guard this to not confuse the generation
if is_dos_short_file_name(file):
return file
fname, ext = os.path.splitext(file) # ext contains the dot
# first try if it suffices to convert to upper
file = file.upper()
if is_dos_short_file_name(file):
return file
# strip forbidden characters.
forbidden = '."/[]:;=, '
fname = [c for c in fname if c not in forbidden]
# check if we already generated a filename with the same number:
# thisis1.txt, thisis2.txt etc.
duplicate, num = not None, 1
while duplicate:
shortname = "%s%s" % (fname[:8-len(str(num))].upper(),\
str(num))
if len(ext) >= 2:
shortname = "%s%s" % (shortname, ext[:4].upper())
duplicate, num = shortname in filename_set, num+1
assert( is_dos_short_file_name(shortname) ), 'shortname is %s, longname is %s' % (shortname, file)
filename_set.append(shortname)
return shortname
def create_feature_dict(files):
""" X_MSI_FEATURE and doc FileTag's can be used to collect files in a
hierarchy. This function collects the files into this hierarchy.
"""
dict = {}
def add_to_dict( feature, file ):
if not SCons.Util.is_List( feature ):
feature = [ feature ]
for f in feature:
if f not in dict:
dict[ f ] = [ file ]
else:
dict[ f ].append( file )
for file in files:
if hasattr( file, 'PACKAGING_X_MSI_FEATURE' ):
add_to_dict(file.PACKAGING_X_MSI_FEATURE, file)
elif hasattr( file, 'PACKAGING_DOC' ):
add_to_dict( 'PACKAGING_DOC', file )
else:
add_to_dict( 'default', file )
return dict
def | (root):
""" generates globally unique identifiers for parts of the xml which need
them.
Component tags have a special requirement. Their UUID is only allowed to
change if the list of their contained resources has changed. This allows
for clean removal and proper updates.
To handle this requirement, the uuid is generated with an md5 hashing the
whole subtree of a xml node.
"""
from hashlib import md5
# specify which tags need a guid and in which attribute this should be stored.
needs_id = { 'Product' : 'Id',
'Package' : 'Id',
'Component' : 'Guid',
}
# find all XMl nodes matching the key, retrieve their attribute, hash their
# subtree, convert hash to string and add as a attribute to the xml node.
for (key,value) in needs_id.items():
node_list = root.getElementsByTagName(key)
attribute = value
for node in node_list:
hash = md5(node.toxml()).hexdigest()
hash_str = '%s-%s-%s-%s-%s' % ( hash[:8], hash[8:12], hash[12:16], hash[16:20], hash[20:] )
node.attributes[attribute] = hash_str
def string_wxsfile(target, source, env):
return "building WiX file %s"%( target[0].path )
def build_wxsfile(target, source, env):
""" compiles a .wxs file from the keywords given in env['msi_spec'] and
by analyzing the tree of source nodes and their tags.
"""
file = open(target[0].abspath, 'w')
try:
# Create a document with the Wix root tag
doc = Document()
root = doc.createElement( 'Wix' )
root.attributes['xmlns']='http://schemas.microsoft.com/wix/2003/01/wi'
doc.appendChild( root )
filename_set = [] # this is to circumvent duplicates in the shortnames
id_set = {} # this is to circumvent duplicates in the ids
# Create the content
build_wxsfile_header_section(root, env)
build_wxsfile_file_section(root, source, env['NAME'], env['VERSION'], env['VENDOR'], filename_set, id_set)
generate_guids(root)
build_wxsfile_features_section(root, source, env['NAME'], env['VERSION'], env['SUMMARY'], id_set)
build_wxsfile_default_gui(root)
build_license_file(target[0].get_dir(), env)
# write the xml to a file
file.write( doc.toprettyxml() )
# call a user specified function
if 'CHANGE_SPECFILE' in env:
env['CHANGE_SPECFILE'](target, source)
except KeyError, e:
raise SCons.Errors.UserError( '"%s" package field for MSI is missing.' % e.args[0] )
#
# setup function
#
def create_default_directory_layout(root, NAME, VERSION, VENDOR, filename_set):
""" Create the wix default target directory layout and return the innermost
directory.
We assume that the XML tree delivered in the root argument already contains
the Product tag.
Everything is put under the PFiles directory property defined by WiX.
After that a directory with the 'VENDOR' tag is placed and then a
directory with the name of the project and its VERSION. This leads to the
following TARGET Directory Layout:
C:\<PFiles>\<Vendor>\<Projectname-Version>\
Example: C:\Programme\Company\Product-1.2\
"""
doc = Document()
d1 = doc.createElement( 'Directory' )
d1.attributes['Id'] = 'TARGETDIR'
d1.attributes['Name'] = 'SourceDir'
d2 = doc.createElement( 'Directory' )
d2.attributes['Id'] = 'ProgramFilesFolder'
d2.attributes['Name'] = 'PFiles'
d3 = doc.createElement( 'Directory' )
d3.attributes['Id'] = 'VENDOR_folder'
d3.attributes['Name'] = escape( gen_dos_short_file_name( VENDOR, filename_set ) )
d3.attributes['LongName'] = escape( VENDOR )
d4 = doc.createElement( 'Directory' )
project_folder = "%s-%s" % ( NAME, VERSION )
d4.attributes['Id'] = 'MY_DEFAULT_FOLDER'
d4.attributes['Name'] = escape( gen_dos_short_file_name( project_folder, filename_set ) )
d4.attributes['LongName'] = escape( project_folder )
d1.childNodes.append( d2 )
d2.childNodes.append( d3 )
d3.childNodes.append( d4 )
root.getElementsByTagName('Product')[0].childNodes.append( d1 )
return d4
#
# mandatory and optional file tags
#
def build_wxsfile_file_section(root, files, NAME, VERSION, VENDOR, filename_set, id_set):
""" builds the Component sections of the wxs file with their included files.
Files need to be specified in 8.3 format and in the long name format, long
filenames will be converted automatically.
Features are specficied with the 'X_MSI_FEATURE' or 'DOC' FileTag.
"""
root = create_default_directory_layout( root, NAME, VERSION, VENDOR, filename_set )
components = create_feature_dict( files )
factory = Document()
def get_directory( node, dir ):
""" returns the node under the given node representing the directory.
Returns the component node if dir is None or empty.
"""
if dir == '' or not dir:
return node
Directory = node
dir_parts = dir.split(os.path.sep)
# to make sure that our directory ids are unique, the parent folders are
# consecutively added to upper_dir
upper_dir = ''
# walk down the xml tree finding parts of the directory
dir_parts = [d for d in dir_parts if d != '']
for d in dir_parts[:]:
already_created = [c for c in Directory.childNodes
if c.nodeName == 'Directory'
and c.attributes['LongName'].value == escape(d)]
if already_created != []:
Directory = already_created[0]
dir_parts.remove(d)
upper_dir += d
else:
break
for d in dir_parts:
nDirectory = factory.createElement( 'Directory' )
nDirectory.attributes['LongName'] = escape( d )
nDirectory.attributes['Name'] = escape( gen_dos_short_file_name( d, filename_set ) )
upper_dir += d
nDirectory.attributes['Id'] = convert_to_id( upper_dir, id_set )
Directory.childNodes.append( nDirectory )
Directory = nDirectory
return Directory
for file in files:
drive, path = os.path.splitdrive( file.PACKAGING_INSTALL_LOCATION )
filename = os.path.basename( path )
dirname = os.path.dirname( path )
h = {
# tagname : default value
'PACKAGING_X_MSI_VITAL' : 'yes',
'PACKAGING_X_MSI_FILEID' : convert_to_id(filename, id_set),
'PACKAGING_X_MSI_LONGNAME' : filename,
'PACKAGING_X_MSI_SHORTNAME' : gen_dos_short_file_name(filename, filename_set),
'PACKAGING_X_MSI_SOURCE' : file.get_path(),
}
# fill in the default tags given above.
for k,v in [ (k, v) for (k,v) in h.items() if not hasattr(file, k) ]:
setattr( file, k, v )
File = factory.createElement( 'File' )
File.attributes['LongName'] = escape( file.PACKAGING_X_MSI_LONGNAME )
File.attributes['Name'] = escape( file.PACKAGING_X_MSI_SHORTNAME )
File.attributes['Source'] = escape( file.PACKAGING_X_MSI_SOURCE )
File.attributes['Id'] = escape( file.PACKAGING_X_MSI_FILEID )
File.attributes['Vital'] = escape( file.PACKAGING_X_MSI_VITAL )
# create the <Component> Tag under which this file should appear
Component = factory.createElement('Component')
Component.attributes['DiskId'] = '1'
Component.attributes['Id'] = convert_to_id( filename, id_set )
# hang the component node under the root node and the file node
# under the component node.
Directory = get_directory( root, dirname )
Directory.childNodes.append( Component )
Component.childNodes.append( File )
#
# additional functions
#
def build_wxsfile_features_section(root, files, NAME, VERSION, SUMMARY, id_set):
""" This function creates the <features> tag based on the supplied xml tree.
This is achieved by finding all <component>s and adding them to a default target.
It should be called after the tree has been built completly. We assume
that a MY_DEFAULT_FOLDER Property is defined in the wxs file tree.
Furthermore a top-level with the name and VERSION of the software will be created.
An PACKAGING_X_MSI_FEATURE can either be a string, where the feature
DESCRIPTION will be the same as its title or a Tuple, where the first
part will be its title and the second its DESCRIPTION.
"""
factory = Document()
Feature = factory.createElement('Feature')
Feature.attributes['Id'] = 'complete'
Feature.attributes['ConfigurableDirectory'] = 'MY_DEFAULT_FOLDER'
Feature.attributes['Level'] = '1'
Feature.attributes['Title'] = escape( '%s %s' % (NAME, VERSION) )
Feature.attributes['Description'] = escape( SUMMARY )
Feature.attributes['Display'] = 'expand'
for (feature, files) in create_feature_dict(files).items():
SubFeature = factory.createElement('Feature')
SubFeature.attributes['Level'] = '1'
if SCons.Util.is_Tuple(feature):
SubFeature.attributes['Id'] = convert_to_id( feature[0], id_set )
SubFeature.attributes['Title'] = escape(feature[0])
SubFeature.attributes['Description'] = escape(feature[1])
else:
SubFeature.attributes['Id'] = convert_to_id( feature, id_set )
if feature=='default':
SubFeature.attributes['Description'] = 'Main Part'
SubFeature.attributes['Title'] = 'Main Part'
elif feature=='PACKAGING_DOC':
SubFeature.attributes['Description'] = 'Documentation'
SubFeature.attributes['Title'] = 'Documentation'
else:
SubFeature.attributes['Description'] = escape(feature)
SubFeature.attributes['Title'] = escape(feature)
# build the componentrefs. As one of the design decision is that every
# file is also a component we walk the list of files and create a
# reference.
for f in files:
ComponentRef = factory.createElement('ComponentRef')
ComponentRef.attributes['Id'] = convert_to_id( os.path.basename(f.get_path()), id_set )
SubFeature.childNodes.append(ComponentRef)
Feature.childNodes.append(SubFeature)
root.getElementsByTagName('Product')[0].childNodes.append(Feature)
def build_wxsfile_default_gui(root):
""" this function adds a default GUI to the wxs file
"""
factory = Document()
Product = root.getElementsByTagName('Product')[0]
UIRef = factory.createElement('UIRef')
UIRef.attributes['Id'] = 'WixUI_Mondo'
Product.childNodes.append(UIRef)
UIRef = factory.createElement('UIRef')
UIRef.attributes['Id'] = 'WixUI_ErrorProgressText'
Product.childNodes.append(UIRef)
def build_license_file(directory, spec):
""" creates a License.rtf file with the content of "X_MSI_LICENSE_TEXT"
in the given directory
"""
name, text = '', ''
try:
name = spec['LICENSE']
text = spec['X_MSI_LICENSE_TEXT']
except KeyError:
pass # ignore this as X_MSI_LICENSE_TEXT is optional
if name!='' or text!='':
file = open( os.path.join(directory.get_path(), 'License.rtf'), 'w' )
file.write('{\\rtf')
if text!='':
file.write(text.replace('\n', '\\par '))
else:
file.write(name+'\\par\\par')
file.write('}')
file.close()
#
# mandatory and optional package tags
#
def build_wxsfile_header_section(root, spec):
""" Adds the xml file node which define the package meta-data.
"""
# Create the needed DOM nodes and add them at the correct position in the tree.
factory = Document()
Product = factory.createElement( 'Product' )
Package = factory.createElement( 'Package' )
root.childNodes.append( Product )
Product.childNodes.append( Package )
# set "mandatory" default values
if 'X_MSI_LANGUAGE' not in spec:
spec['X_MSI_LANGUAGE'] = '1033' # select english
# mandatory sections, will throw a KeyError if the tag is not available
Product.attributes['Name'] = escape( spec['NAME'] )
Product.attributes['Version'] = escape( spec['VERSION'] )
Product.attributes['Manufacturer'] = escape( spec['VENDOR'] )
Product.attributes['Language'] = escape( spec['X_MSI_LANGUAGE'] )
Package.attributes['Description'] = escape( spec['SUMMARY'] )
# now the optional tags, for which we avoid the KeyErrror exception
if 'DESCRIPTION' in spec:
Package.attributes['Comments'] = escape( spec['DESCRIPTION'] )
if 'X_MSI_UPGRADE_CODE' in spec:
Package.attributes['X_MSI_UPGRADE_CODE'] = escape( spec['X_MSI_UPGRADE_CODE'] )
# We hardcode the media tag as our current model cannot handle it.
Media = factory.createElement('Media')
Media.attributes['Id'] = '1'
Media.attributes['Cabinet'] = 'default.cab'
Media.attributes['EmbedCab'] = 'yes'
root.getElementsByTagName('Product')[0].childNodes.append(Media)
# this builder is the entry-point for .wxs file compiler.
wxs_builder = Builder(
action = Action( build_wxsfile, string_wxsfile ),
ensure_suffix = '.wxs' )
def package(env, target, source, PACKAGEROOT, NAME, VERSION,
DESCRIPTION, SUMMARY, VENDOR, X_MSI_LANGUAGE, **kw):
# make sure that the Wix Builder is in the environment
SCons.Tool.Tool('wix').generate(env)
# get put the keywords for the specfile compiler. These are the arguments
# given to the package function and all optional ones stored in kw, minus
# the the source, target and env one.
loc = locals()
del loc['kw']
kw.update(loc)
del kw['source'], kw['target'], kw['env']
# strip the install builder from the source files
target, source = stripinstallbuilder(target, source, env)
# put the arguments into the env and call the specfile builder.
env['msi_spec'] = kw
specfile = wxs_builder(* [env, target, source], **kw)
# now call the WiX Tool with the built specfile added as a source.
msifile = env.WiX(target, specfile)
# return the target and source tuple.
return (msifile, source+[specfile])
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| generate_guids | identifier_name |
msi.py | """SCons.Tool.packaging.msi
The msi packager.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/packaging/msi.py 2014/07/05 09:42:21 garyo"
import os
import SCons
from SCons.Action import Action
from SCons.Builder import Builder
from xml.dom.minidom import *
from xml.sax.saxutils import escape
from SCons.Tool.packaging import stripinstallbuilder
#
# Utility functions
#
def convert_to_id(s, id_set):
""" Some parts of .wxs need an Id attribute (for example: The File and
Directory directives. The charset is limited to A-Z, a-z, digits,
underscores, periods. Each Id must begin with a letter or with a
underscore. Google for "CNDL0015" for information about this.
Requirements:
* the string created must only contain chars from the target charset.
* the string created must have a minimal editing distance from the
original string.
* the string created must be unique for the whole .wxs file.
Observation:
* There are 62 chars in the charset.
Idea:
* filter out forbidden characters. Check for a collision with the help
of the id_set. Add the number of the number of the collision at the
end of the created string. Furthermore care for a correct start of
the string.
"""
charset = 'ABCDEFGHIJKLMNOPQRSTUVWXYabcdefghijklmnopqrstuvwxyz0123456789_.'
if s[0] in '0123456789.':
|
id = [c for c in s if c in charset]
# did we already generate an id for this file?
try:
return id_set[id][s]
except KeyError:
# no we did not so initialize with the id
if id not in id_set: id_set[id] = { s : id }
# there is a collision, generate an id which is unique by appending
# the collision number
else: id_set[id][s] = id + str(len(id_set[id]))
return id_set[id][s]
def is_dos_short_file_name(file):
""" examine if the given file is in the 8.3 form.
"""
fname, ext = os.path.splitext(file)
proper_ext = len(ext) == 0 or (2 <= len(ext) <= 4) # the ext contains the dot
proper_fname = file.isupper() and len(fname) <= 8
return proper_ext and proper_fname
def gen_dos_short_file_name(file, filename_set):
""" see http://support.microsoft.com/default.aspx?scid=kb;en-us;Q142982
These are no complete 8.3 dos short names. The ~ char is missing and
replaced with one character from the filename. WiX warns about such
filenames, since a collision might occur. Google for "CNDL1014" for
more information.
"""
# guard this to not confuse the generation
if is_dos_short_file_name(file):
return file
fname, ext = os.path.splitext(file) # ext contains the dot
# first try if it suffices to convert to upper
file = file.upper()
if is_dos_short_file_name(file):
return file
# strip forbidden characters.
forbidden = '."/[]:;=, '
fname = [c for c in fname if c not in forbidden]
# check if we already generated a filename with the same number:
# thisis1.txt, thisis2.txt etc.
duplicate, num = not None, 1
while duplicate:
shortname = "%s%s" % (fname[:8-len(str(num))].upper(),\
str(num))
if len(ext) >= 2:
shortname = "%s%s" % (shortname, ext[:4].upper())
duplicate, num = shortname in filename_set, num+1
assert( is_dos_short_file_name(shortname) ), 'shortname is %s, longname is %s' % (shortname, file)
filename_set.append(shortname)
return shortname
def create_feature_dict(files):
""" X_MSI_FEATURE and doc FileTag's can be used to collect files in a
hierarchy. This function collects the files into this hierarchy.
"""
dict = {}
def add_to_dict( feature, file ):
if not SCons.Util.is_List( feature ):
feature = [ feature ]
for f in feature:
if f not in dict:
dict[ f ] = [ file ]
else:
dict[ f ].append( file )
for file in files:
if hasattr( file, 'PACKAGING_X_MSI_FEATURE' ):
add_to_dict(file.PACKAGING_X_MSI_FEATURE, file)
elif hasattr( file, 'PACKAGING_DOC' ):
add_to_dict( 'PACKAGING_DOC', file )
else:
add_to_dict( 'default', file )
return dict
def generate_guids(root):
""" generates globally unique identifiers for parts of the xml which need
them.
Component tags have a special requirement. Their UUID is only allowed to
change if the list of their contained resources has changed. This allows
for clean removal and proper updates.
To handle this requirement, the uuid is generated with an md5 hashing the
whole subtree of a xml node.
"""
from hashlib import md5
# specify which tags need a guid and in which attribute this should be stored.
needs_id = { 'Product' : 'Id',
'Package' : 'Id',
'Component' : 'Guid',
}
# find all XMl nodes matching the key, retrieve their attribute, hash their
# subtree, convert hash to string and add as a attribute to the xml node.
for (key,value) in needs_id.items():
node_list = root.getElementsByTagName(key)
attribute = value
for node in node_list:
hash = md5(node.toxml()).hexdigest()
hash_str = '%s-%s-%s-%s-%s' % ( hash[:8], hash[8:12], hash[12:16], hash[16:20], hash[20:] )
node.attributes[attribute] = hash_str
def string_wxsfile(target, source, env):
return "building WiX file %s"%( target[0].path )
def build_wxsfile(target, source, env):
""" compiles a .wxs file from the keywords given in env['msi_spec'] and
by analyzing the tree of source nodes and their tags.
"""
file = open(target[0].abspath, 'w')
try:
# Create a document with the Wix root tag
doc = Document()
root = doc.createElement( 'Wix' )
root.attributes['xmlns']='http://schemas.microsoft.com/wix/2003/01/wi'
doc.appendChild( root )
filename_set = [] # this is to circumvent duplicates in the shortnames
id_set = {} # this is to circumvent duplicates in the ids
# Create the content
build_wxsfile_header_section(root, env)
build_wxsfile_file_section(root, source, env['NAME'], env['VERSION'], env['VENDOR'], filename_set, id_set)
generate_guids(root)
build_wxsfile_features_section(root, source, env['NAME'], env['VERSION'], env['SUMMARY'], id_set)
build_wxsfile_default_gui(root)
build_license_file(target[0].get_dir(), env)
# write the xml to a file
file.write( doc.toprettyxml() )
# call a user specified function
if 'CHANGE_SPECFILE' in env:
env['CHANGE_SPECFILE'](target, source)
except KeyError, e:
raise SCons.Errors.UserError( '"%s" package field for MSI is missing.' % e.args[0] )
#
# setup function
#
def create_default_directory_layout(root, NAME, VERSION, VENDOR, filename_set):
""" Create the wix default target directory layout and return the innermost
directory.
We assume that the XML tree delivered in the root argument already contains
the Product tag.
Everything is put under the PFiles directory property defined by WiX.
After that a directory with the 'VENDOR' tag is placed and then a
directory with the name of the project and its VERSION. This leads to the
following TARGET Directory Layout:
C:\<PFiles>\<Vendor>\<Projectname-Version>\
Example: C:\Programme\Company\Product-1.2\
"""
doc = Document()
d1 = doc.createElement( 'Directory' )
d1.attributes['Id'] = 'TARGETDIR'
d1.attributes['Name'] = 'SourceDir'
d2 = doc.createElement( 'Directory' )
d2.attributes['Id'] = 'ProgramFilesFolder'
d2.attributes['Name'] = 'PFiles'
d3 = doc.createElement( 'Directory' )
d3.attributes['Id'] = 'VENDOR_folder'
d3.attributes['Name'] = escape( gen_dos_short_file_name( VENDOR, filename_set ) )
d3.attributes['LongName'] = escape( VENDOR )
d4 = doc.createElement( 'Directory' )
project_folder = "%s-%s" % ( NAME, VERSION )
d4.attributes['Id'] = 'MY_DEFAULT_FOLDER'
d4.attributes['Name'] = escape( gen_dos_short_file_name( project_folder, filename_set ) )
d4.attributes['LongName'] = escape( project_folder )
d1.childNodes.append( d2 )
d2.childNodes.append( d3 )
d3.childNodes.append( d4 )
root.getElementsByTagName('Product')[0].childNodes.append( d1 )
return d4
#
# mandatory and optional file tags
#
def build_wxsfile_file_section(root, files, NAME, VERSION, VENDOR, filename_set, id_set):
""" builds the Component sections of the wxs file with their included files.
Files need to be specified in 8.3 format and in the long name format, long
filenames will be converted automatically.
Features are specficied with the 'X_MSI_FEATURE' or 'DOC' FileTag.
"""
root = create_default_directory_layout( root, NAME, VERSION, VENDOR, filename_set )
components = create_feature_dict( files )
factory = Document()
def get_directory( node, dir ):
""" returns the node under the given node representing the directory.
Returns the component node if dir is None or empty.
"""
if dir == '' or not dir:
return node
Directory = node
dir_parts = dir.split(os.path.sep)
# to make sure that our directory ids are unique, the parent folders are
# consecutively added to upper_dir
upper_dir = ''
# walk down the xml tree finding parts of the directory
dir_parts = [d for d in dir_parts if d != '']
for d in dir_parts[:]:
already_created = [c for c in Directory.childNodes
if c.nodeName == 'Directory'
and c.attributes['LongName'].value == escape(d)]
if already_created != []:
Directory = already_created[0]
dir_parts.remove(d)
upper_dir += d
else:
break
for d in dir_parts:
nDirectory = factory.createElement( 'Directory' )
nDirectory.attributes['LongName'] = escape( d )
nDirectory.attributes['Name'] = escape( gen_dos_short_file_name( d, filename_set ) )
upper_dir += d
nDirectory.attributes['Id'] = convert_to_id( upper_dir, id_set )
Directory.childNodes.append( nDirectory )
Directory = nDirectory
return Directory
for file in files:
drive, path = os.path.splitdrive( file.PACKAGING_INSTALL_LOCATION )
filename = os.path.basename( path )
dirname = os.path.dirname( path )
h = {
# tagname : default value
'PACKAGING_X_MSI_VITAL' : 'yes',
'PACKAGING_X_MSI_FILEID' : convert_to_id(filename, id_set),
'PACKAGING_X_MSI_LONGNAME' : filename,
'PACKAGING_X_MSI_SHORTNAME' : gen_dos_short_file_name(filename, filename_set),
'PACKAGING_X_MSI_SOURCE' : file.get_path(),
}
# fill in the default tags given above.
for k,v in [ (k, v) for (k,v) in h.items() if not hasattr(file, k) ]:
setattr( file, k, v )
File = factory.createElement( 'File' )
File.attributes['LongName'] = escape( file.PACKAGING_X_MSI_LONGNAME )
File.attributes['Name'] = escape( file.PACKAGING_X_MSI_SHORTNAME )
File.attributes['Source'] = escape( file.PACKAGING_X_MSI_SOURCE )
File.attributes['Id'] = escape( file.PACKAGING_X_MSI_FILEID )
File.attributes['Vital'] = escape( file.PACKAGING_X_MSI_VITAL )
# create the <Component> Tag under which this file should appear
Component = factory.createElement('Component')
Component.attributes['DiskId'] = '1'
Component.attributes['Id'] = convert_to_id( filename, id_set )
# hang the component node under the root node and the file node
# under the component node.
Directory = get_directory( root, dirname )
Directory.childNodes.append( Component )
Component.childNodes.append( File )
#
# additional functions
#
def build_wxsfile_features_section(root, files, NAME, VERSION, SUMMARY, id_set):
""" This function creates the <features> tag based on the supplied xml tree.
This is achieved by finding all <component>s and adding them to a default target.
It should be called after the tree has been built completly. We assume
that a MY_DEFAULT_FOLDER Property is defined in the wxs file tree.
Furthermore a top-level with the name and VERSION of the software will be created.
An PACKAGING_X_MSI_FEATURE can either be a string, where the feature
DESCRIPTION will be the same as its title or a Tuple, where the first
part will be its title and the second its DESCRIPTION.
"""
factory = Document()
Feature = factory.createElement('Feature')
Feature.attributes['Id'] = 'complete'
Feature.attributes['ConfigurableDirectory'] = 'MY_DEFAULT_FOLDER'
Feature.attributes['Level'] = '1'
Feature.attributes['Title'] = escape( '%s %s' % (NAME, VERSION) )
Feature.attributes['Description'] = escape( SUMMARY )
Feature.attributes['Display'] = 'expand'
for (feature, files) in create_feature_dict(files).items():
SubFeature = factory.createElement('Feature')
SubFeature.attributes['Level'] = '1'
if SCons.Util.is_Tuple(feature):
SubFeature.attributes['Id'] = convert_to_id( feature[0], id_set )
SubFeature.attributes['Title'] = escape(feature[0])
SubFeature.attributes['Description'] = escape(feature[1])
else:
SubFeature.attributes['Id'] = convert_to_id( feature, id_set )
if feature=='default':
SubFeature.attributes['Description'] = 'Main Part'
SubFeature.attributes['Title'] = 'Main Part'
elif feature=='PACKAGING_DOC':
SubFeature.attributes['Description'] = 'Documentation'
SubFeature.attributes['Title'] = 'Documentation'
else:
SubFeature.attributes['Description'] = escape(feature)
SubFeature.attributes['Title'] = escape(feature)
# build the componentrefs. As one of the design decision is that every
# file is also a component we walk the list of files and create a
# reference.
for f in files:
ComponentRef = factory.createElement('ComponentRef')
ComponentRef.attributes['Id'] = convert_to_id( os.path.basename(f.get_path()), id_set )
SubFeature.childNodes.append(ComponentRef)
Feature.childNodes.append(SubFeature)
root.getElementsByTagName('Product')[0].childNodes.append(Feature)
def build_wxsfile_default_gui(root):
""" this function adds a default GUI to the wxs file
"""
factory = Document()
Product = root.getElementsByTagName('Product')[0]
UIRef = factory.createElement('UIRef')
UIRef.attributes['Id'] = 'WixUI_Mondo'
Product.childNodes.append(UIRef)
UIRef = factory.createElement('UIRef')
UIRef.attributes['Id'] = 'WixUI_ErrorProgressText'
Product.childNodes.append(UIRef)
def build_license_file(directory, spec):
""" creates a License.rtf file with the content of "X_MSI_LICENSE_TEXT"
in the given directory
"""
name, text = '', ''
try:
name = spec['LICENSE']
text = spec['X_MSI_LICENSE_TEXT']
except KeyError:
pass # ignore this as X_MSI_LICENSE_TEXT is optional
if name!='' or text!='':
file = open( os.path.join(directory.get_path(), 'License.rtf'), 'w' )
file.write('{\\rtf')
if text!='':
file.write(text.replace('\n', '\\par '))
else:
file.write(name+'\\par\\par')
file.write('}')
file.close()
#
# mandatory and optional package tags
#
def build_wxsfile_header_section(root, spec):
""" Adds the xml file node which define the package meta-data.
"""
# Create the needed DOM nodes and add them at the correct position in the tree.
factory = Document()
Product = factory.createElement( 'Product' )
Package = factory.createElement( 'Package' )
root.childNodes.append( Product )
Product.childNodes.append( Package )
# set "mandatory" default values
if 'X_MSI_LANGUAGE' not in spec:
spec['X_MSI_LANGUAGE'] = '1033' # select english
# mandatory sections, will throw a KeyError if the tag is not available
Product.attributes['Name'] = escape( spec['NAME'] )
Product.attributes['Version'] = escape( spec['VERSION'] )
Product.attributes['Manufacturer'] = escape( spec['VENDOR'] )
Product.attributes['Language'] = escape( spec['X_MSI_LANGUAGE'] )
Package.attributes['Description'] = escape( spec['SUMMARY'] )
# now the optional tags, for which we avoid the KeyErrror exception
if 'DESCRIPTION' in spec:
Package.attributes['Comments'] = escape( spec['DESCRIPTION'] )
if 'X_MSI_UPGRADE_CODE' in spec:
Package.attributes['X_MSI_UPGRADE_CODE'] = escape( spec['X_MSI_UPGRADE_CODE'] )
# We hardcode the media tag as our current model cannot handle it.
Media = factory.createElement('Media')
Media.attributes['Id'] = '1'
Media.attributes['Cabinet'] = 'default.cab'
Media.attributes['EmbedCab'] = 'yes'
root.getElementsByTagName('Product')[0].childNodes.append(Media)
# this builder is the entry-point for .wxs file compiler.
wxs_builder = Builder(
action = Action( build_wxsfile, string_wxsfile ),
ensure_suffix = '.wxs' )
def package(env, target, source, PACKAGEROOT, NAME, VERSION,
DESCRIPTION, SUMMARY, VENDOR, X_MSI_LANGUAGE, **kw):
# make sure that the Wix Builder is in the environment
SCons.Tool.Tool('wix').generate(env)
# get put the keywords for the specfile compiler. These are the arguments
# given to the package function and all optional ones stored in kw, minus
# the the source, target and env one.
loc = locals()
del loc['kw']
kw.update(loc)
del kw['source'], kw['target'], kw['env']
# strip the install builder from the source files
target, source = stripinstallbuilder(target, source, env)
# put the arguments into the env and call the specfile builder.
env['msi_spec'] = kw
specfile = wxs_builder(* [env, target, source], **kw)
# now call the WiX Tool with the built specfile added as a source.
msifile = env.WiX(target, specfile)
# return the target and source tuple.
return (msifile, source+[specfile])
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| s += '_'+s | conditional_block |
msi.py | """SCons.Tool.packaging.msi
The msi packager.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/packaging/msi.py 2014/07/05 09:42:21 garyo"
import os
import SCons
from SCons.Action import Action
from SCons.Builder import Builder
from xml.dom.minidom import *
from xml.sax.saxutils import escape
from SCons.Tool.packaging import stripinstallbuilder
#
# Utility functions
#
def convert_to_id(s, id_set):
|
def is_dos_short_file_name(file):
""" examine if the given file is in the 8.3 form.
"""
fname, ext = os.path.splitext(file)
proper_ext = len(ext) == 0 or (2 <= len(ext) <= 4) # the ext contains the dot
proper_fname = file.isupper() and len(fname) <= 8
return proper_ext and proper_fname
def gen_dos_short_file_name(file, filename_set):
""" see http://support.microsoft.com/default.aspx?scid=kb;en-us;Q142982
These are no complete 8.3 dos short names. The ~ char is missing and
replaced with one character from the filename. WiX warns about such
filenames, since a collision might occur. Google for "CNDL1014" for
more information.
"""
# guard this to not confuse the generation
if is_dos_short_file_name(file):
return file
fname, ext = os.path.splitext(file) # ext contains the dot
# first try if it suffices to convert to upper
file = file.upper()
if is_dos_short_file_name(file):
return file
# strip forbidden characters.
forbidden = '."/[]:;=, '
fname = [c for c in fname if c not in forbidden]
# check if we already generated a filename with the same number:
# thisis1.txt, thisis2.txt etc.
duplicate, num = not None, 1
while duplicate:
shortname = "%s%s" % (fname[:8-len(str(num))].upper(),\
str(num))
if len(ext) >= 2:
shortname = "%s%s" % (shortname, ext[:4].upper())
duplicate, num = shortname in filename_set, num+1
assert( is_dos_short_file_name(shortname) ), 'shortname is %s, longname is %s' % (shortname, file)
filename_set.append(shortname)
return shortname
def create_feature_dict(files):
""" X_MSI_FEATURE and doc FileTag's can be used to collect files in a
hierarchy. This function collects the files into this hierarchy.
"""
dict = {}
def add_to_dict( feature, file ):
if not SCons.Util.is_List( feature ):
feature = [ feature ]
for f in feature:
if f not in dict:
dict[ f ] = [ file ]
else:
dict[ f ].append( file )
for file in files:
if hasattr( file, 'PACKAGING_X_MSI_FEATURE' ):
add_to_dict(file.PACKAGING_X_MSI_FEATURE, file)
elif hasattr( file, 'PACKAGING_DOC' ):
add_to_dict( 'PACKAGING_DOC', file )
else:
add_to_dict( 'default', file )
return dict
def generate_guids(root):
""" generates globally unique identifiers for parts of the xml which need
them.
Component tags have a special requirement. Their UUID is only allowed to
change if the list of their contained resources has changed. This allows
for clean removal and proper updates.
To handle this requirement, the uuid is generated with an md5 hashing the
whole subtree of a xml node.
"""
from hashlib import md5
# specify which tags need a guid and in which attribute this should be stored.
needs_id = { 'Product' : 'Id',
'Package' : 'Id',
'Component' : 'Guid',
}
# find all XMl nodes matching the key, retrieve their attribute, hash their
# subtree, convert hash to string and add as a attribute to the xml node.
for (key,value) in needs_id.items():
node_list = root.getElementsByTagName(key)
attribute = value
for node in node_list:
hash = md5(node.toxml()).hexdigest()
hash_str = '%s-%s-%s-%s-%s' % ( hash[:8], hash[8:12], hash[12:16], hash[16:20], hash[20:] )
node.attributes[attribute] = hash_str
def string_wxsfile(target, source, env):
return "building WiX file %s"%( target[0].path )
def build_wxsfile(target, source, env):
""" compiles a .wxs file from the keywords given in env['msi_spec'] and
by analyzing the tree of source nodes and their tags.
"""
file = open(target[0].abspath, 'w')
try:
# Create a document with the Wix root tag
doc = Document()
root = doc.createElement( 'Wix' )
root.attributes['xmlns']='http://schemas.microsoft.com/wix/2003/01/wi'
doc.appendChild( root )
filename_set = [] # this is to circumvent duplicates in the shortnames
id_set = {} # this is to circumvent duplicates in the ids
# Create the content
build_wxsfile_header_section(root, env)
build_wxsfile_file_section(root, source, env['NAME'], env['VERSION'], env['VENDOR'], filename_set, id_set)
generate_guids(root)
build_wxsfile_features_section(root, source, env['NAME'], env['VERSION'], env['SUMMARY'], id_set)
build_wxsfile_default_gui(root)
build_license_file(target[0].get_dir(), env)
# write the xml to a file
file.write( doc.toprettyxml() )
# call a user specified function
if 'CHANGE_SPECFILE' in env:
env['CHANGE_SPECFILE'](target, source)
except KeyError, e:
raise SCons.Errors.UserError( '"%s" package field for MSI is missing.' % e.args[0] )
#
# setup function
#
def create_default_directory_layout(root, NAME, VERSION, VENDOR, filename_set):
""" Create the wix default target directory layout and return the innermost
directory.
We assume that the XML tree delivered in the root argument already contains
the Product tag.
Everything is put under the PFiles directory property defined by WiX.
After that a directory with the 'VENDOR' tag is placed and then a
directory with the name of the project and its VERSION. This leads to the
following TARGET Directory Layout:
C:\<PFiles>\<Vendor>\<Projectname-Version>\
Example: C:\Programme\Company\Product-1.2\
"""
doc = Document()
d1 = doc.createElement( 'Directory' )
d1.attributes['Id'] = 'TARGETDIR'
d1.attributes['Name'] = 'SourceDir'
d2 = doc.createElement( 'Directory' )
d2.attributes['Id'] = 'ProgramFilesFolder'
d2.attributes['Name'] = 'PFiles'
d3 = doc.createElement( 'Directory' )
d3.attributes['Id'] = 'VENDOR_folder'
d3.attributes['Name'] = escape( gen_dos_short_file_name( VENDOR, filename_set ) )
d3.attributes['LongName'] = escape( VENDOR )
d4 = doc.createElement( 'Directory' )
project_folder = "%s-%s" % ( NAME, VERSION )
d4.attributes['Id'] = 'MY_DEFAULT_FOLDER'
d4.attributes['Name'] = escape( gen_dos_short_file_name( project_folder, filename_set ) )
d4.attributes['LongName'] = escape( project_folder )
d1.childNodes.append( d2 )
d2.childNodes.append( d3 )
d3.childNodes.append( d4 )
root.getElementsByTagName('Product')[0].childNodes.append( d1 )
return d4
#
# mandatory and optional file tags
#
def build_wxsfile_file_section(root, files, NAME, VERSION, VENDOR, filename_set, id_set):
""" builds the Component sections of the wxs file with their included files.
Files need to be specified in 8.3 format and in the long name format, long
filenames will be converted automatically.
Features are specficied with the 'X_MSI_FEATURE' or 'DOC' FileTag.
"""
root = create_default_directory_layout( root, NAME, VERSION, VENDOR, filename_set )
components = create_feature_dict( files )
factory = Document()
def get_directory( node, dir ):
""" returns the node under the given node representing the directory.
Returns the component node if dir is None or empty.
"""
if dir == '' or not dir:
return node
Directory = node
dir_parts = dir.split(os.path.sep)
# to make sure that our directory ids are unique, the parent folders are
# consecutively added to upper_dir
upper_dir = ''
# walk down the xml tree finding parts of the directory
dir_parts = [d for d in dir_parts if d != '']
for d in dir_parts[:]:
already_created = [c for c in Directory.childNodes
if c.nodeName == 'Directory'
and c.attributes['LongName'].value == escape(d)]
if already_created != []:
Directory = already_created[0]
dir_parts.remove(d)
upper_dir += d
else:
break
for d in dir_parts:
nDirectory = factory.createElement( 'Directory' )
nDirectory.attributes['LongName'] = escape( d )
nDirectory.attributes['Name'] = escape( gen_dos_short_file_name( d, filename_set ) )
upper_dir += d
nDirectory.attributes['Id'] = convert_to_id( upper_dir, id_set )
Directory.childNodes.append( nDirectory )
Directory = nDirectory
return Directory
for file in files:
drive, path = os.path.splitdrive( file.PACKAGING_INSTALL_LOCATION )
filename = os.path.basename( path )
dirname = os.path.dirname( path )
h = {
# tagname : default value
'PACKAGING_X_MSI_VITAL' : 'yes',
'PACKAGING_X_MSI_FILEID' : convert_to_id(filename, id_set),
'PACKAGING_X_MSI_LONGNAME' : filename,
'PACKAGING_X_MSI_SHORTNAME' : gen_dos_short_file_name(filename, filename_set),
'PACKAGING_X_MSI_SOURCE' : file.get_path(),
}
# fill in the default tags given above.
for k,v in [ (k, v) for (k,v) in h.items() if not hasattr(file, k) ]:
setattr( file, k, v )
File = factory.createElement( 'File' )
File.attributes['LongName'] = escape( file.PACKAGING_X_MSI_LONGNAME )
File.attributes['Name'] = escape( file.PACKAGING_X_MSI_SHORTNAME )
File.attributes['Source'] = escape( file.PACKAGING_X_MSI_SOURCE )
File.attributes['Id'] = escape( file.PACKAGING_X_MSI_FILEID )
File.attributes['Vital'] = escape( file.PACKAGING_X_MSI_VITAL )
# create the <Component> Tag under which this file should appear
Component = factory.createElement('Component')
Component.attributes['DiskId'] = '1'
Component.attributes['Id'] = convert_to_id( filename, id_set )
# hang the component node under the root node and the file node
# under the component node.
Directory = get_directory( root, dirname )
Directory.childNodes.append( Component )
Component.childNodes.append( File )
#
# additional functions
#
def build_wxsfile_features_section(root, files, NAME, VERSION, SUMMARY, id_set):
""" This function creates the <features> tag based on the supplied xml tree.
This is achieved by finding all <component>s and adding them to a default target.
It should be called after the tree has been built completly. We assume
that a MY_DEFAULT_FOLDER Property is defined in the wxs file tree.
Furthermore a top-level with the name and VERSION of the software will be created.
An PACKAGING_X_MSI_FEATURE can either be a string, where the feature
DESCRIPTION will be the same as its title or a Tuple, where the first
part will be its title and the second its DESCRIPTION.
"""
factory = Document()
Feature = factory.createElement('Feature')
Feature.attributes['Id'] = 'complete'
Feature.attributes['ConfigurableDirectory'] = 'MY_DEFAULT_FOLDER'
Feature.attributes['Level'] = '1'
Feature.attributes['Title'] = escape( '%s %s' % (NAME, VERSION) )
Feature.attributes['Description'] = escape( SUMMARY )
Feature.attributes['Display'] = 'expand'
for (feature, files) in create_feature_dict(files).items():
SubFeature = factory.createElement('Feature')
SubFeature.attributes['Level'] = '1'
if SCons.Util.is_Tuple(feature):
SubFeature.attributes['Id'] = convert_to_id( feature[0], id_set )
SubFeature.attributes['Title'] = escape(feature[0])
SubFeature.attributes['Description'] = escape(feature[1])
else:
SubFeature.attributes['Id'] = convert_to_id( feature, id_set )
if feature=='default':
SubFeature.attributes['Description'] = 'Main Part'
SubFeature.attributes['Title'] = 'Main Part'
elif feature=='PACKAGING_DOC':
SubFeature.attributes['Description'] = 'Documentation'
SubFeature.attributes['Title'] = 'Documentation'
else:
SubFeature.attributes['Description'] = escape(feature)
SubFeature.attributes['Title'] = escape(feature)
# build the componentrefs. As one of the design decision is that every
# file is also a component we walk the list of files and create a
# reference.
for f in files:
ComponentRef = factory.createElement('ComponentRef')
ComponentRef.attributes['Id'] = convert_to_id( os.path.basename(f.get_path()), id_set )
SubFeature.childNodes.append(ComponentRef)
Feature.childNodes.append(SubFeature)
root.getElementsByTagName('Product')[0].childNodes.append(Feature)
def build_wxsfile_default_gui(root):
""" this function adds a default GUI to the wxs file
"""
factory = Document()
Product = root.getElementsByTagName('Product')[0]
UIRef = factory.createElement('UIRef')
UIRef.attributes['Id'] = 'WixUI_Mondo'
Product.childNodes.append(UIRef)
UIRef = factory.createElement('UIRef')
UIRef.attributes['Id'] = 'WixUI_ErrorProgressText'
Product.childNodes.append(UIRef)
def build_license_file(directory, spec):
""" creates a License.rtf file with the content of "X_MSI_LICENSE_TEXT"
in the given directory
"""
name, text = '', ''
try:
name = spec['LICENSE']
text = spec['X_MSI_LICENSE_TEXT']
except KeyError:
pass # ignore this as X_MSI_LICENSE_TEXT is optional
if name!='' or text!='':
file = open( os.path.join(directory.get_path(), 'License.rtf'), 'w' )
file.write('{\\rtf')
if text!='':
file.write(text.replace('\n', '\\par '))
else:
file.write(name+'\\par\\par')
file.write('}')
file.close()
#
# mandatory and optional package tags
#
def build_wxsfile_header_section(root, spec):
""" Adds the xml file node which define the package meta-data.
"""
# Create the needed DOM nodes and add them at the correct position in the tree.
factory = Document()
Product = factory.createElement( 'Product' )
Package = factory.createElement( 'Package' )
root.childNodes.append( Product )
Product.childNodes.append( Package )
# set "mandatory" default values
if 'X_MSI_LANGUAGE' not in spec:
spec['X_MSI_LANGUAGE'] = '1033' # select english
# mandatory sections, will throw a KeyError if the tag is not available
Product.attributes['Name'] = escape( spec['NAME'] )
Product.attributes['Version'] = escape( spec['VERSION'] )
Product.attributes['Manufacturer'] = escape( spec['VENDOR'] )
Product.attributes['Language'] = escape( spec['X_MSI_LANGUAGE'] )
Package.attributes['Description'] = escape( spec['SUMMARY'] )
# now the optional tags, for which we avoid the KeyErrror exception
if 'DESCRIPTION' in spec:
Package.attributes['Comments'] = escape( spec['DESCRIPTION'] )
if 'X_MSI_UPGRADE_CODE' in spec:
Package.attributes['X_MSI_UPGRADE_CODE'] = escape( spec['X_MSI_UPGRADE_CODE'] )
# We hardcode the media tag as our current model cannot handle it.
Media = factory.createElement('Media')
Media.attributes['Id'] = '1'
Media.attributes['Cabinet'] = 'default.cab'
Media.attributes['EmbedCab'] = 'yes'
root.getElementsByTagName('Product')[0].childNodes.append(Media)
# this builder is the entry-point for .wxs file compiler.
wxs_builder = Builder(
action = Action( build_wxsfile, string_wxsfile ),
ensure_suffix = '.wxs' )
def package(env, target, source, PACKAGEROOT, NAME, VERSION,
DESCRIPTION, SUMMARY, VENDOR, X_MSI_LANGUAGE, **kw):
# make sure that the Wix Builder is in the environment
SCons.Tool.Tool('wix').generate(env)
# get put the keywords for the specfile compiler. These are the arguments
# given to the package function and all optional ones stored in kw, minus
# the the source, target and env one.
loc = locals()
del loc['kw']
kw.update(loc)
del kw['source'], kw['target'], kw['env']
# strip the install builder from the source files
target, source = stripinstallbuilder(target, source, env)
# put the arguments into the env and call the specfile builder.
env['msi_spec'] = kw
specfile = wxs_builder(* [env, target, source], **kw)
# now call the WiX Tool with the built specfile added as a source.
msifile = env.WiX(target, specfile)
# return the target and source tuple.
return (msifile, source+[specfile])
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| """ Some parts of .wxs need an Id attribute (for example: The File and
Directory directives. The charset is limited to A-Z, a-z, digits,
underscores, periods. Each Id must begin with a letter or with a
underscore. Google for "CNDL0015" for information about this.
Requirements:
* the string created must only contain chars from the target charset.
* the string created must have a minimal editing distance from the
original string.
* the string created must be unique for the whole .wxs file.
Observation:
* There are 62 chars in the charset.
Idea:
* filter out forbidden characters. Check for a collision with the help
of the id_set. Add the number of the number of the collision at the
end of the created string. Furthermore care for a correct start of
the string.
"""
charset = 'ABCDEFGHIJKLMNOPQRSTUVWXYabcdefghijklmnopqrstuvwxyz0123456789_.'
if s[0] in '0123456789.':
s += '_'+s
id = [c for c in s if c in charset]
# did we already generate an id for this file?
try:
return id_set[id][s]
except KeyError:
# no we did not so initialize with the id
if id not in id_set: id_set[id] = { s : id }
# there is a collision, generate an id which is unique by appending
# the collision number
else: id_set[id][s] = id + str(len(id_set[id]))
return id_set[id][s] | identifier_body |
aws_common.py | from functools import reduce
from typing import TYPE_CHECKING, List, Optional, Union
import boto3
from boto3.session import Session
from datahub.configuration import ConfigModel
from datahub.configuration.common import AllowDenyPattern
from datahub.emitter.mce_builder import DEFAULT_ENV
if TYPE_CHECKING:
from mypy_boto3_glue import GlueClient
from mypy_boto3_s3 import S3Client
from mypy_boto3_sagemaker import SageMakerClient
def assume_role(
role_arn: str, aws_region: str, credentials: Optional[dict] = None
) -> dict:
credentials = credentials or {}
sts_client = boto3.client(
"sts",
region_name=aws_region,
aws_access_key_id=credentials.get("AccessKeyId"),
aws_secret_access_key=credentials.get("SecretAccessKey"),
aws_session_token=credentials.get("SessionToken"),
)
assumed_role_object = sts_client.assume_role(
RoleArn=role_arn, RoleSessionName="DatahubIngestionSource"
)
return assumed_role_object["Credentials"]
class AwsSourceConfig(ConfigModel):
"""
Common AWS credentials config.
Currently used by:
- Glue source
- SageMaker source
"""
env: str = DEFAULT_ENV
database_pattern: AllowDenyPattern = AllowDenyPattern.allow_all()
table_pattern: AllowDenyPattern = AllowDenyPattern.allow_all()
aws_access_key_id: Optional[str] = None
aws_secret_access_key: Optional[str] = None
aws_session_token: Optional[str] = None
aws_role: Optional[Union[str, List[str]]] = None
aws_region: str
def get_session(self) -> Session:
if (
self.aws_access_key_id
and self.aws_secret_access_key
and self.aws_session_token
):
return Session(
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key,
aws_session_token=self.aws_session_token,
region_name=self.aws_region,
)
elif self.aws_access_key_id and self.aws_secret_access_key:
return Session(
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key,
region_name=self.aws_region,
)
elif self.aws_role:
if isinstance(self.aws_role, str):
credentials = assume_role(self.aws_role, self.aws_region)
else:
credentials = reduce(
lambda new_credentials, role_arn: assume_role(
role_arn, self.aws_region, new_credentials
),
self.aws_role,
{},
)
return Session(
aws_access_key_id=credentials["AccessKeyId"],
aws_secret_access_key=credentials["SecretAccessKey"],
aws_session_token=credentials["SessionToken"],
region_name=self.aws_region,
)
else:
return Session(region_name=self.aws_region)
def get_s3_client(self) -> "S3Client":
return self.get_session().client("s3")
def | (self) -> "GlueClient":
return self.get_session().client("glue")
def get_sagemaker_client(self) -> "SageMakerClient":
return self.get_session().client("sagemaker")
def make_s3_urn(s3_uri: str, env: str, suffix: Optional[str] = None) -> str:
if not s3_uri.startswith("s3://"):
raise ValueError("S3 URIs should begin with 's3://'")
# remove S3 prefix (s3://)
s3_name = s3_uri[5:]
if s3_name.endswith("/"):
s3_name = s3_name[:-1]
if suffix is not None:
return f"urn:li:dataset:(urn:li:dataPlatform:s3,{s3_name}_{suffix},{env})"
return f"urn:li:dataset:(urn:li:dataPlatform:s3,{s3_name},{env})"
| get_glue_client | identifier_name |
aws_common.py | from functools import reduce
from typing import TYPE_CHECKING, List, Optional, Union
import boto3
from boto3.session import Session
from datahub.configuration import ConfigModel
from datahub.configuration.common import AllowDenyPattern
from datahub.emitter.mce_builder import DEFAULT_ENV
if TYPE_CHECKING:
from mypy_boto3_glue import GlueClient
from mypy_boto3_s3 import S3Client
from mypy_boto3_sagemaker import SageMakerClient
def assume_role(
role_arn: str, aws_region: str, credentials: Optional[dict] = None
) -> dict:
credentials = credentials or {}
sts_client = boto3.client(
"sts",
region_name=aws_region,
aws_access_key_id=credentials.get("AccessKeyId"),
aws_secret_access_key=credentials.get("SecretAccessKey"),
aws_session_token=credentials.get("SessionToken"),
)
assumed_role_object = sts_client.assume_role(
RoleArn=role_arn, RoleSessionName="DatahubIngestionSource"
)
return assumed_role_object["Credentials"]
class AwsSourceConfig(ConfigModel):
"""
Common AWS credentials config.
Currently used by:
- Glue source
- SageMaker source
"""
env: str = DEFAULT_ENV
database_pattern: AllowDenyPattern = AllowDenyPattern.allow_all()
table_pattern: AllowDenyPattern = AllowDenyPattern.allow_all()
aws_access_key_id: Optional[str] = None
aws_secret_access_key: Optional[str] = None
aws_session_token: Optional[str] = None
aws_role: Optional[Union[str, List[str]]] = None
aws_region: str
def get_session(self) -> Session:
|
def get_s3_client(self) -> "S3Client":
return self.get_session().client("s3")
def get_glue_client(self) -> "GlueClient":
return self.get_session().client("glue")
def get_sagemaker_client(self) -> "SageMakerClient":
return self.get_session().client("sagemaker")
def make_s3_urn(s3_uri: str, env: str, suffix: Optional[str] = None) -> str:
if not s3_uri.startswith("s3://"):
raise ValueError("S3 URIs should begin with 's3://'")
# remove S3 prefix (s3://)
s3_name = s3_uri[5:]
if s3_name.endswith("/"):
s3_name = s3_name[:-1]
if suffix is not None:
return f"urn:li:dataset:(urn:li:dataPlatform:s3,{s3_name}_{suffix},{env})"
return f"urn:li:dataset:(urn:li:dataPlatform:s3,{s3_name},{env})"
| if (
self.aws_access_key_id
and self.aws_secret_access_key
and self.aws_session_token
):
return Session(
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key,
aws_session_token=self.aws_session_token,
region_name=self.aws_region,
)
elif self.aws_access_key_id and self.aws_secret_access_key:
return Session(
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key,
region_name=self.aws_region,
)
elif self.aws_role:
if isinstance(self.aws_role, str):
credentials = assume_role(self.aws_role, self.aws_region)
else:
credentials = reduce(
lambda new_credentials, role_arn: assume_role(
role_arn, self.aws_region, new_credentials
),
self.aws_role,
{},
)
return Session(
aws_access_key_id=credentials["AccessKeyId"],
aws_secret_access_key=credentials["SecretAccessKey"],
aws_session_token=credentials["SessionToken"],
region_name=self.aws_region,
)
else:
return Session(region_name=self.aws_region) | identifier_body |
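
The row above masks the body of get_session as its middle: credentials are resolved in order (explicit key/secret/token, then key/secret, then one or more IAM roles, then the default session), and the role branch folds assume_role over the list so each hop's temporary credentials authenticate the next call. A minimal standalone sketch of that fold, with made-up role ARNs and a stubbed assume_role so it runs without AWS access:

from functools import reduce

def stub_assume_role(role_arn, region, credentials=None):
    # Stand-in for the boto3-backed assume_role shown in the sample;
    # it only mimics the shape of the STS "Credentials" dict.
    return {
        "AccessKeyId": "AKIA-" + role_arn.rsplit("/", 1)[-1],
        "SecretAccessKey": "dummy-secret",
        "SessionToken": "dummy-token",
    }

role_chain = [
    "arn:aws:iam::111111111111:role/first-hop",
    "arn:aws:iam::222222222222:role/second-hop",
]

credentials = reduce(
    lambda creds, arn: stub_assume_role(arn, "us-east-1", creds),
    role_chain,
    {},
)
print(credentials["AccessKeyId"])  # credentials of the last role in the chain
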
aws_common.py | from functools import reduce
from typing import TYPE_CHECKING, List, Optional, Union
import boto3
from boto3.session import Session
from datahub.configuration import ConfigModel
from datahub.configuration.common import AllowDenyPattern
from datahub.emitter.mce_builder import DEFAULT_ENV
if TYPE_CHECKING:
from mypy_boto3_glue import GlueClient
from mypy_boto3_s3 import S3Client
from mypy_boto3_sagemaker import SageMakerClient
def assume_role(
role_arn: str, aws_region: str, credentials: Optional[dict] = None
) -> dict:
credentials = credentials or {}
sts_client = boto3.client(
"sts",
region_name=aws_region,
aws_access_key_id=credentials.get("AccessKeyId"),
aws_secret_access_key=credentials.get("SecretAccessKey"),
aws_session_token=credentials.get("SessionToken"),
)
assumed_role_object = sts_client.assume_role(
RoleArn=role_arn, RoleSessionName="DatahubIngestionSource"
)
return assumed_role_object["Credentials"]
class AwsSourceConfig(ConfigModel):
"""
Common AWS credentials config.
Currently used by:
- Glue source
- SageMaker source
"""
env: str = DEFAULT_ENV
database_pattern: AllowDenyPattern = AllowDenyPattern.allow_all()
table_pattern: AllowDenyPattern = AllowDenyPattern.allow_all()
aws_access_key_id: Optional[str] = None
aws_secret_access_key: Optional[str] = None
aws_session_token: Optional[str] = None
aws_role: Optional[Union[str, List[str]]] = None
aws_region: str
def get_session(self) -> Session:
if (
self.aws_access_key_id
and self.aws_secret_access_key
and self.aws_session_token
):
return Session(
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key,
aws_session_token=self.aws_session_token,
region_name=self.aws_region,
)
elif self.aws_access_key_id and self.aws_secret_access_key:
return Session(
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key,
region_name=self.aws_region,
)
elif self.aws_role:
if isinstance(self.aws_role, str):
credentials = assume_role(self.aws_role, self.aws_region)
else:
|
return Session(
aws_access_key_id=credentials["AccessKeyId"],
aws_secret_access_key=credentials["SecretAccessKey"],
aws_session_token=credentials["SessionToken"],
region_name=self.aws_region,
)
else:
return Session(region_name=self.aws_region)
def get_s3_client(self) -> "S3Client":
return self.get_session().client("s3")
def get_glue_client(self) -> "GlueClient":
return self.get_session().client("glue")
def get_sagemaker_client(self) -> "SageMakerClient":
return self.get_session().client("sagemaker")
def make_s3_urn(s3_uri: str, env: str, suffix: Optional[str] = None) -> str:
if not s3_uri.startswith("s3://"):
raise ValueError("S3 URIs should begin with 's3://'")
# remove S3 prefix (s3://)
s3_name = s3_uri[5:]
if s3_name.endswith("/"):
s3_name = s3_name[:-1]
if suffix is not None:
return f"urn:li:dataset:(urn:li:dataPlatform:s3,{s3_name}_{suffix},{env})"
return f"urn:li:dataset:(urn:li:dataPlatform:s3,{s3_name},{env})"
| credentials = reduce(
lambda new_credentials, role_arn: assume_role(
role_arn, self.aws_region, new_credentials
),
self.aws_role,
{},
) | conditional_block |
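
This record, like the others in the dump, follows the five-column layout from the header (file_name, prefix, suffix, middle, fim_type); the middle is the span that was cut out, here the else-branch reduce(...) assignment labelled conditional_block. Note the cells are stored as prefix, suffix, middle, but they recombine as prefix + middle + suffix. A quick sanity check one might run over such records, shown with a toy row rather than real dataset I/O:

def reassemble(row):
    # Joining the three text cells should reproduce the original file exactly.
    return row["prefix"] + row["middle"] + row["suffix"]

toy_row = {
    "file_name": "example.py",
    "prefix": "def add(a, b):\n    ",
    "middle": "return a + b",
    "suffix": "\n",
    "fim_type": "random_line_split",
}
assert reassemble(toy_row) == "def add(a, b):\n    return a + b\n"
print("round-trip ok")
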
aws_common.py | from functools import reduce
from typing import TYPE_CHECKING, List, Optional, Union
import boto3
from boto3.session import Session
from datahub.configuration import ConfigModel
from datahub.configuration.common import AllowDenyPattern
from datahub.emitter.mce_builder import DEFAULT_ENV
if TYPE_CHECKING:
from mypy_boto3_glue import GlueClient
from mypy_boto3_s3 import S3Client
from mypy_boto3_sagemaker import SageMakerClient
def assume_role(
role_arn: str, aws_region: str, credentials: Optional[dict] = None
) -> dict:
credentials = credentials or {}
sts_client = boto3.client(
"sts",
region_name=aws_region,
aws_access_key_id=credentials.get("AccessKeyId"),
aws_secret_access_key=credentials.get("SecretAccessKey"),
aws_session_token=credentials.get("SessionToken"),
)
assumed_role_object = sts_client.assume_role(
RoleArn=role_arn, RoleSessionName="DatahubIngestionSource"
)
return assumed_role_object["Credentials"]
class AwsSourceConfig(ConfigModel):
"""
Common AWS credentials config.
Currently used by:
- Glue source
- SageMaker source
"""
env: str = DEFAULT_ENV
database_pattern: AllowDenyPattern = AllowDenyPattern.allow_all()
table_pattern: AllowDenyPattern = AllowDenyPattern.allow_all()
aws_access_key_id: Optional[str] = None
aws_secret_access_key: Optional[str] = None
aws_session_token: Optional[str] = None
aws_role: Optional[Union[str, List[str]]] = None
aws_region: str
def get_session(self) -> Session:
if (
self.aws_access_key_id
and self.aws_secret_access_key
and self.aws_session_token
):
return Session(
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key,
aws_session_token=self.aws_session_token,
region_name=self.aws_region,
)
elif self.aws_access_key_id and self.aws_secret_access_key:
return Session(
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key,
region_name=self.aws_region,
)
elif self.aws_role:
if isinstance(self.aws_role, str):
credentials = assume_role(self.aws_role, self.aws_region)
else:
credentials = reduce(
lambda new_credentials, role_arn: assume_role(
role_arn, self.aws_region, new_credentials
),
self.aws_role,
{},
)
return Session(
aws_access_key_id=credentials["AccessKeyId"],
aws_secret_access_key=credentials["SecretAccessKey"],
aws_session_token=credentials["SessionToken"], | else:
return Session(region_name=self.aws_region)
def get_s3_client(self) -> "S3Client":
return self.get_session().client("s3")
def get_glue_client(self) -> "GlueClient":
return self.get_session().client("glue")
def get_sagemaker_client(self) -> "SageMakerClient":
return self.get_session().client("sagemaker")
def make_s3_urn(s3_uri: str, env: str, suffix: Optional[str] = None) -> str:
if not s3_uri.startswith("s3://"):
raise ValueError("S3 URIs should begin with 's3://'")
# remove S3 prefix (s3://)
s3_name = s3_uri[5:]
if s3_name.endswith("/"):
s3_name = s3_name[:-1]
if suffix is not None:
return f"urn:li:dataset:(urn:li:dataPlatform:s3,{s3_name}_{suffix},{env})"
return f"urn:li:dataset:(urn:li:dataPlatform:s3,{s3_name},{env})" | region_name=self.aws_region,
) | random_line_split |
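
make_s3_urn, shown in full in the rows above, strips the s3:// scheme and a single trailing slash before building the dataset URN, optionally appending a suffix to the name. The helper is repeated below (minus comments) so the example runs on its own; the bucket path and env value are arbitrary:

from typing import Optional

def make_s3_urn(s3_uri: str, env: str, suffix: Optional[str] = None) -> str:
    if not s3_uri.startswith("s3://"):
        raise ValueError("S3 URIs should begin with 's3://'")
    s3_name = s3_uri[5:]
    if s3_name.endswith("/"):
        s3_name = s3_name[:-1]
    if suffix is not None:
        return f"urn:li:dataset:(urn:li:dataPlatform:s3,{s3_name}_{suffix},{env})"
    return f"urn:li:dataset:(urn:li:dataPlatform:s3,{s3_name},{env})"

print(make_s3_urn("s3://my-bucket/raw/events/", "PROD"))
# urn:li:dataset:(urn:li:dataPlatform:s3,my-bucket/raw/events,PROD)
print(make_s3_urn("s3://my-bucket/raw/events", "PROD", suffix="2021"))
# urn:li:dataset:(urn:li:dataPlatform:s3,my-bucket/raw/events_2021,PROD)
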
main.ts | import { GameTypes, setMinTimeBetweenRequests, Ranks} from "hive-api";
import { BotFramework, Updater } from "lergins-bot-framework";
import * as path from "path";
import { DiscordWebhook } from "./notifications/DiscordWebhook";
import { TwitterBot } from "./notifications/TwitterBot";
import { Stats } from "./Stats";
import { CurrPlayerUpdater } from "./updater/CurrPlayerUpdater";
import { AchievementUpdater } from "./updater/AchievementUpdater";
import { GamePlayersUpdater } from "./updater/GamePlayersUpdater";
import { MapUpdater } from "./updater/MapUpdater";
import { MedalUpdater } from "./updater/MedalUpdater";
import { PlayerStatsUpdater } from "./updater/PlayerStatsUpdater";
import { TokenUpdater } from "./updater/TokenUpdater";
import { TotalKillsUpdater } from "./updater/TotalKillsUpdater";
import { UniquePlayerUpdater } from "./updater/UniquePlayerUpdater";
import { GameLeaderboardUpdater } from "./updater/GameLeaderboardsUpdater";
import { TotalPointsUpdater } from "./updater/TotalPointsUpdater";
import { SwarmNetUpdater } from "./updater/SwarmNetUpdater";
import { HideBlocklevelUpdater } from "./updater/HideBlockLevelUpdater";
import { nameUpdater } from "./updater/NameUpdater";
import { GamesPlayedUpdater } from './updater/GamesPlayedUpdater';
export const bot = new BotFramework.Builder()
.configFolderPath(path.join(__dirname, '..'))
.observer('twitter', TwitterBot)
.observer('discord-webhook', DiscordWebhook)
.forceFirebaseInit()
.build();
bot.addUpdater(nameUpdater)
bot.addUpdater(new AchievementUpdater())
bot.addUpdater(new CurrPlayerUpdater())
bot.addUpdater(new GamePlayersUpdater())
bot.addUpdater(new MapUpdater())
bot.addUpdater(new MedalUpdater())
bot.addUpdater(new PlayerStatsUpdater())
bot.addUpdater(new TokenUpdater())
bot.addUpdater(new GamesPlayedUpdater())
bot.addUpdater(new TotalPointsUpdater())
bot.addUpdater(new TotalKillsUpdater())
bot.addUpdater(new HideBlocklevelUpdater())
bot.addUpdater(new UniquePlayerUpdater())
bot.addUpdater(new SwarmNetUpdater())
export default bot;
export function config() { return bot.config() }
export function notificationSender() { return bot.notificationSender() }
export function | (updater: Updater) { return bot.addUpdater(updater) }
export function start() { return bot.start() }
export function send(type: string, message: any) { bot.send(type, message) }
async function main() {
setMinTimeBetweenRequests((await bot.config().get('min_time_between_requests')) || 1400);
process.on('SIGTERM', async () => {
Stats.print();
await Stats.saveToGoogleSheets();
console.log(`Stopped!`);
process.exit();
});
await Promise.all([
GameTypes.update(),
Ranks.update()
]);
console.log("Updated Game and Rank lists.");
[
GameTypes.BP,
GameTypes.DR,
GameTypes.HIDE,
GameTypes.SP,
GameTypes.TIMV,
GameTypes.SKY,
GameTypes.DRAW,
GameTypes.GRAV,
GameTypes.BED,
].forEach((type) => bot.addUpdater(new GameLeaderboardUpdater(type)));
if(await config().get("updater_active")){
bot.start();
}else{
console.warn(`!!! DEBUG MODE !!!`)
}
}
main().catch((e) => console.error(e));
| addUpdater | identifier_name |
main.ts | import { GameTypes, setMinTimeBetweenRequests, Ranks} from "hive-api";
import { BotFramework, Updater } from "lergins-bot-framework"; | import { CurrPlayerUpdater } from "./updater/CurrPlayerUpdater";
import { AchievementUpdater } from "./updater/AchievementUpdater";
import { GamePlayersUpdater } from "./updater/GamePlayersUpdater";
import { MapUpdater } from "./updater/MapUpdater";
import { MedalUpdater } from "./updater/MedalUpdater";
import { PlayerStatsUpdater } from "./updater/PlayerStatsUpdater";
import { TokenUpdater } from "./updater/TokenUpdater";
import { TotalKillsUpdater } from "./updater/TotalKillsUpdater";
import { UniquePlayerUpdater } from "./updater/UniquePlayerUpdater";
import { GameLeaderboardUpdater } from "./updater/GameLeaderboardsUpdater";
import { TotalPointsUpdater } from "./updater/TotalPointsUpdater";
import { SwarmNetUpdater } from "./updater/SwarmNetUpdater";
import { HideBlocklevelUpdater } from "./updater/HideBlockLevelUpdater";
import { nameUpdater } from "./updater/NameUpdater";
import { GamesPlayedUpdater } from './updater/GamesPlayedUpdater';
export const bot = new BotFramework.Builder()
.configFolderPath(path.join(__dirname, '..'))
.observer('twitter', TwitterBot)
.observer('discord-webhook', DiscordWebhook)
.forceFirebaseInit()
.build();
bot.addUpdater(nameUpdater)
bot.addUpdater(new AchievementUpdater())
bot.addUpdater(new CurrPlayerUpdater())
bot.addUpdater(new GamePlayersUpdater())
bot.addUpdater(new MapUpdater())
bot.addUpdater(new MedalUpdater())
bot.addUpdater(new PlayerStatsUpdater())
bot.addUpdater(new TokenUpdater())
bot.addUpdater(new GamesPlayedUpdater())
bot.addUpdater(new TotalPointsUpdater())
bot.addUpdater(new TotalKillsUpdater())
bot.addUpdater(new HideBlocklevelUpdater())
bot.addUpdater(new UniquePlayerUpdater())
bot.addUpdater(new SwarmNetUpdater())
export default bot;
export function config() { return bot.config() }
export function notificationSender() { return bot.notificationSender() }
export function addUpdater(updater: Updater) { return bot.addUpdater(updater) }
export function start() { return bot.start() }
export function send(type: string, message: any) { bot.send(type, message) }
async function main() {
setMinTimeBetweenRequests((await bot.config().get('min_time_between_requests')) || 1400);
process.on('SIGTERM', async () => {
Stats.print();
await Stats.saveToGoogleSheets();
console.log(`Stopped!`);
process.exit();
});
await Promise.all([
GameTypes.update(),
Ranks.update()
]);
console.log("Updated Game and Rank lists.");
[
GameTypes.BP,
GameTypes.DR,
GameTypes.HIDE,
GameTypes.SP,
GameTypes.TIMV,
GameTypes.SKY,
GameTypes.DRAW,
GameTypes.GRAV,
GameTypes.BED,
].forEach((type) => bot.addUpdater(new GameLeaderboardUpdater(type)));
if(await config().get("updater_active")){
bot.start();
}else{
console.warn(`!!! DEBUG MODE !!!`)
}
}
main().catch((e) => console.error(e)); | import * as path from "path";
import { DiscordWebhook } from "./notifications/DiscordWebhook";
import { TwitterBot } from "./notifications/TwitterBot";
import { Stats } from "./Stats"; | random_line_split |
main.ts | import { GameTypes, setMinTimeBetweenRequests, Ranks} from "hive-api";
import { BotFramework, Updater } from "lergins-bot-framework";
import * as path from "path";
import { DiscordWebhook } from "./notifications/DiscordWebhook";
import { TwitterBot } from "./notifications/TwitterBot";
import { Stats } from "./Stats";
import { CurrPlayerUpdater } from "./updater/CurrPlayerUpdater";
import { AchievementUpdater } from "./updater/AchievementUpdater";
import { GamePlayersUpdater } from "./updater/GamePlayersUpdater";
import { MapUpdater } from "./updater/MapUpdater";
import { MedalUpdater } from "./updater/MedalUpdater";
import { PlayerStatsUpdater } from "./updater/PlayerStatsUpdater";
import { TokenUpdater } from "./updater/TokenUpdater";
import { TotalKillsUpdater } from "./updater/TotalKillsUpdater";
import { UniquePlayerUpdater } from "./updater/UniquePlayerUpdater";
import { GameLeaderboardUpdater } from "./updater/GameLeaderboardsUpdater";
import { TotalPointsUpdater } from "./updater/TotalPointsUpdater";
import { SwarmNetUpdater } from "./updater/SwarmNetUpdater";
import { HideBlocklevelUpdater } from "./updater/HideBlockLevelUpdater";
import { nameUpdater } from "./updater/NameUpdater";
import { GamesPlayedUpdater } from './updater/GamesPlayedUpdater';
export const bot = new BotFramework.Builder()
.configFolderPath(path.join(__dirname, '..'))
.observer('twitter', TwitterBot)
.observer('discord-webhook', DiscordWebhook)
.forceFirebaseInit()
.build();
bot.addUpdater(nameUpdater)
bot.addUpdater(new AchievementUpdater())
bot.addUpdater(new CurrPlayerUpdater())
bot.addUpdater(new GamePlayersUpdater())
bot.addUpdater(new MapUpdater())
bot.addUpdater(new MedalUpdater())
bot.addUpdater(new PlayerStatsUpdater())
bot.addUpdater(new TokenUpdater())
bot.addUpdater(new GamesPlayedUpdater())
bot.addUpdater(new TotalPointsUpdater())
bot.addUpdater(new TotalKillsUpdater())
bot.addUpdater(new HideBlocklevelUpdater())
bot.addUpdater(new UniquePlayerUpdater())
bot.addUpdater(new SwarmNetUpdater())
export default bot;
export function config() { return bot.config() }
export function notificationSender() { return bot.notificationSender() }
export function addUpdater(updater: Updater) { return bot.addUpdater(updater) }
export function start() { return bot.start() }
export function send(type: string, message: any) { bot.send(type, message) }
async function main() {
setMinTimeBetweenRequests((await bot.config().get('min_time_between_requests')) || 1400);
process.on('SIGTERM', async () => {
Stats.print();
await Stats.saveToGoogleSheets();
console.log(`Stopped!`);
process.exit();
});
await Promise.all([
GameTypes.update(),
Ranks.update()
]);
console.log("Updated Game and Rank lists.");
[
GameTypes.BP,
GameTypes.DR,
GameTypes.HIDE,
GameTypes.SP,
GameTypes.TIMV,
GameTypes.SKY,
GameTypes.DRAW,
GameTypes.GRAV,
GameTypes.BED,
].forEach((type) => bot.addUpdater(new GameLeaderboardUpdater(type)));
if(await config().get("updater_active")){
bot.start();
}else |
}
main().catch((e) => console.error(e));
| {
console.warn(`!!! DEBUG MODE !!!`)
} | conditional_block |
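
main.ts appears several times in this dump, each time with a different span masked and a different fim_type label, just as aws_common.py did earlier. The descriptions below are editorial, inferred from the rows shown here rather than taken from any official schema:

FIM_TYPES = {
    "identifier_name": "middle is a single identifier, such as a function or method name",
    "identifier_body": "middle is the body attached to a definition, such as a function block",
    "conditional_block": "middle is the block attached to a conditional branch",
    "random_line_split": "middle is a run of lines cut at an arbitrary point in the file",
}

for label, meaning in FIM_TYPES.items():
    print(f"{label}: {meaning}")
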
main.ts | import { GameTypes, setMinTimeBetweenRequests, Ranks} from "hive-api";
import { BotFramework, Updater } from "lergins-bot-framework";
import * as path from "path";
import { DiscordWebhook } from "./notifications/DiscordWebhook";
import { TwitterBot } from "./notifications/TwitterBot";
import { Stats } from "./Stats";
import { CurrPlayerUpdater } from "./updater/CurrPlayerUpdater";
import { AchievementUpdater } from "./updater/AchievementUpdater";
import { GamePlayersUpdater } from "./updater/GamePlayersUpdater";
import { MapUpdater } from "./updater/MapUpdater";
import { MedalUpdater } from "./updater/MedalUpdater";
import { PlayerStatsUpdater } from "./updater/PlayerStatsUpdater";
import { TokenUpdater } from "./updater/TokenUpdater";
import { TotalKillsUpdater } from "./updater/TotalKillsUpdater";
import { UniquePlayerUpdater } from "./updater/UniquePlayerUpdater";
import { GameLeaderboardUpdater } from "./updater/GameLeaderboardsUpdater";
import { TotalPointsUpdater } from "./updater/TotalPointsUpdater";
import { SwarmNetUpdater } from "./updater/SwarmNetUpdater";
import { HideBlocklevelUpdater } from "./updater/HideBlockLevelUpdater";
import { nameUpdater } from "./updater/NameUpdater";
import { GamesPlayedUpdater } from './updater/GamesPlayedUpdater';
export const bot = new BotFramework.Builder()
.configFolderPath(path.join(__dirname, '..'))
.observer('twitter', TwitterBot)
.observer('discord-webhook', DiscordWebhook)
.forceFirebaseInit()
.build();
bot.addUpdater(nameUpdater)
bot.addUpdater(new AchievementUpdater())
bot.addUpdater(new CurrPlayerUpdater())
bot.addUpdater(new GamePlayersUpdater())
bot.addUpdater(new MapUpdater())
bot.addUpdater(new MedalUpdater())
bot.addUpdater(new PlayerStatsUpdater())
bot.addUpdater(new TokenUpdater())
bot.addUpdater(new GamesPlayedUpdater())
bot.addUpdater(new TotalPointsUpdater())
bot.addUpdater(new TotalKillsUpdater())
bot.addUpdater(new HideBlocklevelUpdater())
bot.addUpdater(new UniquePlayerUpdater())
bot.addUpdater(new SwarmNetUpdater())
export default bot;
export function config() { return bot.config() }
export function notificationSender() { return bot.notificationSender() }
export function addUpdater(updater: Updater) { return bot.addUpdater(updater) }
export function start() |
export function send(type: string, message: any) { bot.send(type, message) }
async function main() {
setMinTimeBetweenRequests((await bot.config().get('min_time_between_requests')) || 1400);
process.on('SIGTERM', async () => {
Stats.print();
await Stats.saveToGoogleSheets();
console.log(`Stopped!`);
process.exit();
});
await Promise.all([
GameTypes.update(),
Ranks.update()
]);
console.log("Updated Game and Rank lists.");
[
GameTypes.BP,
GameTypes.DR,
GameTypes.HIDE,
GameTypes.SP,
GameTypes.TIMV,
GameTypes.SKY,
GameTypes.DRAW,
GameTypes.GRAV,
GameTypes.BED,
].forEach((type) => bot.addUpdater(new GameLeaderboardUpdater(type)));
if(await config().get("updater_active")){
bot.start();
}else{
console.warn(`!!! DEBUG MODE !!!`)
}
}
main().catch((e) => console.error(e));
| { return bot.start() } | identifier_body |
server.js | var EXPRESS=require("EXPRESS"),
PATH=require("path"),
FS=require("fs"),
CLUSTER=require("cluster"),
Q=require("q"),
HDMA=require("./nodejs/api/hdma"),
CONFIG=require("./nodejs/config"),
LOGGER=require("./nodejs/config/logger.js"),
NUMCPU=2, //require("os").cpus().length,
models={
geoviewer: null
},
domain="",
domainFolder="",
MONGODB=null,
port=(CONFIG&&CONFIG.server)?CONFIG.server.port:8080,
app=null,
server=null,
io=null,
argv=require("minimist")(process.argv.slice(2));
//check arguments in the command line
var value;
for(var k in argv){
value=argv[k]
//port
if(k=='p' && value && value!='') |
}
/**
//cluster
if(CLUSTER.isMaster){
for(var i=0;i<NUMCPU;i++){
CLUSTER.fork();
}
Q.all([HDMA.mongodb.connect("localhost:27017", "HDMA"), HDMA.mongodb.connect("localhost:27017", "IBSS")]).then(function(results){
console.log(results);
//console.log(b);
}).catch(function(err){
console.log("error", err)
})
//init();
}else{
//init();
}
// Listen for dying workers
CLUSTER.on('exit', function (worker) {
// Replace the dead worker,
// we're not sentimental
console.log('Worker ' + worker.id + ' died :(');
CLUSTER.fork();
});
*/
init();
//init server
function init(){
app=EXPRESS();
server=app.listen(port);
io=require("socket.io").listen(server, {resource:"/socket/socket.io", log:false});//, transports:["xhr-polling"]}); //because we are using iis7 as the main web server which does not support websocket. we need to change to long-polling for socket. please refer to http://schmod.ruhoh.com/windows/socket-io-and-iis/
//winston.addColors({debug: 'green',info: 'cyan',silly: 'magenta',warn: 'yellow',error: 'red'})
LOGGER.info("Server is started and listened on port "+port);
//config----------------------------------------------------------------------
//log
var logFile = FS.createWriteStream('./log/server.log', {flags: 'a'}); //use {flags: 'w'} to open in write mode
app.use(EXPRESS.logger({stream: logFile}))
//gzip conpress method
app.use(EXPRESS.compress())
//parse the post data of the body
app.use(EXPRESS.bodyParser());
//render engine
app.set('views', __dirname+"/views/");
app.set('view engine', 'jade');
//jsonp
app.set("jsonp callback", true);
//maxlistener
//app.setMaxListeners(0);
//io.setMaxListeners(0);
//passport required config
//app.use(EXPRESS.static("public"))
app.use(EXPRESS.cookieParser());
app.use(EXPRESS.session({secret:'hdma@SDSU'})); // session secret
app.use(app.router)
//-------------------------------------------------------------------------------
models.geoviewer=require("./nodejs/geoviewer")({mongodb:null, io:io, router:app});
/***********************************************************************************
* RESTful
*********************************************************************************/
//if the domain is not equal to domainFolder, we may need to manually set up the path!!!
app.use("/geoviewer", EXPRESS.static(PATH.join(__dirname, "/public/geoviewer/final")));
app.use("/common", EXPRESS.static(PATH.join(__dirname, "/public/common")));
LOGGER.info("******************************")
LOGGER.info("Server Routes inited!")
}
| {
port=value;
} | conditional_block |
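
In the server.js row above, the conditional_block middle is exactly the braced block after the if condition, { port=value; }. A naive way such a span could be carved out of raw source, shown purely as an illustration (it ignores strings and comments, and there is no claim this is how the dataset was actually built):

def braced_block(text: str, start: int) -> str:
    # Return the substring from the first '{' at or after `start`
    # through its matching '}'.
    open_idx = text.index("{", start)
    depth = 0
    for i in range(open_idx, len(text)):
        if text[i] == "{":
            depth += 1
        elif text[i] == "}":
            depth -= 1
            if depth == 0:
                return text[open_idx:i + 1]
    raise ValueError("unbalanced braces")

src = "if(k=='p' && value && value!=''){\n\t\tport=value;\n\t}"
print(braced_block(src, src.index(")")))
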
server.js | var EXPRESS=require("EXPRESS"),
PATH=require("path"),
FS=require("fs"),
CLUSTER=require("cluster"),
Q=require("q"),
HDMA=require("./nodejs/api/hdma"),
CONFIG=require("./nodejs/config"),
LOGGER=require("./nodejs/config/logger.js"),
NUMCPU=2, //require("os").cpus().length,
models={
geoviewer: null
},
domain="",
domainFolder="",
MONGODB=null,
port=(CONFIG&&CONFIG.server)?CONFIG.server.port:8080,
app=null,
server=null,
io=null,
argv=require("minimist")(process.argv.slice(2));
//check arguments in the command line
var value;
for(var k in argv){
value=argv[k]
//port
if(k=='p' && value && value!=''){
port=value;
}
}
/**
//cluster
if(CLUSTER.isMaster){
for(var i=0;i<NUMCPU;i++){
CLUSTER.fork();
}
Q.all([HDMA.mongodb.connect("localhost:27017", "HDMA"), HDMA.mongodb.connect("localhost:27017", "IBSS")]).then(function(results){
console.log(results);
//console.log(b);
}).catch(function(err){
console.log("error", err)
})
//init();
}else{
//init();
}
// Listen for dying workers
CLUSTER.on('exit', function (worker) {
// Replace the dead worker,
// we're not sentimental
console.log('Worker ' + worker.id + ' died :(');
CLUSTER.fork();
});
*/
init();
//init server
function init() | {
app=EXPRESS();
server=app.listen(port);
io=require("socket.io").listen(server, {resource:"/socket/socket.io", log:false});//, transports:["xhr-polling"]}); //because we are using iis7 as the main web server which does not support websocket. we need to change to long-polling for socket. please refer to http://schmod.ruhoh.com/windows/socket-io-and-iis/
//winston.addColors({debug: 'green',info: 'cyan',silly: 'magenta',warn: 'yellow',error: 'red'})
LOGGER.info("Server is started and listened on port "+port);
//config----------------------------------------------------------------------
//log
var logFile = FS.createWriteStream('./log/server.log', {flags: 'a'}); //use {flags: 'w'} to open in write mode
app.use(EXPRESS.logger({stream: logFile}))
//gzip conpress method
app.use(EXPRESS.compress())
//parse the post data of the body
app.use(EXPRESS.bodyParser());
//render engine
app.set('views', __dirname+"/views/");
app.set('view engine', 'jade');
//jsonp
app.set("jsonp callback", true);
//maxlistener
//app.setMaxListeners(0);
//io.setMaxListeners(0);
//passport required config
//app.use(EXPRESS.static("public"))
app.use(EXPRESS.cookieParser());
app.use(EXPRESS.session({secret:'hdma@SDSU'})); // session secret
app.use(app.router)
//-------------------------------------------------------------------------------
models.geoviewer=require("./nodejs/geoviewer")({mongodb:null, io:io, router:app});
/***********************************************************************************
* RESTful
*********************************************************************************/
//if the domain is not equal to domainFolder, we may need to manually set up the path!!!
app.use("/geoviewer", EXPRESS.static(PATH.join(__dirname, "/public/geoviewer/final")));
app.use("/common", EXPRESS.static(PATH.join(__dirname, "/public/common")));
LOGGER.info("******************************")
LOGGER.info("Server Routes inited!")
} | identifier_body |
|
server.js | var EXPRESS=require("EXPRESS"),
PATH=require("path"),
FS=require("fs"),
CLUSTER=require("cluster"),
Q=require("q"),
HDMA=require("./nodejs/api/hdma"),
CONFIG=require("./nodejs/config"),
LOGGER=require("./nodejs/config/logger.js"),
NUMCPU=2, //require("os").cpus().length,
models={
geoviewer: null
},
domain="",
domainFolder="",
MONGODB=null,
port=(CONFIG&&CONFIG.server)?CONFIG.server.port:8080,
app=null,
server=null,
io=null,
argv=require("minimist")(process.argv.slice(2));
//check arguments in the command line
var value;
for(var k in argv){
value=argv[k]
//port
if(k=='p' && value && value!=''){
port=value;
}
}
/**
//cluster
if(CLUSTER.isMaster){
for(var i=0;i<NUMCPU;i++){
CLUSTER.fork();
}
Q.all([HDMA.mongodb.connect("localhost:27017", "HDMA"), HDMA.mongodb.connect("localhost:27017", "IBSS")]).then(function(results){
console.log(results);
//console.log(b);
}).catch(function(err){
console.log("error", err)
})
//init();
}else{
//init();
}
// Listen for dying workers
CLUSTER.on('exit', function (worker) {
// Replace the dead worker,
// we're not sentimental
console.log('Worker ' + worker.id + ' died :(');
CLUSTER.fork();
});
*/
init();
//init server
function init(){
app=EXPRESS();
server=app.listen(port);
io=require("socket.io").listen(server, {resource:"/socket/socket.io", log:false});//, transports:["xhr-polling"]}); //because we are using iis7 as the main web server which does not support websocket. we need to change to long-polling for socket. please refer to http://schmod.ruhoh.com/windows/socket-io-and-iis/
//winston.addColors({debug: 'green',info: 'cyan',silly: 'magenta',warn: 'yellow',error: 'red'})
LOGGER.info("Server is started and listened on port "+port);
//config----------------------------------------------------------------------
//log
var logFile = FS.createWriteStream('./log/server.log', {flags: 'a'}); //use {flags: 'w'} to open in write mode
app.use(EXPRESS.logger({stream: logFile}))
//gzip conpress method
app.use(EXPRESS.compress())
//parse the post data of the body
app.use(EXPRESS.bodyParser());
//render engine
app.set('views', __dirname+"/views/");
app.set('view engine', 'jade'); | //app.setMaxListeners(0);
//io.setMaxListeners(0);
//passport required config
//app.use(EXPRESS.static("public"))
app.use(EXPRESS.cookieParser());
app.use(EXPRESS.session({secret:'hdma@SDSU'})); // session secret
app.use(app.router)
//-------------------------------------------------------------------------------
models.geoviewer=require("./nodejs/geoviewer")({mongodb:null, io:io, router:app});
/***********************************************************************************
* RESTful
*********************************************************************************/
//if the domain is not equal to domainFolder, we may need to manually set up the path!!!
app.use("/geoviewer", EXPRESS.static(PATH.join(__dirname, "/public/geoviewer/final")));
app.use("/common", EXPRESS.static(PATH.join(__dirname, "/public/common")));
LOGGER.info("******************************")
LOGGER.info("Server Routes inited!")
} |
//jsonp
app.set("jsonp callback", true);
//maxlistener | random_line_split |
server.js | var EXPRESS=require("EXPRESS"),
PATH=require("path"),
FS=require("fs"),
CLUSTER=require("cluster"),
Q=require("q"),
HDMA=require("./nodejs/api/hdma"),
CONFIG=require("./nodejs/config"),
LOGGER=require("./nodejs/config/logger.js"),
NUMCPU=2, //require("os").cpus().length,
models={
geoviewer: null
},
domain="",
domainFolder="",
MONGODB=null,
port=(CONFIG&&CONFIG.server)?CONFIG.server.port:8080,
app=null,
server=null,
io=null,
argv=require("minimist")(process.argv.slice(2));
//check arguments in the command line
var value;
for(var k in argv){
value=argv[k]
//port
if(k=='p' && value && value!=''){
port=value;
}
}
/**
//cluster
if(CLUSTER.isMaster){
for(var i=0;i<NUMCPU;i++){
CLUSTER.fork();
}
Q.all([HDMA.mongodb.connect("localhost:27017", "HDMA"), HDMA.mongodb.connect("localhost:27017", "IBSS")]).then(function(results){
console.log(results);
//console.log(b);
}).catch(function(err){
console.log("error", err)
})
//init();
}else{
//init();
}
// Listen for dying workers
CLUSTER.on('exit', function (worker) {
// Replace the dead worker,
// we're not sentimental
console.log('Worker ' + worker.id + ' died :(');
CLUSTER.fork();
});
*/
init();
//init server
function | (){
app=EXPRESS();
server=app.listen(port);
io=require("socket.io").listen(server, {resource:"/socket/socket.io", log:false});//, transports:["xhr-polling"]}); //because we are using iis7 as the main web server which does not support websocket. we need to change to long-polling for socket. please refer to http://schmod.ruhoh.com/windows/socket-io-and-iis/
//winston.addColors({debug: 'green',info: 'cyan',silly: 'magenta',warn: 'yellow',error: 'red'})
LOGGER.info("Server is started and listened on port "+port);
//config----------------------------------------------------------------------
//log
var logFile = FS.createWriteStream('./log/server.log', {flags: 'a'}); //use {flags: 'w'} to open in write mode
app.use(EXPRESS.logger({stream: logFile}))
//gzip conpress method
app.use(EXPRESS.compress())
//parse the post data of the body
app.use(EXPRESS.bodyParser());
//render engine
app.set('views', __dirname+"/views/");
app.set('view engine', 'jade');
//jsonp
app.set("jsonp callback", true);
//maxlistener
//app.setMaxListeners(0);
//io.setMaxListeners(0);
//passport required config
//app.use(EXPRESS.static("public"))
app.use(EXPRESS.cookieParser());
app.use(EXPRESS.session({secret:'hdma@SDSU'})); // session secret
app.use(app.router)
//-------------------------------------------------------------------------------
models.geoviewer=require("./nodejs/geoviewer")({mongodb:null, io:io, router:app});
/***********************************************************************************
* RESTful
*********************************************************************************/
//if the domain is not equal to domainFolder, we may need to manually set up the path!!!
app.use("/geoviewer", EXPRESS.static(PATH.join(__dirname, "/public/geoviewer/final")));
app.use("/common", EXPRESS.static(PATH.join(__dirname, "/public/common")));
LOGGER.info("******************************")
LOGGER.info("Server Routes inited!")
}
| init | identifier_name |
mod.rs | pub mod md5;
pub mod sha1;
pub trait Hasher
{
/**
* Reset the hasher's state.
*/
fn reset(&mut self);
/**
* Provide input data.
*/
fn update(&mut self, data: &[u8]);
/**
* Retrieve digest result. The output must be large enough to contains result
* size (from output_size method).
*/
fn output(&self, out: &mut [u8]);
/**
* Get the output size in bits.
*/
fn output_size_bits(&self) -> uint;
/**
* Get the block size in bits.
*/
fn block_size_bits(&self) -> uint;
/**
* Get the output size in bytes.
*/
fn output_size(&self) -> uint
{
(self.output_size_bits() + 7) / 8
}
/**
* Get the block size in bytes.
*/
fn block_size(&self) -> uint
{
(self.block_size_bits() + 7) / 8
}
fn | (&self) -> Vec<u8>
{
let size = self.output_size();
let mut buf = Vec::from_elem(size, 0u8);
self.output(buf.as_mut_slice());
buf
}
}
pub trait Hashable {
/**
* Feed the value to the hasher passed in parameter.
*/
fn feed<H: Hasher>(&self, h: &mut H);
/**
* Hash the value to ~[u8].
*
* Reset the hasher passed in parameter, because we want
* an empty hasher to get only the value's hash.
*/
fn to_hash<H: Hasher>(&self, h: &mut H) -> Vec<u8>
{
h.reset();
self.feed(h);
h.digest()
}
}
impl<'a> Hashable for &'a [u8] {
fn feed<H: Hasher>(&self, h: &mut H)
{
h.update(*self)
}
}
| digest | identifier_name |
mod.rs | pub mod md5;
pub mod sha1;
pub trait Hasher
{
/**
* Reset the hasher's state.
*/
fn reset(&mut self);
/**
* Provide input data.
*/
fn update(&mut self, data: &[u8]);
/**
* Retrieve digest result. The output must be large enough to contains result
* size (from output_size method).
*/
fn output(&self, out: &mut [u8]);
/**
* Get the output size in bits.
*/
fn output_size_bits(&self) -> uint;
/**
* Get the block size in bits.
*/
fn block_size_bits(&self) -> uint;
/**
* Get the output size in bytes.
*/
fn output_size(&self) -> uint
{
(self.output_size_bits() + 7) / 8
}
/**
* Get the block size in bytes.
*/
fn block_size(&self) -> uint
{
(self.block_size_bits() + 7) / 8
}
fn digest(&self) -> Vec<u8>
{
let size = self.output_size();
let mut buf = Vec::from_elem(size, 0u8);
self.output(buf.as_mut_slice());
buf
}
}
pub trait Hashable {
/**
* Feed the value to the hasher passed in parameter.
*/
fn feed<H: Hasher>(&self, h: &mut H);
/**
* Hash the value to ~[u8].
*
* Reset the hasher passed in parameter, because we want
* an empty hasher to get only the value's hash.
*/
fn to_hash<H: Hasher>(&self, h: &mut H) -> Vec<u8>
{
h.reset();
self.feed(h);
h.digest()
}
}
impl<'a> Hashable for &'a [u8] {
fn feed<H: Hasher>(&self, h: &mut H)
|
}
| {
h.update(*self)
} | identifier_body |
mod.rs | pub mod md5;
pub mod sha1;
pub trait Hasher
{
/**
* Reset the hasher's state.
*/
fn reset(&mut self);
/**
* Provide input data.
*/
fn update(&mut self, data: &[u8]);
/**
* Retrieve digest result. The output must be large enough to contains result
* size (from output_size method).
*/
fn output(&self, out: &mut [u8]);
/**
* Get the output size in bits.
*/
fn output_size_bits(&self) -> uint;
/**
* Get the block size in bits.
*/
fn block_size_bits(&self) -> uint;
/**
* Get the output size in bytes.
*/
fn output_size(&self) -> uint
{ | * Get the block size in bytes.
*/
fn block_size(&self) -> uint
{
(self.block_size_bits() + 7) / 8
}
fn digest(&self) -> Vec<u8>
{
let size = self.output_size();
let mut buf = Vec::from_elem(size, 0u8);
self.output(buf.as_mut_slice());
buf
}
}
pub trait Hashable {
/**
* Feed the value to the hasher passed in parameter.
*/
fn feed<H: Hasher>(&self, h: &mut H);
/**
* Hash the value to ~[u8].
*
* Reset the hasher passed in parameter, because we want
* an empty hasher to get only the value's hash.
*/
fn to_hash<H: Hasher>(&self, h: &mut H) -> Vec<u8>
{
h.reset();
self.feed(h);
h.digest()
}
}
impl<'a> Hashable for &'a [u8] {
fn feed<H: Hasher>(&self, h: &mut H)
{
h.update(*self)
}
} | (self.output_size_bits() + 7) / 8
}
/** | random_line_split |
feature_column_lib.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, | # See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""FeatureColumns: tools for ingesting and representing features."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long,wildcard-import,g-bad-import-order
from tensorflow.python.feature_column.feature_column import *
from tensorflow.python.feature_column.feature_column_v2 import *
from tensorflow.python.feature_column.sequence_feature_column import *
from tensorflow.python.feature_column.serialization import *
# We import dense_features_v2 first so that the V1 DenseFeatures is the default
# if users directly import feature_column_lib.
from tensorflow.python.keras.feature_column.dense_features_v2 import *
from tensorflow.python.keras.feature_column.dense_features import *
from tensorflow.python.keras.feature_column.sequence_feature_column import *
# pylint: enable=unused-import,line-too-long | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | random_line_split |
validate-changelog.ts | #!/usr/bin/env ts-node
import * as Path from 'path'
import * as Fs from 'fs'
import Ajv, { ErrorObject } from 'ajv'
function handleError(error: string) {
console.error(error)
process.exit(-1)
}
function formatErrors(errors: ErrorObject[]): string |
const repositoryRoot = Path.dirname(__dirname)
const changelogPath = Path.join(repositoryRoot, 'changelog.json')
// eslint-disable-next-line no-sync
const changelog = Fs.readFileSync(changelogPath, 'utf8')
let changelogObj = null
try {
changelogObj = JSON.parse(changelog)
} catch {
handleError(
'Unable to parse the contents of changelog.json into a JSON object. Please review the file contents.'
)
}
const schema = {
$schema: 'http://json-schema.org/draft-07/schema#',
type: 'object',
properties: {
releases: {
type: 'object',
patternProperties: {
'^([0-9]+.[0-9]+.[0-9]+)(-beta[0-9]+|-test[0-9]+)?$': {
type: 'array',
items: {
type: 'string',
},
uniqueItems: true,
},
},
additionalProperties: false,
},
},
}
const ajv = new Ajv({ allErrors: true, uniqueItems: true })
const validate = ajv.compile(schema)
const valid = validate(changelogObj)
if (!valid && validate.errors != null) {
handleError(`Errors: \n${formatErrors(validate.errors)}`)
}
console.log('The changelog is totally fine')
| {
return errors
.map(error => {
const { dataPath, message } = error
const additionalProperties = error.params as any
const additionalProperty = additionalProperties.additionalProperty as string
let additionalPropertyText = ''
if (additionalProperty != null) {
additionalPropertyText = `, found: '${additionalProperties.additionalProperty}'`
}
// dataPath starts with a leading "."," which is a bit confusing
const element = dataPath.substring(1)
return ` - ${element} - ${message}${additionalPropertyText}`
})
.join('\n')
} | identifier_body |
validate-changelog.ts | #!/usr/bin/env ts-node
import * as Path from 'path'
import * as Fs from 'fs'
import Ajv, { ErrorObject } from 'ajv'
function handleError(error: string) {
console.error(error)
process.exit(-1)
}
function formatErrors(errors: ErrorObject[]): string {
return errors
.map(error => {
const { dataPath, message } = error
const additionalProperties = error.params as any
const additionalProperty = additionalProperties.additionalProperty as string
let additionalPropertyText = ''
if (additionalProperty != null) {
additionalPropertyText = `, found: '${additionalProperties.additionalProperty}'`
}
// dataPath starts with a leading "."," which is a bit confusing
const element = dataPath.substring(1)
return ` - ${element} - ${message}${additionalPropertyText}`
})
.join('\n')
}
const repositoryRoot = Path.dirname(__dirname)
const changelogPath = Path.join(repositoryRoot, 'changelog.json')
// eslint-disable-next-line no-sync
const changelog = Fs.readFileSync(changelogPath, 'utf8')
let changelogObj = null
try {
changelogObj = JSON.parse(changelog)
} catch {
handleError(
'Unable to parse the contents of changelog.json into a JSON object. Please review the file contents.'
)
}
const schema = {
$schema: 'http://json-schema.org/draft-07/schema#',
type: 'object',
properties: {
releases: {
type: 'object', | '^([0-9]+.[0-9]+.[0-9]+)(-beta[0-9]+|-test[0-9]+)?$': {
type: 'array',
items: {
type: 'string',
},
uniqueItems: true,
},
},
additionalProperties: false,
},
},
}
const ajv = new Ajv({ allErrors: true, uniqueItems: true })
const validate = ajv.compile(schema)
const valid = validate(changelogObj)
if (!valid && validate.errors != null) {
handleError(`Errors: \n${formatErrors(validate.errors)}`)
}
console.log('The changelog is totally fine') | patternProperties: { | random_line_split |
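
The validate-changelog.ts rows run a Draft-07 JSON Schema through Ajv; the masked middle in the row above is the patternProperties key that constrains release version strings. For comparison only, a roughly equivalent check in Python using the jsonschema package, with a small made-up changelog object (neither the package choice nor the sample data comes from the original script):

from jsonschema import Draft7Validator

schema = {
    "type": "object",
    "properties": {
        "releases": {
            "type": "object",
            "patternProperties": {
                "^([0-9]+.[0-9]+.[0-9]+)(-beta[0-9]+|-test[0-9]+)?$": {
                    "type": "array",
                    "items": {"type": "string"},
                    "uniqueItems": True,
                }
            },
            "additionalProperties": False,
        }
    },
}

changelog = {"releases": {"2.9.0-beta1": ["[Fixed] Example entry"], "not-a-version": []}}

for error in Draft7Validator(schema).iter_errors(changelog):
    # "not-a-version" fails additionalProperties, much like the Ajv errors above.
    print(list(error.absolute_path), error.message)
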
validate-changelog.ts | #!/usr/bin/env ts-node
import * as Path from 'path'
import * as Fs from 'fs'
import Ajv, { ErrorObject } from 'ajv'
function handleError(error: string) {
console.error(error)
process.exit(-1)
}
function formatErrors(errors: ErrorObject[]): string {
return errors
.map(error => {
const { dataPath, message } = error
const additionalProperties = error.params as any
const additionalProperty = additionalProperties.additionalProperty as string
let additionalPropertyText = ''
if (additionalProperty != null) {
additionalPropertyText = `, found: '${additionalProperties.additionalProperty}'`
}
// dataPath starts with a leading "."," which is a bit confusing
const element = dataPath.substring(1)
return ` - ${element} - ${message}${additionalPropertyText}`
})
.join('\n')
}
const repositoryRoot = Path.dirname(__dirname)
const changelogPath = Path.join(repositoryRoot, 'changelog.json')
// eslint-disable-next-line no-sync
const changelog = Fs.readFileSync(changelogPath, 'utf8')
let changelogObj = null
try {
changelogObj = JSON.parse(changelog)
} catch {
handleError(
'Unable to parse the contents of changelog.json into a JSON object. Please review the file contents.'
)
}
const schema = {
$schema: 'http://json-schema.org/draft-07/schema#',
type: 'object',
properties: {
releases: {
type: 'object',
patternProperties: {
'^([0-9]+.[0-9]+.[0-9]+)(-beta[0-9]+|-test[0-9]+)?$': {
type: 'array',
items: {
type: 'string',
},
uniqueItems: true,
},
},
additionalProperties: false,
},
},
}
const ajv = new Ajv({ allErrors: true, uniqueItems: true })
const validate = ajv.compile(schema)
const valid = validate(changelogObj)
if (!valid && validate.errors != null) |
console.log('The changelog is totally fine')
| {
handleError(`Errors: \n${formatErrors(validate.errors)}`)
} | conditional_block |
validate-changelog.ts | #!/usr/bin/env ts-node
import * as Path from 'path'
import * as Fs from 'fs'
import Ajv, { ErrorObject } from 'ajv'
function | (error: string) {
console.error(error)
process.exit(-1)
}
function formatErrors(errors: ErrorObject[]): string {
return errors
.map(error => {
const { dataPath, message } = error
const additionalProperties = error.params as any
const additionalProperty = additionalProperties.additionalProperty as string
let additionalPropertyText = ''
if (additionalProperty != null) {
additionalPropertyText = `, found: '${additionalProperties.additionalProperty}'`
}
// dataPath starts with a leading "."," which is a bit confusing
const element = dataPath.substring(1)
return ` - ${element} - ${message}${additionalPropertyText}`
})
.join('\n')
}
const repositoryRoot = Path.dirname(__dirname)
const changelogPath = Path.join(repositoryRoot, 'changelog.json')
// eslint-disable-next-line no-sync
const changelog = Fs.readFileSync(changelogPath, 'utf8')
let changelogObj = null
try {
changelogObj = JSON.parse(changelog)
} catch {
handleError(
'Unable to parse the contents of changelog.json into a JSON object. Please review the file contents.'
)
}
const schema = {
$schema: 'http://json-schema.org/draft-07/schema#',
type: 'object',
properties: {
releases: {
type: 'object',
patternProperties: {
'^([0-9]+.[0-9]+.[0-9]+)(-beta[0-9]+|-test[0-9]+)?$': {
type: 'array',
items: {
type: 'string',
},
uniqueItems: true,
},
},
additionalProperties: false,
},
},
}
const ajv = new Ajv({ allErrors: true, uniqueItems: true })
const validate = ajv.compile(schema)
const valid = validate(changelogObj)
if (!valid && validate.errors != null) {
handleError(`Errors: \n${formatErrors(validate.errors)}`)
}
console.log('The changelog is totally fine')
| handleError | identifier_name |
tests.py | from reviewboard.accounts.models import LocalSiteProfile
from reviewboard.reviews.models import ReviewRequest
class ProfileTests(TestCase):
"""Testing the Profile model."""
fixtures = ['test_users']
def test_is_profile_visible_with_public(self):
"""Testing User.is_profile_public with public profiles."""
user1 = User.objects.get(username='admin')
user2 = User.objects.get(username='doc')
self.assertTrue(user1.is_profile_visible(user2))
def test_is_profile_visible_with_private(self):
"""Testing User.is_profile_public with private profiles."""
user1 = User.objects.get(username='admin')
user2 = User.objects.get(username='doc')
profile = user1.get_profile()
profile.is_private = True
profile.save()
self.assertFalse(user1.is_profile_visible(user2))
self.assertTrue(user1.is_profile_visible(user1))
user2.is_staff = True
self.assertTrue(user1.is_profile_visible(user2))
@add_fixtures(['test_reviewrequests', 'test_scmtools', 'test_site'])
def test_is_star_unstar_updating_count_correctly(self):
"""Testing if star, unstar affect review request counts correctly."""
user1 = User.objects.get(username='admin')
profile1 = user1.get_profile()
review_request = ReviewRequest.objects.public()[0]
site_profile = profile1.site_profiles.get(local_site=None)
profile1.star_review_request(review_request)
site_profile = LocalSiteProfile.objects.get(pk=site_profile.pk)
self.assertTrue(review_request in
profile1.starred_review_requests.all())
self.assertEqual(site_profile.starred_public_request_count, 1)
profile1.unstar_review_request(review_request)
site_profile = LocalSiteProfile.objects.get(pk=site_profile.pk)
self.assertFalse(review_request in
profile1.starred_review_requests.all())
self.assertEqual(site_profile.starred_public_request_count, 0) | from django.contrib.auth.models import User
from djblets.testing.decorators import add_fixtures
from djblets.testing.testcases import TestCase
| random_line_split |
|
tests.py | from django.contrib.auth.models import User
from djblets.testing.decorators import add_fixtures
from djblets.testing.testcases import TestCase
from reviewboard.accounts.models import LocalSiteProfile
from reviewboard.reviews.models import ReviewRequest
class ProfileTests(TestCase):
"""Testing the Profile model."""
fixtures = ['test_users']
def test_is_profile_visible_with_public(self):
"""Testing User.is_profile_public with public profiles."""
user1 = User.objects.get(username='admin')
user2 = User.objects.get(username='doc')
self.assertTrue(user1.is_profile_visible(user2))
def test_is_profile_visible_with_private(self):
|
@add_fixtures(['test_reviewrequests', 'test_scmtools', 'test_site'])
def test_is_star_unstar_updating_count_correctly(self):
"""Testing if star, unstar affect review request counts correctly."""
user1 = User.objects.get(username='admin')
profile1 = user1.get_profile()
review_request = ReviewRequest.objects.public()[0]
site_profile = profile1.site_profiles.get(local_site=None)
profile1.star_review_request(review_request)
site_profile = LocalSiteProfile.objects.get(pk=site_profile.pk)
self.assertTrue(review_request in
profile1.starred_review_requests.all())
self.assertEqual(site_profile.starred_public_request_count, 1)
profile1.unstar_review_request(review_request)
site_profile = LocalSiteProfile.objects.get(pk=site_profile.pk)
self.assertFalse(review_request in
profile1.starred_review_requests.all())
self.assertEqual(site_profile.starred_public_request_count, 0)
| """Testing User.is_profile_public with private profiles."""
user1 = User.objects.get(username='admin')
user2 = User.objects.get(username='doc')
profile = user1.get_profile()
profile.is_private = True
profile.save()
self.assertFalse(user1.is_profile_visible(user2))
self.assertTrue(user1.is_profile_visible(user1))
user2.is_staff = True
self.assertTrue(user1.is_profile_visible(user2)) | identifier_body |
tests.py | from django.contrib.auth.models import User
from djblets.testing.decorators import add_fixtures
from djblets.testing.testcases import TestCase
from reviewboard.accounts.models import LocalSiteProfile
from reviewboard.reviews.models import ReviewRequest
class ProfileTests(TestCase):
"""Testing the Profile model."""
fixtures = ['test_users']
def test_is_profile_visible_with_public(self):
"""Testing User.is_profile_public with public profiles."""
user1 = User.objects.get(username='admin')
user2 = User.objects.get(username='doc')
self.assertTrue(user1.is_profile_visible(user2))
def test_is_profile_visible_with_private(self):
"""Testing User.is_profile_public with private profiles."""
user1 = User.objects.get(username='admin')
user2 = User.objects.get(username='doc')
profile = user1.get_profile()
profile.is_private = True
profile.save()
self.assertFalse(user1.is_profile_visible(user2))
self.assertTrue(user1.is_profile_visible(user1))
user2.is_staff = True
self.assertTrue(user1.is_profile_visible(user2))
@add_fixtures(['test_reviewrequests', 'test_scmtools', 'test_site'])
def | (self):
"""Testing if star, unstar affect review request counts correctly."""
user1 = User.objects.get(username='admin')
profile1 = user1.get_profile()
review_request = ReviewRequest.objects.public()[0]
site_profile = profile1.site_profiles.get(local_site=None)
profile1.star_review_request(review_request)
site_profile = LocalSiteProfile.objects.get(pk=site_profile.pk)
self.assertTrue(review_request in
profile1.starred_review_requests.all())
self.assertEqual(site_profile.starred_public_request_count, 1)
profile1.unstar_review_request(review_request)
site_profile = LocalSiteProfile.objects.get(pk=site_profile.pk)
self.assertFalse(review_request in
profile1.starred_review_requests.all())
self.assertEqual(site_profile.starred_public_request_count, 0)
| test_is_star_unstar_updating_count_correctly | identifier_name |
sort-tree.component.ts | import { Directive, EventEmitter, ViewChild } from '@angular/core';
import { MatSnackBar } from '@angular/material/snack-bar';
import { Title } from '@angular/platform-browser';
import { TranslateService } from '@ngx-translate/core';
import { SortDefinition } from 'app/core/ui-services/base-sort.service';
import { PromptService } from 'app/core/ui-services/prompt.service';
import { SortingTreeComponent } from 'app/shared/components/sorting-tree/sorting-tree.component';
import { Identifiable } from 'app/shared/models/base/identifiable'; | import { BaseViewModel } from './base-view-model';
export interface SortTreeFilterOption extends Identifiable {
label: string;
id: number;
state: boolean;
}
/**
* Abstract Sort view for hierarchic item trees
*/
@Directive()
export abstract class SortTreeViewComponentDirective<V extends BaseViewModel>
extends BaseViewComponentDirective
implements CanComponentDeactivate {
/**
* Reference to the view child
*/
@ViewChild('osSortedTree', { static: true })
public osSortTree: SortingTreeComponent<V>;
/**
* Emitter to emit if the nodes should expand or collapse.
*/
public readonly changeState: EventEmitter<Boolean> = new EventEmitter<Boolean>();
/**
* Emitter that emits the filters to the sorting tree.
* TODO note that the boolean function currently requires false if the item
* is to be visible!
*/
public readonly changeFilter: EventEmitter<(item: V) => boolean> = new EventEmitter<(item: V) => boolean>();
/**
* Emitter to notice the `tree-sorting.service` for sorting the data-source.
*/
public readonly forceSort = new EventEmitter<SortDefinition<V>>();
/**
* Boolean to check if changes has been made.
*/
public hasChanged = false;
/**
* Boolean to check if filters are active, so they could be removed.
*/
public hasActiveFilter = false;
/**
* Array that holds the number of visible nodes(0) and amount of available
* nodes(1).
*/
public seenNodes: [number, number] = [0, 0];
/**
* Updates the incoming/changing agenda items.
* @param title
* @param translate
* @param matSnackBar
* @param promptService
*/
public constructor(
title: Title,
protected translate: TranslateService,
matSnackBar: MatSnackBar,
protected promptService: PromptService
) {
super(title, translate, matSnackBar);
}
/**
* Function to restore the old state.
*/
public async onCancel(): Promise<void> {
if (await this.canDeactivate()) {
this.osSortTree.setSubscription();
}
}
/**
* Function to set an info if changes has been made.
*
* @param hasChanged Boolean received from the tree to see that changes has been made.
*/
public receiveChanges(hasChanged: boolean): void {
this.hasChanged = hasChanged;
}
/**
* Function to receive the new number of visible nodes when the filter has changed.
*
* @param nextNumberOfSeenNodes is an array with two indices:
* The first gives the number of currently shown nodes.
* The second tells how many nodes available.
*/
public onChangeAmountOfItems(nextNumberOfSeenNodes: [number, number]): void {
this.seenNodes = nextNumberOfSeenNodes;
}
/**
* Function to emit if the nodes should be expanded or collapsed.
*
* @param nextState Is the next state, expanded or collapsed, the nodes should be.
*/
public onStateChange(nextState: boolean): void {
this.changeState.emit(nextState);
}
/**
* Function to open a prompt dialog, so the user will be warned if they have
* made changes and not saved them.
*
* @returns The result from the prompt dialog.
*/
public async canDeactivate(): Promise<boolean> {
if (this.hasChanged) {
const title = this.translate.instant('Do you really want to exit this page?');
const content = this.translate.instant('You made changes.');
return await this.promptService.open(title, content);
}
return true;
}
} | import { CanComponentDeactivate } from 'app/shared/utils/watch-for-changes.guard';
import { BaseViewComponentDirective } from './base-view'; | random_line_split |
sort-tree.component.ts | import { Directive, EventEmitter, ViewChild } from '@angular/core';
import { MatSnackBar } from '@angular/material/snack-bar';
import { Title } from '@angular/platform-browser';
import { TranslateService } from '@ngx-translate/core';
import { SortDefinition } from 'app/core/ui-services/base-sort.service';
import { PromptService } from 'app/core/ui-services/prompt.service';
import { SortingTreeComponent } from 'app/shared/components/sorting-tree/sorting-tree.component';
import { Identifiable } from 'app/shared/models/base/identifiable';
import { CanComponentDeactivate } from 'app/shared/utils/watch-for-changes.guard';
import { BaseViewComponentDirective } from './base-view';
import { BaseViewModel } from './base-view-model';
export interface SortTreeFilterOption extends Identifiable {
label: string;
id: number;
state: boolean;
}
/**
* Abstract Sort view for hierarchic item trees
*/
@Directive()
export abstract class SortTreeViewComponentDirective<V extends BaseViewModel>
extends BaseViewComponentDirective
implements CanComponentDeactivate {
/**
* Reference to the view child
*/
@ViewChild('osSortedTree', { static: true })
public osSortTree: SortingTreeComponent<V>;
/**
* Emitter to emit if the nodes should expand or collapse.
*/
public readonly changeState: EventEmitter<Boolean> = new EventEmitter<Boolean>();
/**
* Emitter that emits the filters to the sorting tree.
* TODO note that the boolean function currently requires false if the item
* is to be visible!
*/
public readonly changeFilter: EventEmitter<(item: V) => boolean> = new EventEmitter<(item: V) => boolean>();
/**
 * Emitter to notify the `tree-sorting.service` to sort the data source.
*/
public readonly forceSort = new EventEmitter<SortDefinition<V>>();
/**
 * Boolean to check if changes have been made.
*/
public hasChanged = false;
/**
* Boolean to check if filters are active, so they could be removed.
*/
public hasActiveFilter = false;
/**
* Array that holds the number of visible nodes(0) and amount of available
* nodes(1).
*/
public seenNodes: [number, number] = [0, 0];
/**
* Updates the incoming/changing agenda items.
* @param title
* @param translate
* @param matSnackBar
* @param promptService
*/
public constructor(
title: Title,
protected translate: TranslateService,
matSnackBar: MatSnackBar,
protected promptService: PromptService
) {
super(title, translate, matSnackBar);
}
/**
* Function to restore the old state.
*/
public async onCancel(): Promise<void> {
if (await this.canDeactivate()) {
this.osSortTree.setSubscription();
}
}
/**
 * Function to record whether changes have been made.
 *
 * @param hasChanged Boolean received from the tree, indicating that changes have been made.
*/
public receiveChanges(hasChanged: boolean): void {
this.hasChanged = hasChanged;
}
/**
* Function to receive the new number of visible nodes when the filter has changed.
*
* @param nextNumberOfSeenNodes is an array with two indices:
* The first gives the number of currently shown nodes.
 * The second tells how many nodes are available.
*/
public onChangeAmountOfItems(nextNumberOfSeenNodes: [number, number]): void {
this.seenNodes = nextNumberOfSeenNodes;
}
/**
* Function to emit if the nodes should be expanded or collapsed.
*
 * @param nextState The next state of the nodes: expanded or collapsed.
*/
public onStateChange(nextState: boolean): void |
/**
* Function to open a prompt dialog, so the user will be warned if they have
* made changes and not saved them.
*
* @returns The result from the prompt dialog.
*/
public async canDeactivate(): Promise<boolean> {
if (this.hasChanged) {
const title = this.translate.instant('Do you really want to exit this page?');
const content = this.translate.instant('You made changes.');
return await this.promptService.open(title, content);
}
return true;
}
}
| {
this.changeState.emit(nextState);
} | identifier_body |
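/*
 * Illustrative sketch only (not part of the dataset row above): a minimal concrete
 * component extending the abstract SortTreeViewComponentDirective shown above.
 * The selector, the empty template, the class name and the './sort-tree.component'
 * import path are assumptions made for this example; the constructor simply mirrors
 * the base class, and onSave() is a hypothetical hook a host component might add.
 */
import { Component } from '@angular/core';
import { Title } from '@angular/platform-browser';
import { MatSnackBar } from '@angular/material/snack-bar';
import { TranslateService } from '@ngx-translate/core';
import { PromptService } from 'app/core/ui-services/prompt.service';
import { BaseViewModel } from './base-view-model';
import { SortTreeViewComponentDirective } from './sort-tree.component';
@Component({
    selector: 'os-example-sort-tree',
    template: ''
})
export class ExampleSortTreeComponent extends SortTreeViewComponentDirective<BaseViewModel> {
    public constructor(
        title: Title,
        translate: TranslateService,
        matSnackBar: MatSnackBar,
        promptService: PromptService
    ) {
        super(title, translate, matSnackBar, promptService);
    }
    // A host component would typically push the sorted tree to a repository here
    // and then clear the change flag reported by the tree.
    public async onSave(): Promise<void> {
        this.receiveChanges(false);
    }
}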
sort-tree.component.ts | import { Directive, EventEmitter, ViewChild } from '@angular/core';
import { MatSnackBar } from '@angular/material/snack-bar';
import { Title } from '@angular/platform-browser';
import { TranslateService } from '@ngx-translate/core';
import { SortDefinition } from 'app/core/ui-services/base-sort.service';
import { PromptService } from 'app/core/ui-services/prompt.service';
import { SortingTreeComponent } from 'app/shared/components/sorting-tree/sorting-tree.component';
import { Identifiable } from 'app/shared/models/base/identifiable';
import { CanComponentDeactivate } from 'app/shared/utils/watch-for-changes.guard';
import { BaseViewComponentDirective } from './base-view';
import { BaseViewModel } from './base-view-model';
export interface SortTreeFilterOption extends Identifiable {
label: string;
id: number;
state: boolean;
}
/**
* Abstract Sort view for hierarchic item trees
*/
@Directive()
export abstract class SortTreeViewComponentDirective<V extends BaseViewModel>
extends BaseViewComponentDirective
implements CanComponentDeactivate {
/**
* Reference to the view child
*/
@ViewChild('osSortedTree', { static: true })
public osSortTree: SortingTreeComponent<V>;
/**
* Emitter to emit if the nodes should expand or collapse.
*/
public readonly changeState: EventEmitter<Boolean> = new EventEmitter<Boolean>();
/**
* Emitter that emits the filters to the sorting tree.
* TODO note that the boolean function currently requires false if the item
* is to be visible!
*/
public readonly changeFilter: EventEmitter<(item: V) => boolean> = new EventEmitter<(item: V) => boolean>();
/**
 * Emitter to notify the `tree-sorting.service` to sort the data source.
*/
public readonly forceSort = new EventEmitter<SortDefinition<V>>();
/**
 * Boolean to check if changes have been made.
*/
public hasChanged = false;
/**
* Boolean to check if filters are active, so they could be removed.
*/
public hasActiveFilter = false;
/**
* Array that holds the number of visible nodes(0) and amount of available
* nodes(1).
*/
public seenNodes: [number, number] = [0, 0];
/**
* Updates the incoming/changing agenda items.
* @param title
* @param translate
* @param matSnackBar
* @param promptService
*/
public constructor(
title: Title,
protected translate: TranslateService,
matSnackBar: MatSnackBar,
protected promptService: PromptService
) {
super(title, translate, matSnackBar);
}
/**
* Function to restore the old state.
*/
public async onCancel(): Promise<void> {
if (await this.canDeactivate()) {
this.osSortTree.setSubscription();
}
}
/**
 * Function to record whether changes have been made.
 *
 * @param hasChanged Boolean received from the tree, indicating that changes have been made.
*/
public receiveChanges(hasChanged: boolean): void {
this.hasChanged = hasChanged;
}
/**
* Function to receive the new number of visible nodes when the filter has changed.
*
* @param nextNumberOfSeenNodes is an array with two indices:
* The first gives the number of currently shown nodes.
 * The second tells how many nodes are available.
*/
public | (nextNumberOfSeenNodes: [number, number]): void {
this.seenNodes = nextNumberOfSeenNodes;
}
/**
* Function to emit if the nodes should be expanded or collapsed.
*
 * @param nextState The next state of the nodes: expanded or collapsed.
*/
public onStateChange(nextState: boolean): void {
this.changeState.emit(nextState);
}
/**
* Function to open a prompt dialog, so the user will be warned if they have
* made changes and not saved them.
*
* @returns The result from the prompt dialog.
*/
public async canDeactivate(): Promise<boolean> {
if (this.hasChanged) {
const title = this.translate.instant('Do you really want to exit this page?');
const content = this.translate.instant('You made changes.');
return await this.promptService.open(title, content);
}
return true;
}
}
| onChangeAmountOfItems | identifier_name |
sort-tree.component.ts | import { Directive, EventEmitter, ViewChild } from '@angular/core';
import { MatSnackBar } from '@angular/material/snack-bar';
import { Title } from '@angular/platform-browser';
import { TranslateService } from '@ngx-translate/core';
import { SortDefinition } from 'app/core/ui-services/base-sort.service';
import { PromptService } from 'app/core/ui-services/prompt.service';
import { SortingTreeComponent } from 'app/shared/components/sorting-tree/sorting-tree.component';
import { Identifiable } from 'app/shared/models/base/identifiable';
import { CanComponentDeactivate } from 'app/shared/utils/watch-for-changes.guard';
import { BaseViewComponentDirective } from './base-view';
import { BaseViewModel } from './base-view-model';
export interface SortTreeFilterOption extends Identifiable {
label: string;
id: number;
state: boolean;
}
/**
* Abstract Sort view for hierarchic item trees
*/
@Directive()
export abstract class SortTreeViewComponentDirective<V extends BaseViewModel>
extends BaseViewComponentDirective
implements CanComponentDeactivate {
/**
* Reference to the view child
*/
@ViewChild('osSortedTree', { static: true })
public osSortTree: SortingTreeComponent<V>;
/**
* Emitter to emit if the nodes should expand or collapse.
*/
public readonly changeState: EventEmitter<Boolean> = new EventEmitter<Boolean>();
/**
* Emitter that emits the filters to the sorting tree.
* TODO note that the boolean function currently requires false if the item
* is to be visible!
*/
public readonly changeFilter: EventEmitter<(item: V) => boolean> = new EventEmitter<(item: V) => boolean>();
/**
 * Emitter to notify the `tree-sorting.service` to sort the data source.
*/
public readonly forceSort = new EventEmitter<SortDefinition<V>>();
/**
 * Boolean to check if changes have been made.
*/
public hasChanged = false;
/**
* Boolean to check if filters are active, so they could be removed.
*/
public hasActiveFilter = false;
/**
* Array that holds the number of visible nodes(0) and amount of available
* nodes(1).
*/
public seenNodes: [number, number] = [0, 0];
/**
* Updates the incoming/changing agenda items.
* @param title
* @param translate
* @param matSnackBar
* @param promptService
*/
public constructor(
title: Title,
protected translate: TranslateService,
matSnackBar: MatSnackBar,
protected promptService: PromptService
) {
super(title, translate, matSnackBar);
}
/**
* Function to restore the old state.
*/
public async onCancel(): Promise<void> {
if (await this.canDeactivate()) |
}
/**
 * Function to record whether changes have been made.
 *
 * @param hasChanged Boolean received from the tree, indicating that changes have been made.
*/
public receiveChanges(hasChanged: boolean): void {
this.hasChanged = hasChanged;
}
/**
* Function to receive the new number of visible nodes when the filter has changed.
*
* @param nextNumberOfSeenNodes is an array with two indices:
* The first gives the number of currently shown nodes.
 * The second tells how many nodes are available.
*/
public onChangeAmountOfItems(nextNumberOfSeenNodes: [number, number]): void {
this.seenNodes = nextNumberOfSeenNodes;
}
/**
* Function to emit if the nodes should be expanded or collapsed.
*
 * @param nextState The next state of the nodes: expanded or collapsed.
*/
public onStateChange(nextState: boolean): void {
this.changeState.emit(nextState);
}
/**
* Function to open a prompt dialog, so the user will be warned if they have
* made changes and not saved them.
*
* @returns The result from the prompt dialog.
*/
public async canDeactivate(): Promise<boolean> {
if (this.hasChanged) {
const title = this.translate.instant('Do you really want to exit this page?');
const content = this.translate.instant('You made changes.');
return await this.promptService.open(title, content);
}
return true;
}
}
| {
this.osSortTree.setSubscription();
} | conditional_block |
usage.rs | use crate as utils;
use rustc_hir as hir;
use rustc_hir::def::Res;
use rustc_hir::intravisit;
use rustc_hir::intravisit::{NestedVisitorMap, Visitor};
use rustc_hir::HirIdSet;
use rustc_hir::{Expr, ExprKind, HirId, Path};
use rustc_infer::infer::TyCtxtInferExt;
use rustc_lint::LateContext;
use rustc_middle::hir::map::Map;
use rustc_middle::mir::FakeReadCause;
use rustc_middle::ty;
use rustc_typeck::expr_use_visitor::{Delegate, ExprUseVisitor, PlaceBase, PlaceWithHirId};
/// Returns a set of mutated local variable IDs, or `None` if mutations could not be determined.
pub fn mutated_variables<'tcx>(expr: &'tcx Expr<'_>, cx: &LateContext<'tcx>) -> Option<HirIdSet> {
let mut delegate = MutVarsDelegate {
used_mutably: HirIdSet::default(),
skip: false,
};
cx.tcx.infer_ctxt().enter(|infcx| {
ExprUseVisitor::new(
&mut delegate,
&infcx,
expr.hir_id.owner,
cx.param_env,
cx.typeck_results(),
)
.walk_expr(expr);
});
if delegate.skip {
return None;
}
Some(delegate.used_mutably)
}
pub fn is_potentially_mutated<'tcx>(variable: &'tcx Path<'_>, expr: &'tcx Expr<'_>, cx: &LateContext<'tcx>) -> bool {
if let Res::Local(id) = variable.res {
mutated_variables(expr, cx).map_or(true, |mutated| mutated.contains(&id))
} else {
true
}
}
struct MutVarsDelegate {
used_mutably: HirIdSet,
skip: bool,
}
impl<'tcx> MutVarsDelegate {
#[allow(clippy::similar_names)]
fn update(&mut self, cat: &PlaceWithHirId<'tcx>) {
match cat.place.base {
PlaceBase::Local(id) => {
self.used_mutably.insert(id);
},
PlaceBase::Upvar(_) => {
//FIXME: This causes false negatives. We can't get the `NodeId` from
//`Categorization::Upvar(_)`. So we search for any `Upvar`s in the
//`while`-body, not just the ones in the condition.
self.skip = true;
},
_ => {},
}
}
}
impl<'tcx> Delegate<'tcx> for MutVarsDelegate {
fn consume(&mut self, _: &PlaceWithHirId<'tcx>, _: HirId) {}
fn borrow(&mut self, cmt: &PlaceWithHirId<'tcx>, _: HirId, bk: ty::BorrowKind) {
if let ty::BorrowKind::MutBorrow = bk {
self.update(cmt);
}
}
fn mutate(&mut self, cmt: &PlaceWithHirId<'tcx>, _: HirId) {
self.update(cmt);
}
fn fake_read(&mut self, _: rustc_typeck::expr_use_visitor::Place<'tcx>, _: FakeReadCause, _: HirId) {}
}
pub struct ParamBindingIdCollector {
binding_hir_ids: Vec<hir::HirId>,
}
impl<'tcx> ParamBindingIdCollector {
fn collect_binding_hir_ids(body: &'tcx hir::Body<'tcx>) -> Vec<hir::HirId> {
let mut hir_ids: Vec<hir::HirId> = Vec::new();
for param in body.params.iter() {
let mut finder = ParamBindingIdCollector {
binding_hir_ids: Vec::new(),
};
finder.visit_param(param);
for hir_id in &finder.binding_hir_ids {
hir_ids.push(*hir_id);
}
}
hir_ids
}
}
impl<'tcx> intravisit::Visitor<'tcx> for ParamBindingIdCollector {
type Map = Map<'tcx>;
fn visit_pat(&mut self, pat: &'tcx hir::Pat<'tcx>) {
if let hir::PatKind::Binding(_, hir_id, ..) = pat.kind {
self.binding_hir_ids.push(hir_id);
}
intravisit::walk_pat(self, pat);
}
fn nested_visit_map(&mut self) -> intravisit::NestedVisitorMap<Self::Map> {
intravisit::NestedVisitorMap::None
}
}
pub struct BindingUsageFinder<'a, 'tcx> {
cx: &'a LateContext<'tcx>,
binding_ids: Vec<hir::HirId>,
usage_found: bool,
}
impl<'a, 'tcx> BindingUsageFinder<'a, 'tcx> {
pub fn are_params_used(cx: &'a LateContext<'tcx>, body: &'tcx hir::Body<'tcx>) -> bool {
let mut finder = BindingUsageFinder {
cx,
binding_ids: ParamBindingIdCollector::collect_binding_hir_ids(body),
usage_found: false,
};
finder.visit_body(body);
finder.usage_found
}
}
impl<'a, 'tcx> intravisit::Visitor<'tcx> for BindingUsageFinder<'a, 'tcx> {
type Map = Map<'tcx>;
fn visit_expr(&mut self, expr: &'tcx hir::Expr<'tcx>) {
if !self.usage_found {
intravisit::walk_expr(self, expr);
}
}
fn visit_path(&mut self, path: &'tcx hir::Path<'tcx>, _: hir::HirId) {
if let hir::def::Res::Local(id) = path.res {
if self.binding_ids.contains(&id) {
self.usage_found = true;
}
}
}
fn nested_visit_map(&mut self) -> intravisit::NestedVisitorMap<Self::Map> {
intravisit::NestedVisitorMap::OnlyBodies(self.cx.tcx.hir())
}
}
struct ReturnBreakContinueMacroVisitor {
seen_return_break_continue: bool,
}
impl ReturnBreakContinueMacroVisitor {
fn new() -> ReturnBreakContinueMacroVisitor {
ReturnBreakContinueMacroVisitor {
seen_return_break_continue: false,
}
}
}
impl<'tcx> Visitor<'tcx> for ReturnBreakContinueMacroVisitor {
type Map = Map<'tcx>;
fn nested_visit_map(&mut self) -> NestedVisitorMap<Self::Map> {
NestedVisitorMap::None
}
fn visit_expr(&mut self, ex: &'tcx Expr<'tcx>) {
if self.seen_return_break_continue {
// No need to look farther if we've already seen one of them
return;
}
match &ex.kind {
ExprKind::Ret(..) | ExprKind::Break(..) | ExprKind::Continue(..) => {
self.seen_return_break_continue = true;
},
// Something special could be done here to handle while or for loop
// desugaring, as this will detect a break if there's a while loop
// or a for loop inside the expression.
_ => {
if utils::in_macro(ex.span) {
self.seen_return_break_continue = true;
} else {
rustc_hir::intravisit::walk_expr(self, ex);
}
},
}
}
}
pub fn contains_return_break_continue_macro(expression: &Expr<'_>) -> bool {
let mut recursive_visitor = ReturnBreakContinueMacroVisitor::new();
recursive_visitor.visit_expr(expression);
recursive_visitor.seen_return_break_continue
}
pub struct UsedAfterExprVisitor<'a, 'tcx> {
cx: &'a LateContext<'tcx>,
expr: &'tcx Expr<'tcx>,
definition: HirId,
past_expr: bool,
used_after_expr: bool,
}
impl<'a, 'tcx> UsedAfterExprVisitor<'a, 'tcx> {
pub fn is_found(cx: &'a LateContext<'tcx>, expr: &'tcx Expr<'_>) -> bool {
utils::path_to_local(expr).map_or(false, |definition| {
let mut visitor = UsedAfterExprVisitor {
cx,
expr,
definition,
past_expr: false,
used_after_expr: false,
};
utils::get_enclosing_block(cx, definition).map_or(false, |block| {
visitor.visit_block(block);
visitor.used_after_expr
})
})
}
}
impl<'a, 'tcx> intravisit::Visitor<'tcx> for UsedAfterExprVisitor<'a, 'tcx> {
type Map = Map<'tcx>;
fn nested_visit_map(&mut self) -> NestedVisitorMap<Self::Map> {
NestedVisitorMap::OnlyBodies(self.cx.tcx.hir())
}
fn visit_expr(&mut self, expr: &'tcx Expr<'tcx>) {
if self.used_after_expr {
return;
}
if expr.hir_id == self.expr.hir_id {
self.past_expr = true;
} else if self.past_expr && utils::path_to_local_id(expr, self.definition) {
self.used_after_expr = true;
} else |
}
}
| {
intravisit::walk_expr(self, expr);
} | conditional_block |
usage.rs | use crate as utils;
use rustc_hir as hir;
use rustc_hir::def::Res;
use rustc_hir::intravisit;
use rustc_hir::intravisit::{NestedVisitorMap, Visitor};
use rustc_hir::HirIdSet;
use rustc_hir::{Expr, ExprKind, HirId, Path};
use rustc_infer::infer::TyCtxtInferExt;
use rustc_lint::LateContext;
use rustc_middle::hir::map::Map;
use rustc_middle::mir::FakeReadCause;
use rustc_middle::ty;
use rustc_typeck::expr_use_visitor::{Delegate, ExprUseVisitor, PlaceBase, PlaceWithHirId};
/// Returns a set of mutated local variable IDs, or `None` if mutations could not be determined.
pub fn mutated_variables<'tcx>(expr: &'tcx Expr<'_>, cx: &LateContext<'tcx>) -> Option<HirIdSet> {
let mut delegate = MutVarsDelegate {
used_mutably: HirIdSet::default(),
skip: false,
};
cx.tcx.infer_ctxt().enter(|infcx| {
ExprUseVisitor::new(
&mut delegate,
&infcx,
expr.hir_id.owner,
cx.param_env,
cx.typeck_results(),
)
.walk_expr(expr);
});
if delegate.skip {
return None;
}
Some(delegate.used_mutably)
}
pub fn is_potentially_mutated<'tcx>(variable: &'tcx Path<'_>, expr: &'tcx Expr<'_>, cx: &LateContext<'tcx>) -> bool {
if let Res::Local(id) = variable.res {
mutated_variables(expr, cx).map_or(true, |mutated| mutated.contains(&id))
} else {
true
}
}
struct MutVarsDelegate {
used_mutably: HirIdSet,
skip: bool,
}
impl<'tcx> MutVarsDelegate {
#[allow(clippy::similar_names)]
fn update(&mut self, cat: &PlaceWithHirId<'tcx>) {
match cat.place.base {
PlaceBase::Local(id) => {
self.used_mutably.insert(id);
},
PlaceBase::Upvar(_) => {
//FIXME: This causes false negatives. We can't get the `NodeId` from
//`Categorization::Upvar(_)`. So we search for any `Upvar`s in the
//`while`-body, not just the ones in the condition.
self.skip = true;
},
_ => {},
}
}
}
impl<'tcx> Delegate<'tcx> for MutVarsDelegate {
fn consume(&mut self, _: &PlaceWithHirId<'tcx>, _: HirId) {}
fn borrow(&mut self, cmt: &PlaceWithHirId<'tcx>, _: HirId, bk: ty::BorrowKind) {
if let ty::BorrowKind::MutBorrow = bk {
self.update(cmt);
}
}
fn mutate(&mut self, cmt: &PlaceWithHirId<'tcx>, _: HirId) {
self.update(cmt);
}
fn fake_read(&mut self, _: rustc_typeck::expr_use_visitor::Place<'tcx>, _: FakeReadCause, _: HirId) {}
}
pub struct ParamBindingIdCollector {
binding_hir_ids: Vec<hir::HirId>,
}
impl<'tcx> ParamBindingIdCollector {
fn collect_binding_hir_ids(body: &'tcx hir::Body<'tcx>) -> Vec<hir::HirId> {
let mut hir_ids: Vec<hir::HirId> = Vec::new();
for param in body.params.iter() { | let mut finder = ParamBindingIdCollector {
binding_hir_ids: Vec::new(),
};
finder.visit_param(param);
for hir_id in &finder.binding_hir_ids {
hir_ids.push(*hir_id);
}
}
hir_ids
}
}
impl<'tcx> intravisit::Visitor<'tcx> for ParamBindingIdCollector {
type Map = Map<'tcx>;
fn visit_pat(&mut self, pat: &'tcx hir::Pat<'tcx>) {
if let hir::PatKind::Binding(_, hir_id, ..) = pat.kind {
self.binding_hir_ids.push(hir_id);
}
intravisit::walk_pat(self, pat);
}
fn nested_visit_map(&mut self) -> intravisit::NestedVisitorMap<Self::Map> {
intravisit::NestedVisitorMap::None
}
}
pub struct BindingUsageFinder<'a, 'tcx> {
cx: &'a LateContext<'tcx>,
binding_ids: Vec<hir::HirId>,
usage_found: bool,
}
impl<'a, 'tcx> BindingUsageFinder<'a, 'tcx> {
pub fn are_params_used(cx: &'a LateContext<'tcx>, body: &'tcx hir::Body<'tcx>) -> bool {
let mut finder = BindingUsageFinder {
cx,
binding_ids: ParamBindingIdCollector::collect_binding_hir_ids(body),
usage_found: false,
};
finder.visit_body(body);
finder.usage_found
}
}
impl<'a, 'tcx> intravisit::Visitor<'tcx> for BindingUsageFinder<'a, 'tcx> {
type Map = Map<'tcx>;
fn visit_expr(&mut self, expr: &'tcx hir::Expr<'tcx>) {
if !self.usage_found {
intravisit::walk_expr(self, expr);
}
}
fn visit_path(&mut self, path: &'tcx hir::Path<'tcx>, _: hir::HirId) {
if let hir::def::Res::Local(id) = path.res {
if self.binding_ids.contains(&id) {
self.usage_found = true;
}
}
}
fn nested_visit_map(&mut self) -> intravisit::NestedVisitorMap<Self::Map> {
intravisit::NestedVisitorMap::OnlyBodies(self.cx.tcx.hir())
}
}
struct ReturnBreakContinueMacroVisitor {
seen_return_break_continue: bool,
}
impl ReturnBreakContinueMacroVisitor {
fn new() -> ReturnBreakContinueMacroVisitor {
ReturnBreakContinueMacroVisitor {
seen_return_break_continue: false,
}
}
}
impl<'tcx> Visitor<'tcx> for ReturnBreakContinueMacroVisitor {
type Map = Map<'tcx>;
fn nested_visit_map(&mut self) -> NestedVisitorMap<Self::Map> {
NestedVisitorMap::None
}
fn visit_expr(&mut self, ex: &'tcx Expr<'tcx>) {
if self.seen_return_break_continue {
// No need to look farther if we've already seen one of them
return;
}
match &ex.kind {
ExprKind::Ret(..) | ExprKind::Break(..) | ExprKind::Continue(..) => {
self.seen_return_break_continue = true;
},
// Something special could be done here to handle while or for loop
// desugaring, as this will detect a break if there's a while loop
// or a for loop inside the expression.
_ => {
if utils::in_macro(ex.span) {
self.seen_return_break_continue = true;
} else {
rustc_hir::intravisit::walk_expr(self, ex);
}
},
}
}
}
pub fn contains_return_break_continue_macro(expression: &Expr<'_>) -> bool {
let mut recursive_visitor = ReturnBreakContinueMacroVisitor::new();
recursive_visitor.visit_expr(expression);
recursive_visitor.seen_return_break_continue
}
pub struct UsedAfterExprVisitor<'a, 'tcx> {
cx: &'a LateContext<'tcx>,
expr: &'tcx Expr<'tcx>,
definition: HirId,
past_expr: bool,
used_after_expr: bool,
}
impl<'a, 'tcx> UsedAfterExprVisitor<'a, 'tcx> {
pub fn is_found(cx: &'a LateContext<'tcx>, expr: &'tcx Expr<'_>) -> bool {
utils::path_to_local(expr).map_or(false, |definition| {
let mut visitor = UsedAfterExprVisitor {
cx,
expr,
definition,
past_expr: false,
used_after_expr: false,
};
utils::get_enclosing_block(cx, definition).map_or(false, |block| {
visitor.visit_block(block);
visitor.used_after_expr
})
})
}
}
impl<'a, 'tcx> intravisit::Visitor<'tcx> for UsedAfterExprVisitor<'a, 'tcx> {
type Map = Map<'tcx>;
fn nested_visit_map(&mut self) -> NestedVisitorMap<Self::Map> {
NestedVisitorMap::OnlyBodies(self.cx.tcx.hir())
}
fn visit_expr(&mut self, expr: &'tcx Expr<'tcx>) {
if self.used_after_expr {
return;
}
if expr.hir_id == self.expr.hir_id {
self.past_expr = true;
} else if self.past_expr && utils::path_to_local_id(expr, self.definition) {
self.used_after_expr = true;
} else {
intravisit::walk_expr(self, expr);
}
}
} | random_line_split |
|
usage.rs | use crate as utils;
use rustc_hir as hir;
use rustc_hir::def::Res;
use rustc_hir::intravisit;
use rustc_hir::intravisit::{NestedVisitorMap, Visitor};
use rustc_hir::HirIdSet;
use rustc_hir::{Expr, ExprKind, HirId, Path};
use rustc_infer::infer::TyCtxtInferExt;
use rustc_lint::LateContext;
use rustc_middle::hir::map::Map;
use rustc_middle::mir::FakeReadCause;
use rustc_middle::ty;
use rustc_typeck::expr_use_visitor::{Delegate, ExprUseVisitor, PlaceBase, PlaceWithHirId};
/// Returns a set of mutated local variable IDs, or `None` if mutations could not be determined.
pub fn mutated_variables<'tcx>(expr: &'tcx Expr<'_>, cx: &LateContext<'tcx>) -> Option<HirIdSet> {
let mut delegate = MutVarsDelegate {
used_mutably: HirIdSet::default(),
skip: false,
};
cx.tcx.infer_ctxt().enter(|infcx| {
ExprUseVisitor::new(
&mut delegate,
&infcx,
expr.hir_id.owner,
cx.param_env,
cx.typeck_results(),
)
.walk_expr(expr);
});
if delegate.skip {
return None;
}
Some(delegate.used_mutably)
}
pub fn is_potentially_mutated<'tcx>(variable: &'tcx Path<'_>, expr: &'tcx Expr<'_>, cx: &LateContext<'tcx>) -> bool {
if let Res::Local(id) = variable.res {
mutated_variables(expr, cx).map_or(true, |mutated| mutated.contains(&id))
} else {
true
}
}
struct MutVarsDelegate {
used_mutably: HirIdSet,
skip: bool,
}
impl<'tcx> MutVarsDelegate {
#[allow(clippy::similar_names)]
fn update(&mut self, cat: &PlaceWithHirId<'tcx>) {
match cat.place.base {
PlaceBase::Local(id) => {
self.used_mutably.insert(id);
},
PlaceBase::Upvar(_) => {
//FIXME: This causes false negatives. We can't get the `NodeId` from
//`Categorization::Upvar(_)`. So we search for any `Upvar`s in the
//`while`-body, not just the ones in the condition.
self.skip = true;
},
_ => {},
}
}
}
impl<'tcx> Delegate<'tcx> for MutVarsDelegate {
fn consume(&mut self, _: &PlaceWithHirId<'tcx>, _: HirId) {}
fn borrow(&mut self, cmt: &PlaceWithHirId<'tcx>, _: HirId, bk: ty::BorrowKind) {
if let ty::BorrowKind::MutBorrow = bk {
self.update(cmt);
}
}
fn mutate(&mut self, cmt: &PlaceWithHirId<'tcx>, _: HirId) {
self.update(cmt);
}
fn fake_read(&mut self, _: rustc_typeck::expr_use_visitor::Place<'tcx>, _: FakeReadCause, _: HirId) {}
}
pub struct ParamBindingIdCollector {
binding_hir_ids: Vec<hir::HirId>,
}
impl<'tcx> ParamBindingIdCollector {
fn collect_binding_hir_ids(body: &'tcx hir::Body<'tcx>) -> Vec<hir::HirId> {
let mut hir_ids: Vec<hir::HirId> = Vec::new();
for param in body.params.iter() {
let mut finder = ParamBindingIdCollector {
binding_hir_ids: Vec::new(),
};
finder.visit_param(param);
for hir_id in &finder.binding_hir_ids {
hir_ids.push(*hir_id);
}
}
hir_ids
}
}
impl<'tcx> intravisit::Visitor<'tcx> for ParamBindingIdCollector {
type Map = Map<'tcx>;
fn visit_pat(&mut self, pat: &'tcx hir::Pat<'tcx>) {
if let hir::PatKind::Binding(_, hir_id, ..) = pat.kind {
self.binding_hir_ids.push(hir_id);
}
intravisit::walk_pat(self, pat);
}
fn nested_visit_map(&mut self) -> intravisit::NestedVisitorMap<Self::Map> {
intravisit::NestedVisitorMap::None
}
}
pub struct BindingUsageFinder<'a, 'tcx> {
cx: &'a LateContext<'tcx>,
binding_ids: Vec<hir::HirId>,
usage_found: bool,
}
impl<'a, 'tcx> BindingUsageFinder<'a, 'tcx> {
pub fn are_params_used(cx: &'a LateContext<'tcx>, body: &'tcx hir::Body<'tcx>) -> bool {
let mut finder = BindingUsageFinder {
cx,
binding_ids: ParamBindingIdCollector::collect_binding_hir_ids(body),
usage_found: false,
};
finder.visit_body(body);
finder.usage_found
}
}
impl<'a, 'tcx> intravisit::Visitor<'tcx> for BindingUsageFinder<'a, 'tcx> {
type Map = Map<'tcx>;
fn visit_expr(&mut self, expr: &'tcx hir::Expr<'tcx>) {
if !self.usage_found {
intravisit::walk_expr(self, expr);
}
}
fn visit_path(&mut self, path: &'tcx hir::Path<'tcx>, _: hir::HirId) {
if let hir::def::Res::Local(id) = path.res {
if self.binding_ids.contains(&id) {
self.usage_found = true;
}
}
}
fn | (&mut self) -> intravisit::NestedVisitorMap<Self::Map> {
intravisit::NestedVisitorMap::OnlyBodies(self.cx.tcx.hir())
}
}
struct ReturnBreakContinueMacroVisitor {
seen_return_break_continue: bool,
}
impl ReturnBreakContinueMacroVisitor {
fn new() -> ReturnBreakContinueMacroVisitor {
ReturnBreakContinueMacroVisitor {
seen_return_break_continue: false,
}
}
}
impl<'tcx> Visitor<'tcx> for ReturnBreakContinueMacroVisitor {
type Map = Map<'tcx>;
fn nested_visit_map(&mut self) -> NestedVisitorMap<Self::Map> {
NestedVisitorMap::None
}
fn visit_expr(&mut self, ex: &'tcx Expr<'tcx>) {
if self.seen_return_break_continue {
// No need to look farther if we've already seen one of them
return;
}
match &ex.kind {
ExprKind::Ret(..) | ExprKind::Break(..) | ExprKind::Continue(..) => {
self.seen_return_break_continue = true;
},
// Something special could be done here to handle while or for loop
// desugaring, as this will detect a break if there's a while loop
// or a for loop inside the expression.
_ => {
if utils::in_macro(ex.span) {
self.seen_return_break_continue = true;
} else {
rustc_hir::intravisit::walk_expr(self, ex);
}
},
}
}
}
pub fn contains_return_break_continue_macro(expression: &Expr<'_>) -> bool {
let mut recursive_visitor = ReturnBreakContinueMacroVisitor::new();
recursive_visitor.visit_expr(expression);
recursive_visitor.seen_return_break_continue
}
pub struct UsedAfterExprVisitor<'a, 'tcx> {
cx: &'a LateContext<'tcx>,
expr: &'tcx Expr<'tcx>,
definition: HirId,
past_expr: bool,
used_after_expr: bool,
}
impl<'a, 'tcx> UsedAfterExprVisitor<'a, 'tcx> {
pub fn is_found(cx: &'a LateContext<'tcx>, expr: &'tcx Expr<'_>) -> bool {
utils::path_to_local(expr).map_or(false, |definition| {
let mut visitor = UsedAfterExprVisitor {
cx,
expr,
definition,
past_expr: false,
used_after_expr: false,
};
utils::get_enclosing_block(cx, definition).map_or(false, |block| {
visitor.visit_block(block);
visitor.used_after_expr
})
})
}
}
impl<'a, 'tcx> intravisit::Visitor<'tcx> for UsedAfterExprVisitor<'a, 'tcx> {
type Map = Map<'tcx>;
fn nested_visit_map(&mut self) -> NestedVisitorMap<Self::Map> {
NestedVisitorMap::OnlyBodies(self.cx.tcx.hir())
}
fn visit_expr(&mut self, expr: &'tcx Expr<'tcx>) {
if self.used_after_expr {
return;
}
if expr.hir_id == self.expr.hir_id {
self.past_expr = true;
} else if self.past_expr && utils::path_to_local_id(expr, self.definition) {
self.used_after_expr = true;
} else {
intravisit::walk_expr(self, expr);
}
}
}
| nested_visit_map | identifier_name |
usage.rs | use crate as utils;
use rustc_hir as hir;
use rustc_hir::def::Res;
use rustc_hir::intravisit;
use rustc_hir::intravisit::{NestedVisitorMap, Visitor};
use rustc_hir::HirIdSet;
use rustc_hir::{Expr, ExprKind, HirId, Path};
use rustc_infer::infer::TyCtxtInferExt;
use rustc_lint::LateContext;
use rustc_middle::hir::map::Map;
use rustc_middle::mir::FakeReadCause;
use rustc_middle::ty;
use rustc_typeck::expr_use_visitor::{Delegate, ExprUseVisitor, PlaceBase, PlaceWithHirId};
/// Returns a set of mutated local variable IDs, or `None` if mutations could not be determined.
pub fn mutated_variables<'tcx>(expr: &'tcx Expr<'_>, cx: &LateContext<'tcx>) -> Option<HirIdSet> {
let mut delegate = MutVarsDelegate {
used_mutably: HirIdSet::default(),
skip: false,
};
cx.tcx.infer_ctxt().enter(|infcx| {
ExprUseVisitor::new(
&mut delegate,
&infcx,
expr.hir_id.owner,
cx.param_env,
cx.typeck_results(),
)
.walk_expr(expr);
});
if delegate.skip {
return None;
}
Some(delegate.used_mutably)
}
pub fn is_potentially_mutated<'tcx>(variable: &'tcx Path<'_>, expr: &'tcx Expr<'_>, cx: &LateContext<'tcx>) -> bool {
if let Res::Local(id) = variable.res {
mutated_variables(expr, cx).map_or(true, |mutated| mutated.contains(&id))
} else {
true
}
}
struct MutVarsDelegate {
used_mutably: HirIdSet,
skip: bool,
}
impl<'tcx> MutVarsDelegate {
#[allow(clippy::similar_names)]
fn update(&mut self, cat: &PlaceWithHirId<'tcx>) {
match cat.place.base {
PlaceBase::Local(id) => {
self.used_mutably.insert(id);
},
PlaceBase::Upvar(_) => {
//FIXME: This causes false negatives. We can't get the `NodeId` from
//`Categorization::Upvar(_)`. So we search for any `Upvar`s in the
//`while`-body, not just the ones in the condition.
self.skip = true;
},
_ => {},
}
}
}
impl<'tcx> Delegate<'tcx> for MutVarsDelegate {
fn consume(&mut self, _: &PlaceWithHirId<'tcx>, _: HirId) {}
fn borrow(&mut self, cmt: &PlaceWithHirId<'tcx>, _: HirId, bk: ty::BorrowKind) {
if let ty::BorrowKind::MutBorrow = bk {
self.update(cmt);
}
}
fn mutate(&mut self, cmt: &PlaceWithHirId<'tcx>, _: HirId) {
self.update(cmt);
}
fn fake_read(&mut self, _: rustc_typeck::expr_use_visitor::Place<'tcx>, _: FakeReadCause, _: HirId) {}
}
pub struct ParamBindingIdCollector {
binding_hir_ids: Vec<hir::HirId>,
}
impl<'tcx> ParamBindingIdCollector {
fn collect_binding_hir_ids(body: &'tcx hir::Body<'tcx>) -> Vec<hir::HirId> |
}
impl<'tcx> intravisit::Visitor<'tcx> for ParamBindingIdCollector {
type Map = Map<'tcx>;
fn visit_pat(&mut self, pat: &'tcx hir::Pat<'tcx>) {
if let hir::PatKind::Binding(_, hir_id, ..) = pat.kind {
self.binding_hir_ids.push(hir_id);
}
intravisit::walk_pat(self, pat);
}
fn nested_visit_map(&mut self) -> intravisit::NestedVisitorMap<Self::Map> {
intravisit::NestedVisitorMap::None
}
}
pub struct BindingUsageFinder<'a, 'tcx> {
cx: &'a LateContext<'tcx>,
binding_ids: Vec<hir::HirId>,
usage_found: bool,
}
impl<'a, 'tcx> BindingUsageFinder<'a, 'tcx> {
pub fn are_params_used(cx: &'a LateContext<'tcx>, body: &'tcx hir::Body<'tcx>) -> bool {
let mut finder = BindingUsageFinder {
cx,
binding_ids: ParamBindingIdCollector::collect_binding_hir_ids(body),
usage_found: false,
};
finder.visit_body(body);
finder.usage_found
}
}
impl<'a, 'tcx> intravisit::Visitor<'tcx> for BindingUsageFinder<'a, 'tcx> {
type Map = Map<'tcx>;
fn visit_expr(&mut self, expr: &'tcx hir::Expr<'tcx>) {
if !self.usage_found {
intravisit::walk_expr(self, expr);
}
}
fn visit_path(&mut self, path: &'tcx hir::Path<'tcx>, _: hir::HirId) {
if let hir::def::Res::Local(id) = path.res {
if self.binding_ids.contains(&id) {
self.usage_found = true;
}
}
}
fn nested_visit_map(&mut self) -> intravisit::NestedVisitorMap<Self::Map> {
intravisit::NestedVisitorMap::OnlyBodies(self.cx.tcx.hir())
}
}
struct ReturnBreakContinueMacroVisitor {
seen_return_break_continue: bool,
}
impl ReturnBreakContinueMacroVisitor {
fn new() -> ReturnBreakContinueMacroVisitor {
ReturnBreakContinueMacroVisitor {
seen_return_break_continue: false,
}
}
}
impl<'tcx> Visitor<'tcx> for ReturnBreakContinueMacroVisitor {
type Map = Map<'tcx>;
fn nested_visit_map(&mut self) -> NestedVisitorMap<Self::Map> {
NestedVisitorMap::None
}
fn visit_expr(&mut self, ex: &'tcx Expr<'tcx>) {
if self.seen_return_break_continue {
// No need to look farther if we've already seen one of them
return;
}
match &ex.kind {
ExprKind::Ret(..) | ExprKind::Break(..) | ExprKind::Continue(..) => {
self.seen_return_break_continue = true;
},
// Something special could be done here to handle while or for loop
// desugaring, as this will detect a break if there's a while loop
// or a for loop inside the expression.
_ => {
if utils::in_macro(ex.span) {
self.seen_return_break_continue = true;
} else {
rustc_hir::intravisit::walk_expr(self, ex);
}
},
}
}
}
pub fn contains_return_break_continue_macro(expression: &Expr<'_>) -> bool {
let mut recursive_visitor = ReturnBreakContinueMacroVisitor::new();
recursive_visitor.visit_expr(expression);
recursive_visitor.seen_return_break_continue
}
pub struct UsedAfterExprVisitor<'a, 'tcx> {
cx: &'a LateContext<'tcx>,
expr: &'tcx Expr<'tcx>,
definition: HirId,
past_expr: bool,
used_after_expr: bool,
}
impl<'a, 'tcx> UsedAfterExprVisitor<'a, 'tcx> {
pub fn is_found(cx: &'a LateContext<'tcx>, expr: &'tcx Expr<'_>) -> bool {
utils::path_to_local(expr).map_or(false, |definition| {
let mut visitor = UsedAfterExprVisitor {
cx,
expr,
definition,
past_expr: false,
used_after_expr: false,
};
utils::get_enclosing_block(cx, definition).map_or(false, |block| {
visitor.visit_block(block);
visitor.used_after_expr
})
})
}
}
impl<'a, 'tcx> intravisit::Visitor<'tcx> for UsedAfterExprVisitor<'a, 'tcx> {
type Map = Map<'tcx>;
fn nested_visit_map(&mut self) -> NestedVisitorMap<Self::Map> {
NestedVisitorMap::OnlyBodies(self.cx.tcx.hir())
}
fn visit_expr(&mut self, expr: &'tcx Expr<'tcx>) {
if self.used_after_expr {
return;
}
if expr.hir_id == self.expr.hir_id {
self.past_expr = true;
} else if self.past_expr && utils::path_to_local_id(expr, self.definition) {
self.used_after_expr = true;
} else {
intravisit::walk_expr(self, expr);
}
}
}
| {
let mut hir_ids: Vec<hir::HirId> = Vec::new();
for param in body.params.iter() {
let mut finder = ParamBindingIdCollector {
binding_hir_ids: Vec::new(),
};
finder.visit_param(param);
for hir_id in &finder.binding_hir_ids {
hir_ids.push(*hir_id);
}
}
hir_ids
} | identifier_body |
tree.js | import {extend} from "lodash";
import SeqCollection from "../model/SeqCollection";
const TreeHelper = function(msa) {
this.msa = msa;
return this;
};
var tf =
{loadTree: function(cb) {
return this.msa.g.package.loadPackages(["msa-tnt", "biojs-io-newick"], cb);
},
showTree: function(newickStr) {
var newick = window.require("biojs-io-newick");
var mt = window.require("msa-tnt");
if (typeof newickStr === "string") {
var newickObj = newick.parse_newick(newickStr);
} else {
newickObj = newickStr;
}
var sel = new mt.selections();
var treeDiv;
if(this.msa.el.getElementsByClassName('tnt_groupDiv').length === 0){
treeDiv = document.createElement("div");
this.msa.el.appendChild(treeDiv);
} else {
console.log('A tree already exists. It will be overridden.');
treeDiv = this.msa.el.getElementsByClassName('tnt_groupDiv')[0].parentNode;
treeDiv.innerHTML = '';
}
const seqs = this.msa.seqs.toJSON();
//adapt tree ids to sequence ids
function iterateTree(nwck){
if(nwck.children != null){
nwck.children.forEach(x => iterateTree(x));
} else {
//found a leaf
let seq = seqs.filter(s => s.name === nwck.name)[0];
if(seq != null){
if(typeof seq.id === 'number'){
//no tree has been uploaded so far, seqs have standard IDs
seq.ids = [`s${seq.id + 1}`];
nwck.name = `s${seq.id + 1}`;
} else {
//seqs have custom ids - don't mess with these
nwck.name = seq.id;
}
}
}
}
iterateTree(newickObj);
var nodes = mt.app({
seqs: seqs,
tree: newickObj
});
var t = new mt.adapters.tree({
model: nodes,
el: treeDiv,
sel: sel
});
//treeDiv.style.width = "500px"
// construct msa in a virtual dom | model: nodes,
sel: sel,
msa: this.msa
});
// remove top collection
nodes.models.forEach((e) => {
delete e.collection;
return Object.setPrototypeOf(e, require("backbone-thin").Model.prototype);
});
this.msa.seqs.reset(nodes.models);
//@msa.draw()
//@msa.render()
return console.log(this.msa.seqs);
},
// workaround against browserify's static analysis
require(pkg) {
return require(pkg);
}
};
extend(TreeHelper.prototype , tf);
export default TreeHelper; | var m = new mt.adapters.msa({ | random_line_split |
tree.js | import {extend} from "lodash";
import SeqCollection from "../model/SeqCollection";
const TreeHelper = function(msa) {
this.msa = msa;
return this;
};
var tf =
{loadTree: function(cb) {
return this.msa.g.package.loadPackages(["msa-tnt", "biojs-io-newick"], cb);
},
showTree: function(newickStr) {
var newick = window.require("biojs-io-newick");
var mt = window.require("msa-tnt");
if (typeof newickStr === "string") {
var newickObj = newick.parse_newick(newickStr);
} else {
newickObj = newickStr;
}
var sel = new mt.selections();
var treeDiv;
if(this.msa.el.getElementsByClassName('tnt_groupDiv').length === 0){
treeDiv = document.createElement("div");
this.msa.el.appendChild(treeDiv);
} else |
const seqs = this.msa.seqs.toJSON();
//adapt tree ids to sequence ids
function iterateTree(nwck){
if(nwck.children != null){
nwck.children.forEach(x => iterateTree(x));
} else {
//found a leaf
let seq = seqs.filter(s => s.name === nwck.name)[0];
if(seq != null){
if(typeof seq.id === 'number'){
//no tree has been uploaded so far, seqs have standard IDs
seq.ids = [`s${seq.id + 1}`];
nwck.name = `s${seq.id + 1}`;
} else {
//seqs have custom ids - don't mess with these
nwck.name = seq.id;
}
}
}
}
iterateTree(newickObj);
var nodes = mt.app({
seqs: seqs,
tree: newickObj
});
var t = new mt.adapters.tree({
model: nodes,
el: treeDiv,
sel: sel
});
//treeDiv.style.width = "500px"
// construct msa in a virtual dom
var m = new mt.adapters.msa({
model: nodes,
sel: sel,
msa: this.msa
});
// remove top collection
nodes.models.forEach((e) => {
delete e.collection;
return Object.setPrototypeOf(e, require("backbone-thin").Model.prototype);
});
this.msa.seqs.reset(nodes.models);
//@msa.draw()
//@msa.render()
return console.log(this.msa.seqs);
},
// workaround against browserify's static analysis
require(pkg) {
return require(pkg);
}
};
extend(TreeHelper.prototype , tf);
export default TreeHelper;
| {
console.log('A tree already exists. It will be overridden.');
treeDiv = this.msa.el.getElementsByClassName('tnt_groupDiv')[0].parentNode;
treeDiv.innerHTML = '';
} | conditional_block |
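/*
 * Illustrative usage sketch only (not part of the dataset rows above/below): how the
 * TreeHelper above is typically driven. `msaInstance` and `newickString` are
 * placeholders supplied by the caller; loadTree() first loads the "msa-tnt" and
 * "biojs-io-newick" packages, then showTree() renders the tree next to the MSA.
 */
function renderExampleTree(msaInstance, newickString) {
  const helper = new TreeHelper(msaInstance);
  helper.loadTree(() => helper.showTree(newickString));
}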
tree.js | import {extend} from "lodash";
import SeqCollection from "../model/SeqCollection";
const TreeHelper = function(msa) {
this.msa = msa;
return this;
};
var tf =
{loadTree: function(cb) {
return this.msa.g.package.loadPackages(["msa-tnt", "biojs-io-newick"], cb);
},
showTree: function(newickStr) {
var newick = window.require("biojs-io-newick");
var mt = window.require("msa-tnt");
if (typeof newickStr === "string") {
var newickObj = newick.parse_newick(newickStr);
} else {
newickObj = newickStr;
}
var sel = new mt.selections();
var treeDiv;
if(this.msa.el.getElementsByClassName('tnt_groupDiv').length === 0){
treeDiv = document.createElement("div");
this.msa.el.appendChild(treeDiv);
} else {
console.log('A tree already exists. It will be overridden.');
treeDiv = this.msa.el.getElementsByClassName('tnt_groupDiv')[0].parentNode;
treeDiv.innerHTML = '';
}
const seqs = this.msa.seqs.toJSON();
//adapt tree ids to sequence ids
function | (nwck){
if(nwck.children != null){
nwck.children.forEach(x => iterateTree(x));
} else {
//found a leaf
let seq = seqs.filter(s => s.name === nwck.name)[0];
if(seq != null){
if(typeof seq.id === 'number'){
//no tree has been uploaded so far, seqs have standard IDs
seq.ids = [`s${seq.id + 1}`];
nwck.name = `s${seq.id + 1}`;
} else {
//seqs have custom ids - don't mess with these
nwck.name = seq.id;
}
}
}
}
iterateTree(newickObj);
var nodes = mt.app({
seqs: seqs,
tree: newickObj
});
var t = new mt.adapters.tree({
model: nodes,
el: treeDiv,
sel: sel
});
//treeDiv.style.width = "500px"
// construct msa in a virtual dom
var m = new mt.adapters.msa({
model: nodes,
sel: sel,
msa: this.msa
});
// remove top collection
nodes.models.forEach((e) => {
delete e.collection;
return Object.setPrototypeOf(e, require("backbone-thin").Model.prototype);
});
this.msa.seqs.reset(nodes.models);
//@msa.draw()
//@msa.render()
return console.log(this.msa.seqs);
},
// workaround against browserify's static analysis
require(pkg) {
return require(pkg);
}
};
extend(TreeHelper.prototype , tf);
export default TreeHelper;
| iterateTree | identifier_name |
tree.js | import {extend} from "lodash";
import SeqCollection from "../model/SeqCollection";
const TreeHelper = function(msa) {
this.msa = msa;
return this;
};
var tf =
{loadTree: function(cb) {
return this.msa.g.package.loadPackages(["msa-tnt", "biojs-io-newick"], cb);
},
showTree: function(newickStr) {
var newick = window.require("biojs-io-newick");
var mt = window.require("msa-tnt");
if (typeof newickStr === "string") {
var newickObj = newick.parse_newick(newickStr);
} else {
newickObj = newickStr;
}
var sel = new mt.selections();
var treeDiv;
if(this.msa.el.getElementsByClassName('tnt_groupDiv').length === 0){
treeDiv = document.createElement("div");
this.msa.el.appendChild(treeDiv);
} else {
console.log('A tree already exists. It will be overridden.');
treeDiv = this.msa.el.getElementsByClassName('tnt_groupDiv')[0].parentNode;
treeDiv.innerHTML = '';
}
const seqs = this.msa.seqs.toJSON();
//adapt tree ids to sequence ids
function iterateTree(nwck){
if(nwck.children != null){
nwck.children.forEach(x => iterateTree(x));
} else {
//found a leaf
let seq = seqs.filter(s => s.name === nwck.name)[0];
if(seq != null){
if(typeof seq.id === 'number'){
//no tree has been uploaded so far, seqs have standard IDs
seq.ids = [`s${seq.id + 1}`];
nwck.name = `s${seq.id + 1}`;
} else {
//seqs have custom ids - don't mess with these
nwck.name = seq.id;
}
}
}
}
iterateTree(newickObj);
var nodes = mt.app({
seqs: seqs,
tree: newickObj
});
var t = new mt.adapters.tree({
model: nodes,
el: treeDiv,
sel: sel
});
//treeDiv.style.width = "500px"
// construct msa in a virtual dom
var m = new mt.adapters.msa({
model: nodes,
sel: sel,
msa: this.msa
});
// remove top collection
nodes.models.forEach((e) => {
delete e.collection;
return Object.setPrototypeOf(e, require("backbone-thin").Model.prototype);
});
this.msa.seqs.reset(nodes.models);
//@msa.draw()
//@msa.render()
return console.log(this.msa.seqs);
},
// workaround against browserify's static analysis
require(pkg) |
};
extend(TreeHelper.prototype , tf);
export default TreeHelper;
| {
return require(pkg);
} | identifier_body |
moh-731-resource.service.ts | import { of as observableOf, Observable } from 'rxjs';
import { catchError, map } from 'rxjs/operators';
import { Injectable } from '@angular/core';
// tslint:disable-next-line:import-blacklist
import { Subject } from 'rxjs/Rx';
import { AppSettingsService } from '../app-settings/app-settings.service';
import { DataCacheService } from '../shared/services/data-cache.service';
import { HttpParams, HttpClient } from '@angular/common/http';
@Injectable()
export class Moh731ResourceService {
private _url = 'MOH-731-report';
public get url(): string {
return this.appSettingsService.getEtlRestbaseurl().trim() + this._url;
}
constructor(
public http: HttpClient,
public appSettingsService: AppSettingsService,
public cacheService: DataCacheService
) {}
public getMoh731Report(
locationUuids: string,
startDate: string,
endDate: string,
isLegacyReport: boolean,
isAggregated: boolean,
cacheTtl: number = 0
): Observable<any> {
let report = '';
let aggregated = 'false';
if (isAggregated) {
aggregated = 'true';
}
if (isLegacyReport) | else {
report = 'MOH-731-report-2017';
}
const urlParams: HttpParams = new HttpParams()
.set('locationUuids', locationUuids)
.set('startDate', startDate)
.set('endDate', endDate)
.set('reportName', report)
.set('isAggregated', aggregated);
const request = this.http
.get(this.url, {
params: urlParams
})
.pipe(
map((response: Response) => {
return response;
}),
catchError((err: any) => {
console.log('Err', err);
const error: any = err;
const errorObj = {
error: error.status,
message: error.statusText
};
return observableOf(errorObj);
})
);
return cacheTtl === 0
? request
: this.cacheService.cacheSingleRequest(
this.url,
urlParams,
request,
cacheTtl
);
}
}
| {
report = 'MOH-731-report';
} | conditional_block |
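/*
 * Illustrative usage sketch only (not part of the dataset rows above/below): a consumer
 * would normally receive Moh731ResourceService through Angular's dependency injection
 * and call it like this. The location UUIDs, dates and cache TTL are placeholder values.
 */
export function loadExampleMoh731Report(moh731: Moh731ResourceService): void {
  moh731
    .getMoh731Report('location-uuid-1,location-uuid-2', '2021-01-01', '2021-03-31', false, true, 3600)
    .subscribe((report) => {
      // Errors are not thrown; they arrive as `{ error, message }` objects.
      console.log(report);
    });
}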
moh-731-resource.service.ts | import { of as observableOf, Observable } from 'rxjs';
import { catchError, map } from 'rxjs/operators';
import { Injectable } from '@angular/core';
// tslint:disable-next-line:import-blacklist
import { Subject } from 'rxjs/Rx';
import { AppSettingsService } from '../app-settings/app-settings.service';
import { DataCacheService } from '../shared/services/data-cache.service';
import { HttpParams, HttpClient } from '@angular/common/http';
@Injectable()
export class Moh731ResourceService {
private _url = 'MOH-731-report';
public get url(): string {
return this.appSettingsService.getEtlRestbaseurl().trim() + this._url;
}
constructor(
public http: HttpClient,
public appSettingsService: AppSettingsService,
public cacheService: DataCacheService
) {}
public getMoh731Report(
locationUuids: string,
startDate: string,
endDate: string,
isLegacyReport: boolean,
isAggregated: boolean,
cacheTtl: number = 0
): Observable<any> |
}
| {
let report = '';
let aggregated = 'false';
if (isAggregated) {
aggregated = 'true';
}
if (isLegacyReport) {
report = 'MOH-731-report';
} else {
report = 'MOH-731-report-2017';
}
const urlParams: HttpParams = new HttpParams()
.set('locationUuids', locationUuids)
.set('startDate', startDate)
.set('endDate', endDate)
.set('reportName', report)
.set('isAggregated', aggregated);
const request = this.http
.get(this.url, {
params: urlParams
})
.pipe(
map((response: Response) => {
return response;
}),
catchError((err: any) => {
console.log('Err', err);
const error: any = err;
const errorObj = {
error: error.status,
message: error.statusText
};
return observableOf(errorObj);
})
);
return cacheTtl === 0
? request
: this.cacheService.cacheSingleRequest(
this.url,
urlParams,
request,
cacheTtl
);
} | identifier_body |
moh-731-resource.service.ts | import { of as observableOf, Observable } from 'rxjs';
import { catchError, map } from 'rxjs/operators';
import { Injectable } from '@angular/core';
// tslint:disable-next-line:import-blacklist
import { Subject } from 'rxjs/Rx';
import { AppSettingsService } from '../app-settings/app-settings.service';
import { DataCacheService } from '../shared/services/data-cache.service';
import { HttpParams, HttpClient } from '@angular/common/http';
@Injectable()
export class Moh731ResourceService {
private _url = 'MOH-731-report';
public get url(): string {
return this.appSettingsService.getEtlRestbaseurl().trim() + this._url;
}
| (
public http: HttpClient,
public appSettingsService: AppSettingsService,
public cacheService: DataCacheService
) {}
public getMoh731Report(
locationUuids: string,
startDate: string,
endDate: string,
isLegacyReport: boolean,
isAggregated: boolean,
cacheTtl: number = 0
): Observable<any> {
let report = '';
let aggregated = 'false';
if (isAggregated) {
aggregated = 'true';
}
if (isLegacyReport) {
report = 'MOH-731-report';
} else {
report = 'MOH-731-report-2017';
}
const urlParams: HttpParams = new HttpParams()
.set('locationUuids', locationUuids)
.set('startDate', startDate)
.set('endDate', endDate)
.set('reportName', report)
.set('isAggregated', aggregated);
const request = this.http
.get(this.url, {
params: urlParams
})
.pipe(
map((response: Response) => {
return response;
}),
catchError((err: any) => {
console.log('Err', err);
const error: any = err;
const errorObj = {
error: error.status,
message: error.statusText
};
return observableOf(errorObj);
})
);
return cacheTtl === 0
? request
: this.cacheService.cacheSingleRequest(
this.url,
urlParams,
request,
cacheTtl
);
}
}
| constructor | identifier_name |
moh-731-resource.service.ts | import { of as observableOf, Observable } from 'rxjs';
import { catchError, map } from 'rxjs/operators'; | import { DataCacheService } from '../shared/services/data-cache.service';
import { HttpParams, HttpClient } from '@angular/common/http';
@Injectable()
export class Moh731ResourceService {
private _url = 'MOH-731-report';
public get url(): string {
return this.appSettingsService.getEtlRestbaseurl().trim() + this._url;
}
constructor(
public http: HttpClient,
public appSettingsService: AppSettingsService,
public cacheService: DataCacheService
) {}
public getMoh731Report(
locationUuids: string,
startDate: string,
endDate: string,
isLegacyReport: boolean,
isAggregated: boolean,
cacheTtl: number = 0
): Observable<any> {
let report = '';
let aggregated = 'false';
if (isAggregated) {
aggregated = 'true';
}
if (isLegacyReport) {
report = 'MOH-731-report';
} else {
report = 'MOH-731-report-2017';
}
const urlParams: HttpParams = new HttpParams()
.set('locationUuids', locationUuids)
.set('startDate', startDate)
.set('endDate', endDate)
.set('reportName', report)
.set('isAggregated', aggregated);
const request = this.http
.get(this.url, {
params: urlParams
})
.pipe(
map((response: Response) => {
return response;
}),
catchError((err: any) => {
console.log('Err', err);
const error: any = err;
const errorObj = {
error: error.status,
message: error.statusText
};
return observableOf(errorObj);
})
);
return cacheTtl === 0
? request
: this.cacheService.cacheSingleRequest(
this.url,
urlParams,
request,
cacheTtl
);
}
} | import { Injectable } from '@angular/core';
// tslint:disable-next-line:import-blacklist
import { Subject } from 'rxjs/Rx';
import { AppSettingsService } from '../app-settings/app-settings.service'; | random_line_split |
ctrl_c_handler.py | # Copyright 2015 Google Inc. All Rights Reserved.
"""Context manager to help with Control-C handling during critical commands."""
import signal
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.test.lib import exit_code
class CancellableTestSection(object):
"""Cancel a test matrix if CTRL-C is typed during a section of code.
While within this context manager, the CTRL-C signal is caught and a test
matrix is cancelled. This should only be used with a section of code where
the test matrix is running.
"""
def | (self, matrix_id, testing_api_helper):
self._old_handler = None
self._matrix_id = matrix_id
self._testing_api_helper = testing_api_helper
def __enter__(self):
self._old_handler = signal.getsignal(signal.SIGINT)
signal.signal(signal.SIGINT, self._Handler)
return self
def __exit__(self, typ, value, traceback):
signal.signal(signal.SIGINT, self._old_handler)
return False
def _Handler(self, unused_signal, unused_frame):
log.status.write('\n\nCancelling test [{id}]...\n\n'
.format(id=self._matrix_id))
self._testing_api_helper.CancelTestMatrix(self._matrix_id)
raise exceptions.ExitCodeNoError(exit_code=exit_code.MATRIX_CANCELLED)
| __init__ | identifier_name |
ctrl_c_handler.py | # Copyright 2015 Google Inc. All Rights Reserved.
"""Context manager to help with Control-C handling during critical commands."""
| import signal
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.test.lib import exit_code
class CancellableTestSection(object):
"""Cancel a test matrix if CTRL-C is typed during a section of code.
While within this context manager, the CTRL-C signal is caught and a test
matrix is cancelled. This should only be used with a section of code where
the test matrix is running.
"""
def __init__(self, matrix_id, testing_api_helper):
self._old_handler = None
self._matrix_id = matrix_id
self._testing_api_helper = testing_api_helper
def __enter__(self):
self._old_handler = signal.getsignal(signal.SIGINT)
signal.signal(signal.SIGINT, self._Handler)
return self
def __exit__(self, typ, value, traceback):
signal.signal(signal.SIGINT, self._old_handler)
return False
def _Handler(self, unused_signal, unused_frame):
log.status.write('\n\nCancelling test [{id}]...\n\n'
.format(id=self._matrix_id))
self._testing_api_helper.CancelTestMatrix(self._matrix_id)
raise exceptions.ExitCodeNoError(exit_code=exit_code.MATRIX_CANCELLED) | random_line_split |
|
ctrl_c_handler.py | # Copyright 2015 Google Inc. All Rights Reserved.
"""Context manager to help with Control-C handling during critical commands."""
import signal
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.test.lib import exit_code
class CancellableTestSection(object):
"""Cancel a test matrix if CTRL-C is typed during a section of code.
While within this context manager, the CTRL-C signal is caught and a test
matrix is cancelled. This should only be used with a section of code where
the test matrix is running.
"""
def __init__(self, matrix_id, testing_api_helper):
self._old_handler = None
self._matrix_id = matrix_id
self._testing_api_helper = testing_api_helper
def __enter__(self):
self._old_handler = signal.getsignal(signal.SIGINT)
signal.signal(signal.SIGINT, self._Handler)
return self
def __exit__(self, typ, value, traceback):
signal.signal(signal.SIGINT, self._old_handler)
return False
def _Handler(self, unused_signal, unused_frame):
| log.status.write('\n\nCancelling test [{id}]...\n\n'
.format(id=self._matrix_id))
self._testing_api_helper.CancelTestMatrix(self._matrix_id)
raise exceptions.ExitCodeNoError(exit_code=exit_code.MATRIX_CANCELLED) | identifier_body |
|
plotCorrelation.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import argparse
import numpy as np
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['svg.fonttype'] = 'none'
from deeptools import cm # noqa: F401
import matplotlib.pyplot as plt
from deeptools.correlation import Correlation
from deeptools.parserCommon import writableFile
from deeptools._version import __version__
old_settings = np.seterr(all='ignore')
def parse_arguments(args=None):
basic_args = plot_correlation_args()
heatmap_parser = heatmap_options()
scatter_parser = scatterplot_options()
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""
Tool for the analysis and visualization of sample correlations based on the output of multiBamSummary or
multiBigwigSummary. Pearson or Spearman methods are available to compute correlation
coefficients. Results can be saved as multiple
scatter plots depicting the pairwise correlations or as a clustered heatmap,
where the colors represent the correlation coefficients and the clusters are constructed using complete linkage.
Optionally, the values can be saved as tables, too.
detailed help:
plotCorrelation -h
""",
epilog='example usages:\n'
'plotCorrelation -in results_file --whatToPlot heatmap --corMethod pearson -o heatmap.png\n\n'
' \n\n',
parents=[basic_args, heatmap_parser, scatter_parser])
return parser
def plot_correlation_args():
parser = argparse.ArgumentParser(add_help=False)
required = parser.add_argument_group('Required arguments')
# define the arguments
required.add_argument('--corData', '-in',
metavar='FILE',
help='Compressed matrix of values generated by multiBigwigSummary or multiBamSummary',
required=True)
required.add_argument('--corMethod', '-c',
help="Correlation method.",
choices=['spearman', 'pearson'],
required=True)
required.add_argument('--whatToPlot', '-p',
help="Choose between a heatmap or pairwise scatter plots",
choices=['heatmap', 'scatterplot'],
required=True)
optional = parser.add_argument_group('Optional arguments')
optional.add_argument('--plotFile', '-o',
help='File to save the heatmap to. The file extension determines the format, '
'so heatmap.pdf will save the heatmap in PDF format. '
'The available formats are: .png, '
'.eps, .pdf and .svg.',
type=writableFile,
metavar='FILE')
optional.add_argument('--skipZeros',
help='By setting this option, genomic regions '
'that have zero or missing (nan) values in all samples '
'are excluded.',
action='store_true',
required=False)
optional.add_argument('--labels', '-l',
metavar='sample1 sample2',
help='User defined labels instead of default labels from '
'file names. '
'Multiple labels have to be separated by spaces, e.g. '
'--labels sample1 sample2 sample3',
nargs='+')
optional.add_argument('--plotTitle', '-T',
help='Title of the plot, to be printed on top of '
'the generated image. Leave blank for no title. (Default: %(default)s)',
default='')
optional.add_argument('--plotFileFormat',
metavar='FILETYPE',
help='Image format type. If given, this option '
'overrides the image format based on the plotFile '
'ending. The available options are: png, '
'eps, pdf and svg.',
choices=['png', 'pdf', 'svg', 'eps', 'plotly'])
optional.add_argument(
'--removeOutliers',
help='If set, bins with very large counts are removed. '
'Bins with abnormally high read counts artificially increase '
'pearson correlation; that\'s why multiBamSummary tries '
'to remove outliers using the median absolute deviation (MAD) '
'method applying a threshold of 200 to only consider extremely '
'large deviations from the median. The ENCODE blacklist page '
'(https://sites.google.com/site/anshulkundaje/projects/blacklists) '
'contains useful information about regions with unusually high counts '
'that may be worth removing.',
action='store_true')
optional.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
group = parser.add_argument_group('Output optional options')
group.add_argument('--outFileCorMatrix',
help='Save matrix with pairwise correlation values to a tab-separated file.',
metavar='FILE',
type=writableFile)
return parser
def scatterplot_options():
|
def heatmap_options():
"""
Options for generating the correlation heatmap
"""
parser = argparse.ArgumentParser(add_help=False)
heatmap = parser.add_argument_group('Heatmap options')
heatmap.add_argument('--plotHeight',
help='Plot height in cm. (Default: %(default)s)',
type=float,
default=9.5)
heatmap.add_argument('--plotWidth',
help='Plot width in cm. The minimum value is 1 cm. (Default: %(default)s)',
type=float,
default=11)
heatmap.add_argument('--zMin', '-min',
default=None,
help='Minimum value for the heatmap intensities. '
'If not specified, the value is set automatically',
type=float)
heatmap.add_argument('--zMax', '-max',
default=None,
help='Maximum value for the heatmap intensities. '
'If not specified, the value is set automatically',
type=float)
heatmap.add_argument(
'--colorMap', default='jet',
metavar='',
help='Color map to use for the heatmap. Available values can be '
'seen here: '
'http://matplotlib.org/examples/color/colormaps_reference.html')
heatmap.add_argument('--plotNumbers',
help='If set, then the correlation number is plotted '
'on top of the heatmap. This option is only valid when plotting a heatmap.',
action='store_true',
required=False)
return parser
def main(args=None):
args = parse_arguments().parse_args(args)
if args.plotFile is None and args.outFileCorMatrix is None:
sys.exit("At least one of --plotFile and --outFileCorMatrix must be specified!\n")
corr = Correlation(args.corData,
args.corMethod,
labels=args.labels,
remove_outliers=args.removeOutliers,
skip_zeros=args.skipZeros)
if args.corMethod == 'pearson':
# test if there are outliers and write a message recommending the removal
if len(corr.get_outlier_indices(np.asarray(corr.matrix).flatten())) > 0:
if args.removeOutliers:
sys.stderr.write("\nOutliers were detected in the data. They "
"will be removed to avoid bias "
"in the pearson correlation.\n")
else:
sys.stderr.write("\nOutliers were detected in the data. Consider "
"using the --removeOutliers parameter to avoid a bias "
"in the pearson correlation.\n")
if args.colorMap:
try:
plt.get_cmap(args.colorMap)
except ValueError as error:
sys.stderr.write(
"A problem was found. Message: {}\n".format(error))
exit()
if args.plotFile is not None:
if args.whatToPlot == 'scatterplot':
corr.plot_scatter(args.plotFile,
plot_title=args.plotTitle,
image_format=args.plotFileFormat,
xRange=args.xRange,
yRange=args.yRange,
log1p=args.log1p)
else:
corr.plot_correlation(args.plotFile,
vmax=args.zMax,
vmin=args.zMin,
colormap=args.colorMap,
plot_title=args.plotTitle,
image_format=args.plotFileFormat,
plot_numbers=args.plotNumbers,
plotWidth=args.plotWidth,
plotHeight=args.plotHeight)
if args.outFileCorMatrix:
o = open(args.outFileCorMatrix, "w")
o.write("#plotCorrelation --outFileCorMatrix\n")
corr.save_corr_matrix(o)
o.close()
| """
Options specific for creating the scatter plot
"""
parser = argparse.ArgumentParser(add_help=False)
scatter_opts = parser.add_argument_group('Scatter plot options')
scatter_opts.add_argument('--xRange',
help='The X axis range. The default scales these such that the full range of dots is displayed.',
type=int,
nargs=2,
default=None)
scatter_opts.add_argument('--yRange',
help='The Y axis range. The default scales these such that the full range of dots is displayed.',
type=int,
nargs=2,
default=None)
scatter_opts.add_argument('--log1p',
help='Plot the natural log of the scatter plot after adding 1. Note that this is ONLY for plotting, the correlation is unaffected.',
action='store_true')
return parser | identifier_body |
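Everything the row above wires into parse_arguments can be exercised without any data files by handing parse_args an explicit argument vector. A small sketch, assuming parse_arguments from this script is importable; the .npz name and labels are placeholders.

# Hedged sketch: only the parser is exercised; 'scores.npz' is a hypothetical
# multiBamSummary/multiBigwigSummary output name and nothing is read from disk.
parser = parse_arguments()
args = parser.parse_args([
    '--corData', 'scores.npz',
    '--corMethod', 'spearman',
    '--whatToPlot', 'heatmap',
    '--labels', 'sample1', 'sample2',
    '--plotNumbers',
])
print(args.corMethod, args.whatToPlot, args.labels, args.plotNumbers)
# -> spearman heatmap ['sample1', 'sample2'] True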
plotCorrelation.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import argparse
import numpy as np
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['svg.fonttype'] = 'none'
from deeptools import cm # noqa: F401
import matplotlib.pyplot as plt
from deeptools.correlation import Correlation
from deeptools.parserCommon import writableFile
from deeptools._version import __version__
old_settings = np.seterr(all='ignore')
def parse_arguments(args=None):
basic_args = plot_correlation_args()
heatmap_parser = heatmap_options()
scatter_parser = scatterplot_options()
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""
Tool for the analysis and visualization of sample correlations based on the output of multiBamSummary or
multiBigwigSummary. Pearson or Spearman methods are available to compute correlation
coefficients. Results can be saved as multiple
scatter plots depicting the pairwise correlations or as a clustered heatmap,
where the colors represent the correlation coefficients and the clusters are constructed using complete linkage.
Optionally, the values can be saved as tables, too.
detailed help:
plotCorrelation -h
""",
epilog='example usages:\n'
'plotCorrelation -in results_file --whatToPlot heatmap --corMethod pearson -o heatmap.png\n\n'
' \n\n',
parents=[basic_args, heatmap_parser, scatter_parser])
return parser
def plot_correlation_args():
parser = argparse.ArgumentParser(add_help=False)
required = parser.add_argument_group('Required arguments')
# define the arguments
required.add_argument('--corData', '-in',
metavar='FILE',
help='Compressed matrix of values generated by multiBigwigSummary or multiBamSummary',
required=True)
required.add_argument('--corMethod', '-c',
help="Correlation method.",
choices=['spearman', 'pearson'],
required=True)
required.add_argument('--whatToPlot', '-p',
help="Choose between a heatmap or pairwise scatter plots",
choices=['heatmap', 'scatterplot'],
required=True)
optional = parser.add_argument_group('Optional arguments')
optional.add_argument('--plotFile', '-o',
help='File to save the heatmap to. The file extension determines the format, '
'so heatmap.pdf will save the heatmap in PDF format. '
'The available formats are: .png, '
'.eps, .pdf and .svg.',
type=writableFile,
metavar='FILE')
optional.add_argument('--skipZeros',
help='By setting this option, genomic regions '
'that have zero or missing (nan) values in all samples '
'are excluded.',
action='store_true',
required=False)
optional.add_argument('--labels', '-l',
metavar='sample1 sample2',
help='User defined labels instead of default labels from '
'file names. '
'Multiple labels have to be separated by spaces, e.g. '
'--labels sample1 sample2 sample3',
nargs='+')
optional.add_argument('--plotTitle', '-T',
help='Title of the plot, to be printed on top of '
'the generated image. Leave blank for no title. (Default: %(default)s)',
default='')
optional.add_argument('--plotFileFormat',
metavar='FILETYPE',
help='Image format type. If given, this option '
'overrides the image format based on the plotFile '
'ending. The available options are: png, '
'eps, pdf and svg.',
choices=['png', 'pdf', 'svg', 'eps', 'plotly'])
optional.add_argument(
'--removeOutliers',
help='If set, bins with very large counts are removed. '
'Bins with abnormally high read counts artificially increase '
'pearson correlation; that\'s why multiBamSummary tries '
'to remove outliers using the median absolute deviation (MAD) '
'method applying a threshold of 200 to only consider extremely '
'large deviations from the median. The ENCODE blacklist page '
'(https://sites.google.com/site/anshulkundaje/projects/blacklists) '
'contains useful information about regions with unusually high counts '
'that may be worth removing.',
action='store_true')
optional.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
group = parser.add_argument_group('Output optional options')
group.add_argument('--outFileCorMatrix',
help='Save matrix with pairwise correlation values to a tab-separated file.',
metavar='FILE',
type=writableFile)
return parser
def scatterplot_options():
"""
Options specific for creating the scatter plot
"""
parser = argparse.ArgumentParser(add_help=False)
scatter_opts = parser.add_argument_group('Scatter plot options')
scatter_opts.add_argument('--xRange',
help='The X axis range. The default scales these such that the full range of dots is displayed.',
type=int,
nargs=2,
default=None)
scatter_opts.add_argument('--yRange',
help='The Y axis range. The default scales these such that the full range of dots is displayed.',
type=int,
nargs=2,
default=None)
scatter_opts.add_argument('--log1p',
help='Plot the natural log of the scatter plot after adding 1. Note that this is ONLY for plotting, the correlation is unaffected.',
action='store_true')
return parser
def heatmap_options():
"""
Options for generating the correlation heatmap
"""
parser = argparse.ArgumentParser(add_help=False)
heatmap = parser.add_argument_group('Heatmap options')
heatmap.add_argument('--plotHeight',
help='Plot height in cm. (Default: %(default)s)',
type=float,
default=9.5)
heatmap.add_argument('--plotWidth',
help='Plot width in cm. The minimum value is 1 cm. (Default: %(default)s)',
type=float,
default=11)
heatmap.add_argument('--zMin', '-min',
default=None,
help='Minimum value for the heatmap intensities. '
'If not specified, the value is set automatically',
type=float)
heatmap.add_argument('--zMax', '-max',
default=None,
help='Maximum value for the heatmap intensities. '
'If not specified, the value is set automatically',
type=float)
heatmap.add_argument(
'--colorMap', default='jet',
metavar='',
help='Color map to use for the heatmap. Available values can be '
'seen here: '
'http://matplotlib.org/examples/color/colormaps_reference.html')
heatmap.add_argument('--plotNumbers',
help='If set, then the correlation number is plotted '
'on top of the heatmap. This option is only valid when plotting a heatmap.',
action='store_true',
required=False)
return parser
def main(args=None):
|
if args.plotFile is None and args.outFileCorMatrix is None:
sys.exit("At least one of --plotFile and --outFileCorMatrix must be specified!\n")
corr = Correlation(args.corData,
args.corMethod,
labels=args.labels,
remove_outliers=args.removeOutliers,
skip_zeros=args.skipZeros)
if args.corMethod == 'pearson':
# test if there are outliers and write a message recommending the removal
if len(corr.get_outlier_indices(np.asarray(corr.matrix).flatten())) > 0:
if args.removeOutliers:
sys.stderr.write("\nOutliers were detected in the data. They "
"will be removed to avoid bias "
"in the pearson correlation.\n")
else:
sys.stderr.write("\nOutliers were detected in the data. Consider "
"using the --removeOutliers parameter to avoid a bias "
"in the pearson correlation.\n")
if args.colorMap:
try:
plt.get_cmap(args.colorMap)
except ValueError as error:
sys.stderr.write(
"A problem was found. Message: {}\n".format(error))
exit()
if args.plotFile is not None:
if args.whatToPlot == 'scatterplot':
corr.plot_scatter(args.plotFile,
plot_title=args.plotTitle,
image_format=args.plotFileFormat,
xRange=args.xRange,
yRange=args.yRange,
log1p=args.log1p)
else:
corr.plot_correlation(args.plotFile,
vmax=args.zMax,
vmin=args.zMin,
colormap=args.colorMap,
plot_title=args.plotTitle,
image_format=args.plotFileFormat,
plot_numbers=args.plotNumbers,
plotWidth=args.plotWidth,
plotHeight=args.plotHeight)
if args.outFileCorMatrix:
o = open(args.outFileCorMatrix, "w")
o.write("#plotCorrelation --outFileCorMatrix\n")
corr.save_corr_matrix(o)
o.close() | args = parse_arguments().parse_args(args) | random_line_split |
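This second plotCorrelation.py row is a random_line_split example: the line that assigns args inside main() was cut out as the middle, and the prefix and suffix columns hold everything before and after it. A short sketch of how a consumer of this dump would put such a row back together; the dict literal simply mirrors the dataset's column names.

# Hedged sketch: 'row' is a hand-written dict shaped like one dataset row;
# in practice rows would come from whatever loader reads this dump.
row = {
    'file_name': 'plotCorrelation.py',
    'prefix': 'def main(args=None):\n',
    'middle': '    args = parse_arguments().parse_args(args)\n',
    'suffix': '    if args.plotFile is None and args.outFileCorMatrix is None:\n',
    'fim_type': 'random_line_split',
}

def reassemble(row):
    # For every split type the original text is prefix + middle + suffix.
    return row['prefix'] + row['middle'] + row['suffix']

source = reassemble(row)
assert row['middle'] in source
print(source)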
plotCorrelation.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import argparse
import numpy as np
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['svg.fonttype'] = 'none'
from deeptools import cm # noqa: F401
import matplotlib.pyplot as plt
from deeptools.correlation import Correlation
from deeptools.parserCommon import writableFile
from deeptools._version import __version__
old_settings = np.seterr(all='ignore')
def parse_arguments(args=None):
basic_args = plot_correlation_args()
heatmap_parser = heatmap_options()
scatter_parser = scatterplot_options()
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""
Tool for the analysis and visualization of sample correlations based on the output of multiBamSummary or
multiBigwigSummary. Pearson or Spearman methods are available to compute correlation
coefficients. Results can be saved as multiple
scatter plots depicting the pairwise correlations or as a clustered heatmap,
where the colors represent the correlation coefficients and the clusters are constructed using complete linkage.
Optionally, the values can be saved as tables, too.
detailed help:
plotCorrelation -h
""",
epilog='example usages:\n'
'plotCorrelation -in results_file --whatToPlot heatmap --corMethod pearson -o heatmap.png\n\n'
' \n\n',
parents=[basic_args, heatmap_parser, scatter_parser])
return parser
def plot_correlation_args():
parser = argparse.ArgumentParser(add_help=False)
required = parser.add_argument_group('Required arguments')
# define the arguments
required.add_argument('--corData', '-in',
metavar='FILE',
help='Compressed matrix of values generated by multiBigwigSummary or multiBamSummary',
required=True)
required.add_argument('--corMethod', '-c',
help="Correlation method.",
choices=['spearman', 'pearson'],
required=True)
required.add_argument('--whatToPlot', '-p',
help="Choose between a heatmap or pairwise scatter plots",
choices=['heatmap', 'scatterplot'],
required=True)
optional = parser.add_argument_group('Optional arguments')
optional.add_argument('--plotFile', '-o',
help='File to save the heatmap to. The file extension determines the format, '
'so heatmap.pdf will save the heatmap in PDF format. '
'The available formats are: .png, '
'.eps, .pdf and .svg.',
type=writableFile,
metavar='FILE')
optional.add_argument('--skipZeros',
help='By setting this option, genomic regions '
'that have zero or missing (nan) values in all samples '
'are excluded.',
action='store_true',
required=False)
optional.add_argument('--labels', '-l',
metavar='sample1 sample2',
help='User defined labels instead of default labels from '
'file names. '
'Multiple labels have to be separated by spaces, e.g. '
'--labels sample1 sample2 sample3',
nargs='+')
optional.add_argument('--plotTitle', '-T',
help='Title of the plot, to be printed on top of '
'the generated image. Leave blank for no title. (Default: %(default)s)',
default='')
optional.add_argument('--plotFileFormat',
metavar='FILETYPE',
help='Image format type. If given, this option '
'overrides the image format based on the plotFile '
'ending. The available options are: png, '
'eps, pdf and svg.',
choices=['png', 'pdf', 'svg', 'eps', 'plotly'])
optional.add_argument(
'--removeOutliers',
help='If set, bins with very large counts are removed. '
'Bins with abnormally high read counts artificially increase '
'pearson correlation; that\'s why multiBamSummary tries '
'to remove outliers using the median absolute deviation (MAD) '
'method applying a threshold of 200 to only consider extremely '
'large deviations from the median. The ENCODE blacklist page '
'(https://sites.google.com/site/anshulkundaje/projects/blacklists) '
'contains useful information about regions with unusually high counts '
'that may be worth removing.',
action='store_true')
optional.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
group = parser.add_argument_group('Output optional options')
group.add_argument('--outFileCorMatrix',
help='Save matrix with pairwise correlation values to a tab-separated file.',
metavar='FILE',
type=writableFile)
return parser
def | ():
"""
Options specific for creating the scatter plot
"""
parser = argparse.ArgumentParser(add_help=False)
scatter_opts = parser.add_argument_group('Scatter plot options')
scatter_opts.add_argument('--xRange',
help='The X axis range. The default scales these such that the full range of dots is displayed.',
type=int,
nargs=2,
default=None)
scatter_opts.add_argument('--yRange',
help='The Y axis range. The default scales these such that the full range of dots is displayed.',
type=int,
nargs=2,
default=None)
scatter_opts.add_argument('--log1p',
help='Plot the natural log of the scatter plot after adding 1. Note that this is ONLY for plotting, the correlation is unaffected.',
action='store_true')
return parser
def heatmap_options():
"""
Options for generating the correlation heatmap
"""
parser = argparse.ArgumentParser(add_help=False)
heatmap = parser.add_argument_group('Heatmap options')
heatmap.add_argument('--plotHeight',
help='Plot height in cm. (Default: %(default)s)',
type=float,
default=9.5)
heatmap.add_argument('--plotWidth',
help='Plot width in cm. The minimum value is 1 cm. (Default: %(default)s)',
type=float,
default=11)
heatmap.add_argument('--zMin', '-min',
default=None,
help='Minimum value for the heatmap intensities. '
'If not specified, the value is set automatically',
type=float)
heatmap.add_argument('--zMax', '-max',
default=None,
help='Maximum value for the heatmap intensities. '
'If not specified, the value is set automatically',
type=float)
heatmap.add_argument(
'--colorMap', default='jet',
metavar='',
help='Color map to use for the heatmap. Available values can be '
'seen here: '
'http://matplotlib.org/examples/color/colormaps_reference.html')
heatmap.add_argument('--plotNumbers',
help='If set, then the correlation number is plotted '
'on top of the heatmap. This option is only valid when plotting a heatmap.',
action='store_true',
required=False)
return parser
def main(args=None):
args = parse_arguments().parse_args(args)
if args.plotFile is None and args.outFileCorMatrix is None:
sys.exit("At least one of --plotFile and --outFileCorMatrix must be specified!\n")
corr = Correlation(args.corData,
args.corMethod,
labels=args.labels,
remove_outliers=args.removeOutliers,
skip_zeros=args.skipZeros)
if args.corMethod == 'pearson':
# test if there are outliers and write a message recommending the removal
if len(corr.get_outlier_indices(np.asarray(corr.matrix).flatten())) > 0:
if args.removeOutliers:
sys.stderr.write("\nOutliers were detected in the data. They "
"will be removed to avoid bias "
"in the pearson correlation.\n")
else:
sys.stderr.write("\nOutliers were detected in the data. Consider "
"using the --removeOutliers parameter to avoid a bias "
"in the pearson correlation.\n")
if args.colorMap:
try:
plt.get_cmap(args.colorMap)
except ValueError as error:
sys.stderr.write(
"A problem was found. Message: {}\n".format(error))
exit()
if args.plotFile is not None:
if args.whatToPlot == 'scatterplot':
corr.plot_scatter(args.plotFile,
plot_title=args.plotTitle,
image_format=args.plotFileFormat,
xRange=args.xRange,
yRange=args.yRange,
log1p=args.log1p)
else:
corr.plot_correlation(args.plotFile,
vmax=args.zMax,
vmin=args.zMin,
colormap=args.colorMap,
plot_title=args.plotTitle,
image_format=args.plotFileFormat,
plot_numbers=args.plotNumbers,
plotWidth=args.plotWidth,
plotHeight=args.plotHeight)
if args.outFileCorMatrix:
o = open(args.outFileCorMatrix, "w")
o.write("#plotCorrelation --outFileCorMatrix\n")
corr.save_corr_matrix(o)
o.close()
| scatterplot_options | identifier_name |
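parse_arguments in the row above builds three add_help=False parsers and merges them through argparse's parents mechanism. The sketch below isolates that pattern with two toy option groups so it can run without deeptools; the option names are borrowed from the script purely as examples.

import argparse

# Hedged sketch of the parents= pattern: each feature area owns a parser
# created with add_help=False, and the top-level parser inherits them all.
def scatter_opts():
    p = argparse.ArgumentParser(add_help=False)
    g = p.add_argument_group('Scatter plot options')
    g.add_argument('--xRange', type=int, nargs=2, default=None)
    return p

def heatmap_opts():
    p = argparse.ArgumentParser(add_help=False)
    g = p.add_argument_group('Heatmap options')
    g.add_argument('--colorMap', default='jet')
    return p

parser = argparse.ArgumentParser(parents=[scatter_opts(), heatmap_opts()])
args = parser.parse_args(['--xRange', '0', '10', '--colorMap', 'viridis'])
print(args.xRange, args.colorMap)  # [0, 10] viridis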
plotCorrelation.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import argparse
import numpy as np
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['svg.fonttype'] = 'none'
from deeptools import cm # noqa: F401
import matplotlib.pyplot as plt
from deeptools.correlation import Correlation
from deeptools.parserCommon import writableFile
from deeptools._version import __version__
old_settings = np.seterr(all='ignore')
def parse_arguments(args=None):
basic_args = plot_correlation_args()
heatmap_parser = heatmap_options()
scatter_parser = scatterplot_options()
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""
Tool for the analysis and visualization of sample correlations based on the output of multiBamSummary or
multiBigwigSummary. Pearson or Spearman methods are available to compute correlation
coefficients. Results can be saved as multiple
scatter plots depicting the pairwise correlations or as a clustered heatmap,
where the colors represent the correlation coefficients and the clusters are constructed using complete linkage.
Optionally, the values can be saved as tables, too.
detailed help:
plotCorrelation -h
""",
epilog='example usages:\n'
'plotCorrelation -in results_file --whatToPlot heatmap --corMethod pearson -o heatmap.png\n\n'
' \n\n',
parents=[basic_args, heatmap_parser, scatter_parser])
return parser
def plot_correlation_args():
parser = argparse.ArgumentParser(add_help=False)
required = parser.add_argument_group('Required arguments')
# define the arguments
required.add_argument('--corData', '-in',
metavar='FILE',
help='Compressed matrix of values generated by multiBigwigSummary or multiBamSummary',
required=True)
required.add_argument('--corMethod', '-c',
help="Correlation method.",
choices=['spearman', 'pearson'],
required=True)
required.add_argument('--whatToPlot', '-p',
help="Choose between a heatmap or pairwise scatter plots",
choices=['heatmap', 'scatterplot'],
required=True)
optional = parser.add_argument_group('Optional arguments')
optional.add_argument('--plotFile', '-o',
help='File to save the heatmap to. The file extension determines the format, '
'so heatmap.pdf will save the heatmap in PDF format. '
'The available formats are: .png, '
'.eps, .pdf and .svg.',
type=writableFile,
metavar='FILE')
optional.add_argument('--skipZeros',
help='By setting this option, genomic regions '
'that have zero or missing (nan) values in all samples '
'are excluded.',
action='store_true',
required=False)
optional.add_argument('--labels', '-l',
metavar='sample1 sample2',
help='User defined labels instead of default labels from '
'file names. '
'Multiple labels have to be separated by spaces, e.g. '
'--labels sample1 sample2 sample3',
nargs='+')
optional.add_argument('--plotTitle', '-T',
help='Title of the plot, to be printed on top of '
'the generated image. Leave blank for no title. (Default: %(default)s)',
default='')
optional.add_argument('--plotFileFormat',
metavar='FILETYPE',
help='Image format type. If given, this option '
'overrides the image format based on the plotFile '
'ending. The available options are: png, '
'eps, pdf and svg.',
choices=['png', 'pdf', 'svg', 'eps', 'plotly'])
optional.add_argument(
'--removeOutliers',
help='If set, bins with very large counts are removed. '
'Bins with abnormally high read counts artificially increase '
'pearson correlation; that\'s why multiBamSummary tries '
'to remove outliers using the median absolute deviation (MAD) '
'method applying a threshold of 200 to only consider extremely '
'large deviations from the median. The ENCODE blacklist page '
'(https://sites.google.com/site/anshulkundaje/projects/blacklists) '
'contains useful information about regions with unusually high counts '
'that may be worth removing.',
action='store_true')
optional.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
group = parser.add_argument_group('Output optional options')
group.add_argument('--outFileCorMatrix',
help='Save matrix with pairwise correlation values to a tab-separated file.',
metavar='FILE',
type=writableFile)
return parser
def scatterplot_options():
"""
Options specific for creating the scatter plot
"""
parser = argparse.ArgumentParser(add_help=False)
scatter_opts = parser.add_argument_group('Scatter plot options')
scatter_opts.add_argument('--xRange',
help='The X axis range. The default scales these such that the full range of dots is displayed.',
type=int,
nargs=2,
default=None)
scatter_opts.add_argument('--yRange',
help='The Y axis range. The default scales these such that the full range of dots is displayed.',
type=int,
nargs=2,
default=None)
scatter_opts.add_argument('--log1p',
help='Plot the natural log of the scatter plot after adding 1. Note that this is ONLY for plotting, the correlation is unaffected.',
action='store_true')
return parser
def heatmap_options():
"""
Options for generating the correlation heatmap
"""
parser = argparse.ArgumentParser(add_help=False)
heatmap = parser.add_argument_group('Heatmap options')
heatmap.add_argument('--plotHeight',
help='Plot height in cm. (Default: %(default)s)',
type=float,
default=9.5)
heatmap.add_argument('--plotWidth',
help='Plot width in cm. The minimum value is 1 cm. (Default: %(default)s)',
type=float,
default=11)
heatmap.add_argument('--zMin', '-min',
default=None,
help='Minimum value for the heatmap intensities. '
'If not specified, the value is set automatically',
type=float)
heatmap.add_argument('--zMax', '-max',
default=None,
help='Maximum value for the heatmap intensities. '
'If not specified, the value is set automatically',
type=float)
heatmap.add_argument(
'--colorMap', default='jet',
metavar='',
help='Color map to use for the heatmap. Available values can be '
'seen here: '
'http://matplotlib.org/examples/color/colormaps_reference.html')
heatmap.add_argument('--plotNumbers',
help='If set, then the correlation number is plotted '
'on top of the heatmap. This option is only valid when plotting a heatmap.',
action='store_true',
required=False)
return parser
def main(args=None):
args = parse_arguments().parse_args(args)
if args.plotFile is None and args.outFileCorMatrix is None:
sys.exit("At least one of --plotFile and --outFileCorMatrix must be specified!\n")
corr = Correlation(args.corData,
args.corMethod,
labels=args.labels,
remove_outliers=args.removeOutliers,
skip_zeros=args.skipZeros)
if args.corMethod == 'pearson':
# test if there are outliers and write a message recommending the removal
if len(corr.get_outlier_indices(np.asarray(corr.matrix).flatten())) > 0:
if args.removeOutliers:
sys.stderr.write("\nOutliers were detected in the data. They "
"will be removed to avoid bias "
"in the pearson correlation.\n")
else:
sys.stderr.write("\nOutliers were detected in the data. Consider "
"using the --removeOutliers parameter to avoid a bias "
"in the pearson correlation.\n")
if args.colorMap:
try:
plt.get_cmap(args.colorMap)
except ValueError as error:
sys.stderr.write(
"A problem was found. Message: {}\n".format(error))
exit()
if args.plotFile is not None:
|
if args.outFileCorMatrix:
o = open(args.outFileCorMatrix, "w")
o.write("#plotCorrelation --outFileCorMatrix\n")
corr.save_corr_matrix(o)
o.close()
| if args.whatToPlot == 'scatterplot':
corr.plot_scatter(args.plotFile,
plot_title=args.plotTitle,
image_format=args.plotFileFormat,
xRange=args.xRange,
yRange=args.yRange,
log1p=args.log1p)
else:
corr.plot_correlation(args.plotFile,
vmax=args.zMax,
vmin=args.zMin,
colormap=args.colorMap,
plot_title=args.plotTitle,
image_format=args.plotFileFormat,
plot_numbers=args.plotNumbers,
plotWidth=args.plotWidth,
plotHeight=args.plotHeight) | conditional_block |
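The conditional block shown as this row's middle runs only after main() has validated --colorMap by probing plt.get_cmap inside a try/except. The sketch below lifts that guard out on its own; it assumes nothing beyond matplotlib and the Agg backend already used above.

import matplotlib
matplotlib.use('Agg')  # headless backend, matching the script above
import matplotlib.pyplot as plt

def check_colormap(name):
    # Mirrors the guard in main(): an unknown colormap is expected to raise
    # ValueError, which the script reports before exiting.
    try:
        plt.get_cmap(name)
        return True
    except ValueError as error:
        print('A problem was found. Message: {}'.format(error))
        return False

print(check_colormap('viridis'))           # True
print(check_colormap('no-such-colormap'))  # prints the message, then False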
bs.js | For licensing, see LICENSE.md or http://ckeditor.com/license
*/
CKEDITOR.plugins.setLang( 'forms', 'bs', {
button: {
title: 'Button Properties',
text: 'Text (Value)',
type: 'Type',
typeBtn: 'Button',
typeSbm: 'Submit',
typeRst: 'Reset'
},
checkboxAndRadio: {
checkboxTitle: 'Checkbox Properties',
radioTitle: 'Radio Button Properties',
value: 'Value',
selected: 'Selected'
},
form: {
title: 'Form Properties',
menu: 'Form Properties',
action: 'Action',
method: 'Method',
encoding: 'Encoding'
},
hidden: {
title: 'Hidden Field Properties',
name: 'Name',
value: 'Value'
},
select: {
title: 'Selection Field Properties',
selectInfo: 'Select Info',
opAvail: 'Available Options',
value: 'Value',
size: 'Size',
lines: 'lines',
chkMulti: 'Allow multiple selections',
opText: 'Text',
opValue: 'Value',
btnAdd: 'Add',
btnModify: 'Modify',
btnUp: 'Up',
btnDown: 'Down',
btnSetValue: 'Set as selected value',
btnDelete: 'Delete'
},
textarea: {
title: 'Textarea Properties',
cols: 'Columns',
rows: 'Rows'
},
textfield: {
title: 'Text Field Properties',
name: 'Name',
value: 'Value',
charWidth: 'Character Width',
maxChars: 'Maximum Characters',
type: 'Type',
typeText: 'Text',
typePass: 'Password',
typeEmail: 'Email', // MISSING
typeSearch: 'Search', // MISSING
typeTel: 'Telephone Number', // MISSING
typeUrl: 'URL'
}
} ); | /*
Copyright (c) 2003-2015, CKSource - Frederico Knabben. All rights reserved.
| random_line_split |
|
test_histotoolsbase.py | from ROOT import TH1I, gROOT, kRed, kBlue
import unittest
import tempfile
import shutil
import os
from varial.extensions.cmsrun import Sample
from varial.wrappers import HistoWrapper
from varial.history import History
from varial import analysis
from varial import settings
from varial import diskio
class TestHistoToolsBase(unittest.TestCase):
| def setUp(self):
super(TestHistoToolsBase, self).setUp()
test_fs = "fileservice/"
if not os.path.exists(test_fs):
test_fs = "varial/test/" + test_fs
settings.DIR_FILESERVICE = test_fs
if (not os.path.exists(test_fs + "tt.root")) \
or (not os.path.exists(test_fs + "ttgamma.root")) \
or (not os.path.exists(test_fs + "zjets.root")):
self.fail("Fileservice testfiles not present!")
# create samples
analysis.all_samples["tt"] = Sample(
name = "tt",
is_data = True,
lumi = 3.,
legend = "pseudo data",
input_files = ["none"],
)
analysis.all_samples["ttgamma"] = Sample(
name = "ttgamma",
lumi = 4.,
legend = "tt gamma",
input_files = ["none"],
)
analysis.all_samples["zjets"] = Sample(
name = "zjets",
lumi = 0.1,
legend = "z jets",
input_files = ["none"],
)
analysis.colors = {
"tt gamma": kRed,
"z jets": kBlue
}
settings.stacking_order = [
"tt gamma",
"z jets"
]
analysis.active_samples = analysis.all_samples.keys()
# create a test wrapper
h1 = TH1I("h1", "H1", 2, .5, 4.5)
h1.Fill(1)
h1.Fill(3,2)
hist = History("test_op") # create some fake history
hist.add_args([History("fake_input_A"), History("fake_input_B")])
hist.add_kws({"john": "cleese"})
self.test_wrp = HistoWrapper(
h1,
name="Nam3",
title="T1tl3",
history=hist
)
self.test_dir = tempfile.mkdtemp()
analysis.cwd = self.test_dir
def tearDown(self):
super(TestHistoToolsBase, self).tearDown()
del self.test_wrp
diskio.close_open_root_files()
gROOT.Reset()
if os.path.exists(self.test_dir):
os.system('rm -r %s' % self.test_dir) | identifier_body |
|
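The fixture above builds a scratch directory with tempfile.mkdtemp in setUp and removes it in tearDown by shelling out to rm -r. The sketch below shows the same lifecycle with only the standard library, using shutil.rmtree (already imported at the top of the file) instead of os.system; it is independent of ROOT and varial.

import os
import shutil
import tempfile
import unittest

class ScratchDirTestCase(unittest.TestCase):
    # Hedged sketch of the same setUp/tearDown pattern, minus ROOT/varial.
    def setUp(self):
        self.test_dir = tempfile.mkdtemp()

    def tearDown(self):
        if os.path.exists(self.test_dir):
            shutil.rmtree(self.test_dir)  # portable equivalent of 'rm -r'

    def test_dir_is_writable(self):
        path = os.path.join(self.test_dir, 'probe.txt')
        with open(path, 'w') as f:
            f.write('ok')
        self.assertTrue(os.path.exists(path))

if __name__ == '__main__':
    unittest.main()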
test_histotoolsbase.py | from ROOT import TH1I, gROOT, kRed, kBlue
import unittest
import tempfile
import shutil
import os
from varial.extensions.cmsrun import Sample
from varial.wrappers import HistoWrapper
from varial.history import History
from varial import analysis
from varial import settings
from varial import diskio
class TestHistoToolsBase(unittest.TestCase):
def setUp(self):
super(TestHistoToolsBase, self).setUp()
test_fs = "fileservice/"
if not os.path.exists(test_fs):
test_fs = "varial/test/" + test_fs
settings.DIR_FILESERVICE = test_fs
if (not os.path.exists(test_fs + "tt.root")) \
or (not os.path.exists(test_fs + "ttgamma.root")) \
or (not os.path.exists(test_fs + "zjets.root")):
self.fail("Fileservice testfiles not present!")
# create samples
analysis.all_samples["tt"] = Sample(
name = "tt",
is_data = True,
lumi = 3.,
legend = "pseudo data",
input_files = ["none"],
)
analysis.all_samples["ttgamma"] = Sample(
name = "ttgamma",
lumi = 4.,
legend = "tt gamma",
input_files = ["none"],
)
analysis.all_samples["zjets"] = Sample(
name = "zjets",
lumi = 0.1,
legend = "z jets",
input_files = ["none"],
)
analysis.colors = {
"tt gamma": kRed,
"z jets": kBlue
}
settings.stacking_order = [
"tt gamma",
"z jets"
]
analysis.active_samples = analysis.all_samples.keys()
# create a test wrapper
h1 = TH1I("h1", "H1", 2, .5, 4.5)
h1.Fill(1)
h1.Fill(3,2)
hist = History("test_op") # create some fake history
hist.add_args([History("fake_input_A"), History("fake_input_B")])
hist.add_kws({"john": "cleese"})
self.test_wrp = HistoWrapper(
h1,
name="Nam3",
title="T1tl3",
history=hist
)
self.test_dir = tempfile.mkdtemp()
analysis.cwd = self.test_dir
def tearDown(self):
super(TestHistoToolsBase, self).tearDown()
del self.test_wrp
diskio.close_open_root_files()
gROOT.Reset()
if os.path.exists(self.test_dir):
| os.system('rm -r %s' % self.test_dir) | conditional_block |
|
test_histotoolsbase.py | from ROOT import TH1I, gROOT, kRed, kBlue
import unittest
import tempfile
import shutil
import os
from varial.extensions.cmsrun import Sample
from varial.wrappers import HistoWrapper
from varial.history import History
from varial import analysis
from varial import settings
from varial import diskio
class TestHistoToolsBase(unittest.TestCase):
def setUp(self):
super(TestHistoToolsBase, self).setUp()
test_fs = "fileservice/"
if not os.path.exists(test_fs):
test_fs = "varial/test/" + test_fs
settings.DIR_FILESERVICE = test_fs
if (not os.path.exists(test_fs + "tt.root")) \
or (not os.path.exists(test_fs + "ttgamma.root")) \
or (not os.path.exists(test_fs + "zjets.root")):
self.fail("Fileservice testfiles not present!")
# create samples
analysis.all_samples["tt"] = Sample(
name = "tt",
is_data = True,
lumi = 3.,
legend = "pseudo data",
input_files = ["none"],
)
analysis.all_samples["ttgamma"] = Sample(
name = "ttgamma",
lumi = 4.,
legend = "tt gamma",
input_files = ["none"],
)
analysis.all_samples["zjets"] = Sample(
name = "zjets",
lumi = 0.1,
legend = "z jets",
input_files = ["none"],
)
analysis.colors = {
"tt gamma": kRed,
"z jets": kBlue
}
settings.stacking_order = [
"tt gamma",
"z jets"
]
analysis.active_samples = analysis.all_samples.keys()
# create a test wrapper
h1 = TH1I("h1", "H1", 2, .5, 4.5)
h1.Fill(1)
h1.Fill(3,2)
hist = History("test_op") # create some fake history
hist.add_args([History("fake_input_A"), History("fake_input_B")])
hist.add_kws({"john": "cleese"})
self.test_wrp = HistoWrapper(
h1,
name="Nam3",
title="T1tl3",
history=hist
)
self.test_dir = tempfile.mkdtemp()
analysis.cwd = self.test_dir
def | (self):
super(TestHistoToolsBase, self).tearDown()
del self.test_wrp
diskio.close_open_root_files()
gROOT.Reset()
if os.path.exists(self.test_dir):
os.system('rm -r %s' % self.test_dir)
| tearDown | identifier_name |
test_histotoolsbase.py | from ROOT import TH1I, gROOT, kRed, kBlue
import unittest
import tempfile
import shutil
import os
from varial.extensions.cmsrun import Sample
from varial.wrappers import HistoWrapper
from varial.history import History
from varial import analysis
from varial import settings
from varial import diskio
class TestHistoToolsBase(unittest.TestCase):
def setUp(self):
super(TestHistoToolsBase, self).setUp()
test_fs = "fileservice/"
if not os.path.exists(test_fs):
test_fs = "varial/test/" + test_fs
settings.DIR_FILESERVICE = test_fs
if (not os.path.exists(test_fs + "tt.root")) \
or (not os.path.exists(test_fs + "ttgamma.root")) \
or (not os.path.exists(test_fs + "zjets.root")):
self.fail("Fileservice testfiles not present!")
# create samples
analysis.all_samples["tt"] = Sample(
name = "tt",
is_data = True,
lumi = 3.,
legend = "pseudo data",
input_files = ["none"],
)
analysis.all_samples["ttgamma"] = Sample(
name = "ttgamma",
lumi = 4.,
legend = "tt gamma",
input_files = ["none"],
)
analysis.all_samples["zjets"] = Sample(
name = "zjets",
lumi = 0.1,
legend = "z jets",
input_files = ["none"],
)
analysis.colors = {
"tt gamma": kRed,
"z jets": kBlue
}
settings.stacking_order = [ | ]
analysis.active_samples = analysis.all_samples.keys()
# create a test wrapper
h1 = TH1I("h1", "H1", 2, .5, 4.5)
h1.Fill(1)
h1.Fill(3,2)
hist = History("test_op") # create some fake history
hist.add_args([History("fake_input_A"), History("fake_input_B")])
hist.add_kws({"john": "cleese"})
self.test_wrp = HistoWrapper(
h1,
name="Nam3",
title="T1tl3",
history=hist
)
self.test_dir = tempfile.mkdtemp()
analysis.cwd = self.test_dir
def tearDown(self):
super(TestHistoToolsBase, self).tearDown()
del self.test_wrp
diskio.close_open_root_files()
gROOT.Reset()
if os.path.exists(self.test_dir):
os.system('rm -r %s' % self.test_dir) | "tt gamma",
"z jets" | random_line_split |
debugkarma.conf.js | module.exports = function(config) {
config.set({
// base path that will be used to resolve all patterns (eg. files, exclude)
basePath: '',
// frameworks to use
// available frameworks: https://npmjs.org/browse/keyword/karma-adapter
frameworks: ['jasmine'],
files: [
'src/bobril.js',
'src/**/*.js',
'test/**/*.js',
{ pattern: 'src/**/*.js.map', included: false },
{ pattern: 'src/**/*.ts', included: false },
{ pattern: 'test/**/*.js.map', included: false },
{ pattern: 'test/**/*.ts', included: false },
{ pattern: 'examples/**/*.*', included: false }
], | // available reporters: https://npmjs.org/browse/keyword/karma-reporter
reporters: ['progress'],
// web server port
port: 8765,
// enable / disable colors in the output (reporters and logs)
colors: true,
// level of logging
// possible values: config.LOG_DISABLE || config.LOG_ERROR || config.LOG_WARN || config.LOG_INFO || config.LOG_DEBUG
logLevel: config.LOG_INFO,
// enable / disable watching file and executing tests whenever any file changes
autoWatch: true,
// start these browsers
// available browser launchers: https://npmjs.org/browse/keyword/karma-launcher
browsers: ['PhantomJS'],
// Continuous Integration mode
// if true, Karma captures browsers, runs the tests and exits
singleRun: false
});
}; |
// test results reporter to use
// possible values: 'dots', 'progress' | random_line_split |
mediawiki.js | // mediawiki add categories
/*jslint node: true */
'use strict';
var qs = require('querystring');
var utils = require('../utils');
var bot = require('nodemw');
exports.search = function(api, context, callback) {
if (api.indexOf('mediawiki.category.') === 0) {
if (api === 'mediawiki.category.wikipedia') |
}
};
| {
var bot = require('nodemw'), method = 'http://', host = 'en.wikipedia.org', apiPath = '/w', articlePath = '/wiki', client = new bot({
server: host,
path: apiPath
});
context.referers = 'http://en.wikipedia.org/w/api.php';
client.getPagesInCategory(context.query, function(pages) {
pages.forEach(function(page) {
callback(null, method + host + articlePath + '/' + page.title.replace(/ /g, '_'), context);
});
});
} | conditional_block |
mediawiki.js | // mediawiki add categories
/*jslint node: true */
'use strict';
var qs = require('querystring'); | var utils = require('../utils');
var bot = require('nodemw');
exports.search = function(api, context, callback) {
if (api.indexOf('mediawiki.category.') === 0) {
if (api === 'mediawiki.category.wikipedia') {
var bot = require('nodemw'), method = 'http://', host = 'en.wikipedia.org', apiPath = '/w', articlePath = '/wiki', client = new bot({
server: host,
path: apiPath
});
context.referers = 'http://en.wikipedia.org/w/api.php';
client.getPagesInCategory(context.query, function(pages) {
pages.forEach(function(page) {
callback(null, method + host + articlePath + '/' + page.title.replace(/ /g, '_'), context);
});
});
}
}
}; | random_line_split |
|
index.ts | /*
MIT License
Copyright (c) 2020 Looker Data Sciences, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | SOFTWARE.
*/
export { NotFoundScene } from './NotFoundScene' | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | random_line_split |
urn.ts | /** | */
const encodedSlashRegExp = new RegExp(encodeURIComponent('/'), 'g');
/**
* Replaces any occurrence of / with the encoded equivalent
* @param {string} urn
* @return {string}
*/
export const encodeForwardSlash = (urn: string): string => urn.replace(/\//g, () => encodeURIComponent('/'));
/**
* Replaces encoded slashes with /
* @param {string} urn
* @return {string}
*/
export const decodeForwardSlash = (urn: string): string =>
urn.replace(encodedSlashRegExp, () => decodeURIComponent('/'));
/**
* Replaces occurrences of / with the encoded counterpart in a urn string
* @param {string} urn
* @return {string}
*/
export const encodeUrn = (urn: string): string => encodeForwardSlash(urn);
/**
* Replaces encoded occurrences of / with the string /
* @param {string} urn
* @return {string}
*/
export const decodeUrn = (urn: string): string => decodeForwardSlash(urn);
/**
* Stores the encoded URL for the asterisk/wildcard symbol since encodeURIComponent doesn't catch these
* as a reserved symbol
* @type {string}
*/
const encodedWildcard = '%2A';
/**
 * Cached RegExp object for a global search of the encoded wildcard
* @type {RegExp}
*/
const encodedWildcardRegExp = new RegExp(encodedWildcard, 'g');
/**
 * Replaces any occurrence of * with the encoded equivalent
* @param {string} urn
* @return {string}
*/
export const encodeWildcard = (urn: string): string => urn.replace(/\*/g, encodedWildcard);
/**
 * Replaces encoded wildcards with *
* @param {string} urn
* @return {string}
*/
export const decodeWildcard = (urn: string): string => urn.replace(encodedWildcardRegExp, decodeURIComponent('*'));
/**
* Will extract the entity type from a urn
* @param urn
*/
export const extractEntityType = (urn: string): string | undefined => urn.split(':')[2]; | * Cached RegExp object for a global search of /
* @type {RegExp} | random_line_split |
shift.py | import numpy
from chainer.backends import cuda
from chainer import function_node
from chainer.utils import type_check
def _pair(x):
if hasattr(x, '__getitem__'):
return x
return x, x
class Shift(function_node.FunctionNode):
def __init__(self, ksize=3, dilate=1):
super(Shift, self).__init__()
self.kh, self.kw = _pair(ksize)
if self.kh % 2 != 1:
raise ValueError('kh must be odd')
if self.kw % 2 != 1:
raise ValueError('kw must be odd')
self.dy, self.dx = _pair(dilate)
def check_type_forward(self, in_types):
n_in = in_types.size()
type_check.expect(n_in == 1)
x_type = in_types[0]
type_check.expect(
x_type.dtype.kind == 'f',
x_type.ndim == 4,
x_type.shape[1] >= self.kh * self.kw,
)
def forward_cpu(self, inputs):
x = inputs[0]
b, c, h, w = x.shape
py = self.kh // 2 * abs(self.dy)
px = self.kw // 2 * abs(self.dx)
x = numpy.pad(x, ((0, 0), (0, 0), (py, py), (px, px)),
'constant')
n_groups = self.kh * self.kw
group_size = c // n_groups
ret = []
for i, group_idx in enumerate(range(n_groups)):
# Make sure that center group is last
if group_idx == (n_groups - 1) // 2:
group_idx = n_groups - 1
elif group_idx == (n_groups - 1):
group_idx = (n_groups - 1) // 2
ky = (group_idx // self.kw) - py // abs(self.dy)
kx = (group_idx % self.kw) - px // abs(self.dx)
hs = py + -ky * self.dy
ws = px + -kx * self.dx
he = hs + h
we = ws + w
cs = i * group_size
ce = (i + 1) * group_size if i < n_groups - 1 else None
ret.append(x[:, cs:ce, hs:he, ws:we])
return numpy.concatenate(ret, axis=1),
def forward_gpu(self, inputs):
x = inputs[0]
b, c, h, w = x.shape
y = cuda.cupy.empty_like(x)
cuda.elementwise(
'raw T x, int32 c, int32 h, int32 w,'
'int32 kh, int32 kw,'
'int32 dy, int32 dx',
'T y',
'''
int b0 = i / (c * h * w);
int rest = i % (c * h * w);
int c0 = rest / (h * w);
rest %= h * w;
int out_row = rest / w;
int out_col = rest % w;
int n_groups = kh * kw;
int group_size = c / n_groups;
int group_idx = c0 / group_size;
// Make sure that center group is last
if (group_idx == (n_groups - 1) / 2) { |
int ky = (group_idx / kw) - kh / 2;
int kx = (group_idx % kw) - kw / 2;
if (group_idx >= n_groups) {
ky = 0;
kx = 0;
}
int in_row = -ky * dy + out_row;
int in_col = -kx * dx + out_col;
if (in_row >= 0 && in_row < h && in_col >= 0 && in_col < w) {
y = x[b0 * c * h * w + c0 * h * w + in_row * w + in_col];
} else {
y = 0;
}
''',
'shift_gpu')(x, c, h, w, self.kh, self.kw, self.dy, self.dx, y)
return y,
def backward(self, indexes, grad_outputs):
return shift(grad_outputs[0], ksize=(self.kh, self.kw),
dilate=(-self.dy, -self.dx)),
def shift(x, ksize=3, dilate=1):
"""Shift function.
See: `Shift: A Zero FLOP, Zero Parameter Alternative to Spatial \
Convolutions <https://arxiv.org/abs/1711.08141>`_
Args:
x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`):
Input variable of shape :math:`(n, c, h, w)`.
ksize (int or pair of ints): Size of filters (a.k.a. kernels).
``ksize=k`` and ``ksize=(k, k)`` are equivalent.
dilate (int or pair of ints): Dilation factor of filter applications.
``dilate=d`` and ``dilate=(d, d)`` are equivalent.
Returns:
~chainer.Variable:
Output variable of same shape as ``x``.
"""
fnode = Shift(ksize, dilate)
y, = fnode.apply((x,))
return y | group_idx = n_groups - 1;
} else if (group_idx == n_groups - 1) {
group_idx = (n_groups - 1) / 2;
} | random_line_split |
shift.py | import numpy
from chainer.backends import cuda
from chainer import function_node
from chainer.utils import type_check
def _pair(x):
if hasattr(x, '__getitem__'):
return x
return x, x
class Shift(function_node.FunctionNode):
def | (self, ksize=3, dilate=1):
super(Shift, self).__init__()
self.kh, self.kw = _pair(ksize)
if self.kh % 2 != 1:
raise ValueError('kh must be odd')
if self.kw % 2 != 1:
raise ValueError('kw must be odd')
self.dy, self.dx = _pair(dilate)
def check_type_forward(self, in_types):
n_in = in_types.size()
type_check.expect(n_in == 1)
x_type = in_types[0]
type_check.expect(
x_type.dtype.kind == 'f',
x_type.ndim == 4,
x_type.shape[1] >= self.kh * self.kw,
)
def forward_cpu(self, inputs):
x = inputs[0]
b, c, h, w = x.shape
py = self.kh // 2 * abs(self.dy)
px = self.kw // 2 * abs(self.dx)
x = numpy.pad(x, ((0, 0), (0, 0), (py, py), (px, px)),
'constant')
n_groups = self.kh * self.kw
group_size = c // n_groups
ret = []
for i, group_idx in enumerate(range(n_groups)):
# Make sure that center group is last
if group_idx == (n_groups - 1) // 2:
group_idx = n_groups - 1
elif group_idx == (n_groups - 1):
group_idx = (n_groups - 1) // 2
ky = (group_idx // self.kw) - py // abs(self.dy)
kx = (group_idx % self.kw) - px // abs(self.dx)
hs = py + -ky * self.dy
ws = px + -kx * self.dx
he = hs + h
we = ws + w
cs = i * group_size
ce = (i + 1) * group_size if i < n_groups - 1 else None
ret.append(x[:, cs:ce, hs:he, ws:we])
return numpy.concatenate(ret, axis=1),
def forward_gpu(self, inputs):
x = inputs[0]
b, c, h, w = x.shape
y = cuda.cupy.empty_like(x)
cuda.elementwise(
'raw T x, int32 c, int32 h, int32 w,'
'int32 kh, int32 kw,'
'int32 dy, int32 dx',
'T y',
'''
int b0 = i / (c * h * w);
int rest = i % (c * h * w);
int c0 = rest / (h * w);
rest %= h * w;
int out_row = rest / w;
int out_col = rest % w;
int n_groups = kh * kw;
int group_size = c / n_groups;
int group_idx = c0 / group_size;
// Make sure that center group is last
if (group_idx == (n_groups - 1) / 2) {
group_idx = n_groups - 1;
} else if (group_idx == n_groups - 1) {
group_idx = (n_groups - 1) / 2;
}
int ky = (group_idx / kw) - kh / 2;
int kx = (group_idx % kw) - kw / 2;
if (group_idx >= n_groups) {
ky = 0;
kx = 0;
}
int in_row = -ky * dy + out_row;
int in_col = -kx * dx + out_col;
if (in_row >= 0 && in_row < h && in_col >= 0 && in_col < w) {
y = x[b0 * c * h * w + c0 * h * w + in_row * w + in_col];
} else {
y = 0;
}
''',
'shift_gpu')(x, c, h, w, self.kh, self.kw, self.dy, self.dx, y)
return y,
def backward(self, indexes, grad_outputs):
return shift(grad_outputs[0], ksize=(self.kh, self.kw),
dilate=(-self.dy, -self.dx)),
def shift(x, ksize=3, dilate=1):
"""Shift function.
See: `Shift: A Zero FLOP, Zero Parameter Alternative to Spatial \
Convolutions <https://arxiv.org/abs/1711.08141>`_
Args:
x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`):
Input variable of shape :math:`(n, c, h, w)`.
ksize (int or pair of ints): Size of filters (a.k.a. kernels).
``ksize=k`` and ``ksize=(k, k)`` are equivalent.
dilate (int or pair of ints): Dilation factor of filter applications.
``dilate=d`` and ``dilate=(d, d)`` are equivalent.
Returns:
~chainer.Variable:
Output variable of same shape as ``x``.
"""
fnode = Shift(ksize, dilate)
y, = fnode.apply((x,))
return y
| __init__ | identifier_name |
shift.py | import numpy
from chainer.backends import cuda
from chainer import function_node
from chainer.utils import type_check
def _pair(x):
if hasattr(x, '__getitem__'):
return x
return x, x
class Shift(function_node.FunctionNode):
def __init__(self, ksize=3, dilate=1):
super(Shift, self).__init__()
self.kh, self.kw = _pair(ksize)
if self.kh % 2 != 1:
raise ValueError('kh must be odd')
if self.kw % 2 != 1:
raise ValueError('kw must be odd')
self.dy, self.dx = _pair(dilate)
def check_type_forward(self, in_types):
n_in = in_types.size()
type_check.expect(n_in == 1)
x_type = in_types[0]
type_check.expect(
x_type.dtype.kind == 'f',
x_type.ndim == 4,
x_type.shape[1] >= self.kh * self.kw,
)
def forward_cpu(self, inputs):
x = inputs[0]
b, c, h, w = x.shape
py = self.kh // 2 * abs(self.dy)
px = self.kw // 2 * abs(self.dx)
x = numpy.pad(x, ((0, 0), (0, 0), (py, py), (px, px)),
'constant')
n_groups = self.kh * self.kw
group_size = c // n_groups
ret = []
for i, group_idx in enumerate(range(n_groups)):
# Make sure that center group is last
if group_idx == (n_groups - 1) // 2:
group_idx = n_groups - 1
elif group_idx == (n_groups - 1):
group_idx = (n_groups - 1) // 2
ky = (group_idx // self.kw) - py // abs(self.dy)
kx = (group_idx % self.kw) - px // abs(self.dx)
hs = py + -ky * self.dy
ws = px + -kx * self.dx
he = hs + h
we = ws + w
cs = i * group_size
ce = (i + 1) * group_size if i < n_groups - 1 else None
ret.append(x[:, cs:ce, hs:he, ws:we])
return numpy.concatenate(ret, axis=1),
def forward_gpu(self, inputs):
x = inputs[0]
b, c, h, w = x.shape
y = cuda.cupy.empty_like(x)
cuda.elementwise(
'raw T x, int32 c, int32 h, int32 w,'
'int32 kh, int32 kw,'
'int32 dy, int32 dx',
'T y',
'''
int b0 = i / (c * h * w);
int rest = i % (c * h * w);
int c0 = rest / (h * w);
rest %= h * w;
int out_row = rest / w;
int out_col = rest % w;
int n_groups = kh * kw;
int group_size = c / n_groups;
int group_idx = c0 / group_size;
// Make sure that center group is last
if (group_idx == (n_groups - 1) / 2) {
group_idx = n_groups - 1;
} else if (group_idx == n_groups - 1) {
group_idx = (n_groups - 1) / 2;
}
int ky = (group_idx / kw) - kh / 2;
int kx = (group_idx % kw) - kw / 2;
if (group_idx >= n_groups) {
ky = 0;
kx = 0;
}
int in_row = -ky * dy + out_row;
int in_col = -kx * dx + out_col;
if (in_row >= 0 && in_row < h && in_col >= 0 && in_col < w) {
y = x[b0 * c * h * w + c0 * h * w + in_row * w + in_col];
} else {
y = 0;
}
''',
'shift_gpu')(x, c, h, w, self.kh, self.kw, self.dy, self.dx, y)
return y,
def backward(self, indexes, grad_outputs):
|
def shift(x, ksize=3, dilate=1):
"""Shift function.
See: `Shift: A Zero FLOP, Zero Parameter Alternative to Spatial \
Convolutions <https://arxiv.org/abs/1711.08141>`_
Args:
x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`):
Input variable of shape :math:`(n, c, h, w)`.
ksize (int or pair of ints): Size of filters (a.k.a. kernels).
``ksize=k`` and ``ksize=(k, k)`` are equivalent.
dilate (int or pair of ints): Dilation factor of filter applications.
``dilate=d`` and ``dilate=(d, d)`` are equivalent.
Returns:
~chainer.Variable:
Output variable of same shape as ``x``.
"""
fnode = Shift(ksize, dilate)
y, = fnode.apply((x,))
return y
| return shift(grad_outputs[0], ksize=(self.kh, self.kw),
dilate=(-self.dy, -self.dx)), | identifier_body |
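The shift.py rows implement the zero-parameter spatial shift from the paper cited in the docstring, and the middle of this row shows that the backward pass is simply another shift with the dilation negated. A small forward-pass sketch with NumPy input follows; it assumes the shift function defined above is importable and that Chainer and its backends are installed.

import numpy as np

# Hedged sketch: the channel count must be at least ksize*ksize (9 here) so
# every shift direction gets its own channel group, and the dtype must be a
# float kind to pass check_type_forward.
x = np.arange(1 * 9 * 4 * 4, dtype=np.float32).reshape(1, 9, 4, 4)

y = shift(x, ksize=3, dilate=1)  # shift() as defined in the rows above
print(y.shape)                   # (1, 9, 4, 4): same shape, channel groups shifted

# Because backward() is shift() with dilate negated, gradients flow by moving
# each channel group back to where it came from; no parameters are involved.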
shift.py | import numpy
from chainer.backends import cuda
from chainer import function_node
from chainer.utils import type_check
def _pair(x):
if hasattr(x, '__getitem__'):
|
return x, x
class Shift(function_node.FunctionNode):
def __init__(self, ksize=3, dilate=1):
super(Shift, self).__init__()
self.kh, self.kw = _pair(ksize)
if self.kh % 2 != 1:
raise ValueError('kh must be odd')
if self.kw % 2 != 1:
raise ValueError('kw must be odd')
self.dy, self.dx = _pair(dilate)
def check_type_forward(self, in_types):
n_in = in_types.size()
type_check.expect(n_in == 1)
x_type = in_types[0]
type_check.expect(
x_type.dtype.kind == 'f',
x_type.ndim == 4,
x_type.shape[1] >= self.kh * self.kw,
)
def forward_cpu(self, inputs):
x = inputs[0]
b, c, h, w = x.shape
py = self.kh // 2 * abs(self.dy)
px = self.kw // 2 * abs(self.dx)
x = numpy.pad(x, ((0, 0), (0, 0), (py, py), (px, px)),
'constant')
n_groups = self.kh * self.kw
group_size = c // n_groups
ret = []
for i, group_idx in enumerate(range(n_groups)):
# Make sure that center group is last
if group_idx == (n_groups - 1) // 2:
group_idx = n_groups - 1
elif group_idx == (n_groups - 1):
group_idx = (n_groups - 1) // 2
ky = (group_idx // self.kw) - py // abs(self.dy)
kx = (group_idx % self.kw) - px // abs(self.dx)
hs = py + -ky * self.dy
ws = px + -kx * self.dx
he = hs + h
we = ws + w
cs = i * group_size
ce = (i + 1) * group_size if i < n_groups - 1 else None
ret.append(x[:, cs:ce, hs:he, ws:we])
return numpy.concatenate(ret, axis=1),
def forward_gpu(self, inputs):
x = inputs[0]
b, c, h, w = x.shape
y = cuda.cupy.empty_like(x)
cuda.elementwise(
'raw T x, int32 c, int32 h, int32 w,'
'int32 kh, int32 kw,'
'int32 dy, int32 dx',
'T y',
'''
int b0 = i / (c * h * w);
int rest = i % (c * h * w);
int c0 = rest / (h * w);
rest %= h * w;
int out_row = rest / w;
int out_col = rest % w;
int n_groups = kh * kw;
int group_size = c / n_groups;
int group_idx = c0 / group_size;
// Make sure that center group is last
if (group_idx == (n_groups - 1) / 2) {
group_idx = n_groups - 1;
} else if (group_idx == n_groups - 1) {
group_idx = (n_groups - 1) / 2;
}
int ky = (group_idx / kw) - kh / 2;
int kx = (group_idx % kw) - kw / 2;
if (group_idx >= n_groups) {
ky = 0;
kx = 0;
}
int in_row = -ky * dy + out_row;
int in_col = -kx * dx + out_col;
if (in_row >= 0 && in_row < h && in_col >= 0 && in_col < w) {
y = x[b0 * c * h * w + c0 * h * w + in_row * w + in_col];
} else {
y = 0;
}
''',
'shift_gpu')(x, c, h, w, self.kh, self.kw, self.dy, self.dx, y)
return y,
def backward(self, indexes, grad_outputs):
return shift(grad_outputs[0], ksize=(self.kh, self.kw),
dilate=(-self.dy, -self.dx)),
def shift(x, ksize=3, dilate=1):
"""Shift function.
See: `Shift: A Zero FLOP, Zero Parameter Alternative to Spatial \
Convolutions <https://arxiv.org/abs/1711.08141>`_
Args:
x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`):
Input variable of shape :math:`(n, c, h, w)`.
ksize (int or pair of ints): Size of filters (a.k.a. kernels).
``ksize=k`` and ``ksize=(k, k)`` are equivalent.
dilate (int or pair of ints): Dilation factor of filter applications.
``dilate=d`` and ``dilate=(d, d)`` are equivalent.
Returns:
~chainer.Variable:
Output variable of same shape as ``x``.
"""
fnode = Shift(ksize, dilate)
y, = fnode.apply((x,))
return y
| return x | conditional_block |
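A minimal NumPy-only sketch of the channel-grouping idea behind the shift operation above. This is an illustration, not the Chainer implementation: it assumes the channel count is divisible by ksize*ksize, ignores the centre-group reordering done in forward_cpu, and the helper name illustrate_shift is made up for the example.

import numpy as np

def illustrate_shift(x, ksize=3, dilate=1):
    # x has shape (n, c, h, w); channels are split into ksize*ksize groups
    # and each group is translated by a different (ky, kx) offset.
    n, c, h, w = x.shape
    n_groups = ksize * ksize
    group_size = c // n_groups
    pad = (ksize // 2) * dilate
    xp = np.pad(x, ((0, 0), (0, 0), (pad, pad), (pad, pad)), 'constant')
    out = np.empty_like(x)
    for g in range(n_groups):
        ky = (g // ksize) - ksize // 2
        kx = (g % ksize) - ksize // 2
        cs, ce = g * group_size, (g + 1) * group_size
        hs, ws = pad - ky * dilate, pad - kx * dilate
        out[:, cs:ce] = xp[:, cs:ce, hs:hs + h, ws:ws + w]
    return out

x = np.arange(2 * 9 * 4 * 4, dtype=np.float32).reshape(2, 9, 4, 4)
print(illustrate_shift(x).shape)  # (2, 9, 4, 4): same shape, each channel group shifted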
markdown.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::io;
use core;
use getopts;
use testing;
use rustc::session::search_paths::SearchPaths;
use externalfiles::ExternalHtml;
use html::escape::Escape;
use html::markdown;
use html::markdown::{Markdown, MarkdownWithToc, find_testable_code, reset_headers};
use test::Collector;
/// Separate any lines at the start of the file that begin with `%`.
fn extract_leading_metadata<'a>(s: &'a str) -> (Vec<&'a str>, &'a str) {
let mut metadata = Vec::new();
for line in s.lines() {
if line.starts_with("%") {
// remove %<whitespace>
metadata.push(line[1..].trim_left())
} else {
let line_start_byte = s.subslice_offset(line);
return (metadata, &s[line_start_byte..]);
}
}
// if we're here, then all lines were metadata % lines.
(metadata, "")
}
/// Render `input` (e.g. "foo.md") into an HTML file in `output`
/// (e.g. output = "bar" => "bar/foo.html").
pub fn render(input: &str, mut output: Path, matches: &getopts::Matches,
external_html: &ExternalHtml, include_toc: bool) -> int {
let input_p = Path::new(input);
output.push(input_p.filestem().unwrap());
output.set_extension("html");
let mut css = String::new();
for name in matches.opt_strs("markdown-css").iter() {
let s = format!("<link rel=\"stylesheet\" type=\"text/css\" href=\"{}\">\n", name);
css.push_str(s.as_slice())
}
let input_str = load_or_return!(input, 1, 2);
let playground = matches.opt_str("markdown-playground-url");
if playground.is_some() {
markdown::PLAYGROUND_KRATE.with(|s| { *s.borrow_mut() = None; });
}
let playground = playground.unwrap_or("".to_string());
let mut out = match io::File::create(&output) {
Err(e) => {
let _ = writeln!(&mut io::stderr(),
"error opening `{}` for writing: {}",
output.display(), e);
return 4;
}
Ok(f) => f
};
let (metadata, text) = extract_leading_metadata(input_str.as_slice());
if metadata.len() == 0 {
let _ = writeln!(&mut io::stderr(),
"invalid markdown file: expecting initial line with `% ...TITLE...`");
return 5;
}
let title = metadata[0].as_slice();
reset_headers();
let rendered = if include_toc {
format!("{}", MarkdownWithToc(text))
} else {
format!("{}", Markdown(text))
};
let err = write!(
&mut out,
r#"<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="generator" content="rustdoc">
<title>{title}</title>
{css}
{in_header}
</head>
<body class="rustdoc">
<!--[if lte IE 8]>
<div class="warning">
This old browser is unsupported and will most likely display funky
things.
</div>
<![endif]-->
{before_content}
<h1 class="title">{title}</h1>
{text}
<script type="text/javascript">
window.playgroundUrl = "{playground}";
</script>
{after_content}
</body>
</html>"#,
title = Escape(title),
css = css,
in_header = external_html.in_header,
before_content = external_html.before_content,
text = rendered,
after_content = external_html.after_content,
playground = playground,
);
match err {
Err(e) => {
let _ = writeln!(&mut io::stderr(),
"error writing to `{}`: {}",
output.display(), e);
6
}
Ok(_) => 0
}
}
/// Run any tests/code examples in the markdown file `input`.
pub fn test(input: &str, libs: SearchPaths, externs: core::Externs,
mut test_args: Vec<String>) -> int | {
let input_str = load_or_return!(input, 1, 2);
let mut collector = Collector::new(input.to_string(), libs, externs, true);
find_testable_code(input_str.as_slice(), &mut collector);
test_args.insert(0, "rustdoctest".to_string());
testing::test_main(test_args.as_slice(), collector.tests);
0
} | identifier_body |
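For illustration, a small Python sketch of the leading-% metadata convention that render expects; it mirrors the behaviour of the Rust extract_leading_metadata above, and the sample document string is made up for the example.

def extract_leading_metadata(text):
    # lines starting with '%' at the top of the file are metadata;
    # the first metadata entry becomes the page title in render.
    metadata = []
    lines = text.splitlines(keepends=True)
    for i, line in enumerate(lines):
        if line.startswith("%"):
            metadata.append(line[1:].lstrip().rstrip("\n"))
        else:
            return metadata, "".join(lines[i:])
    return metadata, ""

doc = "% My Guide\n% draft notes\n# Heading\nbody text\n"
meta, body = extract_leading_metadata(doc)
print(meta)   # ['My Guide', 'draft notes']
print(body)   # '# Heading\nbody text\n'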
|
markdown.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::io;
use core;
use getopts;
use testing;
use rustc::session::search_paths::SearchPaths;
use externalfiles::ExternalHtml;
use html::escape::Escape;
use html::markdown;
use html::markdown::{Markdown, MarkdownWithToc, find_testable_code, reset_headers};
use test::Collector;
/// Separate any lines at the start of the file that begin with `%`.
fn | <'a>(s: &'a str) -> (Vec<&'a str>, &'a str) {
let mut metadata = Vec::new();
for line in s.lines() {
if line.starts_with("%") {
// remove %<whitespace>
metadata.push(line[1..].trim_left())
} else {
let line_start_byte = s.subslice_offset(line);
return (metadata, &s[line_start_byte..]);
}
}
// if we're here, then all lines were metadata % lines.
(metadata, "")
}
/// Render `input` (e.g. "foo.md") into an HTML file in `output`
/// (e.g. output = "bar" => "bar/foo.html").
pub fn render(input: &str, mut output: Path, matches: &getopts::Matches,
external_html: &ExternalHtml, include_toc: bool) -> int {
let input_p = Path::new(input);
output.push(input_p.filestem().unwrap());
output.set_extension("html");
let mut css = String::new();
for name in matches.opt_strs("markdown-css").iter() {
let s = format!("<link rel=\"stylesheet\" type=\"text/css\" href=\"{}\">\n", name);
css.push_str(s.as_slice())
}
let input_str = load_or_return!(input, 1, 2);
let playground = matches.opt_str("markdown-playground-url");
if playground.is_some() {
markdown::PLAYGROUND_KRATE.with(|s| { *s.borrow_mut() = None; });
}
let playground = playground.unwrap_or("".to_string());
let mut out = match io::File::create(&output) {
Err(e) => {
let _ = writeln!(&mut io::stderr(),
"error opening `{}` for writing: {}",
output.display(), e);
return 4;
}
Ok(f) => f
};
let (metadata, text) = extract_leading_metadata(input_str.as_slice());
if metadata.len() == 0 {
let _ = writeln!(&mut io::stderr(),
"invalid markdown file: expecting initial line with `% ...TITLE...`");
return 5;
}
let title = metadata[0].as_slice();
reset_headers();
let rendered = if include_toc {
format!("{}", MarkdownWithToc(text))
} else {
format!("{}", Markdown(text))
};
let err = write!(
&mut out,
r#"<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="generator" content="rustdoc">
<title>{title}</title>
{css}
{in_header}
</head>
<body class="rustdoc">
<!--[if lte IE 8]>
<div class="warning">
This old browser is unsupported and will most likely display funky
things.
</div>
<![endif]-->
{before_content}
<h1 class="title">{title}</h1>
{text}
<script type="text/javascript">
window.playgroundUrl = "{playground}";
</script>
{after_content}
</body>
</html>"#,
title = Escape(title),
css = css,
in_header = external_html.in_header,
before_content = external_html.before_content,
text = rendered,
after_content = external_html.after_content,
playground = playground,
);
match err {
Err(e) => {
let _ = writeln!(&mut io::stderr(),
"error writing to `{}`: {}",
output.display(), e);
6
}
Ok(_) => 0
}
}
/// Run any tests/code examples in the markdown file `input`.
pub fn test(input: &str, libs: SearchPaths, externs: core::Externs,
mut test_args: Vec<String>) -> int {
let input_str = load_or_return!(input, 1, 2);
let mut collector = Collector::new(input.to_string(), libs, externs, true);
find_testable_code(input_str.as_slice(), &mut collector);
test_args.insert(0, "rustdoctest".to_string());
testing::test_main(test_args.as_slice(), collector.tests);
0
}
| extract_leading_metadata | identifier_name |
markdown.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::io;
use core;
use getopts;
use testing;
use rustc::session::search_paths::SearchPaths;
use externalfiles::ExternalHtml;
use html::escape::Escape;
use html::markdown;
use html::markdown::{Markdown, MarkdownWithToc, find_testable_code, reset_headers};
use test::Collector;
/// Separate any lines at the start of the file that begin with `%`.
fn extract_leading_metadata<'a>(s: &'a str) -> (Vec<&'a str>, &'a str) {
let mut metadata = Vec::new();
for line in s.lines() {
if line.starts_with("%") {
// remove %<whitespace>
metadata.push(line[1..].trim_left())
} else {
let line_start_byte = s.subslice_offset(line);
return (metadata, &s[line_start_byte..]);
}
}
// if we're here, then all lines were metadata % lines.
(metadata, "")
}
/// Render `input` (e.g. "foo.md") into an HTML file in `output`
/// (e.g. output = "bar" => "bar/foo.html").
pub fn render(input: &str, mut output: Path, matches: &getopts::Matches,
external_html: &ExternalHtml, include_toc: bool) -> int {
let input_p = Path::new(input);
output.push(input_p.filestem().unwrap());
output.set_extension("html");
let mut css = String::new();
for name in matches.opt_strs("markdown-css").iter() {
let s = format!("<link rel=\"stylesheet\" type=\"text/css\" href=\"{}\">\n", name);
css.push_str(s.as_slice())
}
let input_str = load_or_return!(input, 1, 2);
let playground = matches.opt_str("markdown-playground-url");
if playground.is_some() {
markdown::PLAYGROUND_KRATE.with(|s| { *s.borrow_mut() = None; });
}
let playground = playground.unwrap_or("".to_string());
let mut out = match io::File::create(&output) {
Err(e) => {
let _ = writeln!(&mut io::stderr(),
"error opening `{}` for writing: {}",
output.display(), e);
return 4;
}
Ok(f) => f
};
let (metadata, text) = extract_leading_metadata(input_str.as_slice());
if metadata.len() == 0 {
let _ = writeln!(&mut io::stderr(),
"invalid markdown file: expecting initial line with `% ...TITLE...`");
return 5;
}
let title = metadata[0].as_slice();
reset_headers();
let rendered = if include_toc {
format!("{}", MarkdownWithToc(text))
} else {
format!("{}", Markdown(text))
};
let err = write!(
&mut out,
r#"<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8"> | <title>{title}</title>
{css}
{in_header}
</head>
<body class="rustdoc">
<!--[if lte IE 8]>
<div class="warning">
This old browser is unsupported and will most likely display funky
things.
</div>
<![endif]-->
{before_content}
<h1 class="title">{title}</h1>
{text}
<script type="text/javascript">
window.playgroundUrl = "{playground}";
</script>
{after_content}
</body>
</html>"#,
title = Escape(title),
css = css,
in_header = external_html.in_header,
before_content = external_html.before_content,
text = rendered,
after_content = external_html.after_content,
playground = playground,
);
match err {
Err(e) => {
let _ = writeln!(&mut io::stderr(),
"error writing to `{}`: {}",
output.display(), e);
6
}
Ok(_) => 0
}
}
/// Run any tests/code examples in the markdown file `input`.
pub fn test(input: &str, libs: SearchPaths, externs: core::Externs,
mut test_args: Vec<String>) -> int {
let input_str = load_or_return!(input, 1, 2);
let mut collector = Collector::new(input.to_string(), libs, externs, true);
find_testable_code(input_str.as_slice(), &mut collector);
test_args.insert(0, "rustdoctest".to_string());
testing::test_main(test_args.as_slice(), collector.tests);
0
} | <meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="generator" content="rustdoc"> | random_line_split |
ecg.py | # Copyright (c) 2017, MD2K Center of Excellence
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import List
import numpy as np
from scipy import signal
from cerebralcortex.data_processor.signalprocessing.dataquality import Quality
from cerebralcortex.kernel.datatypes.datapoint import DataPoint
from cerebralcortex.kernel.datatypes.datastream import DataStream
def filter_bad_ecg(ecg: DataStream,
ecg_quality: DataStream) -> DataStream:
"""
    This function combines the raw ECG datastream with the ECG data-quality datastream and keeps only the datapoints marked acceptable by the quality annotation
:param ecg: raw ecg datastream
:param ecg_quality: ecg quality datastream
:return: filtered ecg datastream
"""
ecg_filtered = DataStream.from_datastream([ecg])
ecg_quality_array = ecg_quality.data
ecg_raw_timestamp_array = np.array([i.start_time.timestamp() for i in ecg.data])
ecg_filtered_array = []
initial_index = 0
for item in ecg_quality_array:
if item.sample == Quality.ACCEPTABLE:
final_index = initial_index
for i in range(initial_index, len(ecg.data)):
if item.start_time.timestamp() <= ecg_raw_timestamp_array[i] <= item.end_time.timestamp():
ecg_filtered_array.append(ecg.data[i])
final_index = i
initial_index = final_index
ecg_filtered.data = ecg_filtered_array
return ecg_filtered
def compute_rr_intervals(ecg: DataStream,
ecg_quality: DataStream,
fs: float) -> DataStream:
|
def rr_interval_update(rpeak_temp1: List[DataPoint],
rr_ave: float,
min_size: int = 8) -> float:
"""
    :param min_size: the last 8 R-peaks are checked to compute the running rr-interval average
:param rpeak_temp1: R peak locations
:param rr_ave: previous rr-interval average
:return: the new rr-interval average of the previously detected 8 R peak locations
"""
peak_interval = np.diff([0] + rpeak_temp1) # TODO: rpeak_temp1 is a datapoint, what should this be converted to?
return rr_ave if len(peak_interval) < min_size else np.sum(peak_interval[-min_size:]) / min_size
def compute_moving_window_int(sample: np.ndarray,
fs: float,
blackman_win_length: int,
filter_length: int = 257,
delta: float = .02) -> np.ndarray:
"""
:param sample: ecg sample array
:param fs: sampling frequency
:param blackman_win_length: length of the blackman window on which to compute the moving window integration
:param filter_length: length of the FIR bandpass filter on which filtering is done on ecg sample array
:param delta: to compute the weights of each band in FIR filter
:return: the Moving window integration of the sample array
"""
# I believe these constants can be kept in a file
# filter edges
filter_edges = [0, 4.5 * 2 / fs, 5 * 2 / fs, 20 * 2 / fs, 20.5 * 2 / fs, 1]
# gains at filter band edges
gains = [0, 0, 1, 1, 0, 0]
# weights
weights = [500 / delta, 1 / delta, 500 / delta]
# length of the FIR filter
# FIR filter coefficients for bandpass filtering
filter_coeff = signal.firls(filter_length, filter_edges, gains, weights)
# bandpass filtered signal
bandpass_signal = signal.convolve(sample, filter_coeff, 'same')
bandpass_signal /= np.percentile(bandpass_signal, 90)
# derivative array
derivative_array = (np.array([-1.0, -2.0, 0, 2.0, 1.0])) * (1 / 8)
# derivative signal (differentiation of the bandpass)
derivative_signal = signal.convolve(bandpass_signal, derivative_array, 'same')
derivative_signal /= np.percentile(derivative_signal, 90)
# squared derivative signal
derivative_squared_signal = derivative_signal ** 2
derivative_squared_signal /= np.percentile(derivative_squared_signal, 90)
# blackman window
blackman_window = np.blackman(blackman_win_length)
# moving window Integration of squared derivative signal
mov_win_int_signal = signal.convolve(derivative_squared_signal, blackman_window, 'same')
mov_win_int_signal /= np.percentile(mov_win_int_signal, 90)
return mov_win_int_signal
def check_peak(data: List[DataPoint]) -> bool:
"""
    This is a function to check whether the midpoint of ``data`` is a simple peak,
    i.e. the values strictly rise up to the midpoint and strictly fall after it
    :param data: window of signal values centred on the candidate peak
    :return: True if the midpoint of the window is a simple peak, False otherwise
"""
if len(data) < 3:
return False
midpoint = int(len(data) / 2)
test_value = data[0]
for i in data[1:midpoint + 1]:
if test_value < i:
test_value = i
else:
return False
for i in data[midpoint + 1:]:
if test_value > i:
test_value = i
else:
return False
return True
# TODO: CODE_REVIEW: Justify in the method documentation string the justification of the default values
# TODO: CODE_REVIEW: Make hard-coded constants default method parameter
def compute_r_peaks(threshold_1: float,
rr_ave: float,
mov_win_int_signal: np.ndarray,
peak_tuple_array: List[tuple]) -> list:
"""
This function does the adaptive thresholding of the signal to get the R-peak locations
    :param threshold_1: threshold above which a candidate peak is accepted as an R peak
:param rr_ave: running RR-interval average
:param mov_win_int_signal: signal sample array
:param peak_tuple_array: A tuple array containing location and values of the simple peaks detected in the process before
    :returns rpeak_array_indices: the locations of the R peaks in the signal sample array
"""
peak_location_in_signal_array = [i[0] for i in peak_tuple_array] # location of the simple peaks in signal array
amplitude_in_peak_locations = [i[1] for i in peak_tuple_array] # simple peak's amplitude in signal array
threshold_2 = 0.5 * threshold_1 # any signal value between threshold_2 and threshold_1 is a noise peak
    sig_lev = 4 * threshold_1  # current signal level - any peak above three times this level is discarded as spurious
noise_lev = 0.1 * sig_lev # current noise level of the signal
ind_rpeak = 0
rpeak_array_indices = []
rpeak_inds_in_peak_array = []
while ind_rpeak < len(peak_location_in_signal_array):
# if for 166 percent of the present RR interval no peak is detected as R peak then threshold_2 is taken as the
# R peak threshold and the maximum of the range is taken as a R peak and RR interval is updated accordingly
if len(rpeak_array_indices) >= 1 and peak_location_in_signal_array[ind_rpeak] - peak_location_in_signal_array[
rpeak_inds_in_peak_array[-1]] > 1.66 * rr_ave and ind_rpeak - rpeak_inds_in_peak_array[-1] > 1:
# values and indexes of previous peaks discarded as not an R peak whose magnitude is above threshold_2
searchback_array = [(k - rpeak_inds_in_peak_array[-1], amplitude_in_peak_locations[k]) for k in
range(rpeak_inds_in_peak_array[-1] + 1, ind_rpeak) if
3 * sig_lev > amplitude_in_peak_locations[k] > threshold_2]
if len(searchback_array) > 0:
# maximum inside the range calculated beforehand is taken as R peak
searchback_array_inrange_values = [x[1] for x in searchback_array]
searchback_max_index = np.argmax(searchback_array_inrange_values)
rpeak_array_indices.append(peak_location_in_signal_array[
rpeak_inds_in_peak_array[-1] + searchback_array[searchback_max_index][
0]])
rpeak_inds_in_peak_array.append(
rpeak_inds_in_peak_array[-1] + searchback_array[searchback_max_index][0])
sig_lev = ewma(sig_lev, mov_win_int_signal[peak_location_in_signal_array[ind_rpeak]],
.125) # update the current signal level
threshold_1 = noise_lev + 0.25 * (sig_lev - noise_lev)
threshold_2 = 0.5 * threshold_1
rr_ave = rr_interval_update(rpeak_array_indices, rr_ave)
ind_rpeak = rpeak_inds_in_peak_array[-1] + 1
else:
threshold_1 = noise_lev + 0.25 * (sig_lev - noise_lev)
threshold_2 = 0.5 * threshold_1
ind_rpeak += 1
else:
# R peak checking
if threshold_1 <= mov_win_int_signal[peak_location_in_signal_array[ind_rpeak]] < 3 * sig_lev:
rpeak_array_indices.append(peak_location_in_signal_array[ind_rpeak])
rpeak_inds_in_peak_array.append(ind_rpeak)
sig_lev = ewma(sig_lev, mov_win_int_signal[peak_location_in_signal_array[ind_rpeak]],
.125) # update the signal level
# noise peak checking
elif threshold_1 > mov_win_int_signal[peak_location_in_signal_array[ind_rpeak]] > threshold_2:
noise_lev = ewma(noise_lev, mov_win_int_signal[peak_location_in_signal_array[ind_rpeak]],
.125) # update the noise level
threshold_1 = noise_lev + 0.25 * (sig_lev - noise_lev)
threshold_2 = 0.5 * threshold_1
ind_rpeak += 1
rr_ave = rr_interval_update(rpeak_array_indices, rr_ave)
return rpeak_array_indices
def ewma(value: float, new_value: float, alpha: float) -> float:
"""
    :param value: previous running estimate
    :param new_value: new observation folded into the estimate
    :param alpha: smoothing factor in [0, 1]; larger values weight the new observation more
    :return: the updated exponentially weighted moving average
"""
return alpha * new_value + (1 - alpha) * value
# TODO: CODE_REVIEW: Justify in the method documentation string the justification of the default values
# TODO: CODE_REVIEW: Make hard-coded constants default method parameter
def remove_close_peaks(rpeak_temp1: list,
sample: np.ndarray,
fs: float,
min_range: float = .5) -> list:
"""
    This function removes one of each pair of consecutive R peaks
    whose spacing is less than the minimum allowed
    :param min_range: minimum allowed spacing between consecutive R peaks, in seconds (default 0.5)
:param rpeak_temp1: R peak array containing the index of the R peaks
:param sample: sample array
:param fs: sampling frequency
:return: R peak array with no close R peaks
"""
difference = 0
rpeak_temp2 = rpeak_temp1
while difference != 1:
length_rpeak_temp2 = len(rpeak_temp2)
temp = np.diff(rpeak_temp2)
comp_index1 = [rpeak_temp2[i] for i in range(len(temp)) if temp[i] < min_range * fs]
comp_index2 = [rpeak_temp2[i + 1] for i in range(len(temp)) if temp[i] < min_range * fs]
comp1 = sample[comp_index1]
comp2 = sample[comp_index2]
checkmin = np.matrix([comp1, comp2])
temp_ind1 = [i for i in range(len(temp)) if temp[i] < min_range * fs]
temp_ind2 = np.argmin(np.array(checkmin), axis=0)
temp_ind = temp_ind1 + temp_ind2
temp_ind = np.unique(temp_ind)
count = 0
for i in temp_ind:
rpeak_temp2.remove(rpeak_temp2[i - count])
count = count + 1
difference = length_rpeak_temp2 - len(rpeak_temp2) + 1
return rpeak_temp2
def confirm_peaks(rpeak_temp1: list,
sample: np.ndarray,
fs: float,
range_for_checking: float = 1 / 10) -> np.ndarray:
"""
This function does the final check on the R peaks detected and
finds the maximum in a range of fs/10 of the detected peak location and assigns it to be the peak
:param rpeak_temp1: R peak array containing the index of the R peaks
:param sample: sample array
:param fs: sampling frequency
    :param range_for_checking: the peaks are checked within a range of fs/10 to find the maximum value within that range
:return: final R peak array
"""
for i in range(1, len(rpeak_temp1) - 1):
start_index = int(rpeak_temp1[i] - np.ceil(range_for_checking * fs))
end_index = int(rpeak_temp1[i] + np.ceil(range_for_checking * fs) + 1)
index = np.argmax(sample[start_index:end_index])
rpeak_temp1[i] = rpeak_temp1[i] - np.ceil(range_for_checking * fs) + index
return np.array(rpeak_temp1).astype(np.int64)
# TODO: CODE_REVIEW: Make hard-coded constants default method parameter
def detect_rpeak(ecg: DataStream,
fs: float = 64,
threshold: float = 0.5,
blackman_win_len_range: float = 0.2) -> DataStream:
"""
This program implements the Pan Tomkins algorithm on ECG signal to detect the R peaks
Since the ecg array can have discontinuity in the timestamp arrays the rr-interval calculated
in the algorithm is calculated in terms of the index in the sample array
The algorithm consists of some major steps
1. computation of the moving window integration of the signal in terms of blackman window of a prescribed length
2. compute all the peaks of the moving window integration signal
3. adaptive thresholding with dynamic signal and noise thresholds applied to filter out the R peak locations
4. confirm the R peaks through differentiation from the nearby peaks and remove the false peaks
:param ecg: ecg array of tuples (timestamp,value)
:param fs: sampling frequency
    :param threshold: initial threshold to detect the R peak in a signal normalized by the 90th percentile; 0.5 by default
    :param blackman_win_len_range: fraction of the sampling frequency used as the blackman window length
:return: R peak array of tuples (timestamp, Rpeak interval)
"""
data = ecg.data
result = DataStream.from_datastream([ecg])
if len(data) == 0:
result.data = []
return result
sample = np.array([i.sample for i in data])
timestamp = np.array([i.start_time for i in data])
# computes the moving window integration of the signal
blackman_win_len = np.ceil(fs * blackman_win_len_range)
y = compute_moving_window_int(sample, fs, blackman_win_len)
peak_location_values = [(i, y[i]) for i in range(2, len(y) - 1) if check_peak(y[i - 2:i + 3])]
# initial RR interval average
peak_location = [i[0] for i in peak_location_values]
running_rr_avg = sum(np.diff(peak_location)) / (len(peak_location) - 1)
rpeak_temp1 = compute_r_peaks(threshold, running_rr_avg, y, peak_location_values)
rpeak_temp2 = remove_close_peaks(rpeak_temp1, sample, fs)
index = confirm_peaks(rpeak_temp2, sample, fs)
rpeak_timestamp = timestamp[index]
rpeak_value = np.diff(rpeak_timestamp)
rpeak_timestamp = rpeak_timestamp[1:]
result_data = []
for k in range(len(rpeak_value)):
result_data.append(
DataPoint.from_tuple(rpeak_timestamp[k], rpeak_value[k].seconds + rpeak_value[k].microseconds / 1e6))
# Create resulting datastream to be returned
result.data = result_data
return result
| """
filter ecg datastream first and compute rr-interval datastream from the ecg datastream
:param ecg:ecg datastream
:param ecg_quality : ecg quality annotated datastream
:param fs: sampling frequency
:return: rr-interval datastream
"""
ecg_filtered = filter_bad_ecg(ecg, ecg_quality)
# compute the r-peak array
ecg_rpeak = detect_rpeak(ecg_filtered, fs)
return ecg_rpeak | identifier_body |
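A small worked example of the running-estimate updates used in compute_r_peaks above. The starting levels and the accepted peak amplitude are made up for illustration; alpha=0.125 matches the code.

def ewma(value, new_value, alpha):
    return alpha * new_value + (1 - alpha) * value

sig_lev, noise_lev = 2.0, 0.2     # assumed current signal and noise level estimates
peak_amplitude = 1.6              # hypothetical amplitude of an accepted R peak
sig_lev = ewma(sig_lev, peak_amplitude, 0.125)          # 0.125*1.6 + 0.875*2.0 = 1.95
threshold_1 = noise_lev + 0.25 * (sig_lev - noise_lev)  # 0.2 + 0.25*1.75 = 0.6375
threshold_2 = 0.5 * threshold_1                         # noise/R-peak boundary = 0.31875
print(sig_lev, threshold_1, threshold_2)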
ecg.py | # Copyright (c) 2017, MD2K Center of Excellence
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import List
import numpy as np
from scipy import signal
from cerebralcortex.data_processor.signalprocessing.dataquality import Quality
from cerebralcortex.kernel.datatypes.datapoint import DataPoint
from cerebralcortex.kernel.datatypes.datastream import DataStream
def | (ecg: DataStream,
ecg_quality: DataStream) -> DataStream:
"""
    This function combines the raw ECG datastream with the ECG data-quality datastream and keeps only the datapoints marked acceptable by the quality annotation
:param ecg: raw ecg datastream
:param ecg_quality: ecg quality datastream
:return: filtered ecg datastream
"""
ecg_filtered = DataStream.from_datastream([ecg])
ecg_quality_array = ecg_quality.data
ecg_raw_timestamp_array = np.array([i.start_time.timestamp() for i in ecg.data])
ecg_filtered_array = []
initial_index = 0
for item in ecg_quality_array:
if item.sample == Quality.ACCEPTABLE:
final_index = initial_index
for i in range(initial_index, len(ecg.data)):
if item.start_time.timestamp() <= ecg_raw_timestamp_array[i] <= item.end_time.timestamp():
ecg_filtered_array.append(ecg.data[i])
final_index = i
initial_index = final_index
ecg_filtered.data = ecg_filtered_array
return ecg_filtered
def compute_rr_intervals(ecg: DataStream,
ecg_quality: DataStream,
fs: float) -> DataStream:
"""
filter ecg datastream first and compute rr-interval datastream from the ecg datastream
:param ecg:ecg datastream
:param ecg_quality : ecg quality annotated datastream
:param fs: sampling frequency
:return: rr-interval datastream
"""
ecg_filtered = filter_bad_ecg(ecg, ecg_quality)
# compute the r-peak array
ecg_rpeak = detect_rpeak(ecg_filtered, fs)
return ecg_rpeak
def rr_interval_update(rpeak_temp1: List[DataPoint],
rr_ave: float,
min_size: int = 8) -> float:
"""
    :param min_size: the last 8 R-peaks are checked to compute the running rr-interval average
:param rpeak_temp1: R peak locations
:param rr_ave: previous rr-interval average
:return: the new rr-interval average of the previously detected 8 R peak locations
"""
peak_interval = np.diff([0] + rpeak_temp1) # TODO: rpeak_temp1 is a datapoint, what should this be converted to?
return rr_ave if len(peak_interval) < min_size else np.sum(peak_interval[-min_size:]) / min_size
def compute_moving_window_int(sample: np.ndarray,
fs: float,
blackman_win_length: int,
filter_length: int = 257,
delta: float = .02) -> np.ndarray:
"""
:param sample: ecg sample array
:param fs: sampling frequency
:param blackman_win_length: length of the blackman window on which to compute the moving window integration
:param filter_length: length of the FIR bandpass filter on which filtering is done on ecg sample array
:param delta: to compute the weights of each band in FIR filter
:return: the Moving window integration of the sample array
"""
# I believe these constants can be kept in a file
# filter edges
filter_edges = [0, 4.5 * 2 / fs, 5 * 2 / fs, 20 * 2 / fs, 20.5 * 2 / fs, 1]
# gains at filter band edges
gains = [0, 0, 1, 1, 0, 0]
# weights
weights = [500 / delta, 1 / delta, 500 / delta]
# length of the FIR filter
# FIR filter coefficients for bandpass filtering
filter_coeff = signal.firls(filter_length, filter_edges, gains, weights)
# bandpass filtered signal
bandpass_signal = signal.convolve(sample, filter_coeff, 'same')
bandpass_signal /= np.percentile(bandpass_signal, 90)
# derivative array
derivative_array = (np.array([-1.0, -2.0, 0, 2.0, 1.0])) * (1 / 8)
# derivative signal (differentiation of the bandpass)
derivative_signal = signal.convolve(bandpass_signal, derivative_array, 'same')
derivative_signal /= np.percentile(derivative_signal, 90)
# squared derivative signal
derivative_squared_signal = derivative_signal ** 2
derivative_squared_signal /= np.percentile(derivative_squared_signal, 90)
# blackman window
blackman_window = np.blackman(blackman_win_length)
# moving window Integration of squared derivative signal
mov_win_int_signal = signal.convolve(derivative_squared_signal, blackman_window, 'same')
mov_win_int_signal /= np.percentile(mov_win_int_signal, 90)
return mov_win_int_signal
def check_peak(data: List[DataPoint]) -> bool:
"""
    This is a function to check whether the midpoint of ``data`` is a simple peak,
    i.e. the values strictly rise up to the midpoint and strictly fall after it
    :param data: window of signal values centred on the candidate peak
    :return: True if the midpoint of the window is a simple peak, False otherwise
"""
if len(data) < 3:
return False
midpoint = int(len(data) / 2)
test_value = data[0]
for i in data[1:midpoint + 1]:
if test_value < i:
test_value = i
else:
return False
for i in data[midpoint + 1:]:
if test_value > i:
test_value = i
else:
return False
return True
# TODO: CODE_REVIEW: Justify in the method documentation string the justification of the default values
# TODO: CODE_REVIEW: Make hard-coded constants default method parameter
def compute_r_peaks(threshold_1: float,
rr_ave: float,
mov_win_int_signal: np.ndarray,
peak_tuple_array: List[tuple]) -> list:
"""
This function does the adaptive thresholding of the signal to get the R-peak locations
    :param threshold_1: threshold above which a candidate peak is accepted as an R peak
:param rr_ave: running RR-interval average
:param mov_win_int_signal: signal sample array
:param peak_tuple_array: A tuple array containing location and values of the simple peaks detected in the process before
    :returns rpeak_array_indices: the locations of the R peaks in the signal sample array
"""
peak_location_in_signal_array = [i[0] for i in peak_tuple_array] # location of the simple peaks in signal array
amplitude_in_peak_locations = [i[1] for i in peak_tuple_array] # simple peak's amplitude in signal array
threshold_2 = 0.5 * threshold_1 # any signal value between threshold_2 and threshold_1 is a noise peak
    sig_lev = 4 * threshold_1  # current signal level - any peak above three times this level is discarded as spurious
noise_lev = 0.1 * sig_lev # current noise level of the signal
ind_rpeak = 0
rpeak_array_indices = []
rpeak_inds_in_peak_array = []
while ind_rpeak < len(peak_location_in_signal_array):
# if for 166 percent of the present RR interval no peak is detected as R peak then threshold_2 is taken as the
# R peak threshold and the maximum of the range is taken as a R peak and RR interval is updated accordingly
if len(rpeak_array_indices) >= 1 and peak_location_in_signal_array[ind_rpeak] - peak_location_in_signal_array[
rpeak_inds_in_peak_array[-1]] > 1.66 * rr_ave and ind_rpeak - rpeak_inds_in_peak_array[-1] > 1:
# values and indexes of previous peaks discarded as not an R peak whose magnitude is above threshold_2
searchback_array = [(k - rpeak_inds_in_peak_array[-1], amplitude_in_peak_locations[k]) for k in
range(rpeak_inds_in_peak_array[-1] + 1, ind_rpeak) if
3 * sig_lev > amplitude_in_peak_locations[k] > threshold_2]
if len(searchback_array) > 0:
# maximum inside the range calculated beforehand is taken as R peak
searchback_array_inrange_values = [x[1] for x in searchback_array]
searchback_max_index = np.argmax(searchback_array_inrange_values)
rpeak_array_indices.append(peak_location_in_signal_array[
rpeak_inds_in_peak_array[-1] + searchback_array[searchback_max_index][
0]])
rpeak_inds_in_peak_array.append(
rpeak_inds_in_peak_array[-1] + searchback_array[searchback_max_index][0])
sig_lev = ewma(sig_lev, mov_win_int_signal[peak_location_in_signal_array[ind_rpeak]],
.125) # update the current signal level
threshold_1 = noise_lev + 0.25 * (sig_lev - noise_lev)
threshold_2 = 0.5 * threshold_1
rr_ave = rr_interval_update(rpeak_array_indices, rr_ave)
ind_rpeak = rpeak_inds_in_peak_array[-1] + 1
else:
threshold_1 = noise_lev + 0.25 * (sig_lev - noise_lev)
threshold_2 = 0.5 * threshold_1
ind_rpeak += 1
else:
# R peak checking
if threshold_1 <= mov_win_int_signal[peak_location_in_signal_array[ind_rpeak]] < 3 * sig_lev:
rpeak_array_indices.append(peak_location_in_signal_array[ind_rpeak])
rpeak_inds_in_peak_array.append(ind_rpeak)
sig_lev = ewma(sig_lev, mov_win_int_signal[peak_location_in_signal_array[ind_rpeak]],
.125) # update the signal level
# noise peak checking
elif threshold_1 > mov_win_int_signal[peak_location_in_signal_array[ind_rpeak]] > threshold_2:
noise_lev = ewma(noise_lev, mov_win_int_signal[peak_location_in_signal_array[ind_rpeak]],
.125) # update the noise level
threshold_1 = noise_lev + 0.25 * (sig_lev - noise_lev)
threshold_2 = 0.5 * threshold_1
ind_rpeak += 1
rr_ave = rr_interval_update(rpeak_array_indices, rr_ave)
return rpeak_array_indices
def ewma(value: float, new_value: float, alpha: float) -> float:
"""
    :param value: previous running estimate
    :param new_value: new observation folded into the estimate
    :param alpha: smoothing factor in [0, 1]; larger values weight the new observation more
    :return: the updated exponentially weighted moving average
"""
return alpha * new_value + (1 - alpha) * value
# TODO: CODE_REVIEW: Justify in the method documentation string the justification of the default values
# TODO: CODE_REVIEW: Make hard-coded constants default method parameter
def remove_close_peaks(rpeak_temp1: list,
sample: np.ndarray,
fs: float,
min_range: float = .5) -> list:
"""
    This function removes one of each pair of consecutive R peaks
    whose spacing is less than the minimum allowed
    :param min_range: minimum allowed spacing between consecutive R peaks, in seconds (default 0.5)
:param rpeak_temp1: R peak array containing the index of the R peaks
:param sample: sample array
:param fs: sampling frequency
:return: R peak array with no close R peaks
"""
difference = 0
rpeak_temp2 = rpeak_temp1
while difference != 1:
length_rpeak_temp2 = len(rpeak_temp2)
temp = np.diff(rpeak_temp2)
comp_index1 = [rpeak_temp2[i] for i in range(len(temp)) if temp[i] < min_range * fs]
comp_index2 = [rpeak_temp2[i + 1] for i in range(len(temp)) if temp[i] < min_range * fs]
comp1 = sample[comp_index1]
comp2 = sample[comp_index2]
checkmin = np.matrix([comp1, comp2])
temp_ind1 = [i for i in range(len(temp)) if temp[i] < min_range * fs]
temp_ind2 = np.argmin(np.array(checkmin), axis=0)
temp_ind = temp_ind1 + temp_ind2
temp_ind = np.unique(temp_ind)
count = 0
for i in temp_ind:
rpeak_temp2.remove(rpeak_temp2[i - count])
count = count + 1
difference = length_rpeak_temp2 - len(rpeak_temp2) + 1
return rpeak_temp2
def confirm_peaks(rpeak_temp1: list,
sample: np.ndarray,
fs: float,
range_for_checking: float = 1 / 10) -> np.ndarray:
"""
This function does the final check on the R peaks detected and
finds the maximum in a range of fs/10 of the detected peak location and assigns it to be the peak
:param rpeak_temp1: R peak array containing the index of the R peaks
:param sample: sample array
:param fs: sampling frequency
    :param range_for_checking: the peaks are checked within a range of fs/10 to find the maximum value within that range
:return: final R peak array
"""
for i in range(1, len(rpeak_temp1) - 1):
start_index = int(rpeak_temp1[i] - np.ceil(range_for_checking * fs))
end_index = int(rpeak_temp1[i] + np.ceil(range_for_checking * fs) + 1)
index = np.argmax(sample[start_index:end_index])
rpeak_temp1[i] = rpeak_temp1[i] - np.ceil(range_for_checking * fs) + index
return np.array(rpeak_temp1).astype(np.int64)
# TODO: CODE_REVIEW: Make hard-coded constants default method parameter
def detect_rpeak(ecg: DataStream,
fs: float = 64,
threshold: float = 0.5,
blackman_win_len_range: float = 0.2) -> DataStream:
"""
This program implements the Pan Tomkins algorithm on ECG signal to detect the R peaks
Since the ecg array can have discontinuity in the timestamp arrays the rr-interval calculated
in the algorithm is calculated in terms of the index in the sample array
The algorithm consists of some major steps
1. computation of the moving window integration of the signal in terms of blackman window of a prescribed length
2. compute all the peaks of the moving window integration signal
3. adaptive thresholding with dynamic signal and noise thresholds applied to filter out the R peak locations
4. confirm the R peaks through differentiation from the nearby peaks and remove the false peaks
:param ecg: ecg array of tuples (timestamp,value)
:param fs: sampling frequency
    :param threshold: initial threshold to detect the R peak in a signal normalized by the 90th percentile; 0.5 by default
    :param blackman_win_len_range: fraction of the sampling frequency used as the blackman window length
:return: R peak array of tuples (timestamp, Rpeak interval)
"""
data = ecg.data
result = DataStream.from_datastream([ecg])
if len(data) == 0:
result.data = []
return result
sample = np.array([i.sample for i in data])
timestamp = np.array([i.start_time for i in data])
# computes the moving window integration of the signal
blackman_win_len = np.ceil(fs * blackman_win_len_range)
y = compute_moving_window_int(sample, fs, blackman_win_len)
peak_location_values = [(i, y[i]) for i in range(2, len(y) - 1) if check_peak(y[i - 2:i + 3])]
# initial RR interval average
peak_location = [i[0] for i in peak_location_values]
running_rr_avg = sum(np.diff(peak_location)) / (len(peak_location) - 1)
rpeak_temp1 = compute_r_peaks(threshold, running_rr_avg, y, peak_location_values)
rpeak_temp2 = remove_close_peaks(rpeak_temp1, sample, fs)
index = confirm_peaks(rpeak_temp2, sample, fs)
rpeak_timestamp = timestamp[index]
rpeak_value = np.diff(rpeak_timestamp)
rpeak_timestamp = rpeak_timestamp[1:]
result_data = []
for k in range(len(rpeak_value)):
result_data.append(
DataPoint.from_tuple(rpeak_timestamp[k], rpeak_value[k].seconds + rpeak_value[k].microseconds / 1e6))
# Create resulting datastream to be returned
result.data = result_data
return result
| filter_bad_ecg | identifier_name |
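A quick standalone sanity check of the simple-peak test used by detect_rpeak above. The window values are made up, and the helper below is only a compact restatement of the check_peak logic, not the library code.

def is_simple_peak(window):
    # strictly rising up to the midpoint, then strictly falling
    if len(window) < 3:
        return False
    mid = len(window) // 2
    rising = all(window[i] < window[i + 1] for i in range(mid))
    falling = all(window[i] > window[i + 1] for i in range(mid, len(window) - 1))
    return rising and falling

print(is_simple_peak([0.1, 0.4, 0.9, 0.5, 0.2]))  # True: rises then falls
print(is_simple_peak([0.1, 0.4, 0.4, 0.5, 0.2]))  # False: a plateau is not a simple peak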
ecg.py | # Copyright (c) 2017, MD2K Center of Excellence
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import List
import numpy as np
from scipy import signal
from cerebralcortex.data_processor.signalprocessing.dataquality import Quality
from cerebralcortex.kernel.datatypes.datapoint import DataPoint
from cerebralcortex.kernel.datatypes.datastream import DataStream
def filter_bad_ecg(ecg: DataStream,
ecg_quality: DataStream) -> DataStream:
"""
    This function combines the raw ECG datastream with the ECG data-quality datastream and keeps only the datapoints marked acceptable by the quality annotation
:param ecg: raw ecg datastream
:param ecg_quality: ecg quality datastream
:return: filtered ecg datastream
"""
ecg_filtered = DataStream.from_datastream([ecg])
ecg_quality_array = ecg_quality.data
ecg_raw_timestamp_array = np.array([i.start_time.timestamp() for i in ecg.data])
ecg_filtered_array = []
initial_index = 0
for item in ecg_quality_array:
if item.sample == Quality.ACCEPTABLE:
final_index = initial_index
for i in range(initial_index, len(ecg.data)):
if item.start_time.timestamp() <= ecg_raw_timestamp_array[i] <= item.end_time.timestamp():
ecg_filtered_array.append(ecg.data[i])
final_index = i
initial_index = final_index
ecg_filtered.data = ecg_filtered_array
return ecg_filtered
def compute_rr_intervals(ecg: DataStream,
ecg_quality: DataStream,
fs: float) -> DataStream:
"""
filter ecg datastream first and compute rr-interval datastream from the ecg datastream
:param ecg:ecg datastream
:param ecg_quality : ecg quality annotated datastream
:param fs: sampling frequency
:return: rr-interval datastream
"""
ecg_filtered = filter_bad_ecg(ecg, ecg_quality)
# compute the r-peak array
ecg_rpeak = detect_rpeak(ecg_filtered, fs)
return ecg_rpeak
def rr_interval_update(rpeak_temp1: List[DataPoint],
rr_ave: float,
min_size: int = 8) -> float:
"""
    :param min_size: the last 8 R-peaks are checked to compute the running rr-interval average
:param rpeak_temp1: R peak locations
:param rr_ave: previous rr-interval average
:return: the new rr-interval average of the previously detected 8 R peak locations
"""
peak_interval = np.diff([0] + rpeak_temp1) # TODO: rpeak_temp1 is a datapoint, what should this be converted to?
return rr_ave if len(peak_interval) < min_size else np.sum(peak_interval[-min_size:]) / min_size
def compute_moving_window_int(sample: np.ndarray,
fs: float,
blackman_win_length: int,
filter_length: int = 257,
delta: float = .02) -> np.ndarray:
"""
:param sample: ecg sample array
:param fs: sampling frequency
:param blackman_win_length: length of the blackman window on which to compute the moving window integration
:param filter_length: length of the FIR bandpass filter on which filtering is done on ecg sample array
:param delta: to compute the weights of each band in FIR filter
:return: the Moving window integration of the sample array
"""
# I believe these constants can be kept in a file
# filter edges
filter_edges = [0, 4.5 * 2 / fs, 5 * 2 / fs, 20 * 2 / fs, 20.5 * 2 / fs, 1]
# gains at filter band edges
gains = [0, 0, 1, 1, 0, 0]
# weights
weights = [500 / delta, 1 / delta, 500 / delta]
# length of the FIR filter
# FIR filter coefficients for bandpass filtering
filter_coeff = signal.firls(filter_length, filter_edges, gains, weights)
# bandpass filtered signal
bandpass_signal = signal.convolve(sample, filter_coeff, 'same')
bandpass_signal /= np.percentile(bandpass_signal, 90)
# derivative array
derivative_array = (np.array([-1.0, -2.0, 0, 2.0, 1.0])) * (1 / 8)
# derivative signal (differentiation of the bandpass)
derivative_signal = signal.convolve(bandpass_signal, derivative_array, 'same')
derivative_signal /= np.percentile(derivative_signal, 90)
# squared derivative signal
derivative_squared_signal = derivative_signal ** 2
derivative_squared_signal /= np.percentile(derivative_squared_signal, 90)
# blackman window
blackman_window = np.blackman(blackman_win_length)
# moving window Integration of squared derivative signal
mov_win_int_signal = signal.convolve(derivative_squared_signal, blackman_window, 'same')
mov_win_int_signal /= np.percentile(mov_win_int_signal, 90)
return mov_win_int_signal
def check_peak(data: List[DataPoint]) -> bool:
"""
    This is a function to check whether the midpoint of ``data`` is a simple peak,
    i.e. the values strictly rise up to the midpoint and strictly fall after it
    :param data: window of signal values centred on the candidate peak
    :return: True if the midpoint of the window is a simple peak, False otherwise
"""
if len(data) < 3:
return False
midpoint = int(len(data) / 2)
test_value = data[0]
for i in data[1:midpoint + 1]:
if test_value < i:
test_value = i
else:
return False
for i in data[midpoint + 1:]:
if test_value > i:
test_value = i
else:
return False
return True
# TODO: CODE_REVIEW: Justify in the method documentation string the justification of the default values
# TODO: CODE_REVIEW: Make hard-coded constants default method parameter
def compute_r_peaks(threshold_1: float,
rr_ave: float,
mov_win_int_signal: np.ndarray,
peak_tuple_array: List[tuple]) -> list:
"""
This function does the adaptive thresholding of the signal to get the R-peak locations
    :param threshold_1: threshold above which a candidate peak is accepted as an R peak
:param rr_ave: running RR-interval average
:param mov_win_int_signal: signal sample array
:param peak_tuple_array: A tuple array containing location and values of the simple peaks detected in the process before
    :returns rpeak_array_indices: the locations of the R peaks in the signal sample array
"""
peak_location_in_signal_array = [i[0] for i in peak_tuple_array] # location of the simple peaks in signal array
amplitude_in_peak_locations = [i[1] for i in peak_tuple_array] # simple peak's amplitude in signal array
threshold_2 = 0.5 * threshold_1 # any signal value between threshold_2 and threshold_1 is a noise peak
    sig_lev = 4 * threshold_1  # current signal level - any peak above three times this level is discarded as spurious
noise_lev = 0.1 * sig_lev # current noise level of the signal
ind_rpeak = 0
rpeak_array_indices = []
rpeak_inds_in_peak_array = []
while ind_rpeak < len(peak_location_in_signal_array):
# if for 166 percent of the present RR interval no peak is detected as R peak then threshold_2 is taken as the
# R peak threshold and the maximum of the range is taken as a R peak and RR interval is updated accordingly
if len(rpeak_array_indices) >= 1 and peak_location_in_signal_array[ind_rpeak] - peak_location_in_signal_array[
rpeak_inds_in_peak_array[-1]] > 1.66 * rr_ave and ind_rpeak - rpeak_inds_in_peak_array[-1] > 1:
# values and indexes of previous peaks discarded as not an R peak whose magnitude is above threshold_2
searchback_array = [(k - rpeak_inds_in_peak_array[-1], amplitude_in_peak_locations[k]) for k in
range(rpeak_inds_in_peak_array[-1] + 1, ind_rpeak) if
3 * sig_lev > amplitude_in_peak_locations[k] > threshold_2]
if len(searchback_array) > 0:
# maximum inside the range calculated beforehand is taken as R peak
searchback_array_inrange_values = [x[1] for x in searchback_array]
searchback_max_index = np.argmax(searchback_array_inrange_values)
rpeak_array_indices.append(peak_location_in_signal_array[
rpeak_inds_in_peak_array[-1] + searchback_array[searchback_max_index][
0]])
rpeak_inds_in_peak_array.append(
rpeak_inds_in_peak_array[-1] + searchback_array[searchback_max_index][0])
sig_lev = ewma(sig_lev, mov_win_int_signal[peak_location_in_signal_array[ind_rpeak]],
.125) # update the current signal level
threshold_1 = noise_lev + 0.25 * (sig_lev - noise_lev)
threshold_2 = 0.5 * threshold_1
rr_ave = rr_interval_update(rpeak_array_indices, rr_ave)
ind_rpeak = rpeak_inds_in_peak_array[-1] + 1
else:
|
else:
# R peak checking
if threshold_1 <= mov_win_int_signal[peak_location_in_signal_array[ind_rpeak]] < 3 * sig_lev:
rpeak_array_indices.append(peak_location_in_signal_array[ind_rpeak])
rpeak_inds_in_peak_array.append(ind_rpeak)
sig_lev = ewma(sig_lev, mov_win_int_signal[peak_location_in_signal_array[ind_rpeak]],
.125) # update the signal level
# noise peak checking
elif threshold_1 > mov_win_int_signal[peak_location_in_signal_array[ind_rpeak]] > threshold_2:
noise_lev = ewma(noise_lev, mov_win_int_signal[peak_location_in_signal_array[ind_rpeak]],
.125) # update the noise level
threshold_1 = noise_lev + 0.25 * (sig_lev - noise_lev)
threshold_2 = 0.5 * threshold_1
ind_rpeak += 1
rr_ave = rr_interval_update(rpeak_array_indices, rr_ave)
return rpeak_array_indices
def ewma(value: float, new_value: float, alpha: float) -> float:
"""
    :param value: previous running estimate
    :param new_value: new observation folded into the estimate
    :param alpha: smoothing factor in [0, 1]; larger values weight the new observation more
    :return: the updated exponentially weighted moving average
"""
return alpha * new_value + (1 - alpha) * value
# TODO: CODE_REVIEW: Justify in the method documentation string the justification of the default values
# TODO: CODE_REVIEW: Make hard-coded constants default method parameter
def remove_close_peaks(rpeak_temp1: list,
sample: np.ndarray,
fs: float,
min_range: float = .5) -> list:
"""
    This function removes one of each pair of consecutive R peaks
    whose spacing is less than the minimum allowed
    :param min_range: minimum allowed spacing between consecutive R peaks, in seconds (default 0.5)
:param rpeak_temp1: R peak array containing the index of the R peaks
:param sample: sample array
:param fs: sampling frequency
:return: R peak array with no close R peaks
"""
difference = 0
rpeak_temp2 = rpeak_temp1
while difference != 1:
length_rpeak_temp2 = len(rpeak_temp2)
temp = np.diff(rpeak_temp2)
comp_index1 = [rpeak_temp2[i] for i in range(len(temp)) if temp[i] < min_range * fs]
comp_index2 = [rpeak_temp2[i + 1] for i in range(len(temp)) if temp[i] < min_range * fs]
comp1 = sample[comp_index1]
comp2 = sample[comp_index2]
checkmin = np.matrix([comp1, comp2])
temp_ind1 = [i for i in range(len(temp)) if temp[i] < min_range * fs]
temp_ind2 = np.argmin(np.array(checkmin), axis=0)
temp_ind = temp_ind1 + temp_ind2
temp_ind = np.unique(temp_ind)
count = 0
for i in temp_ind:
rpeak_temp2.remove(rpeak_temp2[i - count])
count = count + 1
difference = length_rpeak_temp2 - len(rpeak_temp2) + 1
return rpeak_temp2
def confirm_peaks(rpeak_temp1: list,
sample: np.ndarray,
fs: float,
range_for_checking: float = 1 / 10) -> np.ndarray:
"""
This function does the final check on the R peaks detected and
finds the maximum in a range of fs/10 of the detected peak location and assigns it to be the peak
:param rpeak_temp1: R peak array containing the index of the R peaks
:param sample: sample array
:param fs: sampling frequency
    :param range_for_checking: the peaks are checked within a range of fs/10 to find the maximum value within that range
:return: final R peak array
"""
for i in range(1, len(rpeak_temp1) - 1):
start_index = int(rpeak_temp1[i] - np.ceil(range_for_checking * fs))
end_index = int(rpeak_temp1[i] + np.ceil(range_for_checking * fs) + 1)
index = np.argmax(sample[start_index:end_index])
rpeak_temp1[i] = rpeak_temp1[i] - np.ceil(range_for_checking * fs) + index
return np.array(rpeak_temp1).astype(np.int64)
# TODO: CODE_REVIEW: Make hard-coded constants default method parameter
def detect_rpeak(ecg: DataStream,
fs: float = 64,
threshold: float = 0.5,
blackman_win_len_range: float = 0.2) -> DataStream:
"""
This program implements the Pan Tomkins algorithm on ECG signal to detect the R peaks
Since the ecg array can have discontinuity in the timestamp arrays the rr-interval calculated
in the algorithm is calculated in terms of the index in the sample array
The algorithm consists of some major steps
1. computation of the moving window integration of the signal in terms of blackman window of a prescribed length
2. compute all the peaks of the moving window integration signal
3. adaptive thresholding with dynamic signal and noise thresholds applied to filter out the R peak locations
4. confirm the R peaks through differentiation from the nearby peaks and remove the false peaks
:param ecg: ecg array of tuples (timestamp,value)
:param fs: sampling frequency
    :param threshold: initial threshold to detect the R peak in a signal normalized by the 90th percentile; 0.5 by default
    :param blackman_win_len_range: fraction of the sampling frequency used as the blackman window length
:return: R peak array of tuples (timestamp, Rpeak interval)
"""
data = ecg.data
result = DataStream.from_datastream([ecg])
if len(data) == 0:
result.data = []
return result
sample = np.array([i.sample for i in data])
timestamp = np.array([i.start_time for i in data])
# computes the moving window integration of the signal
blackman_win_len = np.ceil(fs * blackman_win_len_range)
y = compute_moving_window_int(sample, fs, blackman_win_len)
peak_location_values = [(i, y[i]) for i in range(2, len(y) - 1) if check_peak(y[i - 2:i + 3])]
# initial RR interval average
peak_location = [i[0] for i in peak_location_values]
running_rr_avg = sum(np.diff(peak_location)) / (len(peak_location) - 1)
rpeak_temp1 = compute_r_peaks(threshold, running_rr_avg, y, peak_location_values)
rpeak_temp2 = remove_close_peaks(rpeak_temp1, sample, fs)
index = confirm_peaks(rpeak_temp2, sample, fs)
rpeak_timestamp = timestamp[index]
rpeak_value = np.diff(rpeak_timestamp)
rpeak_timestamp = rpeak_timestamp[1:]
result_data = []
for k in range(len(rpeak_value)):
result_data.append(
DataPoint.from_tuple(rpeak_timestamp[k], rpeak_value[k].seconds + rpeak_value[k].microseconds / 1e6))
# Create resulting datastream to be returned
result.data = result_data
return result
| threshold_1 = noise_lev + 0.25 * (sig_lev - noise_lev)
threshold_2 = 0.5 * threshold_1
ind_rpeak += 1 | conditional_block |
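A standalone sketch of the filtering pipeline described in compute_moving_window_int (bandpass FIR, differentiation, squaring, Blackman-window integration), applied to a synthetic impulse train. The synthetic signal is made up and the 90th-percentile normalisation steps are omitted for brevity.

import numpy as np
from scipy import signal

fs = 64.0
t = np.arange(0, 10, 1 / fs)
# synthetic "ECG": a sparse impulse train plus a little noise (illustration only)
ecg = (np.sin(2 * np.pi * 1.2 * t) > 0.99).astype(float) + 0.05 * np.random.randn(t.size)

delta = 0.02
edges = [0, 4.5 * 2 / fs, 5 * 2 / fs, 20 * 2 / fs, 20.5 * 2 / fs, 1]
gains = [0, 0, 1, 1, 0, 0]
weights = [500 / delta, 1 / delta, 500 / delta]
bandpass = signal.convolve(ecg, signal.firls(257, edges, gains, weights), 'same')
derivative = signal.convolve(bandpass, np.array([-1.0, -2.0, 0, 2.0, 1.0]) / 8, 'same')
mwi = signal.convolve(derivative ** 2, np.blackman(int(np.ceil(fs * 0.2))), 'same')
print(mwi.shape)  # same length as the input; the bumps line up with the impulses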
ecg.py | # Copyright (c) 2017, MD2K Center of Excellence
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import List
import numpy as np
from scipy import signal
from cerebralcortex.data_processor.signalprocessing.dataquality import Quality
from cerebralcortex.kernel.datatypes.datapoint import DataPoint
from cerebralcortex.kernel.datatypes.datastream import DataStream
def filter_bad_ecg(ecg: DataStream,
ecg_quality: DataStream) -> DataStream:
"""
    This function combines the raw ecg datastream with the ecg data quality datastream and keeps only those datapoints that fall inside quality windows annotated as acceptable
:param ecg: raw ecg datastream
:param ecg_quality: ecg quality datastream
:return: filtered ecg datastream
"""
ecg_filtered = DataStream.from_datastream([ecg])
ecg_quality_array = ecg_quality.data
ecg_raw_timestamp_array = np.array([i.start_time.timestamp() for i in ecg.data])
ecg_filtered_array = []
initial_index = 0
for item in ecg_quality_array:
if item.sample == Quality.ACCEPTABLE:
final_index = initial_index
for i in range(initial_index, len(ecg.data)):
if item.start_time.timestamp() <= ecg_raw_timestamp_array[i] <= item.end_time.timestamp(): | ecg_filtered.data = ecg_filtered_array
return ecg_filtered
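# Illustrative sketch (hypothetical datastream objects; not part of the original module):
# clean_ecg = filter_bad_ecg(raw_ecg_datastream, ecg_quality_datastream)
# keeps only the raw samples whose timestamps fall inside windows annotated Quality.ACCEPTABLE.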
def compute_rr_intervals(ecg: DataStream,
ecg_quality: DataStream,
fs: float) -> DataStream:
"""
filter ecg datastream first and compute rr-interval datastream from the ecg datastream
:param ecg:ecg datastream
:param ecg_quality : ecg quality annotated datastream
:param fs: sampling frequency
:return: rr-interval datastream
"""
ecg_filtered = filter_bad_ecg(ecg, ecg_quality)
# compute the r-peak array
ecg_rpeak = detect_rpeak(ecg_filtered, fs)
return ecg_rpeak
def rr_interval_update(rpeak_temp1: List[DataPoint],
rr_ave: float,
min_size: int = 8) -> float:
"""
:param min_size: 8 last R-peaks are checked to compute the running rr interval average
:param rpeak_temp1: R peak locations
:param rr_ave: previous rr-interval average
:return: the new rr-interval average of the previously detected 8 R peak locations
"""
peak_interval = np.diff([0] + rpeak_temp1) # TODO: rpeak_temp1 is a datapoint, what should this be converted to?
return rr_ave if len(peak_interval) < min_size else np.sum(peak_interval[-min_size:]) / min_size
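# Illustrative example (assumed peak indices, worked from the line above; not part of the original module):
# rr_interval_update([10, 60, 110, 160, 210, 260, 310, 360, 410], rr_ave=50.0) returns 50.0,
# the mean of the last 8 peak-to-peak differences in samples.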
def compute_moving_window_int(sample: np.ndarray,
fs: float,
blackman_win_length: int,
filter_length: int = 257,
delta: float = .02) -> np.ndarray:
"""
:param sample: ecg sample array
:param fs: sampling frequency
:param blackman_win_length: length of the blackman window on which to compute the moving window integration
:param filter_length: length of the FIR bandpass filter on which filtering is done on ecg sample array
:param delta: to compute the weights of each band in FIR filter
:return: the Moving window integration of the sample array
"""
# I believe these constants can be kept in a file
# filter edges
filter_edges = [0, 4.5 * 2 / fs, 5 * 2 / fs, 20 * 2 / fs, 20.5 * 2 / fs, 1]
# gains at filter band edges
gains = [0, 0, 1, 1, 0, 0]
# weights
weights = [500 / delta, 1 / delta, 500 / delta]
# length of the FIR filter
# FIR filter coefficients for bandpass filtering
filter_coeff = signal.firls(filter_length, filter_edges, gains, weights)
# bandpass filtered signal
bandpass_signal = signal.convolve(sample, filter_coeff, 'same')
bandpass_signal /= np.percentile(bandpass_signal, 90)
# derivative array
derivative_array = (np.array([-1.0, -2.0, 0, 2.0, 1.0])) * (1 / 8)
# derivative signal (differentiation of the bandpass)
derivative_signal = signal.convolve(bandpass_signal, derivative_array, 'same')
derivative_signal /= np.percentile(derivative_signal, 90)
# squared derivative signal
derivative_squared_signal = derivative_signal ** 2
derivative_squared_signal /= np.percentile(derivative_squared_signal, 90)
# blackman window
blackman_window = np.blackman(blackman_win_length)
# moving window Integration of squared derivative signal
mov_win_int_signal = signal.convolve(derivative_squared_signal, blackman_window, 'same')
mov_win_int_signal /= np.percentile(mov_win_int_signal, 90)
return mov_win_int_signal
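# Illustrative usage sketch (hypothetical sample array, assumed 64 Hz sampling; not part of the original module):
# mwi = compute_moving_window_int(np.asarray(ecg_samples, dtype=float), fs=64,
#                                 blackman_win_length=int(np.ceil(64 * 0.2)))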
def check_peak(data: List[DataPoint]) -> bool:
"""
    Check whether the centre sample of the given window is a simple peak, i.e. the values strictly
    increase up to the midpoint and strictly decrease after it
    :param data: window of samples centred on the candidate peak (5 samples in this module)
    :return: True if the centre sample is a simple peak, False otherwise
"""
if len(data) < 3:
return False
midpoint = int(len(data) / 2)
test_value = data[0]
for i in data[1:midpoint + 1]:
if test_value < i:
test_value = i
else:
return False
for i in data[midpoint + 1:]:
if test_value > i:
test_value = i
else:
return False
return True
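# Illustrative behaviour, worked by hand from the loops above (not part of the original module):
# check_peak([1, 2, 5, 2, 1]) -> True  (values strictly rise to the midpoint, then strictly fall)
# check_peak([1, 2, 2, 2, 1]) -> False (a plateau around the midpoint is rejected)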
# TODO: CODE_REVIEW: Justify in the method documentation string the justification of the default values
# TODO: CODE_REVIEW: Make hard-coded constants default method parameter
def compute_r_peaks(threshold_1: float,
rr_ave: float,
mov_win_int_signal: np.ndarray,
peak_tuple_array: List[tuple]) -> list:
"""
This function does the adaptive thresholding of the signal to get the R-peak locations
    :param threshold_1: threshold above which a peak is accepted as an R peak (adapted from the running signal and noise levels)
:param rr_ave: running RR-interval average
:param mov_win_int_signal: signal sample array
:param peak_tuple_array: A tuple array containing location and values of the simple peaks detected in the process before
:returns rpeak_array_indices: The location of the R peaks in the signal sample array once found this is returned
"""
peak_location_in_signal_array = [i[0] for i in peak_tuple_array] # location of the simple peaks in signal array
amplitude_in_peak_locations = [i[1] for i in peak_tuple_array] # simple peak's amplitude in signal array
threshold_2 = 0.5 * threshold_1 # any signal value between threshold_2 and threshold_1 is a noise peak
sig_lev = 4 * threshold_1 # current signal level -any signal above thrice the signal level is discarded as a spurious value
noise_lev = 0.1 * sig_lev # current noise level of the signal
ind_rpeak = 0
rpeak_array_indices = []
rpeak_inds_in_peak_array = []
while ind_rpeak < len(peak_location_in_signal_array):
        # if no peak has been accepted as an R peak for more than 166 percent (1.66x) of the current RR interval,
        # search back over the skipped peaks using threshold_2, take the maximum in that range as the R peak
        # and update the RR interval accordingly
if len(rpeak_array_indices) >= 1 and peak_location_in_signal_array[ind_rpeak] - peak_location_in_signal_array[
rpeak_inds_in_peak_array[-1]] > 1.66 * rr_ave and ind_rpeak - rpeak_inds_in_peak_array[-1] > 1:
# values and indexes of previous peaks discarded as not an R peak whose magnitude is above threshold_2
searchback_array = [(k - rpeak_inds_in_peak_array[-1], amplitude_in_peak_locations[k]) for k in
range(rpeak_inds_in_peak_array[-1] + 1, ind_rpeak) if
3 * sig_lev > amplitude_in_peak_locations[k] > threshold_2]
if len(searchback_array) > 0:
# maximum inside the range calculated beforehand is taken as R peak
searchback_array_inrange_values = [x[1] for x in searchback_array]
searchback_max_index = np.argmax(searchback_array_inrange_values)
rpeak_array_indices.append(peak_location_in_signal_array[
rpeak_inds_in_peak_array[-1] + searchback_array[searchback_max_index][
0]])
rpeak_inds_in_peak_array.append(
rpeak_inds_in_peak_array[-1] + searchback_array[searchback_max_index][0])
sig_lev = ewma(sig_lev, mov_win_int_signal[peak_location_in_signal_array[ind_rpeak]],
.125) # update the current signal level
threshold_1 = noise_lev + 0.25 * (sig_lev - noise_lev)
threshold_2 = 0.5 * threshold_1
rr_ave = rr_interval_update(rpeak_array_indices, rr_ave)
ind_rpeak = rpeak_inds_in_peak_array[-1] + 1
else:
threshold_1 = noise_lev + 0.25 * (sig_lev - noise_lev)
threshold_2 = 0.5 * threshold_1
ind_rpeak += 1
else:
# R peak checking
if threshold_1 <= mov_win_int_signal[peak_location_in_signal_array[ind_rpeak]] < 3 * sig_lev:
rpeak_array_indices.append(peak_location_in_signal_array[ind_rpeak])
rpeak_inds_in_peak_array.append(ind_rpeak)
sig_lev = ewma(sig_lev, mov_win_int_signal[peak_location_in_signal_array[ind_rpeak]],
.125) # update the signal level
# noise peak checking
elif threshold_1 > mov_win_int_signal[peak_location_in_signal_array[ind_rpeak]] > threshold_2:
noise_lev = ewma(noise_lev, mov_win_int_signal[peak_location_in_signal_array[ind_rpeak]],
.125) # update the noise level
threshold_1 = noise_lev + 0.25 * (sig_lev - noise_lev)
threshold_2 = 0.5 * threshold_1
ind_rpeak += 1
rr_ave = rr_interval_update(rpeak_array_indices, rr_ave)
return rpeak_array_indices
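# Observation: the 0.125 update weight, the noise_lev + 0.25 * (sig_lev - noise_lev) threshold mix and
# the 1.66 * rr_ave search-back window used above correspond to the constants commonly quoted for the
# Pan-Tompkins R-peak detector; this is a reading of the code, not a statement about its provenance.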
def ewma(value: float, new_value: float, alpha: float) -> float:
"""
    :param value: previous (running) value
    :param new_value: new observation
    :param alpha: smoothing factor in [0, 1], the weight given to the new observation
    :return: exponentially weighted moving average of the two values
"""
return alpha * new_value + (1 - alpha) * value
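# Illustrative example (not part of the original module):
# ewma(1.0, 2.0, 0.125) == 0.125 * 2.0 + 0.875 * 1.0 == 1.125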
# TODO: CODE_REVIEW: Justify in the method documentation string the justification of the default values
# TODO: CODE_REVIEW: Make hard-coded constants default method parameter
def remove_close_peaks(rpeak_temp1: list,
sample: np.ndarray,
fs: float,
min_range: float = .5) -> list:
"""
    This function removes one of two consecutive R peaks when the difference between them
    is less than the minimum allowed RR interval (min_range * fs samples)
    :param min_range: minimum allowed spacing between consecutive R peaks, in seconds (multiplied by fs to get samples)
:param rpeak_temp1: R peak array containing the index of the R peaks
:param sample: sample array
:param fs: sampling frequency
:return: R peak array with no close R peaks
"""
difference = 0
rpeak_temp2 = rpeak_temp1
while difference != 1:
length_rpeak_temp2 = len(rpeak_temp2)
temp = np.diff(rpeak_temp2)
comp_index1 = [rpeak_temp2[i] for i in range(len(temp)) if temp[i] < min_range * fs]
comp_index2 = [rpeak_temp2[i + 1] for i in range(len(temp)) if temp[i] < min_range * fs]
comp1 = sample[comp_index1]
comp2 = sample[comp_index2]
checkmin = np.matrix([comp1, comp2])
temp_ind1 = [i for i in range(len(temp)) if temp[i] < min_range * fs]
temp_ind2 = np.argmin(np.array(checkmin), axis=0)
temp_ind = temp_ind1 + temp_ind2
temp_ind = np.unique(temp_ind)
count = 0
for i in temp_ind:
rpeak_temp2.remove(rpeak_temp2[i - count])
count = count + 1
difference = length_rpeak_temp2 - len(rpeak_temp2) + 1
return rpeak_temp2
def confirm_peaks(rpeak_temp1: list,
sample: np.ndarray,
fs: float,
range_for_checking: float = 1 / 10) -> np.ndarray:
"""
This function does the final check on the R peaks detected and
finds the maximum in a range of fs/10 of the detected peak location and assigns it to be the peak
:param rpeak_temp1: R peak array containing the index of the R peaks
:param sample: sample array
:param fs: sampling frequency
:param range_for_checking : The peaks are checked within a range of fs/10 to get the maximum value within that range
:return: final R peak array
"""
for i in range(1, len(rpeak_temp1) - 1):
start_index = int(rpeak_temp1[i] - np.ceil(range_for_checking * fs))
end_index = int(rpeak_temp1[i] + np.ceil(range_for_checking * fs) + 1)
index = np.argmax(sample[start_index:end_index])
rpeak_temp1[i] = rpeak_temp1[i] - np.ceil(range_for_checking * fs) + index
return np.array(rpeak_temp1).astype(np.int64)
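# Illustrative end-to-end sketch of the helpers above (hypothetical sample array, assumed fs=64;
# detect_rpeak below chains the same steps on a DataStream):
# y = compute_moving_window_int(sample, 64, int(np.ceil(64 * 0.2)))
# peaks = [(i, y[i]) for i in range(2, len(y) - 1) if check_peak(y[i - 2:i + 3])]
# rr0 = sum(np.diff([p[0] for p in peaks])) / (len(peaks) - 1)
# rpeak_idx = confirm_peaks(remove_close_peaks(compute_r_peaks(0.5, rr0, y, peaks), sample, 64), sample, 64)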
# TODO: CODE_REVIEW: Make hard-coded constants default method parameter
def detect_rpeak(ecg: DataStream,
fs: float = 64,
threshold: float = 0.5,
blackman_win_len_range: float = 0.2) -> DataStream:
"""
    This program implements the Pan-Tompkins algorithm on an ECG signal to detect the R peaks.
    Since the ecg array can have discontinuities in its timestamp array, the rr-interval used by
    the algorithm is computed in terms of indices into the sample array.
The algorithm consists of some major steps
1. computation of the moving window integration of the signal in terms of blackman window of a prescribed length
2. compute all the peaks of the moving window integration signal
3. adaptive thresholding with dynamic signal and noise thresholds applied to filter out the R peak locations
4. confirm the R peaks through differentiation from the nearby peaks and remove the false peaks
:param ecg: ecg array of tuples (timestamp,value)
:param fs: sampling frequency
:param threshold: initial threshold to detect the R peak in a signal normalized by the 90th percentile. .5 is default.
:param blackman_win_len_range : the range to calculate blackman window length
:return: R peak array of tuples (timestamp, Rpeak interval)
"""
data = ecg.data
result = DataStream.from_datastream([ecg])
if len(data) == 0:
result.data = []
return result
sample = np.array([i.sample for i in data])
timestamp = np.array([i.start_time for i in data])
# computes the moving window integration of the signal
blackman_win_len = np.ceil(fs * blackman_win_len_range)
y = compute_moving_window_int(sample, fs, blackman_win_len)
peak_location_values = [(i, y[i]) for i in range(2, len(y) - 1) if check_peak(y[i - 2:i + 3])]
# initial RR interval average
peak_location = [i[0] for i in peak_location_values]
running_rr_avg = sum(np.diff(peak_location)) / (len(peak_location) - 1)
rpeak_temp1 = compute_r_peaks(threshold, running_rr_avg, y, peak_location_values)
rpeak_temp2 = remove_close_peaks(rpeak_temp1, sample, fs)
index = confirm_peaks(rpeak_temp2, sample, fs)
rpeak_timestamp = timestamp[index]
rpeak_value = np.diff(rpeak_timestamp)
rpeak_timestamp = rpeak_timestamp[1:]
result_data = []
for k in range(len(rpeak_value)):
result_data.append(
DataPoint.from_tuple(rpeak_timestamp[k], rpeak_value[k].seconds + rpeak_value[k].microseconds / 1e6))
# Create resulting datastream to be returned
result.data = result_data
return result | ecg_filtered_array.append(ecg.data[i])
final_index = i
initial_index = final_index
| random_line_split |
Gruntfile.js | module.exports = function (grunt) {
grunt.initConfig({
// Builds Sass
sass: {
dev: {
options: {
style: 'expanded',
sourcemap: true,
includePaths: [
'govuk_modules/govuk_template/assets/stylesheets',
'govuk_modules/govuk_frontend_toolkit/stylesheets',
'govuk_modules/govuk-elements-sass/'
],
outputStyle: 'expanded'
},
files: [{
expand: true,
cwd: 'app/assets/sass',
src: ['*.scss'],
dest: 'public/stylesheets/',
ext: '.css'
}]
}
},
// Copies templates and assets from external modules and dirs
sync: {
assets: {
files: [{
expand: true,
cwd: 'app/assets/',
src: ['**/*', '!sass/**'],
dest: 'public/'
}],
ignoreInDest: '**/stylesheets/**',
updateAndDelete: true
},
govuk: {
files: [{
cwd: 'node_modules/govuk_frontend_toolkit/',
src: '**',
dest: 'govuk_modules/govuk_frontend_toolkit/'
},
{
cwd: 'node_modules/govuk_template_jinja/assets/',
src: '**',
dest: 'govuk_modules/govuk_template/assets/'
},
{
cwd: 'node_modules/govuk_template_jinja/views/layouts/',
src: '**',
dest: 'govuk_modules/govuk_template/views/layouts/'
},
{
cwd: 'node_modules/govuk-elements-sass/public/sass/',
src: ['**', '!node_modules', '!elements-page.scss', '!elements-page-ie6.scss', '!elements-page-ie7.scss', '!elements-page-ie8.scss', '!main.scss', '!main-ie6.scss', '!main-ie7.scss', '!main-ie8.scss', '!prism.scss'],
dest: 'govuk_modules/govuk-elements-sass/'
}]
},
govuk_template_jinja: {
files: [{
cwd: 'govuk_modules/govuk_template/views/layouts/',
src: '**',
dest: 'lib/'
}]
},
tachyons: {
files: [{
cwd: 'node_modules/tachyons/css/',
src: '**',
dest: 'public/stylesheets/'
}]
}
},
// Watches assets and sass for changes
watch: {
css: {
files: ['app/assets/sass/**/*.scss'],
tasks: ['sass'],
options: {
spawn: false
}
},
assets: {
files: ['app/assets/**/*', '!app/assets/sass/**'],
tasks: ['sync:assets'], | }
},
// nodemon watches for changes and restarts app
nodemon: {
dev: {
script: 'server.js',
options: {
ext: 'js, json',
ignore: ['node_modules/**', 'app/assets/**', 'app/components/**', 'app/lib/**', 'public/**'],
args: grunt.option.flags()
}
}
},
concurrent: {
target: {
tasks: ['watch', 'nodemon'],
options: {
logConcurrentOutput: true
}
}
}
})
;[
'grunt-sync',
'grunt-contrib-watch',
'grunt-sass',
'grunt-nodemon',
'grunt-concurrent'
].forEach(function (task) {
grunt.loadNpmTasks(task)
})
grunt.registerTask('generate-assets', [
'sync',
'sass'
])
grunt.registerTask('default', [
'generate-assets',
'concurrent:target'
])
grunt.registerTask(
'test',
'default',
function () {
grunt.log.writeln('Test that the app runs')
}
)
} | options: {
spawn: false
} | random_line_split |
jquery.compat-1.3.js | /*
* Compatibility Plugin for jQuery 1.3 (on top of jQuery 1.4)
* All code copied from jQuery 1.4
* By John Resig
* Dual licensed under MIT and GPL.
*/
(function(jQuery) {
// .add() is no longer equivalent to .concat()
// Results are now returned in document order
jQuery.fn.add = function( selector, context ) {
var set = typeof selector === "string" ?
jQuery( selector, context || this.context ) :
jQuery.makeArray( selector );
return this.pushStack( jQuery.merge( this.get(), set ) );
};
// clone( true ) now copies over all data in addition to
// the events
jQuery.fn.clone = function( events ) {
// Do the clone
var ret = this.map(function() {
if ( !jQuery.support.noCloneEvent && !jQuery.isXMLDoc(this) ) {
// IE copies events bound via attachEvent when
// using cloneNode. Calling detachEvent on the
// clone will also remove the events from the orignal
// In order to get around this, we use innerHTML.
// Unfortunately, this means some modifications to
// attributes in IE that are actually only stored
// as properties will not be copied (such as the
// the name attribute on an input).
var html = this.outerHTML, ownerDocument = this.ownerDocument;
if ( !html ) {
var div = ownerDocument.createElement("div");
div.appendChild( this.cloneNode(true) );
html = div.innerHTML;
}
return jQuery.clean([html.replace(/ jQuery\d+="(?:\d+|null)"/g, "")
.replace(/^\s+/, "")], ownerDocument)[0];
} else {
return this.cloneNode(true);
}
});
// Copy the events from the original to the clone
if ( events === true ) {
cloneCopyEvent( this, ret );
cloneCopyEvent( this.find("*"), ret.find("*") );
}
// Return the cloned set
return ret;
};
function cloneCopyEvent(orig, ret) {
var i = 0;
ret.each(function() {
if ( this.nodeName !== (orig[i] && orig[i].nodeName) ) {
return;
}
var oldData = jQuery.data( orig[i++] ), events = oldData && oldData.events;
if ( events ) {
for ( var type in events ) {
for ( var handler in events[ type ] ) {
jQuery.event.add( this, type, events[ type ][ handler ], events[ type ][ handler ].data );
}
}
}
});
}
// jQuery.data(elem) no longer returns an ID,
// returns the data object instead.
jQuery.data = function( elem, name, data ) {
if ( elem.nodeName && jQuery.noData[elem.nodeName.toLowerCase()] ) {
return;
}
elem = elem == window ?
windowData :
elem;
var id = elem[ jQuery.expando ], cache = jQuery.cache, thisCache;
// Handle the case where there's no name immediately
if ( !name && !id ) {
return null;
}
// Compute a unique ID for the element
if ( !id ) {
id = ++jQuery.uuid;
}
// Avoid generating a new cache unless none exists and we
// want to manipulate it.
if ( typeof name === "object" ) {
elem[ jQuery.expando ] = id;
thisCache = cache[ id ] = jQuery.extend(true, {}, name);
} else if ( cache[ id ] ) {
thisCache = cache[ id ];
} else if ( typeof data === "undefined" ) {
thisCache = {};
} else {
thisCache = cache[ id ] = {};
}
// Prevent overriding the named cache with undefined values
if ( data !== undefined ) {
elem[ jQuery.expando ] = id;
thisCache[ name ] = data;
}
return typeof name === "string" ? thisCache[ name ] : id;
};
// jQuery() now returns an empty jQuery set, not jQuery(document)
var oldinit = jQuery.fn.init;
jQuery.fn.init = function( selector ) {
if ( selector === undefined ) {
return jQuery( document );
}
oldinit.apply( this, arguments );
return this;
};
jQuery.fn.init.prototype = oldinit.prototype;
// .val("...") on radio and checkbox elements was amgiuous,
// it only selects on value now (which is much less ambiguous)
var oldval = jQuery.fn.val;
jQuery.fn.val = function( val ) {
if ( val !== undefined ) {
return this.each(function(i) {
var self = jQuery(this), val = value;
if ( this.nodeType !== 1 ) {
return;
}
if ( isFunction ) {
val = value.call(this, i, self.val());
}
// Typecast each time if the value is a Function and the appended
// value is therefore different each time.
if ( typeof val === "number" ) {
val += "";
}
if ( jQuery.isArray(val) && rradiocheck.test( this.type ) ) {
this.checked = jQuery.inArray( self.val(), val ) >= 0 ||
jQuery.inArray(this.name, value) >= 0;
} else if ( jQuery.nodeName( this, "select" ) ) {
var values = jQuery.makeArray(val);
jQuery( "option", this ).each(function() {
this.selected = jQuery.inArray( jQuery(this).val(), values ) >= 0 ||
jQuery.inArray( this.text, values ) >= 0;
});
if ( !values.length ) {
this.selectedIndex = -1;
}
} else {
this.value = val;
}
});
}
return oldval.apply( this, arguments );
};
// jQuery.browser.version now exclusively matches based upon the rendering engine
jQuery.browser.version = (navigator.userAgent.toLowerCase().match( /.+(?:rv|it|ra|ie)[\/: ]([\d.]+)/ ) || [0,'0'])[1];
// jQuery.ajax() is now strict about JSON input (must follow the spec)
// Also, it auto-executes scripts that have no dataType and have a content-type of "text/javascript"
jQuery.httpData = function( xhr, type, s ) { |
if ( xml && data.documentElement.nodeName === "parsererror" ) {
throw "parsererror";
}
// Allow a pre-filtering function to sanitize the response
// s is checked to keep backwards compatibility
if ( s && s.dataFilter ) {
data = s.dataFilter( data, type );
}
// The filter can actually parse the response
if ( typeof data === "string" ) {
// Get the JavaScript object, if JSON is used.
if ( type === "json" || !type && ct.indexOf("json") >= 0 ) {
data = (new Function("return " + data))();
}
}
return data;
};
// Ajax data is now serialized in the PHP/Rails style be default
jQuery.ajaxSettings.traditional = true;
// The internal jQuery.className structure has been removed in
// favor of the traditional jQuery methods
jQuery.className = {
add: function ( elem, classNames ) {
jQuery( elem ).addClass( classNames );
},
remove: function( elem, classNames ) {
jQuery( elem ).removeClass( classNames );
},
has: function( elem, className ) {
jQuery( elem ).hasClass( className );
}
};
// jQuery.extend( true, ... ) only works on plain objects and arrays now
jQuery.extend = jQuery.fn.extend = function() {
// copy reference to target object
var target = arguments[0] || {}, i = 1, length = arguments.length, deep = false, options, name, src, copy;
// Handle a deep copy situation
if ( typeof target === "boolean" ) {
deep = target;
target = arguments[1] || {};
// skip the boolean and the target
i = 2;
}
// Handle case when target is a string or something (possible in deep copy)
if ( typeof target !== "object" && !jQuery.isFunction(target) ) {
target = {};
}
// extend jQuery itself if only one argument is passed
if ( length === i ) {
target = this;
--i;
}
for ( ; i < length; i++ ) {
// Only deal with non-null/undefined values
if ( (options = arguments[ i ]) != null ) {
// Extend the base object
for ( name in options ) {
src = target[ name ];
copy = options[ name ];
// Prevent never-ending loop
if ( target === copy ) {
continue;
}
// Recurse if we're merging object literal values or arrays
if ( deep && copy && typeof copy === "object" && !copy.nodeType ) {
target[ name ] = jQuery.extend( deep,
// Never move original objects, clone them
src || ( copy.length != null ? [ ] : { } ), copy );
// Don't bring in undefined values
} else if ( copy !== undefined ) {
target[ name ] = copy;
}
}
}
}
// Return the modified object
return target;
};
})(jQuery); | var ct = xhr.getResponseHeader("content-type") || "",
xml = type === "xml" || !type && ct.indexOf("xml") >= 0,
data = xml ? xhr.responseXML : xhr.responseText; | random_line_split |
jquery.compat-1.3.js | /*
* Compatibility Plugin for jQuery 1.3 (on top of jQuery 1.4)
* All code copied from jQuery 1.4
* By John Resig
* Dual licensed under MIT and GPL.
*/
(function(jQuery) {
// .add() is no longer equivalent to .concat()
// Results are now returned in document order
jQuery.fn.add = function( selector, context ) {
var set = typeof selector === "string" ?
jQuery( selector, context || this.context ) :
jQuery.makeArray( selector );
return this.pushStack( jQuery.merge( this.get(), set ) );
};
// clone( true ) now copies over all data in addition to
// the events
jQuery.fn.clone = function( events ) {
// Do the clone
var ret = this.map(function() {
if ( !jQuery.support.noCloneEvent && !jQuery.isXMLDoc(this) ) {
// IE copies events bound via attachEvent when
// using cloneNode. Calling detachEvent on the
// clone will also remove the events from the orignal
// In order to get around this, we use innerHTML.
// Unfortunately, this means some modifications to
// attributes in IE that are actually only stored
// as properties will not be copied (such as the
// the name attribute on an input).
var html = this.outerHTML, ownerDocument = this.ownerDocument;
if ( !html ) {
var div = ownerDocument.createElement("div");
div.appendChild( this.cloneNode(true) );
html = div.innerHTML;
}
return jQuery.clean([html.replace(/ jQuery\d+="(?:\d+|null)"/g, "")
.replace(/^\s+/, "")], ownerDocument)[0];
} else {
return this.cloneNode(true);
}
});
// Copy the events from the original to the clone
if ( events === true ) {
cloneCopyEvent( this, ret );
cloneCopyEvent( this.find("*"), ret.find("*") );
}
// Return the cloned set
return ret;
};
function cloneCopyEvent(orig, ret) {
var i = 0;
ret.each(function() {
if ( this.nodeName !== (orig[i] && orig[i].nodeName) ) {
return;
}
var oldData = jQuery.data( orig[i++] ), events = oldData && oldData.events;
if ( events ) {
for ( var type in events ) {
for ( var handler in events[ type ] ) {
jQuery.event.add( this, type, events[ type ][ handler ], events[ type ][ handler ].data );
}
}
}
});
}
// jQuery.data(elem) no longer returns an ID,
// returns the data object instead.
jQuery.data = function( elem, name, data ) {
if ( elem.nodeName && jQuery.noData[elem.nodeName.toLowerCase()] ) {
return;
}
elem = elem == window ?
windowData :
elem;
var id = elem[ jQuery.expando ], cache = jQuery.cache, thisCache;
// Handle the case where there's no name immediately
if ( !name && !id ) {
return null;
}
// Compute a unique ID for the element
if ( !id ) {
id = ++jQuery.uuid;
}
// Avoid generating a new cache unless none exists and we
// want to manipulate it.
if ( typeof name === "object" ) {
elem[ jQuery.expando ] = id;
thisCache = cache[ id ] = jQuery.extend(true, {}, name);
} else if ( cache[ id ] ) {
thisCache = cache[ id ];
} else if ( typeof data === "undefined" ) {
thisCache = {};
} else {
thisCache = cache[ id ] = {};
}
// Prevent overriding the named cache with undefined values
if ( data !== undefined ) {
elem[ jQuery.expando ] = id;
thisCache[ name ] = data;
}
return typeof name === "string" ? thisCache[ name ] : id;
};
// jQuery() now returns an empty jQuery set, not jQuery(document)
var oldinit = jQuery.fn.init;
jQuery.fn.init = function( selector ) {
if ( selector === undefined ) {
return jQuery( document );
}
oldinit.apply( this, arguments );
return this;
};
jQuery.fn.init.prototype = oldinit.prototype;
// .val("...") on radio and checkbox elements was amgiuous,
// it only selects on value now (which is much less ambiguous)
var oldval = jQuery.fn.val;
jQuery.fn.val = function( val ) {
if ( val !== undefined ) {
return this.each(function(i) {
var self = jQuery(this), val = value;
if ( this.nodeType !== 1 ) {
return;
}
if ( isFunction ) {
val = value.call(this, i, self.val());
}
// Typecast each time if the value is a Function and the appended
// value is therefore different each time.
if ( typeof val === "number" ) {
val += "";
}
if ( jQuery.isArray(val) && rradiocheck.test( this.type ) ) {
this.checked = jQuery.inArray( self.val(), val ) >= 0 ||
jQuery.inArray(this.name, value) >= 0;
} else if ( jQuery.nodeName( this, "select" ) ) {
var values = jQuery.makeArray(val);
jQuery( "option", this ).each(function() {
this.selected = jQuery.inArray( jQuery(this).val(), values ) >= 0 ||
jQuery.inArray( this.text, values ) >= 0;
});
if ( !values.length ) {
this.selectedIndex = -1;
}
} else |
});
}
return oldval.apply( this, arguments );
};
// jQuery.browser.version now exclusively matches based upon the rendering engine
jQuery.browser.version = (navigator.userAgent.toLowerCase().match( /.+(?:rv|it|ra|ie)[\/: ]([\d.]+)/ ) || [0,'0'])[1];
// jQuery.ajax() is now strict about JSON input (must follow the spec)
// Also, it auto-executes scripts that have no dataType and have a content-type of "text/javascript"
jQuery.httpData = function( xhr, type, s ) {
var ct = xhr.getResponseHeader("content-type") || "",
xml = type === "xml" || !type && ct.indexOf("xml") >= 0,
data = xml ? xhr.responseXML : xhr.responseText;
if ( xml && data.documentElement.nodeName === "parsererror" ) {
throw "parsererror";
}
// Allow a pre-filtering function to sanitize the response
// s is checked to keep backwards compatibility
if ( s && s.dataFilter ) {
data = s.dataFilter( data, type );
}
// The filter can actually parse the response
if ( typeof data === "string" ) {
// Get the JavaScript object, if JSON is used.
if ( type === "json" || !type && ct.indexOf("json") >= 0 ) {
data = (new Function("return " + data))();
}
}
return data;
};
// Ajax data is now serialized in the PHP/Rails style be default
jQuery.ajaxSettings.traditional = true;
// The internal jQuery.className structure has been removed in
// favor of the traditional jQuery methods
jQuery.className = {
add: function ( elem, classNames ) {
jQuery( elem ).addClass( classNames );
},
remove: function( elem, classNames ) {
jQuery( elem ).removeClass( classNames );
},
has: function( elem, className ) {
jQuery( elem ).hasClass( className );
}
};
// jQuery.extend( true, ... ) only works on plain objects and arrays now
jQuery.extend = jQuery.fn.extend = function() {
// copy reference to target object
var target = arguments[0] || {}, i = 1, length = arguments.length, deep = false, options, name, src, copy;
// Handle a deep copy situation
if ( typeof target === "boolean" ) {
deep = target;
target = arguments[1] || {};
// skip the boolean and the target
i = 2;
}
// Handle case when target is a string or something (possible in deep copy)
if ( typeof target !== "object" && !jQuery.isFunction(target) ) {
target = {};
}
// extend jQuery itself if only one argument is passed
if ( length === i ) {
target = this;
--i;
}
for ( ; i < length; i++ ) {
// Only deal with non-null/undefined values
if ( (options = arguments[ i ]) != null ) {
// Extend the base object
for ( name in options ) {
src = target[ name ];
copy = options[ name ];
// Prevent never-ending loop
if ( target === copy ) {
continue;
}
// Recurse if we're merging object literal values or arrays
if ( deep && copy && typeof copy === "object" && !copy.nodeType ) {
target[ name ] = jQuery.extend( deep,
// Never move original objects, clone them
src || ( copy.length != null ? [ ] : { } ), copy );
// Don't bring in undefined values
} else if ( copy !== undefined ) {
target[ name ] = copy;
}
}
}
}
// Return the modified object
return target;
};
})(jQuery);
| {
this.value = val;
} | conditional_block |
jquery.compat-1.3.js | /*
* Compatibility Plugin for jQuery 1.3 (on top of jQuery 1.4)
* All code copied from jQuery 1.4
* By John Resig
* Dual licensed under MIT and GPL.
*/
(function(jQuery) {
// .add() is no longer equivalent to .concat()
// Results are now returned in document order
jQuery.fn.add = function( selector, context ) {
var set = typeof selector === "string" ?
jQuery( selector, context || this.context ) :
jQuery.makeArray( selector );
return this.pushStack( jQuery.merge( this.get(), set ) );
};
// clone( true ) now copies over all data in addition to
// the events
jQuery.fn.clone = function( events ) {
// Do the clone
var ret = this.map(function() {
if ( !jQuery.support.noCloneEvent && !jQuery.isXMLDoc(this) ) {
// IE copies events bound via attachEvent when
// using cloneNode. Calling detachEvent on the
// clone will also remove the events from the orignal
// In order to get around this, we use innerHTML.
// Unfortunately, this means some modifications to
// attributes in IE that are actually only stored
// as properties will not be copied (such as the
// the name attribute on an input).
var html = this.outerHTML, ownerDocument = this.ownerDocument;
if ( !html ) {
var div = ownerDocument.createElement("div");
div.appendChild( this.cloneNode(true) );
html = div.innerHTML;
}
return jQuery.clean([html.replace(/ jQuery\d+="(?:\d+|null)"/g, "")
.replace(/^\s+/, "")], ownerDocument)[0];
} else {
return this.cloneNode(true);
}
});
// Copy the events from the original to the clone
if ( events === true ) {
cloneCopyEvent( this, ret );
cloneCopyEvent( this.find("*"), ret.find("*") );
}
// Return the cloned set
return ret;
};
function | (orig, ret) {
var i = 0;
ret.each(function() {
if ( this.nodeName !== (orig[i] && orig[i].nodeName) ) {
return;
}
var oldData = jQuery.data( orig[i++] ), events = oldData && oldData.events;
if ( events ) {
for ( var type in events ) {
for ( var handler in events[ type ] ) {
jQuery.event.add( this, type, events[ type ][ handler ], events[ type ][ handler ].data );
}
}
}
});
}
// jQuery.data(elem) no longer returns an ID,
// returns the data object instead.
jQuery.data = function( elem, name, data ) {
if ( elem.nodeName && jQuery.noData[elem.nodeName.toLowerCase()] ) {
return;
}
elem = elem == window ?
windowData :
elem;
var id = elem[ jQuery.expando ], cache = jQuery.cache, thisCache;
// Handle the case where there's no name immediately
if ( !name && !id ) {
return null;
}
// Compute a unique ID for the element
if ( !id ) {
id = ++jQuery.uuid;
}
// Avoid generating a new cache unless none exists and we
// want to manipulate it.
if ( typeof name === "object" ) {
elem[ jQuery.expando ] = id;
thisCache = cache[ id ] = jQuery.extend(true, {}, name);
} else if ( cache[ id ] ) {
thisCache = cache[ id ];
} else if ( typeof data === "undefined" ) {
thisCache = {};
} else {
thisCache = cache[ id ] = {};
}
// Prevent overriding the named cache with undefined values
if ( data !== undefined ) {
elem[ jQuery.expando ] = id;
thisCache[ name ] = data;
}
return typeof name === "string" ? thisCache[ name ] : id;
};
// jQuery() now returns an empty jQuery set, not jQuery(document)
var oldinit = jQuery.fn.init;
jQuery.fn.init = function( selector ) {
if ( selector === undefined ) {
return jQuery( document );
}
oldinit.apply( this, arguments );
return this;
};
jQuery.fn.init.prototype = oldinit.prototype;
// .val("...") on radio and checkbox elements was amgiuous,
// it only selects on value now (which is much less ambiguous)
var oldval = jQuery.fn.val;
jQuery.fn.val = function( val ) {
if ( val !== undefined ) {
return this.each(function(i) {
var self = jQuery(this), val = value;
if ( this.nodeType !== 1 ) {
return;
}
if ( isFunction ) {
val = value.call(this, i, self.val());
}
// Typecast each time if the value is a Function and the appended
// value is therefore different each time.
if ( typeof val === "number" ) {
val += "";
}
if ( jQuery.isArray(val) && rradiocheck.test( this.type ) ) {
this.checked = jQuery.inArray( self.val(), val ) >= 0 ||
jQuery.inArray(this.name, value) >= 0;
} else if ( jQuery.nodeName( this, "select" ) ) {
var values = jQuery.makeArray(val);
jQuery( "option", this ).each(function() {
this.selected = jQuery.inArray( jQuery(this).val(), values ) >= 0 ||
jQuery.inArray( this.text, values ) >= 0;
});
if ( !values.length ) {
this.selectedIndex = -1;
}
} else {
this.value = val;
}
});
}
return oldval.apply( this, arguments );
};
// jQuery.browser.version now exclusively matches based upon the rendering engine
jQuery.browser.version = (navigator.userAgent.toLowerCase().match( /.+(?:rv|it|ra|ie)[\/: ]([\d.]+)/ ) || [0,'0'])[1];
// jQuery.ajax() is now strict about JSON input (must follow the spec)
// Also, it auto-executes scripts that have no dataType and have a content-type of "text/javascript"
jQuery.httpData = function( xhr, type, s ) {
var ct = xhr.getResponseHeader("content-type") || "",
xml = type === "xml" || !type && ct.indexOf("xml") >= 0,
data = xml ? xhr.responseXML : xhr.responseText;
if ( xml && data.documentElement.nodeName === "parsererror" ) {
throw "parsererror";
}
// Allow a pre-filtering function to sanitize the response
// s is checked to keep backwards compatibility
if ( s && s.dataFilter ) {
data = s.dataFilter( data, type );
}
// The filter can actually parse the response
if ( typeof data === "string" ) {
// Get the JavaScript object, if JSON is used.
if ( type === "json" || !type && ct.indexOf("json") >= 0 ) {
data = (new Function("return " + data))();
}
}
return data;
};
// Ajax data is now serialized in the PHP/Rails style be default
jQuery.ajaxSettings.traditional = true;
// The internal jQuery.className structure has been removed in
// favor of the traditional jQuery methods
jQuery.className = {
add: function ( elem, classNames ) {
jQuery( elem ).addClass( classNames );
},
remove: function( elem, classNames ) {
jQuery( elem ).removeClass( classNames );
},
has: function( elem, className ) {
jQuery( elem ).hasClass( className );
}
};
// jQuery.extend( true, ... ) only works on plain objects and arrays now
jQuery.extend = jQuery.fn.extend = function() {
// copy reference to target object
var target = arguments[0] || {}, i = 1, length = arguments.length, deep = false, options, name, src, copy;
// Handle a deep copy situation
if ( typeof target === "boolean" ) {
deep = target;
target = arguments[1] || {};
// skip the boolean and the target
i = 2;
}
// Handle case when target is a string or something (possible in deep copy)
if ( typeof target !== "object" && !jQuery.isFunction(target) ) {
target = {};
}
// extend jQuery itself if only one argument is passed
if ( length === i ) {
target = this;
--i;
}
for ( ; i < length; i++ ) {
// Only deal with non-null/undefined values
if ( (options = arguments[ i ]) != null ) {
// Extend the base object
for ( name in options ) {
src = target[ name ];
copy = options[ name ];
// Prevent never-ending loop
if ( target === copy ) {
continue;
}
// Recurse if we're merging object literal values or arrays
if ( deep && copy && typeof copy === "object" && !copy.nodeType ) {
target[ name ] = jQuery.extend( deep,
// Never move original objects, clone them
src || ( copy.length != null ? [ ] : { } ), copy );
// Don't bring in undefined values
} else if ( copy !== undefined ) {
target[ name ] = copy;
}
}
}
}
// Return the modified object
return target;
};
})(jQuery);
| cloneCopyEvent | identifier_name |
jquery.compat-1.3.js | /*
* Compatibility Plugin for jQuery 1.3 (on top of jQuery 1.4)
* All code copied from jQuery 1.4
* By John Resig
* Dual licensed under MIT and GPL.
*/
(function(jQuery) {
// .add() is no longer equivalent to .concat()
// Results are now returned in document order
jQuery.fn.add = function( selector, context ) {
var set = typeof selector === "string" ?
jQuery( selector, context || this.context ) :
jQuery.makeArray( selector );
return this.pushStack( jQuery.merge( this.get(), set ) );
};
// clone( true ) now copies over all data in addition to
// the events
jQuery.fn.clone = function( events ) {
// Do the clone
var ret = this.map(function() {
if ( !jQuery.support.noCloneEvent && !jQuery.isXMLDoc(this) ) {
// IE copies events bound via attachEvent when
// using cloneNode. Calling detachEvent on the
// clone will also remove the events from the orignal
// In order to get around this, we use innerHTML.
// Unfortunately, this means some modifications to
// attributes in IE that are actually only stored
// as properties will not be copied (such as the
// the name attribute on an input).
var html = this.outerHTML, ownerDocument = this.ownerDocument;
if ( !html ) {
var div = ownerDocument.createElement("div");
div.appendChild( this.cloneNode(true) );
html = div.innerHTML;
}
return jQuery.clean([html.replace(/ jQuery\d+="(?:\d+|null)"/g, "")
.replace(/^\s+/, "")], ownerDocument)[0];
} else {
return this.cloneNode(true);
}
});
// Copy the events from the original to the clone
if ( events === true ) {
cloneCopyEvent( this, ret );
cloneCopyEvent( this.find("*"), ret.find("*") );
}
// Return the cloned set
return ret;
};
function cloneCopyEvent(orig, ret) |
// jQuery.data(elem) no longer returns an ID,
// returns the data object instead.
jQuery.data = function( elem, name, data ) {
if ( elem.nodeName && jQuery.noData[elem.nodeName.toLowerCase()] ) {
return;
}
elem = elem == window ?
windowData :
elem;
var id = elem[ jQuery.expando ], cache = jQuery.cache, thisCache;
// Handle the case where there's no name immediately
if ( !name && !id ) {
return null;
}
// Compute a unique ID for the element
if ( !id ) {
id = ++jQuery.uuid;
}
// Avoid generating a new cache unless none exists and we
// want to manipulate it.
if ( typeof name === "object" ) {
elem[ jQuery.expando ] = id;
thisCache = cache[ id ] = jQuery.extend(true, {}, name);
} else if ( cache[ id ] ) {
thisCache = cache[ id ];
} else if ( typeof data === "undefined" ) {
thisCache = {};
} else {
thisCache = cache[ id ] = {};
}
// Prevent overriding the named cache with undefined values
if ( data !== undefined ) {
elem[ jQuery.expando ] = id;
thisCache[ name ] = data;
}
return typeof name === "string" ? thisCache[ name ] : id;
};
// jQuery() now returns an empty jQuery set, not jQuery(document)
var oldinit = jQuery.fn.init;
jQuery.fn.init = function( selector ) {
if ( selector === undefined ) {
return jQuery( document );
}
oldinit.apply( this, arguments );
return this;
};
jQuery.fn.init.prototype = oldinit.prototype;
// .val("...") on radio and checkbox elements was amgiuous,
// it only selects on value now (which is much less ambiguous)
var oldval = jQuery.fn.val;
jQuery.fn.val = function( val ) {
if ( val !== undefined ) {
return this.each(function(i) {
var self = jQuery(this), val = value;
if ( this.nodeType !== 1 ) {
return;
}
if ( isFunction ) {
val = value.call(this, i, self.val());
}
// Typecast each time if the value is a Function and the appended
// value is therefore different each time.
if ( typeof val === "number" ) {
val += "";
}
if ( jQuery.isArray(val) && rradiocheck.test( this.type ) ) {
this.checked = jQuery.inArray( self.val(), val ) >= 0 ||
jQuery.inArray(this.name, value) >= 0;
} else if ( jQuery.nodeName( this, "select" ) ) {
var values = jQuery.makeArray(val);
jQuery( "option", this ).each(function() {
this.selected = jQuery.inArray( jQuery(this).val(), values ) >= 0 ||
jQuery.inArray( this.text, values ) >= 0;
});
if ( !values.length ) {
this.selectedIndex = -1;
}
} else {
this.value = val;
}
});
}
return oldval.apply( this, arguments );
};
// jQuery.browser.version now exclusively matches based upon the rendering engine
jQuery.browser.version = (navigator.userAgent.toLowerCase().match( /.+(?:rv|it|ra|ie)[\/: ]([\d.]+)/ ) || [0,'0'])[1];
// jQuery.ajax() is now strict about JSON input (must follow the spec)
// Also, it auto-executes scripts that have no dataType and have a content-type of "text/javascript"
jQuery.httpData = function( xhr, type, s ) {
var ct = xhr.getResponseHeader("content-type") || "",
xml = type === "xml" || !type && ct.indexOf("xml") >= 0,
data = xml ? xhr.responseXML : xhr.responseText;
if ( xml && data.documentElement.nodeName === "parsererror" ) {
throw "parsererror";
}
// Allow a pre-filtering function to sanitize the response
// s is checked to keep backwards compatibility
if ( s && s.dataFilter ) {
data = s.dataFilter( data, type );
}
// The filter can actually parse the response
if ( typeof data === "string" ) {
// Get the JavaScript object, if JSON is used.
if ( type === "json" || !type && ct.indexOf("json") >= 0 ) {
data = (new Function("return " + data))();
}
}
return data;
};
// Ajax data is now serialized in the PHP/Rails style be default
jQuery.ajaxSettings.traditional = true;
// The internal jQuery.className structure has been removed in
// favor of the traditional jQuery methods
jQuery.className = {
add: function ( elem, classNames ) {
jQuery( elem ).addClass( classNames );
},
remove: function( elem, classNames ) {
jQuery( elem ).removeClass( classNames );
},
has: function( elem, className ) {
jQuery( elem ).hasClass( className );
}
};
// jQuery.extend( true, ... ) only works on plain objects and arrays now
jQuery.extend = jQuery.fn.extend = function() {
// copy reference to target object
var target = arguments[0] || {}, i = 1, length = arguments.length, deep = false, options, name, src, copy;
// Handle a deep copy situation
if ( typeof target === "boolean" ) {
deep = target;
target = arguments[1] || {};
// skip the boolean and the target
i = 2;
}
// Handle case when target is a string or something (possible in deep copy)
if ( typeof target !== "object" && !jQuery.isFunction(target) ) {
target = {};
}
// extend jQuery itself if only one argument is passed
if ( length === i ) {
target = this;
--i;
}
for ( ; i < length; i++ ) {
// Only deal with non-null/undefined values
if ( (options = arguments[ i ]) != null ) {
// Extend the base object
for ( name in options ) {
src = target[ name ];
copy = options[ name ];
// Prevent never-ending loop
if ( target === copy ) {
continue;
}
// Recurse if we're merging object literal values or arrays
if ( deep && copy && typeof copy === "object" && !copy.nodeType ) {
target[ name ] = jQuery.extend( deep,
// Never move original objects, clone them
src || ( copy.length != null ? [ ] : { } ), copy );
// Don't bring in undefined values
} else if ( copy !== undefined ) {
target[ name ] = copy;
}
}
}
}
// Return the modified object
return target;
};
})(jQuery);
| {
var i = 0;
ret.each(function() {
if ( this.nodeName !== (orig[i] && orig[i].nodeName) ) {
return;
}
var oldData = jQuery.data( orig[i++] ), events = oldData && oldData.events;
if ( events ) {
for ( var type in events ) {
for ( var handler in events[ type ] ) {
jQuery.event.add( this, type, events[ type ][ handler ], events[ type ][ handler ].data );
}
}
}
});
} | identifier_body |
lib.rs | #[derive(Debug, PartialEq)]
pub struct Clock {
hours: i16,
minutes: i16,
}
impl Clock {
pub fn new(hours: i16, minutes: i16) -> Self {
Clock { hours, minutes }.normalize()
}
pub fn add_minutes(mut self, n: i16) -> Self {
self.minutes += n;
self.normalize()
}
pub fn to_string(&self) -> String |
fn normalize(mut self) -> Self {
self.hours += self.minutes / 60;
self.minutes %= 60;
self.hours %= 24;
if self.minutes < 0 {
self.hours -= 1;
self.minutes += 60;
}
if self.hours < 0 {
self.hours += 24;
}
self
}
}
| {
format!("{:02}:{:02}", self.hours, self.minutes)
} | identifier_body |
lib.rs | #[derive(Debug, PartialEq)]
pub struct Clock {
hours: i16,
minutes: i16,
}
impl Clock {
pub fn new(hours: i16, minutes: i16) -> Self {
Clock { hours, minutes }.normalize()
}
pub fn add_minutes(mut self, n: i16) -> Self {
self.minutes += n;
self.normalize()
}
pub fn to_string(&self) -> String {
format!("{:02}:{:02}", self.hours, self.minutes)
}
fn normalize(mut self) -> Self {
self.hours += self.minutes / 60;
self.minutes %= 60;
self.hours %= 24;
if self.minutes < 0 {
self.hours -= 1;
self.minutes += 60;
}
if self.hours < 0 |
self
}
}
| {
self.hours += 24;
} | conditional_block |
lib.rs | #[derive(Debug, PartialEq)]
pub struct Clock {
hours: i16,
minutes: i16,
}
impl Clock {
pub fn new(hours: i16, minutes: i16) -> Self {
Clock { hours, minutes }.normalize()
}
pub fn add_minutes(mut self, n: i16) -> Self {
self.minutes += n;
self.normalize()
}
pub fn to_string(&self) -> String {
format!("{:02}:{:02}", self.hours, self.minutes)
}
fn | (mut self) -> Self {
self.hours += self.minutes / 60;
self.minutes %= 60;
self.hours %= 24;
if self.minutes < 0 {
self.hours -= 1;
self.minutes += 60;
}
if self.hours < 0 {
self.hours += 24;
}
self
}
}
| normalize | identifier_name |
lib.rs | #[derive(Debug, PartialEq)]
pub struct Clock { | minutes: i16,
}
impl Clock {
pub fn new(hours: i16, minutes: i16) -> Self {
Clock { hours, minutes }.normalize()
}
pub fn add_minutes(mut self, n: i16) -> Self {
self.minutes += n;
self.normalize()
}
pub fn to_string(&self) -> String {
format!("{:02}:{:02}", self.hours, self.minutes)
}
fn normalize(mut self) -> Self {
self.hours += self.minutes / 60;
self.minutes %= 60;
self.hours %= 24;
if self.minutes < 0 {
self.hours -= 1;
self.minutes += 60;
}
if self.hours < 0 {
self.hours += 24;
}
self
}
} | hours: i16, | random_line_split |
TaskItem.tsx | /// <reference path="../../../webclient.d.ts"/>
import * as React from 'react';
import {Task, TaskStatus, Executor, IExecutorEditState} from '../model';
import ExecutorEditor from './ExecutorEditor';
interface ITaskItemProps extends React.Props<TaskItem> {
task : Task;
editState : {[executorId: number]: IExecutorEditState};
executorsFn : () => (Executor[] | boolean | Error);
onExpand : (id: number) => void;
onEditExecutor : (taskId: number, executorId: number) => void;
onSaveExecutor : (taskId: number, executorId: number, name: string) => void;
onCancelExecutor : (taskId: number, executorId: number) => void;
}
interface ITaskItemState {
collapsed: boolean;
}
export default class TaskItem extends React.Component<ITaskItemProps, ITaskItemState> {
private static TaskStatusColors = {
[TaskStatus.NEW] : 'red',
[TaskStatus.RUNNING] : 'blue',
[TaskStatus.DONE] : 'gray',
};
state: ITaskItemState = {collapsed: true};
toggle = () => {
let {collapsed} = this.state;
collapsed = !collapsed;
if (!collapsed) {
this.props.onExpand(this.props.task.id);
}
this.setState({collapsed});
};
onSaveExecutor = (id: number, name: string) => {
const {task} = this.props;
this.props.onSaveExecutor(task.id, id, name);
};
onCancelExecutor = (id: number) => {
const {task} = this.props;
this.props.onCancelExecutor(task.id, id);
};
onEditExecutor = (id: number) => {
const {task} = this.props;
this.props.onEditExecutor(task.id, id)
};
render() {
const {task, executorsFn, editState} = this.props;
const {collapsed} = this.state;
const renderStatus = (status: TaskStatus) => {
const color = TaskItem.TaskStatusColors[status];
return <span style={{float: 'right', color}}>{TaskStatus[status].toUpperCase()}</span>;
};
const renderExecutor = (e: Executor, editState: IExecutorEditState) => {
return !editState || editState.viewMode
? <span style={{cursor: 'pointer'}} onClick={() => this.onEditExecutor(e.id)}>#{e.id} {e.name}</span>
: <div>
<ExecutorEditor
executor={e}
progress={editState.progress}
onSave={this.onSaveExecutor}
onCancel={this.onCancelExecutor}
/>
{editState.error && <span style={{color: 'red'}}>{editState.error.message}</span>}
</div>
};
const renderExecutors = (e: (Executor[] | boolean | Error)) => {
if (typeof e === 'boolean' && e === true) {
return <i>Loading task executors...</i>;
}
if (e instanceof Error) {
const err: Error = e;
return <span style={{color: 'red'}}>{err.message}</span>
}
if (e instanceof Array) {
const arr: Executor[] = e;
return !arr.length
? <i>This task has no executors</i>
:<ul>{ arr.map(executor => <li key={executor.id}>{renderExecutor(executor, editState && editState[executor.id])}</li>) }</ul>;
}
throw new Error(`Unknown executors type ${typeof e}`);
};
return (
<div style={{width: '400px', marginBottom: '5px'}}>
<div>
<strong>#{task.id}</strong>
{' '}
<span>{task.title}</span>
| </div>
)
}
} | {renderStatus(task.status)}
<a style={{cursor: 'pointer', float: 'right', marginRight: '5px'}} onClick={this.toggle}>Executors</a>
</div>
{collapsed === false && <div>{renderExecutors(executorsFn())}</div>} | random_line_split |
TaskItem.tsx | /// <reference path="../../../webclient.d.ts"/>
import * as React from 'react';
import {Task, TaskStatus, Executor, IExecutorEditState} from '../model';
import ExecutorEditor from './ExecutorEditor';
interface ITaskItemProps extends React.Props<TaskItem> {
task : Task;
editState : {[executorId: number]: IExecutorEditState};
executorsFn : () => (Executor[] | boolean | Error);
onExpand : (id: number) => void;
onEditExecutor : (taskId: number, executorId: number) => void;
onSaveExecutor : (taskId: number, executorId: number, name: string) => void;
onCancelExecutor : (taskId: number, executorId: number) => void;
}
interface ITaskItemState {
collapsed: boolean;
}
export default class TaskItem extends React.Component<ITaskItemProps, ITaskItemState> {
private static TaskStatusColors = {
[TaskStatus.NEW] : 'red',
[TaskStatus.RUNNING] : 'blue',
[TaskStatus.DONE] : 'gray',
};
state: ITaskItemState = {collapsed: true};
toggle = () => {
let {collapsed} = this.state;
collapsed = !collapsed;
if (!collapsed) |
this.setState({collapsed});
};
onSaveExecutor = (id: number, name: string) => {
const {task} = this.props;
this.props.onSaveExecutor(task.id, id, name);
};
onCancelExecutor = (id: number) => {
const {task} = this.props;
this.props.onCancelExecutor(task.id, id);
};
onEditExecutor = (id: number) => {
const {task} = this.props;
this.props.onEditExecutor(task.id, id)
};
render() {
const {task, executorsFn, editState} = this.props;
const {collapsed} = this.state;
const renderStatus = (status: TaskStatus) => {
const color = TaskItem.TaskStatusColors[status];
return <span style={{float: 'right', color}}>{TaskStatus[status].toUpperCase()}</span>;
};
const renderExecutor = (e: Executor, editState: IExecutorEditState) => {
return !editState || editState.viewMode
? <span style={{cursor: 'pointer'}} onClick={() => this.onEditExecutor(e.id)}>#{e.id} {e.name}</span>
: <div>
<ExecutorEditor
executor={e}
progress={editState.progress}
onSave={this.onSaveExecutor}
onCancel={this.onCancelExecutor}
/>
{editState.error && <span style={{color: 'red'}}>{editState.error.message}</span>}
</div>
};
const renderExecutors = (e: (Executor[] | boolean | Error)) => {
if (typeof e === 'boolean' && e === true) {
return <i>Loading task executors...</i>;
}
if (e instanceof Error) {
const err: Error = e;
return <span style={{color: 'red'}}>{err.message}</span>
}
if (e instanceof Array) {
const arr: Executor[] = e;
return !arr.length
? <i>This task has no executors</i>
:<ul>{ arr.map(executor => <li key={executor.id}>{renderExecutor(executor, editState && editState[executor.id])}</li>) }</ul>;
}
throw new Error(`Unknown executors type ${typeof e}`);
};
return (
<div style={{width: '400px', marginBottom: '5px'}}>
<div>
<strong>#{task.id}</strong>
{' '}
<span>{task.title}</span>
{renderStatus(task.status)}
<a style={{cursor: 'pointer', float: 'right', marginRight: '5px'}} onClick={this.toggle}>Executors</a>
</div>
{collapsed === false && <div>{renderExecutors(executorsFn())}</div>}
</div>
)
}
}
| {
this.props.onExpand(this.props.task.id);
} | conditional_block |
full_inspiration.py | #!/usr/bin/env python
# coding: utf-8
"""
Copyright 2015 SYSTRAN Software, Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class FullInspiration(object):
"""
NOTE: This class is auto generated by the systran code generator program.
Do not edit the class manually.
"""
def __init__(self):
|
def __repr__(self):
properties = []
for p in self.__dict__:
if p != 'systran_types' and p != 'attribute_map':
properties.append('{prop}={val!r}'.format(prop=p, val=self.__dict__[p]))
return '<{name} {props}>'.format(name=__name__, props=' '.join(properties))
| """
Systran model
:param dict systran_types: The key is attribute name and the value is attribute type.
:param dict attribute_map: The key is attribute name and the value is json key in definition.
"""
self.systran_types = {
'id': 'str',
'location': 'FullLocation',
'type': 'str',
'title': 'str',
'introduction': 'str',
'content': 'str',
'photos': 'list[Photo]',
'videos': 'list[Video]'
}
self.attribute_map = {
'id': 'id',
'location': 'location',
'type': 'type',
'title': 'title',
'introduction': 'introduction',
'content': 'content',
'photos': 'photos',
'videos': 'videos'
}
# Inspiration Identifier
self.id = None # str
# Location
self.location = None # FullLocation
# Inspiration type
self.type = None # str
# Title
self.title = None # str
# Introduction
self.introduction = None # str
# Content
self.content = None # str
# Array of Photos
self.photos = None # list[Photo]
# Array of Videos
self.videos = None # list[Video] | identifier_body |
full_inspiration.py | #!/usr/bin/env python
# coding: utf-8
"""
Copyright 2015 SYSTRAN Software, Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class FullInspiration(object):
"""
NOTE: This class is auto generated by the systran code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
Systran model
:param dict systran_types: The key is attribute name and the value is attribute type.
:param dict attribute_map: The key is attribute name and the value is json key in definition.
"""
self.systran_types = {
'id': 'str',
'location': 'FullLocation',
'type': 'str',
'title': 'str',
'introduction': 'str',
'content': 'str',
'photos': 'list[Photo]',
'videos': 'list[Video]'
}
self.attribute_map = {
'id': 'id',
'location': 'location',
'type': 'type',
'title': 'title',
'introduction': 'introduction',
'content': 'content',
'photos': 'photos',
'videos': 'videos'
}
# Inspiration Identifier
self.id = None # str
# Location
self.location = None # FullLocation
# Inspiration type
self.type = None # str
# Title
self.title = None # str
# Introduction
self.introduction = None # str
# Content
self.content = None # str
# Array of Photos
self.photos = None # list[Photo]
# Array of Videos
self.videos = None # list[Video]
def __repr__(self):
properties = []
for p in self.__dict__:
if p != 'systran_types' and p != 'attribute_map':
|
return '<{name} {props}>'.format(name=__name__, props=' '.join(properties))
| properties.append('{prop}={val!r}'.format(prop=p, val=self.__dict__[p])) | conditional_block |
full_inspiration.py | #!/usr/bin/env python
# coding: utf-8
"""
Copyright 2015 SYSTRAN Software, Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class | (object):
"""
NOTE: This class is auto generated by the systran code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
Systran model
:param dict systran_types: The key is attribute name and the value is attribute type.
:param dict attribute_map: The key is attribute name and the value is json key in definition.
"""
self.systran_types = {
'id': 'str',
'location': 'FullLocation',
'type': 'str',
'title': 'str',
'introduction': 'str',
'content': 'str',
'photos': 'list[Photo]',
'videos': 'list[Video]'
}
self.attribute_map = {
'id': 'id',
'location': 'location',
'type': 'type',
'title': 'title',
'introduction': 'introduction',
'content': 'content',
'photos': 'photos',
'videos': 'videos'
}
# Inspiration Identifier
self.id = None # str
# Location
self.location = None # FullLocation
# Inspiration type
self.type = None # str
# Title
self.title = None # str
# Introduction
self.introduction = None # str
# Content
self.content = None # str
# Array of Photos
self.photos = None # list[Photo]
# Array of Videos
self.videos = None # list[Video]
def __repr__(self):
properties = []
for p in self.__dict__:
if p != 'systran_types' and p != 'attribute_map':
properties.append('{prop}={val!r}'.format(prop=p, val=self.__dict__[p]))
return '<{name} {props}>'.format(name=__name__, props=' '.join(properties))
| FullInspiration | identifier_name |
full_inspiration.py | #!/usr/bin/env python
# coding: utf-8
"""
Copyright 2015 SYSTRAN Software, Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
| class FullInspiration(object):
"""
NOTE: This class is auto generated by the systran code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
Systran model
:param dict systran_types: The key is attribute name and the value is attribute type.
:param dict attribute_map: The key is attribute name and the value is json key in definition.
"""
self.systran_types = {
'id': 'str',
'location': 'FullLocation',
'type': 'str',
'title': 'str',
'introduction': 'str',
'content': 'str',
'photos': 'list[Photo]',
'videos': 'list[Video]'
}
self.attribute_map = {
'id': 'id',
'location': 'location',
'type': 'type',
'title': 'title',
'introduction': 'introduction',
'content': 'content',
'photos': 'photos',
'videos': 'videos'
}
# Inspiration Identifier
self.id = None # str
# Location
self.location = None # FullLocation
# Inspiration type
self.type = None # str
# Title
self.title = None # str
# Introduction
self.introduction = None # str
# Content
self.content = None # str
# Array of Photos
self.photos = None # list[Photo]
# Array of Videos
self.videos = None # list[Video]
def __repr__(self):
properties = []
for p in self.__dict__:
if p != 'systran_types' and p != 'attribute_map':
properties.append('{prop}={val!r}'.format(prop=p, val=self.__dict__[p]))
return '<{name} {props}>'.format(name=__name__, props=' '.join(properties)) | random_line_split |
|
step6_file.rs | use std::rc::Rc;
//use std::collections::HashMap;
use fnv::FnvHashMap;
use itertools::Itertools;
#[macro_use]
extern crate lazy_static;
extern crate regex;
extern crate itertools;
extern crate fnv;
extern crate rustyline;
use rustyline::error::ReadlineError;
use rustyline::Editor;
#[macro_use]
mod types;
use types::{MalVal,MalArgs,MalRet,MalErr,error,format_error};
use types::MalVal::{Nil,Bool,Str,Sym,List,Vector,Hash,Func,MalFunc};
mod reader;
mod printer;
mod env;
use env::{Env,env_new,env_bind,env_get,env_set,env_sets};
#[macro_use]
mod core;
// read
fn read(str: &str) -> MalRet {
reader::read_str(str.to_string())
}
// eval
fn eval_ast(ast: &MalVal, env: &Env) -> MalRet {
match ast {
Sym(_) => Ok(env_get(&env, &ast)?),
List(v,_) => {
let mut lst: MalArgs = vec![];
for a in v.iter() { lst.push(eval(a.clone(), env.clone())?) }
Ok(list!(lst))
},
Vector(v,_) => {
let mut lst: MalArgs = vec![];
for a in v.iter() { lst.push(eval(a.clone(), env.clone())?) }
Ok(vector!(lst))
},
Hash(hm,_) => {
let mut new_hm: FnvHashMap<String,MalVal> = FnvHashMap::default();
for (k,v) in hm.iter() {
new_hm.insert(k.to_string(), eval(v.clone(), env.clone())?);
}
Ok(Hash(Rc::new(new_hm),Rc::new(Nil)))
},
_ => Ok(ast.clone()),
}
}
fn eval(mut ast: MalVal, mut env: Env) -> MalRet {
let ret: MalRet;
'tco: loop {
ret = match ast.clone() {
List(l,_) => {
if l.len() == 0 { return Ok(ast); }
let a0 = &l[0];
match a0 {
Sym(ref a0sym) if a0sym == "def!" => {
env_set(&env, l[1].clone(), eval(l[2].clone(), env.clone())?)
},
Sym(ref a0sym) if a0sym == "let*" => {
env = env_new(Some(env.clone()));
let (a1, a2) = (l[1].clone(), l[2].clone());
match a1 {
List(ref binds,_) | Vector(ref binds,_) => {
for (b, e) in binds.iter().tuples() {
match b {
Sym(_) => {
let _ = env_set(&env, b.clone(),
eval(e.clone(), env.clone())?);
},
_ => {
return error("let* with non-Sym binding");
}
}
}
},
_ => {
return error("let* with non-List bindings");
}
};
ast = a2;
continue 'tco;
},
Sym(ref a0sym) if a0sym == "do" => {
match eval_ast(&list!(l[1..l.len()-1].to_vec()), &env)? {
List(_,_) => {
ast = l.last().unwrap_or(&Nil).clone();
continue 'tco;
},
_ => error("invalid do form"),
}
},
Sym(ref a0sym) if a0sym == "if" => {
let cond = eval(l[1].clone(), env.clone())?;
match cond {
Bool(false) | Nil if l.len() >= 4 => {
ast = l[3].clone();
continue 'tco;
},
Bool(false) | Nil => Ok(Nil),
_ if l.len() >= 3 => {
ast = l[2].clone();
continue 'tco;
},
_ => Ok(Nil)
}
},
Sym(ref a0sym) if a0sym == "fn*" => {
let (a1, a2) = (l[1].clone(), l[2].clone());
Ok(MalFunc{eval: eval, ast: Rc::new(a2), env: env,
params: Rc::new(a1), is_macro: false,
meta: Rc::new(Nil)})
},
Sym(ref a0sym) if a0sym == "eval" => {
ast = eval(l[1].clone(), env.clone())?;
while let Some(ref e) = env.clone().outer {
env = e.clone();
}
continue 'tco;
},
_ => {
match eval_ast(&ast, &env)? {
List(ref el,_) => {
let ref f = el[0].clone();
let args = el[1..].to_vec();
match f {
Func(_,_) => f.apply(args),
MalFunc{ast: mast, env: menv, params, ..} => {
let a = &**mast;
let p = &**params;
env = env_bind(Some(menv.clone()), p.clone(), args)?;
ast = a.clone();
continue 'tco;
},
_ => error("attempt to call non-function"),
}
},
_ => {
error("expected a list")
}
}
}
}
},
_ => eval_ast(&ast, &env),
};
break;
} // end 'tco loop
ret
}
// print
fn print(ast: &MalVal) -> String |
fn rep(str: &str, env: &Env) -> Result<String,MalErr> {
let ast = read(str)?;
let exp = eval(ast, env.clone())?;
Ok(print(&exp))
}
fn main() {
let mut args = std::env::args();
let arg1 = args.nth(1);
// `()` can be used when no completer is required
let mut rl = Editor::<()>::new();
if rl.load_history(".mal-history").is_err() {
println!("No previous history.");
}
// core.rs: defined using rust
let repl_env = env_new(None);
for (k, v) in core::ns() {
env_sets(&repl_env, k, v);
}
env_sets(&repl_env, "*ARGV*", list!(args.map(Str).collect()));
// core.mal: defined using the language itself
let _ = rep("(def! not (fn* (a) (if a false true)))", &repl_env);
let _ = rep("(def! load-file (fn* (f) (eval (read-string (str \"(do \" (slurp f) \")\")))))", &repl_env);
// Invoked with arguments
if let Some(f) = arg1 {
match rep(&format!("(load-file \"{}\")",f), &repl_env) {
Ok(_) => std::process::exit(0),
Err(e) => {
println!("Error: {}", format_error(e));
std::process::exit(1);
}
}
}
// main repl loop
loop {
let readline = rl.readline("user> ");
match readline {
Ok(line) => {
rl.add_history_entry(&line);
rl.save_history(".mal-history").unwrap();
if line.len() > 0 {
match rep(&line, &repl_env) {
Ok(out) => println!("{}", out),
Err(e) => println!("Error: {}", format_error(e)),
}
}
},
Err(ReadlineError::Interrupted) => continue,
Err(ReadlineError::Eof) => break,
Err(err) => {
println!("Error: {:?}", err);
break
}
}
}
}
// vim: ts=2:sw=2:expandtab
| {
ast.pr_str(true)
} | identifier_body |
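Note on the row above: its `eval` avoids Rust-stack recursion for `let*`, `do`, `if`, the `eval` special form and tail calls by overwriting `ast`/`env` and jumping back to the top of the labelled `'tco: loop`. The sketch below shows that same loop-instead-of-recursion pattern in isolation; it is illustrative only and not part of any dataset row — the `Expr` type and its rewrite rules are invented for the example.

// Illustrative sketch only: the same "loop instead of recursion" control flow used by
// `eval` in the row above. `Expr` and its rules are invented for this example.
enum Expr {
    Lit(i64),
    If(Box<Expr>, Box<Expr>, Box<Expr>),
    Add1(Box<Expr>),
}

fn eval_expr(mut e: Expr) -> i64 {
    let mut bump = 0; // work accumulated instead of recursing
    'tco: loop {
        match e {
            Expr::Lit(n) => return n + bump,
            Expr::If(cond, then_branch, else_branch) => {
                // The condition still uses a nested call, but the chosen branch is
                // evaluated by looping, exactly like the `if`/`do`/`let*` arms above.
                e = if eval_expr(*cond) != 0 { *then_branch } else { *else_branch };
                continue 'tco;
            }
            Expr::Add1(inner) => {
                bump += 1;
                e = *inner;
                continue 'tco;
            }
        }
    }
}

fn main() {
    let expr = Expr::Add1(Box::new(Expr::If(
        Box::new(Expr::Lit(1)),
        Box::new(Expr::Lit(41)),
        Box::new(Expr::Lit(0)),
    )));
    println!("{}", eval_expr(expr)); // prints 42
}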
step6_file.rs | use std::rc::Rc;
//use std::collections::HashMap;
use fnv::FnvHashMap;
use itertools::Itertools;
#[macro_use]
extern crate lazy_static;
extern crate regex;
extern crate itertools;
extern crate fnv;
extern crate rustyline;
use rustyline::error::ReadlineError;
use rustyline::Editor;
#[macro_use]
mod types;
use types::{MalVal,MalArgs,MalRet,MalErr,error,format_error};
use types::MalVal::{Nil,Bool,Str,Sym,List,Vector,Hash,Func,MalFunc};
mod reader;
mod printer;
mod env;
use env::{Env,env_new,env_bind,env_get,env_set,env_sets};
#[macro_use]
mod core;
// read
fn read(str: &str) -> MalRet {
reader::read_str(str.to_string())
}
// eval
fn eval_ast(ast: &MalVal, env: &Env) -> MalRet {
match ast {
Sym(_) => Ok(env_get(&env, &ast)?),
List(v,_) => {
let mut lst: MalArgs = vec![];
for a in v.iter() { lst.push(eval(a.clone(), env.clone())?) }
Ok(list!(lst))
},
Vector(v,_) => {
let mut lst: MalArgs = vec![];
for a in v.iter() { lst.push(eval(a.clone(), env.clone())?) }
Ok(vector!(lst))
},
Hash(hm,_) => {
let mut new_hm: FnvHashMap<String,MalVal> = FnvHashMap::default();
for (k,v) in hm.iter() {
new_hm.insert(k.to_string(), eval(v.clone(), env.clone())?);
}
Ok(Hash(Rc::new(new_hm),Rc::new(Nil)))
},
_ => Ok(ast.clone()),
}
}
fn eval(mut ast: MalVal, mut env: Env) -> MalRet {
let ret: MalRet;
'tco: loop {
ret = match ast.clone() {
List(l,_) => {
if l.len() == 0 { return Ok(ast); }
let a0 = &l[0];
match a0 {
Sym(ref a0sym) if a0sym == "def!" => {
env_set(&env, l[1].clone(), eval(l[2].clone(), env.clone())?)
},
Sym(ref a0sym) if a0sym == "let*" => {
env = env_new(Some(env.clone()));
let (a1, a2) = (l[1].clone(), l[2].clone());
match a1 {
List(ref binds,_) | Vector(ref binds,_) => {
for (b, e) in binds.iter().tuples() {
match b {
Sym(_) => {
let _ = env_set(&env, b.clone(),
eval(e.clone(), env.clone())?);
},
_ => {
return error("let* with non-Sym binding");
}
}
}
},
_ => {
return error("let* with non-List bindings");
}
};
ast = a2;
continue 'tco;
},
Sym(ref a0sym) if a0sym == "do" => {
match eval_ast(&list!(l[1..l.len()-1].to_vec()), &env)? {
List(_,_) => {
ast = l.last().unwrap_or(&Nil).clone();
continue 'tco;
},
_ => error("invalid do form"),
}
},
Sym(ref a0sym) if a0sym == "if" => {
let cond = eval(l[1].clone(), env.clone())?;
match cond {
Bool(false) | Nil if l.len() >= 4 => {
ast = l[3].clone();
continue 'tco;
},
Bool(false) | Nil => Ok(Nil),
_ if l.len() >= 3 => | ,
_ => Ok(Nil)
}
},
Sym(ref a0sym) if a0sym == "fn*" => {
let (a1, a2) = (l[1].clone(), l[2].clone());
Ok(MalFunc{eval: eval, ast: Rc::new(a2), env: env,
params: Rc::new(a1), is_macro: false,
meta: Rc::new(Nil)})
},
Sym(ref a0sym) if a0sym == "eval" => {
ast = eval(l[1].clone(), env.clone())?;
while let Some(ref e) = env.clone().outer {
env = e.clone();
}
continue 'tco;
},
_ => {
match eval_ast(&ast, &env)? {
List(ref el,_) => {
let ref f = el[0].clone();
let args = el[1..].to_vec();
match f {
Func(_,_) => f.apply(args),
MalFunc{ast: mast, env: menv, params, ..} => {
let a = &**mast;
let p = &**params;
env = env_bind(Some(menv.clone()), p.clone(), args)?;
ast = a.clone();
continue 'tco;
},
_ => error("attempt to call non-function"),
}
},
_ => {
error("expected a list")
}
}
}
}
},
_ => eval_ast(&ast, &env),
};
break;
} // end 'tco loop
ret
}
// print
fn print(ast: &MalVal) -> String {
ast.pr_str(true)
}
fn rep(str: &str, env: &Env) -> Result<String,MalErr> {
let ast = read(str)?;
let exp = eval(ast, env.clone())?;
Ok(print(&exp))
}
fn main() {
let mut args = std::env::args();
let arg1 = args.nth(1);
// `()` can be used when no completer is required
let mut rl = Editor::<()>::new();
if rl.load_history(".mal-history").is_err() {
println!("No previous history.");
}
// core.rs: defined using rust
let repl_env = env_new(None);
for (k, v) in core::ns() {
env_sets(&repl_env, k, v);
}
env_sets(&repl_env, "*ARGV*", list!(args.map(Str).collect()));
// core.mal: defined using the language itself
let _ = rep("(def! not (fn* (a) (if a false true)))", &repl_env);
let _ = rep("(def! load-file (fn* (f) (eval (read-string (str \"(do \" (slurp f) \")\")))))", &repl_env);
// Invoked with arguments
if let Some(f) = arg1 {
match rep(&format!("(load-file \"{}\")",f), &repl_env) {
Ok(_) => std::process::exit(0),
Err(e) => {
println!("Error: {}", format_error(e));
std::process::exit(1);
}
}
}
// main repl loop
loop {
let readline = rl.readline("user> ");
match readline {
Ok(line) => {
rl.add_history_entry(&line);
rl.save_history(".mal-history").unwrap();
if line.len() > 0 {
match rep(&line, &repl_env) {
Ok(out) => println!("{}", out),
Err(e) => println!("Error: {}", format_error(e)),
}
}
},
Err(ReadlineError::Interrupted) => continue,
Err(ReadlineError::Eof) => break,
Err(err) => {
println!("Error: {:?}", err);
break
}
}
}
}
// vim: ts=2:sw=2:expandtab
| {
ast = l[2].clone();
continue 'tco;
} | conditional_block |
step6_file.rs | use std::rc::Rc;
//use std::collections::HashMap;
use fnv::FnvHashMap;
use itertools::Itertools;
#[macro_use]
extern crate lazy_static;
extern crate regex;
extern crate itertools;
extern crate fnv;
extern crate rustyline;
use rustyline::error::ReadlineError;
use rustyline::Editor;
#[macro_use]
mod types;
use types::{MalVal,MalArgs,MalRet,MalErr,error,format_error};
use types::MalVal::{Nil,Bool,Str,Sym,List,Vector,Hash,Func,MalFunc};
mod reader;
mod printer;
mod env;
use env::{Env,env_new,env_bind,env_get,env_set,env_sets};
#[macro_use]
mod core;
// read
fn read(str: &str) -> MalRet {
reader::read_str(str.to_string())
}
// eval
fn eval_ast(ast: &MalVal, env: &Env) -> MalRet {
match ast {
Sym(_) => Ok(env_get(&env, &ast)?),
List(v,_) => {
let mut lst: MalArgs = vec![];
for a in v.iter() { lst.push(eval(a.clone(), env.clone())?) }
Ok(list!(lst))
},
Vector(v,_) => {
let mut lst: MalArgs = vec![];
for a in v.iter() { lst.push(eval(a.clone(), env.clone())?) }
Ok(vector!(lst))
},
Hash(hm,_) => {
let mut new_hm: FnvHashMap<String,MalVal> = FnvHashMap::default();
for (k,v) in hm.iter() {
new_hm.insert(k.to_string(), eval(v.clone(), env.clone())?);
}
Ok(Hash(Rc::new(new_hm),Rc::new(Nil)))
},
_ => Ok(ast.clone()),
}
}
fn eval(mut ast: MalVal, mut env: Env) -> MalRet {
let ret: MalRet;
'tco: loop {
ret = match ast.clone() {
List(l,_) => {
if l.len() == 0 { return Ok(ast); }
let a0 = &l[0];
match a0 {
Sym(ref a0sym) if a0sym == "def!" => {
env_set(&env, l[1].clone(), eval(l[2].clone(), env.clone())?)
},
Sym(ref a0sym) if a0sym == "let*" => {
env = env_new(Some(env.clone()));
let (a1, a2) = (l[1].clone(), l[2].clone());
match a1 {
List(ref binds,_) | Vector(ref binds,_) => {
for (b, e) in binds.iter().tuples() {
match b {
Sym(_) => {
let _ = env_set(&env, b.clone(),
eval(e.clone(), env.clone())?);
},
_ => {
return error("let* with non-Sym binding");
}
}
}
},
_ => {
return error("let* with non-List bindings");
}
};
ast = a2;
continue 'tco;
},
Sym(ref a0sym) if a0sym == "do" => {
match eval_ast(&list!(l[1..l.len()-1].to_vec()), &env)? {
List(_,_) => {
ast = l.last().unwrap_or(&Nil).clone();
continue 'tco;
},
_ => error("invalid do form"),
}
},
Sym(ref a0sym) if a0sym == "if" => {
let cond = eval(l[1].clone(), env.clone())?;
match cond {
Bool(false) | Nil if l.len() >= 4 => {
ast = l[3].clone();
continue 'tco;
},
Bool(false) | Nil => Ok(Nil),
_ if l.len() >= 3 => {
ast = l[2].clone();
continue 'tco;
},
_ => Ok(Nil)
}
},
Sym(ref a0sym) if a0sym == "fn*" => {
let (a1, a2) = (l[1].clone(), l[2].clone());
Ok(MalFunc{eval: eval, ast: Rc::new(a2), env: env,
params: Rc::new(a1), is_macro: false,
meta: Rc::new(Nil)})
},
Sym(ref a0sym) if a0sym == "eval" => {
ast = eval(l[1].clone(), env.clone())?;
while let Some(ref e) = env.clone().outer {
env = e.clone();
}
continue 'tco;
},
_ => {
match eval_ast(&ast, &env)? {
List(ref el,_) => {
let ref f = el[0].clone();
let args = el[1..].to_vec();
match f {
Func(_,_) => f.apply(args),
MalFunc{ast: mast, env: menv, params, ..} => {
let a = &**mast;
let p = &**params;
env = env_bind(Some(menv.clone()), p.clone(), args)?;
ast = a.clone();
continue 'tco;
},
_ => error("attempt to call non-function"), | }
}
}
},
_ => eval_ast(&ast, &env),
};
break;
} // end 'tco loop
ret
}
// print
fn print(ast: &MalVal) -> String {
ast.pr_str(true)
}
fn rep(str: &str, env: &Env) -> Result<String,MalErr> {
let ast = read(str)?;
let exp = eval(ast, env.clone())?;
Ok(print(&exp))
}
fn main() {
let mut args = std::env::args();
let arg1 = args.nth(1);
// `()` can be used when no completer is required
let mut rl = Editor::<()>::new();
if rl.load_history(".mal-history").is_err() {
println!("No previous history.");
}
// core.rs: defined using rust
let repl_env = env_new(None);
for (k, v) in core::ns() {
env_sets(&repl_env, k, v);
}
env_sets(&repl_env, "*ARGV*", list!(args.map(Str).collect()));
// core.mal: defined using the language itself
let _ = rep("(def! not (fn* (a) (if a false true)))", &repl_env);
let _ = rep("(def! load-file (fn* (f) (eval (read-string (str \"(do \" (slurp f) \")\")))))", &repl_env);
// Invoked with arguments
if let Some(f) = arg1 {
match rep(&format!("(load-file \"{}\")",f), &repl_env) {
Ok(_) => std::process::exit(0),
Err(e) => {
println!("Error: {}", format_error(e));
std::process::exit(1);
}
}
}
// main repl loop
loop {
let readline = rl.readline("user> ");
match readline {
Ok(line) => {
rl.add_history_entry(&line);
rl.save_history(".mal-history").unwrap();
if line.len() > 0 {
match rep(&line, &repl_env) {
Ok(out) => println!("{}", out),
Err(e) => println!("Error: {}", format_error(e)),
}
}
},
Err(ReadlineError::Interrupted) => continue,
Err(ReadlineError::Eof) => break,
Err(err) => {
println!("Error: {:?}", err);
break
}
}
}
}
// vim: ts=2:sw=2:expandtab | }
},
_ => {
error("expected a list")
} | random_line_split |
step6_file.rs | use std::rc::Rc;
//use std::collections::HashMap;
use fnv::FnvHashMap;
use itertools::Itertools;
#[macro_use]
extern crate lazy_static;
extern crate regex;
extern crate itertools;
extern crate fnv;
extern crate rustyline;
use rustyline::error::ReadlineError;
use rustyline::Editor;
#[macro_use]
mod types;
use types::{MalVal,MalArgs,MalRet,MalErr,error,format_error};
use types::MalVal::{Nil,Bool,Str,Sym,List,Vector,Hash,Func,MalFunc};
mod reader;
mod printer;
mod env;
use env::{Env,env_new,env_bind,env_get,env_set,env_sets};
#[macro_use]
mod core;
// read
fn read(str: &str) -> MalRet {
reader::read_str(str.to_string())
}
// eval
fn eval_ast(ast: &MalVal, env: &Env) -> MalRet {
match ast {
Sym(_) => Ok(env_get(&env, &ast)?),
List(v,_) => {
let mut lst: MalArgs = vec![];
for a in v.iter() { lst.push(eval(a.clone(), env.clone())?) }
Ok(list!(lst))
},
Vector(v,_) => {
let mut lst: MalArgs = vec![];
for a in v.iter() { lst.push(eval(a.clone(), env.clone())?) }
Ok(vector!(lst))
},
Hash(hm,_) => {
let mut new_hm: FnvHashMap<String,MalVal> = FnvHashMap::default();
for (k,v) in hm.iter() {
new_hm.insert(k.to_string(), eval(v.clone(), env.clone())?);
}
Ok(Hash(Rc::new(new_hm),Rc::new(Nil)))
},
_ => Ok(ast.clone()),
}
}
fn eval(mut ast: MalVal, mut env: Env) -> MalRet {
let ret: MalRet;
'tco: loop {
ret = match ast.clone() {
List(l,_) => {
if l.len() == 0 { return Ok(ast); }
let a0 = &l[0];
match a0 {
Sym(ref a0sym) if a0sym == "def!" => {
env_set(&env, l[1].clone(), eval(l[2].clone(), env.clone())?)
},
Sym(ref a0sym) if a0sym == "let*" => {
env = env_new(Some(env.clone()));
let (a1, a2) = (l[1].clone(), l[2].clone());
match a1 {
List(ref binds,_) | Vector(ref binds,_) => {
for (b, e) in binds.iter().tuples() {
match b {
Sym(_) => {
let _ = env_set(&env, b.clone(),
eval(e.clone(), env.clone())?);
},
_ => {
return error("let* with non-Sym binding");
}
}
}
},
_ => {
return error("let* with non-List bindings");
}
};
ast = a2;
continue 'tco;
},
Sym(ref a0sym) if a0sym == "do" => {
match eval_ast(&list!(l[1..l.len()-1].to_vec()), &env)? {
List(_,_) => {
ast = l.last().unwrap_or(&Nil).clone();
continue 'tco;
},
_ => error("invalid do form"),
}
},
Sym(ref a0sym) if a0sym == "if" => {
let cond = eval(l[1].clone(), env.clone())?;
match cond {
Bool(false) | Nil if l.len() >= 4 => {
ast = l[3].clone();
continue 'tco;
},
Bool(false) | Nil => Ok(Nil),
_ if l.len() >= 3 => {
ast = l[2].clone();
continue 'tco;
},
_ => Ok(Nil)
}
},
Sym(ref a0sym) if a0sym == "fn*" => {
let (a1, a2) = (l[1].clone(), l[2].clone());
Ok(MalFunc{eval: eval, ast: Rc::new(a2), env: env,
params: Rc::new(a1), is_macro: false,
meta: Rc::new(Nil)})
},
Sym(ref a0sym) if a0sym == "eval" => {
ast = eval(l[1].clone(), env.clone())?;
while let Some(ref e) = env.clone().outer {
env = e.clone();
}
continue 'tco;
},
_ => {
match eval_ast(&ast, &env)? {
List(ref el,_) => {
let ref f = el[0].clone();
let args = el[1..].to_vec();
match f {
Func(_,_) => f.apply(args),
MalFunc{ast: mast, env: menv, params, ..} => {
let a = &**mast;
let p = &**params;
env = env_bind(Some(menv.clone()), p.clone(), args)?;
ast = a.clone();
continue 'tco;
},
_ => error("attempt to call non-function"),
}
},
_ => {
error("expected a list")
}
}
}
}
},
_ => eval_ast(&ast, &env),
};
break;
} // end 'tco loop
ret
}
// print
fn print(ast: &MalVal) -> String {
ast.pr_str(true)
}
fn | (str: &str, env: &Env) -> Result<String,MalErr> {
let ast = read(str)?;
let exp = eval(ast, env.clone())?;
Ok(print(&exp))
}
fn main() {
let mut args = std::env::args();
let arg1 = args.nth(1);
// `()` can be used when no completer is required
let mut rl = Editor::<()>::new();
if rl.load_history(".mal-history").is_err() {
println!("No previous history.");
}
// core.rs: defined using rust
let repl_env = env_new(None);
for (k, v) in core::ns() {
env_sets(&repl_env, k, v);
}
env_sets(&repl_env, "*ARGV*", list!(args.map(Str).collect()));
// core.mal: defined using the language itself
let _ = rep("(def! not (fn* (a) (if a false true)))", &repl_env);
let _ = rep("(def! load-file (fn* (f) (eval (read-string (str \"(do \" (slurp f) \")\")))))", &repl_env);
// Invoked with arguments
if let Some(f) = arg1 {
match rep(&format!("(load-file \"{}\")",f), &repl_env) {
Ok(_) => std::process::exit(0),
Err(e) => {
println!("Error: {}", format_error(e));
std::process::exit(1);
}
}
}
// main repl loop
loop {
let readline = rl.readline("user> ");
match readline {
Ok(line) => {
rl.add_history_entry(&line);
rl.save_history(".mal-history").unwrap();
if line.len() > 0 {
match rep(&line, &repl_env) {
Ok(out) => println!("{}", out),
Err(e) => println!("Error: {}", format_error(e)),
}
}
},
Err(ReadlineError::Interrupted) => continue,
Err(ReadlineError::Eof) => break,
Err(err) => {
println!("Error: {:?}", err);
break
}
}
}
}
// vim: ts=2:sw=2:expandtab
| rep | identifier_name |
mod.rs | //! Provides functions for maintaining database schema.
//!
//! A database migration always provides procedures to update the schema, as well as to revert
//! itself. Diesel's migrations are versioned, and run in order. Diesel also takes care of tracking
//! which migrations have already been run automatically. Your migrations don't need to be
//! idempotent, as Diesel will ensure no migration is run twice unless it has been reverted.
//!
//! Migrations should be placed in a `/migrations` directory at the root of your project (the same
//! directory as `Cargo.toml`). When any of these functions are run, Diesel will search for the
//! migrations directory in the current directory and its parents, stopping when it finds the
//! directory containing `Cargo.toml`.
//! | //! ## Example
//!
//! ```text
//! # Directory Structure
//! - 20151219180527_create_users
//! - up.sql
//! - down.sql
//! - 20160107082941_create_posts
//! - up.sql
//! - down.sql
//! ```
//!
//! ```sql
//! -- 20151219180527_create_users/up.sql
//! CREATE TABLE users (
//! id SERIAL PRIMARY KEY,
//! name VARCHAR NOT NULL,
//! hair_color VARCHAR
//! );
//! ```
//!
//! ```sql
//! -- 20151219180527_create_users/down.sql
//! DROP TABLE users;
//! ```
//!
//! ```sql
//! -- 20160107082941_create_posts/up.sql
//! CREATE TABLE posts (
//! id SERIAL PRIMARY KEY,
//! user_id INTEGER NOT NULL,
//! title VARCHAR NOT NULL,
//! body TEXT
//! );
//! ```
//!
//! ```sql
//! -- 20160107082941_create_posts/down.sql
//! DROP TABLE posts;
//! ```
mod migration;
mod migration_error;
mod schema;
pub use self::migration_error::*;
use ::expression::expression_methods::*;
use ::query_dsl::*;
use self::migration::*;
use self::migration_error::MigrationError::*;
use self::schema::NewMigration;
use self::schema::__diesel_schema_migrations::dsl::*;
use {Connection, QueryResult};
use std::collections::HashSet;
use std::env;
use std::path::{PathBuf, Path};
/// Runs all migrations that have not yet been run. This function will print all progress to
/// stdout. This function will return an `Err` if some error occurs reading the migrations, or if
/// any migration fails to run. Each migration is run in its own transaction, so some migrations
/// may be committed, even if a later migration fails to run.
///
/// It should be noted that this runs all migrations that have not already been run, regardless of
/// whether or not their version is later than the latest run migration. This is generally not a
/// problem, and eases the more common case of two developers generating independent migrations on
/// a branch. Whoever created the second one will eventually need to run the first when both
/// branches are merged.
///
/// See the [module level documentation](index.html) for information on how migrations should be
/// structured, and where Diesel will look for them by default.
pub fn run_pending_migrations(conn: &Connection) -> Result<(), RunMigrationsError> {
try!(create_schema_migrations_table_if_needed(conn));
let already_run = try!(previously_run_migration_versions(conn));
let migrations_dir = try!(find_migrations_directory());
let all_migrations = try!(migrations_in_directory(&migrations_dir));
let pending_migrations = all_migrations.into_iter().filter(|m| {
!already_run.contains(m.version())
});
run_migrations(conn, pending_migrations)
}
/// Reverts the last migration that was run. Returns the version that was reverted. Returns an
/// `Err` if no migrations have ever been run.
///
/// See the [module level documentation](index.html) for information on how migrations should be
/// structured, and where Diesel will look for them by default.
pub fn revert_latest_migration(conn: &Connection) -> Result<String, RunMigrationsError> {
try!(create_schema_migrations_table_if_needed(conn));
let latest_migration_version = try!(latest_run_migration_version(conn));
revert_migration_with_version(conn, &latest_migration_version)
.map(|_| latest_migration_version)
}
#[doc(hidden)]
pub fn revert_migration_with_version(conn: &Connection, ver: &str) -> Result<(), RunMigrationsError> {
migration_with_version(ver)
.map_err(|e| e.into())
.and_then(|m| revert_migration(conn, m))
}
#[doc(hidden)]
pub fn run_migration_with_version(conn: &Connection, ver: &str) -> Result<(), RunMigrationsError> {
migration_with_version(ver)
.map_err(|e| e.into())
.and_then(|m| run_migration(conn, m))
}
fn migration_with_version(ver: &str) -> Result<Box<Migration>, MigrationError> {
let migrations_dir = try!(find_migrations_directory());
let all_migrations = try!(migrations_in_directory(&migrations_dir));
let migration = all_migrations.into_iter().find(|m| {
m.version() == ver
});
match migration {
Some(m) => Ok(m),
None => Err(UnknownMigrationVersion(ver.into())),
}
}
fn create_schema_migrations_table_if_needed(conn: &Connection) -> QueryResult<usize> {
conn.silence_notices(|| {
conn.execute("CREATE TABLE IF NOT EXISTS __diesel_schema_migrations (
version VARCHAR PRIMARY KEY NOT NULL,
run_on TIMESTAMP NOT NULL DEFAULT NOW()
)")
})
}
fn previously_run_migration_versions(conn: &Connection) -> QueryResult<HashSet<String>> {
__diesel_schema_migrations.select(version)
.load(&conn)
.map(|r| r.collect())
}
fn latest_run_migration_version(conn: &Connection) -> QueryResult<String> {
use ::expression::dsl::max;
__diesel_schema_migrations.select(max(version))
.first(&conn)
}
fn migrations_in_directory(path: &Path) -> Result<Vec<Box<Migration>>, MigrationError> {
use self::migration::migration_from;
try!(path.read_dir())
.filter_map(|entry| {
let entry = match entry {
Ok(e) => e,
Err(e) => return Some(Err(e.into())),
};
if !entry.file_name().to_string_lossy().starts_with(".") {
Some(migration_from(entry.path()))
} else {
None
}
}).collect()
}
fn run_migrations<T>(conn: &Connection, migrations: T)
-> Result<(), RunMigrationsError> where
T: Iterator<Item=Box<Migration>>
{
for migration in migrations {
try!(run_migration(conn, migration));
}
Ok(())
}
fn run_migration(conn: &Connection, migration: Box<Migration>)
-> Result<(), RunMigrationsError>
{
conn.transaction(|| {
println!("Running migration {}", migration.version());
try!(migration.run(conn));
try!(::insert(&NewMigration(migration.version()))
.into(__diesel_schema_migrations)
.execute(&conn));
Ok(())
}).map_err(|e| e.into())
}
fn revert_migration(conn: &Connection, migration: Box<Migration>)
-> Result<(), RunMigrationsError>
{
try!(conn.transaction(|| {
println!("Rolling back migration {}", migration.version());
try!(migration.revert(conn));
let target = __diesel_schema_migrations.filter(version.eq(migration.version()));
try!(::delete(target).execute(&conn));
Ok(())
}));
Ok(())
}
/// Returns the directory containing migrations. Will look at for
/// $PWD/migrations. If it is not found, it will search the parents of the
/// current directory, until it reaches the root directory. Returns
/// `MigrationError::MigrationDirectoryNotFound` if no directory is found.
pub fn find_migrations_directory() -> Result<PathBuf, MigrationError> {
search_for_migrations_directory(&try!(env::current_dir()))
}
fn search_for_migrations_directory(path: &Path) -> Result<PathBuf, MigrationError> {
let migration_path = path.join("migrations");
if migration_path.is_dir() {
Ok(migration_path)
} else {
path.parent().map(search_for_migrations_directory)
.unwrap_or(Err(MigrationError::MigrationDirectoryNotFound))
}
}
#[cfg(test)]
mod tests {
extern crate tempdir;
use super::*;
use super::search_for_migrations_directory;
use self::tempdir::TempDir;
use std::fs;
#[test]
fn migration_directory_not_found_if_no_migration_dir_exists() {
let dir = TempDir::new("diesel").unwrap();
assert_eq!(Err(MigrationError::MigrationDirectoryNotFound),
search_for_migrations_directory(dir.path()));
}
#[test]
fn migration_directory_defaults_to_pwd_slash_migrations() {
let dir = TempDir::new("diesel").unwrap();
let temp_path = dir.path().canonicalize().unwrap();
let migrations_path = temp_path.join("migrations");
fs::create_dir(&migrations_path).unwrap();
assert_eq!(Ok(migrations_path), search_for_migrations_directory(&temp_path));
}
#[test]
fn migration_directory_checks_parents() {
let dir = TempDir::new("diesel").unwrap();
let temp_path = dir.path().canonicalize().unwrap();
let migrations_path = temp_path.join("migrations");
let child_path = temp_path.join("child");
fs::create_dir(&child_path).unwrap();
fs::create_dir(&migrations_path).unwrap();
assert_eq!(Ok(migrations_path), search_for_migrations_directory(&child_path));
}
} | //! Individual migrations should be a folder containing exactly two files, `up.sql` and `down.sql`.
//! `up.sql` will be used to run the migration, while `down.sql` will be used for reverting it. The
//! folder itself should have the structure `{version}_{migration_name}`. It is recommended that
//! you use the timestamp of creation for the version.
//! | random_line_split |
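Note on the row above: its module documentation describes the migrations layout but not how the entry points are driven. The sketch below shows how the functions defined in that row (`find_migrations_directory`, `run_pending_migrations`, `revert_latest_migration`) might be called from a small binary. It is illustrative only and not part of any dataset row: `establish_connection()` is a hypothetical helper, and both the `diesel::migrations` module path and the connection setup depend on the (pre-1.0) Diesel version this file comes from.

// Illustrative sketch only. `establish_connection()` is hypothetical and the
// `diesel::migrations` path is assumed; the entry points are the ones defined above.
extern crate diesel;

use diesel::Connection;
use diesel::migrations::{find_migrations_directory, run_pending_migrations, revert_latest_migration};

fn establish_connection() -> Connection {
    // Hypothetical: opening a connection is outside the migrations module's scope.
    unimplemented!("open a database connection for your backend/version of Diesel")
}

fn main() {
    let conn = establish_connection();

    // Where the `migrations/` directory will be looked up (walks up from $PWD, per the docs above).
    if let Ok(dir) = find_migrations_directory() {
        println!("using migrations from {}", dir.display());
    }

    // Apply every migration not yet recorded in __diesel_schema_migrations.
    if run_pending_migrations(&conn).is_err() {
        println!("running pending migrations failed");
        return;
    }

    // Roll back the most recent migration, e.g. while iterating on a new one.
    match revert_latest_migration(&conn) {
        Ok(version) => println!("reverted {}", version),
        Err(_) => println!("nothing to revert (or revert failed)"),
    }
}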
mod.rs | //! Provides functions for maintaining database schema.
//!
//! A database migration always provides procedures to update the schema, as well as to revert
//! itself. Diesel's migrations are versioned, and run in order. Diesel also takes care of tracking
//! which migrations have already been run automatically. Your migrations don't need to be
//! idempotent, as Diesel will ensure no migration is run twice unless it has been reverted.
//!
//! Migrations should be placed in a `/migrations` directory at the root of your project (the same
//! directory as `Cargo.toml`). When any of these functions are run, Diesel will search for the
//! migrations directory in the current directory and its parents, stopping when it finds the
//! directory containing `Cargo.toml`.
//!
//! Individual migrations should be a folder containing exactly two files, `up.sql` and `down.sql`.
//! `up.sql` will be used to run the migration, while `down.sql` will be used for reverting it. The
//! folder itself should have the structure `{version}_{migration_name}`. It is recommended that
//! you use the timestamp of creation for the version.
//!
//! ## Example
//!
//! ```text
//! # Directory Structure
//! - 20151219180527_create_users
//! - up.sql
//! - down.sql
//! - 20160107082941_create_posts
//! - up.sql
//! - down.sql
//! ```
//!
//! ```sql
//! -- 20151219180527_create_users/up.sql
//! CREATE TABLE users (
//! id SERIAL PRIMARY KEY,
//! name VARCHAR NOT NULL,
//! hair_color VARCHAR
//! );
//! ```
//!
//! ```sql
//! -- 20151219180527_create_users/down.sql
//! DROP TABLE users;
//! ```
//!
//! ```sql
//! -- 20160107082941_create_posts/up.sql
//! CREATE TABLE posts (
//! id SERIAL PRIMARY KEY,
//! user_id INTEGER NOT NULL,
//! title VARCHAR NOT NULL,
//! body TEXT
//! );
//! ```
//!
//! ```sql
//! -- 20160107082941_create_posts/down.sql
//! DROP TABLE posts;
//! ```
mod migration;
mod migration_error;
mod schema;
pub use self::migration_error::*;
use ::expression::expression_methods::*;
use ::query_dsl::*;
use self::migration::*;
use self::migration_error::MigrationError::*;
use self::schema::NewMigration;
use self::schema::__diesel_schema_migrations::dsl::*;
use {Connection, QueryResult};
use std::collections::HashSet;
use std::env;
use std::path::{PathBuf, Path};
/// Runs all migrations that have not yet been run. This function will print all progress to
/// stdout. This function will return an `Err` if some error occurs reading the migrations, or if
/// any migration fails to run. Each migration is run in its own transaction, so some migrations
/// may be committed, even if a later migration fails to run.
///
/// It should be noted that this runs all migrations that have not already been run, regardless of
/// whether or not their version is later than the latest run migration. This is generally not a
/// problem, and eases the more common case of two developers generating independent migrations on
/// a branch. Whoever created the second one will eventually need to run the first when both
/// branches are merged.
///
/// See the [module level documentation](index.html) for information on how migrations should be
/// structured, and where Diesel will look for them by default.
pub fn run_pending_migrations(conn: &Connection) -> Result<(), RunMigrationsError> {
try!(create_schema_migrations_table_if_needed(conn));
let already_run = try!(previously_run_migration_versions(conn));
let migrations_dir = try!(find_migrations_directory());
let all_migrations = try!(migrations_in_directory(&migrations_dir));
let pending_migrations = all_migrations.into_iter().filter(|m| {
!already_run.contains(m.version())
});
run_migrations(conn, pending_migrations)
}
/// Reverts the last migration that was run. Returns the version that was reverted. Returns an
/// `Err` if no migrations have ever been run.
///
/// See the [module level documentation](index.html) for information on how migrations should be
/// structured, and where Diesel will look for them by default.
pub fn revert_latest_migration(conn: &Connection) -> Result<String, RunMigrationsError> {
try!(create_schema_migrations_table_if_needed(conn));
let latest_migration_version = try!(latest_run_migration_version(conn));
revert_migration_with_version(conn, &latest_migration_version)
.map(|_| latest_migration_version)
}
#[doc(hidden)]
pub fn revert_migration_with_version(conn: &Connection, ver: &str) -> Result<(), RunMigrationsError> {
migration_with_version(ver)
.map_err(|e| e.into())
.and_then(|m| revert_migration(conn, m))
}
#[doc(hidden)]
pub fn run_migration_with_version(conn: &Connection, ver: &str) -> Result<(), RunMigrationsError> {
migration_with_version(ver)
.map_err(|e| e.into())
.and_then(|m| run_migration(conn, m))
}
fn migration_with_version(ver: &str) -> Result<Box<Migration>, MigrationError> {
let migrations_dir = try!(find_migrations_directory());
let all_migrations = try!(migrations_in_directory(&migrations_dir));
let migration = all_migrations.into_iter().find(|m| {
m.version() == ver
});
match migration {
Some(m) => Ok(m),
None => Err(UnknownMigrationVersion(ver.into())),
}
}
fn create_schema_migrations_table_if_needed(conn: &Connection) -> QueryResult<usize> {
conn.silence_notices(|| {
conn.execute("CREATE TABLE IF NOT EXISTS __diesel_schema_migrations (
version VARCHAR PRIMARY KEY NOT NULL,
run_on TIMESTAMP NOT NULL DEFAULT NOW()
)")
})
}
fn previously_run_migration_versions(conn: &Connection) -> QueryResult<HashSet<String>> {
__diesel_schema_migrations.select(version)
.load(&conn)
.map(|r| r.collect())
}
fn latest_run_migration_version(conn: &Connection) -> QueryResult<String> {
use ::expression::dsl::max;
__diesel_schema_migrations.select(max(version))
.first(&conn)
}
fn migrations_in_directory(path: &Path) -> Result<Vec<Box<Migration>>, MigrationError> {
use self::migration::migration_from;
try!(path.read_dir())
.filter_map(|entry| {
let entry = match entry {
Ok(e) => e,
Err(e) => return Some(Err(e.into())),
};
if !entry.file_name().to_string_lossy().starts_with(".") {
Some(migration_from(entry.path()))
} else {
None
}
}).collect()
}
fn run_migrations<T>(conn: &Connection, migrations: T)
-> Result<(), RunMigrationsError> where
T: Iterator<Item=Box<Migration>>
{
for migration in migrations {
try!(run_migration(conn, migration));
}
Ok(())
}
fn run_migration(conn: &Connection, migration: Box<Migration>)
-> Result<(), RunMigrationsError>
{
conn.transaction(|| {
println!("Running migration {}", migration.version());
try!(migration.run(conn));
try!(::insert(&NewMigration(migration.version()))
.into(__diesel_schema_migrations)
.execute(&conn));
Ok(())
}).map_err(|e| e.into())
}
fn revert_migration(conn: &Connection, migration: Box<Migration>)
-> Result<(), RunMigrationsError>
{
try!(conn.transaction(|| {
println!("Rolling back migration {}", migration.version());
try!(migration.revert(conn));
let target = __diesel_schema_migrations.filter(version.eq(migration.version()));
try!(::delete(target).execute(&conn));
Ok(())
}));
Ok(())
}
/// Returns the directory containing migrations. Will look at for
/// $PWD/migrations. If it is not found, it will search the parents of the
/// current directory, until it reaches the root directory. Returns
/// `MigrationError::MigrationDirectoryNotFound` if no directory is found.
pub fn find_migrations_directory() -> Result<PathBuf, MigrationError> {
search_for_migrations_directory(&try!(env::current_dir()))
}
fn search_for_migrations_directory(path: &Path) -> Result<PathBuf, MigrationError> {
let migration_path = path.join("migrations");
if migration_path.is_dir() {
Ok(migration_path)
} else {
path.parent().map(search_for_migrations_directory)
.unwrap_or(Err(MigrationError::MigrationDirectoryNotFound))
}
}
#[cfg(test)]
mod tests {
extern crate tempdir;
use super::*;
use super::search_for_migrations_directory;
use self::tempdir::TempDir;
use std::fs;
#[test]
fn migration_directory_not_found_if_no_migration_dir_exists() {
let dir = TempDir::new("diesel").unwrap();
assert_eq!(Err(MigrationError::MigrationDirectoryNotFound),
search_for_migrations_directory(dir.path()));
}
#[test]
fn migration_directory_defaults_to_pwd_slash_migrations() {
let dir = TempDir::new("diesel").unwrap();
let temp_path = dir.path().canonicalize().unwrap();
let migrations_path = temp_path.join("migrations");
fs::create_dir(&migrations_path).unwrap();
assert_eq!(Ok(migrations_path), search_for_migrations_directory(&temp_path));
}
#[test]
fn | () {
let dir = TempDir::new("diesel").unwrap();
let temp_path = dir.path().canonicalize().unwrap();
let migrations_path = temp_path.join("migrations");
let child_path = temp_path.join("child");
fs::create_dir(&child_path).unwrap();
fs::create_dir(&migrations_path).unwrap();
assert_eq!(Ok(migrations_path), search_for_migrations_directory(&child_path));
}
}
| migration_directory_checks_parents | identifier_name |
mod.rs | //! Provides functions for maintaining database schema.
//!
//! A database migration always provides procedures to update the schema, as well as to revert
//! itself. Diesel's migrations are versioned, and run in order. Diesel also takes care of tracking
//! which migrations have already been run automatically. Your migrations don't need to be
//! idempotent, as Diesel will ensure no migration is run twice unless it has been reverted.
//!
//! Migrations should be placed in a `/migrations` directory at the root of your project (the same
//! directory as `Cargo.toml`). When any of these functions are run, Diesel will search for the
//! migrations directory in the current directory and its parents, stopping when it finds the
//! directory containing `Cargo.toml`.
//!
//! Individual migrations should be a folder containing exactly two files, `up.sql` and `down.sql`.
//! `up.sql` will be used to run the migration, while `down.sql` will be used for reverting it. The
//! folder itself should have the structure `{version}_{migration_name}`. It is recommended that
//! you use the timestamp of creation for the version.
//!
//! ## Example
//!
//! ```text
//! # Directory Structure
//! - 20151219180527_create_users
//! - up.sql
//! - down.sql
//! - 20160107082941_create_posts
//! - up.sql
//! - down.sql
//! ```
//!
//! ```sql
//! -- 20151219180527_create_users/up.sql
//! CREATE TABLE users (
//! id SERIAL PRIMARY KEY,
//! name VARCHAR NOT NULL,
//! hair_color VARCHAR
//! );
//! ```
//!
//! ```sql
//! -- 20151219180527_create_users/down.sql
//! DROP TABLE users;
//! ```
//!
//! ```sql
//! -- 20160107082941_create_posts/up.sql
//! CREATE TABLE posts (
//! id SERIAL PRIMARY KEY,
//! user_id INTEGER NOT NULL,
//! title VARCHAR NOT NULL,
//! body TEXT
//! );
//! ```
//!
//! ```sql
//! -- 20160107082941_create_posts/down.sql
//! DROP TABLE posts;
//! ```
mod migration;
mod migration_error;
mod schema;
pub use self::migration_error::*;
use ::expression::expression_methods::*;
use ::query_dsl::*;
use self::migration::*;
use self::migration_error::MigrationError::*;
use self::schema::NewMigration;
use self::schema::__diesel_schema_migrations::dsl::*;
use {Connection, QueryResult};
use std::collections::HashSet;
use std::env;
use std::path::{PathBuf, Path};
/// Runs all migrations that have not yet been run. This function will print all progress to
/// stdout. This function will return an `Err` if some error occurs reading the migrations, or if
/// any migration fails to run. Each migration is run in its own transaction, so some migrations
/// may be committed, even if a later migration fails to run.
///
/// It should be noted that this runs all migrations that have not already been run, regardless of
/// whether or not their version is later than the latest run migration. This is generally not a
/// problem, and eases the more common case of two developers generating independent migrations on
/// a branch. Whoever created the second one will eventually need to run the first when both
/// branches are merged.
///
/// See the [module level documentation](index.html) for information on how migrations should be
/// structured, and where Diesel will look for them by default.
pub fn run_pending_migrations(conn: &Connection) -> Result<(), RunMigrationsError> {
try!(create_schema_migrations_table_if_needed(conn));
let already_run = try!(previously_run_migration_versions(conn));
let migrations_dir = try!(find_migrations_directory());
let all_migrations = try!(migrations_in_directory(&migrations_dir));
let pending_migrations = all_migrations.into_iter().filter(|m| {
!already_run.contains(m.version())
});
run_migrations(conn, pending_migrations)
}
/// Reverts the last migration that was run. Returns the version that was reverted. Returns an
/// `Err` if no migrations have ever been run.
///
/// See the [module level documentation](index.html) for information on how migrations should be
/// structured, and where Diesel will look for them by default.
pub fn revert_latest_migration(conn: &Connection) -> Result<String, RunMigrationsError> |
#[doc(hidden)]
pub fn revert_migration_with_version(conn: &Connection, ver: &str) -> Result<(), RunMigrationsError> {
migration_with_version(ver)
.map_err(|e| e.into())
.and_then(|m| revert_migration(conn, m))
}
#[doc(hidden)]
pub fn run_migration_with_version(conn: &Connection, ver: &str) -> Result<(), RunMigrationsError> {
migration_with_version(ver)
.map_err(|e| e.into())
.and_then(|m| run_migration(conn, m))
}
fn migration_with_version(ver: &str) -> Result<Box<Migration>, MigrationError> {
let migrations_dir = try!(find_migrations_directory());
let all_migrations = try!(migrations_in_directory(&migrations_dir));
let migration = all_migrations.into_iter().find(|m| {
m.version() == ver
});
match migration {
Some(m) => Ok(m),
None => Err(UnknownMigrationVersion(ver.into())),
}
}
fn create_schema_migrations_table_if_needed(conn: &Connection) -> QueryResult<usize> {
conn.silence_notices(|| {
conn.execute("CREATE TABLE IF NOT EXISTS __diesel_schema_migrations (
version VARCHAR PRIMARY KEY NOT NULL,
run_on TIMESTAMP NOT NULL DEFAULT NOW()
)")
})
}
fn previously_run_migration_versions(conn: &Connection) -> QueryResult<HashSet<String>> {
__diesel_schema_migrations.select(version)
.load(&conn)
.map(|r| r.collect())
}
fn latest_run_migration_version(conn: &Connection) -> QueryResult<String> {
use ::expression::dsl::max;
__diesel_schema_migrations.select(max(version))
.first(&conn)
}
fn migrations_in_directory(path: &Path) -> Result<Vec<Box<Migration>>, MigrationError> {
use self::migration::migration_from;
try!(path.read_dir())
.filter_map(|entry| {
let entry = match entry {
Ok(e) => e,
Err(e) => return Some(Err(e.into())),
};
if !entry.file_name().to_string_lossy().starts_with(".") {
Some(migration_from(entry.path()))
} else {
None
}
}).collect()
}
fn run_migrations<T>(conn: &Connection, migrations: T)
-> Result<(), RunMigrationsError> where
T: Iterator<Item=Box<Migration>>
{
for migration in migrations {
try!(run_migration(conn, migration));
}
Ok(())
}
fn run_migration(conn: &Connection, migration: Box<Migration>)
-> Result<(), RunMigrationsError>
{
conn.transaction(|| {
println!("Running migration {}", migration.version());
try!(migration.run(conn));
try!(::insert(&NewMigration(migration.version()))
.into(__diesel_schema_migrations)
.execute(&conn));
Ok(())
}).map_err(|e| e.into())
}
fn revert_migration(conn: &Connection, migration: Box<Migration>)
-> Result<(), RunMigrationsError>
{
try!(conn.transaction(|| {
println!("Rolling back migration {}", migration.version());
try!(migration.revert(conn));
let target = __diesel_schema_migrations.filter(version.eq(migration.version()));
try!(::delete(target).execute(&conn));
Ok(())
}));
Ok(())
}
/// Returns the directory containing migrations. Will look at for
/// $PWD/migrations. If it is not found, it will search the parents of the
/// current directory, until it reaches the root directory. Returns
/// `MigrationError::MigrationDirectoryNotFound` if no directory is found.
pub fn find_migrations_directory() -> Result<PathBuf, MigrationError> {
search_for_migrations_directory(&try!(env::current_dir()))
}
fn search_for_migrations_directory(path: &Path) -> Result<PathBuf, MigrationError> {
let migration_path = path.join("migrations");
if migration_path.is_dir() {
Ok(migration_path)
} else {
path.parent().map(search_for_migrations_directory)
.unwrap_or(Err(MigrationError::MigrationDirectoryNotFound))
}
}
#[cfg(test)]
mod tests {
extern crate tempdir;
use super::*;
use super::search_for_migrations_directory;
use self::tempdir::TempDir;
use std::fs;
#[test]
fn migration_directory_not_found_if_no_migration_dir_exists() {
let dir = TempDir::new("diesel").unwrap();
assert_eq!(Err(MigrationError::MigrationDirectoryNotFound),
search_for_migrations_directory(dir.path()));
}
#[test]
fn migration_directory_defaults_to_pwd_slash_migrations() {
let dir = TempDir::new("diesel").unwrap();
let temp_path = dir.path().canonicalize().unwrap();
let migrations_path = temp_path.join("migrations");
fs::create_dir(&migrations_path).unwrap();
assert_eq!(Ok(migrations_path), search_for_migrations_directory(&temp_path));
}
#[test]
fn migration_directory_checks_parents() {
let dir = TempDir::new("diesel").unwrap();
let temp_path = dir.path().canonicalize().unwrap();
let migrations_path = temp_path.join("migrations");
let child_path = temp_path.join("child");
fs::create_dir(&child_path).unwrap();
fs::create_dir(&migrations_path).unwrap();
assert_eq!(Ok(migrations_path), search_for_migrations_directory(&child_path));
}
}
| {
try!(create_schema_migrations_table_if_needed(conn));
let latest_migration_version = try!(latest_run_migration_version(conn));
revert_migration_with_version(conn, &latest_migration_version)
.map(|_| latest_migration_version)
} | identifier_body |
makeseeds.py | #!/usr/bin/env python3
# Copyright (c) 2013-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
import re
import sys
import dns.resolver
import collections
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 337600
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
with open("suspicious_hosts.txt", mode="r", encoding="utf-8") as f:
SUSPICIOUS_HOSTS = {s.strip() for s in f if s.strip()}
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(
r"^/Satoshi:("
r"0.14.(0|1|2|3|99)|"
r"0.15.(0|1|2|99)|"
r"0.16.(0|1|2|3|99)|"
r"0.17.(0|0.1|1|2|99)|"
r"0.18.(0|1|99)|"
r"0.19.(0|1|99)|"
r"0.20.(0|1|99)|"
r"0.21.99"
r")")
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
if m is None:
m = PATTERN_IPV6.match(sline[0])
if m is None:
m = PATTERN_ONION.match(sline[0])
if m is None:
return None
else:
net = 'onion'
ipstr = sortkey = m.group(1)
port = int(m.group(2))
else:
net = 'ipv6'
if m.group(1) in ['::']: # Not interested in localhost
return None
ipstr = m.group(1)
sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
port = int(m.group(2))
else:
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
net = 'ipv4'
sortkey = ip
ipstr = m.group(1)
port = int(m.group(6))
# Skip bad results.
if sline[1] == 0:
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'net': net,
'ip': ipstr,
'port': port,
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
'sortkey': sortkey,
}
def dedup(ips):
'''deduplicate by address,port'''
d = {}
for ip in ips:
d[ip['ip'],ip['port']] = ip
return list(d.values())
def filtermultiport(ips):
'''Filter out hosts with more nodes per IP'''
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in list(hist.items()) if len(value)==1]
def lookup_asn(net, ip):
'''
Look up the asn for an IP (4 or 6) address by querying cymru.com, or None
if it could not be found.
'''
try:
if net == 'ipv4':
ipaddr = ip
prefix = '.origin'
else: # http://www.team-cymru.com/IP-ASN-mapping.html
res = str() # 2001:4860:b002:23::68
for nb in ip.split(':')[:4]: # pick the first 4 nibbles
for c in nb.zfill(4): # right padded with '0'
res += c + '.' # 2001 4860 b002 0023
ipaddr = res.rstrip('.') # 2.0.0.1.4.8.6.0.b.0.0.2.0.0.2.3
prefix = '.origin6'
asn = int([x.to_text() for x in dns.resolver.resolve('.'.join(
reversed(ipaddr.split('.'))) + prefix + '.asn.cymru.com',
'TXT').response.answer][0].split('\"')[1].split(' ')[0])
return asn
except Exception:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip + '"\n')
return None
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_per_net):
# Sift out ips by type
ips_ipv46 = [ip for ip in ips if ip['net'] in ['ipv4', 'ipv6']]
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
# Filter IPv46 by ASN, and limit to max_per_net per network
result = []
net_count = collections.defaultdict(int)
asn_count = collections.defaultdict(int)
for ip in ips_ipv46:
if net_count[ip['net']] == max_per_net:
continue
asn = lookup_asn(ip['net'], ip['ip'])
if asn is None or asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
net_count[ip['net']] += 1
result.append(ip)
# Add back Onions (up to max_per_net)
result.extend(ips_onion[0:max_per_net])
return result
def ip_stats(ips):
hist = collections.defaultdict(int)
for ip in ips:
if ip is not None:
hist[ip['net']] += 1
return '%6d %6d %6d' % (hist['ipv4'], hist['ipv6'], hist['onion'])
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
print('\x1b[7m IPv4 IPv6 Onion Pass \x1b[0m', file=sys.stderr)
print('%s Initial' % (ip_stats(ips)), file=sys.stderr)
# Skip entries with invalid address.
ips = [ip for ip in ips if ip is not None]
print('%s Skip entries with invalid address' % (ip_stats(ips)), file=sys.stderr)
# Skip duplicates (in case multiple seeds files were concatenated)
ips = dedup(ips)
print('%s After removing duplicates' % (ip_stats(ips)), file=sys.stderr)
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
print('%s Skip entries from suspicious hosts' % (ip_stats(ips)), file=sys.stderr)
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
print('%s Enforce minimal number of blocks' % (ip_stats(ips)), file=sys.stderr)
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
print('%s Require service bit 1' % (ip_stats(ips)), file=sys.stderr)
# Require at least 50% 30-day uptime for clearnet, 10% for onion.
req_uptime = {
'ipv4': 50,
'ipv6': 50,
'onion': 10,
}
ips = [ip for ip in ips if ip['uptime'] > req_uptime[ip['net']]]
print('%s Require minimum uptime' % (ip_stats(ips)), file=sys.stderr)
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])]
print('%s Require a known and recent user agent' % (ip_stats(ips)), file=sys.stderr)
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True) | ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
print('%s Look up ASNs and limit results per ASN and per net' % (ip_stats(ips)), file=sys.stderr)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print('[%s]:%i' % (ip['ip'], ip['port']))
else:
print('%s:%i' % (ip['ip'], ip['port']))
if __name__ == '__main__':
main() | # Filter out hosts with multiple bitcoin ports, these are likely abusive
ips = filtermultiport(ips)
print('%s Filter out hosts with multiple bitcoin ports' % (ip_stats(ips)), file=sys.stderr)
# Look up ASNs and limit results, both per ASN and globally. | random_line_split |
makeseeds.py | #!/usr/bin/env python3
# Copyright (c) 2013-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
import re
import sys
import dns.resolver
import collections
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 337600
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
with open("suspicious_hosts.txt", mode="r", encoding="utf-8") as f:
SUSPICIOUS_HOSTS = {s.strip() for s in f if s.strip()}
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(
r"^/Satoshi:("
r"0.14.(0|1|2|3|99)|"
r"0.15.(0|1|2|99)|"
r"0.16.(0|1|2|3|99)|"
r"0.17.(0|0.1|1|2|99)|"
r"0.18.(0|1|99)|"
r"0.19.(0|1|99)|"
r"0.20.(0|1|99)|"
r"0.21.99"
r")")
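# --- Hedged illustration (added; not part of the original file) -------------
# PATTERN_AGENT anchors at the start of the user-agent string, so recent
# Satoshi releases are accepted while older ones fall through every
# alternative and are rejected.
assert PATTERN_AGENT.match('/Satoshi:0.20.1/') is not None  # accepted
assert PATTERN_AGENT.match('/Satoshi:0.13.2/') is None      # too old, rejected
# ----------------------------------------------------------------------------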
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
if m is None:
m = PATTERN_IPV6.match(sline[0])
if m is None:
m = PATTERN_ONION.match(sline[0])
if m is None:
return None
else:
net = 'onion'
ipstr = sortkey = m.group(1)
port = int(m.group(2))
else:
net = 'ipv6'
if m.group(1) in ['::']: # Not interested in localhost
return None
ipstr = m.group(1)
sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
port = int(m.group(2))
else:
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
net = 'ipv4'
sortkey = ip
ipstr = m.group(1)
port = int(m.group(6))
# Skip bad results.
    if sline[1] == "0":  # fields from split() are strings, so compare against "0"
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'net': net,
'ip': ipstr,
'port': port,
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
'sortkey': sortkey,
}
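# --- Hedged example (added for illustration only) ---------------------------
# A synthetic seeder line with the twelve whitespace-separated fields that
# parseline() actually indexes; the columns it ignores are dummy zeros, so
# this is NOT a verbatim dnsseed.dump excerpt.
_sample = ('1.2.3.4:8333 1 1610000000 0 0 0 0 97.50% '
           '665000 0000000000000409 70015 "/Satoshi:0.20.1/"')
_parsed = parseline(_sample)
assert _parsed['net'] == 'ipv4' and _parsed['port'] == 8333
assert _parsed['uptime'] == 97.5 and _parsed['blocks'] == 665000
assert _parsed['agent'] == '/Satoshi:0.20.1/' and (_parsed['service'] & 1) == 1
# ----------------------------------------------------------------------------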
def dedup(ips):
'''deduplicate by address,port'''
d = {}
for ip in ips:
d[ip['ip'],ip['port']] = ip
return list(d.values())
def filtermultiport(ips):
    '''Filter out addresses that appear with more than one port (likely abusive hosts)'''
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in list(hist.items()) if len(value)==1]
def lookup_asn(net, ip):
'''
Look up the asn for an IP (4 or 6) address by querying cymru.com, or None
if it could not be found.
'''
try:
if net == 'ipv4':
ipaddr = ip
prefix = '.origin'
else: # http://www.team-cymru.com/IP-ASN-mapping.html
res = str() # 2001:4860:b002:23::68
            for nb in ip.split(':')[:4]: # pick the first 4 groups (the /64 prefix)
                for c in nb.zfill(4): # each group left-padded with '0' to 4 hex digits
res += c + '.' # 2001 4860 b002 0023
ipaddr = res.rstrip('.') # 2.0.0.1.4.8.6.0.b.0.0.2.0.0.2.3
prefix = '.origin6'
asn = int([x.to_text() for x in dns.resolver.resolve('.'.join(
reversed(ipaddr.split('.'))) + prefix + '.asn.cymru.com',
'TXT').response.answer][0].split('\"')[1].split(' ')[0])
return asn
except Exception:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip + '"\n')
return None
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_per_net):
# Sift out ips by type
ips_ipv46 = [ip for ip in ips if ip['net'] in ['ipv4', 'ipv6']]
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
# Filter IPv46 by ASN, and limit to max_per_net per network
result = []
net_count = collections.defaultdict(int)
asn_count = collections.defaultdict(int)
for ip in ips_ipv46:
if net_count[ip['net']] == max_per_net:
continue
asn = lookup_asn(ip['net'], ip['ip'])
if asn is None or asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
net_count[ip['net']] += 1
result.append(ip)
# Add back Onions (up to max_per_net)
result.extend(ips_onion[0:max_per_net])
return result
def ip_stats(ips):
hist = collections.defaultdict(int)
for ip in ips:
if ip is not None:
|
return '%6d %6d %6d' % (hist['ipv4'], hist['ipv6'], hist['onion'])
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
print('\x1b[7m IPv4 IPv6 Onion Pass \x1b[0m', file=sys.stderr)
print('%s Initial' % (ip_stats(ips)), file=sys.stderr)
# Skip entries with invalid address.
ips = [ip for ip in ips if ip is not None]
print('%s Skip entries with invalid address' % (ip_stats(ips)), file=sys.stderr)
# Skip duplicates (in case multiple seeds files were concatenated)
ips = dedup(ips)
print('%s After removing duplicates' % (ip_stats(ips)), file=sys.stderr)
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
print('%s Skip entries from suspicious hosts' % (ip_stats(ips)), file=sys.stderr)
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
print('%s Enforce minimal number of blocks' % (ip_stats(ips)), file=sys.stderr)
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
print('%s Require service bit 1' % (ip_stats(ips)), file=sys.stderr)
# Require at least 50% 30-day uptime for clearnet, 10% for onion.
req_uptime = {
'ipv4': 50,
'ipv6': 50,
'onion': 10,
}
ips = [ip for ip in ips if ip['uptime'] > req_uptime[ip['net']]]
print('%s Require minimum uptime' % (ip_stats(ips)), file=sys.stderr)
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])]
print('%s Require a known and recent user agent' % (ip_stats(ips)), file=sys.stderr)
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Filter out hosts with multiple bitcoin ports, these are likely abusive
ips = filtermultiport(ips)
print('%s Filter out hosts with multiple bitcoin ports' % (ip_stats(ips)), file=sys.stderr)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
print('%s Look up ASNs and limit results per ASN and per net' % (ip_stats(ips)), file=sys.stderr)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print('[%s]:%i' % (ip['ip'], ip['port']))
else:
print('%s:%i' % (ip['ip'], ip['port']))
if __name__ == '__main__':
main()
| hist[ip['net']] += 1 | conditional_block |
makeseeds.py | #!/usr/bin/env python3
# Copyright (c) 2013-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
import re
import sys
import dns.resolver
import collections
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 337600
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
with open("suspicious_hosts.txt", mode="r", encoding="utf-8") as f:
SUSPICIOUS_HOSTS = {s.strip() for s in f if s.strip()}
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(
r"^/Satoshi:("
r"0.14.(0|1|2|3|99)|"
r"0.15.(0|1|2|99)|"
r"0.16.(0|1|2|3|99)|"
r"0.17.(0|0.1|1|2|99)|"
r"0.18.(0|1|99)|"
r"0.19.(0|1|99)|"
r"0.20.(0|1|99)|"
r"0.21.99"
r")")
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
if m is None:
m = PATTERN_IPV6.match(sline[0])
if m is None:
m = PATTERN_ONION.match(sline[0])
if m is None:
return None
else:
net = 'onion'
ipstr = sortkey = m.group(1)
port = int(m.group(2))
else:
net = 'ipv6'
if m.group(1) in ['::']: # Not interested in localhost
return None
ipstr = m.group(1)
sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
port = int(m.group(2))
else:
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
net = 'ipv4'
sortkey = ip
ipstr = m.group(1)
port = int(m.group(6))
# Skip bad results.
    if sline[1] == "0":  # fields from split() are strings, so compare against "0"
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'net': net,
'ip': ipstr,
'port': port,
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
'sortkey': sortkey,
}
def dedup(ips):
'''deduplicate by address,port'''
d = {}
for ip in ips:
d[ip['ip'],ip['port']] = ip
return list(d.values())
def filtermultiport(ips):
    '''Filter out addresses that appear with more than one port (likely abusive hosts)'''
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in list(hist.items()) if len(value)==1]
def | (net, ip):
'''
Look up the asn for an IP (4 or 6) address by querying cymru.com, or None
if it could not be found.
'''
try:
if net == 'ipv4':
ipaddr = ip
prefix = '.origin'
else: # http://www.team-cymru.com/IP-ASN-mapping.html
res = str() # 2001:4860:b002:23::68
            for nb in ip.split(':')[:4]: # pick the first 4 groups (the /64 prefix)
                for c in nb.zfill(4): # each group left-padded with '0' to 4 hex digits
res += c + '.' # 2001 4860 b002 0023
ipaddr = res.rstrip('.') # 2.0.0.1.4.8.6.0.b.0.0.2.0.0.2.3
prefix = '.origin6'
asn = int([x.to_text() for x in dns.resolver.resolve('.'.join(
reversed(ipaddr.split('.'))) + prefix + '.asn.cymru.com',
'TXT').response.answer][0].split('\"')[1].split(' ')[0])
return asn
except Exception:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip + '"\n')
return None
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_per_net):
# Sift out ips by type
ips_ipv46 = [ip for ip in ips if ip['net'] in ['ipv4', 'ipv6']]
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
# Filter IPv46 by ASN, and limit to max_per_net per network
result = []
net_count = collections.defaultdict(int)
asn_count = collections.defaultdict(int)
for ip in ips_ipv46:
if net_count[ip['net']] == max_per_net:
continue
asn = lookup_asn(ip['net'], ip['ip'])
if asn is None or asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
net_count[ip['net']] += 1
result.append(ip)
# Add back Onions (up to max_per_net)
result.extend(ips_onion[0:max_per_net])
return result
def ip_stats(ips):
hist = collections.defaultdict(int)
for ip in ips:
if ip is not None:
hist[ip['net']] += 1
return '%6d %6d %6d' % (hist['ipv4'], hist['ipv6'], hist['onion'])
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
print('\x1b[7m IPv4 IPv6 Onion Pass \x1b[0m', file=sys.stderr)
print('%s Initial' % (ip_stats(ips)), file=sys.stderr)
# Skip entries with invalid address.
ips = [ip for ip in ips if ip is not None]
print('%s Skip entries with invalid address' % (ip_stats(ips)), file=sys.stderr)
# Skip duplicates (in case multiple seeds files were concatenated)
ips = dedup(ips)
print('%s After removing duplicates' % (ip_stats(ips)), file=sys.stderr)
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
print('%s Skip entries from suspicious hosts' % (ip_stats(ips)), file=sys.stderr)
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
print('%s Enforce minimal number of blocks' % (ip_stats(ips)), file=sys.stderr)
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
print('%s Require service bit 1' % (ip_stats(ips)), file=sys.stderr)
# Require at least 50% 30-day uptime for clearnet, 10% for onion.
req_uptime = {
'ipv4': 50,
'ipv6': 50,
'onion': 10,
}
ips = [ip for ip in ips if ip['uptime'] > req_uptime[ip['net']]]
print('%s Require minimum uptime' % (ip_stats(ips)), file=sys.stderr)
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])]
print('%s Require a known and recent user agent' % (ip_stats(ips)), file=sys.stderr)
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Filter out hosts with multiple bitcoin ports, these are likely abusive
ips = filtermultiport(ips)
print('%s Filter out hosts with multiple bitcoin ports' % (ip_stats(ips)), file=sys.stderr)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
print('%s Look up ASNs and limit results per ASN and per net' % (ip_stats(ips)), file=sys.stderr)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print('[%s]:%i' % (ip['ip'], ip['port']))
else:
print('%s:%i' % (ip['ip'], ip['port']))
if __name__ == '__main__':
main()
| lookup_asn | identifier_name |
makeseeds.py | #!/usr/bin/env python3
# Copyright (c) 2013-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
import re
import sys
import dns.resolver
import collections
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 337600
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
with open("suspicious_hosts.txt", mode="r", encoding="utf-8") as f:
SUSPICIOUS_HOSTS = {s.strip() for s in f if s.strip()}
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(
r"^/Satoshi:("
r"0.14.(0|1|2|3|99)|"
r"0.15.(0|1|2|99)|"
r"0.16.(0|1|2|3|99)|"
r"0.17.(0|0.1|1|2|99)|"
r"0.18.(0|1|99)|"
r"0.19.(0|1|99)|"
r"0.20.(0|1|99)|"
r"0.21.99"
r")")
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
if m is None:
m = PATTERN_IPV6.match(sline[0])
if m is None:
m = PATTERN_ONION.match(sline[0])
if m is None:
return None
else:
net = 'onion'
ipstr = sortkey = m.group(1)
port = int(m.group(2))
else:
net = 'ipv6'
if m.group(1) in ['::']: # Not interested in localhost
return None
ipstr = m.group(1)
sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
port = int(m.group(2))
else:
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
net = 'ipv4'
sortkey = ip
ipstr = m.group(1)
port = int(m.group(6))
# Skip bad results.
    if sline[1] == "0":  # fields from split() are strings, so compare against "0"
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'net': net,
'ip': ipstr,
'port': port,
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
'sortkey': sortkey,
}
def dedup(ips):
|
def filtermultiport(ips):
    '''Filter out addresses that appear with more than one port (likely abusive hosts)'''
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in list(hist.items()) if len(value)==1]
def lookup_asn(net, ip):
'''
Look up the asn for an IP (4 or 6) address by querying cymru.com, or None
if it could not be found.
'''
try:
if net == 'ipv4':
ipaddr = ip
prefix = '.origin'
else: # http://www.team-cymru.com/IP-ASN-mapping.html
res = str() # 2001:4860:b002:23::68
            for nb in ip.split(':')[:4]: # pick the first 4 groups (the /64 prefix)
                for c in nb.zfill(4): # each group left-padded with '0' to 4 hex digits
res += c + '.' # 2001 4860 b002 0023
ipaddr = res.rstrip('.') # 2.0.0.1.4.8.6.0.b.0.0.2.0.0.2.3
prefix = '.origin6'
asn = int([x.to_text() for x in dns.resolver.resolve('.'.join(
reversed(ipaddr.split('.'))) + prefix + '.asn.cymru.com',
'TXT').response.answer][0].split('\"')[1].split(' ')[0])
return asn
except Exception:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip + '"\n')
return None
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_per_net):
# Sift out ips by type
ips_ipv46 = [ip for ip in ips if ip['net'] in ['ipv4', 'ipv6']]
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
# Filter IPv46 by ASN, and limit to max_per_net per network
result = []
net_count = collections.defaultdict(int)
asn_count = collections.defaultdict(int)
for ip in ips_ipv46:
if net_count[ip['net']] == max_per_net:
continue
asn = lookup_asn(ip['net'], ip['ip'])
if asn is None or asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
net_count[ip['net']] += 1
result.append(ip)
# Add back Onions (up to max_per_net)
result.extend(ips_onion[0:max_per_net])
return result
def ip_stats(ips):
hist = collections.defaultdict(int)
for ip in ips:
if ip is not None:
hist[ip['net']] += 1
return '%6d %6d %6d' % (hist['ipv4'], hist['ipv6'], hist['onion'])
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
print('\x1b[7m IPv4 IPv6 Onion Pass \x1b[0m', file=sys.stderr)
print('%s Initial' % (ip_stats(ips)), file=sys.stderr)
# Skip entries with invalid address.
ips = [ip for ip in ips if ip is not None]
print('%s Skip entries with invalid address' % (ip_stats(ips)), file=sys.stderr)
# Skip duplicates (in case multiple seeds files were concatenated)
ips = dedup(ips)
print('%s After removing duplicates' % (ip_stats(ips)), file=sys.stderr)
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
print('%s Skip entries from suspicious hosts' % (ip_stats(ips)), file=sys.stderr)
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
print('%s Enforce minimal number of blocks' % (ip_stats(ips)), file=sys.stderr)
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
print('%s Require service bit 1' % (ip_stats(ips)), file=sys.stderr)
# Require at least 50% 30-day uptime for clearnet, 10% for onion.
req_uptime = {
'ipv4': 50,
'ipv6': 50,
'onion': 10,
}
ips = [ip for ip in ips if ip['uptime'] > req_uptime[ip['net']]]
print('%s Require minimum uptime' % (ip_stats(ips)), file=sys.stderr)
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])]
print('%s Require a known and recent user agent' % (ip_stats(ips)), file=sys.stderr)
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Filter out hosts with multiple bitcoin ports, these are likely abusive
ips = filtermultiport(ips)
print('%s Filter out hosts with multiple bitcoin ports' % (ip_stats(ips)), file=sys.stderr)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
print('%s Look up ASNs and limit results per ASN and per net' % (ip_stats(ips)), file=sys.stderr)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print('[%s]:%i' % (ip['ip'], ip['port']))
else:
print('%s:%i' % (ip['ip'], ip['port']))
if __name__ == '__main__':
main()
| '''deduplicate by address,port'''
d = {}
for ip in ips:
d[ip['ip'],ip['port']] = ip
return list(d.values()) | identifier_body |
base.py | try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from inspect import isgenerator
class Element(object):
tag = ''
self_closing = False
def __init__(self, *children, **attrs):
if children and isinstance(children[0], dict):
self.attrs = children[0]
children = children[1:]
else:
self.attrs = attrs
# Some helpers for the `class` attribute
if 'classes' in attrs:
attrs['class'] = ' '.join(c for c in attrs.pop('classes') if c)
elif 'class_' in attrs:
attrs['class'] = attrs.pop('class_')
self.children = []
self.add_children(children)
def | (self, *children):
self.add_children(children)
return self
def __repr__(self):
attr_string = ''.join(' {}="{}"'.format(key, val) for key, val in self.attrs.items() if val)
return '<{}{}>'.format(self.tag, attr_string)
def add_children(self, children):
if self.self_closing and children:
raise ValueError("Self-closing tags can't have children.")
if children and isgenerator(children[0]):
children = children[0]
for child in children:
if child is not None:
if isinstance(child, list):
self.add_children(child)
else:
self.children.append(child)
| __call__ | identifier_name |
base.py | try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from inspect import isgenerator
class Element(object):
tag = ''
self_closing = False
def __init__(self, *children, **attrs):
if children and isinstance(children[0], dict):
self.attrs = children[0]
children = children[1:]
else:
self.attrs = attrs
# Some helpers for the `class` attribute
if 'classes' in attrs:
attrs['class'] = ' '.join(c for c in attrs.pop('classes') if c)
elif 'class_' in attrs:
attrs['class'] = attrs.pop('class_')
self.children = []
self.add_children(children)
def __call__(self, *children):
|
def __repr__(self):
attr_string = ''.join(' {}="{}"'.format(key, val) for key, val in self.attrs.items() if val)
return '<{}{}>'.format(self.tag, attr_string)
def add_children(self, children):
if self.self_closing and children:
raise ValueError("Self-closing tags can't have children.")
if children and isgenerator(children[0]):
children = children[0]
for child in children:
if child is not None:
if isinstance(child, list):
self.add_children(child)
else:
self.children.append(child)
| self.add_children(children)
return self | identifier_body |
base.py | try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from inspect import isgenerator
class Element(object):
tag = ''
self_closing = False
def __init__(self, *children, **attrs):
if children and isinstance(children[0], dict):
self.attrs = children[0]
children = children[1:]
else:
|
# Some helpers for the `class` attribute
if 'classes' in attrs:
attrs['class'] = ' '.join(c for c in attrs.pop('classes') if c)
elif 'class_' in attrs:
attrs['class'] = attrs.pop('class_')
self.children = []
self.add_children(children)
def __call__(self, *children):
self.add_children(children)
return self
def __repr__(self):
attr_string = ''.join(' {}="{}"'.format(key, val) for key, val in self.attrs.items() if val)
return '<{}{}>'.format(self.tag, attr_string)
def add_children(self, children):
if self.self_closing and children:
raise ValueError("Self-closing tags can't have children.")
if children and isgenerator(children[0]):
children = children[0]
for child in children:
if child is not None:
if isinstance(child, list):
self.add_children(child)
else:
self.children.append(child)
| self.attrs = attrs | conditional_block |
base.py | try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from inspect import isgenerator
class Element(object):
tag = ''
self_closing = False
def __init__(self, *children, **attrs):
if children and isinstance(children[0], dict):
self.attrs = children[0]
children = children[1:]
else:
self.attrs = attrs
# Some helpers for the `class` attribute
if 'classes' in attrs:
attrs['class'] = ' '.join(c for c in attrs.pop('classes') if c)
elif 'class_' in attrs:
attrs['class'] = attrs.pop('class_')
self.children = []
self.add_children(children)
def __call__(self, *children):
self.add_children(children)
return self
def __repr__(self):
attr_string = ''.join(' {}="{}"'.format(key, val) for key, val in self.attrs.items() if val)
return '<{}{}>'.format(self.tag, attr_string)
def add_children(self, children):
if self.self_closing and children:
raise ValueError("Self-closing tags can't have children.")
if children and isgenerator(children[0]):
children = children[0] | else:
self.children.append(child) | for child in children:
if child is not None:
if isinstance(child, list):
self.add_children(child) | random_line_split |
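# --- Hedged usage sketch (added; not part of base.py) ------------------------
# Element is a small HTML-tree builder: keyword arguments become attributes
# (with the `classes`/`class_` helpers standing in for the reserved word
# `class`), calling an instance appends children, and __repr__ renders the
# opening tag.  The Div and Img subclasses below are assumed for this demo;
# the real library is expected to define its own tag classes elsewhere.
class Div(Element):
    tag = 'div'

class Img(Element):
    tag = 'img'
    self_closing = True

card = Div(classes=['card', 'wide'])(
    Div(class_='title')('Hello'),
    Img(src='logo.png'),
)
assert repr(card) == '<div class="card wide">'
assert len(card.children) == 2
# Img(Div()) would raise ValueError, since self-closing tags cannot take children.
# ------------------------------------------------------------------------------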