code | docs
---|---
def modSymbolsFromLabelInfo(labelDescriptor):
modSymbols = set()
for labelStateEntry in viewvalues(labelDescriptor.labels):
for labelPositionEntry in viewvalues(labelStateEntry['aminoAcidLabels']):
for modSymbol in aux.toList(labelPositionEntry):
if modSymbol != '':
modSymbols.add(modSymbol)
return modSymbols
|
Returns a set of all modification symbols which were used in the
labelDescriptor
:param labelDescriptor: :class:`LabelDescriptor` describes the label setup
of an experiment
:returns: a set of all modification symbols used in the labelDescriptor
|
def modAminoacidsFromLabelInfo(labelDescriptor):
modAminoacids = set()
for labelStateEntry in viewvalues(labelDescriptor.labels):
for labelPositionEntry in viewkeys(labelStateEntry['aminoAcidLabels']):
for modAminoacid in aux.toList(labelPositionEntry):
if modAminoacid != '':
modAminoacids.add(modAminoacid)
return modAminoacids
|
Returns a set of all amino acids and termini which can bear a label, as
described in "labelDescriptor".
:param labelDescriptor: :class:`LabelDescriptor` describes the label setup
of an experiment
:returns: a set of all amino acids and termini that can bear a label
|
def addLabel(self, aminoAcidLabels, excludingModifications=None):
if excludingModifications is not None:
self.excludingModifictions = True
labelEntry = {'aminoAcidLabels': aminoAcidLabels,
'excludingModifications': excludingModifications
}
self.labels[self._labelCounter] = labelEntry
self._labelCounter += 1
|
Adds a new labelstate.
:param aminoAcidLabels: Describes which amino acids can bear which
labels. Possible keys are the amino acids in one letter code and
'nTerm', 'cTerm'. Possible values are the modifications ids from
:attr:`maspy.constants.aaModMass` as strings or a list of strings.
An example for one expected label at the n-terminus and two expected
labels at each Lysine:
``{'nTerm': 'u:188', 'K': ['u:188', 'u:188']}``
:param excludingModifications: optional, a dictionary that describes
which modifications can prevent the addition of labels. Keys and
values have to be the modifications ids from
:attr:`maspy.constants.aaModMass`. The key specifies the
modification that prevents the label modification specified by the
value. For example, for each modification 'u:1' present at an amino
acid or terminus of a peptide, the number of expected labels at this
position is reduced by one: ``{'u:1':'u:188'}``
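A minimal usage sketch of the dictionaries described above; the LabelDescriptor instantiation and import are assumed, only the dictionary shapes come from this docstring:
# Hypothetical usage; assumes LabelDescriptor has been imported from maspy.
aminoAcidLabels = {'nTerm': 'u:188', 'K': ['u:188', 'u:188']}
excludingModifications = {'u:1': 'u:188'}
labelDescriptor = LabelDescriptor()
labelDescriptor.addLabel(aminoAcidLabels, excludingModifications)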
|
def get_gen_slice(ctx=Bubble(), iterable=[], amount=-1, index=-1):
ctx.gbc.say('get_gen_slice', stuff=iterable, verbosity=10)
i = -1
# TODO
# i = 0 #NATURAL INDEX, this will break all features with exports and -p
if amount > 0:
if index < 0:
index = 0
else:
for item in iterable:
i += 1
item[buts('index')] = i
ctx.gbc.say('Get gen NO slice:item %d' % i, verbosity=100)
ctx.gbc.say('Get gen NO slice:a:%d i:%d' %
(amount, index), verbosity=100)
ctx.gbc.say('Get gen NO slice:item', stuff=item, verbosity=1000)
yield item
until = index + amount
if six.PY2:
sli = xrange(index, until)
else:
sli = range(index, until)
ctx.gbc.say('Get gen slice:range %s' % str(sli), verbosity=1000)
# TODO: iterable should be empty if not slicing
# if valid slice ...
for item in iterable:
i += 1
if i in sli:
ctx.gbc.say('Get gen slice:item %d' % i, verbosity=100)
ctx.gbc.say('Get gen slice:a:%d i:%d' %
(amount, index), verbosity=100)
ctx.gbc.say('Get gen slice:item', stuff=item, verbosity=1000)
item[buts('index')] = i
yield item
elif i > until:
break
else:
pass
|
A very crude way of slicing a generator: if no amount is requested every item is yielded, otherwise only the items whose running index falls inside the requested slice are yielded.
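For comparison, a self-contained sketch of the same idea using only the standard library (the names below are illustrative, not part of the code above):
from itertools import islice

def gen_slice(iterable, amount=-1, index=0):
    # No amount requested: pass every item through unchanged.
    if amount <= 0:
        for item in iterable:
            yield item
    else:
        # Otherwise yield only the items in [index, index + amount).
        for item in islice(iterable, index, index + amount):
            yield item

print(list(gen_slice(range(10), amount=3, index=2)))  # [2, 3, 4]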
|
def _get_scripts(self, scripts_path_rel, files_deployment, script_type, project_path):
scripts_dict = {}
if scripts_path_rel:
self._logger.debug('Getting scripts with {0} definitions'.format(script_type))
scripts_dict = pgpm.lib.utils.misc.collect_scripts_from_sources(scripts_path_rel, files_deployment,
project_path, False, self._logger)
if len(scripts_dict) == 0:
self._logger.debug('No {0} definitions were found in {1} folder'.format(script_type, scripts_path_rel))
else:
self._logger.debug('No {0} folder was specified'.format(script_type))
return scripts_dict
|
Gets scripts from specified folders
|
def _resolve_dependencies(self, cur, dependencies):
list_of_deps_ids = []
_list_of_deps_unresolved = []
_is_deps_resolved = True
for k, v in dependencies.items():
pgpm.lib.utils.db.SqlScriptsHelper.set_search_path(cur, self._pgpm_schema_name)
cur.execute("SELECT _find_schema('{0}', '{1}')"
.format(k, v))
pgpm_v_ext = tuple(cur.fetchone()[0][1:-1].split(','))
try:
list_of_deps_ids.append(int(pgpm_v_ext[0]))
except:
pass
if not pgpm_v_ext[0]:
_is_deps_resolved = False
_list_of_deps_unresolved.append("{0}: {1}".format(k, v))
return _is_deps_resolved, list_of_deps_ids, _list_of_deps_unresolved
|
Checks whether dependent packages are installed in the DB
|
def find_table_links(self):
html = urlopen(self.model_url).read()
doc = lh.fromstring(html)
href_list = [area.attrib['href'] for area in doc.cssselect('map area')]
tables = self._inception_table_links(href_list)
return tables
|
When given a url, this function will find all the available table names
for that EPA dataset.
|
def _inception_table_links(self, href_list):
tables = set()
for link in href_list:
if not link.startswith('http://'):
link = self.agency_url + link
html = urlopen(link).read()
doc = lh.fromstring(html)
area = doc.cssselect('map area')
if area:
# Then this is a model containing models.
tables.update((a.attrib['href'] for a in area))
else:
# The link is a table without additional models.
                tables.add(link)
return tables
|
Sometimes the EPA likes to nest their models and tables -- model within
a model within a model -- so this internal method tries to clear all
that up.
|
def find_definition_urls(self, set_of_links):
definition_dict = {}
re_link_name = re.compile('.*p_table_name=(\w+)&p_topic.*')
for link in set_of_links:
if link.startswith('http://'):
table_dict = {}
html = urlopen(link).read()
doc = lh.fromstring(html)
unordered_list = doc.cssselect('#main ul')[-1]
for li in unordered_list.iterchildren():
a = li.find('a')
table_dict.update({a.text: a.attrib['href']})
link_name = re_link_name.sub(r'\1', link).upper()
definition_dict.update({link_name: table_dict})
return definition_dict
|
Find the available definition URLs for the columns in a table.
|
def create_agency(self):
agency = self.agency
links = self.find_table_links()
definition_dict = self.find_definition_urls(links)
with open(agency + '.txt', 'w') as f:
f.write(str(definition_dict))
|
Create an agency text file of definitions.
|
def loop_through_agency(self):
agency = self.agency
with open(agency + '.txt') as f:
data = eval(f.read())
for table in data:
for column in data[table]:
value_link = data[table][column]
data[table][column] = self.grab_definition(value_link)
data = json.dumps(data)
with open(agency + '_values.json', 'w') as f:
f.write(str(data))
|
Loop through an agency to grab the definitions for its tables.
|
def grab_definition(self, url):
re_description = re.compile('Description:(.+?\\n)')
re_table_name = re.compile("(\w+ Table.+)")
if url.startswith('//'):
url = 'http:' + url
elif url.startswith('/'):
url = 'http://www.epa.gov' + url
try:
html = urlopen(url).read()
doc = lh.fromstring(html)
main = doc.cssselect('#main')[0]
text = main.text_content()
definition = re_description.search(text).group(1).strip()
except (AttributeError, IndexError, TypeError, HTTPError):
print url
else:
value = re_table_name.sub('', definition)
return value
return url
|
Grab the column definition of a table from the EPA using a combination
of regular expressions and lxml.
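A tiny standalone illustration of the 'Description:' regex used above, applied to made-up page text:
import re

re_description = re.compile('Description:(.+?\\n)')
text = "Description: Average daily flow of the facility\nUnits: MGD\n"
definition = re_description.search(text).group(1).strip()
print(definition)  # Average daily flow of the facility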
|
def add_arc(self, src, dst, char):
if src not in self.automaton.states():
self.add_state()
arc = fst.Arc(self.isyms[char], self.osyms[char], fst.Weight.One(self.automaton.weight_type()), dst)
self.automaton.add_arc(src, arc)
|
Adds a new Arc
Args:
src (int): The source state identifier
dst (int): The destination state identifier
char (str): The character for the transition
Returns:
None
|
def fixminimized(self, alphabet):
insymbols = fst.SymbolTable()
outsymbols = fst.SymbolTable()
num = 1
for char in self.alphabet:
self.isyms.__setitem__(char, num)
self.osyms.__setitem__(char, num)
insymbols.add_symbol(char, num)
outsymbols.add_symbol(char, num)
num = num + 1
self.automaton.set_input_symbols(insymbols)
self.automaton.set_output_symbols(outsymbols)
endstate = self.add_state()
for state in self.states:
for char in alphabet:
found = 0
for arc in state.arcs:
if self.isyms.find(arc.ilabel) == char:
found = 1
break
if found == 0:
self.add_arc(state.stateid, endstate, char)
self[endstate].final = False
for char in alphabet:
self.add_arc(endstate, endstate, char)
|
After pyfst minimization,
all unused arcs are removed,
and all sink states are removed.
However this may break compatibility.
Args:
alphabet (list): The input alphabet
Returns:
None
|
def complement(self, alphabet):
self._addsink(alphabet)
for state in self.automaton.states():
if self.automaton.final(state) == fst.Weight.One(self.automaton.weight_type()):
self.automaton.set_final(state, fst.Weight.Zero(self.automaton.weight_type()))
else:
self.automaton.set_final(state, fst.Weight.One(self.automaton.weight_type()))
|
Returns the complement of DFA
Args:
alphabet (list): The input alphabet
Returns:
None
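The same idea sketched without pyfst: once the automaton is complete over the alphabet, complementation amounts to flipping the accepting set (a simplified illustration, not the library's API):
def complement_accepting(all_states, accepting_states):
    # Every state of a complete DFA that was accepting becomes rejecting,
    # and vice versa; transitions are left untouched.
    return set(all_states) - set(accepting_states)

print(complement_accepting({0, 1, 2}, {2}))  # {0, 1}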
|
def init_from_acceptor_bycopying(self, acceptor):
for state in acceptor.states:
for arc in state.arcs:
self.add_arc(state.stateid, arc.nextstate, acceptor.isyms.find(arc.ilabel))
if state.final:
print state.stateid,' is final'
            self[state.stateid].final = True
|
Initializes the automaton by copying the arcs and final states of the
given acceptor.
Args:
    acceptor: The acceptor automaton to copy from
Returns:
    None
|
def intersect(self, other):
self.automaton = fst.intersect(self.automaton, other.automaton)
return self
|
Constructs an unminimized DFA recognizing
the intersection of the languages of two given DFAs.
Args:
    other (DFA): The other DFA that will be used
    for the intersect operation
Returns:
    DFA: The resulting DFA
|
def main(*argv,
filesystem=None,
do_exit=True,
stdout=None,
stderr=None):
try:
mdcli = MdCLI()
mdcli.filesystem = filesystem
mdcli.stdout = stdout or sys.stdout
mdcli.stderr = stderr or sys.stderr
retval = mdcli.main(*argv, loop=LOOP_NEVER)
if do_exit:
sys.exit(retval)
else:
return retval
except KeyboardInterrupt:
pass
|
Main method for the cli.
We allow the filesystem to be overridden for test purposes.
|
def get_optparser(self):
p = Cmdln.get_optparser(self)
p.add_option(
"-M",
"--maildir",
action="store",
dest="maildir"
)
p.add_option(
"-V",
"--verbose",
action="store_true",
dest="verbose"
)
return p
|
Override to allow specification of the maildir
|
def do_lsfolders(self, subcmd, opts):
client = MdClient(self.maildir, filesystem=self.filesystem)
client.lsfolders(stream=self.stdout)
|
${cmd_name}: list the sub folders of the maildir.
${cmd_usage}
|
def do_ls(self, subcmd, opts, folder=""):
client = MdClient(self.maildir, filesystem=self.filesystem)
client.ls(
foldername = folder,
stream = self.stdout,
reverse = getattr(opts, "reverse", False),
grep = getattr(opts, "grep", None),
field = getattr(opts, "field", None),
since = float(getattr(opts, "since", -1))
)
|
${cmd_name}: list messages in the specified folder
${cmd_usage}
${cmd_option_list}
SINCE can be used with epoch times, for example:
md ls -s $(date '+%s')
|
def do_lisp(self, subcmd, opts, folder=""):
client = MdClient(self.maildir, filesystem=self.filesystem)
client.lisp(
foldername=folder,
stream=self.stdout,
reverse=getattr(opts, "reverse", False),
since=float(getattr(opts, "since", -1))
)
|
${cmd_name}: list messages in the specified folder in JSON format
${cmd_usage}
|
def do_make(self, subcmd, opts, path):
# Do we need to make this ".path" if it's relative?
d = path if path[0] == "/" else joinpath(self.maildir, "." + path)
os.makedirs(joinpath(d, "cur"))
os.makedirs(joinpath(d, "new"))
os.makedirs(joinpath(d, "tmp"))
os.makedirs(joinpath(d, "store"))
|
${cmd_name}: make a maildir at the specified path.
${cmd_usage}
If the path is relative then create under MAILDIR
else create at the absolute location.
|
def do_rm(self, subcmd, opts, message):
maildir = self.maildir
client = MdClient(maildir, filesystem=self.filesystem)
try:
client.remove(message)
except KeyError:
return 1
|
${cmd_name}: remove the specified message
${cmd_usage}
|
def do_mv(self, subcmd, opts, message, folder):
client = MdClient(self.maildir, filesystem=self.filesystem)
client.move(message, folder)
|
${cmd_name}: move the specified message to the specified folder
${cmd_usage}
|
def do_text(self, subcmd, opts, message):
client = MdClient(self.maildir, filesystem=self.filesystem)
client.gettext(message, self.stdout)
|
${cmd_name}: get the best text part of the specified message
${cmd_usage}
|
def do_raw(self, subcmd, opts, message):
client = MdClient(self.maildir)
client.getraw(message, self.stdout)
|
${cmd_name}: dump the complete raw message
${cmd_usage}
|
def do_rawpart(self, subcmd, opts, message):
client = MdClient(self.maildir, filesystem=self.filesystem)
partid = getattr(opts, "part", None)
if not partid:
client.getrawpart(message, self.stdout)
else:
client.getrawpartid(message, partid, self.stdout)
|
${cmd_name}: dump a part from the specified message
${cmd_usage}
${cmd_option_list}
|
def do_struct(self, subcmd, opts, message):
client = MdClient(self.maildir, filesystem=self.filesystem)
as_json = getattr(opts, "json", False)
client.getstruct(message, as_json=as_json, stream=self.stdout)
|
${cmd_name}: get the structure of the specified message
${cmd_usage}
${cmd_option_list}
|
def do_file(self, subcmd, opts, message):
client = MdClient(self.maildir, filesystem=self.filesystem)
client.get(message, self.stdout)
|
${cmd_name}: download the whole file of the message.
${cmd_usage}
|
def do_newfilter(self, subcmd, opts):
from mdlib.filterprocessor import RULES
print(RULES, file=self.stdout)
|
${cmd_name}: make a filterfile and spit it to stdout.
|
def do_storecheck(self, subcmd, opts):
from os.path import basename
from os.path import dirname
from os.path import exists as existspath
from os.path import islink
from os.path import join as joinpath
maildir = self.maildir
cur = joinpath(maildir, "cur")
new = joinpath(maildir, "new")
store = joinpath(maildir, "store")
found_list = []
# Loop through the folders checking that everything maps back to the store
for scandir in [cur, new]:
for f in os.listdir(scandir):
filename = joinpath(scandir, f)
try:
assert islink(filename)
store_location = os.readlink(filename)
assert existspath(store_location) and dirname(store_location) == store
except AssertionError:
print("%s was not a link into the store" % (
"/".join([
filename.split("/")[-2],
filename.split("/")[-1]
])
),
file=self.stdout)
else:
found_list.append(basename(store_location))
for storefile in os.listdir(store):
if storefile not in found_list:
print(
"%s found in store but not folders" % joinpath("store", storefile),
file=self.stdout
)
|
${cmd_name}: checks the store for files that may not be in the maildirs.
|
def form(context, form, **kwargs):
if not isinstance(form, (forms.BaseForm, TapeformFieldset)):
raise template.TemplateSyntaxError(
'Provided form should be a `Form` instance, actual type: {0}'.format(
form.__class__.__name__))
return render_to_string(
form.get_layout_template(kwargs.get('using', None)),
form.get_layout_context(),
)
|
The `form` template tag will render a tape-form enabled form using the template
provided by `get_layout_template` method of the form using the context generated
by `get_layout_context` method of the form.
Usage::
{% load tapeforms %}
{% form my_form %}
You can override the used layout template using the keyword argument `using`::
{% load tapeforms %}
{% form my_form using='other_form_layout_template.html' %}
:param form: The Django form to render.
:return: Rendered form (errors + hidden fields + fields) as HTML.
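A hedged sketch of a form that provides the two hooks the tag relies on; in the real library these come from a mixin, and the class and template names below are made up:
from django import forms

class ContactForm(forms.Form):
    email = forms.EmailField()

    # Template used by the {% form %} tag unless overridden with `using=...`.
    layout_template = 'forms/contact_layout.html'

    def get_layout_template(self, template_name=None):
        return template_name or self.layout_template

    def get_layout_context(self):
        return {
            'form': self,
            'errors': self.non_field_errors(),
            'hidden_fields': self.hidden_fields(),
            'visible_fields': self.visible_fields(),
        }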
|
def formfield(context, bound_field, **kwargs):
if not isinstance(bound_field, forms.BoundField):
raise template.TemplateSyntaxError(
'Provided field should be a `BoundField` instance, actual type: {0}'.format(
bound_field.__class__.__name__))
return render_to_string(
bound_field.form.get_field_template(bound_field, kwargs.get('using', None)),
bound_field.form.get_field_context(bound_field),
)
|
The `formfield` template tag will render a form field of a tape-form enabled form
using the template provided by `get_field_template` method of the form together with
the context generated by `get_field_context` method of the form.
Usage::
{% load tapeforms %}
{% formfield my_form.my_field %}
You can override the used field template using the keyword argument `using`::
{% load tapeforms %}
{% formfield my_form.my_field using='other_field_template.html' %}
:param bound_field: The `BoundField` from a Django form to render.
:return: Rendered field (label + widget + other stuff) as HTML.
|
def resolve_node_modules(self):
    'import the modules specified in init'
    if not self.resolved_node_modules:
        try:
            self.resolved_node_modules = [
                importlib.import_module(mod, self.node_package)
                for mod in self.node_modules
            ]
        except ImportError:
            self.resolved_node_modules = []
            raise
    return self.resolved_node_modules
|
import the modules specified in init
|
def register(self, name, func, fields, subscribe_to, entry_point, ignore):
    '''
    Register a named function in the graph
    :param name: name to register
    :type name: :py:class:`str`
    :param func: function to remember and call
    :type func: callable
    ``fields``, ``subscribe_to`` and ``entry_point`` are the same as in
    :py:meth:`Router.node`.
    '''
    self.fields[name] = fields
    self.functions[name] = func
    self.register_route(subscribe_to, name)
    if ignore:
        self.register_ignore(ignore, name)
    if entry_point:
        self.add_entry_point(name)
    self.logger.info('registered %s', name)
|
Register a named function in the graph
:param name: name to register
:type name: :py:class:`str`
:param func: function to remember and call
:type func: callable
``fields``, ``subscribe_to`` and ``entry_point`` are the same as in
:py:meth:`Router.node`.
|
def add_entry_point(self, destination):
    '''\
    Add an entry point
    :param destination: node to route to initially
    :type destination: str
    '''
    self.routes.setdefault('__entry_point', set()).add(destination)
    return self.routes['__entry_point']
|
Add an entry point
:param destination: node to route to initially
:type destination: str
|
def register_ignore(self, origins, destination):
    '''
    Add routes to the ignore dictionary
    :param origins: a number of origins to register
    :type origins: :py:class:`str` or iterable of :py:class:`str`
    :param destination: where the origins should point to
    :type destination: :py:class:`str`
    Ignore dictionary takes the following form::
        {'node_a': set(['node_b', 'node_c']),
         'node_b': set(['node_d'])}
    '''
    if not isinstance(origins, list):
        origins = [origins]
    self.ignore_regexes.setdefault(destination, [re.compile(origin) for origin in origins])
    self.regenerate_routes()
    return self.ignore_regexes[destination]
|
Add routes to the ignore dictionary
:param origins: a number of origins to register
:type origins: :py:class:`str` or iterable of :py:class:`str`
:param destination: where the origins should point to
:type destination: :py:class:`str`
Ignore dictionary takes the following form::
{'node_a': set(['node_b', 'node_c']),
'node_b': set(['node_d'])}
|
def regenerate_routes(self):
    'regenerate the routes after a new route is added'
    for destination, origins in self.regexes.items():
        # we want only the names that match the destination regexes.
        resolved = [
            name for name in self.names
            if name is not destination
            and any(origin.search(name) for origin in origins)
        ]
        ignores = self.ignore_regexes.get(destination, [])
        for origin in resolved:
            destinations = self.routes.setdefault(origin, set())
            if any(ignore.search(origin) for ignore in ignores):
                self.logger.info('ignoring route "%s" -> "%s"', origin, destination)
                try:
                    destinations.remove(destination)
                    self.logger.debug('removed "%s" -> "%s"', origin, destination)
                except KeyError:
                    pass
                continue
            if destination not in destinations:
                self.logger.info('added route "%s" -> "%s"', origin, destination)
                destinations.add(destination)
|
regenerate the routes after a new route is added
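The ignore check applied during regeneration boils down to a regex search against each origin name; a minimal standalone illustration with invented node names:
import re

ignore_patterns = [re.compile('debug_.*')]
origins = ['debug_logger', 'payment_processor']

routed = [o for o in origins
          if not any(p.search(o) for p in ignore_patterns)]
print(routed)  # ['payment_processor']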
|
def route(self, origin, message):
    '''\
    Using the routing dictionary, dispatch a message to all subscribers
    :param origin: name of the origin node
    :type origin: :py:class:`str`
    :param message: message to dispatch
    :type message: :py:class:`emit.message.Message` or subclass
    '''
    # side-effect: we have to know all the routes before we can route. But
    # we can't resolve them while the object is initializing, so we have to
    # do it just in time to route.
    self.resolve_node_modules()
    if not self.routing_enabled:
        return
    subs = self.routes.get(origin, set())
    for destination in subs:
        self.logger.debug('routing "%s" -> "%s"', origin, destination)
        self.dispatch(origin, destination, message)
|
Using the routing dictionary, dispatch a message to all subscribers
:param origin: name of the origin node
:type origin: :py:class:`str`
:param message: message to dispatch
:type message: :py:class:`emit.message.Message` or subclass
|
def dispatch(self, origin, destination, message):
    '''\
    dispatch a message to a named function
    :param destination: destination to dispatch to
    :type destination: :py:class:`str`
    :param message: message to dispatch
    :type message: :py:class:`emit.message.Message` or subclass
    '''
    func = self.functions[destination]
    self.logger.debug('calling %r directly', func)
    return func(_origin=origin, **message)
|
dispatch a message to a named function
:param destination: destination to dispatch to
:type destination: :py:class:`str`
:param message: message to dispatch
:type message: :py:class:`emit.message.Message` or subclass
|
def wrap_result(self, name, result):
    '''
    Wrap a result from a function with its stated fields
    :param name: fields to look up
    :type name: :py:class:`str`
    :param result: return value from function. Will be converted to tuple.
    :type result: anything
    :raises: :py:exc:`ValueError` if name has no associated fields
    :returns: :py:class:`dict`
    '''
    if not isinstance(result, tuple):
        result = tuple([result])
    try:
        return dict(zip(self.fields[name], result))
    except KeyError:
        msg = '"%s" has no associated fields'
        self.logger.exception(msg, name)
        raise ValueError(msg % name)
|
Wrap a result from a function with its stated fields
:param name: fields to look up
:type name: :py:class:`str`
:param result: return value from function. Will be converted to tuple.
:type result: anything
:raises: :py:exc:`ValueError` if name has no associated fields
:returns: :py:class:`dict`
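A standalone sketch of the wrapping rule, free of the router class:
def wrap_result(fields, result):
    # Scalars are promoted to a one-element tuple before zipping.
    if not isinstance(result, tuple):
        result = (result,)
    return dict(zip(fields, result))

print(wrap_result(('x', 'y'), (1, 2)))  # {'x': 1, 'y': 2}
print(wrap_result(('n',), 42))          # {'n': 42}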
|
def get_name(self, func):
    '''
    Get the name to reference a function by
    :param func: function to get the name of
    :type func: callable
    '''
    if hasattr(func, 'name'):
        return func.name
    return '%s.%s' % (
        func.__module__,
        func.__name__
    )
|
Get the name to reference a function by
:param func: function to get the name of
:type func: callable
|
def coerce(self, value):
if isinstance(value, bool):
return value
if not hasattr(value, 'lower'):
raise TypeError('Value is not bool or string.')
if value.lower() in ('yes', 'true', '1'):
return True
if value.lower() in ('no', 'false', '0'):
return False
raise ValueError('Could not coerce {0} to a bool.'.format(value))
|
Convert text values into boolean values.
True values are (case insensitive): 'yes', 'true', '1'. False values
are (case insensitive): 'no', 'false', '0'.
Args:
value (str or bool): The value to coerce.
Raises:
TypeError: If the value is not a bool or string.
ValueError: If the value is not bool or an acceptable value.
Returns:
bool: The True/False value represented.
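Example inputs and outputs for these coercion rules, using a standalone copy of the logic (the real method lives on an option class):
def coerce_bool(value):
    if isinstance(value, bool):
        return value
    if not hasattr(value, 'lower'):
        raise TypeError('Value is not bool or string.')
    if value.lower() in ('yes', 'true', '1'):
        return True
    if value.lower() in ('no', 'false', '0'):
        return False
    raise ValueError('Could not coerce {0} to a bool.'.format(value))

assert coerce_bool('Yes') is True
assert coerce_bool('0') is False
assert coerce_bool(True) is True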
|
def main():
    if len(argv) < 3:
print 'Usage: '
print ' Get A String %s CFG_fileA FST_fileB' % argv[0]
return
alphabet = createalphabet()
cfgtopda = CfgPDA(alphabet)
print '* Parsing Grammar:',
mma = cfgtopda.yyparse(argv[1])
print 'OK'
flex_a = Flexparser(alphabet)
print '* Parsing Regex:',
mmb = flex_a.yyparse(argv[2])
print mmb
print 'OK'
print '* Minimize Automaton:',
mmb.minimize()
print 'OK'
print mmb
print '* Diff:',
ops = PdaDiff(mma, mmb, alphabet)
mmc = ops.diff()
print 'OK'
print '* Get String:',
print ops.get_string()
|
Testing function for PDA - DFA Diff Operation
|
def _delta(self, graph, cur_state, char):
for arc in cur_state.arcs:
if graph.isyms.find(arc.ilabel) == char:
return graph[arc.nextstate]
return None
|
Args:
graph (Fst Acceptor): The DFA
cur_state (Fst State): The current State
char (Char): The input character
Returns:
(Fst State): The destination state
|
def diff(self):
self.mmb.complement(self.alphabet)
self.mmb.minimize()
print 'start intersection'
self.mmc = self._intesect()
print 'end intersection'
return self.mmc
|
The Difference between a PDA and a DFA
|
def refresh_devices(self):
    '''Queries hub for list of devices, and creates new device objects'''
    try:
        response = self.api.get("/api/v2/devices", {'properties': 'all'})
        for device_data in response['DeviceList']:
            self.devices.append(Device(device_data, self))
    except APIError as e:
        print("API error: ")
        for key, value in e.data.iteritems():
            print(str(key) + ": " + str(value))
|
Queries hub for list of devices, and creates new device objects
|
def refresh_details(self):
    '''Query hub and refresh all details of a device,
    but NOT status, includes grouplist not present in
    refresh_all_devices'''
    try:
        return self.api_iface._api_get("/api/v2/devices/" + str(self.device_id))
    except APIError as e:
        print("API error: ")
        for key, value in e.data.iteritems():
            print(str(key) + ": " + str(value))
|
Query hub and refresh all details of a device,
but NOT status, includes grouplist not present in
refresh_all_devices
|
def send_command(self, command):
    '''Send a command to a device'''
    data = {"command": command, "device_id": self.device_id}
    try:
        response = self.api_iface._api_post("/api/v2/commands", data)
        return Command(response, self)
    except APIError as e:
        print("API error: ")
        for key, value in e.data.iteritems():
            print(str(key) + ": " + str(value))
|
Send a command to a device
|
def _update_details(self, data):
    '''Intakes dict of details, and sets necessary properties
    in device'''
    # DeviceName, IconID, HouseID, DeviceID always present
    self.device_id = data['DeviceID']
    self.device_name = data['DeviceName']
    self.properties = data
|
Intakes dict of details, and sets necessary properties
in device
|
def _update_details(self, data):
    '''Intakes dict of details, and sets necessary properties
    in command'''
    for api_name in self._properties:
        if api_name in data:
            setattr(self, "_" + api_name, data[api_name])
        else:
            # Only set to blank if not initialized
            try:
                getattr(self, "_" + api_name)
            except AttributeError:
                setattr(self, "_" + api_name, '')
|
Intakes dict of details, and sets necessary properties
in command
|
def query_status(self):
    '''Query the hub for the status of this command'''
    try:
        data = self.api_iface._api_get(self.link)
        self._update_details(data)
    except APIError as e:
        print("API error: ")
        for key, value in e.data.iteritems():
            print(str(key) + ": " + str(value))
|
Query the hub for the status of this command
|
def tracks(self):
if self._tracks is None:
self._tracks = TrackList(self.version, self.id)
return self._tracks
|
Tracks list context
:return: Tracks list context
|
def list(self, ids, market=values.UNSET):
params = values.of({
'ids': ','.join(ids),
'market': market
})
response = self.version.request('GET', '/albums', params=params)
return AlbumPage(self.version, response.json(), 'albums')
|
List albums
:param List[str] ids: List of albums ids
:param str market: Market locale
:return: Page of Albums
:rtype: AlbumPage
|
def to_string(self):
if self.major == -1:
major_str = 'x'
else:
major_str = self.major
if self.minor == -1:
minor_str = 'x'
else:
minor_str = self.minor
if self.patch == -1:
patch_str = 'x'
else:
patch_str = self.patch
return '{0}_{1}_{2}'.format(major_str, minor_str, patch_str)
|
stringifies version
:return: string of version
|
def find(self, binding_id, instance):
binding = AtlasServiceBinding.Binding(binding_id, instance)
self.backend.storage.populate(binding)
return binding
|
find an instance
Create a new instance and populate it with data stored if it exists.
Args:
binding_id (string): UUID of the binding
instance (AtlasServiceInstance.Instance): instance
Returns:
AtlasServiceBinding: A binding
|
def bind(self, binding, parameters):
if not binding.isProvisioned():
# Update binding parameters
binding.parameters = parameters
# Credentials
creds = self.backend.config.generate_binding_credentials(binding)
# Binding
p = self.backend.config.generate_binding_permissions(
binding,
DatabaseUsersPermissionsSpecs(creds["username"],creds["password"])
)
try:
self.backend.atlas.DatabaseUsers.create_a_database_user(p)
except ErrAtlasConflict:
            # The user already exists. This is not an issue because it is possible that we
# created it in a previous call that failed later on the broker.
pass
self.backend.storage.store(binding)
# Bind done
return Binding(BindState.SUCCESSFUL_BOUND,
credentials = creds)
elif binding.parameters == parameters:
if self.backend.config.isGenerateBindingCredentialsPredictible():
            # Identical and credentials generation is predictable so we can return credentials again.
creds = self.backend.config.generate_binding_credentials(binding)
return Binding(BindState.IDENTICAL_ALREADY_EXISTS,
credentials = creds)
        # Identical, but credentials generation is NOT predictable. So we are breaking the spec to avoid
        # wrong data injection. In this case we report conflicting parameters for the existing binding despite
        # this not being the case.
raise ErrBindingAlreadyExists()
else:
# Different parameters ...
raise ErrBindingAlreadyExists()
|
Create the binding
Args:
binding (AtlasServiceBinding.Binding): Existing or New binding
parameters (dict): Parameters for the binding
Returns:
Binding: Status
Raises:
ErrBindingAlreadyExists: If binding exists but with different parameters
|
def unbind(self, binding):
username = self.backend.config.generate_binding_username(binding)
try:
self.backend.atlas.DatabaseUsers.delete_a_database_user(username)
except ErrAtlasNotFound:
        # The user does not exist. This is not an issue because it is possible that we
        # removed it in a previous call that failed later on the broker.
        # This covers the manually deleted user case too.
pass
self.backend.storage.remove(binding)
|
Unbind the instance
Args:
binding (AtlasServiceBinding.Binding): Existing or New binding
|
def extension(names):
for name in names:
if not NAME_PATTERN.match(name):
raise ValueError('invalid extension name: %s' % name)
def decorator(f, names=names):
return Extension(f, names=names)
return decorator
|
Makes a function an extension.
|
def register(self, extensions):
for ext in reversed(extensions):
for name in ext.names:
try:
self._extensions[name].appendleft(ext)
except KeyError:
self._extensions[name] = deque([ext])
|
Registers extensions.
|
def eval_extensions(self, value, name, option, format):
try:
exts = self._extensions[name]
except KeyError:
raise ValueError('no suitable extension: %s' % name)
for ext in exts:
rv = ext(self, value, name, option, format)
if rv is not None:
return rv
|
Evaluates extensions in the registry. If some extension handles the
format string, it returns a string. Otherwise, returns ``None``.
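A self-contained sketch of the register/evaluate pattern above: extensions live in per-name deques so later registrations take priority, and the first one returning a non-None value handles the lookup (the names below are illustrative):
from collections import deque

registry = {}

def register(names, func):
    for name in names:
        registry.setdefault(name, deque()).appendleft(func)

def evaluate(name, value):
    for ext in registry.get(name, ()):
        rv = ext(value)
        if rv is not None:
            return rv
    raise ValueError('no suitable extension: %s' % name)

register(['upper'], lambda value: value.upper())
print(evaluate('upper', 'hello'))  # HELLO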
|
def GenerateNewFileName(self):
if self.showInfo.showName is not None and self.showInfo.seasonNum is not None and \
self.showInfo.episodeNum is not None and self.showInfo.episodeName is not None:
ext = os.path.splitext(self.fileInfo.origPath)[1]
newFileName = "{0}.S{1}E{2}".format(self.showInfo.showName, self.showInfo.seasonNum, \
self.showInfo.episodeNum)
for episodeNum in self.showInfo.multiPartEpisodeNumbers:
newFileName = newFileName + "_{0}".format(episodeNum)
newFileName = newFileName + ".{0}{1}".format(self.showInfo.episodeName, ext)
newFileName = util.StripSpecialCharacters(newFileName)
return newFileName
|
Create new file name from show name, season number, episode number
and episode name in format ShowName.S<NUM>.E<NUM>.EpisodeName.
Returns
----------
string
New file name in format ShowName.S<NUM>.E<NUM>.EpisodeName.
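A quick standalone illustration of the resulting name with made-up values, skipping the multi-part and special-character handling:
showName, seasonNum, episodeNum, episodeName, ext = 'MyShow', '01', '02', 'Pilot', '.mkv'
newFileName = "{0}.S{1}E{2}".format(showName, seasonNum, episodeNum)
newFileName = newFileName + ".{0}{1}".format(episodeName, ext)
print(newFileName)  # MyShow.S01E02.Pilot.mkv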
|
def GenerateNewFilePath(self, fileDir = None):
newFileName = self.GenerateNewFileName()
if newFileName is not None:
if fileDir is None:
fileDir = os.path.dirname(self.fileInfo.origPath)
self.fileInfo.newPath = os.path.join(fileDir, newFileName)
|
Create new file path. If a fileDir is provided it will be used otherwise
the original file path is used. Updates file info object with new path.
Parameters
----------
fileDir : string [optional : default = None]
Optional file directory
|
def Print(self):
goodlogging.Log.Info("TVFILE", "TV File details are:")
goodlogging.Log.IncreaseIndent()
goodlogging.Log.Info("TVFILE", "Original File Path = {0}".format(self.fileInfo.origPath))
if self.showInfo.showName is not None:
goodlogging.Log.Info("TVFILE", "Show Name (from guide) = {0}".format(self.showInfo.showName))
elif self.fileInfo.showName is not None:
goodlogging.Log.Info("TVFILE", "Show Name (from file) = {0}".format(self.fileInfo.showName))
if self.showInfo.seasonNum is not None and self.showInfo.episodeNum is not None:
goodlogging.Log.Info("TVFILE", "Season & Episode = S{0}E{1}".format(self.showInfo.seasonNum, self.showInfo.episodeNum))
if self.showInfo.episodeName is not None:
goodlogging.Log.Info("TVFILE", "Episode Name: = {0}".format(self.showInfo.episodeName))
if self.fileInfo.newPath is not None:
goodlogging.Log.Info("TVFILE", "New File Path = {0}".format(self.fileInfo.newPath))
goodlogging.Log.DecreaseIndent()
|
Print contents of showInfo and FileInfo object
|
def connectProcess(connection, processProtocol, commandLine='', env={},
usePTY=None, childFDs=None, *args, **kwargs):
processOpenDeferred = defer.Deferred()
process = SSHProcess(processProtocol, commandLine, env, usePTY, childFDs,
*args, **kwargs)
process.processOpen = processOpenDeferred.callback
process.openFailed = processOpenDeferred.errback
connection.openChannel(process)
return processOpenDeferred
|
Opens a SSHSession channel and connects a ProcessProtocol to it
@param connection: the SSH Connection to open the session channel on
@param processProtocol: the ProcessProtocol instance to connect to the process
@param commandLine: the command line to execute the process
@param env: optional environment variables to set for the process
@param usePTY: if set, request a PTY for the process
@param childFDs: custom child file descriptors for the process
|
def get_api_publisher(self, social_user):
def _post(**kwargs):
api = self.get_api(social_user)
response = api.wall.post(**kwargs)
return response
return _post
|
owner_id - VK user or group
from_group - 1 by group, 0 by user
message - text
attachments - comma separated links or VK resources ID's
and other https://vk.com/dev.php?method=wall.post
|
def get_api_publisher(self, social_user):
def _post(**kwargs):
api = self.get_api(social_user)
author = {
'group_id': kwargs.get('group_id'),
'user_id': kwargs.get('user_id'),
}
server_data = api.photos.getWallUploadServer(**author)
attachments = []
for _file in kwargs['files']:
upload_data = requests.post(
server_data['upload_url'], files={"photo": _file}).json()
upload_data.update(author)
photos_data = api.photos.saveWallPhoto(**upload_data)
attachments.append('photo{owner_id}_{id}'.format(**photos_data[0]))
del kwargs['files']
kwargs['attachments'] = ','.join(attachments)
response = api.wall.post(**kwargs)
server_data.update(response)
return server_data
return _post
|
files: {'file0':<file>}
message: 'mess'
|
def call_builder_init(cls, kb_app, sphinx_app: Sphinx):
# Find and commit docs project plugins
conf_dir = sphinx_app.confdir
plugins_dir = sphinx_app.config.kaybee_settings.plugins_dir
full_plugins_dir = os.path.join(conf_dir, plugins_dir)
if os.path.exists(full_plugins_dir):
sys.path.insert(0, conf_dir)
plugin_package = importlib.import_module(plugins_dir)
importscan.scan(plugin_package)
else:
logger.info(f'## Kaybee: No plugin dir at {plugins_dir}')
dectate.commit(kb_app)
for callback in cls.get_callbacks(kb_app, SphinxEvent.BI):
callback(kb_app, sphinx_app)
|
On builder init event, commit registry and do callbacks
|
def call_purge_doc(cls, kb_app, sphinx_app: Sphinx,
sphinx_env: BuildEnvironment,
docname: str):
for callback in EventAction.get_callbacks(kb_app, SphinxEvent.EPD):
callback(kb_app, sphinx_app, sphinx_env, docname)
|
On env-purge-doc, do callbacks
|
def call_env_before_read_docs(cls, kb_app, sphinx_app: Sphinx,
sphinx_env: BuildEnvironment,
docnames: List[str]):
for callback in EventAction.get_callbacks(kb_app,
SphinxEvent.EBRD):
callback(kb_app, sphinx_app, sphinx_env, docnames)
|
On env-before-read-docs, do callbacks
|
def call_env_doctree_read(cls, kb_app, sphinx_app: Sphinx,
doctree: doctree):
for callback in EventAction.get_callbacks(kb_app,
SphinxEvent.DREAD):
callback(kb_app, sphinx_app, doctree)
|
On doctree-read, do callbacks
|
def call_doctree_resolved(cls, kb_app, sphinx_app: Sphinx,
doctree: doctree,
fromdocname: str):
for callback in EventAction.get_callbacks(kb_app,
SphinxEvent.DRES):
callback(kb_app, sphinx_app, doctree, fromdocname)
|
On doctree-resolved, do callbacks
|
def call_env_updated(cls, kb_app,
sphinx_app: Sphinx,
sphinx_env: BuildEnvironment):
for callback in EventAction.get_callbacks(kb_app, SphinxEvent.EU):
callback(kb_app, sphinx_app, sphinx_env)
|
On the env-updated event, do callbacks
|
def call_html_collect_pages(cls, kb_app, sphinx_app: Sphinx):
    for callback in EventAction.get_callbacks(kb_app,
                                              SphinxEvent.HCP):
        yield callback(kb_app, sphinx_app)
|
On html-collect-pages, do callbacks
|
def call_env_check_consistency(cls, kb_app, builder: StandaloneHTMLBuilder,
sphinx_env: BuildEnvironment):
for callback in EventAction.get_callbacks(kb_app,
SphinxEvent.ECC):
callback(kb_app, builder, sphinx_env)
|
On env-check-consistency, do callbacks
|
def call_missing_reference(cls, kb_app, sphinx_app: Sphinx,
sphinx_env: BuildEnvironment,
node,
contnode,
):
for callback in EventAction.get_callbacks(kb_app,
SphinxEvent.MR):
return callback(kb_app, sphinx_app, sphinx_env, node, contnode)
|
On missing-reference, do callbacks
|
def call_html_page_context(cls, kb_app, sphinx_app: Sphinx,
pagename: str,
templatename: str,
context,
doctree: doctree
):
# We need to let one, and only one, callback return the name of
# the template. Detect multiple and raise an exception.
new_templatename = None
for callback in EventAction.get_callbacks(kb_app,
SphinxEvent.HPC):
# The protocol: the one controlling callback will return a value
# with a dictionary of {'templatename': 'sometemplate'}
result = callback(kb_app, sphinx_app, pagename, templatename,
context,
doctree)
if result and isinstance(result,
dict) and 'templatename' in result:
if new_templatename is not None:
raise AssertionError('Multiple handlers returning')
new_templatename = result['templatename']
return new_templatename
|
On html-page-context, do callbacks; exactly one callback may supply the template name
|
def get_layout_template(self, template_name=None):
if template_name:
return template_name
if self.layout_template:
return self.layout_template
return defaults.LAYOUT_DEFAULT_TEMPLATE
|
Returns the layout template to use when rendering the form to HTML.
Preference of template selection:
1. Provided method argument `template_name`
2. Form class property `layout_template`
3. Globally defined default template from `defaults.LAYOUT_DEFAULT_TEMPLATE`
:param template_name: Optional template to use instead of other configurations.
:return: Template name to use when rendering the form.
|
def get_layout_context(self):
errors = self.non_field_errors()
for field in self.hidden_fields():
errors.extend(field.errors)
return {
'form': self,
'errors': errors,
'hidden_fields': self.hidden_fields(),
'visible_fields': self.visible_fields(),
}
|
Returns the context which is used when rendering the form to HTML.
The generated template context will contain the following variables:
* form: `Form` instance
* errors: `ErrorList` instance with non field errors and hidden field errors
* hidden_fields: All hidden fields to render.
* visible_fields: All visible fields to render.
:return: Template context for form rendering.
|
def full_clean(self, *args, **kwargs):
super().full_clean(*args, **kwargs)
for field in self.errors:
if field != NON_FIELD_ERRORS:
self.apply_widget_invalid_options(field)
|
The full_clean method is hijacked to apply special treatment to invalid
field inputs. For example adding extra options/classes to widgets.
|
def get_field_template(self, bound_field, template_name=None):
if template_name:
return template_name
templates = self.field_template_overrides or {}
template_name = templates.get(bound_field.name, None)
if template_name:
return template_name
template_name = templates.get(bound_field.field.__class__, None)
if template_name:
return template_name
if self.field_template:
return self.field_template
return defaults.FIELD_DEFAULT_TEMPLATE
|
Returns the field template to use when rendering a form field to HTML.
Preference of template selection:
1. Provided method argument `template_name`
2. Template from `field_template_overrides` selected by field name
3. Template from `field_template_overrides` selected by field class
4. Form class property `field_template`
5. Globally defined default template from `defaults.FIELD_DEFAULT_TEMPLATE`
:param bound_field: `BoundField` instance to select a template for.
:param template_name: Optional template to use instead of other configurations.
:return: Template name to use when rendering the form field.
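A hedged example of the override mapping consulted in steps 2 and 3; the template paths are made up, and keys may be field names or field classes, matching the lookups in the code above (the real form would also use the library's mixin):
from django import forms

class SignupForm(forms.Form):
    username = forms.CharField()
    birthday = forms.DateField()

    field_template_overrides = {
        'username': 'fields/username.html',    # matched by field name (step 2)
        forms.DateField: 'fields/date.html',   # matched by field class (step 3)
    }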
|
def get_field_label_css_class(self, bound_field):
class_name = self.field_label_css_class
if bound_field.errors and self.field_label_invalid_css_class:
class_name = join_css_class(
class_name, self.field_label_invalid_css_class)
return class_name or None
|
Returns the optional label CSS class to use when rendering a field template.
By default, returns the Form class property `field_label_css_class`. If the
field has errors and the Form class property `field_label_invalid_css_class`
is defined, its value is appended to the CSS class.
:param bound_field: `BoundField` instance to return CSS class for.
:return: A CSS class string or `None`
|
def apply_widget_options(self, field_name):
widget = self.fields[field_name].widget
if isinstance(widget, forms.DateInput):
widget.input_type = 'date'
if isinstance(widget, forms.TimeInput):
widget.input_type = 'time'
if isinstance(widget, forms.SplitDateTimeWidget):
widget.widgets[0].input_type = 'date'
widget.widgets[1].input_type = 'time'
|
Applies additional widget options like changing the input type of DateInput
and TimeInput to "date" / "time" to enable Browser date pickers or other
attributes/properties.
|
def apply_widget_template(self, field_name):
field = self.fields[field_name]
template_name = self.get_widget_template(field_name, field)
if template_name:
field.widget.template_name = template_name
|
Applies widget template overrides if available.
The method uses the `get_widget_template` method to determine if the widget
template should be exchanged. If a template is available, the template_name
property of the widget instance is updated.
:param field_name: A field name of the form.
|
def get_widget_template(self, field_name, field):
templates = self.widget_template_overrides or {}
template_name = templates.get(field_name, None)
if template_name:
return template_name
template_name = templates.get(field.widget.__class__, None)
if template_name:
return template_name
return None
|
Returns the optional widget template to use when rendering the widget
for a form field.
Preference of template selection:
1. Template from `widget_template_overrides` selected by field name
2. Template from `widget_template_overrides` selected by widget class
By default, returns `None` which means "use Django's default widget template".
:param field_name: The field name to select a widget template for.
:param field: `Field` instance to return a widget template.
:return: Template name to use when rendering the widget or `None`
|
def apply_widget_css_class(self, field_name):
field = self.fields[field_name]
class_name = self.get_widget_css_class(field_name, field)
if class_name:
field.widget.attrs['class'] = join_css_class(
field.widget.attrs.get('class', None), class_name)
|
Applies CSS classes to widgets if available.
The method uses the `get_widget_css_class` method to determine if the widget
CSS class should be changed. If a CSS class is returned, it is appended to
the current value of the class property of the widget instance.
:param field_name: A field name of the form.
|
def apply_widget_invalid_options(self, field_name):
field = self.fields[field_name]
class_name = self.get_widget_invalid_css_class(field_name, field)
if class_name:
field.widget.attrs['class'] = join_css_class(
field.widget.attrs.get('class', None), class_name)
field.widget.attrs['aria-invalid'] = 'true'
|
Applies additional widget options for an invalid field.
This method is called when there is some error on a field to apply
additional options on its widget. It does the following:
* Sets the aria-invalid property of the widget for accessibility.
* Adds an invalid CSS class, which is determined by the returned value
of `get_widget_invalid_css_class` method. If a CSS class is returned,
it is appended to the current value of the class property of the widget.
:param field_name: A field name of the form.
|
def use_quandl_data(self, authtoken):
dfs = {}
st = self.start.strftime("%Y-%m-%d")
at = authtoken
for pair in self.pairs:
symbol = "".join(pair)
qsym = "CURRFX/{}".format(symbol)
dfs[symbol] = qdl.get(qsym,authtoken=at, trim_start=st)['Rate']
self.build_conversion_table(dfs)
|
Use quandl data to build conversion table
|
def use_trump_data(self, symbols):
dfs = {sym.units : sym.df[sym.name] for sym in symbols}
self.build_conversion_table(dfs)
|
Use trump data to build the conversion table
symbols : list of symbols
    The symbols' units are used to build the conversion table;
    strings represent symbol names.
|
def build_conversion_table(self, dataframes):
self.data = pd.DataFrame(dataframes)
tmp_pairs = [s.split("/") for s in self.data.columns]
self.data.columns = pd.MultiIndex.from_tuples(tmp_pairs)
|
Build conversion table from a dictionary of dataframes
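A self-contained sketch of the table construction: pair-style column names such as 'EUR/USD' are split into a two-level MultiIndex (the rates below are invented):
import pandas as pd

dataframes = {
    'EUR/USD': pd.Series([1.10, 1.11]),
    'GBP/USD': pd.Series([1.30, 1.31]),
}
data = pd.DataFrame(dataframes)
data.columns = pd.MultiIndex.from_tuples([c.split('/') for c in data.columns])
print(data['EUR']['USD'])  # the EUR -> USD rate series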
|
def match_tweet(self, tweet, user_stream):
if user_stream:
if len(self.track) > 0:
return self.is_tweet_match_track(tweet)
return True
return self.is_tweet_match_track(tweet) or self.is_tweet_match_follow(tweet)
|
Check if a tweet matches the defined criteria
:param tweet: The tweet in question
:type tweet: :class:`~responsebot.models.Tweet`
:return: True if matched, False otherwise
|
def find(self, _id, instance = None):
if instance is None:
# We are looking for an instance
return self.service_instance.find(_id)
else:
# We are looking for a binding
return self.service_binding.find(_id, instance)
|
Find
Args:
_id (str): instance id or binding Id
Keyword Arguments:
instance (AtlasServiceInstance.Instance): Existing instance
Returns:
AtlasServiceInstance.Instance or AtlasServiceBinding.Binding: An instance or binding.
|
def create(self, instance, parameters, existing=True):
return self.service_instance.create(instance, parameters, existing)
|
Create an instance
Args:
instance (AtlasServiceInstance.Instance): Existing or New instance
parameters (dict): Parameters for the instance
Keyword Arguments:
existing (bool): True (use an existing cluster), False (create a new cluster)
Returns:
ProvisionedServiceSpec: Status
|
def index(self, req, drivers):
result = []
for driver in drivers:
result.append(driver.list_network(req.params))
data = {
'action': "index",
'controller': "network",
'cloud': req.environ['calplus.cloud'],
'result': result
}
return data
|
List all networks
List all of the networks on a specific cloud
with:
:Param req
:Type object Request
|
def delete(self, req, driver):
response = driver.delete_network(req.params, id)
data = {
'action': "delete",
'controller': "network",
'id': id,
'cloud': req.environ['calplus.cloud'],
'response': response
}
return data
|
Delete a network
Delete a specific network with id on a specific cloud
with:
:Param req
:Type object Request
|
def update(self, req, driver):
response = driver.update_network(req.params, id)
data = {
'action': "update",
'controller': "network",
'id': id,
'cloud': req.environ['calplus.cloud'],
'response': response
}
return data
|
Update a network
Update a specific network with id on a specific cloud
with:
:Param req
:Type object Request
|
def create(self, req, driver):
response = driver.create_network(req.params)
data = {
'action': "create",
'controller': "network",
'cloud': req.environ['calplus.cloud'],
'response': response
}
return data
|
Create a network
Create a new network on a specific cloud
with:
:Param req
:Type object Request
|
def get(self, req, driver):
response = driver.get_network(req.params, id)
data = {
'action': "get",
'controller': "network",
'id': id,
'cloud': req.environ['calplus.cloud'],
'response': response
}
return data
|
Get info of a network
Get info of a specific network with id on a specific cloud
with:
:Param req
:Type object Request
|
def attach_igw(self, req, driver):
igw = driver.get_igw(req.params)
if igw is None:
igw = driver.create_igw(req.params)
response = driver.attach_igw(req.params, igw)
data = {
'action': 'attach_igw',
'controller': 'network',
'id': id,
'cloud': req.environ['calplus.cloud'],
'response': response
}
return data
|
Attach network to Internet gateway
:Param req
:Type object Request
|