code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
def close(self, session):
    """Closes the specified session, event, or find list.

    Corresponds to viClose function of the VISA library.

    :param session: Unique logical identifier to a session, event, or
        find list.
    :return: return value of the library call.
    :rtype: VISAStatus
    """
    try:
        sess = self.sessions[session]
        # Never close ourselves (the resource manager session).
        if sess is not self:
            sess.close()
    except KeyError:
        return StatusCode.error_invalid_object
7.690719
8.526262
0.902004
def list_resources(self, session, query='?*::INSTR'):
    """Returns a tuple of all connected devices matching query.

    :param query: regular expression used to match devices.
    """
    # Ask every registered session type for its connected resources and
    # merge the answers into a single list before filtering.
    resources = []
    for _, session_type in sessions.Session.iter_valid_session_classes():
        resources.extend(session_type.list_resources())
    return rname.filter(resources, query)
11.555687
12.76494
0.905268
def read(self, session, count):
    """Reads data from device or interface synchronously.

    Corresponds to viRead function of the VISA library.

    :param session: Unique logical identifier to a session.
    :param count: Number of bytes to be read.
    :return: data read, return value of the library call.
    :rtype: bytes, VISAStatus
    """
    # Dispatch to the read method of the session object.
    try:
        result = self.sessions[session].read(count)
    except KeyError:
        return 0, StatusCode.error_invalid_object
    status = result[1]
    if status < 0:
        raise errors.VisaIOError(status)
    return result
6.630195
6.301667
1.052133
def write(self, session, data):
    """Writes data to device or interface synchronously.

    Corresponds to viWrite function of the VISA library.

    :param session: Unique logical identifier to a session.
    :param data: data to be written.
    :type data: str
    :return: Number of bytes actually transferred, return value of the
        library call.
    :rtype: int, VISAStatus
    """
    # Dispatch to the write method of the session object.
    try:
        result = self.sessions[session].write(data)
    except KeyError:
        return 0, StatusCode.error_invalid_object
    status = result[1]
    if status < 0:
        raise errors.VisaIOError(status)
    return result
7.08723
6.345276
1.11693
def get_attribute(self, session, attribute):
    """Retrieves the state of an attribute.

    Corresponds to viGetAttribute function of the VISA library.

    :param session: Unique logical identifier to a session, event, or
        find list.
    :param attribute: Resource attribute for which the state query is
        made (see Attributes.*)
    :return: The state of the queried attribute for a specified
        resource, return value of the library call.
    :rtype: unicode | str | list | int, VISAStatus
    """
    if session not in self.sessions:
        return None, StatusCode.error_invalid_object
    return self.sessions[session].get_attribute(attribute)
7.070233
7.845831
0.901145
def set_attribute(self, session, attribute, attribute_state):
    """Sets the state of an attribute.

    Corresponds to viSetAttribute function of the VISA library.

    :param session: Unique logical identifier to a session.
    :param attribute: Attribute for which the state is to be modified.
        (Attributes.*)
    :param attribute_state: The state of the attribute to be set for the
        specified object.
    :return: return value of the library call.
    :rtype: VISAStatus
    """
    if session not in self.sessions:
        return StatusCode.error_invalid_object
    return self.sessions[session].set_attribute(attribute, attribute_state)
5.197118
5.411626
0.960362
def lock(self, session, lock_type, timeout, requested_key=None):
    """Establishes an access mode to the specified resources.

    Corresponds to viLock function of the VISA library.

    :param session: Unique logical identifier to a session.
    :param lock_type: Specifies the type of lock requested, either
        Constants.EXCLUSIVE_LOCK or Constants.SHARED_LOCK.
    :param timeout: Absolute time period (in milliseconds) that a
        resource waits to get unlocked by the locking session before
        returning an error.
    :param requested_key: This parameter is not used and should be set
        to VI_NULL when lockType is VI_EXCLUSIVE_LOCK.
    :return: access_key that can then be passed to other sessions to
        share the lock, return value of the library call.
    :rtype: str, :class:`pyvisa.constants.StatusCode`
    """
    if session not in self.sessions:
        return StatusCode.error_invalid_object
    return self.sessions[session].lock(lock_type, timeout, requested_key)
5.186496
4.533571
1.14402
def unlock(self, session):
    """Relinquishes a lock for the specified resource.

    Corresponds to viUnlock function of the VISA library.

    :param session: Unique logical identifier to a session.
    :return: return value of the library call.
    :rtype: :class:`pyvisa.constants.StatusCode`
    """
    if session not in self.sessions:
        return StatusCode.error_invalid_object
    return self.sessions[session].unlock()
8.41961
7.020416
1.199304
def find_raw_devices(vendor=None, product=None, serial_number=None,
                     custom_match=None, **kwargs):
    """Find connected USB RAW devices.

    See usbutil.find_devices for more info.
    """
    def is_usbraw(dev):
        # Honour the user supplied predicate first.
        if custom_match and not custom_match(dev):
            return False
        # RAW devices expose a vendor-specific (0xFF/0xFF) interface.
        return bool(find_interfaces(dev, bInterfaceClass=0xFF,
                                    bInterfaceSubClass=0xFF))

    return find_devices(vendor, product, serial_number, is_usbraw, **kwargs)
4.404818
3.542359
1.24347
def write(self, data):
    """Send raw bytes to the instrument.

    :param data: bytes to be sent to the instrument
    :type data: bytes
    :return: number of bytes actually sent
    """
    raw_write = super(USBRawDevice, self).write
    size = len(data)
    bytes_sent = 0
    begin = 0
    # Send the payload in RECV_CHUNK sized slices.  Iterating while
    # begin < size (the previous `while not end > size` was equivalent
    # to `end <= size`) avoids issuing a spurious zero-length USB write
    # when len(data) is an exact multiple of RECV_CHUNK, or when data
    # is empty.
    while begin < size:
        end = begin + self.RECV_CHUNK
        bytes_sent += raw_write(data[begin:end])
        begin = end
    return bytes_sent
5.214676
4.921571
1.059555
def read(self, size):
    """Read raw bytes from the instrument.

    :param size: number of bytes to read from the instrument
    :type size: integer
    :return: received bytes
    :return type: bytes
    """
    raw_read = super(USBRawDevice, self).read
    received = bytearray()
    # Keep pulling RECV_CHUNK sized reads until enough data arrived.
    while len(received) < size:
        received.extend(raw_read(self.RECV_CHUNK))
    return bytes(received)
7.208115
7.890317
0.913539
def _find_listeners():
    """Find GPIB listeners."""
    for address in range(31):
        try:
            # Yield every listening address except our own board address.
            if gpib.listener(BOARD, address) and gpib.ask(BOARD, 1) != address:
                yield address
        except gpib.GpibError as err:
            logger.debug("GPIB error in _find_listeners(): %s", repr(err))
6.559713
5.311107
1.235093
def find_devices(vendor=None, product=None, serial_number=None,
                 custom_match=None, **kwargs):
    """Find connected USB devices matching certain keywords.

    Wildcards can be used for vendor, product and serial_number.

    :param vendor: name or id of the vendor (manufacturer)
    :param product: name or id of the product
    :param serial_number: serial number.
    :param custom_match: callable returning True or False that takes a
        device as only input.
    :param kwargs: other properties to match. See usb.core.find
    :return:
    """
    kwargs = kwargs or {}
    attrs = {}
    # String arguments are wildcard-matched against device attributes,
    # numeric ids go straight to usb.core.find.
    if isinstance(vendor, str):
        attrs['manufacturer'] = vendor
    elif vendor is not None:
        kwargs['idVendor'] = vendor
    if isinstance(product, str):
        attrs['product'] = product
    elif product is not None:
        kwargs['idProduct'] = product
    if serial_number:
        attrs['serial_number'] = str(serial_number)

    if attrs:
        def cm(dev):
            # Combine the user predicate with the wildcard matches.
            if custom_match is not None and not custom_match(dev):
                return False
            return all(
                fnmatch(getattr(dev, name).lower(), pattern.lower())
                for name, pattern in attrs.items())
    else:
        cm = custom_match

    return usb.core.find(find_all=True, custom_match=cm, **kwargs)
2.332002
2.20306
1.058528
def find_interfaces(device, **kwargs):
    """Return all interface descriptors of ``device`` matching ``kwargs``.

    :param device: usb device (iterable of configurations)
    :param kwargs: filters forwarded to ``usb_find_desc``
    :return: list of matching interface descriptors
    """
    interfaces = []
    # Best effort: skip configurations/descriptors that cannot be read,
    # but do not swallow SystemExit/KeyboardInterrupt as the previous
    # bare ``except:`` clauses did.
    try:
        for cfg in device:
            try:
                interfaces.extend(usb_find_desc(cfg, find_all=True, **kwargs))
            except Exception:
                pass
    except Exception:
        pass
    return interfaces
5.465585
5.277004
1.035736
def read(self, count):
    """Reads data from device or interface synchronously.

    Corresponds to viRead function of the VISA library.

    :param count: Number of bytes to be read.
    :return: data read, return value of the library call.
    :rtype: (bytes, VISAStatus)
    """
    def _usb_reader():
        # Map low-level USB timeouts to the generic timeout exception.
        try:
            return self.interface.read(count)
        except usb.USBError as exc:
            if exc.errno in (errno.ETIMEDOUT, -errno.ETIMEDOUT):
                raise USBTimeoutException()
            raise

    supress_end_en, _ = self.get_attribute(constants.VI_ATTR_SUPPRESS_END_EN)
    if supress_end_en:
        raise ValueError('VI_ATTR_SUPPRESS_END_EN == True is currently '
                         'unsupported by pyvisa-py')

    term_char, _ = self.get_attribute(constants.VI_ATTR_TERMCHAR)
    term_char_en, _ = self.get_attribute(constants.VI_ATTR_TERMCHAR_EN)

    return self._read(_usb_reader,
                      count,
                      lambda current: True,  # USB always returns a complete message
                      supress_end_en,
                      term_char,
                      term_char_en,
                      USBTimeoutException)
4.353695
4.287093
1.015535
def write(self, data):
    """Writes data to device or interface synchronously.

    Corresponds to viWrite function of the VISA library.

    :param data: data to be written.
    :type data: bytes
    :return: Number of bytes actually transferred, return value of the
        library call.
    :rtype: (int, VISAStatus)
    """
    # NOTE(review): send_end is queried but never used below --
    # presumably a placeholder for end-indicator handling; confirm
    # before removing the query.
    send_end, _ = self.get_attribute(constants.VI_ATTR_SEND_END_EN)
    count = self.interface.write(data)
    return count, StatusCode.success
13.922159
7.963339
1.748282
def needs_to_be_resolved(parent_obj, attr_name):
    """Determine if a reference (CrossReference) still needs to be
    resolved (while creating the model, while resolving references).

    Args:
        parent_obj: the object containing the attribute to be resolved.
        attr_name: the attribute identification object.

    Returns:
        True if the attribute needs to be resolved. Else False.
        In case of lists of references, this function returns True if
        any of the references in the list needs to be resolved.
        Note: outside the model building process (from_file or from_str)
        this function always returns False.
    """
    model = get_model(parent_obj)
    # The resolver only exists during the model building process.
    resolver = getattr(model, "_tx_reference_resolver", None)
    if resolver is None:
        return False
    return resolver.has_unresolved_crossrefs(parent_obj, attr_name)
5.054601
5.380177
0.939486
def textx_isinstance(obj, obj_cls):
    """Determine if a textX object is an instance of a textX class.

    Args:
        obj: the object to be analyzed
        obj_cls: the class to be checked

    Returns:
        True if obj is an instance of obj_cls.
    """
    if isinstance(obj, obj_cls):
        return True
    # Classes created from the same grammar rule share a fully
    # qualified name even across meta-model instances.
    if hasattr(obj_cls, "_tx_fqn") and hasattr(obj, "_tx_fqn") \
            and obj_cls._tx_fqn == obj._tx_fqn:
        return True
    # Walk the rule inheritance hierarchy.
    for sub_cls in getattr(obj_cls, "_tx_inh_by", []):
        if textx_isinstance(obj, sub_cls):
            return True
    return False
2.229684
2.526858
0.882394
def get_list_of_concatenated_objects(obj, dot_separated_name, lst=None):
    """get a list of the objects consisting of
    - obj
    - obj+"."+dot_separated_name
    - (obj+"."+dot_separated_name)+"."+dot_separated_name (recursively)
    Note: lists are expanded

    Args:
        obj: the starting point
        dot_separated_name: "the search path" (applied recursively)
        lst: the initial list (e.g. [])

    Returns:
        the filled list (if one single object is requested, a list with
        one entry is returned).
    """
    from textx.scoping import Postponed
    if lst is None:
        lst = []
    # Stop on empty objects and on cycles.
    if not obj or obj in lst:
        return lst
    lst.append(obj)
    # Unresolved (postponed) references cannot be followed any further.
    if type(obj) is Postponed:
        return lst
    ret = get_referenced_object(None, obj, dot_separated_name)
    targets = ret if type(ret) is list else [ret]
    for target in targets:
        lst = get_list_of_concatenated_objects(target, dot_separated_name, lst)
    return lst
2.211682
2.353304
0.93982
def get_location(model_obj):
    """Args:
        model_obj: the model object of interest

    Returns:
        the line, col and filename of the model element.
        The filename may be None.
        This function may be used to fill exceptions
    """
    model = get_model(model_obj)
    line, col = model._tx_parser.pos_to_linecol(model_obj._tx_position)
    return {"line": line, "col": col, "filename": model._tx_filename}
4.81981
4.271876
1.128266
def get_referenced_object(prev_obj, obj, dot_separated_name, desired_type=None):
    """get objects based on a path

    Args:
        prev_obj: the object containing obj (req. if obj is a list)
        obj: the current object
        dot_separated_name: the attribute name "a.b.c.d" starting from
            obj. Note: the attribute "parent(TYPE)" is a shortcut to
            jump to the parent of type "TYPE" (exact match of type
            name).
        desired_type: (optional)

    Returns:
        the object if found, None if not found or Postponed() if some
        postponed refs are found on the path
    """
    from textx.scoping import Postponed
    assert prev_obj or not type(obj) is list
    names = dot_separated_name.split(".")
    match = re.match(r'parent\((\w+)\)', names[0])
    if match:
        # "parent(TYPE)" jumps to the innermost parent with that
        # exact type name.
        next_obj = get_recursive_parent_with_typename(obj, match.group(1))
        if next_obj:
            return get_referenced_object(None, next_obj,
                                         ".".join(names[1:]), desired_type)
        return None
    elif type(obj) is list:
        # Look the name up inside the list of named elements.
        next_obj = None
        for candidate in obj:
            if hasattr(candidate, "name") and candidate.name == names[0]:
                if desired_type is None or textx_isinstance(candidate,
                                                            desired_type):
                    next_obj = candidate
                else:
                    raise TypeError(
                        "{} has type {} instead of {}.".format(
                            names[0], type(candidate).__name__,
                            desired_type.__name__))
        if not next_obj:
            # if prev_obj needs to be resolved: return Postponed.
            if needs_to_be_resolved(prev_obj, names[0]):
                return Postponed()
            return None
    elif type(obj) is Postponed:
        return Postponed()
    else:
        next_obj = getattr(obj, names[0])
        if not next_obj:
            # if obj is in a crossref return Postponed, else None
            if needs_to_be_resolved(obj, names[0]):
                return Postponed()
            return None
    if len(names) > 1:
        return get_referenced_object(obj, next_obj, ".".join(names[1:]),
                                     desired_type)
    if type(next_obj) is list and needs_to_be_resolved(obj, names[0]):
        return Postponed()
    return next_obj
2.799629
2.75335
1.016808
def get_referenced_object_as_list(
        prev_obj, obj, dot_separated_name, desired_type=None):
    """Same as get_referenced_object, but always returns a list.

    Args:
        prev_obj: see get_referenced_object
        obj: see get_referenced_object
        dot_separated_name: see get_referenced_object
        desired_type: see get_referenced_object

    Returns:
        same as get_referenced_object, but always returns a list
    """
    res = get_referenced_object(prev_obj, obj, dot_separated_name,
                                desired_type)
    if res is None:
        return []
    return res if type(res) is list else [res]
2.149594
2.582005
0.832529
def get_unique_named_object_in_all_models(root, name):
    """retrieves a unique named object (no fully qualified name)

    Args:
        root: start of search (if root is a model all known models are
            searched as well)
        name: name of object

    Returns:
        the object (if not unique, raises an error)
    """
    if hasattr(root, '_tx_model_repository'):
        src = list(
            root._tx_model_repository.local_models.filename_to_model.values())
        if root not in src:
            src.append(root)
    else:
        src = [root]

    found = []
    # Fix: removed the unconditional debug print of every analyzed
    # filename; a library helper should not write to stdout.
    for model in src:
        found += get_children(
            lambda x: hasattr(x, 'name') and x.name == name, model)
    assert len(found) == 1
    return found[0]
4.45648
4.775742
0.933149
def get_unique_named_object(root, name):
    """retrieves a unique named object (no fully qualified name)

    Args:
        root: start of search
        name: name of object

    Returns:
        the object (if not unique, raises an error)
    """
    matches = get_children(
        lambda x: hasattr(x, 'name') and x.name == name, root)
    assert len(matches) == 1
    return matches[0]
3.450893
5.016219
0.687947
def check_unique_named_object_has_class(root, name, class_name):
    """checks the type (type name) of a unique named object
    (no fully qualified name)

    Args:
        root: start of search
        name: name of object
        class_name: the name of the type to be checked

    Returns:
        nothing (if not unique or the type differs, raises an error)
    """
    obj = get_unique_named_object(root, name)
    assert class_name == obj.__class__.__name__
4.852103
7.333293
0.661654
def get_all_models_including_attached_models(model):
    """get a list of all models stored within a model (including the
    owning model).

    Args:
        model: the owning model

    Returns:
        a list of all models
    """
    repo = getattr(model, "_tx_model_repository", None)
    if repo is None:
        return [model]
    models = list(repo.all_models.filename_to_model.values())
    if model not in models:
        models.append(model)
    return models
4.245044
5.025095
0.844769
def load_models_using_filepattern(
        self, filename_pattern, model, glob_args, is_main_model=False,
        encoding='utf-8', add_to_local_models=True):
    """add a new model to all relevant objects

    Args:
        filename_pattern: models to be loaded
        model: model holding the loaded models in its
            _tx_model_repository field (may be None).
        glob_args: arguments passed to the glob.glob function.

    Returns:
        the list of loaded models
    """
    if model:
        self.update_model_in_repo_based_on_filename(model)
    filenames = glob.glob(filename_pattern, **glob_args)
    if not filenames:
        raise IOError(errno.ENOENT, os.strerror(errno.ENOENT),
                      filename_pattern)
    loaded_models = []
    for filename in filenames:
        # Each file may be governed by a different metamodel.
        the_metamodel = MetaModelProvider.get_metamodel(model, filename)
        loaded_models.append(self.load_model(
            the_metamodel, filename, is_main_model,
            encoding=encoding, add_to_local_models=add_to_local_models))
    return loaded_models
2.709522
2.653336
1.021176
def load_model_using_search_path(
        self, filename, model, search_path, is_main_model=False,
        encoding='utf8', add_to_local_models=True):
    """add a new model to all relevant objects

    Args:
        filename: models to be loaded
        model: model holding the loaded models in its
            _tx_model_repository field (may be None).
        search_path: list of search directories.

    Returns:
        the loaded model
    """
    if model:
        self.update_model_in_repo_based_on_filename(model)
    # Return the first match found along the search path.
    for directory in search_path:
        full_filename = join(directory, filename)
        if exists(full_filename):
            the_metamodel = MetaModelProvider.get_metamodel(
                model, full_filename)
            return self.load_model(
                the_metamodel, full_filename, is_main_model,
                encoding=encoding,
                add_to_local_models=add_to_local_models)
    raise IOError(errno.ENOENT, os.strerror(errno.ENOENT), filename)
3.157475
3.229188
0.977792
def load_model(self, the_metamodel, filename, is_main_model,
               encoding='utf-8', add_to_local_models=True):
    """load a single model

    Args:
        the_metamodel: the metamodel used to load the model
        filename: the model to be loaded (if not cached)

    Returns:
        the loaded/cached model
    """
    if not self.local_models.has_model(filename):
        if self.all_models.has_model(filename):
            new_model = self.all_models.filename_to_model[filename]
        else:
            # All models loaded here get their references resolved from
            # the root model.
            new_model = the_metamodel.internal_model_from_file(
                filename,
                pre_ref_resolution_callback=lambda other_model:
                    self.pre_ref_resolution_callback(other_model),
                is_main_model=is_main_model, encoding=encoding)
            self.all_models.filename_to_model[filename] = new_model
        if add_to_local_models:
            self.local_models.filename_to_model[filename] = new_model
    assert self.all_models.has_model(filename)  # to be sure...
    return self.all_models.filename_to_model[filename]
2.978675
3.058554
0.973883
def update_model_in_repo_based_on_filename(self, model):
    """Adds a model to the repo (not initially visible)

    Args:
        model: the model to be added. If the model has no filename,
            a name is invented

    Returns:
        the filename of the model added to the repo
    """
    if model._tx_filename is None:
        # Already registered under an invented name?
        for fn, registered in self.all_models.filename_to_model.items():
            if registered == model:
                return fn
        # Invent a fresh anonymous name.
        i = 0
        while self.all_models.has_model("anonymous{}".format(i)):
            i += 1
        myfilename = "anonymous{}".format(i)
        self.all_models.filename_to_model[myfilename] = model
    else:
        myfilename = model._tx_filename
        if not self.all_models.has_model(myfilename):
            self.all_models.filename_to_model[myfilename] = model
    return myfilename
2.763983
2.597703
1.064011
def pre_ref_resolution_callback(self, other_model):
    """(internal: used to store a model after parsing into the repository)

    Args:
        other_model: the parsed model

    Returns:
        nothing
    """
    filename = other_model._tx_filename
    assert filename
    # Give the freshly parsed model its own repository view that shares
    # the global model cache.
    other_model._tx_model_repository = GlobalModelRepository(self.all_models)
    self.all_models.filename_to_model[filename] = other_model
9.863062
10.083471
0.978142
def check(ctx, meta_model_file, model_file, ignore_case):
    """Check validity of meta-model and optionally model."""
    check_model(meta_model_file, model_file, ctx.obj['debug'], ignore_case)
3.562787
3.512614
1.014283
def visualize(ctx, meta_model_file, model_file, ignore_case, output_format):
    """Generate .dot file(s) from meta-model and optionally model."""
    debug = ctx.obj['debug']
    meta_model, model = check_model(meta_model_file, model_file, debug,
                                    ignore_case)

    if output_format == 'plantuml':
        pu_file = "{}.pu".format(meta_model_file)
        click.echo("Generating '{}' file for meta-model.".format(pu_file))
        click.echo("To convert to png run 'plantuml {}'".format(pu_file))
        click.echo("To convert to svg run 'plantuml -tsvg {}'".format(pu_file))
        metamodel_export(meta_model, pu_file, PlantUmlRenderer())
    else:
        dot_file = "{}.dot".format(meta_model_file)
        click.echo("Generating '{}' file for meta-model.".format(dot_file))
        click.echo("To convert to png run 'dot -Tpng -O {}'".format(dot_file))
        metamodel_export(meta_model, dot_file)

    if model_file:
        if output_format == 'plantuml':
            raise Exception("plantuml is not supported for model files, yet.")
        dot_file = "{}.dot".format(model_file)
        # Bug fix: these messages previously interpolated model_file
        # instead of the generated dot_file, so the user was told to run
        # dot on the wrong file.
        click.echo("Generating '{}' file for model.".format(dot_file))
        click.echo("To convert to png run 'dot -Tpng -O {}'".format(dot_file))
        model_export(model, dot_file)
2.262424
2.221408
1.018464
def register_textx_subcommands():
    """Find and use all textx sub-commands registered through extension
    points.

    Extension points for CLI extension are:
    - `textx_commands` - for registering top-level commands.
    - `textx_command_groups` - for registering command groups.
    """
    global textx
    # Direct sub-commands.
    for entry_point in pkg_resources.iter_entry_points(
            group='textx_commands'):
        textx.command()(entry_point.load())
    # Sub-command groups.
    for entry_point in pkg_resources.iter_entry_points(
            group='textx_command_groups'):
        entry_point.load()(textx)
3.355973
3.160208
1.061947
def get_entity_mm():
    """Builds and returns a meta-model for Entity language."""
    # Built-in simple types available in every Entity model.
    type_builtins = {
        'integer': SimpleType(None, 'integer'),
        'string': SimpleType(None, 'string'),
    }
    return metamodel_from_file(join(this_folder, 'entity.tx'),
                               classes=[SimpleType],
                               builtins=type_builtins)
5.339702
4.590118
1.163304
def sm_to_dot(model):
    """Transforms given state machine model to dot str."""
    parts = [HEADER]

    # Render states (the first one is marked as the initial state).
    for index, state in enumerate(model.states):
        parts.append('{}[label="{{{}{}|{}}}"]\n'.format(
            id(state),
            r"-\> " if index == 0 else "",
            state.name,
            "\\n".join(action.name for action in state.actions)))
        # Render this state's outgoing transitions.
        for transition in state.transitions:
            parts.append('{} -> {} [label="{}"]\n'.format(
                id(state), id(transition.to_state), transition.event.name))

    # If there are reset events declared render them.
    if model.resetEvents:
        parts.append(
            'reset_events [label="{{Reset Events|{}}}", style=""]\n'.format(
                "\\n".join(event.name for event in model.resetEvents)))

    parts.append('\n}\n')
    return "".join(parts)
4.063614
3.903861
1.040922
def get_language(language_name):
    """Returns a callable that instantiates meta-model for the given
    language."""
    langs = list(pkg_resources.iter_entry_points(group=LANG_EP,
                                                 name=language_name))
    if not langs:
        raise TextXError('Language "{}" is not registered.'
                         .format(language_name))
    if len(langs) > 1:
        # Multiple languages defined with the same name.
        # Bug fix: entry-point distributions must be converted to str
        # before joining -- str.join raises TypeError on non-str items.
        raise TextXError('Language "{}" registered multiple times:\n{}'
                         .format(language_name,
                                 "\n".join(str(lang.dist) for lang in langs)))
    return langs[0].load()()
3.129678
3.035028
1.031186
def get_model(obj):
    """Finds model root element for the given object."""
    root = obj
    # The model root is the only element without a parent link.
    while hasattr(root, 'parent'):
        root = root.parent
    return root
4.666942
3.876984
1.203756
def get_parent_of_type(typ, obj):
    """Finds first object up the parent chain of the given type.
    If no parent of the given type exists None is returned.

    Args:
        typ(str or python class): The type of the model object we are
            looking for.
        obj (model object): Python model object which is the start of
            the search process.
    """
    type_name = typ if type(typ) is text else typ.__name__
    current = obj
    while hasattr(current, 'parent'):
        current = current.parent
        if current.__class__.__name__ == type_name:
            return current
3.615697
3.968397
0.911123
def get_children(decider, root):
    """Returns a list of all model elements matching ``decider`` starting
    from model element 'root'. The search process will follow
    containment links only. Non-containing references shall not be
    followed.

    Args:
        decider(obj): a callable returning True if the object is of
            interest.
        root (model object): Python model object which is the start of
            the search process.
    """
    collected = []

    def visit(elem):
        if elem in collected:
            return
        cls = elem.__class__
        # Only textX model objects carry _tx_attrs meta information.
        if hasattr(cls, '_tx_attrs') and decider(elem):
            collected.append(elem)
        if not hasattr(cls, '_tx_attrs'):
            return
        for attr_name, attr in cls._tx_attrs.items():
            # Follow only attributes with containment semantics.
            if not attr.cont:
                continue
            value = getattr(elem, attr_name)
            if not value:
                continue
            if attr.mult in (MULT_ONE, MULT_OPTIONAL):
                visit(value)
            else:
                for child in value:
                    visit(child)

    visit(root)
    return collected
3.635074
3.497431
1.039355
def get_children_of_type(typ, root):
    """Returns a list of all model elements of type 'typ' starting from
    model element 'root'. The search process will follow containment
    links only. Non-containing references shall not be followed.

    Args:
        typ(str or python class): The type of the model object we are
            looking for.
        root (model object): Python model object which is the start of
            the search process.
    """
    type_name = typ if type(typ) is text else typ.__name__
    return get_children(lambda x: x.__class__.__name__ == type_name, root)
4.741664
5.595148
0.84746
def get_model_parser(top_rule, comments_model, **kwargs):
    """Creates model parser for the given language."""

    class TextXModelParser(Parser):
        def __init__(self, *args, **kwargs):
            super(TextXModelParser, self).__init__(*args, **kwargs)

            # By default first rule is starting rule
            # and must be followed by the EOF
            self.parser_model = Sequence(
                nodes=[top_rule, EOF()], rule_name='Model', root=True)
            self.comments_model = comments_model

            # Stack for metaclass instances
            self._inst_stack = []

            # Dict for cross-ref resolving
            # { id(class): { obj.name: obj}}
            self._instances = {}

            # List to keep track of all cross-ref that need to be resolved
            # Contained elements are tuples: (instance, metaattr, cross-ref)
            self._crossrefs = []

        def clone(self):
            import copy
            the_clone = copy.copy(self)  # shallow copy

            # create new objects for parse-dependent data
            the_clone._inst_stack = []
            the_clone._instances = {}
            the_clone._crossrefs = []

            # TODO self.memoization = memoization
            the_clone.comments = []
            the_clone.comment_positions = {}
            the_clone.sem_actions = {}
            return the_clone

        def _parse(self):
            try:
                return self.parser_model.parse(self)
            except NoMatch as e:
                line, col = e.parser.pos_to_linecol(e.position)
                raise TextXSyntaxError(message=text(e),
                                       line=line, col=col,
                                       expected_rules=e.rules)

        def get_model_from_file(self, file_name, encoding, debug,
                                pre_ref_resolution_callback=None,
                                is_main_model=True):
            with codecs.open(file_name, 'r', encoding) as f:
                model_str = f.read()
            return self.get_model_from_str(
                model_str, file_name=file_name, debug=debug,
                pre_ref_resolution_callback=pre_ref_resolution_callback,
                is_main_model=is_main_model, encoding=encoding)

        def get_model_from_str(self, model_str, file_name=None, debug=None,
                               pre_ref_resolution_callback=None,
                               is_main_model=True, encoding='utf-8'):
            old_debug_state = self.debug
            try:
                if debug is not None:
                    self.debug = debug
                if self.debug:
                    self.dprint("*** PARSING MODEL ***")
                self.parse(model_str, file_name=file_name)
                # Transform parse tree to model. Skip root node which
                # represents the whole file ending in EOF.
                model = parse_tree_to_objgraph(
                    self, self.parse_tree[0], file_name=file_name,
                    pre_ref_resolution_callback=pre_ref_resolution_callback,
                    is_main_model=is_main_model, encoding=encoding)
            finally:
                if debug is not None:
                    self.debug = old_debug_state
            try:
                model._tx_metamodel = self.metamodel
            except AttributeError:
                # model is some primitive python type (e.g. str)
                pass
            return model

    return TextXModelParser(**kwargs)
3.772685
3.749537
1.006174
def has_unresolved_crossrefs(self, obj, attr_name=None):
    """Args:
        obj: has this object unresolved crossrefs in its fields
            (non recursively)

    Returns:
        True (has unresolved crossrefs) or False (else)
    """
    model = get_model(obj)
    if model != self.model:
        # Delegate to the resolver owning obj's model.
        return model._tx_reference_resolver.has_unresolved_crossrefs(obj)
    for crossref_obj, attr, _ in self.parser._crossrefs:
        if crossref_obj is obj and ((not attr_name)
                                    or attr_name == attr.name):
            return True
    return False
5.166722
5.255897
0.983033
def resolve_one_step(self):
    """Resolves model references."""
    metamodel = self.parser.metamodel

    current_crossrefs = self.parser._crossrefs
    new_crossrefs = []
    self.delayed_crossrefs = []
    resolved_crossref_count = 0

    # -------------------------
    # start of resolve-loop
    # -------------------------
    default_scope = DefaultScopeProvider()
    for obj, attr, crossref in current_crossrefs:
        if get_model(obj) == self.model:
            attr_value = getattr(obj, attr.name)
            # Scope providers may be registered per class/attribute,
            # with "*" wildcards on either side.
            attr_refs = [obj.__class__.__name__ + "." + attr.name,
                         "*." + attr.name,
                         obj.__class__.__name__ + ".*",
                         "*.*"]
            for attr_ref in attr_refs:
                if attr_ref in metamodel.scope_providers:
                    if self.parser.debug:
                        self.parser.dprint(" FOUND {}".format(attr_ref))
                    resolved = metamodel.scope_providers[attr_ref](
                        obj, attr, crossref)
                    break
            else:
                resolved = default_scope(obj, attr, crossref)

            # Collect cross-references for textx-tools
            if resolved and not type(resolved) is Postponed:
                if metamodel.textx_tools_support:
                    self.pos_crossref_list.append(
                        RefRulePosition(
                            name=crossref.obj_name,
                            ref_pos_start=crossref.position,
                            ref_pos_end=crossref.position + len(
                                resolved.name),
                            def_pos_start=resolved._tx_position,
                            def_pos_end=resolved._tx_position_end))

            if not resolved:
                # As a fall-back search builtins if given
                if metamodel.builtins:
                    if crossref.obj_name in metamodel.builtins:
                        # TODO: Classes must match
                        resolved = metamodel.builtins[crossref.obj_name]

            if not resolved:
                line, col = self.parser.pos_to_linecol(crossref.position)
                raise TextXSemanticError(
                    message='Unknown object "{}" of class "{}"'.format(
                        crossref.obj_name, crossref.cls.__name__),
                    line=line, col=col, err_type=UNKNOWN_OBJ_ERROR,
                    expected_obj_cls=crossref.cls,
                    filename=self.model._tx_filename)

            if type(resolved) is Postponed:
                self.delayed_crossrefs.append((obj, attr, crossref))
                new_crossrefs.append((obj, attr, crossref))
            else:
                resolved_crossref_count += 1
                if attr.mult in [MULT_ONEORMORE, MULT_ZEROORMORE]:
                    attr_value.append(resolved)
                else:
                    setattr(obj, attr.name, resolved)
        else:  # crossref not in model
            new_crossrefs.append((obj, attr, crossref))
    # -------------------------
    # end of resolve-loop
    # -------------------------

    # store cross-refs from other models in the parser list (for later
    # processing)
    self.parser._crossrefs = new_crossrefs

    return (resolved_crossref_count, self.delayed_crossrefs)
3.746536
3.712319
1.009217
def load_models_in_model_repo(self, global_model_repo=None,
                              encoding='utf-8'):
    """load all registered models (called explicitly from the user and
    not as an automatic activity). Normally this is done automatically
    while reference resolution of one loaded model.

    However, if you wish to load all models you can call this and get a
    model repository. The metamodels must be identifiable via the
    MetaModelProvider.

    Returns:
        a GlobalModelRepository with the loaded models
    """
    import textx.scoping
    if not global_model_repo:
        global_model_repo = textx.scoping.GlobalModelRepository()
    for pattern in self.filename_pattern_list:
        global_model_repo.load_models_using_filepattern(
            pattern, model=None, glob_args=self.glob_args,
            is_main_model=True, encoding=encoding)
    return global_model_repo
3.816426
4.028203
0.947427
def python_type(textx_type_name):
    """Return Python type from the name of base textx type."""
    base_type_map = {
        'ID': text,
        'BOOL': bool,
        'INT': int,
        'FLOAT': float,
        'STRICTFLOAT': float,
        'STRING': text,
        'NUMBER': float,
        'BASETYPE': text,
    }
    # Unknown names are passed through unchanged.
    return base_type_map.get(textx_type_name, textx_type_name)
3.524363
3.094882
1.138772
def language_from_str(language_def, metamodel):
    """Constructs parser and initializes metamodel from language
    description given in textX language.

    Args:
        language_def (str): A language description in textX.
        metamodel (TextXMetaModel): A metamodel to initialize.

    Returns:
        Parser for the new language.
    """
    if type(language_def) is not text:
        raise TextXError("textX accepts only unicode strings.")

    if metamodel.debug:
        metamodel.dprint("*** PARSING LANGUAGE DEFINITION ***")

    # Check the cache for already constructed textX parser.
    # NOTE(review): the cache is keyed on the debug flag only, so all
    # metamodels with the same debug setting share one parser (built
    # with the first metamodel's memoization/file settings) -- confirm
    # this is intended.
    if metamodel.debug in textX_parsers:
        parser = textX_parsers[metamodel.debug]
    else:
        # Create parser for TextX grammars using the arpeggio grammar
        # specified in this module.
        parser = ParserPython(textx_model, comment_def=comment,
                              ignore_case=False, reduce_tree=False,
                              memoization=metamodel.memoization,
                              debug=metamodel.debug,
                              file=metamodel.file)
        # Cache it for subsequent calls.
        textX_parsers[metamodel.debug] = parser

    # Parse language description with textX parser
    try:
        parse_tree = parser.parse(language_def)
    except NoMatch as e:
        line, col = parser.pos_to_linecol(e.position)
        raise TextXSyntaxError(text(e), line, col)

    # Construct new parser and meta-model based on the given language
    # description.
    lang_parser = visit_parse_tree(parse_tree,
                                   TextXVisitor(parser, metamodel))

    # Meta-model is constructed. Validate its semantics.
    metamodel.validate()

    # Here we connect meta-model and language parser for convenience.
    lang_parser.metamodel = metamodel
    metamodel._parser_blueprint = lang_parser

    if metamodel.debug:
        # Create dot file for debuging purposes
        PMDOTExporter().exportFile(
            lang_parser.parser_model,
            "{}_parser_model.dot".format(metamodel.rootcls.__name__))

    return lang_parser
5.789937
5.530797
1.046854
def second_textx_model(self, model_parser):
    """Cross reference resolving for parser model."""
    if self.grammar_parser.debug:
        self.grammar_parser.dprint("RESOLVING MODEL PARSER: second_pass")

    # Order matters: PEG rule references must be resolved before rule
    # types can be determined; class references are resolved last.
    self._resolve_rule_refs(self.grammar_parser, model_parser)
    self._determine_rule_types(model_parser.metamodel)
    self._resolve_cls_refs(self.grammar_parser, model_parser)

    return model_parser
5.44359
4.852181
1.121885
def _resolve_rule_refs(self, grammar_parser, model_parser):
    """Resolves parser ParsingExpression crossrefs."""

    def _resolve_rule(rule):
        # Replace RuleCrossRef placeholders with the real PEG rule from
        # the metamodel and recurse into subrules.
        if not isinstance(rule, RuleCrossRef) and rule in resolved_rules:
            return rule
        resolved_rules.add(rule)

        if grammar_parser.debug:
            grammar_parser.dprint("Resolving rule: {}".format(rule))

        if type(rule) is RuleCrossRef:
            rule_name = rule.rule_name
            suppress = rule.suppress
            if rule_name in model_parser.metamodel:
                rule = model_parser.metamodel[rule_name]._tx_peg_rule
                if type(rule) is RuleCrossRef:
                    # Target is itself still unresolved — resolve it first
                    # and cache the resolved rule back on the meta-class.
                    rule = _resolve_rule(rule)
                    model_parser.metamodel[rule_name]._tx_peg_rule = rule
                if suppress:
                    # Special case. Suppression on rule reference.
                    _tx_class = rule._tx_class
                    rule = Sequence(nodes=[rule],
                                    rule_name=rule_name,
                                    suppress=suppress)
                    rule._tx_class = _tx_class
            else:
                line, col = grammar_parser.pos_to_linecol(rule.position)
                raise TextXSemanticError(
                    'Unexisting rule "{}" at position {}.'
                    .format(rule.rule_name, (line, col)), line, col)

        assert isinstance(rule, ParsingExpression),\
            "{}:{}".format(type(rule), text(rule))

        # Recurse into subrules, and resolve rules.
        for idx, child in enumerate(rule.nodes):
            if child not in resolved_rules:
                child = _resolve_rule(child)
                rule.nodes[idx] = child

        return rule

    # Two pass resolving
    for i in range(2):
        if grammar_parser.debug:
            grammar_parser.dprint("RESOLVING RULE CROSS-REFS - PASS {}"
                                  .format(i + 1))

        resolved_rules = set()
        _resolve_rule(model_parser.parser_model)

        # Resolve rules of all meta-classes to handle unreferenced
        # rules also.
        for cls in model_parser.metamodel:
            cls._tx_peg_rule = _resolve_rule(cls._tx_peg_rule)
3.722717
3.654948
1.018542
def _determine_rule_types(self, metamodel):
    """Determine textX rule/metaclass types"""

    def _determine_rule_type(cls):
        # Classify a single meta-class as common/abstract/match and record
        # whether anything changed (drives the outer fixpoint loop).
        if cls in resolved_classes:
            return
        resolved_classes.add(cls)

        # If there are attributes collected than this is a common rule
        if len(cls._tx_attrs) > 0:
            if cls._tx_type != RULE_COMMON:
                cls._tx_type = RULE_COMMON
                has_change[0] = True
            return

        rule = cls._tx_peg_rule

        # Check if this rule is abstract. Abstract are root rules which
        # haven't got any attributes and reference at least one non-match
        # rule.
        abstract = False
        if rule.rule_name and cls.__name__ != rule.rule_name:
            # Special case. Body of the rule is a single rule reference and
            # the referenced rule is not match rule.
            target_cls = metamodel[rule.rule_name]
            _determine_rule_type(target_cls)
            abstract = target_cls._tx_type != RULE_MATCH
        else:
            # Find at least one referenced rule that is not match rule by
            # going down the parser model and finding root rules.
            def _has_nonmatch_ref(rule):
                for r in rule.nodes:
                    if r.root:
                        _determine_rule_type(r._tx_class)
                        result = r._tx_class._tx_type != RULE_MATCH
                    else:
                        result = _has_nonmatch_ref(r)
                    if result:
                        return True
            abstract = _has_nonmatch_ref(rule)

        if abstract and cls._tx_type != RULE_ABSTRACT:
            cls._tx_type = RULE_ABSTRACT
            has_change[0] = True

            # Add inherited classes to this rule's meta-class
            if rule.rule_name and cls.__name__ != rule.rule_name:
                if rule._tx_class not in cls._tx_inh_by:
                    cls._tx_inh_by.append(rule._tx_class)
            else:
                # Recursively append all referenced classes.
                def _add_reffered_classes(rule, inh_by, start=False):
                    if rule.root and not start:
                        if hasattr(rule, '_tx_class'):
                            _determine_rule_type(rule._tx_class)
                            if rule._tx_class._tx_type != RULE_MATCH and\
                                    rule._tx_class not in inh_by:
                                inh_by.append(rule._tx_class)
                    else:
                        for r in rule.nodes:
                            _add_reffered_classes(r, inh_by)

                if type(rule) is OrderedChoice:
                    for r in rule.nodes:
                        _add_reffered_classes(r, cls._tx_inh_by)
                else:
                    _add_reffered_classes(rule, cls._tx_inh_by, start=True)

    # Multi-pass rule type resolving to support circular rule references.
    # `has_change` is a list to support outer scope variable change in
    # Python 2.x
    has_change = [True]
    while has_change[0]:
        has_change[0] = False
        resolved_classes = set()
        for cls in metamodel:
            _determine_rule_type(cls)
3.490058
3.465317
1.00714
def match_abstract_str(cls):
    """
    For a given abstract or match rule meta-class returns a nice string
    representation for the body.
    """
    def r(s):
        # Render one PEG (sub)expression; references to already-visited or
        # base-type rules are shown by name only.
        if s.root:
            if s in visited or s.rule_name in ALL_TYPE_NAMES or \
                    (hasattr(s, '_tx_class')
                     and s._tx_class._tx_type is not RULE_MATCH):
                return s.rule_name
            visited.add(s)
        # NOTE(review): if `s` matches none of the branches below, `result`
        # is unbound and the final format raises — confirm all node kinds
        # are covered by these isinstance checks.
        if isinstance(s, Match):
            result = text(s)
        elif isinstance(s, OrderedChoice):
            result = "|".join([r(x) for x in s.nodes])
        elif isinstance(s, Sequence):
            result = " ".join([r(x) for x in s.nodes])
        elif isinstance(s, ZeroOrMore):
            result = "({})*".format(r(s.nodes[0]))
        elif isinstance(s, OneOrMore):
            result = "({})+".format(r(s.nodes[0]))
        elif isinstance(s, Optional):
            result = "{}?".format(r(s.nodes[0]))
        elif isinstance(s, SyntaxPredicate):
            result = ""
        # Trailing '-' marks suppressed expressions.
        return "{}{}".format(result, "-" if s.suppress else "")

    mstr = ""
    if cls.__name__ not in ALL_TYPE_NAMES and \
            not (cls._tx_type is RULE_ABSTRACT
                 and cls.__name__ != cls._tx_peg_rule.rule_name):
        e = cls._tx_peg_rule
        visited = set()
        if not isinstance(e, Match):
            visited.add(e)
        if isinstance(e, OrderedChoice):
            mstr = "|".join([r(x) for x in e.nodes
                             if x.rule_name in BASE_TYPE_NAMES or not x.root])
        elif isinstance(e, Sequence):
            mstr = " ".join([r(x) for x in e.nodes])
        else:
            mstr = r(e)
        mstr = dot_escape(mstr)
    return mstr
3.270157
3.176786
1.029392
def model_export(model, file_name, repo=None):
    """
    Export a model (or a whole model repository) to a dot file.

    Args:
        model: the model to be exported (may be None if repo is not None)
        file_name: the output file name
        repo: the model repo (alternative to model input) to be exported
    Returns:
        Nothing
    """
    output = codecs.open(file_name, 'w', encoding="utf-8")
    try:
        model_export_to_file(output, model, repo)
    finally:
        output.close()
3.268608
3.892439
0.839733
def model_export_to_file(f, model=None, repo=None):
    """
    Export a model or a model repository as a graphviz dot graph.

    Args:
        f: the file object to be used as output.
        model: the model to be exported (alternative to repo)
        repo: the repo to be exported (alternative to model)
    Returns:
        Nothing
    Raises:
        Exception: if neither or both of `model` and `repo` are given.
    """
    # Exactly one of model/repo must be provided.
    # Fix: error message previously read "specity" (typo).
    if not model and not repo:
        raise Exception("specify either a model or a repo")
    if model and repo:
        raise Exception("specify either a model or a repo")

    processed_set = set()
    f.write(HEADER)

    def _export(obj):
        # Emit one dot node per model object and edges for its attributes;
        # recurse into contained/referenced objects.
        if obj is None or obj in processed_set or type(obj) \
                in PRIMITIVE_PYTHON_TYPES:
            return

        processed_set.add(obj)

        attrs = ""
        obj_cls = obj.__class__
        name = ""
        for attr_name, attr in obj_cls._tx_attrs.items():
            attr_value = getattr(obj, attr_name)
            endmark = 'arrowtail=diamond dir=both' if attr.cont else ""
            required = "+" if attr.mult in \
                [MULT_ONE, MULT_ONEORMORE] else ""

            if attr.mult in [MULT_ONEORMORE, MULT_ZEROORMORE]:
                if all([type(x) in PRIMITIVE_PYTHON_TYPES
                        for x in attr_value]):
                    # A list of plain values is inlined into the node label.
                    attrs += "{}{}:list=[".format(required, attr_name)
                    attrs += ",".join([dot_repr(x) for x in attr_value])
                    attrs += "]\\l"
                else:
                    for idx, list_obj in enumerate(attr_value):
                        if list_obj is not None:
                            if type(list_obj) in PRIMITIVE_PYTHON_TYPES:
                                f.write(
                                    '{} -> "{}:{}" [label="{}:{}" {}]\n'
                                    .format(id(obj), list_obj,
                                            type(list_obj).__name__,
                                            attr_name, idx, endmark))
                            else:
                                f.write('{} -> {} [label="{}:{}" {}]\n'
                                        .format(id(obj), id(list_obj),
                                                attr_name, idx, endmark))
                                _export(list_obj)
            else:
                # Plain attributes
                if type(attr_value) is text and attr_name != 'name':
                    attr_value = dot_repr(attr_value)

                if type(attr_value) in PRIMITIVE_PYTHON_TYPES:
                    if attr_name == 'name':
                        name = attr_value
                    else:
                        attrs += "{}{}:{}={}\\l".format(
                            required, attr_name, type(attr_value)
                            .__name__, attr_value)
                else:
                    # Object references
                    if attr_value is not None:
                        f.write('{} -> {} [label="{}" {}]\n'.format(
                            id(obj), id(attr_value), attr_name, endmark))
                        _export(attr_value)

        name = "{}:{}".format(name, obj_cls.__name__)
        f.write('{}[label="{{{}|{}}}"]\n'.format(id(obj), name, attrs))

    def _export_subgraph(m):
        # One dot cluster per model file, listing the ids of its objects.
        from textx import get_children
        f.write('subgraph "cluster_{}" {{\n'.format(m._tx_filename))
        f.write('''    penwidth=2.0 color=darkorange4;
    label = "{}";
    '''.format(m._tx_filename))
        for obj in get_children(lambda _: True, m):
            f.write('{};\n'.format(id(obj)))
        f.write('\n}\n')

    if repo or hasattr(model, "_tx_model_repository"):
        if not repo:
            repo = model._tx_model_repository.all_models
        for m in repo.filename_to_model.values():
            _export_subgraph(m)
            _export(m)
    else:
        _export(model)

    f.write('\n}\n')
2.90891
2.93332
0.991678
def metamodel_from_str(lang_desc, metamodel=None, **kwargs):
    """
    Creates a new metamodel from the textX description given as a string.

    Args:
        lang_desc(str): A textX language description.
        metamodel(TextXMetaModel): A metamodel that should be used.
        other params: See TextXMetaModel.
    """
    # Create a fresh metamodel unless the caller supplied one.
    metamodel = metamodel or TextXMetaModel(**kwargs)
    language_from_str(lang_desc, metamodel)
    return metamodel
4.789968
4.782658
1.001529
def metamodel_from_file(file_name, **kwargs):
    """
    Creates new metamodel from the given file.

    Args:
        file_name(str): The name of the file with textX language description.
        other params: See metamodel_from_str.
    """
    with codecs.open(file_name, 'r', 'utf-8') as grammar_file:
        grammar_text = grammar_file.read()
    return metamodel_from_str(lang_desc=grammar_text,
                              file_name=file_name,
                              **kwargs)
2.794861
2.901251
0.96333
def _enter_namespace(self, namespace_name):
    """
    A namespace is usually an absolute file name of the grammar.
    A special namespace '__base__' is used for BASETYPE namespace.
    """
    if namespace_name not in self.namespaces:
        self.namespaces[namespace_name] = {}

        # BASETYPE namespace is imported in each namespace
        # as the first namespace to be searched.
        self._imported_namespaces[namespace_name] = \
            [self.namespaces['__base__']]

    # Re-entering an existing namespace only pushes it on the stack.
    self._namespace_stack.append(namespace_name)
5.753483
3.934246
1.462411
def _new_import(self, import_name):
    """
    Starts a new import.

    Args:
        import_name(str): A relative import in the dot syntax
            (e.g. "first.second.expressions")
    """
    # Import can't be used if meta-model is loaded from string
    assert self.root_path is not None, \
        '"import" statement can not be used if meta-model is ' \
        'loaded from string.'

    # Find the absolute file name of the import based on the relative
    # import_name and current namespace
    current_namespace = self._namespace_stack[-1]
    if '.' in current_namespace:
        root_namespace = current_namespace.rsplit('.', 1)[0]
        import_name = "%s.%s" % (root_namespace, import_name)
    import_file_name = "%s.tx" % os.path.join(self.root_path,
                                              *import_name.split("."))

    if import_name not in self.namespaces:
        # Load the imported grammar into this same metamodel under a
        # fresh namespace.
        self._enter_namespace(import_name)
        if self.debug:
            self.dprint("*** IMPORTING FILE: %s" % import_file_name)
        metamodel_from_file(import_file_name, metamodel=self)
        self._leave_namespace()

    # Add the import to the imported_namespaces for current namespace
    # so that resolving of current grammar searches imported grammars
    # in the order of import
    self._imported_namespaces[current_namespace].append(
        self.namespaces[import_name])
4.363936
4.221105
1.033837
def _new_class(self, name, peg_rule, position, position_end=None,
               inherits=None, root=False, rule_type=RULE_MATCH):
    """
    Creates a new class with the given name in the current namespace.

    Args:
        name(str): The name of the class.
        peg_rule(ParserExpression): An arpeggio peg rule used to match
            this class.
        position(int): A position in the input where class is defined.
        root(bool): Is this class a root class of the metamodel.
        rule_type: The type of the rule this meta-class is for. One of
            RULE_COMMON, RULE_ABSTRACT or RULE_MATCH.
    """

    class TextXMetaClass(type):
        def __repr__(cls):
            return '<textx:{} class at {}>'.format(cls._tx_fqn, id(cls))

    @add_metaclass(TextXMetaClass)
    class TextXClass(object):
        def __repr__(self):
            if hasattr(self, 'name'):
                # NOTE(review): formats with the closure variable `name`
                # (the rule name captured at creation), not
                # type(self).__name__ — confirm intended.
                return "<{}:{}>".format(name, self.name)
            else:
                return "<textx:{} instance at {}>"\
                    .format(self._tx_fqn, hex(id(self)))

    cls = TextXClass
    cls.__name__ = name

    self._init_class(cls, peg_rule, position, position_end, inherits,
                     root, rule_type)

    return cls
3.534596
3.576303
0.988338
def _init_class(self, cls, peg_rule, position, position_end=None,
                inherits=None, root=False, rule_type=RULE_MATCH):
    """
    Setup meta-class special attributes, namespaces etc.
    This is called both for textX created classes as well as user classes.
    """
    # Back-reference to the owning meta-model.
    cls._tx_metamodel = self

    # Attribute information (MetaAttr instances) keyed by name.
    cls._tx_attrs = OrderedDict()

    # A list of inheriting classes
    cls._tx_inh_by = inherits if inherits else []

    cls._tx_position = position
    cls._tx_position_end = \
        position if position_end is None else position_end

    # The type of the rule this meta-class results from.
    # There are three rule types: common, abstract and match
    # Base types are match rules.
    cls._tx_type = rule_type

    # Link the class and its PEG rule both ways.
    cls._tx_peg_rule = peg_rule
    if peg_rule:
        peg_rule._tx_class = cls

    # Push this class and PEG rule in the current namespace
    current_namespace = self.namespaces[self._namespace_stack[-1]]
    cls._tx_fqn = self._cls_fqn(cls)
    current_namespace[cls.__name__] = cls

    if root:
        self.rootcls = cls
5.552593
5.252812
1.05707
def _cls_fqn(self, cls):
    """
    Returns fully qualified name for the class based on current
    namespace and the class name.
    """
    namespace = self._namespace_stack[-1]
    # The base namespace (and the unset one) contributes no prefix.
    if namespace is None or namespace == '__base__':
        return cls.__name__
    return '{}.{}'.format(namespace, cls.__name__)
4.694866
3.957639
1.186279
def _init_obj_attrs(self, obj, user=False):
    """
    Initialize obj attributes.

    Args:
        obj(object): A python object to set attributes to.
        user(bool): If this object is a user object mangle attribute names.
    """
    for attr in obj.__class__._tx_attrs.values():
        if user:
            # Mangle name to prevent name clashing
            attr_name = "_txa_%s" % attr.name
        else:
            attr_name = attr.name

        if attr.mult in [MULT_ZEROORMORE, MULT_ONEORMORE]:
            # list
            setattr(obj, attr_name, [])
        elif attr.cls.__name__ in BASE_TYPE_NAMES:
            # Instantiate base python type
            if self.auto_init_attributes:
                setattr(obj, attr_name,
                        python_type(attr.cls.__name__)())
            else:
                # See https://github.com/textX/textX/issues/11
                if attr.bool_assignment:
                    # Only ?= assignments shall have default
                    # value of False.
                    setattr(obj, attr_name, False)
                else:
                    # Set base type attribute to None initially
                    # in order to be able to detect if an optional
                    # values are given in the model. Default values
                    # can be specified using object processors.
                    setattr(obj, attr_name, None)
        else:
            # Reference to other obj
            setattr(obj, attr_name, None)
6.239015
6.099601
1.022856
def _new_cls_attr(self, clazz, name, cls=None, mult=MULT_ONE, cont=True,
                  ref=False, bool_assignment=False, position=0):
    """Creates new meta attribute of this class."""
    new_attr = MetaAttr(name, cls, mult, cont, ref, bool_assignment,
                        position)
    # Register the attribute on the target meta-class, keyed by name.
    clazz._tx_attrs[name] = new_attr
    return new_attr
4.242287
4.212953
1.006963
def convert(self, value, _type):
    """Convert instances of textx types and match rules to python types."""
    # Fall back to the identity function for unknown types.
    convertor = self.type_convertors.get(_type, lambda x: x)
    return convertor(value)
5.872494
4.785534
1.227134
def model_from_str(self, model_str, debug=None,
                   pre_ref_resolution_callback=None):
    """
    Instantiates model from the given string.

    :param pre_ref_resolution_callback: called before references are
        resolved. This can be useful to manage models distributed across
        files (scoping)
    """
    if type(model_str) is not text:
        raise TextXError("textX accepts only unicode strings.")

    # Clone the blueprint parser so parser state is per-model.
    model = self._parser_blueprint.clone().get_model_from_str(
        model_str, debug=debug,
        pre_ref_resolution_callback=pre_ref_resolution_callback)

    # Run registered model processors on the finished model.
    for p in self._model_processors:
        p(model, self)

    return model
5.028049
5.34668
0.940406
def internal_model_from_file(
        self, file_name, encoding='utf-8', debug=None,
        pre_ref_resolution_callback=None, is_main_model=True):
    """
    Instantiates model from the given file.

    :param pre_ref_resolution_callback: called before references are
        resolved. This can be useful to manage models distributed across
        files (scoping)
    """
    model = None
    callback = pre_ref_resolution_callback
    if hasattr(self, "_tx_model_repository"):
        # metamodel has a global repo
        if not callback:
            def _pre_ref_resolution_callback(other_model):
                # Register each newly loaded model in the shared global
                # repository before its references get resolved.
                from textx.scoping import GlobalModelRepository
                filename = other_model._tx_filename
                assert filename
                # print("METAMODEL PRE-CALLBACK{}".format(filename))
                other_model._tx_model_repository = GlobalModelRepository(
                    self._tx_model_repository.all_models)
                self._tx_model_repository.all_models\
                    .filename_to_model[filename] = other_model

            callback = _pre_ref_resolution_callback
        if self._tx_model_repository.all_models.has_model(file_name):
            # Already loaded — reuse the cached model.
            model = self._tx_model_repository.all_models\
                .filename_to_model[file_name]

    if not model:
        # model not present (from global repo) -> load it
        model = self._parser_blueprint.clone().get_model_from_file(
            file_name, encoding, debug=debug,
            pre_ref_resolution_callback=callback,
            is_main_model=is_main_model)

    # Run registered model processors on the finished model.
    for p in self._model_processors:
        p(model, self)

    return model
3.62466
3.793835
0.955408
def register_obj_processors(self, obj_processors):
    """
    Object processors are callables that will be called after each
    successful model object construction.
    Those callables receive model object as its parameter.
    Registration of new object processors will replace previous.

    Args:
        obj_processors(dict): A dictionary where key=class name,
            value=callable
    """
    self.obj_processors = obj_processors
    # NOTE(review): this merges into type_convertors rather than
    # replacing, so convertors from a previous registration stay active —
    # confirm that is intended given the docstring says "replace".
    self.type_convertors.update(obj_processors)
4.986892
7.263087
0.686608
def interpret(self):
    """
    Main interpreter loop.

    Reads user input until 'q' is entered; a number selects the
    corresponding event (1-based) from self.model.events and dispatches
    it via self.event().
    """
    self.print_menu()
    while True:
        try:
            choice = input()
            if choice == 'q':
                return
            event = self.model.events[int(choice) - 1]
        except Exception:
            print('Invalid input')
            # Fix: previously execution fell through and dispatched
            # self.event() with a stale/undefined value after bad input.
            continue
        self.event(event)
        self.print_menu()
4.264306
3.941917
1.081785
def find_font(font_name):
    """Return the font and a Boolean indicating if the match is exact."""
    if font_name in STANDARD_FONT_NAMES:
        return font_name, True
    elif font_name in _registered_fonts:
        # Cached result from a previous lookup.
        return font_name, _registered_fonts[font_name]

    NOT_FOUND = (None, False)
    try:
        # Try first to register the font if it exists as ttf,
        # based on ReportLab font search.
        registerFont(TTFont(font_name, '%s.ttf' % font_name))
        _registered_fonts[font_name] = True
        return font_name, True
    except TTFError:
        # Try searching with Fontconfig
        try:
            pipe = subprocess.Popen(
                ['fc-match', '-s', '--format=%{file}\\n', font_name],
                stdout=subprocess.PIPE, stderr=subprocess.PIPE,
            )
            output = pipe.communicate()[0].decode(
                sys.getfilesystemencoding())
            font_path = output.split('\n')[0]
        except OSError:
            # fc-match is not available on this system.
            return NOT_FOUND
        try:
            registerFont(TTFont(font_name, font_path))
        except TTFError:
            return NOT_FOUND
        # Fontconfig may return a default font totally unrelated with
        # font_name
        exact = font_name.lower() in os.path.basename(font_path).lower()
        _registered_fonts[font_name] = exact
        return font_name, exact
3.372664
3.290298
1.025033
"Convert an SVG file to an RLG Drawing object." # unzip .svgz file into .svg unzipped = False if isinstance(path, str) and os.path.splitext(path)[1].lower() == ".svgz": with gzip.open(path, 'rb') as f_in, open(path[:-1], 'wb') as f_out: shutil.copyfileobj(f_in, f_out) path = path[:-1] unzipped = True svg_root = load_svg_file(path) if svg_root is None: return # convert to a RLG drawing svgRenderer = SvgRenderer(path, **kwargs) drawing = svgRenderer.render(svg_root) # remove unzipped .svgz file (.svg) if unzipped: os.remove(path) return drawing
def svg2rlg(path, **kwargs)
Convert an SVG file to an RLG Drawing object.
2.942146
2.808868
1.047449
def monkeypatch_reportlab():
    """
    https://bitbucket.org/rptlab/reportlab/issues/95/
    ReportLab always use 'Even-Odd' filling mode for paths, this patch
    forces RL to honor the path fill rule mode (possibly 'Non-Zero
    Winding') instead.
    """
    from reportlab.pdfgen.canvas import Canvas
    from reportlab.graphics import shapes

    original_renderPath = shapes._renderPath

    def patchedRenderPath(path, drawFuncs, **kwargs):
        # Patched method to transfer fillRule from Path to PDFPathObject
        # Get back from bound method to instance
        try:
            drawFuncs[0].__self__.fillMode = path._fillRule
        except AttributeError:
            pass
        return original_renderPath(path, drawFuncs, **kwargs)

    shapes._renderPath = patchedRenderPath

    original_drawPath = Canvas.drawPath

    def patchedDrawPath(self, path, **kwargs):
        # Temporarily switch the canvas fill mode to the path's own,
        # then restore the previous mode.
        current = self._fillMode
        if hasattr(path, 'fillMode'):
            self._fillMode = path.fillMode
        else:
            self._fillMode = FILL_NON_ZERO
        original_drawPath(self, path, **kwargs)
        self._fillMode = current

    Canvas.drawPath = patchedDrawPath
3.57176
3.397208
1.051381
def parseMultiAttributes(self, line):
    """
    Try parsing compound attribute string.
    Return a dictionary with single attributes in 'line'.
    """
    parsed = {}
    for chunk in line.split(';'):
        chunk = chunk.strip()
        if not chunk:
            continue
        key, value = chunk.split(':')
        parsed[key.strip()] = value.strip()
    return parsed
2.303242
2.227003
1.034234
def findAttr(self, svgNode, name):
    """
    Search an attribute with some name in some node or above.

    First the node is searched, then its style attribute, then the search
    continues in the node's parent node. If no such attribute is found,
    '' is returned.
    """
    # This needs also to lookup values like "url(#SomeName)"...
    if self.css_rules is not None \
            and not svgNode.attrib.get('__rules_applied', False):
        # Apply CSS rules lazily, only once per node.
        if isinstance(svgNode, NodeTracker):
            svgNode.apply_rules(self.css_rules)
        else:
            ElementWrapper(svgNode).apply_rules(self.css_rules)

    attr_value = svgNode.attrib.get(name, '').strip()
    if attr_value and attr_value != "inherit":
        return attr_value

    style = svgNode.attrib.get("style")
    if style:
        styles = self.parseMultiAttributes(style)
        if name in styles:
            return styles[name]

    parent = svgNode.getparent()
    if parent is not None:
        return self.findAttr(parent, name)
    return ''
4.269323
4.164133
1.025261
"Return a dictionary of all attributes of svgNode or those inherited by it." dict = {} if node_name(svgNode.getparent()) == 'g': dict.update(self.getAllAttributes(svgNode.getparent())) style = svgNode.attrib.get("style") if style: d = self.parseMultiAttributes(style) dict.update(d) for key, value in svgNode.attrib.items(): if key != "style": dict[key] = value return dict
def getAllAttributes(self, svgNode)
Return a dictionary of all attributes of svgNode or those inherited by it.
3.767672
3.086727
1.220604
def convertTransform(self, svgAttr):
    """
    Parse transform attribute string.

    E.g. "scale(2) translate(10,20)"
         -> [("scale", 2), ("translate", (10,20))]
    """
    source = svgAttr.strip()
    blanked = source[:]

    # Positions of every '(' and ')' in the string, in order.
    paren_positions = [i for i, ch in enumerate(source) if ch in "()"]

    # Parse each "(...)" group into a float or a tuple of floats, and
    # blank the group out of `blanked` so only operator names remain.
    arguments = []
    for k in range(0, len(paren_positions), 2):
        start, end = paren_positions[k], paren_positions[k + 1]
        inner = source[start + 1:end].strip().replace(',', ' ')
        inner = re.sub("[ ]+", ',', inner)
        try:
            if ',' in inner:
                arguments.append(
                    tuple(float(num) for num in inner.split(',')))
            else:
                arguments.append(float(inner))
        except ValueError:
            continue
        blanked = blanked[:start] + ' ' * (end - start + 1) \
            + blanked[end + 1:]

    names = blanked.replace(',', ' ').split()
    if len(names) != len(arguments):
        logger.warning("Unable to parse transform expression '%s'" % svgAttr)
        return []

    return list(zip(names, arguments))
2.850268
2.771946
1.028255
"Convert length to points." text = svgAttr if not text: return 0.0 if ' ' in text.replace(',', ' ').strip(): logger.debug("Only getting first value of %s" % text) text = text.replace(',', ' ').split()[0] if text.endswith('%'): logger.debug("Fiddling length unit: %") return float(text[:-1]) / 100 * percentOf elif text.endswith("pc"): return float(text[:-2]) * pica elif text.endswith("pt"): return float(text[:-2]) * 1.25 elif text.endswith("em"): return float(text[:-2]) * em_base elif text.endswith("px"): return float(text[:-2]) if "ex" in text: logger.warning("Ignoring unit ex") text = text.replace("ex", '') text = text.strip() length = toLength(text) # this does the default measurements such as mm and cm return length
def convertLength(self, svgAttr, percentOf=100, em_base=12)
Convert length to points.
4.186055
4.008618
1.044264
def convertLengthList(self, svgAttr):
    "Convert a list of lengths."
    return [self.convertLength(token)
            for token in self.split_attr_list(svgAttr)]
6.776554
5.870009
1.154437
"Convert string to a RL color object." # fix it: most likely all "web colors" are allowed predefined = "aqua black blue fuchsia gray green lime maroon navy " predefined = predefined + "olive orange purple red silver teal white yellow " predefined = predefined + "lawngreen indianred aquamarine lightgreen brown" # This needs also to lookup values like "url(#SomeName)"... text = svgAttr if not text or text == "none": return None if text in predefined.split(): return self.color_converter(getattr(colors, text)) elif text == "currentColor": return "currentColor" elif len(text) == 7 and text[0] == '#': return self.color_converter(colors.HexColor(text)) elif len(text) == 4 and text[0] == '#': return self.color_converter(colors.HexColor('#' + 2*text[1] + 2*text[2] + 2*text[3])) elif text.startswith('rgb') and '%' not in text: t = text[3:].strip('()') tup = [h[2:] for h in [hex(int(num)) for num in t.split(',')]] tup = [(2 - len(h)) * '0' + h for h in tup] col = "#%s%s%s" % tuple(tup) return self.color_converter(colors.HexColor(col)) elif text.startswith('rgb') and '%' in text: t = text[3:].replace('%', '').strip('()') tup = (float(val)/100.0 for val in t.split(',')) return self.color_converter(colors.Color(*tup)) logger.warning("Can't handle color: %s" % text) return None
def convertColor(self, svgAttr)
Convert string to a RL color object.
3.844406
3.638626
1.056554
def get_clippath(self, node):
    """
    Return the clipping Path object referenced by the node 'clip-path'
    attribute, if any.
    """
    def get_path_from_node(node):
        # Find the first 'path' element below `node` and convert it.
        for child in node.getchildren():
            if node_name(child) == 'path':
                group = self.shape_converter.convertShape(
                    'path', NodeTracker(child))
                return group.contents[-1]
            else:
                # NOTE(review): recurses into the first non-path child and
                # returns immediately; later siblings are never inspected —
                # confirm intended.
                return get_path_from_node(child)

    clip_path = node.getAttribute('clip-path')
    if clip_path:
        m = re.match(r'url\(#([^\)]*)\)', clip_path)
        if m:
            ref = m.groups()[0]
            if ref in self.definitions:
                path = get_path_from_node(self.definitions[ref])
                if path:
                    path = ClippingPath(copy_from=path)
                    return path
3.725276
3.507956
1.061951
def xlink_href_target(self, node, group=None):
    """
    Return either:
        - a tuple (renderer, node) when the xlink:href attribute targets
          a vector file or node
        - the path to an image file for any raster image targets
        - None if any problem occurs
    """
    xlink_href = node.attrib.get('{http://www.w3.org/1999/xlink}href')
    if not xlink_href:
        return None

    # First handle any raster embedded image data
    match = re.match(r"^data:image/(jpeg|png);base64", xlink_href)
    if match:
        img_format = match.groups()[0]
        # Fix: base64.decodestring was removed in Python 3.9;
        # base64.decodebytes is the supported equivalent.
        image_data = base64.decodebytes(
            xlink_href[(match.span(0)[1] + 1):].encode('ascii'))
        file_indicator, path = tempfile.mkstemp(suffix='.%s' % img_format)
        with open(path, 'wb') as fh:
            fh.write(image_data)
        # Close temporary file (as opened by tempfile.mkstemp)
        os.close(file_indicator)
        # this needs to be removed later, not here...
        # if exists(path): os.remove(path)
        return path

    # From here, we can assume this is a path.
    if '#' in xlink_href:
        iri, fragment = xlink_href.split('#', 1)
    else:
        iri, fragment = xlink_href, None

    if iri:
        # Only local relative paths are supported yet
        if not isinstance(self.source_path, str):
            logger.error(
                "Unable to resolve image path '%s' as the SVG source is not "
                "a file system path." % iri
            )
            return None
        path = os.path.normpath(
            os.path.join(os.path.dirname(self.source_path), iri))
        if not os.access(path, os.R_OK):
            return None
        if path == self.source_path:
            # Self-referencing, ignore the IRI part
            iri = None

    if iri:
        if path.endswith('.svg'):
            if path in self._parent_chain:
                logger.error("Circular reference detected in file.")
                raise CircularRefError()
            if path not in self._external_svgs:
                self._external_svgs[path] = ExternalSVG(path, self)
            ext_svg = self._external_svgs[path]
            if ext_svg.root_node is not None:
                if fragment:
                    ext_frag = ext_svg.get_fragment(fragment)
                    if ext_frag is not None:
                        return ext_svg.renderer, ext_frag
                else:
                    return ext_svg.renderer, ext_svg.root_node
        else:
            # A raster image path
            try:
                # This will catch invalid images
                PDFImage(path, 0, 0)
            except IOError:
                logger.error(
                    "Unable to read the image %s. Skipping..." % path)
                return None
            return path

    elif fragment:
        # A pointer to an internal definition
        if fragment in self.definitions:
            return self, self.definitions[fragment]
        else:
            # The missing definition should appear later in the file
            self.waiting_use_nodes[fragment].append((node, group))
            return DELAYED
3.591318
3.476697
1.032968
def clean_text(self, text, preserve_space):
    """
    Text cleaning as per https://www.w3.org/TR/SVG/text.html#WhiteSpace
    """
    if text is None:
        return
    if preserve_space:
        # Newlines and tabs become spaces; runs of spaces are kept.
        text = text.replace('\r\n', ' ').replace('\n', ' ') \
                   .replace('\t', ' ')
    else:
        # Newlines are removed; tabs become spaces; surrounding space is
        # stripped and runs of spaces collapse to one.
        text = text.replace('\r\n', '').replace('\n', '') \
                   .replace('\t', ' ')
        text = text.strip()
        while '  ' in text:
            text = text.replace('  ', ' ')
    return text
2.01261
1.91178
1.052741
def applyTransformOnGroup(self, transform, group):
    """
    Apply an SVG transformation to a RL Group shape.

    The transformation is the value of an SVG transform attribute
    like transform="scale(1, -1) translate(10, 30)".

    rotate(<angle> [<cx> <cy>]) is equivalent to:
      translate(<cx> <cy>) rotate(<angle>) translate(-<cx> -<cy>)
    """
    tr = self.attrConverter.convertTransform(transform)
    for op, values in tr:
        if op == "scale":
            if not isinstance(values, tuple):
                # A single scale factor applies to both axes.
                values = (values, values)
            group.scale(*values)
        elif op == "translate":
            if isinstance(values, (int, float)):
                # From the SVG spec: If <ty> is not provided, it is
                # assumed to be zero.
                values = values, 0
            group.translate(*values)
        elif op == "rotate":
            # NOTE(review): a 1-tuple is passed through to group.rotate()
            # unchanged, and a 2-tuple is silently ignored — confirm
            # intended.
            if not isinstance(values, tuple) or len(values) == 1:
                group.rotate(values)
            elif len(values) == 3:
                angle, cx, cy = values
                group.translate(cx, cy)
                group.rotate(angle)
                group.translate(-cx, -cy)
        elif op == "skewX":
            group.skew(values, 0)
        elif op == "skewY":
            group.skew(0, values)
        elif op == "matrix":
            group.transform = values
        else:
            logger.debug("Ignoring transform: %s %s" % (op, values))
2.564492
2.326692
1.102205
def applyStyleOnShape(self, shape, node, only_explicit=False):
    """
    Apply styles from an SVG element to an RLG shape.

    If only_explicit is True, only attributes really present are applied.
    """
    # RLG-specific: all RLG shapes
    # tuple format: (svgAttr, rlgAttr, converter, default)
    mappingN = (
        ("fill", "fillColor", "convertColor", "black"),
        ("fill-opacity", "fillOpacity", "convertOpacity", 1),
        ("fill-rule", "_fillRule", "convertFillRule", "nonzero"),
        ("stroke", "strokeColor", "convertColor", "none"),
        ("stroke-width", "strokeWidth", "convertLength", "1"),
        ("stroke-opacity", "strokeOpacity", "convertOpacity", 1),
        ("stroke-linejoin", "strokeLineJoin", "convertLineJoin", "0"),
        ("stroke-linecap", "strokeLineCap", "convertLineCap", "0"),
        ("stroke-dasharray", "strokeDashArray", "convertDashArray", "none"),
    )
    mappingF = (
        ("font-family", "fontName", "convertFontFamily", DEFAULT_FONT_NAME),
        ("font-size", "fontSize", "convertLength", "12"),
        ("text-anchor", "textAnchor", "id", "start"),
    )

    if shape.__class__ == Group:
        # Recursively apply style on Group subelements
        for subshape in shape.contents:
            self.applyStyleOnShape(subshape, node,
                                   only_explicit=only_explicit)
        return

    ac = self.attrConverter
    for mapping in (mappingN, mappingF):
        # Font attributes only make sense on String shapes.
        if shape.__class__ != String and mapping == mappingF:
            continue
        for (svgAttrName, rlgAttr, func, default) in mapping:
            svgAttrValue = ac.findAttr(node, svgAttrName)
            if svgAttrValue == '':
                if only_explicit:
                    continue
                else:
                    svgAttrValue = default
            if svgAttrValue == "currentColor":
                # Resolve against the inherited 'color' attribute.
                svgAttrValue = ac.findAttr(node.getparent(), "color") \
                    or default
            try:
                meth = getattr(ac, func)
                setattr(shape, rlgAttr, meth(svgAttrValue))
            except (AttributeError, KeyError, ValueError):
                # Unconvertible/unsupported values are skipped silently.
                pass
    if getattr(shape, 'fillOpacity', None) is not None and shape.fillColor:
        shape.fillColor.alpha = shape.fillOpacity
3.025716
2.894875
1.045197
def split_floats(op, min_num, value):
    """
    Split `value`, a list of numbers as a string, to a list of float
    numbers.

    Also optionally insert a `l` or `L` operation depending on the
    operation and the length of values.
    Example: with op='m' and value='10,20 30,40' the returned value will
    be ['m', [10.0, 20.0], 'l', [30.0, 40.0]]
    """
    numbers = [float(tok) for tok in
               re.findall(r'(-?\d*\.?\d*(?:e[+-]\d+)?)', value) if tok]
    result = []
    current_op = op
    for start in range(0, len(numbers), min_num):
        if start > 0 and current_op in ('m', 'M'):
            # An implicit moveto continuation becomes a lineto.
            current_op = 'L' if current_op == 'M' else 'l'
        result.append(current_op)
        result.append(numbers[start:start + min_num])
    return result
3.151099
2.864521
1.100044
def normalise_svg_path(attr):
    """
    Normalise SVG path.

    This basically introduces operator codes for multi-argument
    parameters.  Also, it fixes sequences of consecutive M or m
    operators to MLLL... and mlll... operators.  It adds an empty list
    as argument for Z and z only in order to make the resulting list
    easier to iterate over.

    E.g. "M 10 20, M 20 20, L 30 40, 40 40, Z"
      -> ['M', [10, 20], 'L', [20, 20], 'L', [30, 40], 'L', [40, 40], 'Z', []]
    """
    # Minimum number of arguments expected for each operator code.
    arg_counts = {
        'A': 7, 'a': 7, 'Q': 4, 'q': 4, 'T': 2, 't': 2, 'S': 4, 's': 4,
        'M': 2, 'L': 2, 'm': 2, 'l': 2, 'H': 1, 'V': 1, 'h': 1, 'v': 1,
        'C': 6, 'c': 6, 'Z': 0, 'z': 0,
    }

    result = []
    op = None
    # Split into alternating operator / argument-string chunks.
    for chunk in re.split('([achlmqstvz])', attr.strip(), flags=re.I):
        if not chunk.strip():
            continue
        if chunk in arg_counts:
            # A moveto directly following a moveto becomes a lineto.
            if chunk == 'M' and op == 'M':
                op = 'L'
            elif chunk == 'm' and op == 'm':
                op = 'l'
            else:
                op = chunk
            if arg_counts[op] == 0:
                # Z/z take no arguments; add an empty list for uniformity.
                result.extend([op, []])
        else:
            result.extend(split_floats(op, arg_counts[op], chunk))
            op = result[-2]  # Remember the last operator actually emitted.
    return result
3.50549
3.254278
1.077194
def convert_quadratic_to_cubic_path(q0, q1, q2):
    """Convert a quadratic Bezier curve through q0, q1, q2 to a cubic one."""
    # The equivalent cubic keeps both endpoints; its inner control points
    # sit two thirds of the way from each endpoint towards q1.
    x1 = q0[0] + 2. / 3 * (q1[0] - q0[0])
    y1 = q0[1] + 2. / 3 * (q1[1] - q0[1])
    x2 = x1 + 1. / 3 * (q2[0] - q0[0])
    y2 = y1 + 1. / 3 * (q2[1] - q0[1])
    return q0, (x1, y1), (x2, y2), q2
1.657401
1.548058
1.070632
def end_point_to_center_parameters(x1, y1, x2, y2, fA, fS, rx, ry, phi=0):
    '''
    See http://www.w3.org/TR/SVG/implnote.html#ArcImplementationNotes F.6.5
    note that we reduce phi to zero outside this routine

    Convert an SVG elliptical arc from endpoint parameterisation
    (start point, end point, large-arc flag fA, sweep flag fS, radii
    rx/ry, x-axis rotation phi in degrees) to center parameterisation.
    Returns (cx, cy, rx, ry, -theta1, -dtheta) where (cx, cy) is the
    ellipse center, rx/ry the (possibly enlarged) radii, and theta1 /
    dtheta the start angle and angular sweep in degrees.
    '''
    rx = fabs(rx)
    ry = fabs(ry)

    # step 1: rotate and translate the chord midpoint into the
    # ellipse's own coordinate frame (x1d, y1d).
    if phi:
        phi_rad = radians(phi)
        sin_phi = sin(phi_rad)
        cos_phi = cos(phi_rad)
        tx = 0.5 * (x1 - x2)
        ty = 0.5 * (y1 - y2)
        x1d = cos_phi * tx - sin_phi * ty
        y1d = sin_phi * tx + cos_phi * ty
    else:
        x1d = 0.5 * (x1 - x2)
        y1d = 0.5 * (y1 - y2)

    # step 2
    # we need to calculate
    # (rx*rx*ry*ry-rx*rx*y1d*y1d-ry*ry*x1d*x1d)
    # -----------------------------------------
    #     (rx*rx*y1d*y1d+ry*ry*x1d*x1d)
    #
    # that is equivalent to
    #
    #          rx*rx*ry*ry
    # = -----------------------------  -    1
    #   (rx*rx*y1d*y1d+ry*ry*x1d*x1d)
    #
    #              1
    # = -------------------------------- - 1
    #   x1d*x1d/(rx*rx) + y1d*y1d/(ry*ry)
    #
    # = 1/r - 1
    #
    # it turns out r is what they recommend checking
    # for the negative radicand case
    r = x1d * x1d / (rx * rx) + y1d * y1d / (ry * ry)
    if r > 1:
        # Radii are too small to span the endpoints (F.6.6): scale them
        # up uniformly so the arc just fits.
        rr = sqrt(r)
        rx *= rr
        ry *= rr
        r = x1d * x1d / (rx * rx) + y1d * y1d / (ry * ry)
    elif r != 0:
        r = 1 / r - 1
    if -1e-10 < r < 0:
        # Clamp tiny negative values caused by rounding before sqrt.
        r = 0
    r = sqrt(r)
    if fA == fS:
        # The sign selects which of the two candidate centers is used.
        r = -r
    cxd = (r * rx * y1d) / ry
    cyd = -(r * ry * x1d) / rx

    # step 3: transform the center back to the original coordinate frame.
    if phi:
        cx = cos_phi * cxd - sin_phi * cyd + 0.5 * (x1 + x2)
        cy = sin_phi * cxd + cos_phi * cyd + 0.5 * (y1 + y2)
    else:
        cx = cxd + 0.5 * (x1 + x2)
        cy = cyd + 0.5 * (y1 + y2)

    # step 4: start angle theta1 and sweep dtheta (in degrees).
    theta1 = vector_angle((1, 0), ((x1d - cxd) / rx, (y1d - cyd) / ry))
    dtheta = vector_angle(
        ((x1d - cxd) / rx, (y1d - cyd) / ry),
        ((-x1d - cxd) / rx, (-y1d - cyd) / ry)
    ) % 360

    # Force the sweep's sign to match the sweep flag (F.6.5.6).
    if fS == 0 and dtheta > 0:
        dtheta -= 360
    elif fS == 1 and dtheta < 0:
        dtheta += 360

    return cx, cy, rx, ry, -theta1, -dtheta
2.33642
2.128386
1.097743
def socketio(request):
    """
    Socket.IO handler - maintains the lifecycle of a Socket.IO request,
    sending each of the events. Also handles adding/removing
    request/socket pairs to the CLIENTS dict which is used for sending
    on_finish events when the server stops.
    """
    context = {}
    socket = SocketIOChannelProxy(request.environ["socketio"])
    # Register the client triple so it can be cleaned up on shutdown.
    client_start(request, socket, context)
    try:
        if socket.on_connect():
            events.on_connect.send(request, socket, context)
        while True:
            messages = socket.recv()
            if not messages and not socket.connected():
                # Empty receive on a dead connection: session is over.
                events.on_disconnect.send(request, socket, context)
                break
            # Subscribe and unsubscribe messages are in two parts, the
            # name of either and the channel, so we use an iterator that
            # lets us jump a step in iteration to grab the channel name
            # for these.
            messages = iter(messages)
            for message in messages:
                if message == "__subscribe__":
                    message = messages.next()  # channel name (Python 2 iterator)
                    message_type = "subscribe"
                    socket.subscribe(message)
                    events.on_subscribe.send(request, socket, context, message)
                elif message == "__unsubscribe__":
                    message = messages.next()  # channel name
                    message_type = "unsubscribe"
                    socket.unsubscribe(message)
                    events.on_unsubscribe.send(request, socket, context, message)
                else:
                    # Socket.IO sends arrays as individual messages, so
                    # they're put into an object in socketio_scripts.html
                    # and given the __array__ key so that they can be
                    # handled consistently in the on_message event.
                    message_type = "message"
                    if message == "__array__":
                        message = messages.next()
                    events.on_message.send(request, socket, context, message)
                # Optionally log the handled message (format_log returns
                # None when logging is disabled).
                log_message = format_log(request, message_type, message)
                if log_message:
                    socket.handler.server.log.write(log_message)
    except Exception, exception:  # Python 2 syntax; broad catch feeds on_error
        from traceback import print_exc
        print_exc()
        events.on_error.send(request, socket, context, exception)
    # Always run cleanup, whether the loop ended normally or via error.
    client_end(request, socket, context)
    return HttpResponse("")
4.072297
4.014699
1.014347
def client_start(request, socket, context):
    """Adds the client triple to CLIENTS."""
    triple = (request, socket, context)
    CLIENTS[socket.session.session_id] = triple
8.743094
5.890088
1.484374
def client_end(request, socket, context):
    """
    Handles cleanup when a session ends for the given client triple.
    Sends unsubscribe and finish events, actually unsubscribes from
    any channels subscribed to, and removes the client triple from
    CLIENTS.
    """
    # Fire unsubscribe events while the channel list is still intact so
    # that the finish event can still match channels if applicable.
    for channel in socket.channels:
        events.on_unsubscribe.send(request, socket, context, channel)
    events.on_finish.send(request, socket, context)
    # Now really unsubscribe; iterate over a copy since unsubscribe()
    # mutates socket.channels.
    for channel in list(socket.channels):
        socket.unsubscribe(channel)
    # Finally drop the client triple from the registry.
    del CLIENTS[socket.session.session_id]
8.346941
5.590846
1.492966
def client_end_all():
    """
    Performs cleanup on all clients - called by runserver_socketio when
    the server is shut down or reloaded.
    """
    # Copy the values first: client_end() deletes entries from CLIENTS.
    for triple in list(CLIENTS.values()):
        client_end(*triple)
12.547835
12.608103
0.99522
def subscribe(self, channel):
    """
    Add the channel to this socket's channels, and to the list of
    subscribed session IDs for the channel. Return False if already
    subscribed, otherwise True.
    """
    already_subscribed = channel in self.channels
    if already_subscribed:
        return False
    session_id = self.socket.session.session_id
    CHANNELS[channel].append(session_id)
    self.channels.append(channel)
    return True
4.541121
3.221674
1.409553
def unsubscribe(self, channel):
    """
    Remove the channel from this socket's channels, and from the list
    of subscribed session IDs for the channel. Return False if not
    subscribed, otherwise True.
    """
    session_id = self.socket.session.session_id
    try:
        # Either remove() raises ValueError when not subscribed.
        CHANNELS[channel].remove(session_id)
        self.channels.remove(channel)
    except ValueError:
        return False
    return True
5.347292
3.669023
1.457416
def broadcast_channel(self, message, channel=None):
    """
    Send the given message to all subscribers for the channel given.
    If no channel is given, send to the subscribers for all the
    channels that this socket is subscribed to.
    """
    targets = self.channels if channel is None else [channel]
    own_id = self.socket.session.session_id
    sessions = self.socket.handler.server.sessions
    for chan in targets:
        for subscriber in CHANNELS[chan]:
            # Never echo the message back to this socket itself.
            if subscriber == own_id:
                continue
            self._write(message, sessions[subscriber])
4.408431
3.968517
1.110851
def send_and_broadcast_channel(self, message, channel=None):
    """
    Shortcut for a socket to broadcast to all sockets subscribed to a
    channel, and itself.
    """
    # Deliver to this socket first, then to the other subscribers.
    self.send(message)
    self.broadcast_channel(message, channel)
4.337634
3.550968
1.221536
def message(request, socket, context, message):
    """
    Event handler for a room receiving a message. First validates a
    joining user's name and sends them the list of users.
    """
    room = get_object_or_404(ChatRoom, id=message["room"])
    if message["action"] == "start":
        # A user is joining: sanitise the name and require it to be
        # unique within the room.
        name = strip_tags(message["name"])
        user, created = room.users.get_or_create(name=name)
        if not created:
            # Name already taken in this room.
            socket.send({"action": "in-use"})
        else:
            context["user"] = user
            # Send the joiner the list of everyone else in the room.
            users = [u.name for u in room.users.exclude(id=user.id)]
            socket.send({"action": "started", "users": users})
            # Remember which socket session this user belongs to.
            user.session = socket.session.session_id
            user.save()
            joined = {"action": "join", "name": user.name, "id": user.id}
            socket.send_and_broadcast_channel(joined)
    else:
        try:
            user = context["user"]
        except KeyError:
            # Ignore messages from sockets that never joined a room.
            return
        if message["action"] == "message":
            message["message"] = strip_tags(message["message"])
            message["name"] = user.name
            socket.send_and_broadcast_channel(message)
2.62002
2.577493
1.0165
def finish(request, socket, context):
    """
    Event handler for a socket session ending in a room. Broadcast
    the user leaving and delete them from the DB.
    """
    if "user" not in context:
        # The socket never joined a room; nothing to clean up.
        return
    user = context["user"]
    # Tell everyone else in the room the user left, then remove them.
    left = {"action": "leave", "name": user.name, "id": user.id}
    socket.broadcast_channel(left)
    user.delete()
5.575529
4.306678
1.294624
def send(session_id, message):
    """
    Send a message to the socket for the given session ID.
    """
    if session_id not in CLIENTS:
        raise NoSocket("There is no socket with the session ID: " + session_id)
    # The client triple is (request, socket, context); index 1 is the socket.
    CLIENTS[session_id][1].send(message)
4.745225
4.351968
1.090363
def broadcast(message):
    """
    Find the first socket and use it to broadcast to all sockets
    including the socket itself.
    """
    if not CLIENTS:
        raise NoSocket("There are no clients.")
    # Any socket can broadcast; grab the first registered client triple.
    first_socket = list(CLIENTS.values())[0][1]
    first_socket.send_and_broadcast(message)
8.688118
7.309945
1.188534
def broadcast_channel(message, channel):
    """
    Find the first socket for the given channel, and use it to
    broadcast to the channel, including the socket itself.
    """
    subscribers = CHANNELS.get(channel, [])
    try:
        # First subscriber's session ID -> its client triple -> socket.
        socket = CLIENTS[subscribers[0]][1]
    except (IndexError, KeyError):
        raise NoSocket("There are no clients on the channel: " + channel)
    socket.send_and_broadcast_channel(message, channel)
6.889958
5.495297
1.253792
def format_log(request, message_type, message):
    """
    Formats a log message similar to gevent's pywsgi request logging.
    """
    from django_socketio.settings import MESSAGE_LOG_FORMAT
    if MESSAGE_LOG_FORMAT is None:
        # Logging is disabled.
        return None
    # Drop sub-second precision from the timestamp.
    now = datetime.now().replace(microsecond=0)
    # Expose the WSGI environ plus our own keys to the format string.
    args = dict(request.META)
    args.update(TYPE=message_type, MESSAGE=message, TIME=now)
    return (MESSAGE_LOG_FORMAT % args) + "\n"
4.270058
4.138197
1.031864