code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
def run(path, code=None, params=None, ignore=None, select=None, **meta):
    """Check code with Radon.

    :return list: List of errors.
    """
    complexity = params.get('complexity', 10)
    no_assert = params.get('no_assert', False)
    show_closures = params.get('show_closures', False)
    visitor = ComplexityVisitor.from_code(code, no_assert=no_assert)
    blocks = visitor.blocks
    if show_closures:
        blocks = add_inner_blocks(blocks)
    # BUG FIX: iterate over `blocks` (which includes inner blocks when
    # `show_closures` is set), not `visitor.blocks` — otherwise the
    # `add_inner_blocks` result above was silently discarded.
    return [
        {'lnum': block.lineno, 'col': block.col_offset, 'type': 'R',
         'number': 'R709',
         'text': 'R701: %s is too complex %d' % (block.name, block.complexity)}
        for block in blocks if block.complexity > complexity
    ]
4.262563
4.070472
1.047191
def run(command):
    """Run a shell command and capture its output.

    DOC FIX: the previous docstring claimed a single stdout string was
    returned, but the function returns a 3-tuple.

    :param str command: Command line; split on whitespace, so arguments
        containing spaces are not supported.
    :return tuple: (returncode, stdout lines, stderr lines) — the line
        lists contain stripped bytes.
    """
    p = Popen(command.split(), stdout=PIPE, stderr=PIPE)
    (stdout, stderr) = p.communicate()
    return (
        p.returncode,
        [line.strip() for line in stdout.splitlines()],
        [line.strip() for line in stderr.splitlines()],
    )
1.942008
2.352074
0.825658
def git_hook(error=True):
    """Run pylama against the files staged for the pending git commit."""
    _, staged, _ = run("git diff-index --cached --name-only HEAD")
    options = parse_options()
    setup_logger(options)
    # git emits byte filenames; normalise to text for the current runtime
    to_text = (lambda raw: raw.decode('utf-8')) if sys.version_info >= (3,) else str
    candidates = [to_text(raw) for raw in staged]
    if candidates:
        process_paths(options, candidates=candidates, error=error)
4.325484
4.111821
1.051963
def hg_hook(ui, repo, node=None, **kwargs):
    """Run pylama after a mercurial commit."""
    collected = set()
    paths = []
    if len(repo):
        # Walk every revision from the committed node to the tip and gather
        # each touched file exactly once, skipping files that no longer exist.
        for rev in range(repo[node], len(repo)):
            for name in repo[rev].files():
                fullpath = op.join(repo.root, name)
                if fullpath not in collected and op.exists(fullpath):
                    collected.add(fullpath)
                    paths.append(fullpath)
    options = parse_options()
    setup_logger(options)
    if paths:
        process_paths(options, candidates=paths)
4.480775
4.234087
1.058262
def install_git(path):
    """Install a pre-commit hook in a Git repository.

    BUG FIX: `fd.write()` was called with no argument, which raises
    TypeError; write the hook script content.  484 == 0o744 makes the
    hook executable by its owner.
    """
    hook = op.join(path, 'pre-commit')
    with open(hook, 'w') as fd:
        fd.write("""#!/usr/bin/env python
import sys
from pylama.hooks import git_hook

if __name__ == '__main__':
    sys.exit(git_hook())
""")
    chmod(hook, 484)
5.617914
5.391912
1.041915
def install_hg(path):
    """Install commit hooks in a Mercurial repository's hgrc.

    FIXES: the previous version leaked three file handles (bare `open`
    without close) and used `ConfigParser.readfp`, which was removed in
    Python 3.12.  `ConfigParser.read` and `with` blocks fix both.
    """
    hook = op.join(path, 'hgrc')
    if not op.isfile(hook):
        open(hook, 'w+').close()
    c = ConfigParser()
    # read() accepts a filename and closes the file itself
    c.read(hook)
    if not c.has_section('hooks'):
        c.add_section('hooks')
    if not c.has_option('hooks', 'commit'):
        c.set('hooks', 'commit', 'python:pylama.hooks.hg_hook')
    if not c.has_option('hooks', 'qrefresh'):
        c.set('hooks', 'qrefresh', 'python:pylama.hooks.hg_hook')
    with open(hook, 'w+') as fd:
        c.write(fd)
2.39806
2.371255
1.011304
def install_hook(path):
    """Detect the repository's SCM and install the matching hook."""
    git_dir = op.join(path, '.git', 'hooks')
    hg_dir = op.join(path, '.hg')
    if op.exists(git_dir):
        install_git(git_dir)
        LOGGER.warn('Git hook has been installed.')
    elif op.exists(hg_dir):
        install_hg(hg_dir)
        LOGGER.warn('Mercurial hook has been installed.')
    else:
        LOGGER.error('VCS has not found. Check your path.')
        sys.exit(1)
3.265351
3.10269
1.052426
def run(path, code=None, params=None, **meta):
    """Run the eradicate commented-out-code check.

    :return list: List of errors.
    """
    code = converter(code)
    flagged = commented_out_code_line_numbers(code)
    all_lines = code.split('\n')
    errors = []
    for lnum in flagged:
        line = all_lines[lnum - 1]
        errors.append(dict(
            lnum=lnum,
            offset=len(line) - len(line.rstrip()),
            # https://github.com/sobolevn/flake8-eradicate#output-example
            text=converter('E800 Found commented out code: ') + line,
            # https://github.com/sobolevn/flake8-eradicate#error-codes
            type='E800',
        ))
    return errors
4.205597
4.342315
0.968515
def remove_duplicates(errors):
    """Filter duplicates from given error's list."""
    # Per line number, remember the whole duplicate group once one of its
    # members has been yielded, so the rest of the group is suppressed.
    seen_on_line = defaultdict(list)
    for err in errors:
        key = err.linter, err.number
        if key in DUPLICATES:
            if key in seen_on_line[err.lnum]:
                continue
            seen_on_line[err.lnum] = DUPLICATES[key]
        yield err
5.492425
5.246902
1.046794
def set_up(self):
    """Set up the application under test and the test environment."""
    self.path.profile = self.path.gen.joinpath("profile")
    if not self.path.profile.exists():
        self.path.profile.mkdir()

    build = hitchpylibrarytoolkit.project_build(
        "strictyaml",
        self.path,
        self.given["python version"],
        {"ruamel.yaml": self.given["ruamel version"]},
    )
    self.python = build.bin.python

    snippets = dict(
        yaml_snippet_1=self.given.get("yaml_snippet_1"),
        yaml_snippet=self.given.get("yaml_snippet"),
        yaml_snippet_2=self.given.get("yaml_snippet_2"),
        modified_yaml_snippet=self.given.get("modified_yaml_snippet"),
    )
    example = ExamplePythonCode(self.python, self.path.gen)
    example = example.with_code(self.given.get("code", ""))
    example = example.with_setup_code(self.given.get("setup", ""))
    example = example.with_terminal_size(160, 100)
    self.example_py_code = example.with_strings(**snippets)
4.735573
4.596406
1.030277
def fork(self, strictindex, new_value):
    """Return a chunk referring to the same location in a duplicated document.

    Used when modifying a YAML chunk so that the modification can
    be validated before changing it.
    """
    duplicate = YAMLChunk(
        deepcopy(self._ruamelparsed),
        pointer=self.pointer,
        label=self.label,
        key_association=copy(self._key_association),
    )
    ruamel_key = self.ruamelindex(strictindex)
    duplicate.contents[ruamel_key] = new_value.as_marked_up()
    duplicate.strictparsed()[strictindex] = deepcopy(new_value.as_marked_up())
    return duplicate
8.088015
7.030761
1.150375
def make_child_of(self, chunk):
    """Link one YAML chunk to another.

    Used when inserting a chunk of YAML into another chunk.
    """
    if self.is_mapping():
        for key in self.contents:
            self.key(key, key).pointer.make_child_of(chunk.pointer)
            self.val(key).make_child_of(chunk)
    elif self.is_sequence():
        for position in range(len(self.contents)):
            self.index(position).make_child_of(chunk)
    else:
        # scalar: link the pointer directly
        self.pointer.make_child_of(chunk.pointer)
3.038475
2.916924
1.041671
def _select(self, pointer):
    """Get a YAMLChunk referenced by a pointer."""
    selected = YAMLChunk(
        self._ruamelparsed,
        pointer=pointer,
        strictparsed=self._strictparsed,
        label=self._label,
        key_association=copy(self._key_association),
    )
    return selected
15.147156
9.996222
1.515288
def index(self, strictindex):
    """Return a chunk in a sequence referenced by index."""
    ruamel_position = self.ruamelindex(strictindex)
    return self._select(self._pointer.index(ruamel_position))
22.849211
20.001774
1.142359
def ruamelindex(self, strictindex):
    """Get the ruamel equivalent of a strict parsed index.

    E.g. 0 -> 0, 1 -> 2, parsed-via-slugify -> Parsed via slugify
    """
    if not self.is_mapping():
        # sequences/scalars: strict and ruamel indexes coincide
        return strictindex
    return self.key_association.get(strictindex, strictindex)
8.838279
9.607499
0.919935
def val(self, strictkey):
    """Return a chunk referencing a value in a mapping with the key 'key'."""
    value_pointer = self._pointer.val(self.ruamelindex(strictkey), strictkey)
    return self._select(value_pointer)
14.256032
12.533983
1.13739
def key(self, key, strictkey=None):
    """Return a chunk referencing a key in a mapping with the name 'key'."""
    key_pointer = self._pointer.key(key, strictkey)
    return self._select(key_pointer)
11.997929
10.050747
1.193735
def textslice(self, start, end):
    """Return a chunk referencing a slice of a scalar text value."""
    slice_pointer = self._pointer.textslice(start, end)
    return self._select(slice_pointer)
11.728487
9.046867
1.296414
def flatten(items):
    """Yield items from any nested iterable.

    >>> list(flatten([[1, 2, 3], [[4, 5], 6, 7]]))
    [1, 2, 3, 4, 5, 6, 7]
    """
    for element in items:
        # strings/bytes are iterable but treated as atomic values
        nested = isinstance(element, Iterable) and not isinstance(element, (str, bytes))
        if not nested:
            yield element
        else:
            for inner in flatten(element):
                yield inner
1.829987
2.307895
0.792925
def comma_separated_positions(text):
    """Start and end positions of comma separated text items.

    Commas and trailing spaces should not be included.

    >>> comma_separated_positions("ABC, 2,3")
    [(0, 3), (5, 6), (7, 8)]
    """
    positions = []
    start = end = 0
    for piece in text.split(","):
        # skip a single space following the comma, e.g. ", "
        leading_space = 1 if piece[0] == " " else 0
        start += leading_space
        end += len(piece.lstrip()) + leading_space
        positions.append((start, end))
        start += len(piece.lstrip()) + 1  # plus the comma itself
        end = start
    return positions
4.4954
4.776556
0.941138
def ruamel_structure(data, validator=None):
    """Take dicts and lists and return a ruamel.yaml style structure
    of CommentedMaps, CommentedSeqs and data.

    If a validator is presented and the type is unknown,
    it is checked against the validator to see if it will
    turn it back in to YAML.
    """
    if isinstance(data, dict):
        if not data:
            raise exceptions.CannotBuildDocumentsFromEmptyDictOrList(
                "Document must be built with non-empty dicts and lists"
            )
        return CommentedMap(
            [(ruamel_structure(key), ruamel_structure(value))
             for key, value in data.items()]
        )
    if isinstance(data, list):
        if not data:
            raise exceptions.CannotBuildDocumentsFromEmptyDictOrList(
                "Document must be built with non-empty dicts and lists"
            )
        return CommentedSeq([ruamel_structure(item) for item in data])
    if isinstance(data, bool):
        # bool before int: bool is a subclass of int
        return u"yes" if data else u"no"
    if isinstance(data, (int, float)):
        return str(data)
    if not is_string(data):
        raise exceptions.CannotBuildDocumentFromInvalidData(
            (
                "Document must be built from a combination of:\n"
                "string, int, float, bool or nonempty list/dict\n\n"
                "Instead, found variable with type '{}': '{}'"
            ).format(type(data).__name__, data)
        )
    return data
2.589052
2.532845
1.022191
def rbdd(*keywords):
    """Run story matching keywords and rewrite story if code changed."""
    settings = _personal_settings().data
    settings["engine"]["rewrite"] = True
    python_version = settings["params"]["python version"]
    stories = _storybook(settings["engine"]).with_params(
        **{"python version": python_version}
    )
    stories.only_uninherited().shortcut(*keywords).play()
33.171799
29.009739
1.143471
def regressfile(filename):
    """Run all stories in filename 'filename' in python 2 and 3."""
    py2_stories = _storybook({"rewrite": False}).in_filename(filename).with_params(
        **{"python version": "2.7.14"}
    ).filter(lambda story: not story.info.get("fails_on_python_2"))
    py2_stories.ordered_by_name().play()

    py3_stories = _storybook({"rewrite": False}).with_params(
        **{"python version": "3.7.0"}
    ).in_filename(filename)
    py3_stories.ordered_by_name().play()
7.553854
6.556505
1.152116
def regression():
    """Run regression testing - lint and then run all tests."""
    lint()
    doctests()
    storybook = _storybook({}).only_uninherited()
    # python 2, skipping stories known to fail there
    python2 = storybook.with_params(**{"python version": "2.7.14"})
    python2.filter(
        lambda story: not story.info.get("fails_on_python_2")
    ).ordered_by_name().play()
    # python 3, all stories
    python3 = storybook.with_params(**{"python version": "3.7.0"})
    python3.ordered_by_name().play()
10.66464
10.214753
1.044043
def docgen():
    """Build documentation."""
    story_dir = DIR.key / "story"
    hitchpylibrarytoolkit.docgen(_storybook({}), DIR.project, story_dir, DIR.gen)
133.620636
128.469681
1.040095
def rerun(version="3.7.0"):
    """Rerun last example code block with specified version of python."""
    from commandlib import Command
    interpreter = Command(DIR.gen.joinpath("py{0}".format(version), "bin", "python"))
    state_dir = DIR.gen.joinpath("state")
    interpreter(state_dir.joinpath("examplepythoncode.py")).in_dir(state_dir).run()
9.836322
9.4488
1.041013
def as_document(data, schema=None, label=u"<unicode string>"):
    """Translate dicts/lists and scalar (string/bool/float/int/etc.)
    values into a YAML object which can be dumped out.
    """
    # default to the permissive Any() validator when no schema is given
    validator = Any() if schema is None else schema
    return validator(YAMLChunk(validator.to_yaml(data), label=label))
13.147665
12.204609
1.07727
def dirty_load(yaml_string, schema=None, label=u"<unicode string>", allow_flow_style=False):
    """Parse the first YAML document in a string
    and produce corresponding YAML object.

    If allow_flow_style is set to True, then flow style is allowed.
    """
    return generic_load(
        yaml_string,
        schema=schema,
        label=label,
        allow_flow_style=allow_flow_style,
    )
2.848286
3.381554
0.842301
def load(yaml_string, schema=None, label=u"<unicode string>"):
    """Parse the first YAML document in a string
    and produce corresponding YAML object.
    """
    return generic_load(yaml_string, label=label, schema=schema)
4.507564
5.41784
0.831986
def data(self):
    """Returns raw data representation of the document or document segment.

    Mappings are rendered as ordered dicts, sequences as lists and scalar
    values as whatever the validator returns (int, string, etc.).

    If no validators are used, scalar values are always returned as strings.
    """
    if isinstance(self._value, CommentedMap):
        rendered = OrderedDict()
        for key, value in self._value.items():
            rendered[key.data] = value.data
        return rendered
    if isinstance(self._value, CommentedSeq):
        return [item.data for item in self._value]
    return self._value
2.44505
2.302618
1.061857
def as_yaml(self):
    """Render the YAML node and subnodes as string."""
    rendered = dump(self.as_marked_up(), Dumper=StrictYAMLDumper, allow_unicode=True)
    if sys.version_info[0] == 3:
        return rendered
    # python 2: dump() returns bytes, decode to unicode
    return rendered.decode("utf8")
6.991371
6.384092
1.095124
def text(self):
    """Return string value of scalar, whatever value it was parsed as."""
    # only scalar values carry a text representation
    if isinstance(self._value, CommentedMap):
        raise TypeError("{0} is a mapping, has no text value.".format(repr(self)))
    if isinstance(self._value, CommentedSeq):
        raise TypeError("{0} is a sequence, has no text value.".format(repr(self)))
    return self._text
3.19191
2.866465
1.113535
def partition_source(src):
    """Partitions source into a list of `CodePartition`s for import refactoring."""
    # In python2, ast.parse(text_string_with_encoding_pragma) raises
    # SyntaxError: encoding declaration in Unicode string
    ast_obj = ast.parse(src.encode('UTF-8'))
    visitor = TopLevelImportVisitor()
    visitor.visit(ast_obj)

    line_offsets = get_line_offsets_by_line_no(src)

    chunks = []
    startpos = 0
    pending_chunk_type = None
    possible_ending_tokens = None
    seen_import = False
    for (
            token_type, token_text, (srow, scol), (erow, ecol), _,
    ) in tokenize.generate_tokens(io.StringIO(src).readline):
        # Searching for a start of a chunk
        if pending_chunk_type is None:
            if not seen_import and token_type == tokenize.COMMENT:
                if 'noreorder' in token_text:
                    chunks.append(CodePartition(CodeType.CODE, src[startpos:]))
                    break
                else:
                    pending_chunk_type = CodeType.PRE_IMPORT_CODE
                    possible_ending_tokens = TERMINATES_COMMENT
            elif not seen_import and token_type == tokenize.STRING:
                pending_chunk_type = CodeType.PRE_IMPORT_CODE
                possible_ending_tokens = TERMINATES_DOCSTRING
            elif scol == 0 and srow in visitor.top_level_import_line_numbers:
                seen_import = True
                pending_chunk_type = CodeType.IMPORT
                possible_ending_tokens = TERMINATES_IMPORT
            elif token_type == tokenize.NL:
                # A NL token is a non-important newline, we'll immediately
                # append a NON_CODE partition
                endpos = line_offsets[erow] + ecol
                srctext = src[startpos:endpos]
                startpos = endpos
                chunks.append(CodePartition(CodeType.NON_CODE, srctext))
            elif token_type == tokenize.COMMENT:
                if 'noreorder' in token_text:
                    chunks.append(CodePartition(CodeType.CODE, src[startpos:]))
                    break
                else:
                    pending_chunk_type = CodeType.CODE
                    possible_ending_tokens = TERMINATES_COMMENT
            elif token_type == tokenize.ENDMARKER:
                # Token ended right before end of file or file was empty
                pass
            else:
                # We've reached a `CODE` block, which spans the rest of the
                # file (intentionally timid).  Let's append that block and be
                # done
                chunks.append(CodePartition(CodeType.CODE, src[startpos:]))
                break
        # Attempt to find ending of token
        elif token_type in possible_ending_tokens:
            endpos = line_offsets[erow] + ecol
            srctext = src[startpos:endpos]
            startpos = endpos
            chunks.append(CodePartition(pending_chunk_type, srctext))
            pending_chunk_type = None
            possible_ending_tokens = None
        elif token_type == tokenize.COMMENT and 'noreorder' in token_text:
            chunks.append(CodePartition(CodeType.CODE, src[startpos:]))
            break

    chunks = [chunk for chunk in chunks if chunk.src]

    # Make sure we're not removing any code
    assert _partitions_to_src(chunks) == src
    return chunks
3.194907
3.145165
1.015815
def separate_comma_imports(partitions):
    """Turns `import a, b` into `import a` and `import b`"""
    def _expanded(partition):
        # non-import partitions pass through unchanged
        if partition.code_type is not CodeType.IMPORT:
            return [partition]
        import_obj = import_obj_from_str(partition.src)
        if not import_obj.has_multiple_imports:
            return [partition]
        return [
            CodePartition(CodeType.IMPORT, split_obj.to_text())
            for split_obj in import_obj.split_imports()
        ]

    result = []
    for partition in partitions:
        result.extend(_expanded(partition))
    return result
3.572402
3.443955
1.037296
parts = s.split('.') for i in range(1, len(parts)): yield '.'.join(parts[:i])
def _module_to_base_modules(s)
return all module names that would be imported due to this import-import
2.913912
3.047305
0.956226
def apply_thresholds(input, thresholds, choices):
    """Return one of the choices depending on the input position compared to thresholds, for each input.

    >>> apply_thresholds(np.array([4]), [5, 7], [10, 15, 20])
    array([10])
    >>> apply_thresholds(np.array([6]), [5, 7], [10, 15, 20])
    array([15])
    >>> apply_thresholds(np.array([8]), [5, 7], [10, 15, 20])
    array([20])
    >>> apply_thresholds(np.array([10]), [5, 7, 9], [10, 15, 20])
    array([0])
    """
    conditions = [input <= threshold for threshold in thresholds]
    if len(choices) == len(conditions) + 1:
        # A choice exists for inputs above the highest threshold: make the
        # final condition a catch-all.
        conditions.append(True)
    assert len(conditions) == len(choices), \
        "apply_thresholds must be called with the same number of thresholds than choices, or one more choice"
    return np.select(conditions, choices)
5.12275
5.679704
0.90194
def load_parameter_file(file_path, name = ''):
    """Load parameters from a YAML file (or a directory containing YAML files).

    :returns: An instance of :any:`ParameterNode` or :any:`Scale` or :any:`Parameter`.
    """
    if not os.path.exists(file_path):
        # NOTE: "doest" typo kept — callers/tests may match this message
        raise ValueError("{} doest not exist".format(file_path))
    if os.path.isdir(file_path):
        return ParameterNode(name, directory_path = file_path)
    parsed_data = _load_yaml_file(file_path)
    return _parse_child(name, parsed_data, file_path)
3.368314
3.141247
1.072285
def update(self, period = None, start = None, stop = None, value = None):
    """Change the value for a given period.

    :param period: Period where the value is modified. If set, `start` and `stop` should be `None`.
    :param start: Start of the period. Instance of `openfisca_core.periods.Instant`. If set, `period` should be `None`.
    :param stop: Stop of the period. Instance of `openfisca_core.periods.Instant`. If set, `period` should be `None`.
    :param value: New value. If `None`, the parameter is removed from the legislation parameters for the given period.
    """
    if period is not None:
        if start is not None or stop is not None:
            raise TypeError("Wrong input for 'update' method: use either 'update(period, value = value)' or 'update(start = start, stop = stop, value = value)'. You cannot both use 'period' and 'start' or 'stop'.")
        if isinstance(period, str):
            period = periods.period(period)
        start = period.start
        stop = period.stop
    if start is None:
        raise ValueError("You must provide either a start or a period")
    start_str = str(start)
    # the stop bound is exclusive: shift one day past the stop instant
    stop_str = str(stop.offset(1, 'day')) if stop else None

    old_values = self.values_list
    new_values = []
    n = len(old_values)
    i = 0

    # Future intervals : not affected
    if stop_str:
        while (i < n) and (old_values[i].instant_str >= stop_str):
            new_values.append(old_values[i])
            i += 1

    # Right-overlapped interval
    if stop_str:
        if new_values and (stop_str == new_values[-1].instant_str):
            pass  # such interval is empty
        else:
            if i < n:
                overlapped_value = old_values[i].value
                value_name = _compose_name(self.name, item_name = stop_str)
                new_interval = ParameterAtInstant(value_name, stop_str, data = {'value': overlapped_value})
                new_values.append(new_interval)
            else:
                value_name = _compose_name(self.name, item_name = stop_str)
                new_interval = ParameterAtInstant(value_name, stop_str, data = {'value': None})
                new_values.append(new_interval)

    # Insert new interval
    value_name = _compose_name(self.name, item_name = start_str)
    new_interval = ParameterAtInstant(value_name, start_str, data = {'value': value})
    new_values.append(new_interval)

    # Remove covered intervals
    while (i < n) and (old_values[i].instant_str >= start_str):
        i += 1

    # Past intervals : not affected
    while i < n:
        new_values.append(old_values[i])
        i += 1

    self.values_list = new_values
2.809549
2.77398
1.012822
def merge(self, other):
    """Merges another ParameterNode into the current node.

    In case of child name conflict, the other node child will
    replace the current node child.
    """
    for name, child in other.children.items():
        self.add_child(name, child)
3.381519
2.938192
1.150884
def add_child(self, name, child):
    """Add a new child to the node.

    :param name: Name of the child that must be used to access that child. Should not contain anything that could interfere with the operator `.` (dot).
    :param child: The new child, an instance of :any:`Scale` or :any:`Parameter` or :any:`ParameterNode`.
    """
    if name in self.children:
        raise ValueError("{} has already a child named {}".format(self.name, name))
    if not isinstance(child, (ParameterNode, Parameter, Scale)):
        raise TypeError("child must be of type ParameterNode, Parameter, or Scale. Instead got {}".format(type(child)))
    self.children[name] = child
    # also expose the child as an attribute for dotted access
    setattr(self, name, child)
2.790143
2.25925
1.234987
def check_node_vectorisable(node):
    """Check that a node can be casted to a vectorial node, in order to be able to use fancy indexing."""
    MESSAGE_PART_1 = "Cannot use fancy indexing on parameter node '{}', as"
    MESSAGE_PART_3 = "To use fancy indexing on parameter node, its children must be homogenous."
    MESSAGE_PART_4 = "See more at <https://openfisca.org/doc/coding-the-legislation/legislation_parameters#computing-a-parameter-that-depends-on-a-variable-fancy-indexing>."

    def raise_key_inhomogeneity_error(node_with_key, node_without_key, missing_key):
        message = " ".join([
            MESSAGE_PART_1,
            "'{}' exists, but '{}' doesn't.",
            MESSAGE_PART_3,
            MESSAGE_PART_4,
            ]).format(
                node._name,
                '.'.join([node_with_key, missing_key]),
                '.'.join([node_without_key, missing_key]),
                )
        raise ValueError(message)

    def raise_type_inhomogeneity_error(node_name, non_node_name):
        message = " ".join([
            MESSAGE_PART_1,
            "'{}' is a node, but '{}' is not.",
            MESSAGE_PART_3,
            MESSAGE_PART_4,
            ]).format(
                node._name,
                node_name,
                non_node_name,
                )
        raise ValueError(message)

    def raise_not_implemented(node_name, node_type):
        message = " ".join([
            MESSAGE_PART_1,
            "'{}' is a '{}', and fancy indexing has not been implemented yet on this kind of parameters.",
            MESSAGE_PART_4,
            ]).format(
                node._name,
                node_name,
                node_type,
                )
        raise NotImplementedError(message)

    def extract_named_children(node):
        return {
            '.'.join([node._name, key]): value
            for key, value in node._children.items()
            }

    def check_nodes_homogeneous(named_nodes):
        names = list(named_nodes.keys())
        nodes = list(named_nodes.values())
        first_node = nodes[0]
        first_name = names[0]
        if isinstance(first_node, ParameterNodeAtInstant):
            children = extract_named_children(first_node)
            for node, name in list(zip(nodes, names))[1:]:
                if not isinstance(node, ParameterNodeAtInstant):
                    raise_type_inhomogeneity_error(first_name, name)
                first_node_keys = first_node._children.keys()
                node_keys = node._children.keys()
                if not first_node_keys == node_keys:
                    missing_keys = set(first_node_keys).difference(node_keys)
                    if missing_keys:  # If the first_node has a key that node hasn't
                        raise_key_inhomogeneity_error(first_name, name, missing_keys.pop())
                    else:  # If the node has a key that first_node doesn't have
                        missing_key = set(node_keys).difference(first_node_keys).pop()
                        raise_key_inhomogeneity_error(name, first_name, missing_key)
                children.update(extract_named_children(node))
            check_nodes_homogeneous(children)
        elif isinstance(first_node, float) or isinstance(first_node, int):
            for node, name in list(zip(nodes, names))[1:]:
                if isinstance(node, int) or isinstance(node, float):
                    pass
                elif isinstance(node, ParameterNodeAtInstant):
                    raise_type_inhomogeneity_error(name, first_name)
                else:
                    raise_not_implemented(name, type(node).__name__)
        else:
            raise_not_implemented(first_name, type(first_node).__name__)

    check_nodes_homogeneous(extract_named_children(node))
2.672922
2.623215
1.018949
def replace_variable(self, variable):
    """Replaces an existing OpenFisca variable in the tax and benefit system by a new one.

    The new variable must have the same name than the replaced one.

    If no variable with the given name exists in the tax and benefit system, no error will be raised and the new variable will be simply added.

    :param Variable variable: New variable to add. Must be a subclass of Variable.
    """
    replaced_name = variable.__name__
    if self.variables.get(replaced_name) is not None:
        del self.variables[replaced_name]
    self.load_variable(variable, update = False)
4.886647
6.501171
0.751656
def add_variables_from_file(self, file_path):
    """Adds all OpenFisca variables contained in a given file to the tax and benefit system."""
    try:
        file_name = path.splitext(path.basename(file_path))[0]

        # As Python remembers loaded modules by name, in order to prevent collisions, we need to make sure that:
        # - Files with the same name, but located in different directories, have a different module names. Hence the file path hash in the module name.
        # - The same file, loaded by different tax and benefit systems, has distinct module names. Hence the `id(self)` in the module name.
        module_name = '{}_{}_{}'.format(id(self), hash(path.abspath(file_path)), file_name)

        module_directory = path.dirname(file_path)
        try:
            module = load_module(module_name, *find_module(file_name, [module_directory]))
        except NameError as e:
            # CONSISTENCY FIX: use the module-level logger `log` (as the
            # handler below does) instead of the root logger via `logging.error`.
            log.error(str(e) + ": if this code used to work, this error might be due to a major change in OpenFisca-Core. Checkout the changelog to learn more: <https://github.com/openfisca/openfisca-core/blob/master/CHANGELOG.md>")
            raise
        potential_variables = [getattr(module, item) for item in dir(module) if not item.startswith('__')]
        for pot_variable in potential_variables:
            # We only want to get the module classes defined in this module (not imported)
            if isclass(pot_variable) and issubclass(pot_variable, Variable) and pot_variable.__module__ == module_name:
                self.add_variable(pot_variable)
    except Exception:
        log.error('Unable to load OpenFisca variables from file "{}"'.format(file_path))
        raise
4.935579
4.493096
1.098481
def add_variables_from_directory(self, directory):
    """Recursively explores a directory, and adds all OpenFisca variables found there to the tax and benefit system."""
    for py_file in glob.glob(path.join(directory, "*.py")):
        self.add_variables_from_file(py_file)
    for subdirectory in glob.glob(path.join(directory, "*/")):
        self.add_variables_from_directory(subdirectory)
2.022274
2.07724
0.973539
def load_extension(self, extension):
    """Loads an extension to the tax and benefit system.

    :param string extension: The extension to load. Can be an absolute path pointing to an extension directory, or the name of an OpenFisca extension installed as a pip package.
    """
    # Load extension from installed pip package
    try:
        package = importlib.import_module(extension)
        extension_directory = package.__path__[0]
    except ImportError:
        message = linesep.join([
            traceback.format_exc(),
            'Error loading extension: `{}` is neither a directory, nor a package.'.format(extension),
            'Are you sure it is installed in your environment? If so, look at the stack trace above to determine the origin of this error.',
            'See more at <https://github.com/openfisca/openfisca-extension-template#installing>.',
            ])
        raise ValueError(message)

    self.add_variables_from_directory(extension_directory)
    param_dir = path.join(extension_directory, 'parameters')
    if path.isdir(param_dir):
        extension_parameters = ParameterNode(directory_path = param_dir)
        self.parameters.merge(extension_parameters)
5.253275
4.856853
1.081621
def apply_reform(self, reform_path):
    """Generates a new tax and benefit system applying a reform to the tax and benefit system.

    The current tax and benefit system is **not** mutated.

    :param string reform_path: The reform to apply. Must respect the format *installed_package.sub_module.reform*
    :returns: A reformed tax and benefit system.

    Example:
    >>> self.apply_reform('openfisca_france.reforms.inversion_revenus')
    """
    from openfisca_core.reforms import Reform
    try:
        reform_package, reform_name = reform_path.rsplit('.', 1)
    except ValueError:
        raise ValueError('`{}` does not seem to be a path pointing to a reform. A path looks like `some_country_package.reforms.some_reform.`'.format(reform_path))
    try:
        reform_module = importlib.import_module(reform_package)
    except ImportError:
        message = linesep.join([
            traceback.format_exc(),
            'Could not import `{}`.'.format(reform_package),
            'Are you sure of this reform module name? If so, look at the stack trace above to determine the origin of this error.',
            ])
        raise ValueError(message)
    reform = getattr(reform_module, reform_name, None)
    if reform is None:
        raise ValueError('{} has no attribute {}'.format(reform_package, reform_name))
    if not issubclass(reform, Reform):
        raise ValueError('`{}` does not seem to be a valid Openfisca reform.'.format(reform_path))
    return reform(self)
3.299459
3.243202
1.017346
def get_variable(self, variable_name, check_existence = False):
    """Get a variable from the tax and benefit system.

    :param variable_name: Name of the requested variable.
    :param check_existence: If True, raise an error if the requested variable does not exist.
    """
    found = self.variables.get(variable_name)
    if check_existence and not found:
        raise VariableNotFound(variable_name, self)
    return found
3.099923
4.146153
0.747663
def neutralize_variable(self, variable_name):
    """Neutralizes an OpenFisca variable existing in the tax and benefit system.

    A neutralized variable always returns its default value when computed.

    Trying to set inputs for a neutralized variable has no effect except raising a warning.
    """
    current = self.get_variable(variable_name)
    self.variables[variable_name] = get_neutralized_variable(current)
4.18548
4.524904
0.924988
def load_parameters(self, path_to_yaml_dir):
    """Loads the legislation parameter for a directory containing YAML parameters files.

    :param path_to_yaml_dir: Absolute path towards the YAML parameter directory.

    Example:
    >>> self.load_parameters('/path/to/yaml/parameters/dir')
    """
    node = ParameterNode('', directory_path = path_to_yaml_dir)
    if self.preprocess_parameters is not None:
        node = self.preprocess_parameters(node)
    self.parameters = node
5.1062
6.439757
0.792918
def get_parameters_at_instant(self, instant):
    """Get the parameters of the legislation at a given instant

    :param instant: string of the format 'YYYY-MM-DD' or `openfisca_core.periods.Instant` instance.
    :returns: The parameters of the legislation at a given instant.
    :rtype: :any:`ParameterNodeAtInstant`
    """
    # normalise the argument to an Instant
    if isinstance(instant, periods.Period):
        instant = instant.start
    elif isinstance(instant, (str, int)):
        instant = periods.instant(instant)
    else:
        assert isinstance(instant, periods.Instant), "Expected an Instant (e.g. Instant((2017, 1, 1)) ). Got: {}.".format(instant)

    cached = self._parameters_at_instant_cache.get(instant)
    if cached is None and self.parameters is not None:
        cached = self.parameters.get_at_instant(str(instant))
        self._parameters_at_instant_cache[instant] = cached
    return cached
3.027206
2.730723
1.108573
def get_package_metadata(self):
    """Gets metatada relative to the country package the tax and benefit system is built from.

    :returns: Country package metadata
    :rtype: dict

    Example:
    >>> tax_benefit_system.get_package_metadata()
    >>> {
    >>>    'location': '/path/to/dir/containing/package',
    >>>    'name': 'openfisca-france',
    >>>    'repository_url': 'https://github.com/openfisca/openfisca-france',
    >>>    'version': '17.2.0'
    >>>    }
    """
    # Handle reforms
    if self.baseline:
        return self.baseline.get_package_metadata()

    fallback_metadata = {
        'name': self.__class__.__name__,
        'version': '',
        'repository_url': '',
        'location': '',
        }

    module = inspect.getmodule(self)
    if not module.__package__:
        return fallback_metadata
    package_name = module.__package__.split('.')[0]
    try:
        distribution = pkg_resources.get_distribution(package_name)
    except pkg_resources.DistributionNotFound:
        return fallback_metadata

    location = inspect.getsourcefile(module).split(package_name)[0].rstrip('/')

    home_page_metadatas = [
        metadata.split(':', 1)[1].strip(' ')
        for metadata in distribution._get_metadata(distribution.PKG_INFO)
        if 'Home-page' in metadata
        ]
    repository_url = home_page_metadatas[0] if home_page_metadatas else ''
    return {
        'name': distribution.key,
        'version': distribution.version,
        'repository_url': repository_url,
        'location': location,
        }
3.065089
2.944355
1.041005
def get_variables(self, entity = None):
    """Gets all variables contained in a tax and benefit system.

    :param <Entity subclass> entity: If set, returns only the variable defined for the given entity.

    :returns: A dictionnary, indexed by variable names.
    :rtype: dict
    """
    if not entity:
        return self.variables
    # TODO - because entities are copied (see constructor) they can't be compared
    return {
        name: variable
        for name, variable in self.variables.items()
        if variable.entity.key == entity.key
    }
5.589043
5.688595
0.9825
input_dict = self.explicit_singular_entities(tax_benefit_system, input_dict) if any(key in tax_benefit_system.entities_plural() for key in input_dict.keys()): return self.build_from_entities(tax_benefit_system, input_dict) else: return self.build_from_variables(tax_benefit_system, input_dict)
def build_from_dict(self, tax_benefit_system, input_dict)
Build a simulation from ``input_dict`` This method uses :any:`build_from_entities` if entities are fully specified, or :any:`build_from_variables` if not. :param dict input_dict: A dict represeting the input of the simulation :return: A :any:`Simulation`
3.266781
3.124174
1.045646
input_dict = deepcopy(input_dict) simulation = Simulation(tax_benefit_system, tax_benefit_system.instantiate_entities()) # Register variables so get_variable_entity can find them for (variable_name, _variable) in tax_benefit_system.variables.items(): self.register_variable(variable_name, simulation.get_variable_population(variable_name).entity) check_type(input_dict, dict, ['error']) axes = input_dict.pop('axes', None) unexpected_entities = [entity for entity in input_dict if entity not in tax_benefit_system.entities_plural()] if unexpected_entities: unexpected_entity = unexpected_entities[0] raise SituationParsingError([unexpected_entity], ''.join([ "Some entities in the situation are not defined in the loaded tax and benefit system.", "These entities are not found: {0}.", "The defined entities are: {1}."] ) .format( ', '.join(unexpected_entities), ', '.join(tax_benefit_system.entities_plural()) ) ) persons_json = input_dict.get(tax_benefit_system.person_entity.plural, None) if not persons_json: raise SituationParsingError([tax_benefit_system.person_entity.plural], 'No {0} found. 
At least one {0} must be defined to run a simulation.'.format(tax_benefit_system.person_entity.key)) persons_ids = self.add_person_entity(simulation.persons.entity, persons_json) for entity_class in tax_benefit_system.group_entities: instances_json = input_dict.get(entity_class.plural) if instances_json is not None: self.add_group_entity(self.persons_plural, persons_ids, entity_class, instances_json) else: self.add_default_group_entity(persons_ids, entity_class) if axes: self.axes = axes self.expand_axes() try: self.finalize_variables_init(simulation.persons) except PeriodMismatchError as e: self.raise_period_mismatch(simulation.persons.entity, persons_json, e) for entity_class in tax_benefit_system.group_entities: try: population = simulation.populations[entity_class.key] self.finalize_variables_init(population) except PeriodMismatchError as e: self.raise_period_mismatch(population.entity, instances_json, e) return simulation
def build_from_entities(self, tax_benefit_system, input_dict)
Build a simulation from a Python dict ``input_dict`` fully specifying entities. Examples: >>> simulation_builder.build_from_entities({ 'persons': {'Javier': { 'salary': {'2018-11': 2000}}}, 'households': {'household': {'parents': ['Javier']}} })
3.7659
3.708328
1.015525
count = _get_person_count(input_dict) simulation = self.build_default_simulation(tax_benefit_system, count) for variable, value in input_dict.items(): if not isinstance(value, dict): if self.default_period is None: raise SituationParsingError([variable], "Can't deal with type: expected object. Input variables should be set for specific periods. For instance: {'salary': {'2017-01': 2000, '2017-02': 2500}}, or {'birth_date': {'ETERNITY': '1980-01-01'}}.") simulation.set_input(variable, self.default_period, value) else: for period_str, dated_value in value.items(): simulation.set_input(variable, period_str, dated_value) return simulation
def build_from_variables(self, tax_benefit_system, input_dict)
Build a simulation from a Python dict ``input_dict`` describing variables values without expliciting entities. This method uses :any:`build_default_simulation` to infer an entity structure Example: >>> simulation_builder.build_from_variables( {'salary': {'2016-10': 12000}} )
4.806546
4.471941
1.074823
simulation = Simulation(tax_benefit_system, tax_benefit_system.instantiate_entities()) for population in simulation.populations.values(): population.count = count population.ids = np.array(range(count)) if not population.entity.is_person: population.members_entity_id = population.ids # Each person is its own group entity return simulation
def build_default_simulation(self, tax_benefit_system, count = 1)
Build a simulation where: - There are ``count`` persons - There are ``count`` instances of each group entity, containing one person - Every person has, in each entity, the first role
6.747325
6.161342
1.095107
singular_keys = set(input_dict).intersection(tax_benefit_system.entities_by_singular()) if not singular_keys: return input_dict result = { entity_id: entity_description for (entity_id, entity_description) in input_dict.items() if entity_id in tax_benefit_system.entities_plural() } # filter out the singular entities for singular in singular_keys: plural = tax_benefit_system.entities_by_singular()[singular].plural result[plural] = {singular: input_dict[singular]} return result
def explicit_singular_entities(self, tax_benefit_system, input_dict)
Preprocess ``input_dict`` to explicit entities defined using the single-entity shortcut Example: >>> simulation_builder.explicit_singular_entities( {'persons': {'Javier': {}, }, 'household': {'parents': ['Javier']}} ) >>> {'persons': {'Javier': {}}, 'households': {'household': {'parents': ['Javier']}}
3.195752
3.348786
0.954302
check_type(instances_json, dict, [entity.plural]) entity_ids = list(map(str, instances_json.keys())) self.persons_plural = entity.plural self.entity_ids[self.persons_plural] = entity_ids self.entity_counts[self.persons_plural] = len(entity_ids) for instance_id, instance_object in instances_json.items(): check_type(instance_object, dict, [entity.plural, instance_id]) self.init_variable_values(entity, instance_object, str(instance_id)) return self.get_ids(entity.plural)
def add_person_entity(self, entity, instances_json)
Add the simulation's instances of the persons entity as described in ``instances_json``.
3.599334
3.414491
1.054135
check_type(instances_json, dict, [entity.plural]) entity_ids = list(map(str, instances_json.keys())) self.entity_ids[entity.plural] = entity_ids self.entity_counts[entity.plural] = len(entity_ids) persons_count = len(persons_ids) persons_to_allocate = set(persons_ids) self.memberships[entity.plural] = np.empty(persons_count, dtype = np.int32) self.roles[entity.plural] = np.empty(persons_count, dtype = object) self.entity_ids[entity.plural] = entity_ids self.entity_counts[entity.plural] = len(entity_ids) for instance_id, instance_object in instances_json.items(): check_type(instance_object, dict, [entity.plural, instance_id]) variables_json = instance_object.copy() # Don't mutate function input roles_json = { role.plural or role.key: transform_to_strict_syntax(variables_json.pop(role.plural or role.key, [])) for role in entity.roles } for role_id, role_definition in roles_json.items(): check_type(role_definition, list, [entity.plural, instance_id, role_id]) for index, person_id in enumerate(role_definition): entity_plural = entity.plural self.check_persons_to_allocate(persons_plural, entity_plural, persons_ids, person_id, instance_id, role_id, persons_to_allocate, index) persons_to_allocate.discard(person_id) entity_index = entity_ids.index(instance_id) role_by_plural = {role.plural or role.key: role for role in entity.roles} for role_plural, persons_with_role in roles_json.items(): role = role_by_plural[role_plural] if role.max is not None and len(persons_with_role) > role.max: raise SituationParsingError([entity.plural, instance_id, role_plural], f"There can be at most {role.max} {role_plural} in a {entity.key}. 
{len(persons_with_role)} were declared in '{instance_id}'.") for index_within_role, person_id in enumerate(persons_with_role): person_index = persons_ids.index(person_id) self.memberships[entity.plural][person_index] = entity_index person_role = role.subroles[index_within_role] if role.subroles else role self.roles[entity.plural][person_index] = person_role self.init_variable_values(entity, variables_json, instance_id) if persons_to_allocate: entity_ids = entity_ids + list(persons_to_allocate) for person_id in persons_to_allocate: person_index = persons_ids.index(person_id) self.memberships[entity.plural][person_index] = entity_ids.index(person_id) self.roles[entity.plural][person_index] = entity.flattened_roles[0] # Adjust previously computed ids and counts self.entity_ids[entity.plural] = entity_ids self.entity_counts[entity.plural] = len(entity_ids) # Convert back to Python array self.roles[entity.plural] = self.roles[entity.plural].tolist() self.memberships[entity.plural] = self.memberships[entity.plural].tolist()
def add_group_entity(self, persons_plural, persons_ids, entity, instances_json)
Add all instances of one of the model's entities as described in ``instances_json``.
2.537916
2.530896
1.002774
combined_tax_scales = None for child_name in node: child = node[child_name] if not isinstance(child, AbstractTaxScale): log.info('Skipping {} with value {} because it is not a tax scale'.format(child_name, child)) continue if combined_tax_scales is None: combined_tax_scales = MarginalRateTaxScale(name = child_name) combined_tax_scales.add_bracket(0, 0) combined_tax_scales.add_tax_scale(child) return combined_tax_scales
def combine_tax_scales(node)
Combine all the MarginalRateTaxScales in the node into a single MarginalRateTaxScale.
3.179197
2.74806
1.156888
# threshold : threshold of brut revenue # net_threshold: threshold of net revenue # theta : ordonnée à l'origine des segments des différents seuils dans une # représentation du revenu imposable comme fonction linéaire par # morceaux du revenu brut # Actually 1 / (1- global_rate) inverse = self.__class__(name = self.name + "'", option = self.option, unit = self.unit) net_threshold = 0 for threshold, rate in zip(self.thresholds, self.rates): if threshold == 0: previous_rate = 0 theta = 0 # On calcule le seuil de revenu imposable de la tranche considérée. net_threshold = (1 - previous_rate) * threshold + theta inverse.add_bracket(net_threshold, 1 / (1 - rate)) theta = (rate - previous_rate) * threshold + theta previous_rate = rate return inverse
def inverse(self)
Returns a new instance of MarginalRateTaxScale Invert a taxscale: Assume tax_scale composed of bracket which thresholds are expressed in term of brut revenue. The inverse is another MarginalTaxSclae which thresholds are expressed in terms of net revenue. If net = revbrut - tax_scale.calc(revbrut) then brut = tax_scale.inverse().calc(net)
9.209588
7.623271
1.208089
assert isinstance(factor, (float, int)) scaled_tax_scale = self.copy() return scaled_tax_scale.multiply_thresholds(factor)
def scale_tax_scales(self, factor)
Scale all the MarginalRateTaxScales in the node.
5.573665
5.368193
1.038276
if type(array) is EnumArray: return array if array.dtype.kind in {'U', 'S'}: # String array array = np.select([array == item.name for item in cls], [item.index for item in cls]).astype(ENUM_ARRAY_DTYPE) elif array.dtype.kind == 'O': # Enum items arrays # Ensure we are comparing the comparable. The problem this fixes: # On entering this method "cls" will generally come from variable.possible_values, # while the array values may come from directly importing a module containing an Enum class. # However, variables (and hence their possible_values) are loaded by a call to load_module, # which gives them a different identity from the ones imported in the usual way. # So, instead of relying on the "cls" passed in, we use only its name to check that # the values in the array, if non-empty, are of the right type. if len(array) > 0 and cls.__name__ is array[0].__class__.__name__: cls = array[0].__class__ array = np.select([array == item for item in cls], [item.index for item in cls]).astype(ENUM_ARRAY_DTYPE) return EnumArray(array, cls)
def encode(cls, array)
Encode a string numpy array, or an enum item numpy array, into an :any:`EnumArray`. See :any:`EnumArray.decode` for decoding. :param numpy.ndarray array: Numpy array of string identifiers, or of enum items, to encode. :returns: An :any:`EnumArray` encoding the input array values. :rtype: :any:`EnumArray` For instance: >>> string_identifier_array = numpy.asarray(['free_lodger', 'owner']) >>> encoded_array = HousingOccupancyStatus.encode(string_identifier_array) >>> encoded_array[0] >>> 2 # Encoded value >>> enum_item_array = numpy.asarray([HousingOccupancyStatus.free_lodger, HousingOccupancyStatus.owner]) >>> encoded_array = HousingOccupancyStatus.encode(enum_item_array) >>> encoded_array[0] >>> 2 # Encoded value
6.83665
6.445151
1.060743
return np.select([self == item.index for item in self.possible_values], [item for item in self.possible_values])
def decode(self)
Return the array of enum items corresponding to self >>> enum_array = household('housing_occupancy_status', period) >>> enum_array[0] >>> 2 # Encoded value >>> enum_array.decode()[0] >>> <HousingOccupancyStatus.free_lodger: 'Free lodger'> # Decoded value : enum item
9.33155
9.110715
1.024239
return np.select([self == item.index for item in self.possible_values], [item.name for item in self.possible_values])
def decode_to_str(self)
Return the array of string identifiers corresponding to self >>> enum_array = household('housing_occupancy_status', period) >>> enum_array[0] >>> 2 # Encoded value >>> enum_array.decode_to_str()[0] >>> 'free_lodger' # String identifier
7.206234
7.600552
0.94812
key = self._get_key(variable_name, period, **parameters) if self.stack: # The variable is a dependency of another variable parent = self.stack[-1] self.trace[parent]['dependencies'].append(key) else: # The variable has been requested by the client self.requested_calculations.add(key) if not self.trace.get(key): self.trace[key] = {'dependencies': [], 'parameters': {}} self.stack.append(key) self._computation_log.append((key, len(self.stack))) self.usage_stats[variable_name]['nb_requests'] += 1
def record_calculation_start(self, variable_name, period, **parameters)
Record that OpenFisca started computing a variable. :param str variable_name: Name of the variable starting to be computed :param Period period: Period for which the variable is being computed :param list parameters: Parameter with which the variable is being computed
4.580722
5.027543
0.911125
key = self._get_key(variable_name, period, **parameters) expected_key = self.stack.pop() if not key == expected_key: raise ValueError( "Something went wrong with the simulation tracer: result of '{0}' was expected, got results for '{1}' instead. This does not make sense as the last variable we started computing was '{0}'." .format(expected_key, key) ) self.trace[key]['value'] = result
def record_calculation_end(self, variable_name, period, result, **parameters)
Record that OpenFisca finished computing a variable. :param str variable_name: Name of the variable starting to be computed :param Period period: Period for which the variable is being computed :param numpy.ndarray result: Result of the computation :param list parameters: Parameter with which the variable is being computed
7.171857
7.723878
0.928531
key = self._get_key(variable_name, period) def _print_details(key, depth): if depth > 0 and ignore_zero and np.all(self.trace[key]['value'] == 0): return yield self._print_node(key, depth, aggregate) if depth < max_depth: for dependency in self.trace[key]['dependencies']: return _print_details(dependency, depth + 1) return _print_details(key, 0)
def print_trace(self, variable_name, period, max_depth = 1, aggregate = False, ignore_zero = False)
Print value, the dependencies, and the dependencies values of the variable for the given period (and possibly the given set of extra parameters). :param str variable_name: Name of the variable to investigate :param Period period: Period to investigate :param int max_depth: Maximum level of recursion :param bool aggregate: See :any:`print_computation_log` :param bool ignore_zero: If ``True``, don't print dependencies if their value is 0
3.172595
3.422118
0.927085
for line in self.computation_log(aggregate): print(line)
def print_computation_log(self, aggregate = False)
Print the computation log of a simulation. If ``aggregate`` is ``False`` (default), print the value of each computed vector. If ``aggregate`` is ``True``, only print the minimum, maximum, and average value of each computed vector. This mode is more suited for simulations on a large population.
6.867529
9.357975
0.733869
simulation = Simulation(tax_benefit_system = tax_benefit_system, **kwargs) simulation.persons.ids = np.arange(nb_persons) simulation.persons.count = nb_persons adults = [0] + sorted(random.sample(range(1, nb_persons), nb_groups - 1)) members_entity_id = np.empty(nb_persons, dtype = int) # A legacy role is an index that every person within an entity has. For instance, the 'demandeur' has legacy role 0, the 'conjoint' 1, the first 'child' 2, the second 3, etc. members_legacy_role = np.empty(nb_persons, dtype = int) id_group = -1 for id_person in range(nb_persons): if id_person in adults: id_group += 1 legacy_role = 0 else: legacy_role = 2 if legacy_role == 0 else legacy_role + 1 members_legacy_role[id_person] = legacy_role members_entity_id[id_person] = id_group for entity in simulation.populations.values(): if not entity.is_person: entity.members_entity_id = members_entity_id entity.count = nb_groups entity.members_role = np.where(members_legacy_role == 0, entity.flattened_roles[0], entity.flattened_roles[-1]) return simulation
def make_simulation(tax_benefit_system, nb_persons, nb_groups, **kwargs)
Generate a simulation containing nb_persons persons spread in nb_groups groups. Example: >>> from openfisca_core.scripts.simulation_generator import make_simulation >>> from openfisca_france import CountryTaxBenefitSystem >>> tbs = CountryTaxBenefitSystem() >>> simulation = make_simulation(tbs, 400, 100) # Create a simulation with 400 persons, spread among 100 families >>> simulation.calculate('revenu_disponible', 2017)
3.690534
3.956117
0.932868
if condition is None: condition = True variable = simulation.tax_benefit_system.get_variable(variable_name) population = simulation.get_variable_population(variable_name) value = (np.random.rand(population.count) * max_value * condition).astype(variable.dtype) simulation.set_input(variable_name, period, value)
def randomly_init_variable(simulation, variable_name, period, max_value, condition = None)
Initialise a variable with random values (from 0 to max_value) for the given period. If a condition vector is provided, only set the value of persons or groups for which condition is True. Example: >>> from openfisca_core.scripts.simulation_generator import make_simulation, randomly_init_variable >>> from openfisca_france import CountryTaxBenefitSystem >>> tbs = CountryTaxBenefitSystem() >>> simulation = make_simulation(tbs, 400, 100) # Create a simulation with 400 persons, spread among 100 families >>> randomly_init_variable(simulation, 'salaire_net', 2017, max_value = 50000, condition = simulation.persons.has_role(simulation.famille.DEMANDEUR)) # Randomly set a salaire_net for all persons between 0 and 50000? >>> simulation.calculate('revenu_disponible', 2017)
4.626046
5.089206
0.908992
baseline_parameters = self.baseline.parameters baseline_parameters_copy = copy.deepcopy(baseline_parameters) reform_parameters = modifier_function(baseline_parameters_copy) if not isinstance(reform_parameters, ParameterNode): return ValueError( 'modifier_function {} in module {} must return a ParameterNode' .format(modifier_function.__name__, modifier_function.__module__,) ) self.parameters = reform_parameters self._parameters_at_instant_cache = {}
def modify_parameters(self, modifier_function)
Make modifications on the parameters of the legislation Call this function in `apply()` if the reform asks for legislation parameter modifications. :param modifier_function: A function that takes an object of type :any:`ParameterNode` and should return an object of the same type.
4.513614
3.962288
1.139143
if instant is None: return None if isinstance(instant, Instant): return instant if isinstance(instant, str): if not INSTANT_PATTERN.match(instant): raise ValueError("'{}' is not a valid instant. Instants are described using the 'YYYY-MM-DD' format, for instance '2015-06-15'.".format(instant)) instant = Instant( int(fragment) for fragment in instant.split('-', 2)[:3] ) elif isinstance(instant, datetime.date): instant = Instant((instant.year, instant.month, instant.day)) elif isinstance(instant, int): instant = (instant,) elif isinstance(instant, list): assert 1 <= len(instant) <= 3 instant = tuple(instant) elif isinstance(instant, Period): instant = instant.start else: assert isinstance(instant, tuple), instant assert 1 <= len(instant) <= 3 if len(instant) == 1: return Instant((instant[0], 1, 1)) if len(instant) == 2: return Instant((instant[0], instant[1], 1)) return Instant(instant)
def instant(instant)
Return a new instant, aka a triple of integers (year, month, day). >>> instant(2014) Instant((2014, 1, 1)) >>> instant('2014') Instant((2014, 1, 1)) >>> instant('2014-02') Instant((2014, 2, 1)) >>> instant('2014-3-2') Instant((2014, 3, 2)) >>> instant(instant('2014-3-2')) Instant((2014, 3, 2)) >>> instant(period('month', '2014-3-2')) Instant((2014, 3, 2)) >>> instant(None)
2.4638
2.38875
1.031418
if isinstance(value, Period): return value if isinstance(value, Instant): return Period((DAY, value, 1)) def parse_simple_period(value): try: date = datetime.datetime.strptime(value, '%Y') except ValueError: try: date = datetime.datetime.strptime(value, '%Y-%m') except ValueError: try: date = datetime.datetime.strptime(value, '%Y-%m-%d') except ValueError: return None else: return Period((DAY, Instant((date.year, date.month, date.day)), 1)) else: return Period((MONTH, Instant((date.year, date.month, 1)), 1)) else: return Period((YEAR, Instant((date.year, date.month, 1)), 1)) def raise_error(value): message = linesep.join([ "Expected a period (eg. '2017', '2017-01', '2017-01-01', ...); got: '{}'.".format(value), "Learn more about legal period formats in OpenFisca:", "<https://openfisca.org/doc/coding-the-legislation/35_periods.html#periods-in-simulations>." ]) raise ValueError(message) if value == 'ETERNITY' or value == ETERNITY: return Period(('eternity', instant(datetime.date.min), float("inf"))) # check the type if isinstance(value, int): return Period((YEAR, Instant((value, 1, 1)), 1)) if not isinstance(value, str): raise_error(value) # try to parse as a simple period period = parse_simple_period(value) if period is not None: return period # complex period must have a ':' in their strings if ":" not in value: raise_error(value) components = value.split(':') # left-most component must be a valid unit unit = components[0] if unit not in (DAY, MONTH, YEAR): raise_error(value) # middle component must be a valid iso period base_period = parse_simple_period(components[1]) if not base_period: raise_error(value) # period like year:2015-03 have a size of 1 if len(components) == 2: size = 1 # if provided, make sure the size is an integer elif len(components) == 3: try: size = int(components[2]) except ValueError: raise_error(value) # if there is more than 2 ":" in the string, the period is invalid else: raise_error(value) # reject ambiguous period such as month:2014 if 
unit_weight(base_period.unit) > unit_weight(unit): raise_error(value) return Period((unit, base_period.start, size))
def period(value)
Return a new period, aka a triple (unit, start_instant, size). >>> period('2014') Period((YEAR, Instant((2014, 1, 1)), 1)) >>> period('year:2014') Period((YEAR, Instant((2014, 1, 1)), 1)) >>> period('2014-2') Period((MONTH, Instant((2014, 2, 1)), 1)) >>> period('2014-02') Period((MONTH, Instant((2014, 2, 1)), 1)) >>> period('month:2014-2') Period((MONTH, Instant((2014, 2, 1)), 1)) >>> period('year:2014-2') Period((YEAR, Instant((2014, 2, 1)), 1))
3.181535
2.95307
1.077365
unit, start, size = period return '{}_{}'.format(unit_weight(unit), size)
def key_period_size(period)
Defines a key in order to sort periods by length. It uses two aspects : first unit then size :param period: an OpenFisca period :return: a string >>> key_period_size(period('2014')) '2_1' >>> key_period_size(period('2013')) '2_1' >>> key_period_size(period('2014-01')) '1_1'
26.468252
23.631594
1.120037
instant_date = date_by_instant_cache.get(self) if instant_date is None: date_by_instant_cache[self] = instant_date = datetime.date(*self) return instant_date
def date(self)
Convert instant to a date. >>> instant(2014).date datetime.date(2014, 1, 1) >>> instant('2014-2').date datetime.date(2014, 2, 1) >>> instant('2014-2-3').date datetime.date(2014, 2, 3)
4.654008
5.529907
0.841607
assert unit in (DAY, MONTH, YEAR), 'Invalid unit: {} of type {}'.format(unit, type(unit)) assert isinstance(size, int) and size >= 1, 'Invalid size: {} of type {}'.format(size, type(size)) return Period((unit, self, size))
def period(self, unit, size = 1)
Create a new period starting at instant. >>> instant(2014).period('month') Period(('month', Instant((2014, 1, 1)), 1)) >>> instant('2014-2').period('year', 2) Period(('year', Instant((2014, 2, 1)), 2)) >>> instant('2014-2-3').period('day', size = 2) Period(('day', Instant((2014, 2, 3)), 2))
3.235071
2.90682
1.112924
if unit_weight(self.unit) < unit_weight(unit): raise ValueError('Cannot subdivide {0} into {1}'.format(self.unit, unit)) if unit == YEAR: return [self.this_year.offset(i, YEAR) for i in range(self.size)] if unit == MONTH: return [self.first_month.offset(i, MONTH) for i in range(self.size_in_months)] if unit == DAY: return [self.first_day.offset(i, DAY) for i in range(self.size_in_days)]
def get_subperiods(self, unit)
Return the list of all the periods of unit ``unit`` contained in self. Examples: >>> period('2017').get_subperiods(MONTH) >>> [period('2017-01'), period('2017-02'), ... period('2017-12')] >>> period('year:2014:2').get_subperiods(YEAR) >>> [period('2014'), period('2015')]
2.738005
2.84387
0.962774
if not isinstance(other, Period): other = period(other) return self.start <= other.start and self.stop >= other.stop
def contains(self, other)
Returns ``True`` if the period contains ``other``. For instance, ``period(2015)`` contains ``period(2015-01)``
3.80454
2.902289
1.310876
if (self[0] == MONTH): return self[2] if(self[0] == YEAR): return self[2] * 12 raise ValueError("Cannot calculate number of months in {0}".format(self[0]))
def size_in_months(self)
Return the size of the period in months. >>> period('month', '2012-2-29', 4).size_in_months 4 >>> period('year', '2012', 1).size_in_months 12
4.679566
4.718817
0.991682
unit, instant, length = self if unit == DAY: return length if unit in [MONTH, YEAR]: last_day = self.start.offset(length, unit).offset(-1, DAY) return (last_day.date - self.start.date).days + 1 raise ValueError("Cannot calculate number of days in {0}".format(unit))
def size_in_days(self)
Return the size of the period in days. >>> period('month', '2012-2-29', 4).size_in_days 28 >>> period('year', '2012', 1).size_in_days 366
6.376077
7.776191
0.819949
unit, start_instant, size = self year, month, day = start_instant if unit == ETERNITY: return Instant((float("inf"), float("inf"), float("inf"))) if unit == 'day': if size > 1: day += size - 1 month_last_day = calendar.monthrange(year, month)[1] while day > month_last_day: month += 1 if month == 13: year += 1 month = 1 day -= month_last_day month_last_day = calendar.monthrange(year, month)[1] else: if unit == 'month': month += size while month > 12: year += 1 month -= 12 else: assert unit == 'year', 'Invalid unit: {} of type {}'.format(unit, type(unit)) year += size day -= 1 if day < 1: month -= 1 if month == 0: year -= 1 month = 12 day += calendar.monthrange(year, month)[1] else: month_last_day = calendar.monthrange(year, month)[1] if day > month_last_day: month += 1 if month == 13: year += 1 month = 1 day -= month_last_day return Instant((year, month, day))
def stop(self)
Return the last day of the period as an Instant instance. >>> period('year', 2014).stop Instant((2014, 12, 31)) >>> period('month', 2014).stop Instant((2014, 12, 31)) >>> period('day', 2014).stop Instant((2014, 12, 31)) >>> period('year', '2012-2-29').stop Instant((2013, 2, 28)) >>> period('month', '2012-2-29').stop Instant((2012, 3, 28)) >>> period('day', '2012-2-29').stop Instant((2012, 2, 29)) >>> period('year', '2012-2-29', 2).stop Instant((2014, 2, 28)) >>> period('month', '2012-2-29', 2).stop Instant((2012, 4, 28)) >>> period('day', '2012-2-29', 2).stop Instant((2012, 3, 1))
2.232918
2.075705
1.07574
result = variable.clone() result.is_neutralized = True result.label = '[Neutralized]' if variable.label is None else '[Neutralized] {}'.format(variable.label), return result
def get_neutralized_variable(variable)
Return a new neutralized variable (to be used by reforms). A neutralized variable always returns its default value, and does not cache anything.
6.251149
5.97042
1.04702
def raise_error(): raise ValueError( 'Unrecognized formula name in variable "{}". Expecting "formula_YYYY" or "formula_YYYY_MM" or "formula_YYYY_MM_DD where YYYY, MM and DD are year, month and day. Found: "{}".' .format(self.name, attribute_name)) if attribute_name == FORMULA_NAME_PREFIX: return date.min FORMULA_REGEX = r'formula_(\d{4})(?:_(\d{2}))?(?:_(\d{2}))?$' # YYYY or YYYY_MM or YYYY_MM_DD match = re.match(FORMULA_REGEX, attribute_name) if not match: raise_error() date_str = '-'.join([match.group(1), match.group(2) or '01', match.group(3) or '01']) try: return datetime.datetime.strptime(date_str, '%Y-%m-%d').date() except ValueError: # formula_2005_99_99 for instance raise_error()
def parse_formula_name(self, attribute_name)
Returns the starting date of a formula based on its name. Valid dated name formats are : 'formula', 'formula_YYYY', 'formula_YYYY_MM' and 'formula_YYYY_MM_DD' where YYYY, MM and DD are a year, month and day. By convention, the starting date of: - `formula` is `0001-01-01` (minimal date in Python) - `formula_YYYY` is `YYYY-01-01` - `formula_YYYY_MM` is `YYYY-MM-01`
3.324126
2.915635
1.140104
comments = inspect.getcomments(cls) # Handle dynamically generated variable classes or Jupyter Notebooks, which have no source. try: absolute_file_path = inspect.getsourcefile(cls) except TypeError: source_file_path = None else: source_file_path = absolute_file_path.replace(tax_benefit_system.get_package_metadata()['location'], '') try: source_lines, start_line_number = inspect.getsourcelines(cls) source_code = textwrap.dedent(''.join(source_lines)) except (IOError, TypeError): source_code, start_line_number = None, None return comments, source_file_path, source_code, start_line_number
def get_introspection_data(cls, tax_benefit_system)
Get instrospection data about the code of the variable. :returns: (comments, source file path, source code, start line number) :rtype: tuple
3.761736
3.326539
1.130826
def get_formula(self, period = None):
    """
    Return the formula used to compute the variable at the given period.

    If no period is given and the variable has several formulas, return the
    oldest formula.

    :returns: Formula used to compute the variable
    :rtype: function
    """
    if not self.formulas:
        return None

    if period is None:
        # peekitem(0) yields the (start_date, formula) pair with the oldest start date.
        oldest_start_date, oldest_formula = self.formulas.peekitem(index = 0)
        return oldest_formula

    if isinstance(period, periods.Period):
        instant = period.start
    else:
        try:
            instant = periods.period(period).start
        except ValueError:
            instant = periods.instant(period)

    # No formula is applicable past the variable's end date.
    if self.end and instant.date > self.end:
        return None

    instant = str(instant)
    # Walk the start dates from newest to oldest and pick the first one
    # that is not posterior to the requested instant.
    return next(
        (self.formulas[start_date] for start_date in reversed(self.formulas) if start_date <= instant),
        None)
4.507066
4.260242
1.057937
def has_role(self, role):
    """
    Check if a person has a given role within its :any:`GroupEntity`.

    Example:

    >>> person.has_role(Household.CHILD)
    >>> array([False])
    """
    self.entity.check_role_validity(role)
    members_role = self.simulation.get_population(role.entity.plural).members_role
    if not role.subroles:
        return members_role == role
    # A composite role (e.g. PARENT) is held through any of its subroles
    # (e.g. FIRST_PARENT, SECOND_PARENT).
    subrole_matches = [members_role == subrole for subrole in role.subroles]
    return np.logical_or.reduce(subrole_matches)
6.044673
6.367808
0.949255
def get_rank(self, entity, criteria, condition = True):
    """
    Get the rank of a person within an entity according to a criteria.
    The person with rank 0 has the minimum value of criteria.
    If condition is specified, then the persons who don't respect it are not taken into account and their rank is -1.

    Example:

    >>> age = person('age', period)  # e.g [32, 34, 2, 8, 1]
    >>> person.get_rank(household, age)
    >>> [3, 4, 0, 2, 1]

    >>> is_child = person.has_role(Household.CHILD)  # [False, False, True, True, True]
    >>> person.get_rank(household, - age, condition = is_child)  # Sort in reverse order so that the eldest child gets the rank 0.
    >>> [-1, -1, 1, 0, 2]
    """
    # If entity is for instance 'person.household', we get the reference entity 'household' behind the projector
    entity = entity if not isinstance(entity, Projector) else entity.reference_entity

    positions = entity.members_position
    biggest_entity_size = np.max(positions) + 1
    # Persons who don't satisfy the condition get +inf, so they sort last within their entity.
    filtered_criteria = np.where(condition, criteria, np.inf)
    ids = entity.members_entity_id

    # Matrix: the value in line i and column j is the value of criteria for the jth person of the ith entity
    # Missing positions (entities smaller than biggest_entity_size) are padded with +inf.
    matrix = np.asarray([
        entity.value_nth_person(k, filtered_criteria, default = np.inf)
        for k in range(biggest_entity_size)
        ]).transpose()

    # We double-argsort all lines of the matrix.
    # Double-argsorting gets the rank of each value once sorted
    # For instance, if x = [3,1,6,4,0], y = np.argsort(x) is [4, 1, 0, 3, 2] (because the value with index 4 is the smallest one, the value with index 1 the second smallest, etc.) and z = np.argsort(y) is [2, 1, 4, 3, 0], the rank of each value.
    sorted_matrix = np.argsort(np.argsort(matrix))

    # Build the result vector by taking for each person the value in the right line (corresponding to its household id) and the right column (corresponding to its position)
    result = sorted_matrix[ids, positions]

    # Return -1 for the persons who don't respect the condition
    return np.where(condition, result, -1)
6.67801
6.281248
1.063166
def ordered_members_map(self):
    """
    Mask to group the persons by entity.

    This accessor only reuses the cached map value when one has been computed;
    to see what the map is used for, see the ``value_nth_person`` method.
    """
    cached_map = self._ordered_members_map
    if cached_map is not None:
        return cached_map
    # Indices that sort the persons by the id of their containing entity.
    return np.argsort(self.members_entity_id)
5.087168
3.695176
1.376705
def sum(self, array, role = None):
    """
    Return the sum of ``array`` for the members of the entity.

    ``array`` must have the dimension of the number of persons in the simulation.

    If ``role`` is provided, only the entity members with the given role are taken into account.

    Example:

    >>> salaries = household.members('salary', '2018-01')  # e.g. [2000, 1500, 0, 0, 0]
    >>> household.sum(salaries)
    >>> array([3500])
    """
    self.entity.check_role_validity(role)
    self.members.check_array_compatible_with_entity(array)
    if role is None:
        return np.bincount(self.members_entity_id, weights = array)
    # Keep only the members holding the given role, then aggregate per entity.
    role_filter = self.members.has_role(role)
    return np.bincount(
        self.members_entity_id[role_filter],
        weights = array[role_filter],
        # minlength guarantees one bucket per entity even when trailing entities
        # have no member with the role.
        minlength = self.count)
4.031557
3.786839
1.064623
def any(self, array, role = None):
    """
    Return ``True`` if ``array`` is ``True`` for any members of the entity.

    ``array`` must have the dimension of the number of persons in the simulation.

    If ``role`` is provided, only the entity members with the given role are taken into account.

    Example:

    >>> salaries = household.members('salary', '2018-01')  # e.g. [2000, 1500, 0, 0, 0]
    >>> household.any(salaries >= 1800)
    >>> array([True])
    """
    # An entity has "any" truthy member iff the per-entity sum of the boolean array is positive.
    return self.sum(array, role = role) > 0
6.40965
12.286271
0.521692
def all(self, array, role = None):
    """
    Return ``True`` if ``array`` is ``True`` for all members of the entity.

    ``array`` must have the dimension of the number of persons in the simulation.

    If ``role`` is provided, only the entity members with the given role are taken into account.

    Example:

    >>> salaries = household.members('salary', '2018-01')  # e.g. [2000, 1500, 0, 0, 0]
    >>> household.all(salaries >= 1800)
    >>> array([False])
    """
    # Fold the members' values with a logical AND; True is the neutral element of AND.
    conjunction = np.logical_and
    return self.reduce(array, reducer = conjunction, neutral_element = True, role = role)
10.945863
16.496422
0.66353
def max(self, array, role = None):
    """
    Return the maximum value of ``array`` for the entity members.

    ``array`` must have the dimension of the number of persons in the simulation.

    If ``role`` is provided, only the entity members with the given role are taken into account.

    Example:

    >>> salaries = household.members('salary', '2018-01')  # e.g. [2000, 1500, 0, 0, 0]
    >>> household.max(salaries)
    >>> array([2000])
    """
    # -inf is the neutral element of the maximum.
    # np.inf replaces np.infty, a deprecated alias removed in NumPy 2.0 (same value).
    return self.reduce(array, reducer = np.maximum, neutral_element = - np.inf, role = role)
11.214331
16.349531
0.685911
def min(self, array, role = None):
    """
    Return the minimum value of ``array`` for the entity members.

    ``array`` must have the dimension of the number of persons in the simulation.

    If ``role`` is provided, only the entity members with the given role are taken into account.

    Example:

    >>> salaries = household.members('salary', '2018-01')  # e.g. [2000, 1500, 0, 0, 0]
    >>> household.min(salaries)
    >>> array([0])
    >>> household.min(salaries, role = Household.PARENT)  # Assuming the 1st two persons are parents
    >>> array([1500])
    """
    # +inf is the neutral element of the minimum.
    # np.inf replaces np.infty, a deprecated alias removed in NumPy 2.0 (same value).
    return self.reduce(array, reducer = np.minimum, neutral_element = np.inf, role = role)
9.753899
13.039447
0.74803
def nb_persons(self, role = None):
    """
    Return the number of persons contained in the entity.

    If ``role`` is provided, only the entity members with the given role are taken into account.
    """
    if not role:
        # One count per entity id.
        return np.bincount(self.members_entity_id)
    if role.subroles:
        # A composite role (e.g. PARENT) is held through any of its subroles.
        role_condition = np.logical_or.reduce([self.members_role == subrole for subrole in role.subroles])
    else:
        role_condition = self.members_role == role
    return self.sum(role_condition)
4.210663
4.222682
0.997154
def value_from_person(self, array, role, default = 0):
    """
    Get the value of ``array`` for the person with the unique role ``role``.

    ``array`` must have the dimension of the number of persons in the simulation.

    If such a person does not exist, return ``default`` instead.

    The result is a vector which dimension is the number of entities.

    :raises Exception: if ``role`` is not unique within the entity (role.max != 1).
    """
    self.entity.check_role_validity(role)
    if role.max != 1:
        raise Exception(
            'You can only use value_from_person with a role that is unique in {}. Role {} is not unique.'
            .format(self.key, role.key)
        )
    self.members.check_array_compatible_with_entity(array)
    members_map = self.ordered_members_map
    result = self.filled_array(default, dtype = array.dtype)
    # Preserve the enum metadata when the input array carries possible values.
    if isinstance(array, EnumArray):
        result = EnumArray(result, array.possible_values)
    role_filter = self.members.has_role(role)
    # Only fill the entities that actually contain a member with the role;
    # the others keep ``default``.
    entity_filter = self.any(role_filter)
    result[entity_filter] = array[members_map][role_filter[members_map]]
    return result
5.970808
5.837087
1.022909
def value_nth_person(self, n, array, default = 0):
    """
    Get the value of array for the person whose position in the entity is n.

    Note that this position is arbitrary, and that members are not sorted.

    If the nth person does not exist, return ``default`` instead.

    The result is a vector which dimension is the number of entities.
    """
    self.members.check_array_compatible_with_entity(array)
    positions = self.members_position
    nb_persons_per_entity = self.nb_persons()
    members_map = self.ordered_members_map
    # Entities with fewer than n + 1 members keep ``default``.
    result = self.filled_array(default, dtype = array.dtype)
    # For households that have at least n persons, set the result as the value of criteria for the person for which the position is n.
    # The map is needed b/c the order of the nth persons of each household in the persons vector is not necessarily the same than the household order.
    result[nb_persons_per_entity > n] = array[members_map][positions[members_map] == n]
    return result
9.075412
8.417521
1.078157
def data_storage_dir(self):
    """
    Temporary folder used to store intermediate calculation data in case the memory is saturated.

    The folder is created lazily on first access, with a warning reminding the
    user to remove it, and is reused on subsequent accesses.
    """
    if self._data_storage_dir is None:
        self._data_storage_dir = tempfile.mkdtemp(prefix = "openfisca_")
        # Fix: Logger.warn is a deprecated alias of Logger.warning.
        log.warning((
            "Intermediate results will be stored on disk in {} in case of memory overflow. "
            "You should remove this directory once you're done with your simulation."
            ).format(self._data_storage_dir))
    return self._data_storage_dir
4.699297
3.846897
1.221581
def calculate(self, variable_name, period, **parameters):
    """
    Calculate the variable ``variable_name`` for the period ``period``, using the variable formula if it exists.

    :returns: A numpy array containing the result of the calculation
    """
    population = self.get_variable_population(variable_name)
    holder = population.get_holder(variable_name)
    variable = self.tax_benefit_system.get_variable(variable_name, check_existence = True)

    # Accept raw period representations (e.g. '2018-01') as well as Period instances.
    if period is not None and not isinstance(period, periods.Period):
        period = periods.period(period)

    if self.trace:
        self.tracer.record_calculation_start(variable.name, period, **parameters)

    self._check_period_consistency(period, variable)

    # First look for a value already cached
    cached_array = holder.get_array(period)
    if cached_array is not None:
        if self.trace:
            self.tracer.record_calculation_end(variable.name, period, cached_array, **parameters)
        return cached_array

    array = None

    # First, try to run a formula
    try:
        self._check_for_cycle(variable, period)
        array = self._run_formula(variable, population, period)

        # If no result, use the default value and cache it
        if array is None:
            array = holder.default_array()

        array = self._cast_formula_result(array, variable)
        holder.put_in_cache(array, period)
    except SpiralError:
        # A calculation spiral was detected: fall back to the variable's default value.
        array = holder.default_array()
    finally:
        # Always close the trace record and clear cycle-detection state,
        # whether the formula succeeded or spiralled.
        if self.trace:
            self.tracer.record_calculation_end(variable.name, period, array, **parameters)
        self._clean_cycle_detection_data(variable.name)
    self.purge_cache_of_invalid_values()

    return array
3.982761
4.02391
0.989774
def calculate_output(self, variable_name, period):
    """
    Calculate the value of a variable using the ``calculate_output`` attribute of the variable.

    Falls back to a plain calculation when the variable defines no custom output behaviour.
    """
    variable = self.tax_benefit_system.get_variable(variable_name, check_existence = True)
    custom_output = variable.calculate_output
    if custom_output is not None:
        return custom_output(self, variable_name, period)
    return self.calculate(variable_name, period)
4.748347
4.227147
1.123298