code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
cfg = Config.instance() default = cfg.get_expanded(section, option, **kwargs) return cfg.get_expanded(section, "{}_{}".format(self.workflow_type, option), default=default, **kwargs)
def get_prefixed_config(self, section, option, **kwargs)
Return the expanded config value for the workflow-prefixed option "<workflow_type>_<option>" in the given section, falling back to the value of the plain option.
5.379511
5.336412
1.008076
tol = abs(tol) im = np.zeros((p.imzmldict["max count of pixels y"], p.imzmldict["max count of pixels x"])) for i, (x, y, z_) in enumerate(p.coordinates): if z_ == 0: warn(UserWarning("z coordinate = 0 present, if you're getting blank images set getionimage(.., .., z=0)")) if z_ == z: mzs, ints = map(lambda x: np.asarray(x), p.getspectrum(i)) min_i, max_i = _bisect_spectrum(mzs, mz_value, tol) im[y - 1, x - 1] = reduce_func(ints[min_i:max_i+1]) return im
def getionimage(p, mz_value, tol=0.1, z=1, reduce_func=sum)
Get an image representation of the intensity distribution of the ion with the specified m/z value. By default, the intensity values within the tolerance region are summed. :param p: the ImzMLParser (or anything else with similar attributes) for the desired dataset :param mz_value: m/z value for which the ion image shall be returned :param tol: absolute tolerance for the m/z value, such that all ions with values mz_value-|tol| <= x <= mz_value+|tol| are included. Defaults to 0.1 :param z: z value if the spectrogram is 3-dimensional. :param reduce_func: the behaviour for reducing the intensities between mz_value-|tol| and mz_value+|tol| to a single value. Must be a function that takes a sequence as input and outputs a number. By default, the values are summed. :return: numpy array with each element representing the ion intensity in this pixel. Can be easily plotted with matplotlib
4.948709
5.223704
0.947356
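A minimal usage sketch for getionimage above, assuming the pyimzml package is installed; the file name 'example.imzML' and the m/z value and tolerance are placeholders.

from pyimzml.ImzMLParser import ImzMLParser, getionimage
import matplotlib.pyplot as plt

# parse a hypothetical imzML file and build an ion image for m/z 885.55 +/- 0.25,
# keeping the maximum intensity in the tolerance window instead of the default sum
p = ImzMLParser('example.imzML')
im = getionimage(p, 885.55, tol=0.25, reduce_func=max)
plt.imshow(im)
plt.colorbar()
plt.show()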
mz_group = int_group = None slist = None elem_iterator = self.iterparse(self.filename, events=("start", "end")) if sys.version_info > (3,): _, self.root = next(elem_iterator) else: _, self.root = elem_iterator.next() for event, elem in elem_iterator: if elem.tag == self.sl + "spectrumList" and event == "start": slist = elem elif elem.tag == self.sl + "spectrum" and event == "end": self.__process_spectrum(elem) slist.remove(elem) elif elem.tag == self.sl + "referenceableParamGroup" and event == "end": for param in elem: if param.attrib["name"] == "m/z array": self.mzGroupId = elem.attrib['id'] mz_group = elem elif param.attrib["name"] == "intensity array": self.intGroupId = elem.attrib['id'] int_group = elem self.__assign_precision(int_group, mz_group) self.__fix_offsets()
def __iter_read_spectrum_meta(self)
This method should only be called by __init__. Reads the data formats, coordinates and offsets from the .imzML file and initializes the respective attributes. While traversing the XML tree, the per-spectrum metadata is pruned, i.e. the <spectrumList> element(s) are left behind empty. Supported accession values for the number formats: "MS:1000521", "MS:1000523", "IMS:1000141" or "IMS:1000142". The string values are "32-bit float", "64-bit float", "32-bit integer", "64-bit integer".
3.121592
2.955619
1.056155
d = {} scan_settings_list_elem = self.root.find('%sscanSettingsList' % self.sl) instrument_config_list_elem = self.root.find('%sinstrumentConfigurationList' % self.sl) supportedparams1 = [("max count of pixels x", int), ("max count of pixels y", int), ("max dimension x", int), ("max dimension y", int), ("pixel size x", float), ("pixel size y", float), ("matrix solution concentration", float)] supportedparams2 = [("wavelength", float), ("focus diameter x", float), ("focus diameter y", float), ("pulse energy", float), ("pulse duration", float), ("attenuation", float)] supportedaccessions1 = [("IMS:1000042", "value"), ("IMS:1000043", "value"), ("IMS:1000044", "value"), ("IMS:1000045", "value"), ("IMS:1000046", "value"), ("IMS:1000047", "value"), ("MS:1000835", "value")] supportedaccessions2 = [("MS:1000843", "value"), ("MS:1000844", "value"), ("MS:1000845", "value"), ("MS:1000846", "value"), ("MS:1000847", "value"), ("MS:1000848", "value")] for i in range(len(supportedparams1)): acc, attr = supportedaccessions1[i] elem = scan_settings_list_elem.find('.//%scvParam[@accession="%s"]' % (self.sl, acc)) if elem is None: break name, T = supportedparams1[i] try: d[name] = T(elem.attrib[attr]) except ValueError: warn(Warning('Wrong data type in XML file. Skipped attribute "%s"' % name)) for i in range(len(supportedparams2)): acc, attr = supportedaccessions2[i] elem = instrument_config_list_elem.find('.//%scvParam[@accession="%s"]' % (self.sl, acc)) if elem is None: break name, T = supportedparams2[i] try: d[name] = T(elem.attrib[attr]) except ValueError: warn(Warning('Wrong data type in XML file. Skipped attribute "%s"' % name)) return d
def __readimzmlmeta(self)
This method should only be called by __init__. Initializes the imzmldict with frequently used metadata from the .imzML file. This method reads only a subset of the available meta information and may be extended in the future. The keys are named similarly to the imzML names. Currently supported keys: "max dimension x", "max dimension y", "pixel size x", "pixel size y", "matrix solution concentration", "wavelength", "focus diameter x", "focus diameter y", "pulse energy", "pulse duration", "attenuation". If a key is not found in the XML tree, it will not be in the dict either. :return d: dict containing above mentioned meta data :rtype: dict :raises Warning: if an xml attribute has a number format different from the imzML specification
2.245338
1.921898
1.168292
try: pixel_size_x = self.imzmldict["pixel size x"] pixel_size_y = self.imzmldict["pixel size y"] except KeyError: raise KeyError("Could not find all pixel size attributes in imzML file") image_x, image_y = self.coordinates[i][:2] return image_x * pixel_size_x, image_y * pixel_size_y
def get_physical_coordinates(self, i)
For a pixel index i, return the real-world coordinates in nanometers. This is equivalent to multiplying the image coordinates of the given pixel with the pixel size. :param i: the pixel index :return: a tuple of x and y coordinates. :rtype: Tuple[float] :raises KeyError: if the .imzML file does not specify the attributes "pixel size x" and "pixel size y"
3.14895
2.340383
1.345485
mz_bytes, intensity_bytes = self.get_spectrum_as_string(index) mz_array = np.frombuffer(mz_bytes, dtype=self.mzPrecision) intensity_array = np.frombuffer(intensity_bytes, dtype=self.intensityPrecision) return mz_array, intensity_array
def getspectrum(self, index)
Reads the spectrum at specified index from the .ibd file. :param index: Index of the desired spectrum in the .imzML file Output: mz_array: numpy.ndarray Sequence of m/z values representing the horizontal axis of the desired mass spectrum intensity_array: numpy.ndarray Sequence of intensity values corresponding to mz_array
2.810451
2.797519
1.004623
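A short sketch of reading a single spectrum with getspectrum above and plotting it; the parser construction, file path, and index 0 are illustrative.

from pyimzml.ImzMLParser import ImzMLParser
import matplotlib.pyplot as plt

p = ImzMLParser('example.imzML')        # hypothetical file path
mzs, intensities = p.getspectrum(0)     # first spectrum in the file
plt.plot(mzs, intensities)
plt.xlabel('m/z')
plt.ylabel('intensity')
plt.show()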
offsets = [self.mzOffsets[index], self.intensityOffsets[index]] lengths = [self.mzLengths[index], self.intensityLengths[index]] lengths[0] *= self.sizeDict[self.mzPrecision] lengths[1] *= self.sizeDict[self.intensityPrecision] self.m.seek(offsets[0]) mz_string = self.m.read(lengths[0]) self.m.seek(offsets[1]) intensity_string = self.m.read(lengths[1]) return mz_string, intensity_string
def get_spectrum_as_string(self, index)
Reads m/z array and intensity array of the spectrum at specified location from the binary file as a byte string. The string can be unpacked by the struct module. To get the arrays as numbers, use getspectrum :param index: Index of the desired spectrum in the .imzML file :rtype: Tuple[str, str] Output: mz_string: string where each character represents a byte of the mz array of the spectrum intensity_string: string where each character represents a byte of the intensity array of the spectrum
2.494165
2.358719
1.057424
'''reads an m/z array from the currently open ibd file''' self.ibd.seek(mz_offset) data = self.ibd.read(mz_enc_len) self.ibd.seek(0, 2) data = self.mz_compression.decompress(data) return tuple(np.frombuffer(data, dtype=self.mz_dtype))
def _read_mz(self, mz_offset, mz_len, mz_enc_len)
Reads an m/z array from the currently open ibd file.
4.031571
2.985821
1.350239
'''given an mz array, return the mz_data (disk location) if the mz array was not previously written, write to disk first''' mzs = tuple(mzs) # must be hashable if mzs in self.lru_cache: return self.lru_cache[mzs] # mz not recognized ... check hash mz_hash = "%s-%s-%s" % (hash(mzs), sum(mzs), len(mzs)) if mz_hash in self.hashes: for mz_data in self.hashes[mz_hash]: test_mz = self._read_mz(*mz_data) if mzs == test_mz: self.lru_cache[test_mz] = mz_data return mz_data # hash not recognized # must be a new mz array ... write it, add it to lru_cache and hashes mz_data = self._encode_and_write(mzs, self.mz_dtype, self.mz_compression) self.hashes[mz_hash].append(mz_data) self.lru_cache[mzs] = mz_data return mz_data
def _get_previous_mz(self, mzs)
Given an m/z array, return its mz_data (disk location). If the m/z array was not previously written, write it to disk first.
3.759005
2.78922
1.347691
# must be rounded now to allow comparisons to later data # but don't waste CPU time in continuous mode since the data will not be used anyway if self.mode != "continuous" or self.first_mz is None: mzs = self.mz_compression.rounding(mzs) intensities = self.intensity_compression.rounding(intensities) if self.mode == "continuous": if self.first_mz is None: self.first_mz = self._encode_and_write(mzs, self.mz_dtype, self.mz_compression) mz_data = self.first_mz elif self.mode == "processed": mz_data = self._encode_and_write(mzs, self.mz_dtype, self.mz_compression) elif self.mode == "auto": mz_data = self._get_previous_mz(mzs) else: raise TypeError("Unknown mode: %s" % self.mode) mz_offset, mz_len, mz_enc_len = mz_data int_offset, int_len, int_enc_len = self._encode_and_write(intensities, self.intensity_dtype, self.intensity_compression) mz_min = np.min(mzs) mz_max = np.max(mzs) ix_max = np.argmax(intensities) mz_base = mzs[ix_max] int_base = intensities[ix_max] int_tic = np.sum(intensities) s = _Spectrum(coords, mz_len, mz_offset, mz_enc_len, int_len, int_offset, int_enc_len, mz_min, mz_max, mz_base, int_base, int_tic, userParams) self.spectra.append(s)
def addSpectrum(self, mzs, intensities, coords, userParams=[])
Add a mass spectrum to the file. :param mzs: mz array :param intensities: intensity array :param coords: * 2-tuple of x and y position OR * 3-tuple of x, y, and z position Note that some applications expect coords to be 1-indexed
2.912286
2.966696
0.98166
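A hedged sketch of writing spectra with pyimzml's ImzMLWriter, whose addSpectrum method is documented above; the output path and the synthetic data are invented, and it assumes the writer can be used as a context manager.

import numpy as np
from pyimzml.ImzMLWriter import ImzMLWriter

mzs = np.linspace(100, 1000, 50)                     # synthetic m/z axis
with ImzMLWriter('output.imzML') as writer:          # hypothetical output path
    for x in range(1, 3):
        for y in range(1, 3):
            intensities = np.random.rand(50)         # synthetic intensities
            writer.addSpectrum(mzs, intensities, (x, y, 1))  # 1-indexed coords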
'''alias of close()''' self.ibd.close() self._write_xml() self.xml.close()
def finish(self)
alias of close()
20.66239
14.284315
1.446509
global settings, project_settings # Global changes settings settings = Changes.load() # Project specific settings project_settings = Project.load(GitHubRepository(auth_token=settings.auth_token))
def initialise()
Detects, prompts and initialises the project. Stores project and tool configuration in the `changes` module.
14.724542
12.665328
1.162587
rmtree('dist', ignore_errors=True) build_package_command = 'python setup.py clean sdist bdist_wheel' result = shell.dry_run(build_package_command, context.dry_run) packages = Path('dist').files() if not context.dry_run else "nothing" if not result: raise Exception('Error building packages: %s' % result) else: log.info('Built %s' % ', '.join(packages)) return packages
def build_distributions(context)
Builds package distributions
4.634826
4.43005
1.046224
if not context.dry_run and build_distributions(context): with util.mktmpdir() as tmp_dir: venv.create_venv(tmp_dir=tmp_dir) for distribution in Path('dist').files(): try: venv.install(distribution, tmp_dir) log.info('Successfully installed %s', distribution) if context.test_command and verification.run_test_command(context): log.info( 'Successfully ran test command: %s', context.test_command ) except Exception as e: raise Exception( 'Error installing distribution %s' % distribution, e ) else: log.info('Dry run, skipping installation')
def install_package(context)
Attempts to install the sdist and wheel.
4.551575
4.267712
1.066514
if not context.dry_run and build_distributions(context): upload_args = 'twine upload ' upload_args += ' '.join(Path('dist').files()) if context.pypi: upload_args += ' -r %s' % context.pypi upload_result = shell.dry_run(upload_args, context.dry_run) if not context.dry_run and not upload_result: raise Exception('Error uploading: %s' % upload_result) else: log.info( 'Successfully uploaded %s:%s', context.module_name, context.new_version ) else: log.info('Dry run, skipping package upload')
def upload_package(context)
Uploads your project packages to pypi with twine.
4.163539
3.958171
1.051885
tmp_dir = venv.create_venv() install_cmd = '%s/bin/pip install %s' % (tmp_dir, context.module_name) package_index = 'pypi' if context.pypi: install_cmd += ' -i %s' % context.pypi package_index = context.pypi try: result = shell.dry_run(install_cmd, context.dry_run) if not context.dry_run and not result: log.error( 'Failed to install %s from %s', context.module_name, package_index ) else: log.info( 'Successfully installed %s from %s', context.module_name, package_index ) except Exception as e: error_msg = 'Error installing %s from %s' % (context.module_name, package_index) log.exception(error_msg) raise Exception(error_msg, e)
def install_from_pypi(context)
Attempts to install your package from pypi.
2.647385
2.59316
1.020911
log.info('%s? %s' % (probe_name, probe_result)) if not probe_result: raise exceptions.ProbeException(failure_msg) else: return True
def report_and_raise(probe_name, probe_result, failure_msg)
Logs the probe result and raises on failure
3.410565
3.182794
1.071563
init_path = '{}/__init__.py'.format(python_module) has_metadata = ( exists(init_path) and attributes.has_attribute(python_module, '__version__') and attributes.has_attribute(python_module, '__url__') ) return report_and_raise( 'Has module metadata', has_metadata, 'Your %s/__init__.py must contain __version__ and __url__ attributes', )
def has_metadata(python_module)
Checks that `<module_name>/__init__.py` contains `__version__` and `__url__` attributes.
4.048614
3.536479
1.144815
log.info('Checking project for changes requirements.') return ( has_tools() and has_setup() and has_metadata(python_module) and has_test_runner() and has_readme() and has_changelog() )
def probe_project(python_module)
Check if the project meets `changes` requirements. Complain and exit otherwise.
7.210746
5.473756
1.317331
commit_version_change(context) if context.github: # github token project_settings = project_config(context.module_name) if not project_settings['gh_token']: click.echo('You need a GitHub token for changes to create a release.') click.pause( 'Press [enter] to launch the GitHub "New personal access ' 'token" page, to create a token for changes.' ) click.launch('https://github.com/settings/tokens/new') project_settings['gh_token'] = click.prompt('Enter your changes token') store_settings(context.module_name, project_settings) description = click.prompt('Describe this release') upload_url = create_github_release( context, project_settings['gh_token'], description ) upload_release_distributions( context, project_settings['gh_token'], build_distributions(context), upload_url, ) click.pause('Press [enter] to review and update your new release') click.launch( '{0}/releases/tag/{1}'.format(context.repo_url, context.new_version) ) else: tag_and_push(context)
def publish(context)
Publishes the project
4.631872
4.660793
0.993795
try: run_tests() if not context.skip_changelog: generate_changelog(context) increment_version(context) build_distributions(context) install_package(context) upload_package(context) install_from_pypi(context) publish(context) except Exception: log.exception('Error releasing')
def perform_release(context)
Executes the release process.
5.368446
5.264467
1.019751
with open('%s/__init__.py' % module_name) as input_file: for line in input_file: if line.startswith(attribute_name): return ast.literal_eval(line.split('=')[1].strip())
def extract_attribute(module_name, attribute_name)
Extract a metadata attribute from a module
2.54507
2.438196
1.043833
init_file = '%s/__init__.py' % module_name _, tmp_file = tempfile.mkstemp() with open(init_file) as input_file: with open(tmp_file, 'w') as output_file: for line in input_file: if line.startswith(attribute_name): line = "%s = '%s'\n" % (attribute_name, new_value) output_file.write(line) if not dry_run: Path(tmp_file).copy(init_file) else: log.info(diff(tmp_file, init_file, retcode=None))
def replace_attribute(module_name, attribute_name, new_value, dry_run=True)
Update a metadata attribute
2.463835
2.611035
0.943624
init_file = '%s/__init__.py' % module_name return any( [attribute_name in init_line for init_line in open(init_file).readlines()] )
def has_attribute(module_name, attribute_name)
Is this attribute present?
3.870974
3.664319
1.056397
if not alternatives: raise ValueError if not isinstance(alternatives, list): raise TypeError choice_map = OrderedDict( ('{}'.format(i), value) for i, value in enumerate(alternatives, 1) ) # prepend a termination option input_terminator = '0' choice_map.update({input_terminator: '<done>'}) choice_map.move_to_end('0', last=False) choice_indexes = choice_map.keys() choice_lines = ['{} - {}'.format(*c) for c in choice_map.items()] prompt = '\n'.join( ( 'Select labels:', '\n'.join(choice_lines), 'Choose from {}'.format(', '.join(choice_indexes)), ) ) user_choices = set() user_choice = None while not user_choice == input_terminator: if user_choices: note('Selected labels: [{}]'.format(', '.join(user_choices))) user_choice = click.prompt( prompt, type=click.Choice(choice_indexes), default=input_terminator ) done = user_choice == input_terminator new_selection = user_choice not in user_choices nothing_selected = not user_choices if not done and new_selection: user_choices.add(choice_map[user_choice]) if done and nothing_selected: error('Please select at least one label') user_choice = None return user_choices
def choose_labels(alternatives)
Prompt the user to select one or more labels from the provided alternatives. At least one label must be selected. :param list alternatives: Sequence of options that are available to select from :return: the set of selected labels
3.222221
3.184933
1.011708
curdir = os.getcwd() try: if dirname is not None: os.chdir(dirname) requests_cache.configure(expire_after=60 * 10 * 10) changes.initialise() yield finally: os.chdir(curdir)
def work_in(dirname=None)
Context manager version of os.chdir. When exited, returns to the working directory prior to entering.
4.860586
4.761619
1.020784
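A small usage sketch of the work_in context manager above, assuming it is importable from its module; the directory path is hypothetical. Note that, per the code, entering the block also configures requests_cache and re-runs changes.initialise().

import os

with work_in('path/to/project'):   # hypothetical project directory
    print(os.getcwd())             # the project directory
print(os.getcwd())                 # restored to the previous working directory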
with work_in(repo_directory): if discard: stage_command.discard(release_name, release_description) else: stage_command.stage(draft, release_name, release_description)
def stage(draft, discard, repo_directory, release_name, release_description)
Stages a release
3.020448
3.139434
0.9621
changelog_content = [ '\n## [%s](%s/compare/%s...%s)\n\n' % ( context.new_version, context.repo_url, context.current_version, context.new_version, ) ] git_log_content = None git_log = 'log --oneline --no-merges --no-color'.split(' ') try: git_log_tag = git_log + ['%s..master' % context.current_version] git_log_content = git(git_log_tag) log.debug('content: %s' % git_log_content) except Exception: log.warn('Error diffing previous version, initial release') git_log_content = git(git_log) git_log_content = replace_sha_with_commit_link(context.repo_url, git_log_content) # turn change log entries into markdown bullet points if git_log_content: [ changelog_content.append('* %s\n' % line) if line else line for line in git_log_content[:-1] ] write_new_changelog( context.repo_url, 'CHANGELOG.md', changelog_content, dry_run=context.dry_run ) log.info('Added content to CHANGELOG.md') context.changelog_content = changelog_content
def generate_changelog(context)
Generates an automatic changelog from your commit messages.
3.631119
3.564735
1.018623
return dict((k, dictionary[k]) for k in keys if k in dictionary)
def extract(dictionary, keys)
Extract only the specified keys from a dict :param dictionary: source dictionary :param keys: list of keys to extract :return dict: extracted dictionary
2.9793
6.869225
0.433717
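A doctest-style illustration of the extract helper above, using made-up keys; keys missing from the source dictionary are simply skipped.

>>> extract({'a': 1, 'b': 2, 'c': 3}, ['a', 'c', 'missing'])
{'a': 1, 'c': 3}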
long_arguments = extract(arguments, long_keys) return dict( [(key.replace(key_prefix, ''), value) for key, value in long_arguments.items()] )
def extract_arguments(arguments, long_keys, key_prefix='--')
Extract the given long-form keys from a dict of command-line arguments and strip the key prefix. :param arguments: dict of command line arguments :param long_keys: long-form keys to extract :param key_prefix: prefix stripped from the extracted keys :return dict: extracted arguments without the prefix
3.033424
3.002808
1.010196
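A doctest-style sketch of extract_arguments above with invented docopt-like arguments; only the listed long keys are kept and the '--' prefix is stripped.

>>> extract_arguments({'--major': False, '--new-version': '0.2.0', 'publish': True}, ['--major', '--new-version'])
{'major': False, 'new-version': '0.2.0'}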
tag_option = '--annotate' if probe.has_signing_key(context): tag_option = '--sign' shell.dry_run( TAG_TEMPLATE % (tag_option, context.new_version, context.new_version), context.dry_run, ) shell.dry_run('git push --tags', context.dry_run)
def tag_and_push(context)
Tags your git repo with the new version number
6.109601
5.272678
1.158728
if not dry_run: cmd_parts = command.split(' ') # http://plumbum.readthedocs.org/en/latest/local_commands.html#run-and-popen return local[cmd_parts[0]](cmd_parts[1:]) else: log.info('Dry run of %s, skipping' % command) return True
def dry_run(command, dry_run)
Executes a shell command unless the dry run option is set
3.849571
4.033144
0.954484
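A doctest-style sketch of the dry_run helper above; with the dry-run flag set it only logs and returns True, otherwise it splits the command and runs it through plumbum's local.

>>> dry_run('git push --tags', True)
True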
project_name = curdir config_path = Path(join(project_name, PROJECT_CONFIG_FILE)) if not exists(config_path): store_settings(DEFAULTS.copy()) return DEFAULTS return toml.load(io.open(config_path)) or {}
def project_config()
Deprecated
6.483994
5.976521
1.084911
version = semantic_version.Version(version) if major: version.major += 1 version.minor = 0 version.patch = 0 elif minor: version.minor += 1 version.patch = 0 elif patch: version.patch += 1 return str(version)
def increment(version, major=False, minor=False, patch=True)
Increment a semantic version :param version: str of the version to increment :param major: bool specifying major level version increment :param minor: bool specifying minor level version increment :param patch: bool specifying patch level version increment :return: str of the incremented version
1.642908
1.900236
0.864581
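Doctest-style examples of the increment helper above, assuming it behaves as written; the version strings are arbitrary.

>>> increment('1.2.3')
'1.2.4'
>>> increment('1.2.3', minor=True)
'1.3.0'
>>> increment('1.2.3', major=True)
'2.0.0'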
attributes.replace_attribute( context.module_name, '__version__', context.new_version, dry_run=context.dry_run ) log.info( 'Bumped version from %s to %s' % (context.current_version, context.new_version) )
def increment_version(context)
Increments the __version__ attribute of your module's __init__.
4.927061
4.221978
1.167003
response = _request('GET', href) response.raise_for_status() items.extend(response.json()) if "link" not in response.headers: return links = link_header.parse(response.headers["link"]) rels = {link.rel: link.href for link in links.links} if "next" in rels: _recursive_gh_get(rels["next"], items)
def _recursive_gh_get(href, items)
Recursively get list of GitHub objects. See https://developer.github.com/v3/guides/traversing-with-pagination/
2.242004
2.321362
0.965814
global progress_reporter_cls progress_reporter_cls.reportProgress = sys.stdout.isatty() and progress if progress_reporter_cls.reportProgress: progress_reporter_cls = _progress_bar global _github_token_cli_arg _github_token_cli_arg = github_token global _github_api_url _github_api_url = github_api_url
def main(github_token, github_api_url, progress)
A CLI to easily manage GitHub releases, assets and references.
3.687137
3.80503
0.969017
if new_release_sha is None: return refs = get_refs(repo_name, tags=True, pattern="refs/tags/%s" % tag_name) if not refs: return assert len(refs) == 1 # If sha associated with "<tag_name>" is up-to-date, we are done. previous_release_sha = refs[0]["object"]["sha"] if previous_release_sha == new_release_sha: return tmp_tag_name = tag_name + "-tmp" # If any, remove leftover temporary tag "<tag_name>-tmp" refs = get_refs(repo_name, tags=True, pattern="refs/tags/%s" % tmp_tag_name) if refs: assert len(refs) == 1 time.sleep(0.1) gh_ref_delete(repo_name, "refs/tags/%s" % tmp_tag_name, dry_run=dry_run) # Update "<tag_name>" release by associating it with the "<tag_name>-tmp" # and "<new_release_sha>". It will create the temporary tag. time.sleep(0.1) patch_release(repo_name, tag_name, tag_name=tmp_tag_name, target_commitish=new_release_sha, dry_run=dry_run) # Now "<tag_name>-tmp" references "<new_release_sha>", remove "<tag_name>" time.sleep(0.1) gh_ref_delete(repo_name, "refs/tags/%s" % tag_name, dry_run=dry_run) # Finally, update "<tag_name>-tmp" release by associating it with the # "<tag_name>" and "<new_release_sha>". time.sleep(0.1) patch_release(repo_name, tmp_tag_name, tag_name=tag_name, target_commitish=new_release_sha, dry_run=dry_run) # ... and remove "<tag_name>-tmp" time.sleep(0.1) gh_ref_delete(repo_name, "refs/tags/%s" % tmp_tag_name, dry_run=dry_run)
def _update_release_sha(repo_name, tag_name, new_release_sha, dry_run)
Update the commit associated with a given release tag. Since updating a tag commit is not directly possible, this function does the following steps: * set the release tag to ``<tag_name>-tmp`` and associate it with ``new_release_sha``. * delete tag ``refs/tags/<tag_name>``. * update the release tag to ``<tag_name>`` and associate it with ``new_release_sha``.
2.17288
2.214266
0.98131
funds_df = data.approved_funds_by_projects project = ( funds_df .loc[funds_df['PRONAC'] == pronac] ) project = project.to_dict('records')[0] info = ( data .approved_funds_agg.to_dict(orient="index") [project['idSegmento']] ) mean, std = info.values() outlier = gaussian_outlier.is_outlier(project['VlTotalAprovado'], mean, std) maximum_expected_funds = gaussian_outlier.maximum_expected_value(mean, std) return { 'is_outlier': outlier, 'total_approved_funds': project['VlTotalAprovado'], 'maximum_expected_funds': maximum_expected_funds }
def approved_funds(pronac, dt)
Checks whether a project's total approved value is an outlier relative to other projects in the same cultural segment. Dataframes: planilha_orcamentaria
5.202016
5.171587
1.005884
indicators = obj.indicator_set.all() if not indicators: value = 0.0 else: value = indicators.first().value return value
def complexidade(obj)
Returns a value that indicates project health. Currently the FinancialIndicator value is used, but in the future it may be computed from other indicators as well.
4.65029
4.192709
1.109137
indicators = project.indicator_set.all() indicators_detail = [(indicator_details(i) for i in indicators)][0] if not indicators: indicators_detail = [ {'FinancialIndicator': {'valor': 0.0, 'metrics': default_metrics, }, }] indicators_detail = convert_list_into_dict(indicators_detail) return {'pronac': project.pronac, 'nome': project.nome, 'indicadores': indicators_detail, }
def details(project)
Project detail endpoint. Returns the project pronac, name, and indicators with details.
8.09786
6.441978
1.257046
metrics = format_metrics_json(indicator) metrics_list = set(indicator.metrics .filter(name__in=metrics_name_map.keys()) .values_list('name', flat=True)) null_metrics = default_metrics.copy() for keys in metrics_list: null_metrics.pop(metrics_name_map[keys], None) metrics.update(null_metrics) return {type(indicator).__name__: { 'valor': indicator.value, 'metricas': metrics, }, }
def indicator_details(indicator)
Return a dictionary with all metrics in FinancialIndicator; metrics without values for that indicator are filled with default values.
5.650019
5.344788
1.057108
assert isinstance(metric, str) assert '.' in metric, 'metric must declare a namespace' try: func = self._metrics[metric] return func(pronac, self._data) except KeyError: raise InvalidMetricError('metric does not exist')
def get_metric(self, pronac, metric)
Get metric for the project with the given pronac number. Usage: >>> metrics.get_metric(pronac_id, 'finance.approved_funds')
5.467054
6.412118
0.852613
def decorator(func): name = func.__name__ key = f'{category}.{name}' self._metrics[key] = func return func return decorator
def register(self, category)
Usage: @metrics.register('finance') def approved_funds(pronac, data): return metric_from_data_and_pronac_number(data, pronac)
4.563823
3.205209
1.423877
# TODO: Remove except and use ignore_conflicts # on bulk_create when django 2.2. is released with open(MODEL_FILE, "r") as file_content: query = file_content.read() db = db_connector() query_result = db.execute_pandas_sql_query(query) db.close() try: projects = Project.objects.bulk_create( (Project(**vals) for vals in query_result.to_dict("records")), # ignore_conflicts=True available on django 2.2. ) indicators = [FinancialIndicator(project=p) for p in projects] FinancialIndicator.objects.bulk_create(indicators) except IntegrityError: # happens when there are duplicated projects LOG("Projects bulk_create failed, creating one by one...") with transaction.atomic(): if force_update: for item in query_result.to_dict("records"): p, _ = Project.objects.update_or_create(**item) FinancialIndicator.objects.update_or_create(project=p) else: for item in query_result.to_dict("records"): p, _ = Project.objects.get_or_create(**item) FinancialIndicator.objects.update_or_create(project=p)
def execute_project_models_sql_scripts(force_update=False)
Gets project information from the MinC database and converts it into this application's Project models. Uses bulk_create if the database is clean.
3.942353
3.798009
1.038005
missing = missing_metrics(metrics, pronacs) print(f"There are {len(missing)} missing metrics!") processors = mp.cpu_count() print(f"Using {processors} processors to calculate metrics!") indicators_qs = FinancialIndicator.objects.filter( project_id__in=[p for p, _ in missing] ) indicators = {i.project_id: i for i in indicators_qs} pool = mp.Pool(processors) results = [ pool.apply_async(create_metric, args=(indicators, metric_name, pronac)) for pronac, metric_name in missing ] calculated_metrics = [p.get() for p in results] if calculated_metrics: Metric.objects.bulk_create(calculated_metrics) print("Bulk completed") for indicator in indicators.values(): indicator.fetch_weighted_complexity() print("Finished update indicators!") pool.close() print("Finished metrics calculation!")
def create_finance_metrics(metrics: list, pronacs: list)
Creates metrics, creating an Indicator if it doesn't already exist. Metrics are created for projects that are in pronacs and saved in the database. args: metrics: list of names of metrics that will be calculated pronacs: pronacs in the dataset used to calculate those metrics
3.92227
3.861784
1.015663
dataframe = data.planilha_comprovacao project = dataframe.loc[dataframe['PRONAC'] == pronac] segment_id = project.iloc[0]["idSegmento"] segments_cache = data.segment_projects_agg segments_cache = segments_cache.to_dict(orient="index") mean = segments_cache[segment_id]["mean"] std = segments_cache[segment_id]["<lambda>"] total_receipts = project.shape[0] is_outlier = gaussian_outlier.is_outlier(total_receipts, mean, std) maximum_expected = gaussian_outlier.maximum_expected_value(mean, std) return { "is_outlier": is_outlier, "valor": total_receipts, "maximo_esperado": maximum_expected, "minimo_esperado": 0, }
def total_receipts(pronac, dt)
This metric calculates the project's total number of receipts and compares it to projects in the same segment. output: is_outlier: True if the project's receipt count is not compatible with other projects in the same segment total_receipts: absolute number of receipts maximum_expected_in_segment: maximum number of receipts expected in the segment
5.000904
4.16001
1.202138
all_metrics = FinancialIndicator.METRICS for key in all_metrics: df = getattr(data, key) pronac = 'PRONAC' if key == 'planilha_captacao': pronac = 'Pronac' pronacs = df[pronac].unique().tolist() create_finance_metrics(all_metrics[key], pronacs)
def load_project_metrics()
Creates project metrics for the financial indicator, updating them if they already exist.
9.193935
7.995182
1.149934
info = data.providers_info df = info[info['PRONAC'] == pronac] providers_count = data.providers_count.to_dict()[0] new_providers = [] segment_id = None for _, row in df.iterrows(): cnpj = row['nrCNPJCPF'] cnpj_count = providers_count.get(cnpj, 0) segment_id = row['idSegmento'] if cnpj_count <= 1: item_id = row['idPlanilhaAprovacao'] item_name = row['Item'] provider_name = row['nmFornecedor'] new_provider = { 'nome': provider_name, 'cnpj': cnpj, 'itens': { item_id: { 'nome': item_name, 'tem_comprovante': True } } } new_providers.append(new_provider) providers_amount = len(df['nrCNPJCPF'].unique()) new_providers_amount = len(new_providers) new_providers_percentage = new_providers_amount / providers_amount averages = data.average_percentage_of_new_providers.to_dict() segments_average = averages['segments_average_percentage'] all_projects_average = list(averages['all_projects_average'].values())[0] if new_providers: new_providers.sort(key=lambda provider: provider['nome']) return { 'lista_de_novos_fornecedores': new_providers, 'valor': new_providers_amount, 'new_providers_percentage': new_providers_percentage, 'is_outlier': new_providers_percentage > segments_average[segment_id], 'segment_average_percentage': segments_average[segment_id], 'all_projects_average_percentage': all_projects_average, }
def new_providers(pronac, dt)
Return the percentage of providers of a project that are new to the providers database.
3.247592
3.20025
1.014793
segments_percentages = {} all_projects_percentages = [] providers_count = providers_count.to_dict()[0] for _, items in providers_info.groupby('PRONAC'): cnpj_array = items['nrCNPJCPF'].unique() new_providers = 0 for cnpj in cnpj_array: cnpj_count = providers_count.get(cnpj, 0) if cnpj_count <= 1: new_providers += 1 segment_id = items.iloc[0]['idSegmento'] new_providers_percent = new_providers / cnpj_array.size segments_percentages.setdefault(segment_id, []) segments_percentages[segment_id].append(new_providers_percent) all_projects_percentages.append(new_providers_percent) segments_average_percentage = {} for segment_id, percentages in segments_percentages.items(): mean = np.mean(percentages) segments_average_percentage[segment_id] = mean return pd.DataFrame.from_dict({ 'segments_average_percentage': segments_average_percentage, 'all_projects_average': np.mean(all_projects_percentages) })
def average_percentage_of_new_providers(providers_info, providers_count)
Return the average percentage of new providers per segment and the average percentage of all projects.
2.640097
2.441161
1.081493
providers_count = {} cnpj_array = df.values for a in cnpj_array: cnpj = a[0] occurrences = providers_count.get(cnpj, 0) providers_count[cnpj] = occurrences + 1 return pd.DataFrame.from_dict(providers_count, orient='index')
def providers_count(df)
Returns total occurrences of each provider in the database.
2.834273
2.810792
1.008354
cnpj_list = [] for _, items in df.groupby('PRONAC'): unique_cnpjs = items['nrCNPJCPF'].unique() cnpj_list += list(unique_cnpjs) return pd.DataFrame(cnpj_list)
def all_providers_cnpj(df)
Return CPF/CNPJ of all providers in database.
6.302386
6.378539
0.988061
df = data.providers_info grouped = df.groupby('PRONAC') return grouped.get_group(pronac)
def get_providers_info(pronac)
Return all info about providers of a project with the given pronac.
8.903428
8.299947
1.072709
agg = df.groupby(group).agg(info) agg.columns = agg.columns.droplevel(0) return agg
def get_info(df, group, info=['mean', 'std'])
Aggregate mean and std with the given group.
2.689953
2.483105
1.083302
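A self-contained sketch of the aggregation performed by get_info above, on a toy dataframe with invented column names.

import pandas as pd

df = pd.DataFrame({'idSegmento': ['A', 'A', 'B'], 'valor': [1.0, 3.0, 5.0]})
agg = df.groupby('idSegmento').agg(['mean', 'std'])   # MultiIndex columns ('valor', 'mean'), ('valor', 'std')
agg.columns = agg.columns.droplevel(0)                # flatten to 'mean' and 'std'
print(agg)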
url_keys = { 'pronac': 'idPronac', 'uf': 'uf', 'product': 'produto', 'county': 'idmunicipio', 'item_id': 'idPlanilhaItem', 'stage': 'etapa', } if df_values: values = [item[v] for v in df_values] url_values = dict( zip(url_keys.keys(), values) ) else: url_values = { "pronac": item["idPronac"], "uf": item["UfItem"], "product": item["idProduto"], "county": item["cdCidade"], "item_id": item["idPlanilhaItens"], "stage": item["cdEtapa"], } item_data = [(value, url_values[key]) for key, value in url_keys.items()] url = prefix for k, v in item_data: url += f'/{str(k)}/{str(v)}' return url
def get_salic_url(item, prefix, df_values=None)
Build a SALIC URL for the given item.
3.264235
3.221401
1.013297
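A doctest-style sketch of get_salic_url above with a made-up item record; the key names follow the code, the values are arbitrary.

>>> item = {'idPronac': 123, 'UfItem': 'SP', 'idProduto': 1, 'cdCidade': 100, 'idPlanilhaItens': 555, 'cdEtapa': 2}
>>> get_salic_url(item, '/prestacao-contas/analisar/comprovante')
'/prestacao-contas/analisar/comprovante/idPronac/123/uf/SP/produto/1/idmunicipio/100/idPlanilhaItem/555/etapa/2'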
df = data.planilha_projetos cpf_cnpj = None row_df = df[df['PRONAC'].astype(str) == str(pronac)] if not row_df.empty: cpf_cnpj = row_df.iloc[0]['CgcCpf'] return str(cpf_cnpj)
def get_cpf_cnpj_by_pronac(pronac)
Return the CPF/CNPJ of the proponent of the project with the given pronac.
4.282752
3.966133
1.079831
pronac_id = str(item['idPronac']) item_id = str(item["idPlanilhaItens"]) combined_id = f'{pronac_id}/{item_id}' return combined_id in data.receipt.index
def has_receipt(item)
Verify whether an item has a receipt.
9.23429
8.773776
1.052488
df = data.all_items return ( df[df['idSegmento'] == str(segment_id)] .drop_duplicates(["PRONAC"]) .values )
def get_segment_projects(segment_id)
Returns all projects from a segment.
17.715309
16.727488
1.059054
mutated_df = df[['IdPRONAC', 'idPlanilhaItem']].astype(str) mutated_df['pronac_planilha_itens'] = ( mutated_df['IdPRONAC'] + '/' + mutated_df['idPlanilhaItem'] ) return ( mutated_df .set_index(['pronac_planilha_itens']) )
def receipt(df)
Return a dataframe used to verify whether an item has a receipt.
5.685514
5.426241
1.047781
if pickles: save_sql_to_files(f) if models: if f: manage(ctx, 'create_models_from_sql --force True', env={}) else: manage(ctx, 'create_models_from_sql', env={})
def update_data(ctx, models=True, pickles=False, f=False)
Updates local Django db projects and pickle files using the salic database from MinC. Pickles are saved in /data/raw/ from sql queries in /data/scripts/. Models are created from /data/scripts/models/
6.51684
5.857473
1.112568
if f: manage(ctx, 'create_models_from_sql --force True', env={}) else: manage(ctx, 'create_models_from_sql', env={})
def update_models(ctx, f=False)
Updates local Django db project models using the salic database from MinC
6.66271
6.313838
1.055255
project = Project.objects.get(pronac=project) indicator, _ = (FinancialIndicator .objects.update_or_create(project=project)) indicator.is_valid = is_valid if indicator.is_valid: p_metrics = metrics_calc.get_project(project.pronac) for metric_name in metrics_list: print("calculando a metrica ", metric_name) x = getattr(p_metrics.finance, metric_name) print("do projeto: ", project) Metric.objects.create_metric(metric_name, x, indicator) indicator.fetch_weighted_complexity() return indicator
def create_indicator(self, project, is_valid, metrics_list)
Creates a FinancialIndicator object for a project, calculating its metrics and the indicator value
7.11271
6.260557
1.136115
ext_size = len(SQL_EXTENSION) path = DATA_PATH / 'scripts' save_dir = DATA_PATH / "raw" for file in os.listdir(path): if file.endswith(SQL_EXTENSION): file_path = os.path.join(save_dir, file[:-ext_size] + '.' + FILE_EXTENSION) if not os.path.isfile(file_path) or overwrite: query_result = make_query(path / file) save_dataframe_as_pickle(query_result, file_path) else: print(("file {} already exists, if you would like to update" " it, use -f flag\n").format(file_path))
def save_sql_to_files(overwrite=False)
Executes every .sql file in /data/scripts/ using the salic db VPN and then saves pickle files into /data/raw/
3.956542
3.806432
1.039436
# TODO: implement metrics recalculation max_total = sum( [self.metrics_weights[metric_name] for metric_name in self.metrics_weights] ) total = 0 if recalculate_metrics: self.calculate_indicator_metrics() for metric in self.metrics.all(): if metric.name in self.metrics_weights and metric.is_outlier: total += self.metrics_weights[metric.name] value = total / max_total final_value = "{:.1f}".format(value * 10) if final_value[-1] == "0": final_value = "{:.0f}".format(value * 10) final_value = int(final_value) else: final_value = float(final_value) self.value = float(final_value) self.is_valid = True self.updated_at = datetime.datetime.now() self.save() return final_value
def fetch_weighted_complexity(self, recalculate_metrics=False)
Calculates the indicator value according to metric weights, using the metrics stored in the database. args: recalculate_metrics: if True, metric values are updated before the weights are applied
3.067878
2.792649
1.098555
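A self-contained sketch of the weighting arithmetic used by fetch_weighted_complexity above; the weights and the set of outlier metrics are invented for illustration.

metrics_weights = {'item_prices': 3, 'verified_funds': 2, 'raised_funds': 1}   # hypothetical weights
outlier_metrics = {'item_prices', 'raised_funds'}                              # metrics flagged as outliers

max_total = sum(metrics_weights.values())                                          # 6
total = sum(w for name, w in metrics_weights.items() if name in outlier_metrics)   # 4
value = total / max_total                                                          # 0.666...
print('{:.1f}'.format(value * 10))                                                 # 6.7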
threshold = 0.1 outlier_info = get_outliers_percentage(pronac) outlier_info['is_outlier'] = outlier_info['percentage'] > threshold outlier_info['maximum_expected'] = threshold * outlier_info['total_items'] return outlier_info
def item_prices(pronac, data)
Verify whether a project is an outlier compared to the other projects in its segment, based on the prices of purchased items.
4.38345
4.071906
1.076511
if (segment_id, item_id) not in df.index: return False mean = df.loc[(segment_id, item_id)]['mean'] std = df.loc[(segment_id, item_id)]['std'] return gaussian_outlier.is_outlier( x=price, mean=mean, standard_deviation=std )
def is_outlier(df, item_id, segment_id, price)
Verify whether an item is an outlier compared to other occurrences of the same item, based on its price. Args: item_id: idPlanilhaItens segment_id: idSegmento price: VlUnitarioAprovado
2.653478
3.059657
0.867247
df = ( raw_df[['idSegmento', 'idPlanilhaItens', 'VlUnitarioAprovado']] .groupby(by=['idSegmento', 'idPlanilhaItens']) .agg([np.mean, lambda x: np.std(x, ddof=0)]) ) df.columns = df.columns.droplevel(0) return ( df .rename(columns={'<lambda>': 'std'}) )
def aggregated_relevant_items(raw_df)
Aggregation to calculate mean and std.
4.346748
4.046689
1.074149
start_date = datetime(2013, 1, 1) df['DataProjeto'] = pd.to_datetime(df['DataProjeto']) # get only projects newer than start_date # and items with price > 0 df = df[df.DataProjeto >= start_date] df = df[df.VlUnitarioAprovado > 0.0] return df
def relevant_items(df)
Dataframe with items used by cultural projects, filtered by date and price.
5.270566
4.712513
1.118419
df = ( raw_df [['PRONAC', 'idPlanilhaAprovacao', 'Item', 'idPlanilhaItens', 'VlUnitarioAprovado', 'idSegmento', 'DataProjeto', 'idPronac', 'UfItem', 'idProduto', 'cdCidade', 'cdEtapa']] ).copy() df['VlUnitarioAprovado'] = df['VlUnitarioAprovado'].apply(pd.to_numeric) return df
def items_with_price(raw_df)
Dataframe with price as number.
9.322672
9.317584
1.000546
items = ( data.items_with_price .groupby(['PRONAC']) .get_group(pronac) ) df = data.aggregated_relevant_items outlier_items = {} url_prefix = '/prestacao-contas/analisar/comprovante' for _, item in items.iterrows(): item_id = item['idPlanilhaItens'] price = item['VlUnitarioAprovado'] segment_id = item['idSegmento'] item_name = item['Item'] if is_outlier(df, item_id, segment_id, price): outlier_items[item_id] = { 'name': item_name, 'salic_url': get_salic_url(item, url_prefix), 'has_receipt': has_receipt(item) } total_items = items.shape[0] outliers_amount = len(outlier_items) percentage = outliers_amount / total_items return { 'items': outlier_items, 'valor': outliers_amount, 'total_items': total_items, 'percentage': percentage, 'is_outlier': outliers_amount > 0, }
def get_outliers_percentage(pronac)
Returns the percentage of items of the project that are outliers.
4.225052
4.214628
1.002473
df = data.items_by_project project = df.loc[df['PRONAC'] == pronac] seg = project.iloc[0]["idSegmento"] info = data.items_by_project_agg.to_dict(orient="index")[seg] mean, std = info.values() threshold = mean + 1.5 * std project_items_count = project.shape[0] is_outlier = project_items_count > threshold return { 'is_outlier': is_outlier, 'valor': project_items_count, 'maximo_esperado': mean, 'desvio_padrao': std, }
def number_of_items(pronac, dt)
This metric calculates the project's number of declared items and compares it to projects in the same segment. output: is_outlier: True if the project's number of items is not compatible with other projects in the same segment valor: absolute number of items maximo_esperado: mean number of items in the segment desvio_padrao: standard deviation of the number of items in the project's segment
4.795186
3.288674
1.458091
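A tiny worked example of the outlier rule used by number_of_items above (threshold = mean + 1.5 * std); the numbers are invented.

mean, std = 120.0, 40.0
threshold = mean + 1.5 * std             # 180.0
project_items_count = 200
print(project_items_count > threshold)   # True -> flagged as an outlier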
percentage = 0.1 return ( df .groupby(['idSegmento', 'idPlanilhaItens']) .count() .rename(columns={'PRONAC': 'itemOccurrences'}) .sort_values('itemOccurrences', ascending=False) .reset_index(['idSegmento', 'idPlanilhaItens']) .groupby('idSegmento') .apply(lambda x: x[None: max(2, int(len(x) * percentage))]) .reset_index(['idSegmento'], drop=True) .set_index(['idSegmento']) )
def common_items(df)
Returns the items that are common in all the segments, in the format | idSegmento | idPlanilhaItens |.
4.431274
3.245601
1.365317
if len(seg_common_items) == 0: return 0 project_items = get_project_items(pronac).values[:, 0] project_items_amount = len(project_items) if project_items_amount == 0: return 1 common_found_items = sum( seg_common_items.isin(project_items)['idPlanilhaItens'] ) return common_found_items / project_items_amount
def common_items_percentage(pronac, seg_common_items)
Returns the percentage of items in a project that are common in the cultural segment.
4.137002
3.856675
1.072686
segments = common_items.index.unique() metrics = {} for seg in segments: seg_common_items = segment_common_items(seg) projects = get_segment_projects(seg) metric_values = [] for proj in projects: pronac = proj[0] percentage = common_items_percentage(pronac, seg_common_items) metric_values.append(percentage) metrics[seg] = { 'mean': np.mean(metric_values), 'std': np.std(metric_values) } return pd.DataFrame.from_dict(metrics, orient='index')
def common_items_metrics(all_items, common_items)
Calculates the percentage of common items for each project in each segment and calculates the mean and std of this percentage for each segment.
3.276222
2.917948
1.122783
df = data.all_items return ( df[df['PRONAC'] == pronac] .drop(columns=['PRONAC', 'idSegmento']) )
def get_project_items(pronac)
Returns all items from a project.
8.785918
8.124227
1.081447
df = data.common_items return ( df .loc[str(segment_id)] .reset_index(drop=1) .drop(columns=["itemOccurrences"]) )
def segment_common_items(segment_id)
Returns all the common items in a segment.
7.082325
7.042107
1.005711
segment_id = get_segment_id(str(pronac)) seg_common_items = ( segment_common_items(segment_id) .set_index('idPlanilhaItens') .index ) project_items = ( get_project_items(pronac) .set_index('idPlanilhaItens') .index ) diff = list(project_items.difference(seg_common_items)) return ( data.distinct_items .loc[diff] .to_dict()['Item'] )
def get_uncommon_items(pronac)
Return all uncommon items of a project (related to segment common items).
4.576333
3.964573
1.154307
result = uncommon_items url_prefix = '/prestacao-contas/analisar/comprovante' for _, item in filtered_items.iterrows(): item_id = item['idPlanilhaItens'] item_name = uncommon_items[item_id] result[item_id] = { 'name': item_name, 'salic_url': get_salic_url(item, url_prefix), 'has_receipt': has_receipt(item) } return result
def add_info_to_uncommon_items(filtered_items, uncommon_items)
Add extra info to the uncommon items.
5.647473
5.70158
0.99051
segment_id = get_segment_id(str(pronac)) metrics = data.common_items_metrics.to_dict(orient='index')[segment_id] ratio = common_items_percentage(pronac, segment_common_items(segment_id)) # constant that defines the threshold to verify if a project # is an outlier. k = 1.5 threshold = metrics['mean'] - k * metrics['std'] uncommon_items = get_uncommon_items(pronac) pronac_filter = data.all_items['PRONAC'] == pronac uncommon_items_filter = ( data.all_items['idPlanilhaItens'] .isin(uncommon_items) ) items_filter = (pronac_filter & uncommon_items_filter) filtered_items = ( data .all_items[items_filter] .drop_duplicates(subset='idPlanilhaItens') ) uncommon_items = add_info_to_uncommon_items(filtered_items, uncommon_items) return { 'is_outlier': ratio < threshold, 'valor': ratio, 'maximo_esperado': metrics['mean'], 'desvio_padrao': metrics['std'], 'items_incomuns': uncommon_items, 'items_comuns_que_o_projeto_nao_possui': get_common_items_not_present(pronac), }
def common_items_ratio(pronac, dt)
Calculates the common items on projects in a cultural segment, calculates the uncommon items on projects in a cultural segment, and verifies whether a project is an outlier compared to the other projects in its segment.
4.281772
3.943729
1.085717
dataframe = data.planilha_comprovacao project = dataframe.loc[dataframe['PRONAC'] == pronac] segment_id = project.iloc[0]["idSegmento"] pronac_funds = project[ ["idPlanilhaAprovacao", "PRONAC", "vlComprovacao", "idSegmento"] ] funds_grp = pronac_funds.drop(columns=["idPlanilhaAprovacao"]).groupby( ["PRONAC"] ) project_funds = funds_grp.sum().loc[pronac]["vlComprovacao"] segments_info = data.verified_funds_by_segment_agg.to_dict(orient="index") mean = segments_info[segment_id]["mean"] std = segments_info[segment_id]["std"] is_outlier = gaussian_outlier.is_outlier(project_funds, mean, std) maximum_expected_funds = gaussian_outlier.maximum_expected_value(mean, std) return { "is_outlier": is_outlier, "valor": project_funds, "maximo_esperado": maximum_expected_funds, "minimo_esperado": 0, }
def verified_funds(pronac, dt)
Responsible for detecting anomalies in a project's total verified funds.
4.268046
4.162892
1.02526
df['CaptacaoReal'] = df['CaptacaoReal'].apply( pd.to_numeric ) return ( df[['Pronac', 'CaptacaoReal']] .groupby(['Pronac']) .sum() )
def raised_funds_by_project(df)
Raised funds organized by project.
8.513004
8.019538
1.061533
project_raised_funds = data.raised_funds_by_project.loc[pronac]['CaptacaoReal'] dataframe = data.planilha_comprovacao project_verified = dataframe.loc[dataframe['PRONAC'] == str(pronac)] if project_verified.empty: project_verified_funds = 0 else: pronac_funds = project_verified[ ["idPlanilhaAprovacao", "PRONAC", "vlComprovacao", "idSegmento"] ] funds_grp = pronac_funds.drop(columns=["idPlanilhaAprovacao"]).groupby( ["PRONAC"] ) project_verified_funds = funds_grp.sum().loc[pronac]["vlComprovacao"] to_verify_value = project_raised_funds - float(project_verified_funds) is_outlier = to_verify_value != 0 return { 'is_outlier': is_outlier, 'valor': to_verify_value, 'valor_captado': project_raised_funds, 'valor_comprovado': project_verified_funds, 'minimo_esperado': 0, }
def to_verify_funds(pronac, dt)
Checks how much money is left for the project to verify, using raised_funds - verified_funds. This value can be negative (a project can verify more money than the value raised).
4.086668
3.948051
1.03511
cpf_cnpj = get_cpf_cnpj_by_pronac(pronac) proponent_submitted_projects = {} proponent_analyzed_projects = {} if cpf_cnpj: submitted_projects = get_proponent_submitted_projects(cpf_cnpj) analyzed_projects = get_proponent_analyzed_projects(cpf_cnpj) try: proponent_submitted_projects = { 'number_of_projects': submitted_projects['num_pronacs'], 'pronacs_of_this_proponent': submitted_projects['pronac_list'] } except KeyError: pass try: proponent_analyzed_projects = { 'number_of_projects': analyzed_projects['num_pronacs'], 'pronacs_of_this_proponent': analyzed_projects['pronac_list'] } except KeyError: pass return { 'cpf_cnpj': cpf_cnpj, 'valor': len(proponent_submitted_projects), 'projetos_submetidos': proponent_submitted_projects, 'projetos_analizados': proponent_analyzed_projects, }
def proponent_projects(pronac, data)
Checks the CNPJ/CPF of the proponent of project with the given pronac and returns all the projects that have been submitted by this proponent and all projects that have already been analyzed.
2.024149
1.915527
1.056706
df = raw_df[['PRONAC', 'proponenteCgcCpf']] analyzed_projects = df.groupby('proponenteCgcCpf')[ 'PRONAC' ].agg(['unique', 'nunique']) analyzed_projects.columns = ['pronac_list', 'num_pronacs'] return analyzed_projects
def analyzed_projects(raw_df)
Return all projects that were analyzed.
7.180153
6.907331
1.039497
df = raw_df.astype({'PRONAC': str, 'CgcCpf': str}) submitted_projects = df.groupby('CgcCpf')[ 'PRONAC' ].agg(['unique', 'nunique']) submitted_projects.columns = ['pronac_list', 'num_pronacs'] return submitted_projects
def submitted_projects(raw_df)
Return all submitted projects.
6.900997
6.640459
1.039235
csv_path = os.path.join(DATA_FOLDER, csv_name) csv = pd.read_csv(csv_path, low_memory=False, usecols=usecols, encoding="utf-8") return csv
def read_csv(csv_name, usecols=None)
Returns a DataFrame from a .csv file stored in /data/raw/
2.529575
2.569948
0.98429
csv_path = os.path.join(DATA_FOLDER, csv_name) csv = pd.read_csv( csv_path, usecols=usecols, encoding="utf-8", dtype=column_types_dict, engine="python", ) for key_column, val_type in column_types_dict.items(): if val_type == str: csv[key_column] = csv[key_column].str.strip() return csv
def read_csv_with_different_type(csv_name, column_types_dict, usecols=None)
Returns a DataFrame from a .csv file stored in /data/raw/. Reads the CSV as string.
2.188866
2.147208
1.019401
csv_path = os.path.join(DATA_FOLDER, csv_name) csv = pd.read_csv(csv_path, low_memory=False, usecols=usecols) for column in integer_columns: csv = csv[pd.to_numeric(csv[column], errors="coerce").notnull()] csv[integer_columns] = csv[integer_columns].apply(pd.to_numeric) return csv
def read_csv_as_integer(csv_name, integer_columns, usecols=None)
Returns a DataFrame from a .csv file stored in /data/raw/. Converts columns specified by 'integer_columns' to integer.
2.069399
2.006403
1.031397
df = verified_repeated_receipts_for_pronac(pronac) comprovantes_cheque = df[df['tpFormaDePagamento'] == 1.0] return metric_return(comprovantes_cheque)
def check_receipts(pronac, dt)
Checks how many items are in the same receipt when the payment type is check - is_outlier: True if any receipt has more than one item - itens_que_compartilham_comprovantes: list of items that share a receipt
14.824362
13.341099
1.11118
df = verified_repeated_receipts_for_pronac(pronac) comprovantes_transferencia = df[df['tpFormaDePagamento'] == 2.0] return metric_return(comprovantes_transferencia)
def transfer_receipts(pronac, dt)
Checks how many items are in the same receipt when the payment type is bank transfer - is_outlier: True if any receipt has more than one item - itens_que_compartilham_comprovantes: list of items that share a receipt
13.299489
12.094478
1.099633
df = verified_repeated_receipts_for_pronac(pronac) comprovantes_saque = df[df['tpFormaDePagamento'] == 3.0] return metric_return(comprovantes_saque)
def money_receipts(pronac, dt)
Checks how many items are in the same receipt when the payment type is withdraw/money - is_outlier: True if any receipt has more than one item - itens_que_compartilham_comprovantes: list of items that share a receipt
14.961937
15.003139
0.997254
is_outlier, mean, std, total_raised_funds = get_outlier_info(pronac) maximum_expected_funds = gaussian_outlier.maximum_expected_value(mean, std) return { 'is_outlier': is_outlier, 'total_raised_funds': total_raised_funds, 'maximum_expected_funds': maximum_expected_funds }
def raised_funds(pronac, data)
Returns the total raised funds of a project with the given pronac and if this project is an outlier based on this value.
3.432835
3.329156
1.031143
grouped = df.groupby('Segmento') aggregated = grouped.agg(['mean', 'std']) aggregated.columns = aggregated.columns.droplevel(0) return aggregated
def segment_raised_funds_average(df)
Return the mean and standard deviation of raised funds per segment.
4.372418
4.396574
0.994506
df = data.planilha_captacao raised_funds_averages = data.segment_raised_funds_average.to_dict('index') segment_id = df[df['Pronac'] == pronac]['Segmento'].iloc[0] mean = raised_funds_averages[segment_id]['mean'] std = raised_funds_averages[segment_id]['std'] project_raised_funds = get_project_raised_funds(pronac) outlier = gaussian_outlier.is_outlier(project_raised_funds, mean, std) return (outlier, mean, std, project_raised_funds)
def get_outlier_info(pronac)
Return if a project with the given pronac is an outlier based on raised funds.
4.141648
3.614941
1.145703
items_df = data.approved_verified_items items_df = items_df.loc[items_df['PRONAC'] == pronac] items_df[[APPROVED_COLUMN, VERIFIED_COLUMN]] = items_df[ [APPROVED_COLUMN, VERIFIED_COLUMN] ].astype(float) items_df["Item"] = items_df["Item"].str.replace("\r", "") items_df["Item"] = items_df["Item"].str.replace("\n", "") items_df["Item"] = items_df["Item"].str.replace('"', "") items_df["Item"] = items_df["Item"].str.replace("'", "") items_df["Item"] = items_df["Item"].str.replace("\\", "") THRESHOLD = 1.5 bigger_than_approved = items_df[VERIFIED_COLUMN] > ( items_df[APPROVED_COLUMN] * THRESHOLD ) features = items_df[bigger_than_approved] outlier_items = outlier_items_(features) features_size = features.shape[0] is_outlier = features_size > 0 return { "is_outlier": is_outlier, "valor": features_size, "maximo_esperado": MIN_EXPECTED_ITEMS, "minimo_esperado": MAX_EXPECTED_ITEMS, "lista_de_comprovantes": outlier_items, "link_da_planilha": "http://salic.cultura.gov.br/projeto/#/{0}/relacao-de-pagamento".format(pronac) }
def verified_approved(pronac, dt)
This metric compares budgetary items of SALIC projects in terms of verified versus approved value. Items that have vlComprovacao > vlAprovacao * 1.5 are considered outliers. output: is_outlier: True if any item is an outlier valor: absolute number of items that are outliers outlier_items: outlier items detail
3.436749
2.958867
1.161509
for file in os.listdir(path): base, ext = os.path.splitext(file) if ext != ".csv": continue LOG(f"converting {file} to pickle") df = pd.read_csv(path / file, low_memory=True) WRITE_DF(df, path / (base + "." + FILE_EXTENSION), **WRITE_DF_OPTS) if clean: os.remove(path / file) LOG(f"removed {file}")
def csv_to_pickle(path=ROOT / "raw", clean=False)
Convert all CSV files in path to pickle.
3.251387
3.140841
1.035196
path = "%s.%s" % (self._root / "processed" / loc, FILE_EXTENSION) WRITE_DF(df, path, **WRITE_DF_OPTS) self._cache[loc] = df
def store(self, loc, df)
Store dataframe in the given location. Store some arbitrary dataframe: >>> data.store('my_data', df) Now recover it from the global store. >>> data.my_data ...
8.826602
12.440313
0.709516
if self.gc_book: self.gc_book.close() if self.pricedb_session: self.pricedb_session.close()
def close_databases(self)
Close all database sessions
5.578297
4.984272
1.11918
book = self.get_gc_book() collection = SecuritiesAggregate(book) sec = collection.get_aggregate_for_symbol(symbol) quantity = sec.get_quantity() return quantity
def load_stock_quantity(self, symbol: str) -> Decimal
Retrieves the quantity held of the given stock symbol
8.911856
8.548979
1.042447
if not self.gc_book: gc_db = self.config.get(ConfigKeys.gnucash_book_path) if not gc_db: raise AttributeError("GnuCash book path not configured.") # if the path is not absolute, resolve it relative to the installed package, then check that the file exists if not os.path.isabs(gc_db): gc_db = resource_filename( Requirement.parse("Asset-Allocation"), gc_db) if not os.path.exists(gc_db): raise ValueError(f"Invalid GnuCash book path {gc_db}") self.gc_book = open_book(gc_db, open_if_lock=True) return self.gc_book
def get_gc_book(self)
Returns the GnuCash book, opening it on first use
4.072523
3.781544
1.076947