code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
space = None variants = { # to be expanded once additional spaces are supported 'space1': ['fsaverage5', 'MNI152NLin2009cAsym'], 'space2': ['fsaverage6', 'MNI152NLin2009cAsym'], } for sp, targets in variants.items(): if all(target in targets for target in [self.inputs.surface_target, self.inputs.volume_target]): space = sp if space is None: raise NotImplementedError variant_key = os.path.abspath('dtseries_variant.json') with open(variant_key, 'w') as fp: json.dump({space: variants[space]}, fp) return variant_key, space
def _define_variant(self)
Assign arbitrary label to combination of CIFTI spaces
4.373242
3.908627
1.118869
if (self.inputs.surface_target == "fsnative" or self.inputs.volume_target != "MNI152NLin2009cAsym"): # subject space is not supported yet raise NotImplementedError annotation_files = sorted(glob(os.path.join(self.inputs.subjects_dir, self.inputs.surface_target, 'label', '*h.aparc.annot'))) if not annotation_files: raise IOError("Freesurfer annotations for %s not found in %s" % ( self.inputs.surface_target, self.inputs.subjects_dir)) label_file = str(get_template( 'MNI152NLin2009cAsym', resolution=2, desc='DKT31', suffix='dseg')) return annotation_files, label_file
def _fetch_data(self)
Converts inputspec to files
4.604553
4.357285
1.056748
''' generates a report showing slices from each axis of an arbitrary volume of in_file, with the resulting binary brain mask overlaid ''' self._anat_file = self.inputs.in_file self._mask_file = self.aggregate_outputs(runtime=runtime).mask_file self._seg_files = [self._mask_file] self._masked = self.inputs.mask NIWORKFLOWS_LOG.info('Generating report for BET. file "%s", and mask file "%s"', self._anat_file, self._mask_file) return super(BETRPT, self)._post_run_hook(runtime)
def _post_run_hook(self, runtime)
generates a report showing slices from each axis of an arbitrary volume of in_file, with the resulting binary brain mask overlaid
8.91142
4.452518
2.001434
''' generates a report showing slices from each axis ''' brain_extraction_mask = self.aggregate_outputs(runtime=runtime).BrainExtractionMask if isdefined(self.inputs.keep_temporary_files) and self.inputs.keep_temporary_files == 1: self._anat_file = self.aggregate_outputs(runtime=runtime).N4Corrected0 else: self._anat_file = self.inputs.anatomical_image self._mask_file = brain_extraction_mask self._seg_files = [brain_extraction_mask] self._masked = False NIWORKFLOWS_LOG.info('Generating report for ANTS BrainExtraction. file "%s", mask "%s"', self._anat_file, self._mask_file) return super(BrainExtractionRPT, self)._post_run_hook(runtime)
def _post_run_hook(self, runtime)
generates a report showing slices from each axis
6.353986
5.279508
1.203519
''' generates a report showing slices from each axis of an arbitrary volume of in_file, with the resulting binary brain mask overlaid ''' self._anat_file = self.inputs.in_file self._mask_file = self.aggregate_outputs(runtime=runtime).mask_file self._seg_files = [self._mask_file] self._masked = True NIWORKFLOWS_LOG.info( 'Generating report for nilearn.compute_epi_mask. file "%s", and mask file "%s"', self._anat_file, self._mask_file) return super(ComputeEPIMask, self)._post_run_hook(runtime)
def _post_run_hook(self, runtime)
generates a report showing slices from each axis of an arbitrary volume of in_file, with the resulting binary brain mask overlaid
8.269335
4.219032
1.960008
''' generates a report showing slices from each axis ''' assert len(self.inputs.mask_files) == 1, \ "ACompCorRPT only supports a single input mask. " \ "A list %s was found." % self.inputs.mask_files self._anat_file = self.inputs.realigned_file self._mask_file = self.inputs.mask_files[0] self._seg_files = self.inputs.mask_files self._masked = False NIWORKFLOWS_LOG.info('Generating report for aCompCor. file "%s", mask "%s"', self.inputs.realigned_file, self._mask_file) return super(ACompCorRPT, self)._post_run_hook(runtime)
def _post_run_hook(self, runtime)
generates a report showing slices from each axis
6.057389
4.898936
1.23647
''' generates a report showing slices from each axis ''' high_variance_masks = self.aggregate_outputs(runtime=runtime).high_variance_masks assert not isinstance(high_variance_masks, list),\ "TCompCorRPT only supports a single output high variance mask. " \ "A list %s was found." % str(high_variance_masks) self._anat_file = self.inputs.realigned_file self._mask_file = high_variance_masks self._seg_files = [high_variance_masks] self._masked = False NIWORKFLOWS_LOG.info('Generating report for tCompCor. file "%s", mask "%s"', self.inputs.realigned_file, self.aggregate_outputs(runtime=runtime).high_variance_masks) return super(TCompCorRPT, self)._post_run_hook(runtime)
def _post_run_hook(self, runtime)
generates a report showing slices from each axis
7.202376
5.976713
1.205073
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded keywords. cfg = get_config() verbose = cfg.verbose try: return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass root = os.path.realpath(__file__) root_dir = os.path.dirname(root) if os.path.isfile(os.path.join(root_dir, 'VERSION')): with open(os.path.join(root_dir, 'VERSION')) as vfile: version = vfile.readline().strip() return { "version": version, "full-revisionid": None, "dirty": None, "error": None, "date": None } try: # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. for i in cfg.versionfile_source.split('/'): root = os.path.dirname(root) except NameError: return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to find root of source tree", "date": None} try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) return render(pieces, cfg.style) except NotThisMethod: pass try: if cfg.parentdir_prefix: return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) except NotThisMethod: pass return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version", "date": None}
def get_versions()
Get version information or return default if unable to do so.
4.396214
4.362468
1.007736
out_file = fname_presuffix(in_file, suffix='_ras', newpath=newpath) nb.as_closest_canonical(nb.load(in_file)).to_filename(out_file) return out_file
def reorient(in_file, newpath=None)
Reorient Nifti files to RAS
2.60884
2.247648
1.160698
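A minimal usage sketch for ``reorient``, assuming the function is importable from its defining module; the file name is hypothetical:

out = reorient('sub-01_T1w.nii.gz')   # hypothetical path
# writes sub-01_T1w_ras.nii.gz next to the input (or in newpath, if given) and returns that path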
# Let nibabel convert from affine to quaternions, and recover xform tmp_header = img.header.copy() tmp_header.set_qform(img.affine) xform = tmp_header.get_qform() xform_code = 2 # Check desired codes qform, qform_code = img.get_qform(coded=True) sform, sform_code = img.get_sform(coded=True) if all((qform is not None and np.allclose(qform, xform), sform is not None and np.allclose(sform, xform), int(qform_code) == xform_code, int(sform_code) == xform_code)): return img new_img = img.__class__(img.get_data(), xform, img.header) # Unconditionally set sform/qform new_img.set_sform(xform, xform_code) new_img.set_qform(xform, xform_code) return new_img
def normalize_xform(img)
Set identical, valid qform and sform matrices in an image Selects the best available affine (sform > qform > shape-based), and coerces it to be qform-compatible (no shears). The resulting image represents this same affine as both qform and sform, and is marked as NIFTI_XFORM_ALIGNED_ANAT, indicating that it is valid, not aligned to template, and not necessarily preserving the original coordinates. If header would be unchanged, returns input image.
2.750549
2.694984
1.020618
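A short usage sketch for ``normalize_xform``, assuming the function and its ``nibabel``/``numpy`` dependencies are importable; the file names are hypothetical:

import nibabel as nb

img = nb.load('sub-01_T1w.nii.gz')   # hypothetical input
fixed = normalize_xform(img)         # qform and sform now agree, code NIFTI_XFORM_ALIGNED_ANAT (2)
if fixed is not img:                 # the input object is returned unchanged when headers already match
    fixed.to_filename('sub-01_T1w_fixedhdr.nii.gz')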
import os import numpy as np import nibabel as nb from nipype.utils.filemanip import fname_presuffix out_file = fname_presuffix(in_file, suffix='_demeaned', newpath=os.getcwd()) nii = nb.load(in_file) msk = nb.load(in_mask).get_data() data = nii.get_data() if only_mask: data[msk > 0] -= np.median(data[msk > 0]) else: data -= np.median(data[msk > 0]) nb.Nifti1Image(data, nii.affine, nii.header).to_filename( out_file) return out_file
def demean(in_file, in_mask, only_mask=False, newpath=None)
Demean ``in_file`` within the mask defined by ``in_mask``
1.754064
1.772482
0.989609
import os import numpy as np import nibabel as nb nii = nb.load(in_file) data = np.ones(nii.shape, dtype=float) * value out_file = os.path.join(newpath or os.getcwd(), "filled.nii.gz") nii = nb.Nifti1Image(data, nii.affine, nii.header) nii.set_data_dtype(dtype) nii.to_filename(out_file) return out_file
def nii_ones_like(in_file, value, dtype, newpath=None)
Create a NIfTI file filled with ``value``, matching properties of ``in_file``
1.894142
1.910428
0.991475
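A hypothetical call for ``nii_ones_like``; the input path is illustrative and the output is written to the current directory as ``filled.nii.gz``:

ones_file = nii_ones_like('sub-01_task-rest_bold.nii.gz', value=1, dtype='uint8')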
workflow = pe.Workflow(name=name) inputnode = pe.Node(niu.IdentityInterface(fields=['in_file']), name='inputnode') outputnode = pe.Node(niu.IdentityInterface( fields=['out_file']), name='outputnode') deoblique = pe.Node(afni.Refit(deoblique=True), name='deoblique') reorient = pe.Node(afni.Resample( orientation='RPI', outputtype='NIFTI_GZ'), name='reorient') workflow.connect([ (inputnode, deoblique, [('in_file', 'in_file')]), (deoblique, reorient, [('out_file', 'in_file')]), (reorient, outputnode, [('out_file', 'out_file')]) ]) return workflow
def reorient_wf(name='ReorientWorkflow')
A workflow to reorient images to 'RPI' orientation
1.546337
1.539241
1.00461
desc = [] if self.__desc__: desc += [self.__desc__] for node in pe.utils.topological_sort(self._graph)[0]: if isinstance(node, LiterateWorkflow): add_desc = node.visit_desc() if add_desc not in desc: desc.append(add_desc) if self.__postdesc__: desc += [self.__postdesc__] return ''.join(desc)
def visit_desc(self)
Builds a citation boilerplate by visiting all workflows appending their ``__desc__`` field
5.392745
4.461732
1.208666
if out_file is None: out_file = fname_presuffix(fixed_image, suffix='_reference', newpath=os.getcwd()) # Moving images may not be RAS/LPS (more generally, transverse-longitudinal-axial) reoriented_moving_img = nb.as_closest_canonical(nb.load(moving_image)) new_zooms = reoriented_moving_img.header.get_zooms()[:3] # Avoid small differences in reported resolution to cause changes to # FOV. See https://github.com/poldracklab/fmriprep/issues/512 # A positive diagonal affine is RAS, hence the need to reorient above. new_affine = np.diag(np.round(new_zooms, 3)) resampled = nli.resample_img(fixed_image, target_affine=new_affine, interpolation='nearest') if fov_mask is not None: # If we have a mask, resample again dropping (empty) samples # out of the FoV. fixednii = nb.load(fixed_image) masknii = nb.load(fov_mask) if np.all(masknii.shape[:3] != fixednii.shape[:3]): raise RuntimeError( 'Fixed image and mask do not have the same dimensions.') if not np.allclose(masknii.affine, fixednii.affine, atol=1e-5): raise RuntimeError( 'Fixed image and mask have different affines') # Get mask into reference space masknii = nli.resample_img(fixed_image, target_affine=new_affine, interpolation='nearest') res_shape = np.array(masknii.shape[:3]) # Calculate a bounding box for the input mask # with an offset of 2 voxels per face bbox = np.argwhere(masknii.get_data() > 0) new_origin = np.clip(bbox.min(0) - 2, a_min=0, a_max=None) new_end = np.clip(bbox.max(0) + 2, a_min=0, a_max=res_shape - 1) # Find new origin, and set into new affine new_affine_4 = resampled.affine.copy() new_affine_4[:3, 3] = new_affine_4[:3, :3].dot( new_origin) + new_affine_4[:3, 3] # Calculate new shapes new_shape = new_end - new_origin + 1 resampled = nli.resample_img(fixed_image, target_affine=new_affine_4, target_shape=new_shape.tolist(), interpolation='nearest') xform = resampled.affine # nibabel will pick the best affine _, qform_code = resampled.header.get_qform(coded=True) _, sform_code = resampled.header.get_sform(coded=True) xform_code = sform_code if sform_code > 0 else qform_code if xform_code == 1: xform_code = 2 if force_xform_code is not None: xform_code = force_xform_code # Keep 0, 2, 3, 4 unchanged resampled.header.set_qform(xform, int(xform_code)) resampled.header.set_sform(xform, int(xform_code)) resampled.header['descrip'] = 'reference image generated by %s.' % ( message or '(unknown software)') resampled.to_filename(out_file) return out_file
def _gen_reference(fixed_image, moving_image, fov_mask=None, out_file=None, message=None, force_xform_code=None)
Generates a sampling reference, and makes sure xform matrices/codes are correct
3.061691
3.046823
1.00488
import pandas as pd # Adapted from https://dev.to/rrampage/snake-case-to-camel-case-and- ... # back-using-regular-expressions-and-python-m9j re_to_camel = r'(.*?)_([a-zA-Z0-9])' re_to_snake = r'(^.+?|.*?)((?<![_A-Z])[A-Z]|(?<![_0-9])[0-9]+)' def snake(match): return '{}_{}'.format(match.group(1).lower(), match.group(2).lower()) def camel(match): return '{}{}'.format(match.group(1), match.group(2).upper()) # from fmriprep def less_breakable(a_string): return ''.join(a_string.split()).strip('#') drop_columns = drop_columns or [] additional_metadata = additional_metadata or {} tsv_data = pd.read_csv(in_tsv, '\t') for k, v in additional_metadata.items(): tsv_data[k] = v for col in drop_columns: tsv_data.drop(labels=col, axis='columns', inplace=True) tsv_data.set_index(index_column, drop=True, inplace=True) if enforce_case: tsv_data.index = [re.sub(re_to_snake, snake, less_breakable(i), 0).lower() for i in tsv_data.index] tsv_data.columns = [re.sub(re_to_camel, camel, less_breakable(i).title(), 0) for i in tsv_data.columns] json_data = tsv_data.to_json(orient='index') json_data = json.JSONDecoder( object_pairs_hook=OrderedDict).decode(json_data) if out_json is None: return json_data with open(out_json, 'w') as f: json.dump(json_data, f, indent=4) return out_json
def _tsv2json(in_tsv, out_json, index_column, additional_metadata=None, drop_columns=None, enforce_case=True)
Convert metadata from TSV format to JSON format. Parameters ---------- in_tsv: str Path to the metadata in TSV format. out_json: str Path where the metadata should be saved in JSON format after conversion. If this is None, then a dictionary is returned instead. index_column: str Name of the column in the TSV to be used as an index (top-level key in the JSON). additional_metadata: dict Any additional metadata that should be applied to all entries in the JSON. drop_columns: list List of columns from the input TSV to be dropped from the JSON. enforce_case: bool Indicates whether BIDS case conventions should be followed. Currently, this means that index fields (column names in the associated data TSV) use snake case and other fields use camel case. Returns ------- str Path to the metadata saved in JSON format.
2.785429
2.785978
0.999803
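A hedged usage sketch for ``_tsv2json``; the file and column names are hypothetical, and passing ``out_json=None`` returns the dictionary instead of writing a file:

meta = _tsv2json('participants.tsv', None, 'participant_id',
                 additional_metadata={'GeneratedBy': 'example'})
# meta maps each participant_id value to a dict of the remaining columns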
tpm_img = nb.load(in_tpm) roi_mask = (tpm_img.get_data() >= pthres).astype(np.uint8) eroded_mask_file = None erode_in = (mask_erosion_mm is not None and mask_erosion_mm > 0 or mask_erosion_prop is not None and mask_erosion_prop < 1) if erode_in: eroded_mask_file = fname_presuffix(in_mask, suffix='_eroded', newpath=newpath) mask_img = nb.load(in_mask) mask_data = mask_img.get_data().astype(np.uint8) if mask_erosion_mm: iter_n = max(int(mask_erosion_mm / max(mask_img.header.get_zooms())), 1) mask_data = nd.binary_erosion(mask_data, iterations=iter_n) else: orig_vol = np.sum(mask_data > 0) while np.sum(mask_data > 0) / orig_vol > mask_erosion_prop: mask_data = nd.binary_erosion(mask_data, iterations=1) # Store mask eroded = nb.Nifti1Image(mask_data, mask_img.affine, mask_img.header) eroded.set_data_dtype(np.uint8) eroded.to_filename(eroded_mask_file) # Mask TPM data (no effect if not eroded) roi_mask[~mask_data] = 0 # shrinking erode_out = (erosion_mm is not None and erosion_mm > 0 or erosion_prop is not None and erosion_prop < 1) if erode_out: if erosion_mm: iter_n = max(int(erosion_mm / max(tpm_img.header.get_zooms())), 1) roi_mask = nd.binary_erosion(roi_mask, iterations=iter_n) else: orig_vol = np.sum(roi_mask > 0) while np.sum(roi_mask > 0) / orig_vol > erosion_prop: roi_mask = nd.binary_erosion(roi_mask, iterations=1) # Create image to resample roi_fname = fname_presuffix(in_tpm, suffix='_roi', newpath=newpath) roi_img = nb.Nifti1Image(roi_mask, tpm_img.affine, tpm_img.header) roi_img.set_data_dtype(np.uint8) roi_img.to_filename(roi_fname) return roi_fname, eroded_mask_file or in_mask
def _tpm2roi(in_tpm, in_mask, mask_erosion_mm=None, erosion_mm=None, mask_erosion_prop=None, erosion_prop=None, pthres=0.95, newpath=None)
Generate a mask from a tissue probability map
1.796648
1.791842
1.002682
report = Report(Path(reportlets_dir), out_dir, run_uuid, config=config, subject_id=subject_label, packagename=packagename) return report.generate_report()
def run_reports(reportlets_dir, out_dir, subject_label, run_uuid, config=None, packagename=None)
Runs the reports .. testsetup:: >>> from shutil import copytree >>> from tempfile import TemporaryDirectory >>> new_path = Path(__file__).resolve().parent.parent >>> test_data_path = new_path / 'data' / 'tests' / 'work' >>> tmpdir = TemporaryDirectory() >>> os.chdir(tmpdir.name) #noqa >>> testdir = Path().resolve() >>> data_dir = copytree(test_data_path, testdir / 'work') >>> (testdir / 'fmriprep').mkdir(parents=True, exist_ok=True) .. doctest:: >>> run_reports(str(testdir / 'work' / 'reportlets'), ... str(testdir / 'out'), '01', 'madeoutuuid') 0 .. testcleanup:: >>> tmpdir.cleanup()
3.242139
5.461984
0.593583
reports_dir = str(Path(work_dir) / 'reportlets') report_errors = [ run_reports(reports_dir, output_dir, subject_label, run_uuid, config, packagename=packagename) for subject_label in subject_list ] errno = sum(report_errors) if errno: import logging logger = logging.getLogger('cli') error_list = ', '.join('%s (%d)' % (subid, err) for subid, err in zip(subject_list, report_errors) if err) logger.error( 'Preprocessing did not finish successfully. Errors occurred while processing ' 'data from participants: %s. Check the HTML reports for details.', error_list) return errno
def generate_reports(subject_list, output_dir, work_dir, run_uuid, config=None, packagename=None)
A wrapper to run_reports on a given ``subject_list``
4.129618
4.105053
1.005984
for subrep_cfg in config: # First determine whether we need to split by some ordering # (ie. sessions / tasks / runs), which are separated by commas. orderings = [s for s in subrep_cfg.get('ordering', '').strip().split(',') if s] queries = [] for key in orderings: values = getattr(self.layout, 'get_%s%s' % (key, PLURAL_SUFFIX[key]))() if values: queries.append((key, values)) if not queries: # E.g. this is an anatomical reportlet reportlets = [Reportlet(self.layout, self.out_dir, config=cfg) for cfg in subrep_cfg['reportlets']] else: # Do not use dictionary for queries, as we need to preserve ordering # of ordering columns. reportlets = [] entities, values = zip(*queries) combinations = list(product(*values)) # e.g.: [('rest', 1), ('rest', 2)] for c in combinations: # Set a common title for this particular combination c title = 'Reports for: %s.' % ', '.join( ['%s <span class="bids-entity">%s</span>' % (entities[i], c[i]) for i in range(len(c))]) for cfg in subrep_cfg['reportlets']: cfg['bids'].update({entities[i]: c[i] for i in range(len(c))}) rlet = Reportlet(self.layout, self.out_dir, config=cfg) if not rlet.is_empty(): rlet.title = title title = None reportlets.append(rlet) # Filter out empty reportlets reportlets = [r for r in reportlets if not r.is_empty()] if reportlets: sub_report = SubReport( subrep_cfg['name'], isnested=len(queries) > 0, reportlets=reportlets, title=subrep_cfg.get('title')) self.sections.append(sub_report) # Populate errors sections error_dir = self.out_dir / self.packagename / 'sub-{}'.format(self.subject_id) / \ 'log' / self.run_uuid if error_dir.is_dir(): from ..utils.misc import read_crashfile self.errors = [read_crashfile(str(f)) for f in error_dir.glob('crash*.*')]
def index(self, config)
Traverse the reports config definition and instantiate reportlets. This method also places figures in their final location.
4.641042
4.520357
1.026698
logs_path = self.out_dir / 'logs' boilerplate = [] boiler_idx = 0 if (logs_path / 'CITATION.html').exists(): text = (logs_path / 'CITATION.html').read_text(encoding='UTF-8') text = '<div class="boiler-html">%s</div>' % re.compile( '<body>(.*?)</body>', re.DOTALL | re.IGNORECASE).findall(text)[0].strip() boilerplate.append((boiler_idx, 'HTML', text)) boiler_idx += 1 if (logs_path / 'CITATION.md').exists(): text = '<pre>%s</pre>\n' % (logs_path / 'CITATION.md').read_text(encoding='UTF-8') boilerplate.append((boiler_idx, 'Markdown', text)) boiler_idx += 1 if (logs_path / 'CITATION.tex').exists(): text = (logs_path / 'CITATION.tex').read_text(encoding='UTF-8') text = re.compile( r'\\begin{document}(.*?)\\end{document}', re.DOTALL | re.IGNORECASE).findall(text)[0].strip() text = '<pre>%s</pre>\n' % text text += '<h3>Bibliography</h3>\n' text += '<pre>%s</pre>\n' % Path( pkgrf(self.packagename, 'data/boilerplate.bib')).read_text(encoding='UTF-8') boilerplate.append((boiler_idx, 'LaTeX', text)) boiler_idx += 1 env = jinja2.Environment( loader=jinja2.FileSystemLoader(searchpath=str(self.template_path.parent)), trim_blocks=True, lstrip_blocks=True ) report_tpl = env.get_template(self.template_path.name) report_render = report_tpl.render(sections=self.sections, errors=self.errors, boilerplate=boilerplate) # Write out report (self.out_dir / self.out_filename).write_text(report_render, encoding='UTF-8') return len(self.errors)
def generate_report(self)
Once the Report has been indexed, the final HTML can be generated
2.272565
2.255641
1.007503
import nibabel as nb from nipype.utils.filemanip import fname_presuffix from niworkflows.interfaces.fixes import FixHeaderApplyTransforms as ApplyTransforms in_file, in_xform, ifargs, index, newpath = args out_file = fname_presuffix(in_file, suffix='_xform-%05d' % index, newpath=newpath, use_ext=True) copy_dtype = ifargs.pop('copy_dtype', False) xfm = ApplyTransforms( input_image=in_file, transforms=in_xform, output_image=out_file, **ifargs) xfm.terminal_output = 'allatonce' xfm.resource_monitor = False runtime = xfm.run().runtime if copy_dtype: nii = nb.load(out_file) in_dtype = nb.load(in_file).get_data_dtype() # Overwrite only iff dtypes don't match if in_dtype != nii.get_data_dtype(): nii.set_data_dtype(in_dtype) nii.to_filename(out_file) return (out_file, runtime.cmdline)
def _applytfms(args)
Applies ANTs' antsApplyTransforms to the input image. All inputs are zipped in one tuple to make it digestible by multiprocessing's map
2.981967
2.94735
1.011745
base_xform = ['#Insight Transform File V1.0', '#Transform 0'] # Initialize the transforms matrix xfms_T = [] for i, tf_file in enumerate(transforms): # If it is a deformation field, copy to the tfs_matrix directly if guess_type(tf_file)[0] != 'text/plain': xfms_T.append([tf_file] * num_files) continue with open(tf_file) as tf_fh: tfdata = tf_fh.read().strip() # If it is not an ITK transform file, copy to the tfs_matrix directly if not tfdata.startswith('#Insight Transform File'): xfms_T.append([tf_file] * num_files) continue # Count number of transforms in ITK transform file nxforms = tfdata.count('#Transform') # Remove first line tfdata = tfdata.split('\n')[1:] # If it is a ITK transform file with only 1 xform, copy to the tfs_matrix directly if nxforms == 1: xfms_T.append([tf_file] * num_files) continue if nxforms != num_files: raise RuntimeError('Number of transforms (%d) found in the ITK file does not match' ' the number of input image files (%d).' % (nxforms, num_files)) # At this point splitting transforms will be necessary, generate a base name out_base = fname_presuffix(tf_file, suffix='_pos-%03d_xfm-{:05d}' % i, newpath=tmp_folder.name).format # Split combined ITK transforms file split_xfms = [] for xform_i in range(nxforms): # Find start token to extract startidx = tfdata.index('#Transform %d' % xform_i) next_xform = base_xform + tfdata[startidx + 1:startidx + 4] + [''] xfm_file = out_base(xform_i) with open(xfm_file, 'w') as out_xfm: out_xfm.write('\n'.join(next_xform)) split_xfms.append(xfm_file) xfms_T.append(split_xfms) # Transpose back (only Python 3) return list(map(list, zip(*xfms_T)))
def _arrange_xfms(transforms, num_files, tmp_folder)
Convenience method to arrange the list of transforms that should be applied to each input file
3.799871
3.764827
1.009308
img = nb.load(in_file) transform = load_transform(transform_file) pointset = img.get_arrays_from_intent('NIFTI_INTENT_POINTSET')[0] coords = pointset.data.T c_ras_keys = ('VolGeomC_R', 'VolGeomC_A', 'VolGeomC_S') ras = np.array([[float(pointset.metadata[key])] for key in c_ras_keys]) ones = np.ones((1, coords.shape[1]), dtype=coords.dtype) # Apply C_RAS translation to coordinates, then transform pointset.data = transform.dot(np.vstack((coords + ras, ones)))[:3].T.astype(coords.dtype) secondary = nb.gifti.GiftiNVPairs('AnatomicalStructureSecondary', 'MidThickness') geom_type = nb.gifti.GiftiNVPairs('GeometricType', 'Anatomical') has_ass = has_geo = False for nvpair in pointset.meta.data: # Remove C_RAS translation from metadata to avoid double-dipping in FreeSurfer if nvpair.name in c_ras_keys: nvpair.value = '0.000000' # Check for missing metadata elif nvpair.name == secondary.name: has_ass = True elif nvpair.name == geom_type.name: has_geo = True fname = os.path.basename(in_file) # Update metadata for MidThickness/graymid surfaces if 'midthickness' in fname.lower() or 'graymid' in fname.lower(): if not has_ass: pointset.meta.data.insert(1, secondary) if not has_geo: pointset.meta.data.insert(2, geom_type) if newpath is None: newpath = os.getcwd() out_file = os.path.join(newpath, fname) img.to_filename(out_file) return out_file
def normalize_surfs(in_file, transform_file, newpath=None)
Re-center GIFTI coordinates to align to native T1 space For midthickness surfaces, add MidThickness metadata Coordinate update based on: https://github.com/Washington-University/workbench/blob/1b79e56/src/Algorithms/AlgorithmSurfaceApplyAffine.cxx#L73-L91 and https://github.com/Washington-University/Pipelines/blob/ae69b9a/PostFreeSurfer/scripts/FreeSurfer2CaretConvertAndRegisterNonlinear.sh#L147
4.054125
3.778632
1.072908
if fname is None: return np.eye(4) if fname.endswith('.mat'): return np.loadtxt(fname) elif fname.endswith('.lta'): with open(fname, 'rb') as fobj: for line in fobj: if line.startswith(b'1 4 4'): break lines = fobj.readlines()[:4] return np.genfromtxt(lines) raise ValueError("Unknown transform type; pass FSL (.mat) or LTA (.lta)")
def load_transform(fname)
Load affine transform from file Parameters ---------- fname : str or None Filename of an LTA or FSL-style MAT transform file. If ``None``, return an identity transform Returns ------- affine : (4, 4) numpy.ndarray
3.731141
3.230267
1.155057
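A small sketch exercising ``load_transform``; the ``.lta`` path is hypothetical:

import numpy as np

assert np.allclose(load_transform(None), np.eye(4))   # None yields the identity
affine = load_transform('register.lta')               # 4x4 array parsed from the LTA body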
def normalize_v3(arr): ''' Normalize a numpy array of 3 component vectors shape=(n,3) ''' lens = np.sqrt(arr[:, 0]**2 + arr[:, 1]**2 + arr[:, 2]**2) arr /= lens[:, np.newaxis] tris = vertices[faces] facenorms = np.cross(tris[::, 1] - tris[::, 0], tris[::, 2] - tris[::, 0]) normalize_v3(facenorms) norm = np.zeros(vertices.shape, dtype=vertices.dtype) norm[faces[:, 0]] += facenorms norm[faces[:, 1]] += facenorms norm[faces[:, 2]] += facenorms normalize_v3(norm) return norm
def vertex_normals(vertices, faces)
Calculates the normals of a triangular mesh
2.108995
2.124116
0.992881
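A worked example for ``vertex_normals`` on a single triangle lying in the z=0 plane, where every vertex normal should point along +z (assumes the function is importable):

import numpy as np

verts = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]])
faces = np.array([[0, 1, 2]])
print(vertex_normals(verts, faces))   # approximately [0, 0, 1] for each of the three vertices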
from pathlib import Path import pandas as pd from pyntcloud import PyntCloud df = pd.DataFrame(np.hstack((vertices, normals))) df.columns = ['x', 'y', 'z', 'nx', 'ny', 'nz'] cloud = PyntCloud(df) if out_file is None: out_file = Path('pointcloud.ply').resolve() cloud.to_file(str(out_file)) return out_file
def pointcloud2ply(vertices, normals, out_file=None)
Converts the file to PLY format
2.118778
2.100948
1.008487
from pathlib import Path from numpy import eye from nibabel.gifti import ( GiftiMetaData, GiftiCoordSystem, GiftiImage, GiftiDataArray, ) from pyntcloud import PyntCloud in_file = Path(in_file) surf = PyntCloud.from_file(str(in_file)) # Update centroid metadata metadata.update( zip(('SurfaceCenterX', 'SurfaceCenterY', 'SurfaceCenterZ'), ['%.4f' % c for c in surf.centroid]) ) # Prepare data arrays da = ( GiftiDataArray( data=surf.xyz.astype('float32'), datatype='NIFTI_TYPE_FLOAT32', intent='NIFTI_INTENT_POINTSET', meta=GiftiMetaData.from_dict(metadata), coordsys=GiftiCoordSystem(xform=eye(4), xformspace=3)), GiftiDataArray( data=surf.mesh.values, datatype='NIFTI_TYPE_INT32', intent='NIFTI_INTENT_TRIANGLE', coordsys=None)) surfgii = GiftiImage(darrays=da) if out_file is None: out_file = fname_presuffix( in_file.name, suffix='.gii', use_ext=False, newpath=str(Path.cwd())) surfgii.to_filename(str(out_file)) return out_file
def ply2gii(in_file, metadata, out_file=None)
Convert from ply to GIfTI
3.002446
2.969859
1.010972
import os from nipype.utils.filemanip import filename_to_list base, in_file = os.path.split(filename_to_list(in_files)[0]) subject_label = in_file.split("_", 1)[0].split("-")[1] return os.path.join(base, "sub-%s_T1w.nii.gz" % subject_label)
def fix_multi_T1w_source_name(in_files)
Make up a generic source name when there are multiple T1s >>> fix_multi_T1w_source_name([ ... '/path/to/sub-045_ses-test_T1w.nii.gz', ... '/path/to/sub-045_ses-retest_T1w.nii.gz']) '/path/to/sub-045_T1w.nii.gz'
2.463247
2.40072
1.026045
import os.path as op from nipype.utils.filemanip import fname_presuffix, filename_to_list return op.basename(fname_presuffix(filename_to_list(in_files)[0], suffix=suffix))
def add_suffix(in_files, suffix)
Wrap nipype's fname_presuffix to conveniently just add a suffix >>> add_suffix([ ... '/path/to/sub-045_ses-test_T1w.nii.gz', ... '/path/to/sub-045_ses-retest_T1w.nii.gz'], '_test') 'sub-045_ses-test_T1w_test.nii.gz'
2.92731
3.1749
0.922016
from pathlib import Path lines = Path(path).read_text().splitlines() data = {'file': str(path)} traceback_start = 0 if lines[0].startswith('Node'): data['node'] = lines[0].split(': ', 1)[1].strip() data['node_dir'] = lines[1].split(': ', 1)[1].strip() inputs = [] cur_key = '' cur_val = '' for i, line in enumerate(lines[5:]): if not line.strip(): continue if line[0].isspace(): cur_val += line continue if cur_val: inputs.append((cur_key, cur_val.strip())) if line.startswith("Traceback ("): traceback_start = i + 5 break cur_key, cur_val = tuple(line.split(' = ', 1)) data['inputs'] = sorted(inputs) else: data['node_dir'] = "Node crashed before execution" data['traceback'] = '\n'.join(lines[traceback_start:]).strip() return data
def _read_txt(path)
Read a txt crashfile >>> new_path = Path(__file__).resolve().parent.parent >>> test_data_path = new_path / 'data' / 'tests' >>> info = _read_txt(test_data_path / 'crashfile.txt') >>> info['node'] # doctest: +ELLIPSIS '...func_preproc_task_machinegame_run_02_wf.carpetplot_wf.conf_plot' >>> info['traceback'] # doctest: +ELLIPSIS '...ValueError: zero-size array to reduction operation minimum which has no identity'
3.177927
3.025209
1.050482
from pathlib import Path basename = str(Path(fname).name) stem = Path(basename.rstrip('.gz')).stem return stem, basename[len(stem):]
def splitext(fname)
Splits filename and extension (.gz safe) >>> splitext('some/file.nii.gz') ('file', '.nii.gz') >>> splitext('some/other/file.nii') ('file', '.nii') >>> splitext('otherext.tar.gz') ('otherext', '.tar.gz') >>> splitext('text.txt') ('text', '.txt')
5.251011
6.155988
0.852992
sns.set_style("whitegrid") sns.set_context("paper", font_scale=0.8) if figure is None: figure = plt.gcf() nconfounds = len(self.confounds) nspikes = len(self.spikes) nrows = 1 + nconfounds + nspikes # Create grid grid = mgs.GridSpec(nrows, 1, wspace=0.0, hspace=0.05, height_ratios=[1] * (nrows - 1) + [5]) grid_id = 0 for tsz, name, iszs in self.spikes: spikesplot(tsz, title=name, outer_gs=grid[grid_id], tr=self.tr, zscored=iszs) grid_id += 1 if self.confounds: palette = color_palette("husl", nconfounds) for i, (name, kwargs) in enumerate(self.confounds.items()): tseries = kwargs.pop('values') confoundplot( tseries, grid[grid_id], tr=self.tr, color=palette[i], name=name, **kwargs) grid_id += 1 plot_carpet(self.func_file, self.seg_data, subplot=grid[-1], tr=self.tr) # spikesplot_cb([0.7, 0.78, 0.2, 0.008]) return figure
def plot(self, figure=None)
Main plotter
3.985959
3.927922
1.014775
from niworkflows.viz.utils import plot_registration NIWORKFLOWS_LOG.info('Generating visual report') fixed_image_nii = load_img(self._fixed_image) moving_image_nii = load_img(self._moving_image) contour_nii = load_img(self._contour) if self._contour is not None else None if self._fixed_image_mask: fixed_image_nii = unmask(apply_mask(fixed_image_nii, self._fixed_image_mask), self._fixed_image_mask) # since the moving image is already in the fixed image space we # should apply the same mask moving_image_nii = unmask(apply_mask(moving_image_nii, self._fixed_image_mask), self._fixed_image_mask) mask_nii = load_img(self._fixed_image_mask) else: mask_nii = threshold_img(fixed_image_nii, 1e-3) n_cuts = 7 if not self._fixed_image_mask and contour_nii: cuts = cuts_from_bbox(contour_nii, cuts=n_cuts) else: cuts = cuts_from_bbox(mask_nii, cuts=n_cuts) # Call composer compose_view( plot_registration(fixed_image_nii, 'fixed-image', estimate_brightness=True, cuts=cuts, label=self._fixed_image_label, contour=contour_nii, compress=self.inputs.compress_report), plot_registration(moving_image_nii, 'moving-image', estimate_brightness=True, cuts=cuts, label=self._moving_image_label, contour=contour_nii, compress=self.inputs.compress_report), out_file=self._out_report )
def _generate_report(self)
Generates the visual report
2.815573
2.736114
1.029041
from niworkflows.viz.utils import plot_registration NIWORKFLOWS_LOG.info('Generating visual report') anat = load_img(self._anat_file) contour_nii = load_img(self._contour) if self._contour is not None else None if self._mask_file: anat = unmask(apply_mask(anat, self._mask_file), self._mask_file) mask_nii = load_img(self._mask_file) else: mask_nii = threshold_img(anat, 1e-3) n_cuts = 7 if not self._mask_file and contour_nii: cuts = cuts_from_bbox(contour_nii, cuts=n_cuts) else: cuts = cuts_from_bbox(mask_nii, cuts=n_cuts) # Call composer compose_view( plot_registration(anat, 'fixed-image', estimate_brightness=True, cuts=cuts, contour=contour_nii, compress=self.inputs.compress_report), [], out_file=self._out_report )
def _generate_report(self)
Generates the visual report
4.458096
4.27102
1.043801
from pathlib import Path import nibabel as nb from nipype.utils.filemanip import fname_presuffix ref = nb.load(in_reference) nii = nb.load(in_mask) hdr = nii.header.copy() hdr.set_data_dtype('int16') hdr.set_slope_inter(1, 0) qform, qcode = ref.header.get_qform(coded=True) if qcode is not None: hdr.set_qform(qform, int(qcode)) sform, scode = ref.header.get_sform(coded=True) if scode is not None: hdr.set_sform(sform, int(scode)) if '_maths' in in_mask: # Cut the name at first _maths occurrence ext = ''.join(Path(in_mask).suffixes) basename = Path(in_mask).name in_mask = basename.split('_maths')[0] + ext out_file = fname_presuffix(in_mask, suffix='_mask', newpath=str(Path())) nii.__class__(nii.get_data().astype('int16'), ref.affine, hdr).to_filename(out_file) return out_file
def _conform_mask(in_mask, in_reference)
Ensures the mask headers make sense and match those of the T1w
2.481216
2.435782
1.018652
warn(DEPRECATION_MSG) file_id, md5 = OSF_RESOURCES[dataset_name] if url is None: url = '{}/{}'.format(OSF_PROJECT_URL, file_id) return fetch_file(dataset_name, url, data_dir, dataset_prefix=dataset_prefix, filetype='tar', resume=resume, verbose=verbose, md5sum=md5)
def get_dataset(dataset_name, dataset_prefix=None, data_dir=None, url=None, resume=True, verbose=1)
Download and load the BIDS-fied brainweb 1mm normal :param str data_dir: path of the data directory. Used to force data storage in a non-standard location. :param str url: download URL of the dataset. Overwrite the default URL.
4.047349
4.948656
0.817868
warn(DEPRECATION_MSG) if template_name.startswith('tpl-'): template_name = template_name[4:] # An aliasing mechanism. Please avoid template_name = TEMPLATE_ALIASES.get(template_name, template_name) return get_dataset(template_name, dataset_prefix='tpl-', data_dir=data_dir, url=url, resume=resume, verbose=verbose)
def get_template(template_name, data_dir=None, url=None, resume=True, verbose=1)
Download and load a template
4.474999
4.443446
1.007101
warn(DEPRECATION_MSG) return get_dataset('brainweb', data_dir=data_dir, url=url, resume=resume, verbose=verbose)
def get_brainweb_1mm_normal(data_dir=None, url=None, resume=True, verbose=1)
Download and load the BIDS-fied brainweb 1mm normal :param str data_dir: path of the data directory. Used to force data storage in a non-standard location. :param str url: download URL of the dataset. Overwrite the default URL.
3.386086
6.829307
0.495817
warn(DEPRECATION_MSG) variant = 'BIDS-examples-1-1.0.0-rc3u5' if variant not in BIDS_EXAMPLES else variant if url is None: url = BIDS_EXAMPLES[variant][0] md5 = BIDS_EXAMPLES[variant][1] return fetch_file(variant, url, data_dir, resume=resume, verbose=verbose, md5sum=md5)
def get_bids_examples(data_dir=None, url=None, resume=True, verbose=1, variant='BIDS-examples-1-1.0.0-rc3u5')
Download BIDS-examples-1
2.861348
2.829586
1.011225
''' takes an image as created by nilearn.plotting and returns a blob svg. Performs compression (can be disabled). A bit hacky. ''' # Check availability of svgo and cwebp has_compress = all((which('svgo'), which('cwebp'))) if compress is True and not has_compress: raise RuntimeError('Compression is required, but svgo or cwebp are not installed') else: compress = (compress is True or compress == 'auto') and has_compress # Compress the SVG file using SVGO if compress: cmd = 'svgo -i - -o - -q -p 3 --pretty --disable=cleanupNumericValues' try: pout = subprocess.run(cmd, input=image.encode('utf-8'), stdout=subprocess.PIPE, shell=True, check=True, close_fds=True).stdout except OSError as e: from errno import ENOENT if compress is True and e.errno == ENOENT: raise e else: image = pout.decode('utf-8') # Convert all of the rasters inside the SVG file with 80% compressed WEBP if compress: new_lines = [] with StringIO(image) as fp: for line in fp: if "image/png" in line: tmp_lines = [line] while "/>" not in line: line = fp.readline() tmp_lines.append(line) content = ''.join(tmp_lines).replace('\n', '').replace( ', ', ',') left = content.split('base64,')[0] + 'base64,' left = left.replace("image/png", "image/webp") right = content.split('base64,')[1] png_b64 = right.split('"')[0] right = '"' + '"'.join(right.split('"')[1:]) cmd = "cwebp -quiet -noalpha -q 80 -o - -- -" pout = subprocess.run( cmd, input=base64.b64decode(png_b64), shell=True, stdout=subprocess.PIPE, check=True, close_fds=True).stdout webpimg = base64.b64encode(pout).decode('utf-8') new_lines.append(left + webpimg + right) else: new_lines.append(line) lines = new_lines else: lines = image.splitlines() svg_start = 0 for i, line in enumerate(lines): if '<svg ' in line: svg_start = i continue image_svg = lines[svg_start:] # strip out extra DOCTYPE, etc headers return ''.join(image_svg)
def svg_compress(image, compress='auto')
takes an image as created by nilearn.plotting and returns a blob svg. Performs compression (can be disabled). A bit hacky.
3.83689
3.325856
1.153655
from io import StringIO image_buf = StringIO() display_object.frame_axes.figure.savefig( image_buf, dpi=dpi, format='svg', facecolor='k', edgecolor='k') return image_buf.getvalue()
def svg2str(display_object, dpi=300)
Serializes a nilearn display object as a string
3.486459
3.426224
1.017581
image_svg = svg2str(display_object, dpi) if compress is True or compress == 'auto': image_svg = svg_compress(image_svg, compress) image_svg = re.sub(' height="[0-9]+[a-z]*"', '', image_svg, count=1) image_svg = re.sub(' width="[0-9]+[a-z]*"', '', image_svg, count=1) image_svg = re.sub(' viewBox', ' preserveAspectRatio="xMidYMid meet" viewBox', image_svg, count=1) start_tag = '<svg ' start_idx = image_svg.find(start_tag) end_tag = '</svg>' end_idx = image_svg.rfind(end_tag) if start_idx == -1 or end_idx == -1: NIWORKFLOWS_LOG.info('svg tags not found in extract_svg') # rfind gives the start index of the substr. We want this substr # included in our return value so we add its length to the index. end_idx += len(end_tag) return image_svg[start_idx:end_idx]
def extract_svg(display_object, dpi=300, compress='auto')
Removes the preamble of the svg files generated with nilearn
3.60663
3.569006
1.010542
from nibabel.affines import apply_affine mask_data = mask_nii.get_data() > 0.0 # First, project the number of masked voxels on each axes ijk_counts = [ mask_data.sum(2).sum(1), # project sagittal planes to transverse (i) axis mask_data.sum(2).sum(0), # project coronal planes to to longitudinal (j) axis mask_data.sum(1).sum(0), # project axial planes to vertical (k) axis ] # If all voxels are masked in a slice (say that happens at k=10), # then the value for ijk_counts for the projection to k (ie. ijk_counts[2]) # at that element of the orthogonal axes (ijk_counts[2][10]) is # the total number of voxels in that slice (ie. Ni x Nj). # Here we define some thresholds to consider the plane as "masked" # The thresholds vary because of the shape of the brain # I have manually found that for the axial view requiring 30% # of the slice elements to be masked drops almost empty boxes # in the mosaic of axial planes (and also addresses #281) ijk_th = [ int((mask_data.shape[1] * mask_data.shape[2]) * 0.2), # sagittal int((mask_data.shape[0] * mask_data.shape[2]) * 0.0), # coronal int((mask_data.shape[0] * mask_data.shape[1]) * 0.3), # axial ] vox_coords = [] for ax, (c, th) in enumerate(zip(ijk_counts, ijk_th)): B = np.argwhere(c > th) if B.size: smin, smax = B.min(), B.max() # Avoid too narrow selections of cuts (very small masks) if not B.size or (th > 0 and (smin + cuts + 1) >= smax): B = np.argwhere(c > 0) # Resort to full plane if mask is seemingly empty smin, smax = B.min(), B.max() if B.size else (0, mask_data.shape[ax]) inc = (smax - smin) / (cuts + 1) vox_coords.append([smin + (i + 1) * inc for i in range(cuts)]) ras_coords = [] for cross in np.array(vox_coords).T: ras_coords.append(apply_affine( mask_nii.affine, cross).tolist()) ras_cuts = [list(coords) for coords in np.transpose(ras_coords)] return {k: v for k, v in zip(['x', 'y', 'z'], ras_cuts)}
def cuts_from_bbox(mask_nii, cuts=3)
Finds equi-spaced cuts for presenting images
4.566057
4.559087
1.001529
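A hedged usage sketch for ``cuts_from_bbox``; the mask path is hypothetical:

import nibabel as nb

mask_nii = nb.load('brainmask.nii.gz')    # hypothetical binary mask
cuts = cuts_from_bbox(mask_nii, cuts=7)
print(sorted(cuts), len(cuts['z']))       # ['x', 'y', 'z'] 7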
''' if self.inputs.in_file is 3d, return it. if 4d, pick an arbitrary volume and return that. if in_file is a list of files, return an arbitrary file from the list, and an arbitrary volume from that file ''' in_file = filemanip.filename_to_list(in_file)[0] try: in_file = nb.load(in_file) except AttributeError: in_file = in_file if in_file.get_data().ndim == 3: return in_file return nlimage.index_img(in_file, 0)
def _3d_in_file(in_file)
if self.inputs.in_file is 3d, return it. if 4d, pick an arbitrary volume and return that. if in_file is a list of files, return an arbitrary file from the list, and an arbitrary volume from that file
5.139257
2.444416
2.102448
plot_params = {} if plot_params is None else plot_params image_nii = _3d_in_file(image_nii) data = image_nii.get_data() plot_params = robust_set_limits(data, plot_params) bbox_nii = nb.load(image_nii if bbox_nii is None else bbox_nii) if masked: bbox_nii = nlimage.threshold_img(bbox_nii, 1e-3) cuts = cuts_from_bbox(bbox_nii, cuts=7) plot_params['colors'] = colors or plot_params.get('colors', None) out_files = [] for d in plot_params.pop('dimensions', ('z', 'x', 'y')): plot_params['display_mode'] = d plot_params['cut_coords'] = cuts[d] svg = _plot_anat_with_contours(image_nii, segs=seg_niis, compress=compress, **plot_params) # Find and replace the figure_1 id. try: xml_data = etree.fromstring(svg) except etree.XMLSyntaxError as e: NIWORKFLOWS_LOG.info(e) return find_text = etree.ETXPath("//{%s}g[@id='figure_1']" % SVGNS) find_text(xml_data)[0].set('id', 'segmentation-%s-%s' % (d, uuid4())) svg_fig = SVGFigure() svg_fig.root = xml_data out_files.append(svg_fig) return out_files
def plot_segs(image_nii, seg_niis, out_file, bbox_nii=None, masked=False, colors=None, compress='auto', **plot_params)
plot segmentation as contours over the image (e.g. anatomical). seg_niis should be a list of files. mask_nii helps determine the cut coordinates. plot_params will be passed on to nilearn plot_* functions. If seg_niis is a list of size one, it behaves as if it was plotting the mask.
3.896513
3.890539
1.001536
plot_params = {} if plot_params is None else plot_params # Use default MNI cuts if none defined if cuts is None: raise NotImplementedError # TODO out_files = [] if estimate_brightness: plot_params = robust_set_limits(anat_nii.get_data().reshape(-1), plot_params) # FreeSurfer ribbon.mgz ribbon = contour is not None and np.array_equal( np.unique(contour.get_data()), [0, 2, 3, 41, 42]) if ribbon: contour_data = contour.get_data() % 39 white = nlimage.new_img_like(contour, contour_data == 2) pial = nlimage.new_img_like(contour, contour_data >= 2) # Plot each cut axis for i, mode in enumerate(list(order)): plot_params['display_mode'] = mode plot_params['cut_coords'] = cuts[mode] if i == 0: plot_params['title'] = label else: plot_params['title'] = None # Generate nilearn figure display = plot_anat(anat_nii, **plot_params) if ribbon: kwargs = {'levels': [0.5], 'linewidths': 0.5} display.add_contours(white, colors='b', **kwargs) display.add_contours(pial, colors='r', **kwargs) elif contour is not None: display.add_contours(contour, colors='b', levels=[0.5], linewidths=0.5) svg = extract_svg(display, compress=compress) display.close() # Find and replace the figure_1 id. try: xml_data = etree.fromstring(svg) except etree.XMLSyntaxError as e: NIWORKFLOWS_LOG.info(e) return find_text = etree.ETXPath("//{%s}g[@id='figure_1']" % SVGNS) find_text(xml_data)[0].set('id', '%s-%s-%s' % (div_id, mode, uuid4())) svg_fig = SVGFigure() svg_fig.root = xml_data out_files.append(svg_fig) return out_files
def plot_registration(anat_nii, div_id, plot_params=None, order=('z', 'x', 'y'), cuts=None, estimate_brightness=False, label=None, contour=None, compress='auto')
Plots the foreground and background views Default order is: axial, coronal, sagittal
3.566437
3.629044
0.982748
import svgutils.transform as svgt if fg_svgs is None: fg_svgs = [] # Merge SVGs and get roots svgs = bg_svgs + fg_svgs roots = [f.getroot() for f in svgs] # Query the size of each sizes = [] for f in svgs: viewbox = [float(v) for v in f.root.get("viewBox").split(" ")] width = int(viewbox[2]) height = int(viewbox[3]) sizes.append((width, height)) nsvgs = len(bg_svgs) sizes = np.array(sizes) # Calculate the scale to fit all widths width = sizes[ref, 0] scales = width / sizes[:, 0] heights = sizes[:, 1] * scales # Compose the views panel: total size is the width of # any element (used the first here) and the sum of heights fig = svgt.SVGFigure(width, heights[:nsvgs].sum()) yoffset = 0 for i, r in enumerate(roots): r.moveto(0, yoffset, scale=scales[i]) if i == (nsvgs - 1): yoffset = 0 else: yoffset += heights[i] # Group background and foreground panels in two groups if fg_svgs: newroots = [ svgt.GroupElement(roots[:nsvgs], {'class': 'background-svg'}), svgt.GroupElement(roots[nsvgs:], {'class': 'foreground-svg'}) ] else: newroots = roots fig.append(newroots) fig.root.attrib.pop("width") fig.root.attrib.pop("height") fig.root.set("preserveAspectRatio", "xMidYMid meet") out_file = op.abspath(out_file) fig.save(out_file) # Post processing with open(out_file, 'r' if PY3 else 'rb') as f: svg = f.read().split('\n') # Remove <?xml... line if svg[0].startswith("<?xml"): svg = svg[1:] # Add styles for the flicker animation (CSS keyframes keyed by a per-report uuid) if fg_svgs: svg.insert(2, '<style type="text/css">@keyframes flickerAnimation%s { 0%% {opacity: 1;} 100%% { opacity: 0; }} .foreground-svg { animation: 1s ease-in-out 0s alternate none infinite paused flickerAnimation%s;} .foreground-svg:hover { animation-play-state: running;}</style>' % tuple([uuid4()] * 2)) with open(out_file, 'w' if PY3 else 'wb') as f: f.write('\n'.join(svg)) return out_file
def compose_view(bg_svgs, fg_svgs, ref=0, out_file='report.svg')
Composes the input svgs into one standalone svg and inserts the CSS code for the flickering animation
3.518538
3.439302
1.023038
import numpy as np # get the shape of the array we are projecting to new_shape = list(data.shape) del new_shape[max_axis] # generate a 3D indexing array that points to max abs value in the # current projection a1, a2 = np.indices(new_shape) inds = [a1, a2] inds.insert(max_axis, np.abs(data).argmax(axis=max_axis)) # take the values where the absolute value of the projection # is the highest maximum_intensity_data = data[inds] return np.rot90(maximum_intensity_data)
def transform_to_2d(data, max_axis)
Projects 3d data cube along one axis using maximum intensity with preservation of the signs. Adapted from nilearn.
5.067584
4.555455
1.112421
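A tiny worked example for ``transform_to_2d``: a lone negative voxel survives the signed maximum-intensity projection (assumes the function is importable):

import numpy as np

vol = np.zeros((3, 3, 3))
vol[1, 1, 1] = -5.0                      # largest absolute value along its projection line
mip = transform_to_2d(vol, max_axis=2)   # project along the third axis
print(mip.shape, mip[1, 1])              # (3, 3) -5.0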
from os import path as op from inspect import getfile, currentframe from setuptools import setup, find_packages from niworkflows.__about__ import ( __packagename__, __author__, __email__, __maintainer__, __license__, __description__, __longdesc__, __url__, DOWNLOAD_URL, CLASSIFIERS, REQUIRES, SETUP_REQUIRES, LINKS_REQUIRES, TESTS_REQUIRES, EXTRA_REQUIRES, ) pkg_data = { 'niworkflows': [ 'data/t1-mni_registration*.json', 'data/bold-mni_registration*.json', 'reports/figures.json', 'reports/fmriprep.yml', 'reports/report.tpl', ]} root_dir = op.dirname(op.abspath(getfile(currentframe()))) version = None cmdclass = {} if op.isfile(op.join(root_dir, __packagename__, 'VERSION')): with open(op.join(root_dir, __packagename__, 'VERSION')) as vfile: version = vfile.readline().strip() pkg_data[__packagename__].insert(0, 'VERSION') if version is None: import versioneer version = versioneer.get_version() cmdclass = versioneer.get_cmdclass() setup( name=__packagename__, version=version, description=__description__, long_description=__longdesc__, author=__author__, author_email=__email__, maintainer=__maintainer__, maintainer_email=__email__, license=__license__, url=__url__, download_url=DOWNLOAD_URL, classifiers=CLASSIFIERS, packages=find_packages(exclude=['*.tests']), zip_safe=False, # Dependencies handling setup_requires=SETUP_REQUIRES, install_requires=list(set(REQUIRES)), dependency_links=LINKS_REQUIRES, tests_require=TESTS_REQUIRES, extras_require=EXTRA_REQUIRES, # Data package_data=pkg_data, include_package_data=True, cmdclass=cmdclass, )
def main()
Install entry-point
2.186432
2.153685
1.015205
workflow = pe.Workflow(name=name) inputnode = pe.Node(niu.IdentityInterface(fields=['in_file']), name='inputnode') outputnode = pe.Node(niu.IdentityInterface( fields=['bias_corrected', 'out_file', 'out_mask', 'bias_image']), name='outputnode') inu_n4 = pe.Node( ants.N4BiasFieldCorrection(dimension=3, save_bias=True, num_threads=n4_nthreads, copy_header=True), n_procs=n4_nthreads, name='inu_n4') sstrip = pe.Node(afni.SkullStrip(outputtype='NIFTI_GZ'), name='skullstrip') sstrip_orig_vol = pe.Node(afni.Calc( expr='a*step(b)', outputtype='NIFTI_GZ'), name='sstrip_orig_vol') binarize = pe.Node(fsl.Threshold(args='-bin', thresh=1.e-3), name='binarize') if unifize: # Add two unifize steps, pre- and post- skullstripping. inu_uni_0 = pe.Node(afni.Unifize(outputtype='NIFTI_GZ'), name='unifize_pre_skullstrip') inu_uni_1 = pe.Node(afni.Unifize(gm=True, outputtype='NIFTI_GZ'), name='unifize_post_skullstrip') workflow.connect([ (inu_n4, inu_uni_0, [('output_image', 'in_file')]), (inu_uni_0, sstrip, [('out_file', 'in_file')]), (inu_uni_0, sstrip_orig_vol, [('out_file', 'in_file_a')]), (sstrip_orig_vol, inu_uni_1, [('out_file', 'in_file')]), (inu_uni_1, outputnode, [('out_file', 'out_file')]), (inu_uni_0, outputnode, [('out_file', 'bias_corrected')]), ]) else: workflow.connect([ (inputnode, sstrip_orig_vol, [('in_file', 'in_file_a')]), (inu_n4, sstrip, [('output_image', 'in_file')]), (sstrip_orig_vol, outputnode, [('out_file', 'out_file')]), (inu_n4, outputnode, [('output_image', 'bias_corrected')]), ]) # Remaining connections workflow.connect([ (sstrip, sstrip_orig_vol, [('out_file', 'in_file_b')]), (inputnode, inu_n4, [('in_file', 'input_image')]), (sstrip_orig_vol, binarize, [('out_file', 'in_file')]), (binarize, outputnode, [('out_file', 'out_mask')]), (inu_n4, outputnode, [('bias_image', 'bias_image')]), ]) return workflow
def afni_wf(name='AFNISkullStripWorkflow', unifize=False, n4_nthreads=1)
Skull-stripping workflow Originally derived from the `codebase of the QAP <https://github.com/preprocessed-connectomes-project/\ quality-assessment-protocol/blob/master/qap/anatomical_preproc.py#L105>`_. Now, this workflow includes :abbr:`INU (intensity non-uniformity)` correction using the N4 algorithm and (optionally) intensity harmonization using AFNI's ``3dUnifize``.
1.80834
1.784328
1.013457
data = u'{0}'.format(data) # We put data as a argument for log.error() so error tracking systems such # as Sentry will properly group errors together by msg only log.error(u'{0}: %s'.format(msg), data) raise cls(u'{0}: {1}'.format(msg, data))
def _log_and_raise_exception(msg, data, cls=FoursquareException)
Calls log.error() then raises an exception of class cls
9.539396
8.496726
1.122714
param_string = _foursquare_urlencode(params) for i in xrange(NUM_REQUEST_RETRIES): try: try: response = requests.get(url, headers=headers, params=param_string, verify=VERIFY_SSL) return _process_response(response) except requests.exceptions.RequestException as e: _log_and_raise_exception('Error connecting with foursquare API', e) except FoursquareException as e: # Some errors don't bear repeating if e.__class__ in [InvalidAuth, ParamError, EndpointError, NotAuthorized, Deprecated]: raise # If we've reached our last try, re-raise if ((i + 1) == NUM_REQUEST_RETRIES): raise time.sleep(1)
def _get(url, headers={}, params=None)
Tries to GET data from an endpoint using retries
4.807091
4.621552
1.040147
try: response = requests.post(url, headers=headers, data=data, files=files, verify=VERIFY_SSL) return _process_response(response) except requests.exceptions.RequestException as e: _log_and_raise_exception('Error connecting with foursquare API', e)
def _post(url, headers={}, data=None, files=None)
Tries to POST data to an endpoint
3.019049
3.147531
0.95918
# Read the response as JSON try: data = response.json() except ValueError: _log_and_raise_exception('Invalid response', response.text) # Default case, Got proper response if response.status_code == 200: return { 'headers': response.headers, 'data': data } return _raise_error_from_response(data)
def _process_response(response)
Make the request and handle exception processing
4.987516
4.655517
1.071313
# Check the meta-data for why this request failed meta = data.get('meta') if meta: # Account for foursquare conflicts # see: https://developer.foursquare.com/overview/responses if meta.get('code') in (200, 409): return data exc = error_types.get(meta.get('errorType')) if exc: raise exc(meta.get('errorDetail')) else: _log_and_raise_exception('Unknown error. meta', meta) else: _log_and_raise_exception('Response format invalid, missing meta property. data', data)
def _raise_error_from_response(data)
Processes the response data
5.003201
5.10445
0.980165
# Original doc: http://docs.python.org/2/library/urllib.html#urllib.urlencode # Works the same way as urllib.urlencode except two differences - # 1. it uses `quote()` instead of `quote_plus()` # 2. it takes an extra parameter called `safe_chars` which is a string # having the characters which should not be encoded. # # Courtesy of github.com/iambibhas if hasattr(query,"items"): # mapping objects query = query.items() else: # it's a bother at times that strings and string-like objects are # sequences... try: # non-sequence items should not work with len() # non-empty strings will fail this if len(query) and not isinstance(query[0], tuple): raise TypeError # zero-length sequences of all types will get here and succeed, # but that's a minor nit - since the original implementation # allowed empty dicts that type of behavior probably should be # preserved for consistency except TypeError: ty,va,tb = sys.exc_info() raise TypeError("not a valid non-string sequence or mapping object").with_traceback(tb) l = [] if not doseq: # preserve old behavior for k, v in query: k = parse.quote(_as_utf8(k), safe=safe_chars) v = parse.quote(_as_utf8(v), safe=safe_chars) l.append(k + '=' + v) else: for k, v in query: k = parse.quote(_as_utf8(k), safe=safe_chars) if isinstance(v, six.string_types): v = parse.quote(_as_utf8(v), safe=safe_chars) l.append(k + '=' + v) else: try: # is this a sufficient test for sequence-ness? len(v) except TypeError: # not a sequence v = parse.quote(_as_utf8(v), safe=safe_chars) l.append(k + '=' + v) else: # loop over the sequence for elt in v: l.append(k + '=' + parse.quote(_as_utf8(elt))) return '&'.join(l)
def _foursquare_urlencode(query, doseq=0, safe_chars="&/,+")
Gnarly hack because Foursquare doesn't properly handle standard url encoding
2.735785
2.728538
1.002656
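The point of the hack is that `quote()` leaves the characters in `safe_chars` alone, whereas the standard `urlencode()` (which uses `quote_plus()`) escapes commas and turns spaces into '+'. A small standard-library comparison, with example parameter values chosen only for illustration:

from urllib.parse import urlencode, quote

params = {'ll': '40.7,-74.0', 'query': 'coffee shop'}

# Standard encoding: commas become %2C and spaces become '+'.
print(urlencode(params))            # e.g. ll=40.7%2C-74.0&query=coffee+shop

# The hack's behaviour: keep ',' (and '&', '/', '+') literal, escape spaces as %20.
safe_chars = "&/,+"
print('&'.join('{}={}'.format(quote(str(k), safe=safe_chars),
                              quote(str(v), safe=safe_chars))
               for k, v in params.items()))
                                    # e.g. ll=40.7,-74.0&query=coffee%20shop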
for name, endpoint in inspect.getmembers(self):
    if inspect.isclass(endpoint) and issubclass(endpoint, self._Endpoint) and (endpoint is not self._Endpoint):
        endpoint_instance = endpoint(self.base_requester)
        setattr(self, endpoint_instance.endpoint, endpoint_instance)
def _attach_endpoints(self)
Dynamically attach endpoint callables to this client
3.457282
3.123193
1.10697
permissions = [permission() for permission in self.permission_classes]
for permission in permissions:
    if not permission.has_permission(request):
        raise PermissionDenied()
def check_permissions(self, request)
Check if the request should be permitted. Raises an appropriate exception if the request is not permitted.
2.869801
2.978936
0.963364
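Each entry in `permission_classes` is expected to be a class whose instances expose `has_permission(request)`. A minimal sketch of such a class and of the check itself; every name here is hypothetical and the exception is a stand-in for the framework's own.

class PermissionDenied(Exception):
    """Stand-in for the framework's permission-denied exception."""

class IsAuthenticated:
    """Hypothetical permission: allow only requests that carry a user."""
    def has_permission(self, request):
        return getattr(request, "user", None) is not None

class View:
    permission_classes = [IsAuthenticated]

    def check_permissions(self, request):
        # Instantiate each permission class and fail fast on the first denial.
        for permission in (cls() for cls in self.permission_classes):
            if not permission.has_permission(request):
                raise PermissionDenied()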
self.registry.append((widget, basename, parameters))
def register(self, widget, basename, **parameters)
Register a widget, URL basename and any optional URL parameters. Parameters are passed as keyword arguments, i.e. >>> router.register(MyWidget, 'mywidget', my_parameter="[A-Z0-9]+") This would be the equivalent of manually adding the following to urlpatterns: >>> url(r"^widgets/mywidget/(?P<my_parameter>[A-Z0-9]+)/?", MyWidget.as_view(), "widget_mywidget")
5.542313
7.183408
0.771544
return HttpResponse( self.convert_context_to_json(context), content_type='application/json', **response_kwargs )
def render_to_json_response(self, context, **response_kwargs)
Returns a JSON response, transforming 'context' to make the payload.
2.568584
2.053806
1.250646
for obj in (self.execute().json().get("items") or []): yield self.api_obj_class(self.api, obj)
def iterator(self)
Execute the API request and return an iterator over the objects. This method does not use the query cache.
11.0207
6.764718
1.629144
response = self.get(version="", base="/version")
response.raise_for_status()
data = response.json()
return (data["major"], data["minor"])
def version(self)
Get Kubernetes API version
5.616045
4.505897
1.246377
version = kwargs.pop("version", "v1")
if version == "v1":
    base = kwargs.pop("base", "/api")
elif "/" in version:
    base = kwargs.pop("base", "/apis")
else:
    if "base" not in kwargs:
        raise TypeError("unknown API version; base kwarg must be specified.")
    base = kwargs.pop("base")
bits = [base, version]
# Overwrite (default) namespace from context if it was set
if "namespace" in kwargs:
    n = kwargs.pop("namespace")
    if n is not None:
        if n:
            namespace = n
        else:
            namespace = self.config.namespace
        if namespace:
            bits.extend([
                "namespaces",
                namespace,
            ])
url = kwargs.get("url", "")
if url.startswith("/"):
    url = url[1:]
bits.append(url)
kwargs["url"] = self.url + posixpath.join(*bits)
return kwargs
def get_kwargs(self, **kwargs)
Creates a full URL to request based on arguments. :Parameters: - `kwargs`: All keyword arguments needed to build a Kubernetes API endpoint
3.706887
3.670803
1.00983
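The method essentially splices base path, API version, optional namespace and resource URL into one endpoint. A standalone sketch of the same path composition, assuming the core API lives under /api and group APIs under /apis; the function name and server address are illustrative only.

import posixpath

def build_url(server, version="v1", namespace=None, url=""):
    """Compose a Kubernetes-style endpoint path (illustrative only)."""
    base = "/api" if version == "v1" else "/apis"
    bits = [base, version]
    if namespace:
        bits.extend(["namespaces", namespace])
    bits.append(url.lstrip("/"))
    return server + posixpath.join(*bits)

print(build_url("https://kube.example", "v1", "default", "pods"))
# https://kube.example/api/v1/namespaces/default/pods
print(build_url("https://kube.example", "apps/v1", "default", "deployments"))
# https://kube.example/apis/apps/v1/namespaces/default/deployments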
return self.session.request(*args, **self.get_kwargs(**kwargs))
def request(self, *args, **kwargs)
Makes an API request based on arguments. :Parameters: - `args`: Non-keyword arguments - `kwargs`: Keyword arguments
5.601411
8.944571
0.626236
return self.session.get(*args, **self.get_kwargs(**kwargs))
def get(self, *args, **kwargs)
Executes an HTTP GET. :Parameters: - `args`: Non-keyword arguments - `kwargs`: Keyword arguments
5.863813
8.392553
0.698692
return self.session.options(*args, **self.get_kwargs(**kwargs))
def options(self, *args, **kwargs)
Executes an HTTP OPTIONS. :Parameters: - `args`: Non-keyword arguments - `kwargs`: Keyword arguments
8.144148
11.111175
0.732969
return self.session.head(*args, **self.get_kwargs(**kwargs))
def head(self, *args, **kwargs)
Executes an HTTP HEAD. :Parameters: - `args`: Non-keyword arguments - `kwargs`: Keyword arguments
6.41939
10.030725
0.639973
return self.session.post(*args, **self.get_kwargs(**kwargs))
def post(self, *args, **kwargs)
Executes an HTTP POST. :Parameters: - `args`: Non-keyword arguments - `kwargs`: Keyword arguments
6.590245
9.408467
0.700459
return self.session.put(*args, **self.get_kwargs(**kwargs))
def put(self, *args, **kwargs)
Executes an HTTP PUT. :Parameters: - `args`: Non-keyword arguments - `kwargs`: Keyword arguments
6.052427
9.451118
0.640393
return self.session.patch(*args, **self.get_kwargs(**kwargs))
def patch(self, *args, **kwargs)
Executes an HTTP PATCH. :Parameters: - `args`: Non-keyword arguments - `kwargs`: Keyword arguments
6.507771
9.624771
0.676148
return self.session.delete(*args, **self.get_kwargs(**kwargs))
def delete(self, *args, **kwargs)
Executes an HTTP DELETE. :Parameters: - `args`: Non-keyword arguments - `kwargs`: Keyword arguments
6.506244
8.964755
0.725758
filename = os.path.expanduser(filename)
if not os.path.isfile(filename):
    raise exceptions.PyKubeError("Configuration file {} not found".format(filename))
with open(filename) as f:
    doc = yaml.safe_load(f.read())
self = cls(doc, **kwargs)
self.filename = filename
return self
def from_file(cls, filename, **kwargs)
Creates an instance of the KubeConfig class from a kubeconfig file. :Parameters: - `filename`: The full path to the configuration file
2.671562
3.132702
0.852798
doc = {
    "clusters": [
        {
            "name": "self",
            "cluster": {
                "server": url,
            },
        },
    ],
    "contexts": [
        {
            "name": "self",
            "context": {
                "cluster": "self",
            },
        }
    ],
    "current-context": "self",
}
self = cls(doc, **kwargs)
return self
def from_url(cls, url, **kwargs)
Creates an instance of the KubeConfig class from a single URL (useful for interacting with kubectl proxy).
3.097856
2.236976
1.384841
if not hasattr(self, "_clusters"): cs = {} for cr in self.doc["clusters"]: cs[cr["name"]] = c = copy.deepcopy(cr["cluster"]) if "server" not in c: c["server"] = "http://localhost" BytesOrFile.maybe_set(c, "certificate-authority") self._clusters = cs return self._clusters
def clusters(self)
Returns the known clusters, exposed as a read-only property.
6.495685
5.869808
1.106626
if not hasattr(self, "_users"): us = {} if "users" in self.doc: for ur in self.doc["users"]: us[ur["name"]] = u = copy.deepcopy(ur["user"]) BytesOrFile.maybe_set(u, "client-certificate") BytesOrFile.maybe_set(u, "client-key") self._users = us return self._users
def users(self)
Returns the known users, exposed as a read-only property.
4.635858
4.279012
1.083394
if not hasattr(self, "_contexts"): cs = {} for cr in self.doc["contexts"]: cs[cr["name"]] = copy.deepcopy(cr["context"]) self._contexts = cs return self._contexts
def contexts(self)
Returns the known contexts, exposed as a read-only property.
4.473895
3.935567
1.136785
return self.users.get(self.contexts[self.current_context].get("user", ""), {})
def user(self)
Returns the current user set by current context
12.732038
8.380587
1.51923
if self._filename:
    with open(self._filename, "rb") as f:
        return f.read()
else:
    return self._bytes
def bytes(self)
Returns the provided data as bytes.
3.36892
2.872238
1.172925
if self._filename:
    return self._filename
else:
    with tempfile.NamedTemporaryFile(delete=False) as f:
        f.write(self._bytes)
    return f.name
def filename(self)
Returns the provided data as a file location.
3.249624
2.720655
1.194427
resource_list = api.resource_list(api_version)
resource = next((resource for resource in resource_list["resources"]
                 if resource["kind"] == kind), None)
base = NamespacedAPIObject if resource["namespaced"] else APIObject
return type(kind, (base,), {
    "version": api_version,
    "endpoint": resource["name"],
    "kind": kind
})
def object_factory(api, api_version, kind)
Dynamically builds a Python class for the given Kubernetes object in an API. For example: api = pykube.HTTPClient(...) NetworkPolicy = pykube.object_factory(api, "networking.k8s.io/v1", "NetworkPolicy") This enables construction of any Kubernetes object kind without explicit support from pykube. Currently, the HTTPClient passed to this function will not be bound to the returned type. It is planned to fix this, but in the meantime pass it as you would normally.
3.721412
4.125898
0.901964
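The interesting part is the three-argument form of Python's `type()`, which builds the class at runtime. A self-contained sketch of that mechanism with the base class reduced to a stub so it runs without a cluster; all names below are illustrative, not pykube's API.

class APIObject:
    """Stub base class standing in for pykube's APIObject."""
    version = None
    endpoint = None
    kind = None

def make_kind(api_version, kind, endpoint):
    """Build a class dynamically, the same way object_factory does."""
    return type(kind, (APIObject,), {
        "version": api_version,
        "endpoint": endpoint,
        "kind": kind,
    })

NetworkPolicy = make_kind("networking.k8s.io/v1", "NetworkPolicy", "networkpolicies")
print(NetworkPolicy.__name__, NetworkPolicy.endpoint)  # NetworkPolicy networkpolicies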
if target_revision is None:
    revision = {}
else:
    revision = {"revision": target_revision}

params = {
    "kind": "DeploymentRollback",
    "apiVersion": self.version,
    "name": self.name,
    "rollbackTo": revision
}
kwargs = {
    "version": self.version,
    "namespace": self.namespace,
    "operation": "rollback",
}
r = self.api.post(**self.api_kwargs(data=json.dumps(params), **kwargs))
r.raise_for_status()
return r.text
def rollout_undo(self, target_revision=None)
Produces the same action as the kubectl rollout undo deployment command. The input variable is the revision to roll back to (in kubectl, --to-revision).
3.210568
3.040935
1.055783
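The body posted to the rollback operation is a DeploymentRollback object. The sketch below assembles that body on its own; the apiVersion and names are placeholders, where the method above would use the live object's attributes.

import json

def rollback_body(api_version, name, target_revision=None):
    """Assemble the DeploymentRollback payload (values here are placeholders)."""
    revision = {} if target_revision is None else {"revision": target_revision}
    return json.dumps({
        "kind": "DeploymentRollback",
        "apiVersion": api_version,
        "name": name,
        "rollbackTo": revision,
    })

print(rollback_body("apps/v1", "my-deployment", target_revision=2))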
log_call = "log" params = {} if container is not None: params["container"] = container if pretty is not None: params["pretty"] = pretty if previous: params["previous"] = "true" if since_seconds is not None and since_time is None: params["sinceSeconds"] = int(since_seconds) elif since_time is not None and since_seconds is None: params["sinceTime"] = since_time if timestamps: params["timestamps"] = "true" if tail_lines is not None: params["tailLines"] = int(tail_lines) if limit_bytes is not None: params["limitBytes"] = int(limit_bytes) query_string = urlencode(params) log_call += "?{}".format(query_string) if query_string else "" kwargs = { "version": self.version, "namespace": self.namespace, "operation": log_call, } r = self.api.get(**self.api_kwargs(**kwargs)) r.raise_for_status() return r.text
def logs(self, container=None, pretty=None, previous=False, since_seconds=None, since_time=None, timestamps=False, tail_lines=None, limit_bytes=None)
Produces the same result as calling kubectl logs pod/<pod-name>. See http://kubernetes.io/docs/api-reference/v1/operations/, section 'read log of the specified Pod', for the meaning of each parameter. The result is plain text.
1.883352
1.88621
0.998485
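Most of the method is translating keyword arguments into the query string of the log subresource. A standalone sketch of that translation using only the standard library; the helper name and the subset of parameters shown are chosen for illustration.

from urllib.parse import urlencode

def log_operation(container=None, previous=False, tail_lines=None, timestamps=False):
    """Build the 'log?...' operation string the way the method above assembles it."""
    params = {}
    if container is not None:
        params["container"] = container
    if previous:
        params["previous"] = "true"
    if timestamps:
        params["timestamps"] = "true"
    if tail_lines is not None:
        params["tailLines"] = int(tail_lines)
    query_string = urlencode(params)
    return "log" + ("?{}".format(query_string) if query_string else "")

print(log_operation(container="web", tail_lines=50))  # log?container=web&tailLines=50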
while self.running['restart_thread']:
    try:
        endpoint = self.restart_q.get(timeout=.5)
    except TimeoutError:
        continue
    log.info("_restart_thread(): Restarting Thread for endpoint %s", endpoint)
    self.unsubscribe(endpoint)
    self.subscribe(endpoint)
def _restart_thread(self)
Restarts subscription threads if their connection drops. :return:
5.87137
5.813797
1.009903
try:
    conn = create_connection(self.addr + endpoint, timeout=5)
except WebSocketTimeoutException:
    self.restart_q.put(endpoint)
    return

while self.threads_running[endpoint]:
    try:
        msg = conn.recv()
    except WebSocketTimeoutException:
        self._controller_q.put(endpoint)

    log.debug("%s, %s", endpoint, msg)
    ep, pair = endpoint.split('/')
    log.debug("_subscription_thread(): Putting data on q..")
    try:
        self.data_q.put((ep, pair, msg, time.time()), timeout=1)
    except TimeoutError:
        continue
    finally:
        log.debug("_subscription_thread(): Data Processed, looping back..")
conn.close()
log.debug("_subscription_thread(): Thread Loop Ended.")
def _subscription_thread(self, endpoint)
Thread Method, running the connection for each endpoint. :param endpoint: :return:
5.323078
5.208319
1.022034
log.info("WSSAPI.start(): Starting Basic Facilities") self.running = True if self._controller_thread is None or not self._controller_thread.is_alive(): self._controller_thread = Thread(target=self._controller, daemon=True, name='%s Controller Thread' % self.name) self._controller_thread.start()
def start(self)
Starts threads. Extend this in your child class. :return:
5.26883
5.160657
1.020961
while self.running:
    try:
        cmd = self._controller_q.get(timeout=1)
    except (TimeoutError, Empty):
        continue
    log.debug("WSSAPI._controller(): Received command: %s", cmd)
    Thread(target=self.eval_command, args=(cmd,)).start()
def _controller(self)
This method runs in a dedicated thread, calling self.eval_command(). :return:
5.196097
4.340761
1.197048
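The controller is a plain consumer thread: commands go onto a queue, and each one is handed to its own worker thread for evaluation. A compact, self-contained sketch of that pattern; the names and the print stand-in for command dispatch are illustrative only.

import queue
import threading
import time

command_q = queue.Queue()
running = True

def eval_command(cmd):
    print("handling", cmd)   # stand-in for restart()/stop() dispatch

def controller():
    """Consume commands and hand each one to a worker thread."""
    while running:
        try:
            cmd = command_q.get(timeout=1)
        except queue.Empty:
            continue
        threading.Thread(target=eval_command, args=(cmd,)).start()

threading.Thread(target=controller, daemon=True).start()
command_q.put('restart')
time.sleep(2)   # give the worker a moment before the script exits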
if cmd == 'restart':
    self.restart()
elif cmd == 'stop':
    self.stop()
else:
    raise ValueError("Unknown Command passed to controller! %s" % cmd)
def eval_command(self, cmd)
Evaluates commands issued by internal threads. Extend this as necessary. :param cmd: :return:
5.180481
5.459244
0.948937
def decorator(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            r = func(*args, **kwargs)
        except Exception:
            log.exception("return_api_response(): Error during call to %s(%s, %s)",
                          func.__name__, args, kwargs)
            raise

        # Check Status
        try:
            r.raise_for_status()
        except requests.HTTPError:
            log.exception("return_api_response: HTTPError for url %s", r.request.url)

        # Verify json data
        try:
            data = r.json()
        except json.JSONDecodeError:
            log.error('return_api_response: Error while parsing json. '
                      'Request url was: %s, result is: %s', r.request.url, r.text)
            data = None
        except Exception:
            log.exception("return_api_response(): Unexpected error while parsing "
                          "json from %s", r.request.url)
            raise

        # Format, if available
        if formatter is not None and data:
            try:
                r.formatted = formatter(data, *args, **kwargs)
            except Exception:
                log.exception("Error while applying formatter!")

        return r
    return wrapper
return decorator
def return_api_response(formatter=None)
Decorator which applies the referenced formatter (if available) to the function output and adds the result to the APIResponse object's `formatted` attribute. :param formatter: bitex.formatters.Formatter() obj :return: bitex.api.response.APIResponse()
2.971254
2.888549
1.028632
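In use, the decorator wraps a query method and hangs the formatter's output on the response object. A minimal sketch of that wiring with a dummy response and formatter, so it runs without requests or a live exchange; everything here is a simplified stand-in, not the library's own classes.

from functools import wraps

class FakeResponse:
    """Stand-in for an APIResponse with a json() payload and a `formatted` slot."""
    def __init__(self, data):
        self._data = data
        self.formatted = None
    def json(self):
        return self._data
    def raise_for_status(self):
        pass

def return_formatted(formatter=None):
    """Simplified version of return_api_response: attach formatter output."""
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            r = func(*args, **kwargs)
            r.raise_for_status()
            data = r.json()
            if formatter is not None and data:
                r.formatted = formatter(data, *args, **kwargs)
            return r
        return wrapper
    return decorator

@return_formatted(formatter=lambda data, *a, **kw: data["last"])
def ticker():
    return FakeResponse({"last": "42000.0"})

print(ticker().formatted)  # 42000.0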
# There are some exceptions from the general formatting rule
# see: https://api.kraken.com/0/public/AssetPairs
format_exceptions = ['BCHEUR', 'BCHUSD', 'BCHXBT', 'DASHEUR', 'DASHUSD',
                     'DASHXBT', 'EOSETH', 'EOSXBT', 'GNOETH', 'GNOXBT',
                     'USDTZUSD']
if input_pair.upper() in format_exceptions:
    return input_pair.upper()

if len(input_pair) % 2 == 0:
    base_cur, quote_cur = input_pair[:len(input_pair)//2], input_pair[len(input_pair)//2:]
else:
    base_cur, quote_cur = input_pair.split(input_pair[len(input_pair)//2])

def add_prefix(input_string):
    input_string = input_string.lower()
    if any(x in input_string for x in ['usd', 'eur', 'jpy', 'gbp', 'cad']):
        # appears to be fiat currency
        if not input_string.startswith('z'):
            input_string = 'z' + input_string
    else:
        # Appears to be Crypto currency
        if 'btc' in input_string:
            input_string = input_string.replace('btc', 'xbt')
        if not input_string.startswith('x') or len(input_string) == 3:
            input_string = 'x' + input_string
    return input_string

base_cur = add_prefix(base_cur)
quote_cur = add_prefix(quote_cur)
return (base_cur + quote_cur).upper()
def format_pair(input_pair)
Formats input to conform with the Kraken pair format. The API expects one of two formats: XBTXLT or XXBTXLTC, where crypto currencies have an X prepended and fiat currencies have a Z prepended. Since the API returns the 8-character format, that's what we will format into as well. We expect 6 or 8 character strings, but do not explicitly check for it. Should the string be of uneven length, we'll split the pair in the middle like so: BTC-LTC -> BTC, LTC. Furthermore, since Kraken uses 'XBT' as Bitcoin's symbol, we look for and replace occurrences of 'btc' with 'XBT'. In addition, there are some exceptions to these rules. Kraken uses the pairs 'BCHEUR', 'BCHUSD', 'BCHXBT', 'DASHEUR', 'DASHUSD', 'DASHXBT', 'EOSETH', 'EOSXBT', 'GNOETH', 'GNOXBT' and 'USDTZUSD' as they are. If the input matches one of these pairs, we just return its uppercase representation. :param input_pair: str :return: str
2.662906
2.063657
1.290382
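Working through the rules above, a few concrete inputs and the values the function returns for them, derived directly from the code and the exception list (this assumes format_pair above is in scope):

# Even-length pair, crypto + fiat: split in the middle, prefix X/Z, map btc -> xbt.
assert format_pair('btceur') == 'XXBTZEUR'
# Exception-list pair: returned unchanged apart from upper-casing.
assert format_pair('bcheur') == 'BCHEUR'
# Fiat/fiat pair: both sides get the Z prefix.
assert format_pair('usdeur') == 'ZUSDZEUR'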
nonce = str(int(time.time() * 1000))
package = {'apikey': self.key,
           'message': {'nonce': nonce, 'payload': payload}}
# Sign the JSON-encoded payload with the API secret (SHA-512 HMAC)
signature = hmac.new(self.secret, json.dumps(payload).encode('utf-8'),
                     hashlib.sha512).hexdigest()
package['signature'] = signature
return json.dumps(package)
def sign(self, payload)
Signature method which wraps signature and nonce parameters around a payload dictionary. :param payload: :return:
3.872504
4.135823
0.936332
super(BitstampWSS, self).start()
self.pusher = pusherclient.Pusher(self.addr, **self.__pusher_options)
self.pusher.connection.bind('pusher:connection_established',
                            self._register_bindings)
self.pusher.connect()
def start(self)
Extension of Pusher.connect() method, which registers all callbacks with the relevant channels, before initializing a connection. :return:
5.320473
5.354642
0.993619
self._register_diff_order_book_channels()
self._register_live_orders_channels()
self._register_live_trades_channels()
self._register_order_book_channels()
def _register_bindings(self, data)
connection_handler method which is called when we connect to pusher. Responsible for binding callbacks to channels before we connect. :return:
5.275407
4.425334
1.192092
for channel_name in channels:
    if channel_name in self.channels:
        channel = self.pusher.subscribe(channel_name)
        if isinstance(events, list):
            for event in events:
                channel.bind(event, channels[channel_name])
        else:
            channel.bind(events, channels[channel_name])
def _bind_channels(self, events, channels)
Binds given channel events to callbacks. :param events: str or list :param channels: dict of channel_name: callback_method() pairs :return:
2.34468
2.237894
1.047717
channels = {'live_trades': self.btcusd_lt_callback,
            'live_trades_btceur': self.btceur_lt_callback,
            'live_trades_eurusd': self.eurusd_lt_callback,
            'live_trades_xrpusd': self.xrpusd_lt_callback,
            'live_trades_xrpeur': self.xrpeur_lt_callback,
            'live_trades_xrpbtc': self.xrpbtc_lt_callback}
event = 'trade'
self._bind_channels(event, channels)
def _register_live_trades_channels(self)
Registers the binding for the live_trades_channels channels. :return:
2.331412
2.267457
1.028206
channels = {'order_book': self.btcusd_ob_callback,
            'order_book_btceur': self.btceur_ob_callback,
            'order_book_eurusd': self.eurusd_ob_callback,
            'order_book_xrpusd': self.xrpusd_ob_callback,
            'order_book_xrpeur': self.xrpeur_ob_callback,
            'order_book_xrpbtc': self.xrpbtc_ob_callback}
event = 'data'
self._bind_channels(event, channels)
def _register_order_book_channels(self)
Registers the binding for the order_book channels. :return:
2.523975
2.492804
1.012505
channels = {'diff_order_book': self.btcusd_dob_callback,
            'diff_order_book_btceur': self.btceur_dob_callback,
            'diff_order_book_eurusd': self.eurusd_dob_callback,
            'diff_order_book_xrpusd': self.xrpusd_dob_callback,
            'diff_order_book_xrpeur': self.xrpeur_dob_callback,
            'diff_order_book_xrpbtc': self.xrpbtc_dob_callback}
event = 'data'
self._bind_channels(event, channels)
def _register_diff_order_book_channels(self)
Registers the binding for the diff_order_book channels. :return:
2.4277
2.379951
1.020063
with open(path, 'r') as f:
    self.key = f.readline().strip()
    self.secret = f.readline().strip()
def load_key(self, path)
Load key and secret from file. :param path: path to a file whose first two lines are the key and the secret, respectively
2.680023
2.269225
1.18103
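The expected file layout is simply two lines: the API key on the first and the secret on the second. A self-contained sketch that writes such a file and reads it back the same way load_key does; the key and secret values are dummies.

import tempfile

# Write a dummy key file in the expected two-line layout.
with tempfile.NamedTemporaryFile('w', suffix='.key', delete=False) as f:
    f.write("my-api-key\nmy-api-secret\n")
    path = f.name

# Read it back exactly as load_key does.
with open(path, 'r') as f:
    key = f.readline().strip()
    secret = f.readline().strip()

print(key, secret)   # my-api-key my-api-secret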
r = requests.request(*args, **kwargs)
return APIResponse(r)
def api_request(*args, **kwargs)
Wrapper which converts a requests.Response into our custom APIResponse object :param args: :param kwargs: :return:
5.809351
4.499496
1.291112
url = self.uri
return url, {'params': {'test_param': "authenticated_chimichanga"}}
def sign(self, url, endpoint, endpoint_path, method_verb, *args, **kwargs)
Dummy signature creation method. Override this in the child class. The URL must be returned, as some signatures use the URL for signature generation, and the API calls made must match the address exactly. :param url: self.uri + self.version + endpoint (i.e. https://api.kraken/0/Depth) :param endpoint: API endpoint to call (i.e. 'Depth') :param endpoint_path: self.version + endpoint (i.e. '0/Depth') :param method_verb: valid request type (PUT, GET, POST etc.) :return:
54.724823
58.995899
0.927604