index | package | name | docstring | code | signature |
---|---|---|---|---|---|
56,594 |
libsast.core_matcher.choice_matcher
|
__init__
| null |
def __init__(self, options: dict) -> None:
    self.scan_rules = get_rules(options.get('choice_rules'))
    self.show_progress = options.get('show_progress')
    self.alternative_path = options.get('alternative_path')
    exts = options.get('choice_extensions')
    if exts:
        self.exts = [ext.lower() for ext in exts]
    else:
        self.exts = []
    self.findings = {}
|
(self, options: dict) -> NoneType
|
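A minimal sketch of the options dict this constructor reads. The key names come from the code above; the concrete values, and the assumption that get_rules accepts a rule file path, are illustrative.

```python
# Illustrative options; only the key names are taken from __init__ above.
options = {
    'choice_rules': 'rules/choice_rules.yaml',  # assumed: a rule source get_rules() understands
    'choice_extensions': ['.xml', '.java'],     # lower-cased and used to filter files
    'alternative_path': None,                   # used by scan() for non-'code' rules
    'show_progress': False,
}
```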
56,595 |
libsast.core_matcher.choice_matcher
|
add_finding
|
Add Choice Findings.
|
def add_finding(self, results):
    """Add Choice Findings."""
    for res_list in results:
        if not res_list:
            continue
        for match_dict in res_list:
            all_matches = match_dict['all_matches']
            matches = match_dict['matches']
            rule = match_dict['rule']
            if all_matches:
                selection = rule['selection'].format(list(all_matches))
            elif matches:
                select = rule['choice'][min(matches)][1]
                selection = rule['selection'].format(select)
            elif rule.get('else'):
                selection = rule['selection'].format(rule['else'])
            else:
                continue
            self.findings[rule['id']] = self.get_meta(rule, selection)
|
(self, results)
|
56,596 |
libsast.core_matcher.choice_matcher
|
choice_matcher
|
Run a Single Choice Matcher rule on all files.
|
def choice_matcher(self, args):
    """Run a Single Choice Matcher rule on all files."""
    results = []
    scan_paths, rule = args
    try:
        matches = set()
        all_matches = set()
        for sfile in scan_paths:
            ext = sfile.suffix.lower()
            if self.exts and ext not in self.exts:
                continue
            if sfile.stat().st_size / 1000 / 1000 > 5:
                # Skip scanning files greater than 5 MB
                continue
            data = sfile.read_text('utf-8', 'ignore')
            if ext in ('.html', '.xml'):
                data = strip_comments2(data)
            else:
                data = strip_comments(data)
            match = choices.find_choices(data, rule)
            if match:
                if isinstance(match, set):
                    # all
                    all_matches.update(match)
                elif isinstance(match, list):
                    # or, and
                    matches.add(match[0])
        results.append({
            'rule': rule,
            'matches': matches,
            'all_matches': all_matches,
        })
    except Exception:
        raise exceptions.RuleProcessingError('Rule processing error.')
    return results
|
(self, args)
|
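For orientation, a sketch (under stated assumptions) of the work item that ChoiceMatcher.scan builds for this method and the per-rule result it returns for add_finding; the rule content is hypothetical.

```python
from pathlib import Path

# Hypothetical work item, as assembled by ChoiceMatcher.scan(): (scan_paths, rule)
args = (
    [Path('src/AndroidManifest.xml')],
    {'id': 'rule_1', 'type': 'code', 'choice_type': 'or',
     'selection': 'Found: {}', 'choice': [('pattern', 'label')]},
)
# choice_matcher(args) returns a single-element list shaped like:
#   [{'rule': <rule dict>, 'matches': {0}, 'all_matches': set()}]
# which add_finding() then turns into self.findings[rule['id']].
```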
56,597 |
libsast.core_matcher.choice_matcher
|
get_meta
|
Get Finding Meta.
|
def get_meta(self, rule, selection):
    """Get Finding Meta."""
    meta_dict = {}
    meta_dict['choice'] = selection
    meta_dict['description'] = rule['message']
    for key in rule:
        if key in ('choice',
                   'message',
                   'id',
                   'type',
                   'choice_type',
                   'selection',
                   'else'):
            continue
        meta_dict[key] = rule[key]
    return meta_dict
|
(self, rule, selection)
|
56,598 |
libsast.core_matcher.choice_matcher
|
scan
|
Scan file(s) or directory per rule.
|
def scan(self, paths: list) -> dict:
    """Scan file(s) or directory per rule."""
    if not (self.scan_rules and paths):
        return
    self.validate_rules()
    choice_args = []
    if self.show_progress:
        pbar = common.ProgressBar('Choice Match', len(self.scan_rules))
        self.scan_rules = pbar.progrees_loop(self.scan_rules)
    for rule in self.scan_rules:
        scan_paths = paths
        if rule['type'] != 'code' and self.alternative_path:
            # Scan only alternative path
            scan_paths = [Path(self.alternative_path)]
        choice_args.append((scan_paths, rule))
    with ProcessPoolExecutor(max_workers=common.get_worker_count()) as exe:
        results = exe.map(
            self.choice_matcher,
            choice_args,
            chunksize=1)
    self.add_finding(results)
    return self.findings
|
(self, paths: list) -> dict
|
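A hedged end-to-end sketch of this scan flow; the rule file, extensions, and project paths are placeholders.

```python
from pathlib import Path

cm = ChoiceMatcher({
    'choice_rules': 'rules/choice_rules.yaml',                # hypothetical rule file
    'choice_extensions': ['.xml'],
    'alternative_path': 'app/src/main/AndroidManifest.xml',   # used for non-'code' rules
    'show_progress': False,
})
findings = cm.scan(list(Path('app/src').rglob('*.xml')))
# findings maps rule id -> get_meta() output, including the formatted 'choice' selection.
```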
56,599 |
libsast.core_matcher.choice_matcher
|
validate_rules
|
Validate Rules before scanning.
|
def validate_rules(self):
    """Validate Rules before scanning."""
    for rule in self.scan_rules:
        if not isinstance(rule, dict):
            raise exceptions.InvalidRuleFormatError(
                'Choice Matcher Rule format is invalid.')
        if not rule.get('id'):
            raise exceptions.TypeKeyMissingError(
                'The rule is missing the key \'id\'')
        if not rule.get('type'):
            raise exceptions.PatternKeyMissingError(
                'The rule is missing the key \'type\'')
        if not rule.get('choice_type'):
            raise exceptions.PatternKeyMissingError(
                'The rule is missing the key \'choice_type\'')
        if not rule.get('selection'):
            raise exceptions.PatternKeyMissingError(
                'The rule is missing the key \'selection\'')
        if not rule.get('choice'):
            raise exceptions.PatternKeyMissingError(
                'The rule is missing the key \'choice\'')
|
(self)
|
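Putting validate_rules together with add_finding and get_meta, a choice rule needs at least the keys checked above. The sketch below is an illustrative rule, not one shipped with libsast.

```python
# Illustrative rule satisfying validate_rules(); values are hypothetical.
choice_rule = {
    'id': 'min_sdk_version',
    'type': 'code',                 # anything other than 'code' is scanned on alternative_path
    'choice_type': 'or',
    'selection': 'Minimum SDK found: {}',                    # formatted with the selected choice(s)
    'choice': [('android:minSdkVersion="21"', 'API 21')],    # pairs; add_finding uses item[1]
    'message': 'Check the minimum SDK version.',             # becomes the finding description
    'else': 'No SDK declaration found',                      # optional fallback selection
}
```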
56,600 |
libsast.core_matcher.pattern_matcher
|
PatternMatcher
| null |
class PatternMatcher:

    def __init__(self, options: dict) -> None:
        self.matcher = matchers.MatchCommand()
        self.scan_rules = get_rules(options.get('match_rules'))
        self.show_progress = options.get('show_progress')
        exts = options.get('match_extensions')
        if exts:
            self.exts = [ext.lower() for ext in exts]
        else:
            self.exts = []
        self.findings = {}

    def scan(self, paths: list) -> dict:
        """Scan file(s) or directory."""
        if not (self.scan_rules and paths):
            return
        self.validate_rules()
        if self.show_progress:
            pbar = common.ProgressBar('Pattern Match', len(paths))
            paths = pbar.progrees_loop(paths)
        files_to_scan = set()
        for sfile in paths:
            if self.exts and sfile.suffix.lower() not in self.exts:
                continue
            if sfile.stat().st_size / 1000 / 1000 > 5:
                # Skip scanning files greater than 5 MB
                print(f'Skipping large file {sfile.as_posix()}')
                continue
            files_to_scan.add(sfile)
        with ProcessPoolExecutor(max_workers=common.get_worker_count()) as exe:
            results = exe.map(
                self.pattern_matcher,
                files_to_scan,
                chunksize=1)
        self.add_finding(results)
        return self.findings

    def validate_rules(self):
        """Validate Rules before scanning."""
        for rule in self.scan_rules:
            if not isinstance(rule, dict):
                raise exceptions.InvalidRuleFormatError(
                    'Pattern Matcher Rule format is invalid.')
            if not rule.get('type'):
                raise exceptions.TypeKeyMissingError(
                    'The rule is missing the key \'type\'')
            if not rule.get('pattern'):
                raise exceptions.PatternKeyMissingError(
                    'The rule is missing the key \'pattern\'')
            all_mts = [m for m in dir(matchers) if m.startswith('R')]
            pattern_name = rule['type']
            if pattern_name not in all_mts:
                supported = ', '.join(all_mts)
                raise exceptions.MatcherNotFoundError(
                    f'Matcher \'{pattern_name}\' is not supported.'
                    f' Available matchers are {supported}',
                )

    def pattern_matcher(self, file_path):
        """Static Analysis Pattern Matcher."""
        results = []
        try:
            data = file_path.read_text('utf-8', 'ignore')
            for rule in self.scan_rules:
                case = rule.get('input_case')
                if case == 'lower':
                    tmp_data = data.lower()
                elif case == 'upper':
                    tmp_data = data.upper()
                else:
                    tmp_data = data
                if file_path.suffix.lower() in ('.html', '.xml'):
                    fmt_data = strip_comments2(tmp_data)
                else:
                    fmt_data = strip_comments(tmp_data)
                matches = self.matcher._find_match(
                    rule['type'],
                    fmt_data,
                    rule)
                if matches:
                    results.append({
                        'file': file_path.as_posix(),
                        'rule': rule,
                        'matches': matches,
                    })
        except Exception:
            raise exceptions.RuleProcessingError('Rule processing error.')
        return results

    def add_finding(self, results):
        """Add Code Analysis Findings."""
        for res_list in results:
            if not res_list:
                continue
            for match_dict in res_list:
                rule = match_dict['rule']
                for match in match_dict['matches']:
                    crule = deepcopy(rule)
                    file_details = {
                        'file_path': match_dict['file'],
                        'match_string': match[0],
                        'match_position': match[1],
                        'match_lines': match[2],
                    }
                    if rule['id'] in self.findings:
                        self.findings[rule['id']]['files'].append(file_details)
                    else:
                        metadata = crule.get('metadata', {})
                        metadata['description'] = crule['message']
                        metadata['severity'] = crule['severity']
                        self.findings[rule['id']] = {
                            'files': [file_details],
                            'metadata': metadata,
                        }
                    self.findings[rule['id']]['files'] = sorted(
                        self.findings[rule['id']]['files'],
                        key=itemgetter('file_path', 'match_string', 'match_lines'))
|
(options: dict) -> None
|
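A hedged usage sketch for PatternMatcher based on validate_rules and add_finding above: the options keys are taken from the code, while the rule file path and the rule it is assumed to contain (including the 'Regex' matcher name) are illustrative.

```python
from pathlib import Path

# The rule file is a placeholder, assumed to hold a rule along these lines
# (rule 'type' must be one of the matcher names in libsast's matchers module,
# which validate_rules() only checks start with 'R'):
#   - id: hardcoded_password
#     type: Regex
#     pattern: password\s*=\s*["'].+["']
#     message: Possible hard-coded password.
#     severity: warning
pm = PatternMatcher({
    'match_rules': 'rules/pattern_rules.yaml',
    'match_extensions': ['.py'],
    'show_progress': False,
})
findings = pm.scan([Path('app/settings.py')])
# findings maps rule id -> {'files': [...], 'metadata': {'description': ..., 'severity': ...}}
```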
56,601 |
libsast.core_matcher.pattern_matcher
|
__init__
| null |
def __init__(self, options: dict) -> None:
self.matcher = matchers.MatchCommand()
self.scan_rules = get_rules(options.get('match_rules'))
self.show_progress = options.get('show_progress')
exts = options.get('match_extensions')
if exts:
self.exts = [ext.lower() for ext in exts]
else:
self.exts = []
self.findings = {}
|
(self, options: dict) -> NoneType
|
56,602 |
libsast.core_matcher.pattern_matcher
|
add_finding
|
Add Code Analysis Findings.
|
def add_finding(self, results):
"""Add Code Analysis Findings."""
for res_list in results:
if not res_list:
continue
for match_dict in res_list:
rule = match_dict['rule']
for match in match_dict['matches']:
crule = deepcopy(rule)
file_details = {
'file_path': match_dict['file'],
'match_string': match[0],
'match_position': match[1],
'match_lines': match[2],
}
if rule['id'] in self.findings:
self.findings[rule['id']]['files'].append(file_details)
else:
metadata = crule.get('metadata', {})
metadata['description'] = crule['message']
metadata['severity'] = crule['severity']
self.findings[rule['id']] = {
'files': [file_details],
'metadata': metadata,
}
self.findings[rule['id']]['files'] = sorted(
self.findings[rule['id']]['files'],
key=itemgetter('file_path', 'match_string', 'match_lines'))
|
(self, results)
|
56,603 |
libsast.core_matcher.pattern_matcher
|
pattern_matcher
|
Static Analysis Pattern Matcher.
|
def pattern_matcher(self, file_path):
"""Static Analysis Pattern Matcher."""
results = []
try:
data = file_path.read_text('utf-8', 'ignore')
for rule in self.scan_rules:
case = rule.get('input_case')
if case == 'lower':
tmp_data = data.lower()
elif case == 'upper':
tmp_data = data.upper()
else:
tmp_data = data
if file_path.suffix.lower() in ('.html', '.xml'):
fmt_data = strip_comments2(tmp_data)
else:
fmt_data = strip_comments(tmp_data)
matches = self.matcher._find_match(
rule['type'],
fmt_data,
rule)
if matches:
results.append({
'file': file_path.as_posix(),
'rule': rule,
'matches': matches,
})
except Exception:
raise exceptions.RuleProcessingError('Rule processing error.')
return results
|
(self, file_path)
|
56,604 |
libsast.core_matcher.pattern_matcher
|
scan
|
Scan file(s) or directory.
|
def scan(self, paths: list) -> dict:
"""Scan file(s) or directory."""
if not (self.scan_rules and paths):
return
self.validate_rules()
if self.show_progress:
pbar = common.ProgressBar('Pattern Match', len(paths))
paths = pbar.progrees_loop(paths)
files_to_scan = set()
for sfile in paths:
if self.exts and sfile.suffix.lower() not in self.exts:
continue
if sfile.stat().st_size / 1000 / 1000 > 5:
# Skip scanning files greater than 5 MB
print(f'Skipping large file {sfile.as_posix()}')
continue
files_to_scan.add(sfile)
with ProcessPoolExecutor(max_workers=common.get_worker_count()) as exe:
results = exe.map(
self.pattern_matcher,
files_to_scan,
chunksize=1)
self.add_finding(results)
return self.findings
|
(self, paths: list) -> dict
|
56,605 |
libsast.core_matcher.pattern_matcher
|
validate_rules
|
Validate Rules before scanning.
|
def validate_rules(self):
"""Validate Rules before scanning."""
for rule in self.scan_rules:
if not isinstance(rule, dict):
raise exceptions.InvalidRuleFormatError(
'Pattern Matcher Rule format is invalid.')
if not rule.get('type'):
raise exceptions.TypeKeyMissingError(
'The rule is missing the key \'type\'')
if not rule.get('pattern'):
raise exceptions.PatternKeyMissingError(
'The rule is missing the key \'pattern\'')
all_mts = [m for m in dir(matchers) if m.startswith('R')]
pattern_name = rule['type']
if pattern_name not in all_mts:
supported = ', '.join(all_mts)
raise exceptions.MatcherNotFoundError(
f'Matcher \'{pattern_name}\' is not supported.'
f' Available matchers are {supported}',
)
|
(self)
|
56,606 |
libsast.scanner
|
Scanner
| null |
class Scanner:

    def __init__(self, options: dict, paths: list) -> None:
        if options:
            self.options = options
        else:
            self.options = {
                'sgrep_rules': None,
                'sgrep_extensions': None,
                'match_rules': None,
                'match_extensions': None,
                'choice_rules': None,
                'choice_extensions': None,
                'alternative_path': None,
                'ignore_filenames': None,
                'ignore_extensions': None,
                'ignore_paths': None,
                'show_progress': False,
            }
        if options.get('ignore_extensions'):
            self.ignore_extensions = options.get('ignore_extensions')
        else:
            self.ignore_extensions = []
        if options.get('ignore_filenames'):
            self.ignore_filenames = options.get('ignore_filenames')
        else:
            self.ignore_filenames = []
        if options.get('ignore_paths'):
            self.ignore_paths = options.get('ignore_paths')
        else:
            self.ignore_paths = []
        self.paths = paths

    def scan(self) -> dict:
        """Start Scan."""
        results = {}
        valid_paths = self.get_scan_files(self.paths)
        if not valid_paths:
            return
        if self.options.get('match_rules'):
            results['pattern_matcher'] = PatternMatcher(
                self.options).scan(valid_paths)
        if self.options.get('choice_rules'):
            results['choice_matcher'] = ChoiceMatcher(
                self.options).scan(valid_paths)
        if self.options.get('sgrep_rules'):
            results['semantic_grep'] = SemanticGrep(
                self.options).scan(valid_paths)
        return results

    def get_scan_files(self, paths):
        """Get files valid for scanning."""
        if not isinstance(paths, list):
            raise InvalidPathError('Path should be a list')
        all_files = set()
        for path in paths:
            pobj = Path(path)
            if pobj.is_dir():
                for pfile in pobj.rglob('*'):
                    if self.validate_file(pfile):
                        all_files.add(pfile)
            else:
                if self.validate_file(pobj):
                    all_files.add(pobj)
        return all_files

    def validate_file(self, path):
        """Check if we should scan the file."""
        ignore_paths = any(
            Path(pp).as_posix() in path.as_posix() for pp in self.ignore_paths)
        ignore_files = path.name in self.ignore_filenames
        ignore_exts = path.suffix.lower() in self.ignore_extensions
        if (ignore_paths or ignore_files or ignore_exts):
            return False
        if not path.exists() or not path.is_file():
            return False
        return True
|
(options: dict, paths: list) -> None
|
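A hedged end-to-end sketch of running Scanner with all three matchers; the rule file paths and project path are placeholders.

```python
# Illustrative options; key names come from Scanner.__init__ above.
options = {
    'match_rules': 'rules/pattern_rules.yaml',
    'choice_rules': 'rules/choice_rules.yaml',
    'sgrep_rules': 'rules/semgrep_rules.yaml',
    'ignore_extensions': ['.pyc'],
    'ignore_paths': ['tests'],
    'show_progress': False,
}
scanner = Scanner(options, ['/path/to/project'])
results = scanner.scan()
# results is keyed by 'pattern_matcher', 'choice_matcher' and/or 'semantic_grep',
# depending on which rule options were supplied.
```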
56,607 |
libsast.scanner
|
__init__
| null |
def __init__(self, options: dict, paths: list) -> None:
if options:
self.options = options
else:
self.options = {
'sgrep_rules': None,
'sgrep_extensions': None,
'match_rules': None,
'match_extensions': None,
'choice_rules': None,
'choice_extensions': None,
'alternative_path': None,
'ignore_filenames': None,
'ignore_extensions': None,
'ignore_paths': None,
'show_progress': False,
}
if options.get('ignore_extensions'):
self.ignore_extensions = options.get('ignore_extensions')
else:
self.ignore_extensions = []
if options.get('ignore_filenames'):
self.ignore_filenames = options.get('ignore_filenames')
else:
self.ignore_filenames = []
if options.get('ignore_paths'):
self.ignore_paths = options.get('ignore_paths')
else:
self.ignore_paths = []
self.paths = paths
|
(self, options: dict, paths: list) -> NoneType
|
56,608 |
libsast.scanner
|
get_scan_files
|
Get files valid for scanning.
|
def get_scan_files(self, paths):
"""Get files valid for scanning."""
if not isinstance(paths, list):
raise InvalidPathError('Path should be a list')
all_files = set()
for path in paths:
pobj = Path(path)
if pobj.is_dir():
for pfile in pobj.rglob('*'):
if self.validate_file(pfile):
all_files.add(pfile)
else:
if self.validate_file(pobj):
all_files.add(pobj)
return all_files
|
(self, paths)
|
56,609 |
libsast.scanner
|
scan
|
Start Scan.
|
def scan(self) -> dict:
"""Start Scan."""
results = {}
valid_paths = self.get_scan_files(self.paths)
if not valid_paths:
return
if self.options.get('match_rules'):
results['pattern_matcher'] = PatternMatcher(
self.options).scan(valid_paths)
if self.options.get('choice_rules'):
results['choice_matcher'] = ChoiceMatcher(
self.options).scan(valid_paths)
if self.options.get('sgrep_rules'):
results['semantic_grep'] = SemanticGrep(
self.options).scan(valid_paths)
return results
|
(self) -> dict
|
56,610 |
libsast.scanner
|
validate_file
|
Check if we should scan the file.
|
def validate_file(self, path):
"""Check if we should scan the file."""
ignore_paths = any(
Path(pp).as_posix() in path.as_posix() for pp in self.ignore_paths)
ignore_files = path.name in self.ignore_filenames
ignore_exts = path.suffix.lower() in self.ignore_extensions
if (ignore_paths or ignore_files or ignore_exts):
return False
if not path.exists() or not path.is_file():
return False
return True
|
(self, path)
|
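A small sketch of the ignore logic above: ignore_paths entries are compared as substrings of the candidate's POSIX path, while filenames and extensions are matched exactly (extensions case-insensitively). The configuration and paths below are hypothetical.

```python
from pathlib import Path

scanner = Scanner({'ignore_paths': ['node_modules'],
                   'ignore_filenames': ['setup.py'],
                   'ignore_extensions': ['.lock']}, [])
scanner.validate_file(Path('pkg/node_modules/lib.js'))  # False: 'node_modules' is a path substring
scanner.validate_file(Path('pkg/app.py'))               # True only if the file actually exists
```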
56,611 |
libsast.core_sgrep.semantic_sgrep
|
SemanticGrep
| null |
class SemanticGrep:

    def __init__(self, options: dict) -> None:
        self.scan_rules = options.get('sgrep_rules')
        self.show_progress = options.get('show_progress')
        exts = options.get('sgrep_extensions')
        if exts:
            self.exts = [ext.lower() for ext in exts]
        else:
            self.exts = []
        self.findings = {
            'matches': {},
            'errors': [],
        }
        self.standards = standards.get_standards()

    def scan(self, paths: list) -> dict:
        """Do sgrep scan."""
        if self.exts:
            filtered = []
            for sfile in paths:
                if sfile.suffix.lower() in self.exts:
                    filtered.append(sfile)
            if filtered:
                paths = filtered
        if self.show_progress:
            pbar = common.ProgressBar('Semantic Grep', len(paths))
            sgrep_out = pbar.progress_function(
                invoke_semgrep,
                (paths, self.scan_rules))
        else:
            sgrep_out = invoke_semgrep(paths, self.scan_rules)
        self.format_output(sgrep_out)
        return self.findings

    def format_output(self, results):
        """Format sgrep results."""
        errs = self.findings.get('errors')
        if errs:
            self.findings['errors'] = errs
        smatches = self.findings['matches']
        for find in results['results']:
            file_details = {
                'file_path': find['path'],
                'match_position': (find['start']['col'], find['end']['col']),
                'match_lines': (find['start']['line'], find['end']['line']),
                'match_string': find['extra']['lines'],
            }
            rule_id = find['check_id'].rsplit('.', 1)[1]
            if rule_id in smatches:
                smatches[rule_id]['files'].append(file_details)
            else:
                metadata = find['extra']['metadata']
                metadata['description'] = find['extra']['message']
                metadata['severity'] = find['extra']['severity']
                smatches[rule_id] = {
                    'files': [file_details],
                    'metadata': metadata,
                }
            self.expand_mappings(smatches[rule_id])

    def expand_mappings(self, meta):
        """Expand libsast standard mappings."""
        meta_keys = meta['metadata'].keys()
        for mkey in meta_keys:
            if mkey not in self.standards.keys():
                continue
            to_expand = meta['metadata'][mkey]
            expanded = self.standards[mkey].get(to_expand)
            if expanded:
                meta['metadata'][mkey] = expanded
|
(options: dict) -> None
|
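format_output above walks a semgrep-style result dict; the sketch shows the minimal shape it relies on, with field names taken from the code and values invented for illustration.

```python
# Minimal result dict that format_output() can consume; values are made up.
sgrep_out = {
    'results': [{
        'check_id': 'rules.python.insecure_hash',   # rsplit('.', 1)[1] -> 'insecure_hash'
        'path': 'app/crypto.py',
        'start': {'line': 10, 'col': 5},
        'end': {'line': 10, 'col': 28},
        'extra': {
            'lines': 'hashlib.md5(data)',
            'message': 'MD5 is a weak hash.',
            'severity': 'WARNING',
            'metadata': {'cwe': 'CWE-327'},
        },
    }],
    'errors': [],
}
```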
56,612 |
libsast.core_sgrep.semantic_sgrep
|
__init__
| null |
def __init__(self, options: dict) -> None:
self.scan_rules = options.get('sgrep_rules')
self.show_progress = options.get('show_progress')
exts = options.get('sgrep_extensions')
if exts:
self.exts = [ext.lower() for ext in exts]
else:
self.exts = []
self.findings = {
'matches': {},
'errors': [],
}
self.standards = standards.get_standards()
|
(self, options: dict) -> NoneType
|
56,613 |
libsast.core_sgrep.semantic_sgrep
|
expand_mappings
|
Expand libsast standard mappings.
|
def expand_mappings(self, meta):
"""Expand libsast standard mappings."""
meta_keys = meta['metadata'].keys()
for mkey in meta_keys:
if mkey not in self.standards.keys():
continue
to_expand = meta['metadata'][mkey]
expanded = self.standards[mkey].get(to_expand)
if expanded:
meta['metadata'][mkey] = expanded
|
(self, meta)
|
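A small illustration of what expand_mappings does, assuming a hypothetical 'cwe' entry in the standards table; only the in-place replacement behaviour is taken from the code above.

```python
# Hypothetical standards table and finding metadata.
standards_table = {'cwe': {'cwe-327': 'CWE-327: Broken or Risky Cryptographic Algorithm'}}
meta = {'metadata': {'cwe': 'cwe-327', 'severity': 'WARNING'}}

for mkey in meta['metadata'].keys():
    if mkey in standards_table:
        expanded = standards_table[mkey].get(meta['metadata'][mkey])
        if expanded:
            meta['metadata'][mkey] = expanded
# meta['metadata']['cwe'] now holds the expanded description; 'severity' is untouched.
```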
56,614 |
libsast.core_sgrep.semantic_sgrep
|
format_output
|
Format sgrep results.
|
def format_output(self, results):
"""Format sgrep results."""
errs = self.findings.get('errors')
if errs:
self.findings['errors'] = errs
smatches = self.findings['matches']
for find in results['results']:
file_details = {
'file_path': find['path'],
'match_position': (find['start']['col'], find['end']['col']),
'match_lines': (find['start']['line'], find['end']['line']),
'match_string': find['extra']['lines'],
}
rule_id = find['check_id'].rsplit('.', 1)[1]
if rule_id in smatches:
smatches[rule_id]['files'].append(file_details)
else:
metadata = find['extra']['metadata']
metadata['description'] = find['extra']['message']
metadata['severity'] = find['extra']['severity']
smatches[rule_id] = {
'files': [file_details],
'metadata': metadata,
}
self.expand_mappings(smatches[rule_id])
|
(self, results)
|
56,615 |
libsast.core_sgrep.semantic_sgrep
|
scan
|
Do sgrep scan.
|
def scan(self, paths: list) -> dict:
"""Do sgrep scan."""
if self.exts:
filtered = []
for sfile in paths:
if sfile.suffix.lower() in self.exts:
filtered.append(sfile)
if filtered:
paths = filtered
if self.show_progress:
pbar = common.ProgressBar('Semantic Grep', len(paths))
sgrep_out = pbar.progress_function(
invoke_semgrep,
(paths, self.scan_rules))
else:
sgrep_out = invoke_semgrep(paths, self.scan_rules)
self.format_output(sgrep_out)
return self.findings
|
(self, paths: list) -> dict
|
56,622 |
pycape.cape
|
Cape
|
A websocket client for interacting with enclaves hosting Cape functions.
This is the main interface for interacting with Cape functions from Python.
See module-level documentation :mod:`pycape.cape` for usage example.
Args:
url: The Cape platform's websocket URL, which is responsible for forwarding
client requests to the proper enclave instances. If None, tries to load
value from the ``CAPE_ENCLAVE_HOST`` environment variable. If no such
variable value is supplied, defaults to ``"https://app.capeprivacy.com"``.
verbose: Boolean controlling verbose logging for the ``"pycape"`` logger.
If True, sets log-level to ``DEBUG``.
|
class Cape:
"""A websocket client for interacting with enclaves hosting Cape functions.
This is the main interface for interacting with Cape functions from Python.
See module-level documentation :mod:`pycape.cape` for usage example.
Args:
url: The Cape platform's websocket URL, which is responsible for forwarding
client requests to the proper enclave instances. If None, tries to load
value from the ``CAPE_ENCLAVE_HOST`` environment variable. If no such
variable value is supplied, defaults to ``"https://app.capeprivacy.com"``.
verbose: Boolean controlling verbose logging for the ``"pycape"`` logger.
If True, sets log-level to ``DEBUG``.
"""
def __init__(
self,
url: Optional[str] = None,
verbose: bool = False,
):
self._url = url or cape_config.ENCLAVE_HOST
self._root_cert = None
self._ctx = None
if verbose:
_logger.setLevel(logging.DEBUG)
@_synchronizer
async def close(self):
"""Closes the current enclave connection."""
if self._ctx is not None:
await self._ctx.close()
self._ctx = None
@_synchronizer
async def connect(
self,
function_ref: Union[str, os.PathLike, fref.FunctionRef],
token: Union[str, os.PathLike, tkn.Token],
pcrs: Optional[Dict[str, List[str]]] = None,
):
"""Connects to the enclave hosting the function denoted by ``function_ref``.
Note that this method creates a stateful websocket connection, which is a
necessary precondition for callers of :meth:`~Cape.invoke`. When using the
default Cape host, the enclave will terminate this websocket connection after
60s of inactivity. Care should be taken to close the websocket connection with
:meth:`~Cape.close` once all invocations have finished.
Args:
function_ref: Reference to a Cape deployed function. Must be convertible to
a :class:`~function_ref.FunctionRef`. See :meth:`Cape.function` for
a description of recognized values.
token: Personal Access Token scoped for the given Cape function. Must be
convertible to :class:`~token.Token`, see :meth:`Cape.token` for a
description of recognized values.
pcrs: An optional dictionary of PCR indexes to a list of expected or allowed
PCRs.
Raises:
RuntimeError: if the websocket response or the enclave attestation doc is
malformed, or if the enclave fails to return a function checksum
matching our own.
Exception: if the enclave threw an error while trying to fulfill the
connection request.
"""
function_ref = self.function(function_ref)
token = self.token(token)
await self._request_connection(function_ref, token, pcrs)
@_synchronizer
async def encrypt(
self,
input: bytes,
*,
username: Optional[str] = None,
key: Optional[bytes] = None,
key_path: Optional[Union[str, os.PathLike]] = None,
) -> bytes:
"""Encrypts inputs to Cape functions in Cape's encryption format.
The encrypted value can be used as input to Cape handlers by other callers of
:meth:`~Cape.invoke` or :meth:`~Cape.run` without giving them plaintext access
to it. The core encryption functionality uses envelope encryption; the value is
AES-encrypted with an ephemeral AES key, which is itself encrypted with the Cape
user's assigned RSA public key. The corresponding RSA private key is only
accessible from within a Cape enclave, which guarantees secrecy of the encrypted
value. See the Cape encrypt docs for further detail.
Args:
input: Input bytes to encrypt.
username: A Github username corresponding to a Cape user whose public key
you want to use for the encryption. See :meth:`Cape.key` for details.
key: Optional bytes for the Cape key. If None, will delegate to calling
:meth:`Cape.key` w/ the given ``key_path`` to retrieve the user's Cape
key.
key_path: Optional path to a locally-cached Cape key. See :meth:`Cape.key`
for details.
Returns:
Tagged ciphertext representing a base64-encoded Cape encryption of the
``input``.
Raises:
ValueError: if Cape key is not a properly-formatted RSA public key.
RuntimeError: if the enclave attestation doc does not contain a Cape key,
if the websocket response or the attestation doc is malformed.
Exception: if the enclave threw an error while trying to fulfill the
connection request.
"""
cape_key = key or await self.key(username=username, key_path=key_path)
ctxt = cape_encrypt.encrypt(input, cape_key)
# cape-encrypted ctxt must be b64-encoded and tagged
ctxt = base64.b64encode(ctxt)
return b"cape:" + ctxt
def function(
self,
identifier: Union[str, os.PathLike, fref.FunctionRef],
*,
checksum: Optional[str] = None,
) -> fref.FunctionRef:
"""Convenience function for creating a :class:`~.function_ref.FunctionRef`.
The ``identifier`` parameter is interpreted according to the following
priority:
- Filepath to a :class:`~.function_ref.FunctionRef` JSON. See
:meth:`~.function_ref.FunctionRef.from_json` for expected JSON structure.
- String representing a function ID
- String of the form "{username}/{fn_name}" representing a function name.
- A :class:`~function_ref.FunctionRef`. If its checksum is missing and a
``checksum`` argument is given, it will be added to the returned value.
Args:
identifier: A string identifier that can be converted into a
:class:`~.function_ref.FunctionRef`. See above for options.
checksum: keyword-only argument for the function checksum. Ignored if
``identifier`` points to a JSON.
"""
if isinstance(identifier, pathlib.Path):
return fref.FunctionRef.from_json(identifier)
if isinstance(identifier, str):
identifier_as_path = pathlib.Path(identifier)
if identifier_as_path.exists():
return fref.FunctionRef.from_json(identifier_as_path)
# not a path, try to interpret as function name
if len(identifier.split("/")) == 2:
return fref.FunctionRef(id=None, name=identifier, checksum=checksum)
# not a function name, try to interpret as function id
elif len(identifier) == 22:
return fref.FunctionRef(id=identifier, name=None, checksum=checksum)
if isinstance(identifier, fref.FunctionRef):
if checksum is None:
return identifier
elif identifier.checksum is None:
return fref.FunctionRef(
id=identifier.id, name=identifier.full_name, checksum=checksum
)
else:
if checksum == identifier.checksum:
return identifier
raise ValueError(
"Checksum mismatch: given `checksum` argument conflicts with "
"given FunctionRef's checksum."
)
raise ValueError("Unrecognized form of `identifier` argument: {identifier}.")
@_synchronizer
@_synchronizer.asynccontextmanager
async def function_context(
self,
function_ref: Union[str, os.PathLike, fref.FunctionRef],
token: Union[str, os.PathLike, tkn.Token],
pcrs: Optional[Dict[str, List[str]]] = None,
):
"""Creates a context manager for a given ``function_ref``'s enclave connection.
Note that this context manager accomplishes the same functionality as
:meth:`~Cape.connect`, except that it will also automatically
:meth:`~Cape.close` the connection when exiting the context.
**Usage** ::
cape = Cape(url="https://app.capeprivacy.com")
f = cape.function("function.json")
t = cape.token("pycape-dev.token")
with cape.function_context(f, t):
c1 = cape.invoke(3, 4, use_serdio=True)
print(c1) # 5
c2 = cape.invoke(5, 12, use_serdio=True)
print(c2) # 13
# websocket connection is automatically closed
Args:
function_ref: A function ID or :class:`~.function_ref.FunctionRef`
representing a deployed Cape function.
Raises:
RuntimeError: if the websocket response or the enclave attestation doc is
malformed, or if the enclave fails to return a function checksum
matching our own.
Exception: if the enclave threw an error while trying to fulfill the
connection request.
"""
try:
yield await self.connect(function_ref, token, pcrs)
finally:
await self.close()
@_synchronizer
async def invoke(
self, *args: Any, serde_hooks=None, use_serdio: bool = False, **kwargs: Any
) -> Any:
"""Invokes a function call from the currently connected websocket.
This method assumes that the client is currently maintaining an open websocket
connection to an enclave hosting a particular Cape function. Care should be
taken to ensure that the function_ref that spawned the connection is the
correct one. The connection should be closed with :meth:`~Cape.close` once the
caller is finished with its invocations.
Args:
*args: Arguments to pass to the connected Cape function. If
``use_serdio=False``, we expect a single argument of type ``bytes``.
Otherwise, these arguments should match the positional arguments
of the undecorated Cape handler, and they will be auto-serialized by
Serdio before being sent in the request.
serde_hooks: An optional pair of serdio encoder/decoder hooks convertible
to :class:`serdio.SerdeHookBundle`. The hooks are necessary if the
``args`` / ``kwargs`` have any user-defined types that can't be handled
by vanilla Serdio. See :func:`serdio.bundle_serde_hooks` for supported
types.
use_serdio: Boolean controlling whether or not the inputs should be
auto-serialized by serdio.
kwargs: Keyword arguments to be passed to the connected Cape function.
These are treated the same way as the ``args`` are.
Returns:
If ``use_serdio=True``, returns the auto-deserialized result of calling the
connected Cape function on the given ``args`` / ``kwargs``.
If ``use_serdio=False``, returns the output of the Cape function as raw
bytes.
Raises:
RuntimeError: if serialized inputs could not be HPKE-encrypted, or if
websocket response is malformed.
"""
if serde_hooks is not None:
serde_hooks = serdio.bundle_serde_hooks(serde_hooks)
return await self._request_invocation(serde_hooks, use_serdio, *args, **kwargs)
@_synchronizer
async def key(
self,
*,
username: Optional[str] = None,
key_path: Optional[Union[str, os.PathLike]] = None,
pcrs: Optional[Dict[str, List[str]]] = None,
) -> bytes:
"""Load a Cape key from disk or download and persist an enclave-generated one.
If no username or key_path is provided, will try to load the currently logged-in
CLI user's key from a local cache.
Args:
username: An optional string representing the Github username of a Cape
user. The resulting public key will be associated with their account,
and data encrypted with this key will be available inside functions
that user has deployed.
key_path: The path to the Cape key file. If the file already exists, the key
will be read from disk and returned. Otherwise, a Cape key will be
requested from the Cape platform and written to this location.
If None, the default path is ``"$HOME/.config/cape/capekey.pub.der"``,
or alternatively whatever path is specified by expanding the env
variables ``CAPE_LOCAL_CONFIG_DIR / CAPE_LOCAL_CAPE_KEY_FILENAME``.
pcrs: A dictionary of PCR indexes to a list of potential values.
Returns:
Bytes containing the Cape key. The key is also cached on disk for later
use.
Raises:
RuntimeError: if the enclave attestation doc does not contain a Cape key,
if the websocket response or the attestation doc is malformed.
Exception: if the enclave threw an error while trying to fulfill the
connection request.
"""
if username is not None and key_path is not None:
raise ValueError("User provided both 'username' and 'key_path' arguments.")
if key_path is not None:
key_path = pathlib.Path(key_path)
else:
config_dir = pathlib.Path(cape_config.LOCAL_CONFIG_DIR)
if username is not None:
# look for locally-cached user key
key_qualifier = config_dir / "encryption_keys" / username
else:
# try to load the current CLI user's capekey
key_qualifier = config_dir
key_path = key_qualifier / cape_config.LOCAL_CAPE_KEY_FILENAME
if key_path.exists():
with open(key_path, "rb") as f:
cape_key = f.read()
return cape_key
if username is not None:
cape_key = await self._request_key_with_username(username, pcrs=pcrs)
await _persist_cape_key(cape_key, key_path)
return cape_key
raise ValueError(
"Cannot find a Cape key in the local cache. Either specify a username or "
"log into the Cape CLI and run `cape key` to locally cache your own "
"account's Cape key."
)
@_synchronizer
async def run(
self,
function_ref: Union[str, os.PathLike, fref.FunctionRef],
token: Union[str, os.PathLike, tkn.Token],
*args: Any,
pcrs: Optional[Dict[str, List[str]]] = None,
serde_hooks=None,
use_serdio: bool = False,
**kwargs: Any,
) -> Any:
"""Single-shot version of connect + invoke + close.
This method takes care of establishing a websocket connection via
:meth:`~Cape.connect`, invoking it via :meth:`~Cape.invoke`, and then finally
closing the connection with :meth:`~Cape.close`. This method should be
preferred when the caller doesn't need to invoke a Cape function more than once.
Args:
function_ref: A value convertible to a :class:`~.function_ref.FunctionRef`,
representing a deployed Cape function. See :meth:`Cape.function` for
recognized values.
*args: Arguments to pass to the connected Cape function. If
``use_serdio=False``, we expect a single argument of type ``bytes``.
Otherwise, these arguments should match the positional arguments
of the undecorated Cape handler, and they will be auto-serialized by
Serdio before being sent in the request.
serde_hooks: An optional pair of serdio encoder/decoder hooks convertible
to :class:`serdio.SerdeHookBundle`. The hooks are necessary if the
``args`` / ``kwargs`` have any user-defined types that can't be handled
by vanilla Serdio. See :func:`serdio.bundle_serde_hooks` for supported
types.
use_serdio: Boolean controlling whether or not the inputs should be
auto-serialized by serdio.
kwargs: Keyword arguments to be passed to the connected Cape function.
These are treated the same way as the ``args`` are.
Returns:
If ``use_serdio=True``, returns the auto-deserialized result of calling the
connected Cape function on the given ``args`` / ``kwargs``.
If ``use_serdio=False``, returns the output of the Cape function as raw
bytes.
Raises:
RuntimeError: if serialized inputs could not be HPKE-encrypted, or if
websocket response is malformed.
"""
if serde_hooks is not None:
serde_hooks = serdio.bundle_serde_hooks(serde_hooks)
async with self.function_context(function_ref, token, pcrs):
result = await self.invoke(
*args, serde_hooks=serde_hooks, use_serdio=use_serdio, **kwargs
)
return result
def token(self, token: Union[str, os.PathLike, tkn.Token]) -> tkn.Token:
"""Create or load a :class:`~token.Token`.
Args:
token: Filepath to a token file, or the raw token string itself.
Returns:
A :class:`~token.Token` that can be used to access users' deployed Cape
functions.
Raises:
TypeError: if the ``token`` argument type is unrecognized.
"""
token_out = None
if isinstance(token, pathlib.Path):
tokenfile = token
return tkn.Token.from_disk(tokenfile)
if isinstance(token, str):
# str could be a filename
if len(token) <= 255:
token_as_path = pathlib.Path(token)
token_out = _try_load_token_file(token_as_path)
return token_out or tkn.Token(token)
if isinstance(token, tkn.Token):
return token
raise TypeError(f"Expected token to be PathLike or str, found {type(token)}")
async def _request_connection(self, function_ref, token, pcrs=None):
if function_ref.id is not None:
fn_endpoint = f"{self._url}/v1/run/{function_ref.id}"
elif function_ref.full_name is not None:
fn_endpoint = f"{self._url}/v1/run/{function_ref.user}/{function_ref.name}"
self._root_cert = self._root_cert or attest.download_root_cert()
self._ctx = _EnclaveContext(
endpoint=fn_endpoint,
auth_protocol="cape.runtime",
auth_token=token.raw,
root_cert=self._root_cert,
)
attestation_doc = await self._ctx.bootstrap(pcrs)
user_data = attestation_doc.get("user_data")
checksum = function_ref.checksum
if checksum is not None and user_data is None:
# Close the connection explicitly before throwing exception
await self._ctx.close()
raise RuntimeError(
f"No function checksum received from enclave, expected{checksum}."
)
user_data_dict = json.loads(user_data)
received_checksum = user_data_dict.get("func_checksum")
if checksum is not None:
# Received checksum is base64-encoded raw bytes; convert to a hex string for comparison
received_checksum = str(base64.b64decode(received_checksum).hex())
if str(checksum) != str(received_checksum):
# Close the connection explicitly before throwing exception
await self._ctx.close()
raise RuntimeError(
"Returned checksum did not match provided, "
f"got: {received_checksum}, want: {checksum}."
)
return
async def _request_invocation(self, serde_hooks, use_serdio, *args, **kwargs):
# If multiple args and/or kwargs are supplied to the Cape function through
# Cape.run or Cape.invoke, before serialization, we pack them
# into a dictionary with the following keys:
# {"cape_fn_args": <tuple_args>, "cape_fn_kwargs": <dict_kwargs>}.
single_input = _maybe_get_single_input(args, kwargs)
if single_input is not None:
inputs = single_input
elif single_input is None and not use_serdio:
raise ValueError(
"Expected a single input of type 'bytes' when use_serdio=False.\n"
"Found:"
f"\t- args: {args}"
f"\t- kwargs: {kwargs}"
)
if serde_hooks is not None:
encoder_hook, decoder_hook = serde_hooks.unbundle()
use_serdio = True
else:
encoder_hook, decoder_hook = None, None
if use_serdio:
inputs = serdio.serialize(*args, encoder=encoder_hook, **kwargs)
if not isinstance(inputs, bytes):
raise TypeError(
f"The input type is: {type(inputs)}. Provide input as bytes or "
"set use_serdio=True for PyCape to serialize your input "
"with Serdio."
)
result = await self._ctx.invoke(inputs)
if use_serdio:
result = serdio.deserialize(result, decoder=decoder_hook)
return result
async def _request_key_with_username(
self,
username: str,
pcrs: Optional[Dict[str, List[str]]] = None,
) -> bytes:
user_key_endpoint = f"{self._url}/v1/user/{username}/key"
response = requests.get(user_key_endpoint).json()
adoc_blob = response.get("attestation_document", None)
if adoc_blob is None:
raise RuntimeError(
f"Bad response from '/v1/user/{username}/key' route, expected "
f"attestation_document key-value: {response}."
)
self._root_cert = self._root_cert or attest.download_root_cert()
doc_bytes = base64.b64decode(adoc_blob)
attestation_doc = attest.load_attestation_document(doc_bytes)
not_before = attest.get_certificate_not_before(attestation_doc["certificate"])
attestation_doc = attest.parse_attestation(
doc_bytes, self._root_cert, checkDate=not_before
)
if pcrs is not None:
attest.verify_pcrs(pcrs, attestation_doc)
user_data = attestation_doc.get("user_data")
user_data_dict = json.loads(user_data)
cape_key = user_data_dict.get("key")
if cape_key is None:
raise RuntimeError(
"Enclave response did not include a Cape key in attestation user data."
)
return base64.b64decode(cape_key)
async def _request_key_with_token(
self,
token: str,
pcrs: Optional[Dict[str, List[str]]] = None,
) -> bytes:
key_endpoint = f"{self._url}/v1/key"
self._root_cert = self._root_cert or attest.download_root_cert()
key_ctx = _EnclaveContext(
key_endpoint,
auth_protocol="cape.function",
auth_token=token,
root_cert=self._root_cert,
)
attestation_doc = await key_ctx.bootstrap(pcrs)
await key_ctx.close() # we have the attestation doc, no longer any need for ctx
user_data = attestation_doc.get("user_data")
user_data_dict = json.loads(user_data)
cape_key = user_data_dict.get("key")
if cape_key is None:
raise RuntimeError(
"Enclave response did not include a Cape key in attestation user data."
)
return base64.b64decode(cape_key)
|
(url: Optional[str] = None, verbose: bool = False)
|
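A hedged usage sketch combining the methods above (run, and connect/invoke/close via function_context); the function identifier and token file are placeholders.

```python
from pycape import Cape

cape = Cape(url="https://app.capeprivacy.com")
f = cape.function("user/echo")         # hypothetical "{username}/{fn_name}" identifier
t = cape.token("pycape-dev.token")     # hypothetical token file

# One-shot: connect + invoke + close.
result = cape.run(f, t, b"hello")

# Several invocations over one connection.
with cape.function_context(f, t):
    r1 = cape.invoke(b"first")
    r2 = cape.invoke(b"second")
```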
56,623 |
pycape.cape
|
__init__
| null |
def __init__(
    self,
    url: Optional[str] = None,
    verbose: bool = False,
):
    self._url = url or cape_config.ENCLAVE_HOST
    self._root_cert = None
    self._ctx = None
    if verbose:
        _logger.setLevel(logging.DEBUG)
|
(self, url: Optional[str] = None, verbose: bool = False)
|
56,624 |
pycape.cape
|
_request_connection
| null |
async def _request_connection(self, function_ref, token, pcrs=None):
    if function_ref.id is not None:
        fn_endpoint = f"{self._url}/v1/run/{function_ref.id}"
    elif function_ref.full_name is not None:
        fn_endpoint = f"{self._url}/v1/run/{function_ref.user}/{function_ref.name}"
    self._root_cert = self._root_cert or attest.download_root_cert()
    self._ctx = _EnclaveContext(
        endpoint=fn_endpoint,
        auth_protocol="cape.runtime",
        auth_token=token.raw,
        root_cert=self._root_cert,
    )
    attestation_doc = await self._ctx.bootstrap(pcrs)
    user_data = attestation_doc.get("user_data")
    checksum = function_ref.checksum
    if checksum is not None and user_data is None:
        # Close the connection explicitly before throwing exception
        await self._ctx.close()
        raise RuntimeError(
            f"No function checksum received from enclave, expected {checksum}."
        )
    user_data_dict = json.loads(user_data)
    received_checksum = user_data_dict.get("func_checksum")
    if checksum is not None:
        # Received checksum is base64-encoded raw bytes; convert to hex for comparison
        received_checksum = str(base64.b64decode(received_checksum).hex())
        if str(checksum) != str(received_checksum):
            # Close the connection explicitly before throwing exception
            await self._ctx.close()
            raise RuntimeError(
                "Returned checksum did not match provided, "
                f"got: {received_checksum}, want: {checksum}."
            )
    return
|
(self, function_ref, token, pcrs=None)
|
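The checksum handling above compares a caller-supplied hex checksum with the base64-encoded digest reported by the enclave; a tiny sketch of that normalisation with made-up values.

```python
import base64

# Hypothetical checksum: the enclave returns base64 of the raw digest bytes,
# while FunctionRef carries a hex string, so _request_connection normalises first.
received_b64 = "q83v"                                      # decodes to b'\xab\xcd\xef'
received_hex = str(base64.b64decode(received_b64).hex())   # 'abcdef'
provided_hex = "abcdef"
assert received_hex == provided_hex
```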
56,628 |
pycape.cape
|
auto_close
|
Closes the current enclave connection.
|
@_synchronizer
async def close(self):
    """Closes the current enclave connection."""
    if self._ctx is not None:
        await self._ctx.close()
        self._ctx = None
|
(self)
|
56,629 |
pycape.cape
|
auto_connect
|
Connects to the enclave hosting the function denoted by ``function_ref``.
Note that this method creates a stateful websocket connection, which is a
necessary precondition for callers of :meth:`~Cape.invoke`. When using the
default Cape host, the enclave will terminate this websocket connection after
60s of inactivity. Care should be taken to close the websocket connection with
:meth:`~Cape.close` once all invocations have finished.
Args:
function_ref: Reference to a Cape deployed function. Must be convertible to
a :class:`~function_ref.FunctionRef`. See :meth:`Cape.function` for
a description of recognized values.
token: Personal Access Token scoped for the given Cape function. Must be
convertible to :class:`~token.Token`, see :meth:`Cape.token` for a
description of recognized values.
pcrs: An optional dictionary of PCR indexes to a list of expected or allowed
PCRs.
Raises:
RuntimeError: if the websocket response or the enclave attestation doc is
malformed, or if the enclave fails to return a function checksum
matching our own.
Exception: if the enclave threw an error while trying to fulfill the
connection request.
|
@_synchronizer
async def connect(
    self,
    function_ref: Union[str, os.PathLike, fref.FunctionRef],
    token: Union[str, os.PathLike, tkn.Token],
    pcrs: Optional[Dict[str, List[str]]] = None,
):
    """Connects to the enclave hosting the function denoted by ``function_ref``.
    Note that this method creates a stateful websocket connection, which is a
    necessary precondition for callers of :meth:`~Cape.invoke`. When using the
    default Cape host, the enclave will terminate this websocket connection after
    60s of inactivity. Care should be taken to close the websocket connection with
    :meth:`~Cape.close` once all invocations have finished.
    Args:
        function_ref: Reference to a Cape deployed function. Must be convertible to
            a :class:`~function_ref.FunctionRef`. See :meth:`Cape.function` for
            a description of recognized values.
        token: Personal Access Token scoped for the given Cape function. Must be
            convertible to :class:`~token.Token`, see :meth:`Cape.token` for a
            description of recognized values.
        pcrs: An optional dictionary of PCR indexes to a list of expected or allowed
            PCRs.
    Raises:
        RuntimeError: if the websocket response or the enclave attestation doc is
            malformed, or if the enclave fails to return a function checksum
            matching our own.
        Exception: if the enclave threw an error while trying to fulfill the
            connection request.
    """
    function_ref = self.function(function_ref)
    token = self.token(token)
    await self._request_connection(function_ref, token, pcrs)
|
(self, function_ref: Union[str, os.PathLike, pycape.function_ref.FunctionRef], token: Union[str, os.PathLike, pycape.token.Token], pcrs: Optional[Dict[str, List[str]]] = None)
|
56,630 |
pycape.cape
|
auto_encrypt
|
Encrypts inputs to Cape functions in Cape's encryption format.
The encrypted value can be used as input to Cape handlers by other callers of
:meth:`~Cape.invoke` or :meth:`~Cape.run` without giving them plaintext access
to it. The core encryption functionality uses envelope encryption; the value is
AES-encrypted with an ephemeral AES key, which is itself encrypted with the Cape
user's assigned RSA public key. The corresponding RSA private key is only
accessible from within a Cape enclave, which guarantees secrecy of the encrypted
value. See the Cape encrypt docs for further detail.
Args:
input: Input bytes to encrypt.
username: A Github username corresponding to a Cape user whose public key
you want to use for the encryption. See :meth:`Cape.key` for details.
key: Optional bytes for the Cape key. If None, will delegate to calling
:meth:`Cape.key` w/ the given ``key_path`` to retrieve the user's Cape
key.
key_path: Optional path to a locally-cached Cape key. See :meth:`Cape.key`
for details.
Returns:
Tagged ciphertext representing a base64-encoded Cape encryption of the
``input``.
Raises:
ValueError: if Cape key is not a properly-formatted RSA public key.
RuntimeError: if the enclave attestation doc does not contain a Cape key,
if the websocket response or the attestation doc is malformed.
Exception: if the enclave threw an error while trying to fulfill the
connection request.
|
@_synchronizer
async def encrypt(
    self,
    input: bytes,
    *,
    username: Optional[str] = None,
    key: Optional[bytes] = None,
    key_path: Optional[Union[str, os.PathLike]] = None,
) -> bytes:
    """Encrypts inputs to Cape functions in Cape's encryption format.
    The encrypted value can be used as input to Cape handlers by other callers of
    :meth:`~Cape.invoke` or :meth:`~Cape.run` without giving them plaintext access
    to it. The core encryption functionality uses envelope encryption; the value is
    AES-encrypted with an ephemeral AES key, which is itself encrypted with the Cape
    user's assigned RSA public key. The corresponding RSA private key is only
    accessible from within a Cape enclave, which guarantees secrecy of the encrypted
    value. See the Cape encrypt docs for further detail.
    Args:
        input: Input bytes to encrypt.
        username: A Github username corresponding to a Cape user whose public key
            you want to use for the encryption. See :meth:`Cape.key` for details.
        key: Optional bytes for the Cape key. If None, will delegate to calling
            :meth:`Cape.key` w/ the given ``key_path`` to retrieve the user's Cape
            key.
        key_path: Optional path to a locally-cached Cape key. See :meth:`Cape.key`
            for details.
    Returns:
        Tagged ciphertext representing a base64-encoded Cape encryption of the
        ``input``.
    Raises:
        ValueError: if Cape key is not a properly-formatted RSA public key.
        RuntimeError: if the enclave attestation doc does not contain a Cape key,
            if the websocket response or the attestation doc is malformed.
        Exception: if the enclave threw an error while trying to fulfill the
            connection request.
    """
    cape_key = key or await self.key(username=username, key_path=key_path)
    ctxt = cape_encrypt.encrypt(input, cape_key)
    # cape-encrypted ctxt must be b64-encoded and tagged
    ctxt = base64.b64encode(ctxt)
    return b"cape:" + ctxt
|
(self, input: bytes, *, username: Optional[str] = None, key: Optional[bytes] = None, key_path: Union[str, os.PathLike, NoneType] = None) -> bytes
|
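The encrypt body above base64-encodes the envelope ciphertext and prefixes a tag; a sketch of the resulting value, with hypothetical ciphertext bytes standing in for cape_encrypt.encrypt()'s output.

```python
import base64

# Hypothetical ciphertext bytes in place of the real envelope-encryption output.
raw_ctxt = b"\x01\x02\x03"
tagged = b"cape:" + base64.b64encode(raw_ctxt)
assert tagged.startswith(b"cape:")   # tagged ciphertext, ready to pass to Cape.run/invoke
```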
56,631 |
pycape.cape
|
function
|
Convenience function for creating a :class:`~.function_ref.FunctionRef`.
The ``identifier`` parameter is interpreted according to the following
priority:
- Filepath to a :class:`~.function_ref.FunctionRef` JSON. See
:meth:`~.function_ref.FunctionRef.from_json` for expected JSON structure.
- String representing a function ID
- String of the form "{username}/{fn_name}" representing a function name.
- A :class:`~function_ref.FunctionRef`. If its checksum is missing and a
``checksum`` argument is given, it will be added to the returned value.
Args:
identifier: A string identifier that can be converted into a
:class:`~.function_ref.FunctionRef`. See above for options.
checksum: keyword-only argument for the function checksum. Ignored if
``identifier`` points to a JSON.
|
def function(
    self,
    identifier: Union[str, os.PathLike, fref.FunctionRef],
    *,
    checksum: Optional[str] = None,
) -> fref.FunctionRef:
    """Convenience function for creating a :class:`~.function_ref.FunctionRef`.
    The ``identifier`` parameter is interpreted according to the following
    priority:
    - Filepath to a :class:`~.function_ref.FunctionRef` JSON. See
      :meth:`~.function_ref.FunctionRef.from_json` for expected JSON structure.
    - String representing a function ID
    - String of the form "{username}/{fn_name}" representing a function name.
    - A :class:`~function_ref.FunctionRef`. If its checksum is missing and a
      ``checksum`` argument is given, it will be added to the returned value.
    Args:
        identifier: A string identifier that can be converted into a
            :class:`~.function_ref.FunctionRef`. See above for options.
        checksum: keyword-only argument for the function checksum. Ignored if
            ``identifier`` points to a JSON.
    """
    if isinstance(identifier, pathlib.Path):
        return fref.FunctionRef.from_json(identifier)
    if isinstance(identifier, str):
        identifier_as_path = pathlib.Path(identifier)
        if identifier_as_path.exists():
            return fref.FunctionRef.from_json(identifier_as_path)
        # not a path, try to interpret as function name
        if len(identifier.split("/")) == 2:
            return fref.FunctionRef(id=None, name=identifier, checksum=checksum)
        # not a function name, try to interpret as function id
        elif len(identifier) == 22:
            return fref.FunctionRef(id=identifier, name=None, checksum=checksum)
    if isinstance(identifier, fref.FunctionRef):
        if checksum is None:
            return identifier
        elif identifier.checksum is None:
            return fref.FunctionRef(
                id=identifier.id, name=identifier.full_name, checksum=checksum
            )
        else:
            if checksum == identifier.checksum:
                return identifier
            raise ValueError(
                "Checksum mismatch: given `checksum` argument conflicts with "
                "given FunctionRef's checksum."
            )
    raise ValueError(f"Unrecognized form of `identifier` argument: {identifier}.")
|
(self, identifier: Union[str, os.PathLike, pycape.function_ref.FunctionRef], *, checksum: Optional[str] = None) -> pycape.function_ref.FunctionRef
|
56,632 |
pycape.cape
|
auto_function_context
|
Creates a context manager for a given ``function_ref``'s enclave connection.
Note that this context manager accomplishes the same functionality as
:meth:`~Cape.connect`, except that it will also automatically
:meth:`~Cape.close` the connection when exiting the context.
**Usage** ::
cape = Cape(url="https://app.capeprivacy.com")
f = cape.function("function.json")
t = cape.token("pycape-dev.token")
with cape.function_context(f, t):
c1 = cape.invoke(3, 4, use_serdio=True)
print(c1) # 5
c2 = cape.invoke(5, 12, use_serdio=True)
print(c2) # 13
# websocket connection is automatically closed
Args:
function_ref: A function ID or :class:`~.function_ref.FunctionRef`
representing a deployed Cape function.
Raises:
RuntimeError: if the websocket response or the enclave attestation doc is
malformed, or if the enclave fails to return a function checksum
matching our own.
Exception: if the enclave threw an error while trying to fulfill the
connection request.
|
@_synchronizer
@_synchronizer.asynccontextmanager
async def function_context(
    self,
    function_ref: Union[str, os.PathLike, fref.FunctionRef],
    token: Union[str, os.PathLike, tkn.Token],
    pcrs: Optional[Dict[str, List[str]]] = None,
):
    """Creates a context manager for a given ``function_ref``'s enclave connection.
    Note that this context manager accomplishes the same functionality as
    :meth:`~Cape.connect`, except that it will also automatically
    :meth:`~Cape.close` the connection when exiting the context.
    **Usage** ::
        cape = Cape(url="https://app.capeprivacy.com")
        f = cape.function("function.json")
        t = cape.token("pycape-dev.token")
        with cape.function_context(f, t):
            c1 = cape.invoke(3, 4, use_serdio=True)
            print(c1) # 5
            c2 = cape.invoke(5, 12, use_serdio=True)
            print(c2) # 13
        # websocket connection is automatically closed
    Args:
        function_ref: A function ID or :class:`~.function_ref.FunctionRef`
            representing a deployed Cape function.
    Raises:
        RuntimeError: if the websocket response or the enclave attestation doc is
            malformed, or if the enclave fails to return a function checksum
            matching our own.
        Exception: if the enclave threw an error while trying to fulfill the
            connection request.
    """
    try:
        yield await self.connect(function_ref, token, pcrs)
    finally:
        await self.close()
|
(self, function_ref: Union[str, os.PathLike, pycape.function_ref.FunctionRef], token: Union[str, os.PathLike, pycape.token.Token], pcrs: Optional[Dict[str, List[str]]] = None)
|
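A minimal sketch of the context-manager flow described above; the URL and file names are placeholders taken from the docstring, and the example function is assumed to behave like the docstring's (returning the hypotenuse of its two arguments).
from pycape import Cape

cape = Cape(url="https://app.capeprivacy.com")
f = cape.function("function.json")    # FunctionRef from a deploy-time JSON file
t = cape.token("pycape-dev.token")    # Personal Access Token loaded from disk
with cape.function_context(f, t):
    try:
        print(cape.invoke(3, 4, use_serdio=True))  # 5 for the docstring's example function
    except RuntimeError as err:
        print("invocation failed:", err)
# The websocket connection is closed here, whether or not the invocation succeeded.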
56,633 |
pycape.cape
|
auto_invoke
|
Invokes a function call from the currently connected websocket.
This method assumes that the client is currently maintaining an open websocket
connection to an enclave hosting a particular Cape function. Care should be
taken to ensure that the function_ref that spawned the connection is the
correct one. The connection should be closed with :meth:`~Cape.close` once the
caller is finished with its invocations.
Args:
*args: Arguments to pass to the connected Cape function. If
``use_serdio=False``, we expect a single argument of type ``bytes``.
Otherwise, these arguments should match the positional arguments
of the undecorated Cape handler, and they will be auto-serialized by
Serdio before being sent in the request.
serde_hooks: An optional pair of serdio encoder/decoder hooks convertible
to :class:`serdio.SerdeHookBundle`. The hooks are necessary if the
``args`` / ``kwargs`` have any user-defined types that can't be handled
by vanilla Serdio. See :func:`serdio.bundle_serde_hooks` for supported
types.
use_serdio: Boolean controlling whether or not the inputs should be
auto-serialized by serdio.
kwargs: Keyword arguments to be passed to the connected Cape function.
These are treated the same way as the ``args`` are.
Returns:
If ``use_serdio=True``, returns the auto-deserialized result of calling the
connected Cape function on the given ``args`` / ``kwargs``.
If ``use_serdio=False``, returns the output of the Cape function as raw
bytes.
Raises:
RuntimeError: if serialized inputs could not be HPKE-encrypted, or if
websocket response is malformed.
|
@_synchronizer
async def key(
self,
*,
username: Optional[str] = None,
key_path: Optional[Union[str, os.PathLike]] = None,
pcrs: Optional[Dict[str, List[str]]] = None,
) -> bytes:
"""Load a Cape key from disk or download and persist an enclave-generated one.
If no username or key_path is provided, will try to load the currently logged-in
CLI user's key from a local cache.
Args:
username: An optional string representing the Github username of a Cape
user. The resulting public key will be associated with their account,
and data encrypted with this key will be available inside functions
that user has deployed.
key_path: The path to the Cape key file. If the file already exists, the key
will be read from disk and returned. Otherwise, a Cape key will be
requested from the Cape platform and written to this location.
If None, the default path is ``"$HOME/.config/cape/capekey.pub.der"``,
or alternatively whatever path is specified by expanding the env
variables ``CAPE_LOCAL_CONFIG_DIR / CAPE_LOCAL_CAPE_KEY_FILENAME``.
pcrs: A dictionary of PCR indexes to a list of potential values.
Returns:
Bytes containing the Cape key. The key is also cached on disk for later
use.
Raises:
RuntimeError: if the enclave attestation doc does not contain a Cape key,
or if the websocket response or the attestation doc is malformed.
Exception: if the enclave threw an error while trying to fulfill the
connection request.
"""
if username is not None and key_path is not None:
raise ValueError("User provided both 'username' and 'key_path' arguments.")
if key_path is not None:
key_path = pathlib.Path(key_path)
else:
config_dir = pathlib.Path(cape_config.LOCAL_CONFIG_DIR)
if username is not None:
# look for locally-cached user key
key_qualifier = config_dir / "encryption_keys" / username
else:
# try to load the current CLI user's capekey
key_qualifier = config_dir
key_path = key_qualifier / cape_config.LOCAL_CAPE_KEY_FILENAME
if key_path.exists():
with open(key_path, "rb") as f:
cape_key = f.read()
return cape_key
if username is not None:
cape_key = await self._request_key_with_username(username, pcrs=pcrs)
await _persist_cape_key(cape_key, key_path)
return cape_key
raise ValueError(
"Cannot find a Cape key in the local cache. Either specify a username or "
"log into the Cape CLI and run `cape key` to locally cache your own "
"account's Cape key."
)
|
(self, *args: Any, serde_hooks=None, use_serdio: bool = False, **kwargs: Any) -> Any
|
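For repeated invocations, the docstring describes an explicit connect / invoke / close flow. The sketch below assumes Cape.connect accepts the same (function_ref, token) arguments as function_context; file names are placeholders.
from pycape import Cape

cape = Cape(url="https://app.capeprivacy.com")
f = cape.function("function.json")
t = cape.token("pycape-dev.token")
cape.connect(f, t)          # assumed signature, mirroring function_context above
try:
    print(cape.invoke(3, 4, use_serdio=True))    # 5
    print(cape.invoke(5, 12, use_serdio=True))   # 13
finally:
    cape.close()            # always release the websocket connection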
56,634 |
pycape.cape
|
auto_key
|
Load a Cape key from disk or download and persist an enclave-generated one.
If no username or key_path is provided, will try to load the currently logged-in
CLI user's key from a local cache.
Args:
username: An optional string representing the Github username of a Cape
user. The resulting public key will be associated with their account,
and data encrypted with this key will be available inside functions
that user has deployed.
key_path: The path to the Cape key file. If the file already exists, the key
will be read from disk and returned. Otherwise, a Cape key will be
requested from the Cape platform and written to this location.
If None, the default path is ``"$HOME/.config/cape/capekey.pub.der"``,
or alternatively whatever path is specified by expanding the env
variables ``CAPE_LOCAL_CONFIG_DIR / CAPE_LOCAL_CAPE_KEY_FILENAME``.
pcrs: A dictionary of PCR indexes to a list of potential values.
Returns:
Bytes containing the Cape key. The key is also cached on disk for later
use.
Raises:
RuntimeError: if the enclave attestation doc does not contain a Cape key,
or if the websocket response or the attestation doc is malformed.
Exception: if the enclave threw an error while trying to fulfill the
connection request.
|
@_synchronizer
async def key(
self,
*,
username: Optional[str] = None,
key_path: Optional[Union[str, os.PathLike]] = None,
pcrs: Optional[Dict[str, List[str]]] = None,
) -> bytes:
"""Load a Cape key from disk or download and persist an enclave-generated one.
If no username or key_path is provided, will try to load the currently logged-in
CLI user's key from a local cache.
Args:
username: An optional string representing the Github username of a Cape
user. The resulting public key will be associated with their account,
and data encrypted with this key will be available inside functions
that user has deployed.
key_path: The path to the Cape key file. If the file already exists, the key
will be read from disk and returned. Otherwise, a Cape key will be
requested from the Cape platform and written to this location.
If None, the default path is ``"$HOME/.config/cape/capekey.pub.der"``,
or alternatively whatever path is specified by expanding the env
variables ``CAPE_LOCAL_CONFIG_DIR / CAPE_LOCAL_CAPE_KEY_FILENAME``.
pcrs: A dictionary of PCR indexes to a list of potential values.
Returns:
Bytes containing the Cape key. The key is also cached on disk for later
use.
Raises:
RuntimeError: if the enclave attestation doc does not contain a Cape key,
or if the websocket response or the attestation doc is malformed.
Exception: if the enclave threw an error while trying to fulfill the
connection request.
"""
if username is not None and key_path is not None:
raise ValueError("User provided both 'username' and 'key_path' arguments.")
if key_path is not None:
key_path = pathlib.Path(key_path)
else:
config_dir = pathlib.Path(cape_config.LOCAL_CONFIG_DIR)
if username is not None:
# look for locally-cached user key
key_qualifier = config_dir / "encryption_keys" / username
else:
# try to load the current CLI user's capekey
key_qualifier = config_dir
key_path = key_qualifier / cape_config.LOCAL_CAPE_KEY_FILENAME
if key_path.exists():
with open(key_path, "rb") as f:
cape_key = f.read()
return cape_key
if username is not None:
cape_key = await self._request_key_with_username(username, pcrs=pcrs)
await _persist_cape_key(cape_key, key_path)
return cape_key
raise ValueError(
"Cannot find a Cape key in the local cache. Either specify a username or "
"log into the Cape CLI and run `cape key` to locally cache your own "
"account's Cape key."
)
|
(self, *, username: Optional[str] = None, key_path: Union[str, os.PathLike, NoneType] = None, pcrs: Optional[Dict[str, List[str]]] = None) -> bytes
|
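A short sketch of Cape.key based on the docstring above; the username is a placeholder, and when no cached key exists the call contacts the Cape platform.
from pycape import Cape

cape = Cape(url="https://app.capeprivacy.com")
cape_key = cape.key(username="example-user")   # placeholder username
print(len(cape_key), "bytes")                  # key bytes, also cached on disk for later use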
56,635 |
pycape.cape
|
auto_run
|
Single-shot version of connect + invoke + close.
This method takes care of establishing a websocket connection via
:meth:`~Cape.connect`, invoking it via :meth:`~Cape.invoke`, and then finally
closing the connection with :meth:`~Cape.close`. This method should be
preferred when the caller doesn't need to invoke a Cape function more than once.
Args:
function_ref: A value convertible to a :class:`~.function_ref.FunctionRef`,
representing a deployed Cape function. See :meth:`Cape.function` for
recognized values.
*args: Arguments to pass to the connected Cape function. If
``use_serdio=False``, we expect a single argument of type ``bytes``.
Otherwise, these arguments should match the positional arguments
of the undecorated Cape handler, and they will be auto-serialized by
Serdio before being sent in the request.
serde_hooks: An optional pair of serdio encoder/decoder hooks convertible
to :class:`serdio.SerdeHookBundle`. The hooks are necessary if the
``args`` / ``kwargs`` have any user-defined types that can't be handled
by vanilla Serdio. See :func:`serdio.bundle_serde_hooks` for supported
types.
use_serdio: Boolean controlling whether or not the inputs should be
auto-serialized by serdio.
kwargs: Keyword arguments to be passed to the connected Cape function.
These are treated the same way as the ``args`` are.
Returns:
If ``use_serdio=True``, returns the auto-deserialized result of calling the
connected Cape function on the given ``args`` / ``kwargs``.
If ``use_serdio=False``, returns the output of the Cape function as raw
bytes.
Raises:
RuntimeError: if serialized inputs could not be HPKE-encrypted, or if
websocket response is malformed.
|
@_synchronizer
async def key(
self,
*,
username: Optional[str] = None,
key_path: Optional[Union[str, os.PathLike]] = None,
pcrs: Optional[Dict[str, List[str]]] = None,
) -> bytes:
"""Load a Cape key from disk or download and persist an enclave-generated one.
If no username or key_path is provided, will try to load the currently logged-in
CLI user's key from a local cache.
Args:
username: An optional string representing the Github username of a Cape
user. The resulting public key will be associated with their account,
and data encrypted with this key will be available inside functions
that user has deployed.
key_path: The path to the Cape key file. If the file already exists, the key
will be read from disk and returned. Otherwise, a Cape key will be
requested from the Cape platform and written to this location.
If None, the default path is ``"$HOME/.config/cape/capekey.pub.der"``,
or alternatively whatever path is specified by expanding the env
variables ``CAPE_LOCAL_CONFIG_DIR / CAPE_LOCAL_CAPE_KEY_FILENAME``.
pcrs: A dictionary of PCR indexes to a list of potential values.
Returns:
Bytes containing the Cape key. The key is also cached on disk for later
use.
Raises:
RuntimeError: if the enclave attestation doc does not contain a Cape key,
or if the websocket response or the attestation doc is malformed.
Exception: if the enclave threw an error while trying to fulfill the
connection request.
"""
if username is not None and key_path is not None:
raise ValueError("User provided both 'username' and 'key_path' arguments.")
if key_path is not None:
key_path = pathlib.Path(key_path)
else:
config_dir = pathlib.Path(cape_config.LOCAL_CONFIG_DIR)
if username is not None:
# look for locally-cached user key
key_qualifier = config_dir / "encryption_keys" / username
else:
# try to load the current CLI user's capekey
key_qualifier = config_dir
key_path = key_qualifier / cape_config.LOCAL_CAPE_KEY_FILENAME
if key_path.exists():
with open(key_path, "rb") as f:
cape_key = f.read()
return cape_key
if username is not None:
cape_key = await self._request_key_with_username(username, pcrs=pcrs)
await _persist_cape_key(cape_key, key_path)
return cape_key
raise ValueError(
"Cannot find a Cape key in the local cache. Either specify a username or "
"log into the Cape CLI and run `cape key` to locally cache your own "
"account's Cape key."
)
|
(self, function_ref: Union[str, os.PathLike, pycape.function_ref.FunctionRef], token: Union[str, os.PathLike, pycape.token.Token], *args: Any, pcrs: Optional[Dict[str, List[str]]] = None, serde_hooks=None, use_serdio: bool = False, **kwargs: Any) -> Any
|
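The single-shot form, matching the signature shown above; file names are placeholders.
from pycape import Cape

cape = Cape(url="https://app.capeprivacy.com")
f = cape.function("function.json")
t = cape.token("pycape-dev.token")
result = cape.run(f, t, 3, 4, use_serdio=True)  # connect + invoke + close in one call
print(result)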
56,636 |
pycape.cape
|
token
|
Create or load a :class:`~token.Token`.
Args:
token: Filepath to a token file, or the raw token string itself.
Returns:
A :class:`~token.Token` that can be used to access users' deployed Cape
functions.
Raises:
TypeError: if the ``token`` argument type is unrecognized.
|
def token(self, token: Union[str, os.PathLike, tkn.Token]) -> tkn.Token:
"""Create or load a :class:`~token.Token`.
Args:
token: Filepath to a token file, or the raw token string itself.
Returns:
A :class:`~token.Token` that can be used to access users' deployed Cape
functions.
Raises:
TypeError: if the ``token`` argument type is unrecognized.
"""
token_out = None
if isinstance(token, pathlib.Path):
tokenfile = token
return tkn.Token.from_disk(tokenfile)
if isinstance(token, str):
# str could be a filename
if len(token) <= 255:
token_as_path = pathlib.Path(token)
token_out = _try_load_token_file(token_as_path)
return token_out or tkn.Token(token)
if isinstance(token, tkn.Token):
return token
raise TypeError(f"Expected token to be PathLike or str, found {type(token)}")
|
(self, token: Union[str, os.PathLike, pycape.token.Token]) -> pycape.token.Token
|
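Per the implementation above, Cape.token accepts either a path to a token file or the raw token string itself; a sketch (both values are placeholders):
from pycape import Cape

cape = Cape(url="https://app.capeprivacy.com")
t1 = cape.token("pycape-dev.token")    # read from disk if such a file exists
t2 = cape.token("example-token-value") # otherwise the string itself is treated as the token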
56,637 |
pycape.function_ref
|
FunctionRef
|
A reference to a Cape function.
Args:
id: String denoting the function ID of the deployed Cape function.
Typically given with the output of the Cape CLI's ``deploy`` command.
name: String denoting the name of the deployed Cape function. Must be of the
form ``USER/FUNC_NAME`` where ``USER`` is the Github username of the Cape
user and ``FUNC_NAME`` is the name they gave for the function at
deploy-time.
checksum: Optional string denoting the checksum of the deployed Cape function.
If supplied as part of a ``FunctionRef``, the :class:`~pycape.cape.Cape`
client will verify that enclave responses include a matching checksum
whenever the ``FunctionRef`` is included in Cape requests.
|
class FunctionRef:
"""A reference to a Cape function.
Args:
id: String denoting the function ID of the deployed Cape function.
Typically given with the output of the Cape CLI's ``deploy`` command.
name: String denoting the name of the deployed Cape function. Must be of the
form ``USER/FUNC_NAME`` where ``USER`` is the Github username of the Cape
user and ``FUNC_NAME`` is the name they gave for the function at
deploy-time.
checksum: Optional string denoting the checksum of the deployed Cape function.
If supplied as part of a ``FunctionRef``, the :class:`~pycape.cape.Cape`
client will verify that enclave responses include a matching checksum
whenever the ``FunctionRef`` is included in Cape requests.
"""
def __init__(
self,
id: Optional[str] = None,
name: Optional[str] = None,
checksum: Optional[str] = None,
):
id_ = id
if id_ is None and name is None:
raise ValueError(
"Must provide one of `id` or `name` arguments, found None for both."
)
if id_ is not None and not isinstance(id_, str):
raise TypeError(f"Function id must be a string, found {type(id_)}.")
self._id = id_
self._user = None
self._name = None
if name is not None:
if not isinstance(name, str):
raise TypeError(f"Function name must be a string, found {type(id_)}.")
terms = name.split("/")
if len(terms) != 2:
raise ValueError(
"Function name must be of form '<username>/<function_name>', "
f"found '{name}'."
)
self._user, self._name = terms
if checksum is not None and not isinstance(checksum, str):
raise TypeError(
f"Function checksum must be a string, found {type(checksum)}."
)
self._checksum = checksum
def __repr__(self):
return (
f"{self.__class__.__name__}(\n"
f" name={self.full_name},\n"
f" id={self.id},\n"
f" checksum={self.checksum},\n"
f")"
)
@property
def id(self):
return self._id
@property
def checksum(self):
return self._checksum
@property
def user(self):
return self._user
@property
def name(self):
return self._name
@property
def full_name(self):
if self.user is not None and self.name is not None:
return f"{self.user}/{self.name}"
@classmethod
def from_json(cls, function_json: Union[str, os.PathLike]) -> FunctionRef:
"""Construct a :class:`~.function_ref.FunctionRef` from a JSON string or file.
Args:
function_json: a JSON string or filepath containing function ID and
optional function checksum.
Returns:
A :class:`~.function_ref.FunctionRef` representing the deployed Cape
function.
Raises:
ValueError: if the json file doesn't exist, or the json is missing a
``function_id`` key-value.
TypeError: if ``function_json`` is neither Path-like nor str.
"""
if isinstance(function_json, pathlib.Path):
function_config = _try_load_json_file(function_json)
if function_config is None:
raise ValueError(f"JSON file not found @ {str(function_json)}")
elif isinstance(function_json, str):
# try to treat function_json as filepath str
json_path = pathlib.Path(function_json)
function_config = _try_load_json_file(json_path)
# if file not found, treat function_json as json str
function_config = function_config or json.loads(function_json)
else:
raise TypeError(
"The function_json argument expects a json string or "
f"a path to a json file, found: {type(function_json)}."
)
function_id = function_config.get("function_id")
function_name = function_config.get("function_name")
if function_id is None and function_name is None:
raise ValueError(
"Function JSON must have either function_id or function_name values, "
"found neither."
)
# warn user when they have a deprecated function token
function_token = function_config.get("function_token")
if function_token is not None:
_logger.warn(
"Ignoring function_token in FunctionRef json. Function tokens have "
"been removed. Instead, request a Personal Access Token from the "
"function owner and pass it to Cape.run. More info at "
"https://docs.capeprivacy.com/reference/user-tokens."
)
function_checksum = function_config.get("function_checksum")
return cls(function_id, function_name, function_checksum)
def to_json(self, path: Optional[Union[str, os.PathLike]] = None) -> Optional[str]:
"""Write this :class:`~.function_ref.FunctionRef` to a JSON string or file.
Args:
path: Optional file path to write the resulting JSON to.
Returns:
If ``path`` is None, a string with this :class:`~.function_ref.FunctionRef`
as a JSON struct.
"""
fn_ref_dict = {}
if self.id is not None:
fn_ref_dict["function_id"] = self.id
if self.user is not None and self.name is not None:
fn_ref_dict["function_name"] = f"{self.user}/{self.name}"
if self.checksum is not None:
fn_ref_dict["function_checksum"] = self.checksum
if path is None:
return json.dumps(fn_ref_dict)
with open(path, "w") as f:
json.dump(fn_ref_dict, f)
|
(id: 'Optional[str]' = None, name: 'Optional[str]' = None, checksum: 'Optional[str]' = None)
|
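A sketch of constructing a FunctionRef by name and round-tripping it through JSON; the user/function name and checksum are made-up values.
from pycape.function_ref import FunctionRef

ref = FunctionRef(name="example-user/echo", checksum="abc123")   # placeholder values
as_json = ref.to_json()                   # JSON with function_name / function_checksum
same_ref = FunctionRef.from_json(as_json)
print(same_ref.full_name, same_ref.checksum)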
56,638 |
pycape.function_ref
|
__init__
| null |
def __init__(
self,
id: Optional[str] = None,
name: Optional[str] = None,
checksum: Optional[str] = None,
):
id_ = id
if id_ is None and name is None:
raise ValueError(
"Must provide one of `id` or `name` arguments, found None for both."
)
if id_ is not None and not isinstance(id_, str):
raise TypeError(f"Function id must be a string, found {type(id_)}.")
self._id = id_
self._user = None
self._name = None
if name is not None:
if not isinstance(name, str):
raise TypeError(f"Function name must be a string, found {type(id_)}.")
terms = name.split("/")
if len(terms) != 2:
raise ValueError(
"Function name must be of form '<username>/<function_name>', "
f"found '{name}'."
)
self._user, self._name = terms
if checksum is not None and not isinstance(checksum, str):
raise TypeError(
f"Function checksum must be a string, found {type(checksum)}."
)
self._checksum = checksum
|
(self, id: Optional[str] = None, name: Optional[str] = None, checksum: Optional[str] = None)
|
56,639 |
pycape.function_ref
|
__repr__
| null |
def __repr__(self):
return (
f"{self.__class__.__name__}(\n"
f" name={self.full_name},\n"
f" id={self.id},\n"
f" checksum={self.checksum},\n"
f")"
)
|
(self)
|
56,640 |
pycape.function_ref
|
to_json
|
Write this :class:`~.function_ref.FunctionRef` to a JSON string or file.
Args:
path: Optional file path to write the resulting JSON to.
Returns:
If ``path`` is None, a string with this :class:`~.function_ref.FunctionRef`
as a JSON struct.
|
def to_json(self, path: Optional[Union[str, os.PathLike]] = None) -> Optional[str]:
"""Write this :class:`~.function_ref.FunctionRef` to a JSON string or file.
Args:
path: Optional file path to write the resulting JSON to.
Returns:
If ``path`` is None, a string with this :class:`~.function_ref.FunctionRef`
as a JSON struct.
"""
fn_ref_dict = {}
if self.id is not None:
fn_ref_dict["function_id"] = self.id
if self.user is not None and self.name is not None:
fn_ref_dict["function_name"] = f"{self.user}/{self.name}"
if self.checksum is not None:
fn_ref_dict["function_checksum"] = self.checksum
if path is None:
return json.dumps(fn_ref_dict)
with open(path, "w") as f:
json.dump(fn_ref_dict, f)
|
(self, path: Union[str, os.PathLike, NoneType] = None) -> Optional[str]
|
56,641 |
pycape.token
|
Token
|
A Cape Personal Access Token (PAT).
See https://docs.capeprivacy.com/reference/user-tokens for more info.
Args:
token: String representing the Personal Access Token.
|
class Token:
"""A Cape Personal Access Token (PAT).
See https://docs.capeprivacy.com/reference/user-tokens for more info.
Args:
token: String representing the Personal Access Token.
"""
def __init__(self, token: str):
self._token = token
@property
def token(self):
return self._token
@property
def raw(self):
return self._token
def to_disk(self, location: os.PathLike):
"""Write the PAT to ``location``."""
with open(location, "w") as f:
f.write(self.token)
@classmethod
def from_disk(cls, location: os.PathLike):
"""Load a PAT from ``location``."""
location = pathlib.Path(location)
if not location.exists():
raise ValueError(f"Token file not found at {str(location)}.")
with open(location, "r") as f:
token = f.read()
return cls(token)
|
(token: str)
|
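Persisting and reloading a PAT with the methods above; the token value and path are placeholders.
from pycape.token import Token

tok = Token("example-token-value")      # placeholder PAT string
tok.to_disk("pycape-dev.token")
same = Token.from_disk("pycape-dev.token")
assert same.raw == tok.raw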
56,642 |
pycape.token
|
__init__
| null |
def __init__(self, token: str):
self._token = token
|
(self, token: str)
|
56,643 |
pycape.token
|
to_disk
|
Write the PAT to ``location``.
|
def to_disk(self, location: os.PathLike):
"""Write the PAT to ``location``."""
with open(location, "w") as f:
f.write(self.token)
|
(self, location: os.PathLike)
|
56,651 |
nibabel.analyze
|
AnalyzeHeader
|
Class for basic analyze header
Implements zoom-only setting of affine transform, and no image
scaling
|
class AnalyzeHeader(LabeledWrapStruct, SpatialHeader):
"""Class for basic analyze header
Implements zoom-only setting of affine transform, and no image
scaling
"""
# Copies of module-level definitions
template_dtype = header_dtype
_data_type_codes = data_type_codes
# fields with recoders for their values
_field_recoders = {'datatype': data_type_codes}
# default x flip
default_x_flip = True
# data scaling capabilities
has_data_slope = False
has_data_intercept = False
sizeof_hdr = 348
def __init__(self, binaryblock=None, endianness=None, check=True):
"""Initialize header from binary data block
Parameters
----------
binaryblock : {None, string} optional
binary block to set into header. By default, None, in
which case we insert the default empty header block
endianness : {None, '<','>', other endian code} string, optional
endianness of the binaryblock. If None, guess endianness
from the data.
check : bool, optional
Whether to check content of header in initialization.
Default is True.
Examples
--------
>>> hdr1 = AnalyzeHeader() # an empty header
>>> hdr1.endianness == native_code
True
>>> hdr1.get_data_shape()
(0,)
>>> hdr1.set_data_shape((1,2,3)) # now with some content
>>> hdr1.get_data_shape()
(1, 2, 3)
We can set the binary block directly via this initialization.
Here we get it from the header we have just made
>>> binblock2 = hdr1.binaryblock
>>> hdr2 = AnalyzeHeader(binblock2)
>>> hdr2.get_data_shape()
(1, 2, 3)
Empty headers are native endian by default
>>> hdr2.endianness == native_code
True
You can pass valid opposite endian headers with the
``endianness`` parameter. Even empty headers can have
endianness
>>> hdr3 = AnalyzeHeader(endianness=swapped_code)
>>> hdr3.endianness == swapped_code
True
If you do not pass an endianness, and you pass some data, we
will try to guess from the passed data.
>>> binblock3 = hdr3.binaryblock
>>> hdr4 = AnalyzeHeader(binblock3)
>>> hdr4.endianness == swapped_code
True
"""
super().__init__(binaryblock, endianness, check)
@classmethod
def guessed_endian(klass, hdr):
"""Guess intended endianness from mapping-like ``hdr``
Parameters
----------
hdr : mapping-like
hdr for which to guess endianness
Returns
-------
endianness : {'<', '>'}
Guessed endianness of header
Examples
--------
Zeros header, no information, guess native
>>> hdr = AnalyzeHeader()
>>> hdr_data = np.zeros((), dtype=header_dtype)
>>> AnalyzeHeader.guessed_endian(hdr_data) == native_code
True
A valid native header is guessed native
>>> hdr_data = hdr.structarr.copy()
>>> AnalyzeHeader.guessed_endian(hdr_data) == native_code
True
And, when swapped, is guessed as swapped
>>> sw_hdr_data = hdr_data.byteswap(swapped_code)
>>> AnalyzeHeader.guessed_endian(sw_hdr_data) == swapped_code
True
The algorithm is as follows:
First, look at the first value in the ``dim`` field; this
should be between 0 and 7. If it is between 1 and 7, then
this must be a native endian header.
>>> hdr_data = np.zeros((), dtype=header_dtype) # blank binary data
>>> hdr_data['dim'][0] = 1
>>> AnalyzeHeader.guessed_endian(hdr_data) == native_code
True
>>> hdr_data['dim'][0] = 6
>>> AnalyzeHeader.guessed_endian(hdr_data) == native_code
True
>>> hdr_data['dim'][0] = -1
>>> AnalyzeHeader.guessed_endian(hdr_data) == swapped_code
True
If the first ``dim`` value is zeros, we need a tie breaker.
In that case we check the ``sizeof_hdr`` field. This should
be 348. If it looks like the byteswapped value of 348,
assume swapped. Otherwise assume native.
>>> hdr_data = np.zeros((), dtype=header_dtype) # blank binary data
>>> AnalyzeHeader.guessed_endian(hdr_data) == native_code
True
>>> hdr_data['sizeof_hdr'] = 1543569408
>>> AnalyzeHeader.guessed_endian(hdr_data) == swapped_code
True
>>> hdr_data['sizeof_hdr'] = -1
>>> AnalyzeHeader.guessed_endian(hdr_data) == native_code
True
This is overridden by the ``dim[0]`` value though:
>>> hdr_data['sizeof_hdr'] = 1543569408
>>> hdr_data['dim'][0] = 1
>>> AnalyzeHeader.guessed_endian(hdr_data) == native_code
True
"""
dim0 = int(hdr['dim'][0])
if dim0 == 0:
if hdr['sizeof_hdr'].byteswap() == klass.sizeof_hdr:
return swapped_code
return native_code
elif 1 <= dim0 <= 7:
return native_code
return swapped_code
@classmethod
def default_structarr(klass, endianness=None):
"""Return header data for empty header with given endianness"""
hdr_data = super().default_structarr(endianness)
hdr_data['sizeof_hdr'] = klass.sizeof_hdr
hdr_data['dim'] = 1
hdr_data['dim'][0] = 0
hdr_data['pixdim'] = 1
hdr_data['datatype'] = 16 # float32
hdr_data['bitpix'] = 32
return hdr_data
@classmethod
def from_header(klass, header=None, check=True):
"""Class method to create header from another header
Parameters
----------
header : ``Header`` instance or mapping
a header of this class, or another class of header for
conversion to this type
check : {True, False}
whether to check header for integrity
Returns
-------
hdr : header instance
fresh header instance of our own class
"""
# own type, return copy
if type(header) == klass:
obj = header.copy()
if check:
obj.check_fix()
return obj
# not own type, make fresh header instance
obj = klass(check=check)
if header is None:
return obj
if hasattr(header, 'as_analyze_map'):
# header is convertible from a field mapping
mapping = header.as_analyze_map()
for key in mapping:
try:
obj[key] = mapping[key]
except (ValueError, KeyError):
# the presence of the mapping certifies the fields as being
# of the same meaning as for Analyze types, so we can
# safely discard fields with names not known to this header
# type on the basis they are from the wrong Analyze dialect
pass
# set any fields etc that are specific to this format (overridden by
# sub-classes)
obj._clean_after_mapping()
# Fallback basic conversion always done.
# More specific warning for unsupported datatypes
orig_code = header.get_data_dtype()
try:
obj.set_data_dtype(orig_code)
except HeaderDataError:
raise HeaderDataError(
f'Input header {header.__class__} has datatype '
f'{header.get_value_label("datatype")} '
f'but output header {klass} does not support it'
)
obj.set_data_dtype(header.get_data_dtype())
obj.set_data_shape(header.get_data_shape())
obj.set_zooms(header.get_zooms())
if check:
obj.check_fix()
return obj
def _clean_after_mapping(self):
"""Set format-specific stuff after converting header from mapping
This routine cleans up Analyze-type headers that have had their fields
set from an Analyze map returned by the ``as_analyze_map`` method.
Nifti 1 / 2, SPM Analyze, Analyze are all Analyze-type headers.
Because this map can set fields that are illegal for particular
subtypes of the Analyze header, this routine cleans these up before the
resulting header is checked and returned.
For example, a Nifti1 single (``.nii``) header has magic "n+1".
Passing the nifti single header for conversion to a Nifti1Pair header
using the ``as_analyze_map`` method will by default set the header
magic to "n+1", when it should be "ni1" for the pair header. This
method is for that kind of case - so the specific header can set fields
like magic correctly, even though the mapping has given a wrong value.
"""
# All current Nifti etc fields that are present in the Analyze header
# have the same meaning as they do for Analyze.
pass
def raw_data_from_fileobj(self, fileobj):
"""Read unscaled data array from `fileobj`
Parameters
----------
fileobj : file-like
Must be open, and implement ``read`` and ``seek`` methods
Returns
-------
arr : ndarray
unscaled data array
"""
dtype = self.get_data_dtype()
shape = self.get_data_shape()
offset = self.get_data_offset()
return array_from_file(shape, dtype, fileobj, offset)
def data_from_fileobj(self, fileobj):
"""Read scaled data array from `fileobj`
Use this routine to get the scaled image data from an image file
`fileobj`, given a header `self`. "Scaled" means, with any header
scaling factors applied to the raw data in the file. Use
`raw_data_from_fileobj` to get the raw data.
Parameters
----------
fileobj : file-like
Must be open, and implement ``read`` and ``seek`` methods
Returns
-------
arr : ndarray
scaled data array
Notes
-----
We use the header to get any scale or intercept values to apply to the
data. Raw Analyze files don't have scale factors or intercepts, but
this routine also works with formats based on Analyze, that do have
scaling, such as SPM analyze formats and NIfTI.
"""
# read unscaled data
data = self.raw_data_from_fileobj(fileobj)
# get scalings from header. Value of None means not present in header
slope, inter = self.get_slope_inter()
slope = 1.0 if slope is None else slope
inter = 0.0 if inter is None else inter
# Upcast as necessary for big slopes, intercepts
return apply_read_scaling(data, slope, inter)
def data_to_fileobj(self, data, fileobj, rescale=True):
"""Write `data` to `fileobj`, maybe rescaling data, modifying `self`
In writing the data, we match the header to the written data, by
setting the header scaling factors, iff `rescale` is True. Thus we
modify `self` in the process of writing the data.
Parameters
----------
data : array-like
data to write; should match header defined shape
fileobj : file-like object
Object with file interface, implementing ``write`` and
``seek``
rescale : {True, False}, optional
Whether to try and rescale data to match output dtype specified by
header. If True and scaling needed and header cannot scale, then
raise ``HeaderTypeError``.
Examples
--------
>>> from nibabel.analyze import AnalyzeHeader
>>> hdr = AnalyzeHeader()
>>> hdr.set_data_shape((1, 2, 3))
>>> hdr.set_data_dtype(np.float64)
>>> from io import BytesIO
>>> str_io = BytesIO()
>>> data = np.arange(6).reshape(1,2,3)
>>> hdr.data_to_fileobj(data, str_io)
>>> data.astype(np.float64).tobytes('F') == str_io.getvalue()
True
"""
data = np.asanyarray(data)
shape = self.get_data_shape()
if data.shape != shape:
raise HeaderDataError('Data should be shape (%s)' % ', '.join(str(s) for s in shape))
out_dtype = self.get_data_dtype()
if rescale:
try:
arr_writer = make_array_writer(
data, out_dtype, self.has_data_slope, self.has_data_intercept
)
except WriterError as e:
raise HeaderTypeError(str(e))
else:
arr_writer = ArrayWriter(data, out_dtype, check_scaling=False)
seek_tell(fileobj, self.get_data_offset())
arr_writer.to_fileobj(fileobj)
self.set_slope_inter(*get_slope_inter(arr_writer))
def get_data_dtype(self):
"""Get numpy dtype for data
For examples see ``set_data_dtype``
"""
code = int(self._structarr['datatype'])
dtype = self._data_type_codes.dtype[code]
return dtype.newbyteorder(self.endianness)
def set_data_dtype(self, datatype):
"""Set numpy dtype for data from code or dtype or type
Examples
--------
>>> hdr = AnalyzeHeader()
>>> hdr.set_data_dtype(np.uint8)
>>> hdr.get_data_dtype()
dtype('uint8')
>>> hdr.set_data_dtype(np.dtype(np.uint8))
>>> hdr.get_data_dtype()
dtype('uint8')
>>> hdr.set_data_dtype('implausible') #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
HeaderDataError: data dtype "implausible" not recognized
>>> hdr.set_data_dtype('none') #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
HeaderDataError: data dtype "none" known but not supported
>>> hdr.set_data_dtype(np.void) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
HeaderDataError: data dtype "<type 'numpy.void'>" known but not supported
"""
dt = datatype
if dt not in self._data_type_codes:
try:
dt = np.dtype(dt)
except TypeError:
raise HeaderDataError(f'data dtype "{datatype}" not recognized')
if dt not in self._data_type_codes:
raise HeaderDataError(f'data dtype "{datatype}" not supported')
code = self._data_type_codes[dt]
dtype = self._data_type_codes.dtype[code]
# test for void, being careful of user-defined types
if dtype.type is np.void and not dtype.fields:
raise HeaderDataError(f'data dtype "{datatype}" known but not supported')
self._structarr['datatype'] = code
self._structarr['bitpix'] = dtype.itemsize * 8
def get_data_shape(self):
"""Get shape of data
Examples
--------
>>> hdr = AnalyzeHeader()
>>> hdr.get_data_shape()
(0,)
>>> hdr.set_data_shape((1,2,3))
>>> hdr.get_data_shape()
(1, 2, 3)
Expanding number of dimensions gets default zooms
>>> hdr.get_zooms()
(1.0, 1.0, 1.0)
"""
dims = self._structarr['dim']
ndims = dims[0]
if ndims == 0:
return (0,)
return tuple(int(d) for d in dims[1 : ndims + 1])
def set_data_shape(self, shape):
"""Set shape of data
If ``ndims == len(shape)`` then we set zooms for dimensions higher than
``ndims`` to 1.0
Parameters
----------
shape : sequence
sequence of integers specifying data array shape
"""
dims = self._structarr['dim']
ndims = len(shape)
dims[:] = 1
dims[0] = ndims
try:
dims[1 : ndims + 1] = shape
except (ValueError, OverflowError):
# numpy 1.4.1 at least generates a ValueError from trying to set a
# python long into an int64 array (dims are int64 for nifti2)
values_fit = False
else:
values_fit = np.all(dims[1 : ndims + 1] == shape)
# Error if we did not succeed setting dimensions
if not values_fit:
raise HeaderDataError(f'shape {shape} does not fit in dim datatype')
self._structarr['pixdim'][ndims + 1 :] = 1.0
def get_base_affine(self):
"""Get affine from basic (shared) header fields
Note that we get the translations from the center of the
image.
Examples
--------
>>> hdr = AnalyzeHeader()
>>> hdr.set_data_shape((3, 5, 7))
>>> hdr.set_zooms((3, 2, 1))
>>> hdr.default_x_flip
True
>>> hdr.get_base_affine() # from center of image
array([[-3., 0., 0., 3.],
[ 0., 2., 0., -4.],
[ 0., 0., 1., -3.],
[ 0., 0., 0., 1.]])
"""
hdr = self._structarr
dims = hdr['dim']
ndim = dims[0]
return shape_zoom_affine(
hdr['dim'][1 : ndim + 1], hdr['pixdim'][1 : ndim + 1], self.default_x_flip
)
get_best_affine = get_base_affine
def get_zooms(self):
"""Get zooms from header
Returns
-------
z : tuple
tuple of header zoom values
Examples
--------
>>> hdr = AnalyzeHeader()
>>> hdr.get_zooms()
(1.0,)
>>> hdr.set_data_shape((1,2))
>>> hdr.get_zooms()
(1.0, 1.0)
>>> hdr.set_zooms((3, 4))
>>> hdr.get_zooms()
(3.0, 4.0)
"""
hdr = self._structarr
dims = hdr['dim']
ndim = dims[0]
if ndim == 0:
return (1.0,)
pixdims = hdr['pixdim']
return tuple(pixdims[1 : ndim + 1])
def set_zooms(self, zooms):
"""Set zooms into header fields
See docstring for ``get_zooms`` for examples
"""
hdr = self._structarr
dims = hdr['dim']
ndim = dims[0]
zooms = np.asarray(zooms)
if len(zooms) != ndim:
raise HeaderDataError('Expecting %d zoom values for ndim %d' % (ndim, ndim))
if np.any(zooms < 0):
raise HeaderDataError('zooms must be positive')
pixdims = hdr['pixdim']
pixdims[1 : ndim + 1] = zooms[:]
def as_analyze_map(self):
"""Return header as mapping for conversion to Analyze types
Collect data from custom header type to fill in fields for Analyze and
derived header types (such as Nifti1 and Nifti2).
When Analyze types convert another header type to their own type, they
call this method to check if there are other Analyze / Nifti
fields that the source header would like to set.
Returns
-------
analyze_map : mapping
Object that can be used as a mapping thus::
for key in analyze_map:
value = analyze_map[key]
where ``key`` is the name of a field that can be set in an Analyze
header type, such as Nifti1, and ``value`` is a value for the
field. For example, `analyze_map` might be something like
``dict(regular='y', slice_duration=0.3)`` where ``regular`` is a
field present in both Analyze and Nifti1, and ``slice_duration`` is
a field restricted to Nifti1 and Nifti2. If a particular Analyze
header type does not recognize the field name, it will throw away
the value without error. See :meth:`Analyze.from_header`.
Notes
-----
You can also return a Nifti header with the relevant fields set.
Your header still needs methods ``get_data_dtype``, ``get_data_shape``
and ``get_zooms``, for the conversion, and these get called *after*
using the analyze map, so the methods will override values set in the
map.
"""
# In the case of Analyze types, the header is already such a mapping
return self
def set_data_offset(self, offset):
"""Set offset into data file to read data"""
self._structarr['vox_offset'] = offset
def get_data_offset(self):
"""Return offset into data file to read data
Examples
--------
>>> hdr = AnalyzeHeader()
>>> hdr.get_data_offset()
0
>>> hdr['vox_offset'] = 12
>>> hdr.get_data_offset()
12
"""
return int(self._structarr['vox_offset'])
def get_slope_inter(self):
"""Get scalefactor and intercept
These are not implemented for basic Analyze
"""
return None, None
def set_slope_inter(self, slope, inter=None):
"""Set slope and / or intercept into header
Set slope and intercept for image data, such that, if the image
data is ``arr``, then the scaled image data will be ``(arr *
slope) + inter``
In this case, for Analyze images, we can't store the slope or the
intercept, so this method only checks that `slope` is None or NaN or
1.0, and that `inter` is None or NaN or 0.
Parameters
----------
slope : None or float
If float, value must be NaN or 1.0 or we raise a ``HeaderTypeError``
inter : None or float, optional
If float, value must be 0.0 or we raise a ``HeaderTypeError``
"""
if (slope in (None, 1) or np.isnan(slope)) and (inter in (None, 0) or np.isnan(inter)):
return
raise HeaderTypeError('Cannot set slope != 1 or intercept != 0 for Analyze headers')
@classmethod
def _get_checks(klass):
"""Return sequence of check functions for this class"""
return (klass._chk_sizeof_hdr, klass._chk_datatype, klass._chk_bitpix, klass._chk_pixdims)
""" Check functions in format expected by BatteryRunner class """
@classmethod
def _chk_sizeof_hdr(klass, hdr, fix=False):
rep = Report(HeaderDataError)
if hdr['sizeof_hdr'] == klass.sizeof_hdr:
return hdr, rep
rep.problem_level = 30
rep.problem_msg = 'sizeof_hdr should be ' + str(klass.sizeof_hdr)
if fix:
hdr['sizeof_hdr'] = klass.sizeof_hdr
rep.fix_msg = 'set sizeof_hdr to ' + str(klass.sizeof_hdr)
return hdr, rep
@classmethod
def _chk_datatype(klass, hdr, fix=False):
rep = Report(HeaderDataError)
code = int(hdr['datatype'])
try:
dtype = klass._data_type_codes.dtype[code]
except KeyError:
rep.problem_level = 40
rep.problem_msg = 'data code %d not recognized' % code
else:
if dtype.itemsize == 0:
rep.problem_level = 40
rep.problem_msg = 'data code %d not supported' % code
else:
return hdr, rep
if fix:
rep.fix_msg = 'not attempting fix'
return hdr, rep
@classmethod
def _chk_bitpix(klass, hdr, fix=False):
rep = Report(HeaderDataError)
code = int(hdr['datatype'])
try:
dt = klass._data_type_codes.dtype[code]
except KeyError:
rep.problem_level = 10
rep.problem_msg = 'no valid datatype to fix bitpix'
if fix:
rep.fix_msg = 'no way to fix bitpix'
return hdr, rep
bitpix = dt.itemsize * 8
if bitpix == hdr['bitpix']:
return hdr, rep
rep.problem_level = 10
rep.problem_msg = 'bitpix does not match datatype'
if fix:
hdr['bitpix'] = bitpix # inplace modification
rep.fix_msg = 'setting bitpix to match datatype'
return hdr, rep
@staticmethod
def _chk_pixdims(hdr, fix=False):
rep = Report(HeaderDataError)
pixdims = hdr['pixdim']
spat_dims = pixdims[1:4]
if not np.any(spat_dims <= 0):
return hdr, rep
neg_dims = spat_dims < 0
zero_dims = spat_dims == 0
pmsgs = []
fmsgs = []
if np.any(zero_dims):
level = 30
pmsgs.append('pixdim[1,2,3] should be non-zero')
if fix:
spat_dims[zero_dims] = 1
fmsgs.append('setting 0 dims to 1')
if np.any(neg_dims):
level = 35
pmsgs.append('pixdim[1,2,3] should be positive')
if fix:
spat_dims = np.abs(spat_dims)
fmsgs.append('setting to abs of pixdim values')
rep.problem_level = level
rep.problem_msg = ' and '.join(pmsgs)
if fix:
pixdims[1:4] = spat_dims
rep.fix_msg = ' and '.join(fmsgs)
return hdr, rep
@classmethod
def may_contain_header(klass, binaryblock):
if len(binaryblock) < klass.sizeof_hdr:
return False
hdr_struct = np.ndarray(
shape=(), dtype=header_dtype, buffer=binaryblock[: klass.sizeof_hdr]
)
bs_hdr_struct = hdr_struct.byteswap()
return 348 in (hdr_struct['sizeof_hdr'], bs_hdr_struct['sizeof_hdr'])
|
(binaryblock=None, endianness=None, check=True)
|
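A sketch mirroring the doctests above: build an empty AnalyzeHeader, set its geometry and dtype, and read back the base affine.
import numpy as np
from nibabel.analyze import AnalyzeHeader

hdr = AnalyzeHeader()
hdr.set_data_shape((3, 5, 7))
hdr.set_zooms((3.0, 2.0, 1.0))
hdr.set_data_dtype(np.float32)
print(hdr.get_data_shape())    # (3, 5, 7)
print(hdr.get_base_affine())   # translations taken from the center of the image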
56,652 |
nibabel.wrapstruct
|
__eq__
|
equality between two structures defined by binaryblock
Examples
--------
>>> wstr = WrapStruct()
>>> wstr2 = WrapStruct()
>>> wstr == wstr2
True
>>> wstr3 = WrapStruct(endianness=swapped_code)
>>> wstr == wstr3
True
|
def __eq__(self, other):
"""equality between two structures defined by binaryblock
Examples
--------
>>> wstr = WrapStruct()
>>> wstr2 = WrapStruct()
>>> wstr == wstr2
True
>>> wstr3 = WrapStruct(endianness=swapped_code)
>>> wstr == wstr3
True
"""
this_end = self.endianness
this_bb = self.binaryblock
try:
other_end = other.endianness
other_bb = other.binaryblock
except AttributeError:
return False
if this_end == other_end:
return this_bb == other_bb
other_bb = other._structarr.byteswap().tobytes()
return this_bb == other_bb
|
(self, other)
|
56,653 |
nibabel.wrapstruct
|
__getitem__
|
Return values from structure data
Examples
--------
>>> wstr = WrapStruct()
>>> wstr['integer'] == 0
True
|
def __getitem__(self, item):
"""Return values from structure data
Examples
--------
>>> wstr = WrapStruct()
>>> wstr['integer'] == 0
True
"""
return self._structarr[item]
|
(self, item)
|
56,654 |
nibabel.analyze
|
__init__
|
Initialize header from binary data block
Parameters
----------
binaryblock : {None, string} optional
binary block to set into header. By default, None, in
which case we insert the default empty header block
endianness : {None, '<','>', other endian code} string, optional
endianness of the binaryblock. If None, guess endianness
from the data.
check : bool, optional
Whether to check content of header in initialization.
Default is True.
Examples
--------
>>> hdr1 = AnalyzeHeader() # an empty header
>>> hdr1.endianness == native_code
True
>>> hdr1.get_data_shape()
(0,)
>>> hdr1.set_data_shape((1,2,3)) # now with some content
>>> hdr1.get_data_shape()
(1, 2, 3)
We can set the binary block directly via this initialization.
Here we get it from the header we have just made
>>> binblock2 = hdr1.binaryblock
>>> hdr2 = AnalyzeHeader(binblock2)
>>> hdr2.get_data_shape()
(1, 2, 3)
Empty headers are native endian by default
>>> hdr2.endianness == native_code
True
You can pass valid opposite endian headers with the
``endianness`` parameter. Even empty headers can have
endianness
>>> hdr3 = AnalyzeHeader(endianness=swapped_code)
>>> hdr3.endianness == swapped_code
True
If you do not pass an endianness, and you pass some data, we
will try to guess from the passed data.
>>> binblock3 = hdr3.binaryblock
>>> hdr4 = AnalyzeHeader(binblock3)
>>> hdr4.endianness == swapped_code
True
|
def __init__(self, binaryblock=None, endianness=None, check=True):
"""Initialize header from binary data block
Parameters
----------
binaryblock : {None, string} optional
binary block to set into header. By default, None, in
which case we insert the default empty header block
endianness : {None, '<','>', other endian code} string, optional
endianness of the binaryblock. If None, guess endianness
from the data.
check : bool, optional
Whether to check content of header in initialization.
Default is True.
Examples
--------
>>> hdr1 = AnalyzeHeader() # an empty header
>>> hdr1.endianness == native_code
True
>>> hdr1.get_data_shape()
(0,)
>>> hdr1.set_data_shape((1,2,3)) # now with some content
>>> hdr1.get_data_shape()
(1, 2, 3)
We can set the binary block directly via this initialization.
Here we get it from the header we have just made
>>> binblock2 = hdr1.binaryblock
>>> hdr2 = AnalyzeHeader(binblock2)
>>> hdr2.get_data_shape()
(1, 2, 3)
Empty headers are native endian by default
>>> hdr2.endianness == native_code
True
You can pass valid opposite endian headers with the
``endianness`` parameter. Even empty headers can have
endianness
>>> hdr3 = AnalyzeHeader(endianness=swapped_code)
>>> hdr3.endianness == swapped_code
True
If you do not pass an endianness, and you pass some data, we
will try to guess from the passed data.
>>> binblock3 = hdr3.binaryblock
>>> hdr4 = AnalyzeHeader(binblock3)
>>> hdr4.endianness == swapped_code
True
"""
super().__init__(binaryblock, endianness, check)
|
(self, binaryblock=None, endianness=None, check=True)
|
56,655 |
nibabel.wrapstruct
|
__iter__
| null |
def __iter__(self):
return iter(self.keys())
|
(self)
|
56,657 |
nibabel.wrapstruct
|
__setitem__
|
Set values in structured data
Examples
--------
>>> wstr = WrapStruct()
>>> wstr['integer'] = 3
>>> wstr['integer']
array(3, dtype=int16)
|
def __setitem__(self, item, value):
"""Set values in structured data
Examples
--------
>>> wstr = WrapStruct()
>>> wstr['integer'] = 3
>>> wstr['integer']
array(3, dtype=int16)
"""
self._structarr[item] = value
|
(self, item, value)
|
56,658 |
nibabel.wrapstruct
|
__str__
|
Return string representation for printing
|
def __str__(self):
"""Return string representation for printing"""
summary = f"{self.__class__} object, endian='{self.endianness}'"
def _getter(obj, key):
try:
return obj.get_value_label(key)
except ValueError:
return obj[key]
return '\n'.join([summary, pretty_mapping(self, _getter)])
|
(self)
|
56,660 |
nibabel.analyze
|
_chk_pixdims
| null |
@staticmethod
def _chk_pixdims(hdr, fix=False):
rep = Report(HeaderDataError)
pixdims = hdr['pixdim']
spat_dims = pixdims[1:4]
if not np.any(spat_dims <= 0):
return hdr, rep
neg_dims = spat_dims < 0
zero_dims = spat_dims == 0
pmsgs = []
fmsgs = []
if np.any(zero_dims):
level = 30
pmsgs.append('pixdim[1,2,3] should be non-zero')
if fix:
spat_dims[zero_dims] = 1
fmsgs.append('setting 0 dims to 1')
if np.any(neg_dims):
level = 35
pmsgs.append('pixdim[1,2,3] should be positive')
if fix:
spat_dims = np.abs(spat_dims)
fmsgs.append('setting to abs of pixdim values')
rep.problem_level = level
rep.problem_msg = ' and '.join(pmsgs)
if fix:
pixdims[1:4] = spat_dims
rep.fix_msg = ' and '.join(fmsgs)
return hdr, rep
|
(hdr, fix=False)
|
56,661 |
nibabel.analyze
|
_clean_after_mapping
|
Set format-specific stuff after converting header from mapping
This routine cleans up Analyze-type headers that have had their fields
set from an Analyze map returned by the ``as_analyze_map`` method.
Nifti 1 / 2, SPM Analyze, Analyze are all Analyze-type headers.
Because this map can set fields that are illegal for particular
subtypes of the Analyze header, this routine cleans these up before the
resulting header is checked and returned.
For example, a Nifti1 single (``.nii``) header has magic "n+1".
Passing the nifti single header for conversion to a Nifti1Pair header
using the ``as_analyze_map`` method will by default set the header
magic to "n+1", when it should be "ni1" for the pair header. This
method is for that kind of case - so the specific header can set fields
like magic correctly, even though the mapping has given a wrong value.
|
def _clean_after_mapping(self):
"""Set format-specific stuff after converting header from mapping
This routine cleans up Analyze-type headers that have had their fields
set from an Analyze map returned by the ``as_analyze_map`` method.
Nifti 1 / 2, SPM Analyze, Analyze are all Analyze-type headers.
Because this map can set fields that are illegal for particular
subtypes of the Analyze header, this routine cleans these up before the
resulting header is checked and returned.
For example, a Nifti1 single (``.nii``) header has magic "n+1".
Passing the nifti single header for conversion to a Nifti1Pair header
using the ``as_analyze_map`` method will by default set the header
magic to "n+1", when it should be "ni1" for the pair header. This
method is for that kind of case - so the specific header can set fields
like magic correctly, even though the mapping has given a wrong value.
"""
# All current Nifti etc fields that are present in the Analyze header
# have the same meaning as they do for Analyze.
pass
|
(self)
|
56,662 |
nibabel.analyze
|
as_analyze_map
|
Return header as mapping for conversion to Analyze types
Collect data from custom header type to fill in fields for Analyze and
derived header types (such as Nifti1 and Nifti2).
When Analyze types convert another header type to their own type, they
call this method to check if there are other Analyze / Nifti
fields that the source header would like to set.
Returns
-------
analyze_map : mapping
Object that can be used as a mapping thus::
for key in analyze_map:
value = analyze_map[key]
where ``key`` is the name of a field that can be set in an Analyze
header type, such as Nifti1, and ``value`` is a value for the
field. For example, `analyze_map` might be something like
``dict(regular='y', slice_duration=0.3)`` where ``regular`` is a
field present in both Analyze and Nifti1, and ``slice_duration`` is
a field restricted to Nifti1 and Nifti2. If a particular Analyze
header type does not recognize the field name, it will throw away
the value without error. See :meth:`Analyze.from_header`.
Notes
-----
You can also return a Nifti header with the relevant fields set.
Your header still needs methods ``get_data_dtype``, ``get_data_shape``
and ``get_zooms``, for the conversion, and these get called *after*
using the analyze map, so the methods will override values set in the
map.
|
def as_analyze_map(self):
"""Return header as mapping for conversion to Analyze types
Collect data from custom header type to fill in fields for Analyze and
derived header types (such as Nifti1 and Nifti2).
When Analyze types convert another header type to their own type, they
call this method to check if there are other Analyze / Nifti
fields that the source header would like to set.
Returns
-------
analyze_map : mapping
Object that can be used as a mapping thus::
for key in analyze_map:
value = analyze_map[key]
where ``key`` is the name of a field that can be set in an Analyze
header type, such as Nifti1, and ``value`` is a value for the
field. For example, `analyze_map` might be something like
``dict(regular='y', slice_duration=0.3)`` where ``regular`` is a
field present in both Analyze and Nifti1, and ``slice_duration`` is
a field restricted to Nifti1 and Nifti2. If a particular Analyze
header type does not recognize the field name, it will throw away
the value without error. See :meth:`Analyze.from_header`.
Notes
-----
You can also return a Nifti header with the relevant fields set.
Your header still needs methods ``get_data_dtype``, ``get_data_shape``
and ``get_zooms``, for the conversion, and these get called *after*
using the analyze map, so the methods will override values set in the
map.
"""
# In the case of Analyze types, the header is already such a mapping
return self
|
(self)
|
56,663 |
nibabel.wrapstruct
|
as_byteswapped
|
return new byteswapped object with given ``endianness``
Guaranteed to make a copy even if endianness is the same as
the current endianness.
Parameters
----------
endianness : None or string, optional
endian code to which to swap. None means swap from current
endianness, and is the default
Returns
-------
wstr : ``WrapStruct``
``WrapStruct`` object with given endianness
Examples
--------
>>> wstr = WrapStruct()
>>> wstr.endianness == native_code
True
>>> bs_wstr = wstr.as_byteswapped()
>>> bs_wstr.endianness == swapped_code
True
>>> bs_wstr = wstr.as_byteswapped(swapped_code)
>>> bs_wstr.endianness == swapped_code
True
>>> bs_wstr is wstr
False
>>> bs_wstr == wstr
True
If you write to the resulting byteswapped data, it does not
change the original.
>>> bs_wstr['integer'] = 3
>>> bs_wstr == wstr
False
If you swap to the same endianness, it returns a copy
>>> nbs_wstr = wstr.as_byteswapped(native_code)
>>> nbs_wstr.endianness == native_code
True
>>> nbs_wstr is wstr
False
|
def as_byteswapped(self, endianness=None):
"""return new byteswapped object with given ``endianness``
Guaranteed to make a copy even if endianness is the same as
the current endianness.
Parameters
----------
endianness : None or string, optional
endian code to which to swap. None means swap from current
endianness, and is the default
Returns
-------
wstr : ``WrapStruct``
``WrapStruct`` object with given endianness
Examples
--------
>>> wstr = WrapStruct()
>>> wstr.endianness == native_code
True
>>> bs_wstr = wstr.as_byteswapped()
>>> bs_wstr.endianness == swapped_code
True
>>> bs_wstr = wstr.as_byteswapped(swapped_code)
>>> bs_wstr.endianness == swapped_code
True
>>> bs_wstr is wstr
False
>>> bs_wstr == wstr
True
If you write to the resulting byteswapped data, it does not
change the original.
>>> bs_wstr['integer'] = 3
>>> bs_wstr == wstr
False
If you swap to the same endianness, it returns a copy
>>> nbs_wstr = wstr.as_byteswapped(native_code)
>>> nbs_wstr.endianness == native_code
True
>>> nbs_wstr is wstr
False
"""
current = self.endianness
if endianness is None:
if current == native_code:
endianness = swapped_code
else:
endianness = native_code
else:
endianness = endian_codes[endianness]
if endianness == current:
return self.copy()
wstr_data = self._structarr.byteswap()
return self.__class__(wstr_data.tobytes(), endianness, check=False)
|
(self, endianness=None)
|
56,664 |
nibabel.wrapstruct
|
check_fix
|
Check structured data with checks
Parameters
----------
logger : None or logging.Logger
error_level : None or int
Level of error severity at which to raise error. Any error of
severity >= `error_level` will cause an exception.
|
def check_fix(self, logger=None, error_level=None):
"""Check structured data with checks
Parameters
----------
logger : None or logging.Logger
error_level : None or int
Level of error severity at which to raise error. Any error of
severity >= `error_level` will cause an exception.
"""
if logger is None:
logger = imageglobals.logger
if error_level is None:
error_level = imageglobals.error_level
battrun = BatteryRunner(self.__class__._get_checks())
self, reports = battrun.check_fix(self)
for report in reports:
report.log_raise(logger, error_level)
|
(self, logger=None, error_level=None)
|
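A minimal usage sketch of ``check_fix`` on a concrete subclass (``AnalyzeHeader``); the deliberately wrong ``sizeof_hdr`` value and the ``error_level`` of 40 are illustrative assumptions.
import logging
from nibabel.analyze import AnalyzeHeader

hdr = AnalyzeHeader()
hdr['sizeof_hdr'] = 350  # deliberately wrong; the check battery can repair it
# problem level (30) is below error_level (40), so the issue is logged and fixed, not raised
hdr.check_fix(logger=logging.getLogger('hdr'), error_level=40)
print(int(hdr['sizeof_hdr']))  # 348 again after the fix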
56,665 |
nibabel.wrapstruct
|
copy
|
Return copy of structure
>>> wstr = WrapStruct()
>>> wstr['integer'] = 3
>>> wstr2 = wstr.copy()
>>> wstr2 is wstr
False
>>> wstr2['integer']
array(3, dtype=int16)
|
def copy(self):
"""Return copy of structure
>>> wstr = WrapStruct()
>>> wstr['integer'] = 3
>>> wstr2 = wstr.copy()
>>> wstr2 is wstr
False
>>> wstr2['integer']
array(3, dtype=int16)
"""
return self.__class__(self.binaryblock, self.endianness, check=False)
|
(self)
|
56,666 |
nibabel.analyze
|
data_from_fileobj
|
Read scaled data array from `fileobj`
Use this routine to get the scaled image data from an image file
`fileobj`, given a header `self`. "Scaled" means, with any header
scaling factors applied to the raw data in the file. Use
`raw_data_from_fileobj` to get the raw data.
Parameters
----------
fileobj : file-like
Must be open, and implement ``read`` and ``seek`` methods
Returns
-------
arr : ndarray
scaled data array
Notes
-----
We use the header to get any scale or intercept values to apply to the
data. Raw Analyze files don't have scale factors or intercepts, but
this routine also works with formats based on Analyze, that do have
scaling, such as SPM analyze formats and NIfTI.
|
def data_from_fileobj(self, fileobj):
"""Read scaled data array from `fileobj`
Use this routine to get the scaled image data from an image file
`fileobj`, given a header `self`. "Scaled" means, with any header
scaling factors applied to the raw data in the file. Use
`raw_data_from_fileobj` to get the raw data.
Parameters
----------
fileobj : file-like
Must be open, and implement ``read`` and ``seek`` methods
Returns
-------
arr : ndarray
scaled data array
Notes
-----
We use the header to get any scale or intercept values to apply to the
data. Raw Analyze files don't have scale factors or intercepts, but
this routine also works with formats based on Analyze, that do have
scaling, such as SPM analyze formats and NIfTI.
"""
# read unscaled data
data = self.raw_data_from_fileobj(fileobj)
# get scalings from header. Value of None means not present in header
slope, inter = self.get_slope_inter()
slope = 1.0 if slope is None else slope
inter = 0.0 if inter is None else inter
# Upcast as necessary for big slopes, intercepts
return apply_read_scaling(data, slope, inter)
|
(self, fileobj)
|
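A minimal sketch of ``data_from_fileobj``, assuming an in-memory ``BytesIO`` standing in for the image file; since basic Analyze has no scaling, the scaled array equals the raw one.
from io import BytesIO
import numpy as np
from nibabel.analyze import AnalyzeHeader

hdr = AnalyzeHeader()
hdr.set_data_shape((2, 3, 4))
hdr.set_data_dtype(np.int16)
raw = np.arange(24, dtype=np.int16).reshape(2, 3, 4)
fobj = BytesIO(raw.tobytes(order='F'))  # Fortran order, as written on disk
arr = hdr.data_from_fileobj(fobj)
assert np.array_equal(arr, raw)         # no slope / intercept, so unchanged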
56,667 |
nibabel.analyze
|
data_to_fileobj
|
Write `data` to `fileobj`, maybe rescaling data, modifying `self`
In writing the data, we match the header to the written data, by
setting the header scaling factors, iff `rescale` is True. Thus we
modify `self` in the process of writing the data.
Parameters
----------
data : array-like
data to write; should match header defined shape
fileobj : file-like object
Object with file interface, implementing ``write`` and
``seek``
rescale : {True, False}, optional
Whether to try and rescale data to match output dtype specified by
header. If True and scaling needed and header cannot scale, then
raise ``HeaderTypeError``.
Examples
--------
>>> from nibabel.analyze import AnalyzeHeader
>>> hdr = AnalyzeHeader()
>>> hdr.set_data_shape((1, 2, 3))
>>> hdr.set_data_dtype(np.float64)
>>> from io import BytesIO
>>> str_io = BytesIO()
>>> data = np.arange(6).reshape(1,2,3)
>>> hdr.data_to_fileobj(data, str_io)
>>> data.astype(np.float64).tobytes('F') == str_io.getvalue()
True
|
def data_to_fileobj(self, data, fileobj, rescale=True):
"""Write `data` to `fileobj`, maybe rescaling data, modifying `self`
In writing the data, we match the header to the written data, by
setting the header scaling factors, iff `rescale` is True. Thus we
modify `self` in the process of writing the data.
Parameters
----------
data : array-like
data to write; should match header defined shape
fileobj : file-like object
Object with file interface, implementing ``write`` and
``seek``
rescale : {True, False}, optional
Whether to try and rescale data to match output dtype specified by
header. If True and scaling needed and header cannot scale, then
raise ``HeaderTypeError``.
Examples
--------
>>> from nibabel.analyze import AnalyzeHeader
>>> hdr = AnalyzeHeader()
>>> hdr.set_data_shape((1, 2, 3))
>>> hdr.set_data_dtype(np.float64)
>>> from io import BytesIO
>>> str_io = BytesIO()
>>> data = np.arange(6).reshape(1,2,3)
>>> hdr.data_to_fileobj(data, str_io)
>>> data.astype(np.float64).tobytes('F') == str_io.getvalue()
True
"""
data = np.asanyarray(data)
shape = self.get_data_shape()
if data.shape != shape:
raise HeaderDataError('Data should be shape (%s)' % ', '.join(str(s) for s in shape))
out_dtype = self.get_data_dtype()
if rescale:
try:
arr_writer = make_array_writer(
data, out_dtype, self.has_data_slope, self.has_data_intercept
)
except WriterError as e:
raise HeaderTypeError(str(e))
else:
arr_writer = ArrayWriter(data, out_dtype, check_scaling=False)
seek_tell(fileobj, self.get_data_offset())
arr_writer.to_fileobj(fileobj)
self.set_slope_inter(*get_slope_inter(arr_writer))
|
(self, data, fileobj, rescale=True)
|
56,668 |
nibabel.wrapstruct
|
get
|
Return value for the key k if present or d otherwise
|
def get(self, k, d=None):
"""Return value for the key k if present or d otherwise"""
return self._structarr[k] if k in self.keys() else d
|
(self, k, d=None)
|
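A small sketch of the dict-style accessor on a concrete header; the field name is a real Analyze field, the fallback string is illustrative.
from nibabel.analyze import AnalyzeHeader

hdr = AnalyzeHeader()
print(int(hdr.get('sizeof_hdr')))          # 348, the stored value
print(hdr.get('not_a_field', 'missing'))   # default returned for unknown keys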
56,669 |
nibabel.analyze
|
get_base_affine
|
Get affine from basic (shared) header fields
Note that we get the translations from the center of the
image.
Examples
--------
>>> hdr = AnalyzeHeader()
>>> hdr.set_data_shape((3, 5, 7))
>>> hdr.set_zooms((3, 2, 1))
>>> hdr.default_x_flip
True
>>> hdr.get_base_affine() # from center of image
array([[-3., 0., 0., 3.],
[ 0., 2., 0., -4.],
[ 0., 0., 1., -3.],
[ 0., 0., 0., 1.]])
|
def get_base_affine(self):
"""Get affine from basic (shared) header fields
Note that we get the translations from the center of the
image.
Examples
--------
>>> hdr = AnalyzeHeader()
>>> hdr.set_data_shape((3, 5, 7))
>>> hdr.set_zooms((3, 2, 1))
>>> hdr.default_x_flip
True
>>> hdr.get_base_affine() # from center of image
array([[-3., 0., 0., 3.],
[ 0., 2., 0., -4.],
[ 0., 0., 1., -3.],
[ 0., 0., 0., 1.]])
"""
hdr = self._structarr
dims = hdr['dim']
ndim = dims[0]
return shape_zoom_affine(
hdr['dim'][1 : ndim + 1], hdr['pixdim'][1 : ndim + 1], self.default_x_flip
)
|
(self)
|
56,671 |
nibabel.analyze
|
get_data_dtype
|
Get numpy dtype for data
For examples see ``set_data_dtype``
|
def get_data_dtype(self):
"""Get numpy dtype for data
For examples see ``set_data_dtype``
"""
code = int(self._structarr['datatype'])
dtype = self._data_type_codes.dtype[code]
return dtype.newbyteorder(self.endianness)
|
(self)
|
56,672 |
nibabel.analyze
|
get_data_offset
|
Return offset into data file to read data
Examples
--------
>>> hdr = AnalyzeHeader()
>>> hdr.get_data_offset()
0
>>> hdr['vox_offset'] = 12
>>> hdr.get_data_offset()
12
|
def get_data_offset(self):
"""Return offset into data file to read data
Examples
--------
>>> hdr = AnalyzeHeader()
>>> hdr.get_data_offset()
0
>>> hdr['vox_offset'] = 12
>>> hdr.get_data_offset()
12
"""
return int(self._structarr['vox_offset'])
|
(self)
|
56,673 |
nibabel.analyze
|
get_data_shape
|
Get shape of data
Examples
--------
>>> hdr = AnalyzeHeader()
>>> hdr.get_data_shape()
(0,)
>>> hdr.set_data_shape((1,2,3))
>>> hdr.get_data_shape()
(1, 2, 3)
Expanding number of dimensions gets default zooms
>>> hdr.get_zooms()
(1.0, 1.0, 1.0)
|
def get_data_shape(self):
"""Get shape of data
Examples
--------
>>> hdr = AnalyzeHeader()
>>> hdr.get_data_shape()
(0,)
>>> hdr.set_data_shape((1,2,3))
>>> hdr.get_data_shape()
(1, 2, 3)
Expanding number of dimensions gets default zooms
>>> hdr.get_zooms()
(1.0, 1.0, 1.0)
"""
dims = self._structarr['dim']
ndims = dims[0]
if ndims == 0:
return (0,)
return tuple(int(d) for d in dims[1 : ndims + 1])
|
(self)
|
56,674 |
nibabel.analyze
|
get_slope_inter
|
Get scalefactor and intercept
These are not implemented for basic Analyze
|
def get_slope_inter(self):
"""Get scalefactor and intercept
These are not implemented for basic Analyze
"""
return None, None
|
(self)
|
56,675 |
nibabel.wrapstruct
|
get_value_label
|
Returns label for coded field
A coded field is an int field containing codes that stand for
discrete values that also have string labels.
Parameters
----------
fieldname : str
name of header field to get label for
Returns
-------
label : str
label for code value in header field `fieldname`
Raises
------
ValueError
if field is not coded.
Examples
--------
>>> from nibabel.volumeutils import Recoder
>>> recoder = Recoder(((1, 'one'), (2, 'two')), ('code', 'label'))
>>> class C(LabeledWrapStruct):
... template_dtype = np.dtype([('datatype', 'i2')])
... _field_recoders = dict(datatype = recoder)
>>> hdr = C()
>>> hdr.get_value_label('datatype')
'<unknown code 0>'
>>> hdr['datatype'] = 2
>>> hdr.get_value_label('datatype')
'two'
|
def get_value_label(self, fieldname):
"""Returns label for coded field
A coded field is an int field containing codes that stand for
discrete values that also have string labels.
Parameters
----------
fieldname : str
name of header field to get label for
Returns
-------
label : str
label for code value in header field `fieldname`
Raises
------
ValueError
if field is not coded.
Examples
--------
>>> from nibabel.volumeutils import Recoder
>>> recoder = Recoder(((1, 'one'), (2, 'two')), ('code', 'label'))
>>> class C(LabeledWrapStruct):
... template_dtype = np.dtype([('datatype', 'i2')])
... _field_recoders = dict(datatype = recoder)
>>> hdr = C()
>>> hdr.get_value_label('datatype')
'<unknown code 0>'
>>> hdr['datatype'] = 2
>>> hdr.get_value_label('datatype')
'two'
"""
if fieldname not in self._field_recoders:
raise ValueError(f'{fieldname} not a coded field')
code = int(self._structarr[fieldname])
try:
return self._field_recoders[fieldname].label[code]
except KeyError:
return f'<unknown code {code}>'
|
(self, fieldname)
|
56,676 |
nibabel.analyze
|
get_zooms
|
Get zooms from header
Returns
-------
z : tuple
tuple of header zoom values
Examples
--------
>>> hdr = AnalyzeHeader()
>>> hdr.get_zooms()
(1.0,)
>>> hdr.set_data_shape((1,2))
>>> hdr.get_zooms()
(1.0, 1.0)
>>> hdr.set_zooms((3, 4))
>>> hdr.get_zooms()
(3.0, 4.0)
|
def get_zooms(self):
"""Get zooms from header
Returns
-------
z : tuple
tuple of header zoom values
Examples
--------
>>> hdr = AnalyzeHeader()
>>> hdr.get_zooms()
(1.0,)
>>> hdr.set_data_shape((1,2))
>>> hdr.get_zooms()
(1.0, 1.0)
>>> hdr.set_zooms((3, 4))
>>> hdr.get_zooms()
(3.0, 4.0)
"""
hdr = self._structarr
dims = hdr['dim']
ndim = dims[0]
if ndim == 0:
return (1.0,)
pixdims = hdr['pixdim']
return tuple(pixdims[1 : ndim + 1])
|
(self)
|
56,677 |
nibabel.wrapstruct
|
items
|
Return items from structured data
|
def items(self):
"""Return items from structured data"""
return zip(self.keys(), self.values())
|
(self)
|
56,678 |
nibabel.wrapstruct
|
keys
|
Return keys from structured data
|
def keys(self):
"""Return keys from structured data"""
return list(self.template_dtype.names)
|
(self)
|
56,679 |
nibabel.analyze
|
raw_data_from_fileobj
|
Read unscaled data array from `fileobj`
Parameters
----------
fileobj : file-like
Must be open, and implement ``read`` and ``seek`` methods
Returns
-------
arr : ndarray
unscaled data array
|
def raw_data_from_fileobj(self, fileobj):
"""Read unscaled data array from `fileobj`
Parameters
----------
fileobj : file-like
Must be open, and implement ``read`` and ``seek`` methods
Returns
-------
arr : ndarray
unscaled data array
"""
dtype = self.get_data_dtype()
shape = self.get_data_shape()
offset = self.get_data_offset()
return array_from_file(shape, dtype, fileobj, offset)
|
(self, fileobj)
|
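A minimal sketch of ``raw_data_from_fileobj``, assuming a small ``BytesIO`` file: the header's shape, dtype and data offset drive the read, with no scaling applied.
from io import BytesIO
import numpy as np
from nibabel.analyze import AnalyzeHeader

hdr = AnalyzeHeader()
hdr.set_data_shape((3,))
hdr.set_data_dtype(np.uint8)
hdr.set_data_offset(4)                      # voxels start after 4 bytes of padding
fobj = BytesIO(b'\x00' * 4 + bytes([10, 20, 30]))
print(hdr.raw_data_from_fileobj(fobj))      # [10 20 30]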
56,680 |
nibabel.analyze
|
set_data_dtype
|
Set numpy dtype for data from code or dtype or type
Examples
--------
>>> hdr = AnalyzeHeader()
>>> hdr.set_data_dtype(np.uint8)
>>> hdr.get_data_dtype()
dtype('uint8')
>>> hdr.set_data_dtype(np.dtype(np.uint8))
>>> hdr.get_data_dtype()
dtype('uint8')
>>> hdr.set_data_dtype('implausible') #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
HeaderDataError: data dtype "implausible" not recognized
>>> hdr.set_data_dtype('none') #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
HeaderDataError: data dtype "none" known but not supported
>>> hdr.set_data_dtype(np.void) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
HeaderDataError: data dtype "<type 'numpy.void'>" known but not supported
|
def set_data_dtype(self, datatype):
"""Set numpy dtype for data from code or dtype or type
Examples
--------
>>> hdr = AnalyzeHeader()
>>> hdr.set_data_dtype(np.uint8)
>>> hdr.get_data_dtype()
dtype('uint8')
>>> hdr.set_data_dtype(np.dtype(np.uint8))
>>> hdr.get_data_dtype()
dtype('uint8')
>>> hdr.set_data_dtype('implausible') #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
HeaderDataError: data dtype "implausible" not recognized
>>> hdr.set_data_dtype('none') #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
HeaderDataError: data dtype "none" known but not supported
>>> hdr.set_data_dtype(np.void) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
HeaderDataError: data dtype "<type 'numpy.void'>" known but not supported
"""
dt = datatype
if dt not in self._data_type_codes:
try:
dt = np.dtype(dt)
except TypeError:
raise HeaderDataError(f'data dtype "{datatype}" not recognized')
if dt not in self._data_type_codes:
raise HeaderDataError(f'data dtype "{datatype}" not supported')
code = self._data_type_codes[dt]
dtype = self._data_type_codes.dtype[code]
# test for void, being careful of user-defined types
if dtype.type is np.void and not dtype.fields:
raise HeaderDataError(f'data dtype "{datatype}" known but not supported')
self._structarr['datatype'] = code
self._structarr['bitpix'] = dtype.itemsize * 8
|
(self, datatype)
|
56,681 |
nibabel.analyze
|
set_data_offset
|
Set offset into data file to read data
|
def set_data_offset(self, offset):
"""Set offset into data file to read data"""
self._structarr['vox_offset'] = offset
|
(self, offset)
|
56,682 |
nibabel.analyze
|
set_data_shape
|
Set shape of data
Zooms (``pixdim`` values) for dimensions higher than the new ``ndims`` are
reset to 1.0
Parameters
----------
shape : sequence
sequence of integers specifying data array shape
|
def set_data_shape(self, shape):
"""Set shape of data
Zooms (``pixdim`` values) for dimensions higher than the new ``ndims`` are
reset to 1.0
Parameters
----------
shape : sequence
sequence of integers specifying data array shape
"""
dims = self._structarr['dim']
ndims = len(shape)
dims[:] = 1
dims[0] = ndims
try:
dims[1 : ndims + 1] = shape
except (ValueError, OverflowError):
# numpy 1.4.1 at least generates a ValueError from trying to set a
# python long into an int64 array (dims are int64 for nifti2)
values_fit = False
else:
values_fit = np.all(dims[1 : ndims + 1] == shape)
# Error if we did not succeed setting dimensions
if not values_fit:
raise HeaderDataError(f'shape {shape} does not fit in dim datatype')
self._structarr['pixdim'][ndims + 1 :] = 1.0
|
(self, shape)
|
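A short sketch showing how the shape lands in the ``dim`` field; the shape values are arbitrary.
from nibabel.analyze import AnalyzeHeader

hdr = AnalyzeHeader()
hdr.set_data_shape((10, 20, 30))
print(hdr.get_data_shape())              # (10, 20, 30)
print(hdr['dim'][0], hdr['dim'][1:4])    # 3 [10 20 30]: ndim first, then the shape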
56,683 |
nibabel.analyze
|
set_slope_inter
|
Set slope and / or intercept into header
Set slope and intercept for image data, such that, if the image
data is ``arr``, then the scaled image data will be ``(arr *
slope) + inter``
In this case, for Analyze images, we can't store the slope or the
intercept, so this method only checks that `slope` is None or NaN or
1.0, and that `inter` is None or NaN or 0.
Parameters
----------
slope : None or float
If float, value must be NaN or 1.0 or we raise a ``HeaderTypeError``
inter : None or float, optional
If float, value must be 0.0 or we raise a ``HeaderTypeError``
|
def set_slope_inter(self, slope, inter=None):
"""Set slope and / or intercept into header
Set slope and intercept for image data, such that, if the image
data is ``arr``, then the scaled image data will be ``(arr *
slope) + inter``
In this case, for Analyze images, we can't store the slope or the
intercept, so this method only checks that `slope` is None or NaN or
1.0, and that `inter` is None or NaN or 0.
Parameters
----------
slope : None or float
If float, value must be NaN or 1.0 or we raise a ``HeaderTypeError``
inter : None or float, optional
If float, value must be 0.0 or we raise a ``HeaderTypeError``
"""
if (slope in (None, 1) or np.isnan(slope)) and (inter in (None, 0) or np.isnan(inter)):
return
raise HeaderTypeError('Cannot set slope != 1 or intercept != 0 for Analyze headers')
|
(self, slope, inter=None)
|
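A short sketch of accepted and rejected values; only identity scaling (or None / NaN) passes for plain Analyze.
from nibabel.analyze import AnalyzeHeader
from nibabel.spatialimages import HeaderTypeError

hdr = AnalyzeHeader()
hdr.set_slope_inter(None, None)    # nothing to store: fine
hdr.set_slope_inter(1.0, 0.0)      # identity scaling: also fine
try:
    hdr.set_slope_inter(2.0, 0.0)  # real scaling cannot be stored
except HeaderTypeError as err:
    print(err)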
56,684 |
nibabel.analyze
|
set_zooms
|
Set zooms into header fields
See docstring for ``get_zooms`` for examples
|
def set_zooms(self, zooms):
"""Set zooms into header fields
See docstring for ``get_zooms`` for examples
"""
hdr = self._structarr
dims = hdr['dim']
ndim = dims[0]
zooms = np.asarray(zooms)
if len(zooms) != ndim:
raise HeaderDataError('Expecting %d zoom values for ndim %d' % (ndim, ndim))
if np.any(zooms < 0):
raise HeaderDataError('zooms must be positive')
pixdims = hdr['pixdim']
pixdims[1 : ndim + 1] = zooms[:]
|
(self, zooms)
|
56,685 |
nibabel.wrapstruct
|
values
|
Return values from structured data
|
def values(self):
"""Return values from structured data"""
data = self._structarr
return [data[key] for key in self.template_dtype.names]
|
(self)
|
56,686 |
nibabel.wrapstruct
|
write_to
|
Write structure to fileobj
Write starts at fileobj current file position.
Parameters
----------
fileobj : file-like object
Should implement ``write`` method
Returns
-------
None
Examples
--------
>>> wstr = WrapStruct()
>>> from io import BytesIO
>>> str_io = BytesIO()
>>> wstr.write_to(str_io)
>>> wstr.binaryblock == str_io.getvalue()
True
|
def write_to(self, fileobj):
"""Write structure to fileobj
Write starts at fileobj current file position.
Parameters
----------
fileobj : file-like object
Should implement ``write`` method
Returns
-------
None
Examples
--------
>>> wstr = WrapStruct()
>>> from io import BytesIO
>>> str_io = BytesIO()
>>> wstr.write_to(str_io)
>>> wstr.binaryblock == str_io.getvalue()
True
"""
fileobj.write(self.binaryblock)
|
(self, fileobj)
|
56,687 |
nibabel.analyze
|
AnalyzeImage
|
Class for basic Analyze format image
|
class AnalyzeImage(SpatialImage):
"""Class for basic Analyze format image"""
header_class: type[AnalyzeHeader] = AnalyzeHeader
header: AnalyzeHeader
_meta_sniff_len = header_class.sizeof_hdr
files_types: tuple[tuple[str, str], ...] = (('image', '.img'), ('header', '.hdr'))
valid_exts: tuple[str, ...] = ('.img', '.hdr')
_compressed_suffixes: tuple[str, ...] = ('.gz', '.bz2', '.zst')
makeable = True
rw = True
ImageArrayProxy = ArrayProxy
def __init__(self, dataobj, affine, header=None, extra=None, file_map=None, dtype=None):
super().__init__(dataobj, affine, header, extra, file_map)
# Reset consumable values
self._header.set_data_offset(0)
self._header.set_slope_inter(None, None)
if dtype is not None:
self.set_data_dtype(dtype)
__init__.__doc__ = SpatialImage.__init__.__doc__
def get_data_dtype(self):
return self._header.get_data_dtype()
def set_data_dtype(self, dtype):
self._header.set_data_dtype(dtype)
@classmethod
def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None):
"""Class method to create image from mapping in ``file_map``
Parameters
----------
file_map : dict
Mapping with (key, value) pairs of (``file_type``, FileHolder
instance) giving file-likes for each file needed for this image
type.
mmap : {True, False, 'c', 'r'}, optional, keyword only
`mmap` controls the use of numpy memory mapping for reading image
array data. If False, do not try numpy ``memmap`` for data array.
If one of {'c', 'r'}, try numpy memmap with ``mode=mmap``. A
`mmap` value of True gives the same behavior as ``mmap='c'``. If
image data file cannot be memory-mapped, ignore `mmap` value and
read array from file.
keep_file_open : { None, True, False }, optional, keyword only
`keep_file_open` controls whether a new file handle is created
every time the image is accessed, or a single file handle is
created and used for the lifetime of this ``ArrayProxy``. If
``True``, a single file handle is created and used. If ``False``,
a new file handle is created every time the image is accessed.
If ``file_map`` refers to an open file handle, this setting has no
effect. The default value (``None``) will result in the value of
``nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT`` being used.
Returns
-------
img : AnalyzeImage instance
"""
if mmap not in (True, False, 'c', 'r'):
raise ValueError("mmap should be one of {True, False, 'c', 'r'}")
hdr_fh, img_fh = klass._get_fileholders(file_map)
with hdr_fh.get_prepare_fileobj(mode='rb') as hdrf:
header = klass.header_class.from_fileobj(hdrf)
hdr_copy = header.copy()
imgf = img_fh.fileobj
if imgf is None:
imgf = img_fh.filename
data = klass.ImageArrayProxy(imgf, hdr_copy, mmap=mmap, keep_file_open=keep_file_open)
# Initialize without affine to allow header to pass through unmodified
img = klass(data, None, header, file_map=file_map)
# set affine from header though
img._affine = header.get_best_affine()
img._load_cache = {
'header': hdr_copy,
'affine': img._affine.copy(),
'file_map': copy_file_map(file_map),
}
return img
@staticmethod
def _get_fileholders(file_map):
"""Return fileholder for header and image
Allows single-file image types to return one fileholder for both types.
For Analyze there are two fileholders, one for the header, one for the
image.
"""
return file_map['header'], file_map['image']
def to_file_map(self, file_map=None, dtype=None):
"""Write image to `file_map` or contained ``self.file_map``
Parameters
----------
file_map : None or mapping, optional
files mapping. If None (default) use object's ``file_map``
attribute instead
dtype : dtype-like, optional
The on-disk data type to coerce the data array.
"""
if file_map is None:
file_map = self.file_map
data = np.asanyarray(self.dataobj)
self.update_header()
hdr = self._header
# Store consumable values for later restore
offset = hdr.get_data_offset()
data_dtype = hdr.get_data_dtype()
# Override dtype conditionally
if dtype is not None:
hdr.set_data_dtype(dtype)
out_dtype = hdr.get_data_dtype()
# Scalars of slope, offset to get immutable values
slope = hdr['scl_slope'].item() if hdr.has_data_slope else np.nan
inter = hdr['scl_inter'].item() if hdr.has_data_intercept else np.nan
# Check whether to calculate slope / inter
scale_me = np.all(np.isnan((slope, inter)))
try:
if scale_me:
arr_writer = make_array_writer(
data, out_dtype, hdr.has_data_slope, hdr.has_data_intercept
)
else:
arr_writer = ArrayWriter(data, out_dtype, check_scaling=False)
except WriterError:
# Restore any changed consumable values, in case caller catches
# Should match cleanup at the end of the method
hdr.set_data_offset(offset)
hdr.set_data_dtype(data_dtype)
if hdr.has_data_slope:
hdr['scl_slope'] = slope
if hdr.has_data_intercept:
hdr['scl_inter'] = inter
raise
hdr_fh, img_fh = self._get_fileholders(file_map)
# Check if hdr and img refer to same file; this can happen with odd
# analyze images but most often this is because it's a single nifti
# file
hdr_img_same = hdr_fh.same_file_as(img_fh)
hdrf = hdr_fh.get_prepare_fileobj(mode='wb')
if hdr_img_same:
imgf = hdrf
else:
imgf = img_fh.get_prepare_fileobj(mode='wb')
# Rescale values if asked
if scale_me:
hdr.set_slope_inter(*get_slope_inter(arr_writer))
# Write header
hdr.write_to(hdrf)
# Write image
# Seek to writing position, get there by writing zeros if seek fails
seek_tell(imgf, hdr.get_data_offset(), write0=True)
# Write array data
arr_writer.to_fileobj(imgf)
hdrf.close_if_mine()
if not hdr_img_same:
imgf.close_if_mine()
self._header = hdr
self.file_map = file_map
# Restore any changed consumable values
hdr.set_data_offset(offset)
hdr.set_data_dtype(data_dtype)
if hdr.has_data_slope:
hdr['scl_slope'] = slope
if hdr.has_data_intercept:
hdr['scl_inter'] = inter
|
(dataobj, affine, header=None, extra=None, file_map=None, dtype=None)
|
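A minimal round-trip sketch for ``AnalyzeImage``; the filename 'example.img' is purely illustrative, and saving produces the usual ``.img`` / ``.hdr`` pair.
import numpy as np
import nibabel as nib

data = np.zeros((4, 5, 6), dtype=np.int16)
img = nib.AnalyzeImage(data, affine=np.eye(4))
nib.save(img, 'example.img')          # writes example.img and example.hdr
back = nib.load('example.img')
assert back.shape == (4, 5, 6)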
56,688 |
nibabel.spatialimages
|
__getitem__
|
No slicing or dictionary interface for images
Use the slicer attribute to perform cropping and subsampling at your
own risk.
|
def __getitem__(self, idx: object) -> None:
"""No slicing or dictionary interface for images
Use the slicer attribute to perform cropping and subsampling at your
own risk.
"""
raise TypeError(
'Cannot slice image objects; consider using `img.slicer[slice]` '
'to generate a sliced image (see documentation for caveats) or '
'slicing image array data with `img.dataobj[slice]` or '
'`img.get_fdata()[slice]`'
)
|
(self, idx: object) -> NoneType
|
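A brief sketch of the two supported alternatives mentioned above; a NIfTI image is used here just as a convenient ``SpatialImage`` subclass.
import numpy as np
import nibabel as nib

img = nib.Nifti1Image(np.zeros((10, 10, 10)), np.eye(4))
sub = img.slicer[2:8, 2:8, :]      # new image, affine adjusted for the crop
block = img.dataobj[2:8, 2:8, :]   # raw array data only
print(sub.shape, block.shape)      # (6, 6, 10) (6, 6, 10)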
56,689 |
nibabel.analyze
|
__init__
|
Initialize image
The image is a combination of (array-like, affine matrix, header), with
optional metadata in `extra`, and filename / file-like objects
contained in the `file_map` mapping.
Parameters
----------
dataobj : object
Object containing image data. It should be some object that returns an
array from ``np.asanyarray``. It should have a ``shape`` attribute
or property
affine : None or (4,4) array-like
homogeneous affine giving relationship between voxel coordinates and
world coordinates. Affine can also be None. In this case,
``obj.affine`` also returns None, and the affine as written to disk
will depend on the file format.
header : None or mapping or header instance, optional
metadata for this image format
extra : None or mapping, optional
metadata to associate with image that cannot be stored in the
metadata of this image type
file_map : mapping, optional
mapping giving file information for this image format
|
def __init__(self, dataobj, affine, header=None, extra=None, file_map=None, dtype=None):
super().__init__(dataobj, affine, header, extra, file_map)
# Reset consumable values
self._header.set_data_offset(0)
self._header.set_slope_inter(None, None)
if dtype is not None:
self.set_data_dtype(dtype)
|
(self, dataobj, affine, header=None, extra=None, file_map=None, dtype=None)
|
56,690 |
nibabel.spatialimages
|
__str__
| null |
def __str__(self) -> str:
shape = self.shape
affine = self.affine
return f"""
{self.__class__}
data shape {shape}
affine:
{affine}
metadata:
{self._header}
"""
|
(self) -> str
|
56,691 |
nibabel.spatialimages
|
_affine2header
|
Unconditionally set affine into the header
|
def _affine2header(self) -> None:
"""Unconditionally set affine into the header"""
assert self._affine is not None
RZS = self._affine[:3, :3]
vox = np.sqrt(np.sum(RZS * RZS, axis=0))
hdr = self._header
zooms = list(hdr.get_zooms())
n_to_set = min(len(zooms), 3)
zooms[:n_to_set] = vox[:n_to_set]
hdr.set_zooms(zooms)
|
(self) -> NoneType
|
56,692 |
nibabel.analyze
|
_get_fileholders
|
Return fileholder for header and image
Allows single-file image types to return one fileholder for both types.
For Analyze there are two fileholders, one for the header, one for the
image.
|
@staticmethod
def _get_fileholders(file_map):
"""Return fileholder for header and image
Allows single-file image types to return one fileholder for both types.
For Analyze there are two fileholders, one for the header, one for the
image.
"""
return file_map['header'], file_map['image']
|
(file_map)
|
56,693 |
nibabel.spatialimages
|
as_reoriented
|
Apply an orientation change and return a new image
If ornt is identity transform, return the original image, unchanged
Parameters
----------
ornt : (n,2) orientation array
orientation transform. ``ornt[N,1]`` is flip of axis N of the
array implied by `shape`, where 1 means no flip and -1 means
flip. For example, if ``N==0`` and ``ornt[0,1] == -1``, and
there's an array ``arr`` of shape `shape`, the flip would
correspond to the effect of ``np.flipud(arr)``. ``ornt[:,0]`` is
the transpose that needs to be done to the implied array, as in
``arr.transpose(ornt[:,0])``
Notes
-----
Subclasses should override this if they have additional requirements
when re-orienting an image.
|
def as_reoriented(self: SpatialImgT, ornt: Sequence[Sequence[int]]) -> SpatialImgT:
"""Apply an orientation change and return a new image
If ornt is identity transform, return the original image, unchanged
Parameters
----------
ornt : (n,2) orientation array
orientation transform. ``ornt[N,1]`` is flip of axis N of the
array implied by `shape`, where 1 means no flip and -1 means
flip. For example, if ``N==0`` and ``ornt[0,1] == -1``, and
there's an array ``arr`` of shape `shape`, the flip would
correspond to the effect of ``np.flipud(arr)``. ``ornt[:,0]`` is
the transpose that needs to be done to the implied array, as in
``arr.transpose(ornt[:,0])``
Notes
-----
Subclasses should override this if they have additional requirements
when re-orienting an image.
"""
if np.array_equal(ornt, [[0, 1], [1, 1], [2, 1]]):
return self
t_arr = apply_orientation(np.asanyarray(self.dataobj), ornt)
new_aff = self.affine.dot(inv_ornt_aff(ornt, self.shape))
return self.__class__(t_arr, new_aff, self.header)
|
(self: ~SpatialImgT, ornt: collections.abc.Sequence[collections.abc.Sequence[int]]) -> ~SpatialImgT
|
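A sketch of a common use of ``as_reoriented``: compute the transform from the image's current orientation to RAS with the ``nibabel.orientations`` helpers, then apply it. The starting affine is an assumption chosen to make the flip visible.
import numpy as np
import nibabel as nib
from nibabel.orientations import axcodes2ornt, io_orientation, ornt_transform

img = nib.Nifti1Image(np.zeros((3, 4, 5)), np.diag([-2., 2., 2., 1.]))  # 'LAS' to start
ornt = ornt_transform(io_orientation(img.affine), axcodes2ornt('RAS'))
ras_img = img.as_reoriented(ornt)
print(nib.aff2axcodes(ras_img.affine))   # ('R', 'A', 'S')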
56,694 |
nibabel.dataobj_images
|
get_data
|
Return image data from image with any necessary scaling applied
get_data() is deprecated in favor of get_fdata(), which has a more predictable return type. To obtain get_data() behavior going forward, use numpy.asanyarray(img.dataobj).
* deprecated from version: 3.0
* Raises <class 'nibabel.deprecator.ExpiredDeprecationError'> as of version: 5.0
|
def get_data(self, caching='fill'):
"""Return image data from image with any necessary scaling applied
.. WARNING::
We recommend you use the ``get_fdata`` method instead of the
``get_data`` method, because it is easier to predict the return
data type. ``get_data`` will be deprecated around November 2019
and removed around November 2021.
If you don't care about the predictability of the return data type,
and you want the minimum possible data size in memory, you can
replicate the array that would be returned by ``img.get_data()`` by
using ``np.asanyarray(img.dataobj)``.
The image ``dataobj`` property can be an array proxy or an array. An
array proxy is an object that knows how to load the image data from
disk. An image with an array proxy ``dataobj`` is a *proxy image*; an
image with an array in ``dataobj`` is an *array image*.
The default behavior for ``get_data()`` on a proxy image is to read the
data from the proxy, and store in an internal cache. Future calls to
``get_data`` will return the cached array. This is the behavior
selected with `caching` == "fill".
Once the data has been cached and returned from an array proxy, if you
modify the returned array, you will also modify the cached array
(because they are the same array). Regardless of the `caching` flag,
this is always true of an array image.
Parameters
----------
caching : {'fill', 'unchanged'}, optional
See the Notes section for a detailed explanation. This argument
specifies whether the image object should fill in an internal
cached reference to the returned image data array. "fill" specifies
that the image should fill an internal cached reference if
currently empty. Future calls to ``get_data`` will return this
cached reference. You might prefer "fill" to save the image object
from having to reload the array data from disk on each call to
``get_data``. "unchanged" means that the image should not fill in
the internal cached reference if the cache is currently empty. You
might prefer "unchanged" to "fill" if you want to make sure that
the call to ``get_data`` does not create an extra (cached)
reference to the returned array. In this case it is easier for
Python to free the memory from the returned array.
Returns
-------
data : array
array of image data
See also
--------
uncache: empty the array data cache
Notes
-----
All images have a property ``dataobj`` that represents the image array
data. Images that have been loaded from files usually do not load the
array data from file immediately, in order to reduce image load time
and memory use. For these images, ``dataobj`` is an *array proxy*; an
object that knows how to load the image array data from file.
By default (`caching` == "fill"), when you call ``get_data`` on a
proxy image, we load the array data from disk, store (cache) an
internal reference to this array data, and return the array. The next
time you call ``get_data``, you will get the cached reference to the
array, so we don't have to load the array data from disk again.
Array images have a ``dataobj`` property that already refers to an
array in memory, so there is no benefit to caching, and the `caching`
keywords have no effect.
For proxy images, you may not want to fill the cache after reading the
data from disk because the cache will hold onto the array memory until
the image object is deleted, or you use the image ``uncache`` method.
If you don't want to fill the cache, then always use
``get_data(caching='unchanged')``; in this case ``get_data`` will not
fill the cache (store the reference to the array) if the cache is empty
(no reference to the array). If the cache is full, "unchanged" leaves
the cache full and returns the cached array reference.
The cache can affect the behavior of the image, because if the cache is
full, or you have an array image, then modifying the returned array
will modify the result of future calls to ``get_data()``. For example
you might do this:
>>> import os
>>> import nibabel as nib
>>> from nibabel.testing import data_path
>>> img_fname = os.path.join(data_path, 'example4d.nii.gz')
>>> img = nib.load(img_fname) # This is a proxy image
>>> nib.is_proxy(img.dataobj)
True
The array is not yet cached by a call to "get_data", so:
>>> img.in_memory
False
After we call ``get_data`` using the default `caching` == 'fill', the
cache contains a reference to the returned array ``data``:
>>> data = img.get_data()
>>> img.in_memory
True
We modify an element in the returned data array:
>>> data[0, 0, 0, 0]
0
>>> data[0, 0, 0, 0] = 99
>>> data[0, 0, 0, 0]
99
The next time we call 'get_data', the method returns the cached
reference to the (modified) array:
>>> data_again = img.get_data()
>>> data_again is data
True
>>> data_again[0, 0, 0, 0]
99
If you had *initially* used `caching` == 'unchanged' then the returned
``data`` array would have been loaded from file, but not cached, and:
>>> img = nib.load(img_fname) # a proxy image again
>>> data = img.get_data(caching='unchanged')
>>> img.in_memory
False
>>> data[0, 0, 0] = 99
>>> data_again = img.get_data(caching='unchanged')
>>> data_again is data
False
>>> data_again[0, 0, 0, 0]
0
"""
if caching not in ('fill', 'unchanged'):
raise ValueError('caching value should be "fill" or "unchanged"')
if self._data_cache is not None:
return self._data_cache
data = np.asanyarray(self._dataobj)
if caching == 'fill':
self._data_cache = data
return data
|
(self, caching='fill')
|
56,695 |
nibabel.analyze
|
get_data_dtype
| null |
def get_data_dtype(self):
return self._header.get_data_dtype()
|
(self)
|
56,696 |
nibabel.dataobj_images
|
get_fdata
|
Return floating point image data with necessary scaling applied
The image ``dataobj`` property can be an array proxy or an array. An
array proxy is an object that knows how to load the image data from
disk. An image with an array proxy ``dataobj`` is a *proxy image*; an
image with an array in ``dataobj`` is an *array image*.
The default behavior for ``get_fdata()`` on a proxy image is to read
the data from the proxy, and store in an internal cache. Future calls
to ``get_fdata`` will return the cached array. This is the behavior
selected with `caching` == "fill".
Once the data has been cached and returned from an array proxy, if you
modify the returned array, you will also modify the cached array
(because they are the same array). Regardless of the `caching` flag,
this is always true of an array image.
Parameters
----------
caching : {'fill', 'unchanged'}, optional
See the Notes section for a detailed explanation. This argument
specifies whether the image object should fill in an internal
cached reference to the returned image data array. "fill" specifies
that the image should fill an internal cached reference if
currently empty. Future calls to ``get_fdata`` will return this
cached reference. You might prefer "fill" to save the image object
from having to reload the array data from disk on each call to
``get_fdata``. "unchanged" means that the image should not fill in
the internal cached reference if the cache is currently empty. You
might prefer "unchanged" to "fill" if you want to make sure that
the call to ``get_fdata`` does not create an extra (cached)
reference to the returned array. In this case it is easier for
Python to free the memory from the returned array.
dtype : numpy dtype specifier
A numpy dtype specifier specifying a floating point type. Data is
returned as this floating point type. Default is ``np.float64``.
Returns
-------
fdata : array
Array of image data of data type `dtype`.
See also
--------
uncache: empty the array data cache
Notes
-----
All images have a property ``dataobj`` that represents the image array
data. Images that have been loaded from files usually do not load the
array data from file immediately, in order to reduce image load time
and memory use. For these images, ``dataobj`` is an *array proxy*; an
object that knows how to load the image array data from file.
By default (`caching` == "fill"), when you call ``get_fdata`` on a
proxy image, we load the array data from disk, store (cache) an
internal reference to this array data, and return the array. The next
time you call ``get_fdata``, you will get the cached reference to the
array, so we don't have to load the array data from disk again.
Array images have a ``dataobj`` property that already refers to an
array in memory, so there is no benefit to caching, and the `caching`
keywords have no effect.
For proxy images, you may not want to fill the cache after reading the
data from disk because the cache will hold onto the array memory until
the image object is deleted, or you use the image ``uncache`` method.
If you don't want to fill the cache, then always use
``get_fdata(caching='unchanged')``; in this case ``get_fdata`` will not
fill the cache (store the reference to the array) if the cache is empty
(no reference to the array). If the cache is full, "unchanged" leaves
the cache full and returns the cached array reference.
The cache can affect the behavior of the image, because if the cache is
full, or you have an array image, then modifying the returned array
will modify the result of future calls to ``get_fdata()``. For example
you might do this:
>>> import os
>>> import nibabel as nib
>>> from nibabel.testing import data_path
>>> img_fname = os.path.join(data_path, 'example4d.nii.gz')
>>> img = nib.load(img_fname) # This is a proxy image
>>> nib.is_proxy(img.dataobj)
True
The array is not yet cached by a call to "get_fdata", so:
>>> img.in_memory
False
After we call ``get_fdata`` using the default `caching` == 'fill', the
cache contains a reference to the returned array ``data``:
>>> data = img.get_fdata()
>>> img.in_memory
True
We modify an element in the returned data array:
>>> data[0, 0, 0, 0]
0.0
>>> data[0, 0, 0, 0] = 99
>>> data[0, 0, 0, 0]
99.0
The next time we call 'get_fdata', the method returns the cached
reference to the (modified) array:
>>> data_again = img.get_fdata()
>>> data_again is data
True
>>> data_again[0, 0, 0, 0]
99.0
If you had *initially* used `caching` == 'unchanged' then the returned
``data`` array would have been loaded from file, but not cached, and:
>>> img = nib.load(img_fname) # a proxy image again
>>> data = img.get_fdata(caching='unchanged')
>>> img.in_memory
False
>>> data[0, 0, 0] = 99
>>> data_again = img.get_fdata(caching='unchanged')
>>> data_again is data
False
>>> data_again[0, 0, 0, 0]
0.0
|
def get_fdata(
self,
caching: ty.Literal['fill', 'unchanged'] = 'fill',
dtype: npt.DTypeLike = np.float64,
) -> np.ndarray[ty.Any, np.dtype[np.floating]]:
"""Return floating point image data with necessary scaling applied
The image ``dataobj`` property can be an array proxy or an array. An
array proxy is an object that knows how to load the image data from
disk. An image with an array proxy ``dataobj`` is a *proxy image*; an
image with an array in ``dataobj`` is an *array image*.
The default behavior for ``get_fdata()`` on a proxy image is to read
the data from the proxy, and store in an internal cache. Future calls
to ``get_fdata`` will return the cached array. This is the behavior
selected with `caching` == "fill".
Once the data has been cached and returned from an array proxy, if you
modify the returned array, you will also modify the cached array
(because they are the same array). Regardless of the `caching` flag,
this is always true of an array image.
Parameters
----------
caching : {'fill', 'unchanged'}, optional
See the Notes section for a detailed explanation. This argument
specifies whether the image object should fill in an internal
cached reference to the returned image data array. "fill" specifies
that the image should fill an internal cached reference if
currently empty. Future calls to ``get_fdata`` will return this
cached reference. You might prefer "fill" to save the image object
from having to reload the array data from disk on each call to
``get_fdata``. "unchanged" means that the image should not fill in
the internal cached reference if the cache is currently empty. You
might prefer "unchanged" to "fill" if you want to make sure that
the call to ``get_fdata`` does not create an extra (cached)
reference to the returned array. In this case it is easier for
Python to free the memory from the returned array.
dtype : numpy dtype specifier
A numpy dtype specifier specifying a floating point type. Data is
returned as this floating point type. Default is ``np.float64``.
Returns
-------
fdata : array
Array of image data of data type `dtype`.
See also
--------
uncache: empty the array data cache
Notes
-----
All images have a property ``dataobj`` that represents the image array
data. Images that have been loaded from files usually do not load the
array data from file immediately, in order to reduce image load time
and memory use. For these images, ``dataobj`` is an *array proxy*; an
object that knows how to load the image array data from file.
By default (`caching` == "fill"), when you call ``get_fdata`` on a
proxy image, we load the array data from disk, store (cache) an
internal reference to this array data, and return the array. The next
time you call ``get_fdata``, you will get the cached reference to the
array, so we don't have to load the array data from disk again.
Array images have a ``dataobj`` property that already refers to an
array in memory, so there is no benefit to caching, and the `caching`
keywords have no effect.
For proxy images, you may not want to fill the cache after reading the
data from disk because the cache will hold onto the array memory until
the image object is deleted, or you use the image ``uncache`` method.
If you don't want to fill the cache, then always use
``get_fdata(caching='unchanged')``; in this case ``get_fdata`` will not
fill the cache (store the reference to the array) if the cache is empty
(no reference to the array). If the cache is full, "unchanged" leaves
the cache full and returns the cached array reference.
The cache can affect the behavior of the image, because if the cache is
full, or you have an array image, then modifying the returned array
will modify the result of future calls to ``get_fdata()``. For example
you might do this:
>>> import os
>>> import nibabel as nib
>>> from nibabel.testing import data_path
>>> img_fname = os.path.join(data_path, 'example4d.nii.gz')
>>> img = nib.load(img_fname) # This is a proxy image
>>> nib.is_proxy(img.dataobj)
True
The array is not yet cached by a call to "get_fdata", so:
>>> img.in_memory
False
After we call ``get_fdata`` using the default `caching` == 'fill', the
cache contains a reference to the returned array ``data``:
>>> data = img.get_fdata()
>>> img.in_memory
True
We modify an element in the returned data array:
>>> data[0, 0, 0, 0]
0.0
>>> data[0, 0, 0, 0] = 99
>>> data[0, 0, 0, 0]
99.0
The next time we call 'get_fdata', the method returns the cached
reference to the (modified) array:
>>> data_again = img.get_fdata()
>>> data_again is data
True
>>> data_again[0, 0, 0, 0]
99.0
If you had *initially* used `caching` == 'unchanged' then the returned
``data`` array would have been loaded from file, but not cached, and:
>>> img = nib.load(img_fname) # a proxy image again
>>> data = img.get_fdata(caching='unchanged')
>>> img.in_memory
False
>>> data[0, 0, 0] = 99
>>> data_again = img.get_fdata(caching='unchanged')
>>> data_again is data
False
>>> data_again[0, 0, 0, 0]
0.0
"""
if caching not in ('fill', 'unchanged'):
raise ValueError('caching value should be "fill" or "unchanged"')
dtype = np.dtype(dtype)
if not issubclass(dtype.type, np.inexact):
raise ValueError(f'{dtype} should be floating point type')
# Return cache if cache present and of correct dtype.
if self._fdata_cache is not None:
if self._fdata_cache.dtype.type == dtype.type:
return self._fdata_cache
# Always return requested data type
# For array proxies, will attempt to confine data array to dtype
# during scaling
data = np.asanyarray(self._dataobj, dtype=dtype)
if caching == 'fill':
self._fdata_cache = data
return data
|
(self, caching: "ty.Literal['fill', 'unchanged']" = 'fill', dtype: 'npt.DTypeLike' = <class 'numpy.float64'>) -> 'np.ndarray[ty.Any, np.dtype[np.floating]]'
|
56,697 |
nibabel.filebasedimages
|
get_filename
|
Fetch the image filename
Parameters
----------
None
Returns
-------
fname : None or str
Returns None if there is no filename, or a filename string.
If an image may have several filenames associated with it (e.g.
Analyze ``.img, .hdr`` pair) then we return the more characteristic
filename (the ``.img`` filename in the case of Analyze)
|
def get_filename(self) -> str | None:
"""Fetch the image filename
Parameters
----------
None
Returns
-------
fname : None or str
Returns None if there is no filename, or a filename string.
If an image may have several filenames associated with it (e.g.
Analyze ``.img, .hdr`` pair) then we return the more characteristic
filename (the ``.img`` filename in the case of Analyze)
"""
# which filename is returned depends on the ordering of the
# 'files_types' class attribute - we return the name
# corresponding to the first in that tuple
characteristic_type = self.files_types[0][0]
return self.file_map[characteristic_type].filename
|
(self) -> str | None
|
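A small sketch of ``get_filename``; 'subject1.img' is an illustrative name and nothing is written to disk by these calls.
import numpy as np
import nibabel as nib

img = nib.AnalyzeImage(np.zeros((2, 2, 2)), np.eye(4))
print(img.get_filename())          # None: no filename set yet
img.set_filename('subject1.img')
print(img.get_filename())          # subject1.img (the characteristic .img name)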
56,698 |
nibabel.spatialimages
|
orthoview
|
Plot the image using OrthoSlicer3D
Returns
-------
viewer : instance of OrthoSlicer3D
The viewer.
Notes
-----
This requires matplotlib. If a non-interactive backend is used,
consider using viewer.show() (equivalently plt.show()) to show
the figure.
|
def orthoview(self) -> OrthoSlicer3D:
"""Plot the image using OrthoSlicer3D
Returns
-------
viewer : instance of OrthoSlicer3D
The viewer.
Notes
-----
This requires matplotlib. If a non-interactive backend is used,
consider using viewer.show() (equivalently plt.show()) to show
the figure.
"""
return OrthoSlicer3D(self.dataobj, self.affine, title=self.get_filename())
|
(self) -> nibabel.viewers.OrthoSlicer3D
|
56,699 |
nibabel.analyze
|
set_data_dtype
| null |
def set_data_dtype(self, dtype):
self._header.set_data_dtype(dtype)
|
(self, dtype)
|
56,700 |
nibabel.filebasedimages
|
set_filename
|
Sets the files in the object from a given filename
The different image formats may check whether the filename has
an extension characteristic of the format, and raise an error if
not.
Parameters
----------
filename : str or os.PathLike
If the image format only has one file associated with it,
this will be the only filename set into the image
``.file_map`` attribute. Otherwise, the image instance will
try and guess the other filenames from this given filename.
|
def set_filename(self, filename: str) -> None:
"""Sets the files in the object from a given filename
The different image formats may check whether the filename has
an extension characteristic of the format, and raise an error if
not.
Parameters
----------
filename : str or os.PathLike
If the image format only has one file associated with it,
this will be the only filename set into the image
``.file_map`` attribute. Otherwise, the image instance will
try and guess the other filenames from this given filename.
"""
self.file_map = self.__class__.filespec_to_file_map(filename)
|
(self, filename: str) -> NoneType
|
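A small sketch showing the guessed companion name for the Analyze pair; 'scan.img' is illustrative and no file is written by ``set_filename`` itself.
import numpy as np
import nibabel as nib

img = nib.AnalyzeImage(np.zeros((2, 2, 2)), np.eye(4))
img.set_filename('scan.img')
print(img.file_map['image'].filename)    # scan.img
print(img.file_map['header'].filename)   # scan.hdr, guessed from the given name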
56,701 |
nibabel.analyze
|
to_file_map
|
Write image to `file_map` or contained ``self.file_map``
Parameters
----------
file_map : None or mapping, optional
files mapping. If None (default) use object's ``file_map``
attribute instead
dtype : dtype-like, optional
The on-disk data type to coerce the data array.
|
def to_file_map(self, file_map=None, dtype=None):
"""Write image to `file_map` or contained ``self.file_map``
Parameters
----------
file_map : None or mapping, optional
files mapping. If None (default) use object's ``file_map``
attribute instead
dtype : dtype-like, optional
The on-disk data type to coerce the data array.
"""
if file_map is None:
file_map = self.file_map
data = np.asanyarray(self.dataobj)
self.update_header()
hdr = self._header
# Store consumable values for later restore
offset = hdr.get_data_offset()
data_dtype = hdr.get_data_dtype()
# Override dtype conditionally
if dtype is not None:
hdr.set_data_dtype(dtype)
out_dtype = hdr.get_data_dtype()
# Scalars of slope, offset to get immutable values
slope = hdr['scl_slope'].item() if hdr.has_data_slope else np.nan
inter = hdr['scl_inter'].item() if hdr.has_data_intercept else np.nan
# Check whether to calculate slope / inter
scale_me = np.all(np.isnan((slope, inter)))
try:
if scale_me:
arr_writer = make_array_writer(
data, out_dtype, hdr.has_data_slope, hdr.has_data_intercept
)
else:
arr_writer = ArrayWriter(data, out_dtype, check_scaling=False)
except WriterError:
# Restore any changed consumable values, in case caller catches
# Should match cleanup at the end of the method
hdr.set_data_offset(offset)
hdr.set_data_dtype(data_dtype)
if hdr.has_data_slope:
hdr['scl_slope'] = slope
if hdr.has_data_intercept:
hdr['scl_inter'] = inter
raise
hdr_fh, img_fh = self._get_fileholders(file_map)
# Check if hdr and img refer to same file; this can happen with odd
# analyze images but most often this is because it's a single nifti
# file
hdr_img_same = hdr_fh.same_file_as(img_fh)
hdrf = hdr_fh.get_prepare_fileobj(mode='wb')
if hdr_img_same:
imgf = hdrf
else:
imgf = img_fh.get_prepare_fileobj(mode='wb')
# Rescale values if asked
if scale_me:
hdr.set_slope_inter(*get_slope_inter(arr_writer))
# Write header
hdr.write_to(hdrf)
# Write image
# Seek to writing position, get there by writing zeros if seek fails
seek_tell(imgf, hdr.get_data_offset(), write0=True)
# Write array data
arr_writer.to_fileobj(imgf)
hdrf.close_if_mine()
if not hdr_img_same:
imgf.close_if_mine()
self._header = hdr
self.file_map = file_map
# Restore any changed consumable values
hdr.set_data_offset(offset)
hdr.set_data_dtype(data_dtype)
if hdr.has_data_slope:
hdr['scl_slope'] = slope
if hdr.has_data_intercept:
hdr['scl_inter'] = inter
|
(self, file_map=None, dtype=None)
|
56,702 |
nibabel.filebasedimages
|
to_filename
|
Write image to files implied by filename string
Parameters
----------
filename : str or os.PathLike
filename to which to save image. We will parse `filename`
with ``filespec_to_file_map`` to work out names for image,
header etc.
\*\*kwargs : keyword arguments
Keyword arguments to format-specific save
Returns
-------
None
|
def to_filename(self, filename: FileSpec, **kwargs) -> None:
r"""Write image to files implied by filename string
Parameters
----------
filename : str or os.PathLike
filename to which to save image. We will parse `filename`
with ``filespec_to_file_map`` to work out names for image,
header etc.
\*\*kwargs : keyword arguments
Keyword arguments to format-specific save
Returns
-------
None
"""
self.file_map = self.filespec_to_file_map(filename)
self.to_file_map(**kwargs)
|
(self, filename: 'FileSpec', **kwargs) -> 'None'
|
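A minimal sketch of ``to_filename``; 'out.nii.gz' is an illustrative path, and this is the call ``nib.save`` tries first when the extension matches the image class.
import numpy as np
import nibabel as nib

img = nib.Nifti1Image(np.ones((2, 3, 4), dtype=np.float32), np.eye(4))
img.to_filename('out.nii.gz')            # parses the name, then writes via to_file_map
print(nib.load('out.nii.gz').shape)      # (2, 3, 4)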
56,703 |
nibabel.dataobj_images
|
uncache
|
Delete any cached read of data from proxied data
Remember there are two types of images:
* *array images* where the data ``img.dataobj`` is an array
* *proxy images* where the data ``img.dataobj`` is a proxy object
If you call ``img.get_fdata()`` on a proxy image, the result of reading
from the proxy gets cached inside the image object, and this cache is
what gets returned from the next call to ``img.get_fdata()``. If you
modify the returned data, as in::
data = img.get_fdata()
data[:] = 42
then the next call to ``img.get_fdata()`` returns the modified array,
whether the image is an array image or a proxy image::
assert np.all(img.get_fdata() == 42)
When you uncache an array image, this has no effect on the return of
``img.get_fdata()``, but when you uncache a proxy image, the result of
``img.get_fdata()`` returns to its original value.
|
def uncache(self) -> None:
"""Delete any cached read of data from proxied data
Remember there are two types of images:
* *array images* where the data ``img.dataobj`` is an array
* *proxy images* where the data ``img.dataobj`` is a proxy object
If you call ``img.get_fdata()`` on a proxy image, the result of reading
from the proxy gets cached inside the image object, and this cache is
what gets returned from the next call to ``img.get_fdata()``. If you
modify the returned data, as in::
data = img.get_fdata()
data[:] = 42
then the next call to ``img.get_fdata()`` returns the modified array,
whether the image is an array image or a proxy image::
assert np.all(img.get_fdata() == 42)
When you uncache an array image, this has no effect on the return of
``img.get_fdata()``, but when you uncache a proxy image, the result of
``img.get_fdata()`` returns to its original value.
"""
self._fdata_cache = None
self._data_cache = None
|
(self) -> NoneType
|
56,704 |
nibabel.spatialimages
|
update_header
|
Harmonize header with image data and affine
>>> data = np.zeros((2,3,4))
>>> affine = np.diag([1.0,2.0,3.0,1.0])
>>> img = SpatialImage(data, affine)
>>> img.shape == (2, 3, 4)
True
>>> img.update_header()
>>> img.header.get_data_shape() == (2, 3, 4)
True
>>> img.header.get_zooms()
(1.0, 2.0, 3.0)
|
def update_header(self) -> None:
"""Harmonize header with image data and affine
>>> data = np.zeros((2,3,4))
>>> affine = np.diag([1.0,2.0,3.0,1.0])
>>> img = SpatialImage(data, affine)
>>> img.shape == (2, 3, 4)
True
>>> img.update_header()
>>> img.header.get_data_shape() == (2, 3, 4)
True
>>> img.header.get_zooms()
(1.0, 2.0, 3.0)
"""
hdr = self._header
shape = self._dataobj.shape
# We need to update the header if the data shape has changed. It's a
# bit difficult to change the data shape using the standard API, but
# maybe it happened
if hdr.get_data_shape() != shape:
hdr.set_data_shape(shape)
# If the affine is not None, and it is different from the main affine
# in the header, update the header
if self._affine is None:
return
if np.allclose(self._affine, hdr.get_best_affine()):
return
self._affine2header()
|
(self) -> NoneType
|
56,705 |
nibabel.cifti2.cifti2
|
Cifti2Header
|
Class for CIFTI-2 header extension
|
class Cifti2Header(FileBasedHeader, xml.XmlSerializable):
"""Class for CIFTI-2 header extension"""
def __init__(self, matrix=None, version='2.0'):
FileBasedHeader.__init__(self)
xml.XmlSerializable.__init__(self)
if matrix is None:
matrix = Cifti2Matrix()
self.matrix = matrix
self.version = version
def _to_xml_element(self):
cifti = xml.Element('CIFTI')
cifti.attrib['Version'] = str(self.version)
mat_xml = self.matrix._to_xml_element()
if mat_xml is not None:
cifti.append(mat_xml)
return cifti
def __eq__(self, other):
return self.to_xml() == other.to_xml()
@classmethod
def may_contain_header(klass, binaryblock):
from .parse_cifti2 import _Cifti2AsNiftiHeader
return _Cifti2AsNiftiHeader.may_contain_header(binaryblock)
@property
def number_of_mapped_indices(self):
"""
Number of mapped indices
"""
return len(self.matrix)
@property
def mapped_indices(self):
"""
List of matrix indices that are mapped
"""
return self.matrix.mapped_indices
def get_index_map(self, index):
"""
Cifti2 Mapping class for a given index
Parameters
----------
index : int
Index for which we want to obtain the mapping.
Must be in the mapped_indices sequence.
Returns
-------
cifti2_map : Cifti2MatrixIndicesMap
Returns the Cifti2MatrixIndicesMap corresponding to
the given index.
"""
return self.matrix.get_index_map(index)
def get_axis(self, index):
"""
Generates the Cifti2 axis for a given dimension
Parameters
----------
index : int
Dimension for which we want to obtain the mapping.
Returns
-------
axis : :class:`.cifti2_axes.Axis`
"""
return self.matrix.get_axis(index)
@classmethod
def from_axes(cls, axes):
"""
Creates a new Cifti2 header based on the Cifti2 axes
Parameters
----------
axes : tuple of :class`.cifti2_axes.Axis`
sequence of Cifti2 axes describing each row/column of the matrix to be stored
Returns
-------
header : Cifti2Header
new header describing the rows/columns in a format consistent with Cifti2
"""
from . import cifti2_axes
return cifti2_axes.to_header(axes)
|
(matrix=None, version='2.0')
|
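A small sketch of the default construction of ``Cifti2Header``; the header starts with an empty matrix, so no indices are mapped yet.
from nibabel.cifti2 import Cifti2Header

hdr = Cifti2Header()
print(hdr.version)                    # 2.0
print(hdr.number_of_mapped_indices)   # 0
print(hdr.mapped_indices)             # []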
56,706 |
nibabel.cifti2.cifti2
|
__eq__
| null |
def __eq__(self, other):
return self.to_xml() == other.to_xml()
|
(self, other)
|
56,707 |
nibabel.cifti2.cifti2
|
__init__
| null |
def __init__(self, matrix=None, version='2.0'):
FileBasedHeader.__init__(self)
xml.XmlSerializable.__init__(self)
if matrix is None:
matrix = Cifti2Matrix()
self.matrix = matrix
self.version = version
|
(self, matrix=None, version='2.0')
|
56,708 |
nibabel.filebasedimages
|
__ne__
| null |
def __ne__(self, other: object) -> bool:
return not self == other
|
(self, other: object) -> bool
|
56,709 |
nibabel.cifti2.cifti2
|
_to_xml_element
| null |
def _to_xml_element(self):
cifti = xml.Element('CIFTI')
cifti.attrib['Version'] = str(self.version)
mat_xml = self.matrix._to_xml_element()
if mat_xml is not None:
cifti.append(mat_xml)
return cifti
|
(self)
|
56,710 |
nibabel.filebasedimages
|
copy
|
Copy object to independent representation
The copy should not be affected by any changes to the original
object.
|
def copy(self: HdrT) -> HdrT:
"""Copy object to independent representation
The copy should not be affected by any changes to the original
object.
"""
return deepcopy(self)
|
(self: ~HdrT) -> ~HdrT
|
56,711 |
nibabel.cifti2.cifti2
|
get_axis
|
Generates the Cifti2 axis for a given dimension
Parameters
----------
index : int
Dimension for which we want to obtain the mapping.
Returns
-------
axis : :class:`.cifti2_axes.Axis`
|
def get_axis(self, index):
"""
Generates the Cifti2 axis for a given dimension
Parameters
----------
index : int
Dimension for which we want to obtain the mapping.
Returns
-------
axis : :class:`.cifti2_axes.Axis`
"""
return self.matrix.get_axis(index)
|
(self, index)
|
56,712 |
nibabel.cifti2.cifti2
|
get_index_map
|
Cifti2 Mapping class for a given index
Parameters
----------
index : int
Index for which we want to obtain the mapping.
Must be in the mapped_indices sequence.
Returns
-------
cifti2_map : Cifti2MatrixIndicesMap
Returns the Cifti2MatrixIndicesMap corresponding to
the given index.
|
def get_index_map(self, index):
"""
Cifti2 Mapping class for a given index
Parameters
----------
index : int
Index for which we want to obtain the mapping.
Must be in the mapped_indices sequence.
Returns
-------
cifti2_map : Cifti2MatrixIndicesMap
Returns the Cifti2MatrixIndicesMap corresponding to
the given index.
"""
return self.matrix.get_index_map(index)
|
(self, index)
|