code | signature | docstring | loss_without_docstring | loss_with_docstring | factor
string | string | string | float64 | float64 | float64
---|---|---|---|---|---
gitignore_path = os.path.join(root, '.gitignore')
dir_patterns = ['.git']
file_patterns = []
if not os.path.exists(gitignore_path):
return (dir_patterns, file_patterns)
with open(gitignore_path, 'r', encoding='utf-8') as f:
for line in f.readlines():
line = line.strip()
if not line:
continue
if line.startswith('#'):
continue
if '**' in line:
raise NotImplementedError('gitignore ** wildcards are not implemented')
if line.startswith('!'):
raise NotImplementedError('gitignore pattern negation is not implemented')
if line.startswith('/'):
raise NotImplementedError('gitignore anchored patterns are not implemented')
if line.startswith('\\#'):
line = '#' + line[2:]
if line.startswith('\\!'):
line = '!' + line[2:]
if line.endswith('/'):
dir_patterns.append(line[:-1])
else:
file_patterns.append(line)
return (dir_patterns, file_patterns)
|
def _gitignore(root)
|
Parses a .gitignore file and returns patterns to match dirs and files.
Only basic gitignore patterns are supported. Pattern negation, ** wildcards
and anchored patterns are not currently implemented.
:param root:
A unicode string of the path to the git repository
:return:
A 2-element tuple:
- 0: a list of unicode strings to match against dirs
- 1: a list of unicode strings to match against dirs and files
| 2.118818 | 1.868179 | 1.134162 |
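As context for how the returned tuple might be consumed, here is a minimal sketch (not part of the project) that prunes an os.walk() using fnmatch; the walk_without_ignored name and the exact matching semantics are assumptions for illustration only.

```python
import fnmatch
import os

def walk_without_ignored(root, dir_patterns, file_patterns):
    # Hypothetical consumer of the (dir_patterns, file_patterns) tuple:
    # prune ignored directories in-place and skip matching files.
    for base, dirs, files in os.walk(root):
        dirs[:] = [d for d in dirs if not any(fnmatch.fnmatch(d, p) for p in dir_patterns)]
        for name in files:
            if any(fnmatch.fnmatch(name, p) for p in file_patterns):
                continue
            yield os.path.join(base, name)
```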
proc = subprocess.Popen(
params,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=cwd
)
stdout, stderr = proc.communicate()
code = proc.wait()
if code != 0:
e = OSError('subprocess exit code for %r was %d: %s' % (params, code, stderr))
e.stdout = stdout
e.stderr = stderr
raise e
return (stdout, stderr)
|
def _execute(params, cwd)
|
Executes a subprocess
:param params:
A list of the executable and arguments to pass to it
:param cwd:
The working directory to execute the command in
:return:
A 2-element tuple of (stdout, stderr)
| 2.503202 | 2.339519 | 1.069965 |
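For comparison, on Python 3.7+ much the same behaviour as this wrapper can be had from subprocess.run(); a small usage sketch, assuming git is on PATH:

```python
import subprocess

# check=True raises CalledProcessError on a non-zero exit code,
# roughly matching the OSError raised by the helper above.
result = subprocess.run(
    ['git', '--version'],
    cwd='.',
    capture_output=True,
    check=True
)
print(result.stdout.decode().strip())
```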
deps_dir = os.path.join(build_root, 'modularcrypto-deps')
if os.path.exists(deps_dir):
shutil.rmtree(deps_dir, ignore_errors=True)
os.mkdir(deps_dir)
try:
print("Staging ci dependencies")
_stage_requirements(deps_dir, os.path.join(package_root, 'requires', 'ci'))
print("Checking out modularcrypto packages for coverage")
for other_package in other_packages:
pkg_url = 'https://github.com/wbond/%s.git' % other_package
pkg_dir = os.path.join(build_root, other_package)
if os.path.exists(pkg_dir):
print("%s is already present" % other_package)
continue
print("Cloning %s" % pkg_url)
_execute(['git', 'clone', pkg_url], build_root)
print()
except (Exception):
if os.path.exists(deps_dir):
shutil.rmtree(deps_dir, ignore_errors=True)
raise
return True
|
def run()
|
Installs required development dependencies. Uses git to check out other
modularcrypto repos for more accurate coverage data.
| 3.277247 | 2.935947 | 1.116249 |
print('Downloading %s' % url)
filename = os.path.basename(url)
dest_path = os.path.join(dest, filename)
if sys.platform == 'win32':
powershell_exe = os.path.join('system32\\WindowsPowerShell\\v1.0\\powershell.exe')
code = "[System.Net.ServicePointManager]::SecurityProtocol = [System.Net.SecurityProtocolType]::Tls12;"
code += "(New-Object Net.WebClient).DownloadFile('%s', '%s');" % (url, dest_path)
_execute([powershell_exe, '-Command', code], dest)
else:
_execute(['curl', '-L', '--silent', '--show-error', '-O', url], dest)
return dest_path
|
def _download(url, dest)
|
Downloads a URL to a directory
:param url:
The URL to download
:param dest:
The path to the directory to save the file in
:return:
The filesystem path to the saved file
| 2.417534 | 2.491804 | 0.970194 |
if path.endswith('.zip'):
return zipfile.ZipFile(path, 'r')
return tarfile.open(path, 'r')
|
def _open_archive(path)
|
:param path:
A unicode string of the filesystem path to the archive
:return:
An archive object
| 2.948803 | 3.062303 | 0.962936 |
if isinstance(archive, zipfile.ZipFile):
return archive.infolist()
return archive.getmembers()
|
def _list_archive_members(archive)
|
:param archive:
An archive from _open_archive()
:return:
A list of info objects to be used with _info_name() and _extract_info()
| 5.444828 | 4.690474 | 1.160827 |
common_root = None
for info in _list_archive_members(archive):
fn = _info_name(info)
if fn in set(['.', '/']):
continue
sep = None
if '/' in fn:
sep = '/'
elif '\\' in fn:
sep = '\\'
if sep is None:
root_dir = fn
else:
root_dir, _ = fn.split(sep, 1)
if common_root is None:
common_root = root_dir
else:
if common_root != root_dir:
return None
return common_root
|
def _archive_single_dir(archive)
|
Check if all members of the archive are in a single top-level directory
:param archive:
An archive from _open_archive()
:return:
None if not a single top level directory in archive, otherwise a
unicode string of the top level directory name
| 3.121292 | 2.740305 | 1.139031 |
if isinstance(info, zipfile.ZipInfo):
return info.filename.replace('\\', '/')
return info.name.replace('\\', '/')
|
def _info_name(info)
|
Returns a normalized file path for an archive info object
:param info:
An info object from _list_archive_members()
:return:
A unicode string with all directory separators normalized to "/"
| 4.354778 | 3.629455 | 1.199844 |
if isinstance(archive, zipfile.ZipFile):
fn = info.filename
is_dir = fn.endswith('/') or fn.endswith('\\')
out = archive.read(info)
if is_dir and out == b'':
return None
return out
info_file = archive.extractfile(info)
if info_file:
return info_file.read()
return None
|
def _extract_info(archive, info)
|
Extracts the contents of an archive info object
:param archive:
An archive from _open_archive()
:param info:
An info object from _list_archive_members()
:return:
None, or a byte string of the file contents
| 3.134584 | 3.11325 | 1.006853 |
if pkg_path.endswith('.exe'):
try:
zf = None
zf = zipfile.ZipFile(pkg_path, 'r')
# Exes have a PLATLIB folder containing everything we want
for zi in zf.infolist():
if not zi.filename.startswith('PLATLIB'):
continue
data = _extract_info(zf, zi)
if data is not None:
dst_path = os.path.join(deps_dir, zi.filename[8:])
dst_dir = os.path.dirname(dst_path)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
with open(dst_path, 'wb') as f:
f.write(data)
finally:
if zf:
zf.close()
return
if pkg_path.endswith('.whl'):
try:
zf = None
zf = zipfile.ZipFile(pkg_path, 'r')
# Wheels contain exactly what we need and nothing else
zf.extractall(deps_dir)
finally:
if zf:
zf.close()
return
# Source archives may contain a bunch of other things.
# The following code works for the packages coverage and
# configparser, which are the two we currently require that
# do not provide wheels
try:
ar = None
ar = _open_archive(pkg_path)
pkg_name = None
base_path = _archive_single_dir(ar) or ''
if len(base_path):
if '-' in base_path:
pkg_name, _ = base_path.split('-', 1)
base_path += '/'
base_pkg_path = None
if pkg_name is not None:
base_pkg_path = base_path + pkg_name + '/'
src_path = base_path + 'src/'
members = []
for info in _list_archive_members(ar):
fn = _info_name(info)
if base_pkg_path is not None and fn.startswith(base_pkg_path):
dst_path = fn[len(base_pkg_path) - len(pkg_name) - 1:]
members.append((info, dst_path))
continue
if fn.startswith(src_path):
members.append((info, fn[len(src_path):]))
continue
for info, path in members:
info_data = _extract_info(ar, info)
# Dirs won't return a file
if info_data is not None:
dst_path = os.path.join(deps_dir, path)
dst_dir = os.path.dirname(dst_path)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
with open(dst_path, 'wb') as f:
f.write(info_data)
finally:
if ar:
ar.close()
|
def _extract_package(deps_dir, pkg_path)
|
Extracts a .whl, .zip, .tar.gz or .tar.bz2 archive into a package path to
use when running CI tasks
:param deps_dir:
A unicode string of the directory the package should be extracted to
:param pkg_path:
A unicode string of the path to the archive
| 2.568655 | 2.586225 | 0.993206 |
valid_tags = _pep425tags()
exe_suffix = None
if sys.platform == 'win32' and _pep425_implementation() == 'cp':
win_arch = 'win32' if sys.maxsize == 2147483647 else 'win-amd64'
version_info = sys.version_info
exe_suffix = '.%s-py%d.%d.exe' % (win_arch, version_info[0], version_info[1])
packages = _parse_requires(path)
for p in packages:
pkg = p['pkg']
if p['type'] == 'url':
if pkg.endswith('.zip') or pkg.endswith('.tar.gz') or pkg.endswith('.tar.bz2') or pkg.endswith('.whl'):
url = pkg
else:
raise Exception('Unable to install package from URL that is not an archive')
else:
pypi_json_url = 'https://pypi.org/pypi/%s/json' % pkg
json_dest = _download(pypi_json_url, deps_dir)
with open(json_dest, 'rb') as f:
pkg_info = json.loads(f.read().decode('utf-8'))
if os.path.exists(json_dest):
os.remove(json_dest)
latest = pkg_info['info']['version']
if p['type'] == '>=':
if _tuple_from_ver(p['ver']) > _tuple_from_ver(latest):
raise Exception('Unable to find version %s of %s, newest is %s' % (p['ver'], pkg, latest))
version = latest
elif p['type'] == '==':
if p['ver'] not in pkg_info['releases']:
raise Exception('Unable to find version %s of %s' % (p['ver'], pkg))
version = p['ver']
else:
version = latest
wheels = {}
whl = None
tar_bz2 = None
tar_gz = None
exe = None
for download in pkg_info['releases'][version]:
if exe_suffix and download['url'].endswith(exe_suffix):
exe = download['url']
if download['url'].endswith('.whl'):
parts = os.path.basename(download['url']).split('-')
tag_impl = parts[-3]
tag_abi = parts[-2]
tag_arch = parts[-1].split('.')[0]
wheels[(tag_impl, tag_abi, tag_arch)] = download['url']
if download['url'].endswith('.tar.bz2'):
tar_bz2 = download['url']
if download['url'].endswith('.tar.gz'):
tar_gz = download['url']
# Find the most-specific wheel possible
for tag in valid_tags:
if tag in wheels:
whl = wheels[tag]
break
if exe_suffix and exe:
url = exe
elif whl:
url = whl
elif tar_bz2:
url = tar_bz2
elif tar_gz:
url = tar_gz
else:
raise Exception('Unable to find suitable download for %s' % pkg)
local_path = _download(url, deps_dir)
_extract_package(deps_dir, local_path)
os.remove(local_path)
|
def _stage_requirements(deps_dir, path)
|
Installs requirements without using Python to download, since
different services are limited to TLS 1.2, and older versions of
Python do not support that
:param deps_dir:
A unicode path to a temporary directory to use for downloads
:param path:
A unicode filesystem path to a requirements file
| 2.327626 | 2.356812 | 0.987616 |
python_version = '.'.join(map(str_cls, sys.version_info[0:2]))
sys_platform = sys.platform
packages = []
with open(path, 'rb') as f:
contents = f.read().decode('utf-8')
for line in re.split(r'\r?\n', contents):
line = line.strip()
if not len(line):
continue
if re.match(r'^\s*#', line):
continue
if ';' in line:
package, cond = line.split(';', 1)
package = package.strip()
cond = cond.strip()
cond = cond.replace('sys_platform', repr(sys_platform))
cond = cond.replace('python_version', repr(python_version))
if not eval(cond):
continue
else:
package = line.strip()
if re.match(r'^\s*-r\s*', package):
sub_req_file = re.sub(r'^\s*-r\s*', '', package)
sub_req_file = os.path.abspath(os.path.join(os.path.dirname(path), sub_req_file))
packages.extend(_parse_requires(sub_req_file))
continue
if re.match(r'https?://', package):
packages.append({'type': 'url', 'pkg': package})
continue
if '>=' in package:
parts = package.split('>=')
package = parts[0].strip()
ver = parts[1].strip()
packages.append({'type': '>=', 'pkg': package, 'ver': ver})
continue
if '==' in package:
parts = package.split('==')
package = parts[0].strip()
ver = parts[1].strip()
packages.append({'type': '==', 'pkg': package, 'ver': ver})
continue
if re.search(r'[^ a-zA-Z0-9\-]', package):
raise Exception('Unsupported requirements format version constraint: %s' % package)
packages.append({'type': 'any', 'pkg': package})
return packages
|
def _parse_requires(path)
|
Does basic parsing of pip requirements files, so that something other
than Python can be used to perform the actual TLS requests
:param path:
A path to a requirements file
:return:
A list of dict objects containing the keys:
- 'type' ('any', 'url', '==', '>=')
- 'pkg'
- 'ver' (if 'type' == '==' or 'type' == '>=')
| 2.089046 | 1.921469 | 1.087213 |
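A self-contained sketch of the line format this parser handles, using plain string operations; the parse_line helper and the example requirement are illustrative only, and the environment marker is simply discarded rather than evaluated.

```python
def parse_line(line):
    # Split a pip-style requirement into the dict shape described above.
    if ';' in line:
        line, _marker = line.split(';', 1)  # environment marker ignored here
    line = line.strip()
    for op in ('>=', '=='):
        if op in line:
            pkg, ver = line.split(op, 1)
            return {'type': op, 'pkg': pkg.strip(), 'ver': ver.strip()}
    return {'type': 'any', 'pkg': line}

print(parse_line("coverage>=4.2 ; python_version < '3.0'"))
# {'type': '>=', 'pkg': 'coverage', 'ver': '4.2'}
```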
if not isinstance(byte_string, byte_cls):
raise TypeError(unwrap(
'''
byte_string must be a byte string, not %s
''',
_type_name(byte_string)
))
return byte_string.find(b'-----BEGIN') != -1 or byte_string.find(b'---- BEGIN') != -1
|
def detect(byte_string)
|
Detect if a byte string seems to contain a PEM-encoded block
:param byte_string:
A byte string to look through
:return:
A boolean, indicating if a PEM-encoded block is contained in the byte
string
| 3.675075 | 3.130826 | 1.173836 |
if not isinstance(der_bytes, byte_cls):
raise TypeError(unwrap(
'''
der_bytes must be a byte string, not %s
''' % _type_name(der_bytes)
))
if not isinstance(type_name, str_cls):
raise TypeError(unwrap(
'''
type_name must be a unicode string, not %s
''',
_type_name(type_name)
))
type_name = type_name.upper().encode('ascii')
output = BytesIO()
output.write(b'-----BEGIN ')
output.write(type_name)
output.write(b'-----\n')
if headers:
for key in headers:
output.write(key.encode('ascii'))
output.write(b': ')
output.write(headers[key].encode('ascii'))
output.write(b'\n')
output.write(b'\n')
b64_bytes = base64.b64encode(der_bytes)
b64_len = len(b64_bytes)
i = 0
while i < b64_len:
output.write(b64_bytes[i:i + 64])
output.write(b'\n')
i += 64
output.write(b'-----END ')
output.write(type_name)
output.write(b'-----\n')
return output.getvalue()
|
def armor(type_name, der_bytes, headers=None)
|
Armors a DER-encoded byte string in PEM
:param type_name:
A unicode string that will be capitalized and placed in the header
and footer of the block. E.g. "CERTIFICATE", "PRIVATE KEY", etc. This
will appear as "-----BEGIN CERTIFICATE-----" and
"-----END CERTIFICATE-----".
:param der_bytes:
A byte string to be armored
:param headers:
An OrderedDict of the header lines to write after the BEGIN line
:return:
A byte string of the PEM block
| 1.713279 | 1.614682 | 1.061063 |
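The core of PEM armoring is base64 folded at 64 characters between BEGIN/END markers; a standalone sketch under that assumption, independent of the helper above (no header-line support):

```python
import base64

def pem_armor(type_name, der_bytes):
    # Wrap DER bytes in BEGIN/END lines around base64 data
    # folded at 64 characters per line.
    b64 = base64.b64encode(der_bytes)
    lines = [b'-----BEGIN ' + type_name.encode('ascii') + b'-----']
    lines += [b64[i:i + 64] for i in range(0, len(b64), 64)]
    lines.append(b'-----END ' + type_name.encode('ascii') + b'-----')
    return b'\n'.join(lines) + b'\n'

print(pem_armor('CERTIFICATE', b'\x30\x03\x02\x01\x01').decode())
```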
if not isinstance(pem_bytes, byte_cls):
raise TypeError(unwrap(
'''
pem_bytes must be a byte string, not %s
''',
_type_name(pem_bytes)
))
# Valid states include: "trash", "headers", "body"
state = 'trash'
headers = {}
base64_data = b''
object_type = None
found_start = False
found_end = False
for line in pem_bytes.splitlines(False):
if line == b'':
continue
if state == "trash":
# Look for a starting line since some CA cert bundles show the cert
# info in a parsed format above each PEM block
type_name_match = re.match(b'^(?:---- |-----)BEGIN ([A-Z0-9 ]+)(?: ----|-----)', line)
if not type_name_match:
continue
object_type = type_name_match.group(1).decode('ascii')
found_start = True
state = 'headers'
continue
if state == 'headers':
if line.find(b':') == -1:
state = 'body'
else:
decoded_line = line.decode('ascii')
name, value = decoded_line.split(':', 1)
headers[name] = value.strip()
continue
if state == 'body':
if line[0:5] in (b'-----', b'---- '):
der_bytes = base64.b64decode(base64_data)
yield (object_type, headers, der_bytes)
state = 'trash'
headers = {}
base64_data = b''
object_type = None
found_end = True
continue
base64_data += line
if not found_start or not found_end:
raise ValueError(unwrap(
'''
pem_bytes does not appear to contain PEM-encoded data - no
BEGIN/END combination found
'''
))
|
def _unarmor(pem_bytes)
|
Convert a PEM-encoded byte string into one or more DER-encoded byte strings
:param pem_bytes:
A byte string of the PEM-encoded data
:raises:
ValueError - when the pem_bytes do not appear to be PEM-encoded bytes
:return:
A generator of 3-element tuples in the format: (object_type, headers,
der_bytes). The object_type is a unicode string of what is between
"-----BEGIN " and "-----". Examples include: "CERTIFICATE",
"PUBLIC KEY", "PRIVATE KEY". The headers is a dict containing any lines
in the form "Name: Value" that are right after the begin line.
| 3.395435 | 3.172786 | 1.070175 |
generator = _unarmor(pem_bytes)
if not multiple:
return next(generator)
return generator
|
def unarmor(pem_bytes, multiple=False)
|
Convert a PEM-encoded byte string into a DER-encoded byte string
:param pem_bytes:
A byte string of the PEM-encoded data
:param multiple:
If True, function will return a generator
:raises:
ValueError - when the pem_bytes do not appear to be PEM-encoded bytes
:return:
A 3-element tuple (object_name, headers, der_bytes). The object_name is
a unicode string of what is between "-----BEGIN " and "-----". Examples
include: "CERTIFICATE", "PUBLIC KEY", "PRIVATE KEY". The headers is a
dict containing any lines in the form "Name: Value" that are right
after the begin line.
| 6.111073 | 9.62762 | 0.634744 |
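If the asn1crypto package is installed, the public pem.armor()/pem.unarmor() pair documented in these records can be exercised as a round trip; a usage sketch (the DER payload is arbitrary test data):

```python
from asn1crypto import pem

der_in = b'\x30\x03\x02\x01\x01'
pem_bytes = pem.armor('TEST MESSAGE', der_in)

object_type, headers, der_out = pem.unarmor(pem_bytes)
assert object_type == 'TEST MESSAGE'
assert der_out == der_in

# With multiple=True a generator of (type, headers, der_bytes) tuples is returned
for object_type, headers, der_bytes in pem.unarmor(pem_bytes, multiple=True):
    print(object_type, len(der_bytes))
```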
algorithm = self['algorithm'].native
algo_map = {
'md2_rsa': 'rsassa_pkcs1v15',
'md5_rsa': 'rsassa_pkcs1v15',
'sha1_rsa': 'rsassa_pkcs1v15',
'sha224_rsa': 'rsassa_pkcs1v15',
'sha256_rsa': 'rsassa_pkcs1v15',
'sha384_rsa': 'rsassa_pkcs1v15',
'sha512_rsa': 'rsassa_pkcs1v15',
'rsassa_pkcs1v15': 'rsassa_pkcs1v15',
'rsassa_pss': 'rsassa_pss',
'sha1_dsa': 'dsa',
'sha224_dsa': 'dsa',
'sha256_dsa': 'dsa',
'dsa': 'dsa',
'sha1_ecdsa': 'ecdsa',
'sha224_ecdsa': 'ecdsa',
'sha256_ecdsa': 'ecdsa',
'sha384_ecdsa': 'ecdsa',
'sha512_ecdsa': 'ecdsa',
'sha3_224_ecdsa': 'ecdsa',
'sha3_256_ecdsa': 'ecdsa',
'sha3_384_ecdsa': 'ecdsa',
'sha3_512_ecdsa': 'ecdsa',
'ecdsa': 'ecdsa',
}
if algorithm in algo_map:
return algo_map[algorithm]
raise ValueError(unwrap(
'''
Signature algorithm not known for %s
''',
algorithm
))
|
def signature_algo(self)
|
:return:
A unicode string of "rsassa_pkcs1v15", "rsassa_pss", "dsa" or
"ecdsa"
| 1.470094 | 1.398143 | 1.051461 |
algorithm = self['algorithm'].native
algo_map = {
'md2_rsa': 'md2',
'md5_rsa': 'md5',
'sha1_rsa': 'sha1',
'sha224_rsa': 'sha224',
'sha256_rsa': 'sha256',
'sha384_rsa': 'sha384',
'sha512_rsa': 'sha512',
'sha1_dsa': 'sha1',
'sha224_dsa': 'sha224',
'sha256_dsa': 'sha256',
'sha1_ecdsa': 'sha1',
'sha224_ecdsa': 'sha224',
'sha256_ecdsa': 'sha256',
'sha384_ecdsa': 'sha384',
'sha512_ecdsa': 'sha512',
}
if algorithm in algo_map:
return algo_map[algorithm]
if algorithm == 'rsassa_pss':
return self['parameters']['hash_algorithm']['algorithm'].native
raise ValueError(unwrap(
'''
Hash algorithm not known for %s
''',
algorithm
))
|
def hash_algo(self)
|
:return:
A unicode string of "md2", "md5", "sha1", "sha224", "sha256",
"sha384", "sha512", "sha512_224", "sha512_256"
| 1.717846 | 1.681179 | 1.02181 |
r = int_from_bytes(data[0:len(data) // 2])
s = int_from_bytes(data[len(data) // 2:])
return cls({'r': r, 's': s})
|
def from_p1363(cls, data)
|
Reads a signature from a byte string encoded according to IEEE P1363,
which is used by Microsoft's BCryptSignHash() function.
:param data:
A byte string from BCryptSignHash()
:return:
A DSASignature object
| 3.092902 | 3.754016 | 0.823892 |
r_bytes = int_to_bytes(self['r'].native)
s_bytes = int_to_bytes(self['s'].native)
int_byte_length = max(len(r_bytes), len(s_bytes))
r_bytes = fill_width(r_bytes, int_byte_length)
s_bytes = fill_width(s_bytes, int_byte_length)
return r_bytes + s_bytes
|
def to_p1363(self)
|
Dumps a signature to a byte string compatible with Microsoft's
BCryptVerifySignature() function.
:return:
A byte string compatible with BCryptVerifySignature()
| 2.937663 | 2.708892 | 1.084452 |
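IEEE P1363 encoding is just r and s rendered as equal-width big-endian integers and concatenated, as opposed to the DER SEQUENCE form; a standalone Python 3 sketch of the conversion (function names are illustrative):

```python
def to_p1363(r, s):
    # Concatenate r and s as equal-width big-endian byte strings.
    width = max((r.bit_length() + 7) // 8, (s.bit_length() + 7) // 8)
    return r.to_bytes(width, 'big') + s.to_bytes(width, 'big')

def from_p1363(data):
    # Split the concatenation back into the two integers.
    half = len(data) // 2
    return int.from_bytes(data[:half], 'big'), int.from_bytes(data[half:], 'big')

sig = to_p1363(0x1234, 0x56)
assert sig == b'\x12\x34\x00\x56'
assert from_p1363(sig) == (0x1234, 0x56)
```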
encryption_algo = self['algorithm'].native
if encryption_algo == 'pbes2':
return self['parameters']['key_derivation_func']['algorithm'].native
if encryption_algo.find('.') == -1:
if encryption_algo.find('_') != -1:
encryption_algo, _ = encryption_algo.split('_', 1)
if encryption_algo == 'pbes1':
return 'pbkdf1'
if encryption_algo == 'pkcs12':
return 'pkcs12_kdf'
raise ValueError(unwrap(
'''
Encryption algorithm "%s" does not have a registered key
derivation function
''',
encryption_algo
))
raise ValueError(unwrap(
'''
Unrecognized encryption algorithm "%s", can not determine key
derivation function
''',
encryption_algo
))
|
def kdf(self)
|
Returns the name of the key derivation function to use.
:return:
A unicode string of one of the following: "pbkdf1", "pbkdf2",
"pkcs12_kdf"
| 3.454074 | 3.147467 | 1.097414 |
encryption_algo = self['algorithm'].native
if encryption_algo == 'pbes2':
return self['parameters']['key_derivation_func']['parameters']['prf']['algorithm'].native
if encryption_algo.find('.') == -1:
if encryption_algo.find('_') != -1:
_, hmac_algo, _ = encryption_algo.split('_', 2)
return hmac_algo
raise ValueError(unwrap(
'''
Encryption algorithm "%s" does not have a registered key
derivation function
''',
encryption_algo
))
raise ValueError(unwrap(
'''
Unrecognized encryption algorithm "%s", can not determine key
derivation hmac algorithm
''',
encryption_algo
))
|
def kdf_hmac(self)
|
Returns the HMAC algorithm to use with the KDF.
:return:
A unicode string of one of the following: "md2", "md5", "sha1",
"sha224", "sha256", "sha384", "sha512"
| 4.330334 | 4.332517 | 0.999496 |
encryption_algo = self['algorithm'].native
if encryption_algo == 'pbes2':
salt = self['parameters']['key_derivation_func']['parameters']['salt']
if salt.name == 'other_source':
raise ValueError(unwrap(
'''
Can not determine key derivation salt - the
reserved-for-future-use other source salt choice was
specified in the PBKDF2 params structure
'''
))
return salt.native
if encryption_algo.find('.') == -1:
if encryption_algo.find('_') != -1:
return self['parameters']['salt'].native
raise ValueError(unwrap(
'''
Encryption algorithm "%s" does not have a registered key
derivation function
''',
encryption_algo
))
raise ValueError(unwrap(
'''
Unrecognized encryption algorithm "%s", can not determine key
derivation salt
''',
encryption_algo
))
|
def kdf_salt(self)
|
Returns the byte string to use as the salt for the KDF.
:return:
A byte string
| 5.256238 | 5.339802 | 0.984351 |
encryption_algo = self['algorithm'].native
if encryption_algo == 'pbes2':
return self['parameters']['key_derivation_func']['parameters']['iteration_count'].native
if encryption_algo.find('.') == -1:
if encryption_algo.find('_') != -1:
return self['parameters']['iterations'].native
raise ValueError(unwrap(
'''
Encryption algorithm "%s" does not have a registered key
derivation function
''',
encryption_algo
))
raise ValueError(unwrap(
'''
Unrecognized encryption algorithm "%s", can not determine key
derivation iterations
''',
encryption_algo
))
|
def kdf_iterations(self)
|
Returns the number of iterations that should be run via the KDF.
:return:
An integer
| 4.459108 | 4.823882 | 0.924382 |
encryption_algo = self['algorithm'].native
if encryption_algo[0:3] == 'aes':
return {
'aes128_': 16,
'aes192_': 24,
'aes256_': 32,
}[encryption_algo[0:7]]
cipher_lengths = {
'des': 8,
'tripledes_3key': 24,
}
if encryption_algo in cipher_lengths:
return cipher_lengths[encryption_algo]
if encryption_algo == 'rc2':
rc2_params = self['parameters'].parsed['encryption_scheme']['parameters'].parsed
rc2_parameter_version = rc2_params['rc2_parameter_version'].native
# See page 24 of
# http://www.emc.com/collateral/white-papers/h11302-pkcs5v2-1-password-based-cryptography-standard-wp.pdf
encoded_key_bits_map = {
160: 5, # 40-bit
120: 8, # 64-bit
58: 16, # 128-bit
}
if rc2_parameter_version in encoded_key_bits_map:
return encoded_key_bits_map[rc2_parameter_version]
if rc2_parameter_version >= 256:
return rc2_parameter_version
if rc2_parameter_version is None:
return 4 # 32-bit default
raise ValueError(unwrap(
'''
Invalid RC2 parameter version found in EncryptionAlgorithm
parameters
'''
))
if encryption_algo == 'pbes2':
key_length = self['parameters']['key_derivation_func']['parameters']['key_length'].native
if key_length is not None:
return key_length
# If the KDF params don't specify the key size, we can infer it from
# the encryption scheme for all schemes except for RC5. However, in
# practical terms, neither OpenSSL nor OS X supports RC5 for PKCS#8
# so it is unlikely to be an issue that is run into.
return self['parameters']['encryption_scheme'].key_length
if encryption_algo.find('.') == -1:
return {
'pbes1_md2_des': 8,
'pbes1_md5_des': 8,
'pbes1_md2_rc2': 8,
'pbes1_md5_rc2': 8,
'pbes1_sha1_des': 8,
'pbes1_sha1_rc2': 8,
'pkcs12_sha1_rc4_128': 16,
'pkcs12_sha1_rc4_40': 5,
'pkcs12_sha1_tripledes_3key': 24,
'pkcs12_sha1_tripledes_2key': 16,
'pkcs12_sha1_rc2_128': 16,
'pkcs12_sha1_rc2_40': 5,
}[encryption_algo]
raise ValueError(unwrap(
'''
Unrecognized encryption algorithm "%s"
''',
encryption_algo
))
|
def key_length(self)
|
Returns the key length to pass to the cipher/kdf. The PKCS#5 spec does
not specify a way to store the RC5 key length, however this tends not
to be a problem since OpenSSL does not support RC5 in PKCS#8 and OS X
does not provide an RC5 cipher for use in the Security Transforms
library.
:raises:
ValueError - when the key length can not be determined
:return:
An integer representing the length in bytes
| 3.211862 | 3.064108 | 1.048221 |
encryption_algo = self['algorithm'].native
if encryption_algo[0:7] in set(['aes128_', 'aes192_', 'aes256_']):
return encryption_algo[7:]
if encryption_algo[0:6] == 'pbes1_':
return 'cbc'
if encryption_algo[0:7] == 'pkcs12_':
return 'cbc'
if encryption_algo in set(['des', 'tripledes_3key', 'rc2', 'rc5']):
return 'cbc'
if encryption_algo == 'pbes2':
return self['parameters']['encryption_scheme'].encryption_mode
raise ValueError(unwrap(
'''
Unrecognized encryption algorithm "%s"
''',
encryption_algo
))
|
def encryption_mode(self)
|
Returns the name of the encryption mode to use.
:return:
A unicode string from one of the following: "cbc", "ecb", "ofb",
"cfb", "wrap", "gcm", "ccm", "wrap_pad"
| 3.7495 | 3.599546 | 1.041659 |
encryption_algo = self['algorithm'].native
if encryption_algo[0:7] in set(['aes128_', 'aes192_', 'aes256_']):
return 'aes'
if encryption_algo in set(['des', 'rc2', 'rc5']):
return encryption_algo
if encryption_algo == 'tripledes_3key':
return 'tripledes'
if encryption_algo == 'pbes2':
return self['parameters']['encryption_scheme'].encryption_cipher
if encryption_algo.find('.') == -1:
return {
'pbes1_md2_des': 'des',
'pbes1_md5_des': 'des',
'pbes1_md2_rc2': 'rc2',
'pbes1_md5_rc2': 'rc2',
'pbes1_sha1_des': 'des',
'pbes1_sha1_rc2': 'rc2',
'pkcs12_sha1_rc4_128': 'rc4',
'pkcs12_sha1_rc4_40': 'rc4',
'pkcs12_sha1_tripledes_3key': 'tripledes',
'pkcs12_sha1_tripledes_2key': 'tripledes',
'pkcs12_sha1_rc2_128': 'rc2',
'pkcs12_sha1_rc2_40': 'rc2',
}[encryption_algo]
raise ValueError(unwrap(
'''
Unrecognized encryption algorithm "%s"
''',
encryption_algo
))
|
def encryption_cipher(self)
|
Returns the name of the symmetric encryption cipher to use. The key
length can be retrieved via the .key_length property to disambiguate
between different variations of TripleDES, AES, and the RC* ciphers.
:return:
A unicode string from one of the following: "rc2", "rc5", "des",
"tripledes", "aes"
| 2.258897 | 2.247345 | 1.00514 |
encryption_algo = self['algorithm'].native
if encryption_algo[0:7] in set(['aes128_', 'aes192_', 'aes256_']):
return 16
cipher_map = {
'des': 8,
'tripledes_3key': 8,
'rc2': 8,
}
if encryption_algo in cipher_map:
return cipher_map[encryption_algo]
if encryption_algo == 'rc5':
return self['parameters'].parsed['block_size_in_bits'].native / 8
if encryption_algo == 'pbes2':
return self['parameters']['encryption_scheme'].encryption_block_size
if encryption_algo.find('.') == -1:
return {
'pbes1_md2_des': 8,
'pbes1_md5_des': 8,
'pbes1_md2_rc2': 8,
'pbes1_md5_rc2': 8,
'pbes1_sha1_des': 8,
'pbes1_sha1_rc2': 8,
'pkcs12_sha1_rc4_128': 0,
'pkcs12_sha1_rc4_40': 0,
'pkcs12_sha1_tripledes_3key': 8,
'pkcs12_sha1_tripledes_2key': 8,
'pkcs12_sha1_rc2_128': 8,
'pkcs12_sha1_rc2_40': 8,
}[encryption_algo]
raise ValueError(unwrap(
'''
Unrecognized encryption algorithm "%s"
''',
encryption_algo
))
|
def encryption_block_size(self)
|
Returns the block size of the encryption cipher, in bytes.
:return:
An integer that is the block size in bytes
| 2.407027 | 2.477194 | 0.971675 |
encryption_algo = self['algorithm'].native
if encryption_algo in set(['rc2', 'rc5']):
return self['parameters'].parsed['iv'].native
# For DES/Triple DES and AES the IV is the entirety of the parameters
octet_string_iv_oids = set([
'des',
'tripledes_3key',
'aes128_cbc',
'aes192_cbc',
'aes256_cbc',
'aes128_ofb',
'aes192_ofb',
'aes256_ofb',
])
if encryption_algo in octet_string_iv_oids:
return self['parameters'].native
if encryption_algo == 'pbes2':
return self['parameters']['encryption_scheme'].encryption_iv
# All of the PBES1 algos use their KDF to create the IV. For the pbkdf1,
# the KDF is told to generate a key that is an extra 8 bytes long, and
# that is used for the IV. For the PKCS#12 KDF, it is called with an id
# of 2 to generate the IV. In either case, we can't return the IV
# without knowing the user's password.
if encryption_algo.find('.') == -1:
return None
raise ValueError(unwrap(
'''
Unrecognized encryption algorithm "%s"
''',
encryption_algo
))
|
def encryption_iv(self)
|
Returns the byte string of the initialization vector for the encryption
scheme. Only the PBES2 stores the IV in the params. For PBES1, the IV
is derived from the KDF and this property will return None.
:return:
A byte string or None
| 5.189391 | 4.756805 | 1.09094 |
if not isinstance(value, str_cls):
raise TypeError(unwrap(
'''
%s value must be a unicode string, not %s
''',
type_name(self),
type_name(value)
))
if value.startswith('.'):
encoded_value = b'.' + value[1:].encode(self._encoding)
else:
encoded_value = value.encode(self._encoding)
self._unicode = value
self.contents = encoded_value
self._header = None
if self._trailer != b'':
self._trailer = b''
|
def set(self, value)
|
Sets the value of the DNS name
:param value:
A unicode string
| 3.567748 | 3.450544 | 1.033967 |
if not isinstance(value, str_cls):
raise TypeError(unwrap(
'''
%s value must be a unicode string, not %s
''',
type_name(self),
type_name(value)
))
self._unicode = value
self.contents = iri_to_uri(value)
self._header = None
if self._trailer != b'':
self._trailer = b''
|
def set(self, value)
|
Sets the value of the string
:param value:
A unicode string
| 4.933894 | 4.72557 | 1.044085 |
if not isinstance(value, str_cls):
raise TypeError(unwrap(
'''
%s value must be a unicode string, not %s
''',
type_name(self),
type_name(value)
))
if value.find('@') != -1:
mailbox, hostname = value.rsplit('@', 1)
encoded_value = mailbox.encode('ascii') + b'@' + hostname.encode('idna')
else:
encoded_value = value.encode('ascii')
self._normalized = True
self._unicode = value
self.contents = encoded_value
self._header = None
if self._trailer != b'':
self._trailer = b''
|
def set(self, value)
|
Sets the value of the string
:param value:
A unicode string
| 3.538913 | 3.494929 | 1.012585 |
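The hostname half of the address is IDNA-encoded while the mailbox stays ASCII; the standard-library 'idna' codec used here can be seen in isolation (the address is made up):

```python
mailbox, hostname = 'will@bücher.example'.rsplit('@', 1)
encoded = mailbox.encode('ascii') + b'@' + hostname.encode('idna')
print(encoded)  # b'will@xn--bcher-kva.example'
```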
if not isinstance(value, str_cls):
raise TypeError(unwrap(
'''
%s value must be a unicode string, not %s
''',
type_name(self),
type_name(value)
))
original_value = value
has_cidr = value.find('/') != -1
cidr = 0
if has_cidr:
parts = value.split('/', 1)
value = parts[0]
cidr = int(parts[1])
if cidr < 0:
raise ValueError(unwrap(
'''
%s value contains a CIDR range less than 0
''',
type_name(self)
))
if value.find(':') != -1:
family = socket.AF_INET6
if cidr > 128:
raise ValueError(unwrap(
'''
%s value contains a CIDR range bigger than 128, the maximum
value for an IPv6 address
''',
type_name(self)
))
cidr_size = 128
else:
family = socket.AF_INET
if cidr > 32:
raise ValueError(unwrap(
'''
%s value contains a CIDR range bigger than 32, the maximum
value for an IPv4 address
''',
type_name(self)
))
cidr_size = 32
cidr_bytes = b''
if has_cidr:
cidr_mask = '1' * cidr
cidr_mask += '0' * (cidr_size - len(cidr_mask))
cidr_bytes = int_to_bytes(int(cidr_mask, 2))
cidr_bytes = (b'\x00' * ((cidr_size // 8) - len(cidr_bytes))) + cidr_bytes
self._native = original_value
self.contents = inet_pton(family, value) + cidr_bytes
self._bytes = self.contents
self._header = None
if self._trailer != b'':
self._trailer = b''
|
def set(self, value)
|
Sets the value of the object
:param value:
A unicode string containing an IPv4 address, IPv4 address with CIDR,
an IPv6 address or IPv6 address with CIDR
| 2.232602 | 2.16518 | 1.031139 |
if self.contents is None:
return None
if self._native is None:
byte_string = self.__bytes__()
byte_len = len(byte_string)
cidr_int = None
if byte_len in set([32, 16]):
value = inet_ntop(socket.AF_INET6, byte_string[0:16])
if byte_len > 16:
cidr_int = int_from_bytes(byte_string[16:])
elif byte_len in set([8, 4]):
value = inet_ntop(socket.AF_INET, byte_string[0:4])
if byte_len > 4:
cidr_int = int_from_bytes(byte_string[4:])
if cidr_int is not None:
cidr_bits = '{0:b}'.format(cidr_int)
cidr = len(cidr_bits.rstrip('0'))
value = value + '/' + str_cls(cidr)
self._native = value
return self._native
|
def native(self)
|
The native Python datatype representation of this value
:return:
A unicode string or None
| 2.747033 | 2.595093 | 1.058549 |
attr_name = cls.map(attr_name)
if attr_name in cls.preferred_order:
ordinal = cls.preferred_order.index(attr_name)
else:
ordinal = len(cls.preferred_order)
return (ordinal, attr_name)
|
def preferred_ordinal(cls, attr_name)
|
Returns an ordering value for a particular attribute key.
Unrecognized attributes and OIDs will be sorted lexically at the end.
:return:
An orderable value.
| 3.046507 | 2.875407 | 1.059505 |
if self._prepped is None:
self._prepped = self._ldap_string_prep(self['value'].native)
return self._prepped
|
def prepped_value(self)
|
Returns the value after being processed by the internationalized string
preparation as specified by RFC 5280
:return:
A unicode string
| 9.003098 | 6.479469 | 1.389481 |
output = []
values = self._get_values(self)
for key in sorted(values.keys()):
output.append('%s: %s' % (key, values[key]))
# Unit separator is used here since the normalization process for
# values removes any such character, and the keys are all dotted integers
# or under_score_words
return '\x1F'.join(output)
|
def hashable(self)
|
:return:
A unicode string that can be used as a dict key or in a set
| 15.441441 | 14.569115 | 1.059875 |
output = {}
[output.update([(ntv['type'].native, ntv.prepped_value)]) for ntv in rdn]
return output
|
def _get_values(self, rdn)
|
Returns a dict of prepped values contained in an RDN
:param rdn:
A RelativeDistinguishedName object
:return:
A dict object with unicode strings of NameTypeAndValue value field
values that have been prepped for comparison
| 15.777215 | 10.538069 | 1.497164 |
rdns = []
if not use_printable:
encoding_name = 'utf8_string'
encoding_class = UTF8String
else:
encoding_name = 'printable_string'
encoding_class = PrintableString
# Sort the attributes according to NameType.preferred_order
name_dict = OrderedDict(
sorted(
name_dict.items(),
key=lambda item: NameType.preferred_ordinal(item[0])
)
)
for attribute_name, attribute_value in name_dict.items():
attribute_name = NameType.map(attribute_name)
if attribute_name == 'email_address':
value = EmailAddress(attribute_value)
elif attribute_name == 'domain_component':
value = DNSName(attribute_value)
elif attribute_name in set(['dn_qualifier', 'country_name', 'serial_number']):
value = DirectoryString(
name='printable_string',
value=PrintableString(attribute_value)
)
else:
value = DirectoryString(
name=encoding_name,
value=encoding_class(attribute_value)
)
rdns.append(RelativeDistinguishedName([
NameTypeAndValue({
'type': attribute_name,
'value': value
})
]))
return cls(name='', value=RDNSequence(rdns))
|
def build(cls, name_dict, use_printable=False)
|
Creates a Name object from a dict of unicode string keys and values.
The keys should be from NameType._map, or a dotted-integer OID unicode
string.
:param name_dict:
A dict of name information, e.g. {"common_name": "Will Bond",
"country_name": "US", "organization": "Codex Non Sufficit LC"}
:param use_printable:
A bool - if PrintableString should be used for encoding instead of
UTF8String. This is for backwards compatibility with old software.
:return:
An x509.Name object
| 3.020569 | 2.816678 | 1.072387 |
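Assuming the asn1crypto package is installed, the build() classmethod documented here can be used directly; a short usage sketch:

```python
from asn1crypto import x509

name = x509.Name.build({
    'common_name': 'Will Bond',
    'country_name': 'US',
})
print(name.human_friendly)   # e.g. "Common Name: Will Bond, Country: US"
print(name.sha256.hex())     # SHA-256 of the DER encoding
```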
if self._human_friendly is None:
data = OrderedDict()
last_field = None
for rdn in self.chosen:
for type_val in rdn:
field_name = type_val['type'].human_friendly
last_field = field_name
if field_name in data:
data[field_name] = [data[field_name]]
data[field_name].append(type_val['value'])
else:
data[field_name] = type_val['value']
to_join = []
keys = data.keys()
if last_field == 'Country':
keys = reversed(list(keys))
for key in keys:
value = data[key]
native_value = self._recursive_humanize(value)
to_join.append('%s: %s' % (key, native_value))
has_comma = False
for element in to_join:
if element.find(',') != -1:
has_comma = True
break
separator = ', ' if not has_comma else '; '
self._human_friendly = separator.join(to_join[::-1])
return self._human_friendly
|
def human_friendly(self)
|
:return:
A human-friendly unicode string containing the parts of the name
| 3.038481 | 2.910851 | 1.043846 |
if isinstance(value, list):
return ', '.join(
reversed([self._recursive_humanize(sub_value) for sub_value in value])
)
return value.native
|
def _recursive_humanize(self, value)
|
Recursively serializes data compiled from the RDNSequence
:param value:
An Asn1Value object, or a list of Asn1Value objects
:return:
A unicode string
| 5.845351 | 5.102827 | 1.145512 |
if self._sha1 is None:
self._sha1 = hashlib.sha1(self.dump()).digest()
return self._sha1
|
def sha1(self)
|
:return:
The SHA1 hash of the DER-encoded bytes of this name
| 3.808002 | 3.457388 | 1.10141 |
if self._sha256 is None:
self._sha256 = hashlib.sha256(self.dump()).digest()
return self._sha256
|
def sha256(self)
|
:return:
The SHA-256 hash of the DER-encoded bytes of this name
| 2.873336 | 2.658635 | 1.080756 |
if self._url is False:
self._url = None
name = self['distribution_point']
if name.name != 'full_name':
raise ValueError(unwrap(
'''
CRL distribution points that are relative to the issuer are
not supported
'''
))
for general_name in name.chosen:
if general_name.name == 'uniform_resource_identifier':
url = general_name.native
if url.lower().startswith(('http://', 'https://', 'ldap://', 'ldaps://')):
self._url = url
break
return self._url
|
def url(self)
|
:return:
None or a unicode string of the distribution point's URL
| 5.217427 | 4.345011 | 1.200786 |
if self._issuer_serial is None:
self._issuer_serial = self.issuer.sha256 + b':' + str_cls(self.serial_number).encode('ascii')
return self._issuer_serial
|
def issuer_serial(self)
|
:return:
A byte string of the SHA-256 hash of the issuer concatenated with
the ascii character ":", concatenated with the serial number as
an ascii string
| 4.710316 | 3.213728 | 1.465686 |
if self._authority_issuer_serial is False:
akiv = self.authority_key_identifier_value
if akiv and akiv['authority_cert_issuer'].native:
issuer = self.authority_key_identifier_value['authority_cert_issuer'][0].chosen
# We untag the element since it is tagged via being a choice from GeneralName
issuer = issuer.untag()
authority_serial = self.authority_key_identifier_value['authority_cert_serial_number'].native
self._authority_issuer_serial = issuer.sha256 + b':' + str_cls(authority_serial).encode('ascii')
else:
self._authority_issuer_serial = None
return self._authority_issuer_serial
|
def authority_issuer_serial(self)
|
:return:
None or a byte string of the SHA-256 hash of the issuer from the
authority key identifier extension concatenated with the ascii
character ":", concatenated with the serial number from the
authority key identifier extension as an ascii string
| 4.987234 | 4.344884 | 1.147841 |
if self._crl_distribution_points is None:
self._crl_distribution_points = self._get_http_crl_distribution_points(self.crl_distribution_points_value)
return self._crl_distribution_points
|
def crl_distribution_points(self)
|
Returns complete CRL URLs - does not include delta CRLs
:return:
A list of zero or more DistributionPoint objects
| 2.883528 | 2.829185 | 1.019208 |
if self._delta_crl_distribution_points is None:
self._delta_crl_distribution_points = self._get_http_crl_distribution_points(self.freshest_crl_value)
return self._delta_crl_distribution_points
|
def delta_crl_distribution_points(self)
|
Returns delta CRL URLs - does not include complete CRLs
:return:
A list of zero or more DistributionPoint objects
| 2.888135 | 2.786861 | 1.03634 |
output = []
if crl_distribution_points is None:
return []
for distribution_point in crl_distribution_points:
distribution_point_name = distribution_point['distribution_point']
if distribution_point_name is VOID:
continue
# RFC 5280 indicates conforming CA should not use the relative form
if distribution_point_name.name == 'name_relative_to_crl_issuer':
continue
# This library is currently only concerned with HTTP-based CRLs
for general_name in distribution_point_name.chosen:
if general_name.name == 'uniform_resource_identifier':
output.append(distribution_point)
return output
|
def _get_http_crl_distribution_points(self, crl_distribution_points)
|
Fetches the DistributionPoint object for non-relative, HTTP CRLs
referenced by the certificate
:param crl_distribution_points:
A CRLDistributionPoints object to grab the DistributionPoints from
:return:
A list of zero or more DistributionPoint objects
| 4.610622 | 4.25421 | 1.083779 |
if not self.authority_information_access_value:
return []
output = []
for entry in self.authority_information_access_value:
if entry['access_method'].native == 'ocsp':
location = entry['access_location']
if location.name != 'uniform_resource_identifier':
continue
url = location.native
if url.lower().startswith(('http://', 'https://', 'ldap://', 'ldaps://')):
output.append(url)
return output
|
def ocsp_urls(self)
|
:return:
A list of zero or more unicode strings of the OCSP URLs for this
cert
| 3.180342 | 2.976171 | 1.068602 |
if self._valid_domains is None:
self._valid_domains = []
# For the subject alt name extension, we can look at the name of
# the choice selected since it distinguishes between domain names,
# email addresses, IPs, etc
if self.subject_alt_name_value:
for general_name in self.subject_alt_name_value:
if general_name.name == 'dns_name' and general_name.native not in self._valid_domains:
self._valid_domains.append(general_name.native)
# If there was no subject alt name extension, and the common name
# in the subject looks like a domain, that is considered the valid
# list. This is done because according to
# https://tools.ietf.org/html/rfc6125#section-6.4.4, the common
# name should not be used if the subject alt name is present.
else:
pattern = re.compile('^(\\*\\.)?(?:[a-zA-Z0-9](?:[a-zA-Z0-9\\-]*[a-zA-Z0-9])?\\.)+[a-zA-Z]{2,}$')
for rdn in self.subject.chosen:
for name_type_value in rdn:
if name_type_value['type'].native == 'common_name':
value = name_type_value['value'].native
if pattern.match(value):
self._valid_domains.append(value)
return self._valid_domains
|
def valid_domains(self)
|
:return:
A list of unicode strings of valid domain names for the certificate.
Wildcard certificates will have a domain in the form: *.example.com
| 3.277309 | 3.131069 | 1.046706 |
if self._valid_ips is None:
self._valid_ips = []
if self.subject_alt_name_value:
for general_name in self.subject_alt_name_value:
if general_name.name == 'ip_address':
self._valid_ips.append(general_name.native)
return self._valid_ips
|
def valid_ips(self)
|
:return:
A list of unicode strings of valid IP addresses for the certificate
| 3.462359 | 2.864549 | 1.208693 |
if self._self_issued is None:
self._self_issued = self.subject == self.issuer
return self._self_issued
|
def self_issued(self)
|
:return:
A boolean - if the certificate is self-issued, as defined by RFC
5280
| 3.959111 | 3.44253 | 1.150059 |
if self._self_signed is None:
self._self_signed = 'no'
if self.self_issued:
if self.key_identifier:
if not self.authority_key_identifier:
self._self_signed = 'maybe'
elif self.authority_key_identifier == self.key_identifier:
self._self_signed = 'maybe'
else:
self._self_signed = 'maybe'
return self._self_signed
|
def self_signed(self)
|
:return:
A unicode string of "no" or "maybe". The "maybe" result will
be returned if the certificate issuer and subject are the same.
If a key identifier and authority key identifier are present,
they will need to match otherwise "no" will be returned.
To verify whether a certificate is truly self-signed, the signature
will need to be verified. See the certvalidator package for
one possible solution.
| 2.514514 | 2.019862 | 1.244894 |
if not isinstance(domain_ip, str_cls):
raise TypeError(unwrap(
'''
domain_ip must be a unicode string, not %s
''',
type_name(domain_ip)
))
encoded_domain_ip = domain_ip.encode('idna').decode('ascii').lower()
is_ipv6 = encoded_domain_ip.find(':') != -1
is_ipv4 = not is_ipv6 and re.match('^\\d+\\.\\d+\\.\\d+\\.\\d+$', encoded_domain_ip)
is_domain = not is_ipv6 and not is_ipv4
# Handle domain name checks
if is_domain:
if not self.valid_domains:
return False
domain_labels = encoded_domain_ip.split('.')
for valid_domain in self.valid_domains:
encoded_valid_domain = valid_domain.encode('idna').decode('ascii').lower()
valid_domain_labels = encoded_valid_domain.split('.')
# The domain must be equal in label length to match
if len(valid_domain_labels) != len(domain_labels):
continue
if valid_domain_labels == domain_labels:
return True
is_wildcard = self._is_wildcard_domain(encoded_valid_domain)
if is_wildcard and self._is_wildcard_match(domain_labels, valid_domain_labels):
return True
return False
# Handle IP address checks
if not self.valid_ips:
return False
family = socket.AF_INET if is_ipv4 else socket.AF_INET6
normalized_ip = inet_pton(family, encoded_domain_ip)
for valid_ip in self.valid_ips:
valid_family = socket.AF_INET if valid_ip.find('.') != -1 else socket.AF_INET6
normalized_valid_ip = inet_pton(valid_family, valid_ip)
if normalized_valid_ip == normalized_ip:
return True
return False
|
def is_valid_domain_ip(self, domain_ip)
|
Check if a domain name or IP address is valid according to the
certificate
:param domain_ip:
A unicode string of a domain name or IP address
:return:
A boolean - if the domain or IP is valid for the certificate
| 2.07419 | 2.02434 | 1.024625 |
# The * character must be present for a wildcard match, and if there is
# more than one, it is an invalid wildcard specification
if domain.count('*') != 1:
return False
labels = domain.lower().split('.')
if not labels:
return False
# Wildcards may only appear in the left-most label
if labels[0].find('*') == -1:
return False
# Wildcards may not be embedded in an A-label from an IDN
if labels[0][0:4] == 'xn--':
return False
return True
|
def _is_wildcard_domain(self, domain)
|
Checks if a domain is a valid wildcard according to
https://tools.ietf.org/html/rfc6125#section-6.4.3
:param domain:
A unicode string of the domain name, where any U-labels from an IDN
have been converted to A-labels
:return:
A boolean - if the domain is a valid wildcard domain
| 6.222802 | 5.533486 | 1.124572 |
first_domain_label = domain_labels[0]
other_domain_labels = domain_labels[1:]
wildcard_label = valid_domain_labels[0]
other_valid_domain_labels = valid_domain_labels[1:]
# The wildcard is only allowed in the first label, so if
# the subsequent labels are not equal, there is no match
if other_domain_labels != other_valid_domain_labels:
return False
if wildcard_label == '*':
return True
wildcard_regex = re.compile('^' + wildcard_label.replace('*', '.*') + '$')
if wildcard_regex.match(first_domain_label):
return True
return False
|
def _is_wildcard_match(self, domain_labels, valid_domain_labels)
|
Determines if the labels in a domain are a match for labels from a
wildcard valid domain name
:param domain_labels:
A list of unicode strings, with A-label form for IDNs, of the labels
in the domain name to check
:param valid_domain_labels:
A list of unicode strings, with A-label form for IDNs, of the labels
in a wildcard domain pattern
:return:
A boolean - if the domain matches the valid domain
| 2.610192 | 2.654389 | 0.98335 |
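A self-contained sketch of the same label-by-label comparison, where the wildcard is only honoured in the left-most label per RFC 6125 section 6.4.3 (the wildcard_match name is illustrative):

```python
import re

def wildcard_match(domain, pattern):
    # Compare DNS labels; '*' may only appear in the pattern's first label.
    d_labels = domain.lower().split('.')
    p_labels = pattern.lower().split('.')
    if len(d_labels) != len(p_labels) or d_labels[1:] != p_labels[1:]:
        return False
    head = p_labels[0]
    if '*' not in head:
        return d_labels[0] == head
    regex = '^' + re.escape(head).replace('\\*', '.*') + '$'
    return re.match(regex, d_labels[0]) is not None

assert wildcard_match('www.example.com', '*.example.com')
assert not wildcard_match('example.com', '*.example.com')
```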
y2 = point.y * point.y
x3 = point.x * point.x * point.x
return (y2 - (x3 + self.a * point.x + self.b)) % self.p == 0
|
def contains(self, point)
|
:param point:
A Point object
:return:
Boolean if the point is on this curve
| 3.968583 | 3.61294 | 1.098436 |
# X9.62 B.3:
p = self.curve.p
a = self.curve.a
l_ = ((3 * self.x * self.x + a) * inverse_mod(2 * self.y, p)) % p
x3 = (l_ * l_ - 2 * self.x) % p
y3 = (l_ * (self.x - x3) - self.y) % p
return PrimePoint(self.curve, x3, y3)
|
def double(self)
|
:return:
A PrimePoint object that is twice this point
| 3.051482 | 2.540872 | 1.200959 |
print('Running flake8 %s' % flake8.__version__)
flake8_style = get_style_guide(config_file=os.path.join(package_root, 'tox.ini'))
paths = []
for _dir in [package_name, 'dev', 'tests']:
for root, _, filenames in os.walk(_dir):
for filename in filenames:
if not filename.endswith('.py'):
continue
paths.append(os.path.join(root, filename))
report = flake8_style.check_files(paths)
success = report.total_errors == 0
if success:
print('OK')
return success
|
def run()
|
Runs flake8 lint
:return:
A bool - if flake8 did not find any errors
| 3.061308 | 2.835523 | 1.079627 |
print('Python ' + sys.version.replace('\n', ''))
try:
oscrypto_tests_module_info = imp.find_module('tests', [os.path.join(build_root, 'oscrypto')])
oscrypto_tests = imp.load_module('oscrypto.tests', *oscrypto_tests_module_info)
oscrypto = oscrypto_tests.local_oscrypto()
print('\noscrypto backend: %s' % oscrypto.backend())
except (ImportError):
pass
if run_lint:
print('')
lint_result = run_lint()
else:
lint_result = True
if run_coverage:
print('\nRunning tests (via coverage.py)')
sys.stdout.flush()
tests_result = run_coverage(ci=True)
else:
print('\nRunning tests')
sys.stdout.flush()
tests_result = run_tests()
sys.stdout.flush()
return lint_result and tests_result
|
def run()
|
Runs the linter and tests
:return:
A bool - if the linter and tests ran successfully
| 3.774082 | 3.636233 | 1.03791 |
if address_family not in set([socket.AF_INET, socket.AF_INET6]):
raise ValueError(unwrap(
'''
address_family must be socket.AF_INET (%s) or socket.AF_INET6 (%s),
not %s
''',
repr(socket.AF_INET),
repr(socket.AF_INET6),
repr(address_family)
))
if not isinstance(packed_ip, byte_cls):
raise TypeError(unwrap(
'''
packed_ip must be a byte string, not %s
''',
type_name(packed_ip)
))
required_len = 4 if address_family == socket.AF_INET else 16
if len(packed_ip) != required_len:
raise ValueError(unwrap(
'''
packed_ip must be %d bytes long - is %d
''',
required_len,
len(packed_ip)
))
if address_family == socket.AF_INET:
return '%d.%d.%d.%d' % tuple(bytes_to_list(packed_ip))
octets = struct.unpack(b'!HHHHHHHH', packed_ip)
runs_of_zero = {}
longest_run = 0
zero_index = None
for i, octet in enumerate(octets + (-1,)):
if octet != 0:
if zero_index is not None:
length = i - zero_index
if length not in runs_of_zero:
runs_of_zero[length] = zero_index
longest_run = max(longest_run, length)
zero_index = None
elif zero_index is None:
zero_index = i
hexed = [hex(o)[2:] for o in octets]
if longest_run < 2:
return ':'.join(hexed)
zero_start = runs_of_zero[longest_run]
zero_end = zero_start + longest_run
return ':'.join(hexed[:zero_start]) + '::' + ':'.join(hexed[zero_end:])
|
def inet_ntop(address_family, packed_ip)
|
Windows compatibility shim for socket.inet_ntop().
:param address_family:
socket.AF_INET for IPv4 or socket.AF_INET6 for IPv6
:param packed_ip:
A byte string of the network form of an IP address
:return:
A unicode string of the IP address
| 2.042247 | 2.025353 | 1.008341 |
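On platforms where the standard library provides socket.inet_ntop() (most modern ones), its output matches what this shim produces, including the IPv6 zero-run compression; a quick demonstration:

```python
import socket
import struct

packed_v6 = struct.pack('!HHHHHHHH', 0x2001, 0x0db8, 0, 0, 0, 0, 0, 1)
print(socket.inet_ntop(socket.AF_INET6, packed_v6))   # 2001:db8::1

packed_v4 = bytes([192, 0, 2, 1])
print(socket.inet_ntop(socket.AF_INET, packed_v4))    # 192.0.2.1
```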
if address_family not in set([socket.AF_INET, socket.AF_INET6]):
raise ValueError(unwrap(
'''
address_family must be socket.AF_INET (%s) or socket.AF_INET6 (%s),
not %s
''',
repr(socket.AF_INET),
repr(socket.AF_INET6),
repr(address_family)
))
if not isinstance(ip_string, str_cls):
raise TypeError(unwrap(
'''
ip_string must be a unicode string, not %s
''',
type_name(ip_string)
))
if address_family == socket.AF_INET:
octets = ip_string.split('.')
error = len(octets) != 4
if not error:
ints = []
for o in octets:
o = int(o)
if o > 255 or o < 0:
error = True
break
ints.append(o)
if error:
raise ValueError(unwrap(
'''
ip_string must be a dotted string with four integers in the
range of 0 to 255, got %s
''',
repr(ip_string)
))
return struct.pack(b'!BBBB', *ints)
error = False
omitted = ip_string.count('::')
if omitted > 1:
error = True
elif omitted == 0:
octets = ip_string.split(':')
error = len(octets) != 8
else:
begin, end = ip_string.split('::')
begin_octets = begin.split(':')
end_octets = end.split(':')
missing = 8 - len(begin_octets) - len(end_octets)
octets = begin_octets + (['0'] * missing) + end_octets
if not error:
ints = []
for o in octets:
o = int(o, 16)
if o > 65535 or o < 0:
error = True
break
ints.append(o)
return struct.pack(b'!HHHHHHHH', *ints)
raise ValueError(unwrap(
'''
ip_string must be a valid ipv6 string, got %s
''',
repr(ip_string)
))
|
def inet_pton(address_family, ip_string)
|
Windows compatibility shim for socket.inet_pton().
:param address_family:
socket.AF_INET for IPv4 or socket.AF_INET6 for IPv6
:param ip_string:
A unicode string of an IP address
:return:
A byte string of the network form of the IP address
| 1.767658 | 1.7636 | 1.002301 |
format = format.replace('%Y', '0000')
# Year 0 is 1BC and a leap year. Leap years repeat themselves
# every 28 years. Because of adjustments and the proleptic gregorian
# calendar, the simplest way to format is to substitute year 2000.
temp = date(2000, self.month, self.day)
if '%c' in format:
c_out = temp.strftime('%c')
# Handle full years
c_out = c_out.replace('2000', '0000')
c_out = c_out.replace('%', '%%')
format = format.replace('%c', c_out)
if '%x' in format:
x_out = temp.strftime('%x')
# Handle formats such as 08/16/2000 or 16.08.2000
x_out = x_out.replace('2000', '0000')
x_out = x_out.replace('%', '%%')
format = format.replace('%x', x_out)
return temp.strftime(format)
|
def _format(self, format)
|
Performs strftime(), always returning a unicode string
:param format:
A strftime() format string
:return:
A unicode string of the formatted date
| 3.52257 | 3.399749 | 1.036126 |
output = self._format(format)
if py2:
return output.encode('utf-8')
return output
|
def strftime(self, format)
|
Formats the date using strftime()
:param format:
The strftime() format string
:return:
The formatted date as a unicode string in Python 3 and a byte
string in Python 2
| 6.466551 | 6.568917 | 0.984417 |
if year is None:
year = self.year
if month is None:
month = self.month
if day is None:
day = self.day
if year > 0:
cls = date
else:
cls = extended_date
return cls(
year,
month,
day
)
|
def replace(self, year=None, month=None, day=None)
|
Returns a new datetime.date or asn1crypto.util.extended_date
object with the specified components replaced
:return:
A datetime.date or asn1crypto.util.extended_date object
| 2.668661 | 2.251365 | 1.185353 |
if self.tzinfo is None:
return None
return self.tzinfo.utcoffset(self.replace(year=2000))
|
def utcoffset(self)
|
:return:
None or a datetime.timedelta() of the offset from UTC
| 3.936131 | 3.256015 | 1.20888 |
if self.tzinfo is None:
return None
return self.tzinfo.dst(self.replace(year=2000))
|
def dst(self)
|
:return:
None or a datetime.timedelta() of the daylight savings time offset
| 6.19387 | 3.674588 | 1.685595 |
if self.tzinfo is None:
return None
return self.tzinfo.tzname(self.replace(year=2000))
|
def tzname(self)
|
:return:
None or the name of the timezone as a unicode string in Python 3
and a byte string in Python 2
| 3.899357 | 3.32442 | 1.172943 |
format = format.replace('%Y', '0000')
# Year 0 is 1BC and a leap year. Leap years repeat themselves
# every 28 years. Because of adjustments and the proleptic gregorian
# calendar, the simplest way to format is to substitute year 2000.
temp = datetime(
2000,
self.month,
self.day,
self.hour,
self.minute,
self.second,
self.microsecond,
self.tzinfo
)
if '%c' in format:
c_out = temp.strftime('%c')
# Handle full years
c_out = c_out.replace('2000', '0000')
c_out = c_out.replace('%', '%%')
format = format.replace('%c', c_out)
if '%x' in format:
x_out = temp.strftime('%x')
# Handle formats such as 08/16/2000 or 16.08.2000
x_out = x_out.replace('2000', '0000')
x_out = x_out.replace('%', '%%')
format = format.replace('%x', x_out)
return temp.strftime(format)
|
def _format(self, format)
|
Performs strftime(), always returning a unicode string
:param format:
A strftime() format string
:return:
A unicode string of the formatted datetime
| 3.195749 | 3.094748 | 1.032636 |
if self.microsecond == 0:
return self.strftime('0000-%%m-%%d%s%%H:%%M:%%S' % sep)
return self.strftime('0000-%%m-%%d%s%%H:%%M:%%S.%%f' % sep)
|
def isoformat(self, sep='T')
|
Formats the date as "%Y-%m-%d %H:%M:%S" with the sep param between the
date and time portions
:param sep:
A single character of the separator to place between the date and
time
:return:
The formatted datetime as a unicode string in Python 3 and a byte
string in Python 2
| 3.052492 | 2.901801 | 1.05193 |
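A minimal usage sketch of isoformat(), assuming the extended_datetime constructor mirrors datetime.datetime but with year 0:

from asn1crypto.util import extended_datetime

dt = extended_datetime(0, 12, 31, 23, 59, 59)
print(dt.isoformat())          # expected: 0000-12-31T23:59:59
print(dt.isoformat(sep=' '))   # expected: 0000-12-31 23:59:59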
if year is None:
year = self.year
if month is None:
month = self.month
if day is None:
day = self.day
if hour is None:
hour = self.hour
if minute is None:
minute = self.minute
if second is None:
second = self.second
if microsecond is None:
microsecond = self.microsecond
if tzinfo is None:
tzinfo = self.tzinfo
if year > 0:
cls = datetime
else:
cls = extended_datetime
return cls(
year,
month,
day,
hour,
minute,
second,
microsecond,
tzinfo
)
|
def replace(self, year=None, month=None, day=None, hour=None, minute=None,
second=None, microsecond=None, tzinfo=None)
|
Returns a new datetime.datetime or asn1crypto.util.extended_datetime
object with the specified components replaced
:return:
A datetime.datetime or asn1crypto.util.extended_datetime object
| 1.551092 | 1.482409 | 1.046332 |
if self._issuer_name is False:
self._issuer_name = None
if self.certificate_issuer_value:
for general_name in self.certificate_issuer_value:
if general_name.name == 'directory_name':
self._issuer_name = general_name.chosen
break
return self._issuer_name
|
def issuer_name(self)
|
:return:
None, or an asn1crypto.x509.Name object for the issuer of the cert
| 3.715186 | 3.104805 | 1.196592 |
if self._issuer_cert_urls is None:
self._issuer_cert_urls = []
if self.authority_information_access_value:
for entry in self.authority_information_access_value:
if entry['access_method'].native == 'ca_issuers':
location = entry['access_location']
if location.name != 'uniform_resource_identifier':
continue
url = location.native
if url.lower()[0:7] == 'http://':
self._issuer_cert_urls.append(url)
return self._issuer_cert_urls
|
def issuer_cert_urls(self)
|
:return:
A list of unicode strings that are URLs that should contain either
an individual DER-encoded X.509 certificate, or a DER-encoded CMS
message containing multiple certificates
| 2.687798 | 2.570978 | 1.045438 |
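A hedged usage sketch; "example.der" is a hypothetical path to a DER-encoded certificate, and the property name follows the row above:

from asn1crypto import x509

with open('example.der', 'rb') as f:   # hypothetical input file
    cert = x509.Certificate.load(f.read())

# Only http:// URLs from "ca_issuers" entries of the Authority Information
# Access extension are collected
for url in cert.issuer_cert_urls:
    print(url)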
if self._delta_crl_distribution_points is None:
self._delta_crl_distribution_points = []
if self.freshest_crl_value is not None:
for distribution_point in self.freshest_crl_value:
distribution_point_name = distribution_point['distribution_point']
# RFC 5280 indicates a conforming CA should not use the relative form
if distribution_point_name.name == 'name_relative_to_crl_issuer':
continue
# This library is currently only concerned with HTTP-based CRLs
for general_name in distribution_point_name.chosen:
if general_name.name == 'uniform_resource_identifier':
self._delta_crl_distribution_points.append(distribution_point)
return self._delta_crl_distribution_points
|
def delta_crl_distribution_points(self)
|
Returns delta CRL URLs - only applies to complete CRLs
:return:
A list of zero or more DistributionPoint objects
| 3.443708 | 3.281429 | 1.049454 |
self._critical_extensions = set()
for extension in self['single_extensions']:
name = extension['extn_id'].native
attribute_name = '_%s_value' % name
if hasattr(self, attribute_name):
setattr(self, attribute_name, extension['extn_value'].parsed)
if extension['critical'].native:
self._critical_extensions.add(name)
self._processed_extensions = True
|
def _set_extensions(self)
|
Sets common named extensions to private attributes and builds a set
of critical extension names
| 4.355501 | 3.609821 | 1.20657 |
print('%s%s Object #%s' % (prefix, type_name(self), id(self)))
if self._header:
print('%s Header: 0x%s' % (prefix, binascii.hexlify(self._header or b'').decode('utf-8')))
has_header = self.method is not None and self.class_ is not None and self.tag is not None
if has_header:
method_name = METHOD_NUM_TO_NAME_MAP.get(self.method)
class_name = CLASS_NUM_TO_NAME_MAP.get(self.class_)
if self.explicit is not None:
for class_, tag in self.explicit:
print(
'%s %s tag %s (explicitly tagged)' %
(
prefix,
CLASS_NUM_TO_NAME_MAP.get(class_),
tag
)
)
if has_header:
print('%s %s %s %s' % (prefix, method_name, class_name, self.tag))
elif self.implicit:
if has_header:
print('%s %s %s tag %s (implicitly tagged)' % (prefix, method_name, class_name, self.tag))
elif has_header:
print('%s %s %s tag %s' % (prefix, method_name, class_name, self.tag))
print('%s Data: 0x%s' % (prefix, binascii.hexlify(self.contents or b'').decode('utf-8')))
|
def _basic_debug(prefix, self)
|
Prints out basic information about an Asn1Value object. Extracted for reuse
among different classes that customize the debug information.
:param prefix:
A unicode string of spaces to prefix output line with
:param self:
The object to print the debugging information about
| 2.680213 | 2.586795 | 1.036113 |
if 'tag_type' in params:
if params['tag_type'] == 'explicit':
params['explicit'] = (params.get('class', 2), params['tag'])
elif params['tag_type'] == 'implicit':
params['implicit'] = (params.get('class', 2), params['tag'])
del params['tag_type']
del params['tag']
if 'class' in params:
del params['class']
|
def _tag_type_to_explicit_implicit(params)
|
Converts old-style "tag_type" and "tag" params to "explicit" and "implicit"
:param params:
A dict of parameters to convert from tag_type/tag to explicit/implicit
| 2.270038 | 2.244686 | 1.011294 |
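The helper is module-private, but the conversion it performs is visible at the public constructor level. A sketch, assuming Asn1Value subclasses accept the new-style implicit/explicit params shown in the other rows:

from asn1crypto import core

# Legacy params {'tag_type': 'implicit', 'tag': 3} become {'implicit': (2, 3)},
# i.e. context class (2) with tag 3 -- the same thing the new-style keyword does
val = core.Integer(1, implicit=3)
print(val.class_, val.tag)   # expected: 2 3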
_tag_type_to_explicit_implicit(params)
retag = False
if 'implicit' not in params:
if value.implicit is not False:
retag = True
else:
if isinstance(params['implicit'], tuple):
class_, tag = params['implicit']
else:
tag = params['implicit']
class_ = 'context'
if value.implicit is False:
retag = True
elif value.class_ != CLASS_NAME_TO_NUM_MAP[class_] or value.tag != tag:
retag = True
if params.get('explicit') != value.explicit:
retag = True
if retag:
return value.retag(params)
return value
|
def _fix_tagging(value, params)
|
Checks if a value is properly tagged based on the spec, and re/untags as
necessary
:param value:
An Asn1Value object
:param params:
A dict of spec params
:return:
An Asn1Value that is properly tagged
| 4.349213 | 4.004079 | 1.086195 |
# Handle situations where the spec is not known at setup time
if spec is None:
return (None, None)
required_class = spec.class_
required_tag = spec.tag
_tag_type_to_explicit_implicit(params)
if 'explicit' in params:
if isinstance(params['explicit'], tuple):
required_class, required_tag = params['explicit']
else:
required_class = 2
required_tag = params['explicit']
elif 'implicit' in params:
if isinstance(params['implicit'], tuple):
required_class, required_tag = params['implicit']
else:
required_class = 2
required_tag = params['implicit']
if required_class is not None and not isinstance(required_class, int_types):
required_class = CLASS_NAME_TO_NUM_MAP[required_class]
required_class = params.get('class_', required_class)
required_tag = params.get('tag', required_tag)
return (required_class, required_tag)
|
def _build_id_tuple(params, spec)
|
Builds a 2-element tuple used to identify fields by grabbing the class_
and tag from an Asn1Value class and the params dict being passed to it
:param params:
A dict of params to pass to spec
:param spec:
An Asn1Value class
:return:
A 2-element integer tuple in the form (class_, tag)
| 2.86558 | 2.584351 | 1.10882 |
encoded_len = len(encoded_data)
info, new_pointer = _parse(encoded_data, encoded_len, pointer)
if strict and new_pointer != pointer + encoded_len:
extra_bytes = pointer + encoded_len - new_pointer
raise ValueError('Extra data - %d bytes of trailing data were provided' % extra_bytes)
return (_build(*info, spec=spec, spec_params=spec_params), new_pointer)
|
def _parse_build(encoded_data, pointer=0, spec=None, spec_params=None, strict=False)
|
Parses a byte string generically, or using a spec with optional params
:param encoded_data:
A byte string that contains BER-encoded data
:param pointer:
The index in the byte string to parse from
:param spec:
A class derived from Asn1Value that defines what class_ and tag the
value should have, and the semantics of the encoded value. The
return value will be of this type. If omitted, the encoded value
will be decoded using the standard universal tag based on the
encoded tag number.
:param spec_params:
A dict of params to pass to the spec object
:param strict:
A boolean indicating if trailing data should be forbidden - if so, a
ValueError will be raised when trailing data exists
:return:
A 2-element tuple:
- 0: An object of the type spec, or if not specified, a child of Asn1Value
- 1: An integer indicating how many bytes were consumed
| 4.033727 | 4.056108 | 0.994482 |
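_parse_build() is internal, but asn1crypto.core.load() exposes the same generic path. A minimal sketch:

from asn1crypto import core

encoded = core.Integer(42).dump()

# With no spec, the universal tag in the header (2, INTEGER) selects the class
value = core.load(encoded)
print(type(value).__name__, value.native)   # expected: Integer 42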
new_obj = self.__class__()
new_obj.class_ = self.class_
new_obj.tag = self.tag
new_obj.implicit = self.implicit
new_obj.explicit = self.explicit
return new_obj
|
def _new_instance(self)
|
Constructs a new copy of the current object, preserving any tagging
:return:
An Asn1Value object
| 4.535094 | 2.955637 | 1.534388 |
# This is required to preserve the old API
if not isinstance(tagging, dict):
tagging = {tagging: tag}
new_obj = self.__class__(explicit=tagging.get('explicit'), implicit=tagging.get('implicit'))
new_obj._copy(self, copy.deepcopy)
return new_obj
|
def retag(self, tagging, tag=None)
|
Copies the object, applying a new tagging to it
:param tagging:
A dict containing the keys "explicit" and "implicit". Legacy
API allows a unicode string of "implicit" or "explicit".
:param tag:
An integer tag number. Only used when tagging is a unicode string.
:return:
An Asn1Value object
| 5.718264 | 4.089169 | 1.398393 |
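A short sketch of retag() and untag() using the dict-based tagging API described above:

from asn1crypto import core

val = core.Integer(7)
tagged = val.retag({'implicit': 2})   # context-specific (class 2), tag 2
print(tagged.class_, tagged.tag)      # expected: 2 2

plain = tagged.untag()                # back to universal INTEGER
print(plain.class_, plain.tag)        # expected: 0 2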
new_obj = self.__class__()
new_obj._copy(self, copy.deepcopy)
return new_obj
|
def untag(self)
|
Copies the object, removing any special tagging from it
:return:
An Asn1Value object
| 11.570836 | 7.903033 | 1.464101 |
if self.__class__ != other.__class__:
raise TypeError(unwrap(
'''
Can not copy values from %s object to %s object
''',
type_name(other),
type_name(self)
))
self.contents = other.contents
self._native = copy_func(other._native)
|
def _copy(self, other, copy_func)
|
Copies the contents of another Asn1Value object to itself
:param other:
Another instance of the same class
:param copy_func:
A reference to copy.copy() or copy.deepcopy() to use when copying
lists, dicts and objects
| 4.523221 | 4.476891 | 1.010349 |
prefix = ' ' * nest_level
# This interacts with Any and moves the tag, implicit, explicit, _header,
# contents, _trailer to the parsed value so duplicate data isn't present
has_parsed = hasattr(self, 'parsed')
_basic_debug(prefix, self)
if has_parsed:
self.parsed.debug(nest_level + 2)
elif hasattr(self, 'chosen'):
self.chosen.debug(nest_level + 2)
else:
if _PY2 and isinstance(self.native, byte_cls):
print('%s Native: b%s' % (prefix, repr(self.native)))
else:
print('%s Native: %s' % (prefix, self.native))
|
def debug(self, nest_level=1)
|
Show the binary data and parsed data in a tree structure
| 7.42379 | 7.056044 | 1.052118 |
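A minimal sketch of debug(), which routes through _basic_debug() from the earlier row:

from asn1crypto import core

value = core.Integer.load(b'\x02\x01\x05')
# Prints the header bytes, class/method/tag and the native value as an
# indented tree on stdout
value.debug()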
contents = self.contents
if self._header is None or force:
if isinstance(self, Constructable) and self._indefinite:
self.method = 0
header = _dump_header(self.class_, self.method, self.tag, self.contents)
if self.explicit is not None:
for class_, tag in self.explicit:
header = _dump_header(class_, 1, tag, header + self.contents) + header
self._header = header
self._trailer = b''
return self._header + contents
|
def dump(self, force=False)
|
Encodes the value using DER
:param force:
If the encoded contents already exist, clear them and regenerate
to ensure they are in DER format instead of BER format
:return:
A byte string of the DER-encoded value
| 5.757895 | 5.427898 | 1.060796 |
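A sketch of dump() and the force flag:

from asn1crypto import core

value = core.Integer(300)
der = value.dump()

# force=True throws away any cached header and re-encodes, which is how a
# value read as BER can be normalized to DER
reloaded = core.Integer.load(der)
assert reloaded.dump(force=True) == der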
cls = self.__class__
if cls._map is None or cls._reverse_map is not None:
return
cls._reverse_map = {}
for key, value in cls._map.items():
cls._reverse_map[value] = key
|
def _setup(self)
|
Generates _reverse_map from _map
| 3.405519 | 2.33429 | 1.45891 |
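A hypothetical subclass showing what the generated _reverse_map is used for; the OID value is the real SHA-256 identifier, but the class name and mapping here are illustrative assumptions:

from asn1crypto import core

class HashAlgorithmId(core.ObjectIdentifier):
    _map = {
        '2.16.840.1.101.3.4.2.1': 'sha256',
    }

# _map resolves dotted notation to the friendly name for .native, and the
# reverse map built by _setup() lets the friendly name be used on input
oid = HashAlgorithmId('sha256')
print(oid.dotted)   # expected: 2.16.840.1.101.3.4.2.1
print(oid.native)   # expected: sha256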
if other_class.tag != self.__class__.tag:
raise TypeError(unwrap(
'''
Can not convert a value from %s object to %s object since they
use different tags: %d versus %d
''',
type_name(other_class),
type_name(self),
other_class.tag,
self.__class__.tag
))
new_obj = other_class()
new_obj.class_ = self.class_
new_obj.implicit = self.implicit
new_obj.explicit = self.explicit
new_obj._header = self._header
new_obj.contents = self.contents
new_obj._trailer = self._trailer
if isinstance(self, Constructable):
new_obj.method = self.method
new_obj._indefinite = self._indefinite
return new_obj
|
def cast(self, other_class)
|
Converts the current object into an object of a different class. The
new class must use the ASN.1 encoding for the value.
:param other_class:
The class to instantiate the new object from
:return:
An instance of the type other_class
| 3.757854 | 3.308592 | 1.135787 |
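A minimal sketch of cast(); MyOctetString is a hypothetical subclass that keeps the universal OCTET STRING tag, so the tag check passes:

from asn1crypto import core

class MyOctetString(core.OctetString):
    pass

val = core.OctetString(b'\x01\x02')
my_val = val.cast(MyOctetString)      # allowed: both classes use tag 4
print(type(my_val).__name__, my_val.native)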
if not self._indefinite:
return self._as_chunk()
pointer = self._chunks_offset
contents_len = len(self.contents)
output = None
while pointer < contents_len:
# We pass the current class as the spec so content semantics are preserved
sub_value, pointer = _parse_build(self.contents, pointer, spec=self.__class__)
if output is None:
output = sub_value._merge_chunks()
else:
output += sub_value._merge_chunks()
if output is None:
return self._as_chunk()
return output
|
def _merge_chunks(self)
|
:return:
A concatenation of the native values of the contained chunks
| 5.968594 | 5.727485 | 1.042097 |
if self._chunks_offset == 0:
return self.contents
return self.contents[self._chunks_offset:]
|
def _as_chunk(self)
|
A method to return a chunk of data that can be combined for
constructed method values
:return:
A native Python value that can be added together. Examples include
byte strings, unicode strings or tuples.
| 7.992477 | 6.725228 | 1.188432 |
super(Constructable, self)._copy(other, copy_func)
self.method = other.method
self._indefinite = other._indefinite
|
def _copy(self, other, copy_func)
|
Copies the contents of another Constructable object to itself
:param other:
Another instance of the same class
:param copy_func:
A reference to copy.copy() or copy.deepcopy() to use when copying
lists, dicts and objects
| 6.600653 | 5.857347 | 1.126901 |
if self._parsed is None or self._parsed[1:3] != (spec, spec_params):
try:
passed_params = spec_params or {}
_tag_type_to_explicit_implicit(passed_params)
if self.explicit is not None:
if 'explicit' in passed_params:
passed_params['explicit'] = self.explicit + passed_params['explicit']
else:
passed_params['explicit'] = self.explicit
contents = self._header + self.contents + self._trailer
parsed_value, _ = _parse_build(
contents,
spec=spec,
spec_params=passed_params
)
self._parsed = (parsed_value, spec, spec_params)
# Once we've parsed the Any value, clear any attributes from this object
# since they are now duplicated in the parsed value
self.tag = None
self.explicit = None
self.implicit = False
self._header = b''
self.contents = contents
self._trailer = b''
except (ValueError, TypeError) as e:
args = e.args[1:]
e.args = (e.args[0] + '\n while parsing %s' % type_name(self),) + args
raise e
return self._parsed[0]
|
def parse(self, spec=None, spec_params=None)
|
Parses the contents generically, or using a spec with optional params
:param spec:
A class derived from Asn1Value that defines what class_ and tag the
value should have, and the semantics of the encoded value. The
return value will be of this type. If omitted, the encoded value
will be decoded using the standard universal tag based on the
encoded tag number.
:param spec_params:
A dict of params to pass to the spec object
:return:
An object of the type spec, or if not present, a child of Asn1Value
| 4.249132 | 3.912024 | 1.086172 |
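A short sketch of parsing an Any value with an explicit spec:

from asn1crypto import core

wrapped = core.Any.load(core.Integer(10).dump())

# Without a spec the contents stay opaque; with a spec they are decoded
# and returned as that type
inner = wrapped.parse(spec=core.Integer)
print(inner.native)   # expected: 10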
super(Any, self)._copy(other, copy_func)
self._parsed = copy_func(other._parsed)
|
def _copy(self, other, copy_func)
|
Copies the contents of another Any object to itself
:param other:
Another instance of the same class
:param copy_func:
A reference to copy.copy() or copy.deepcopy() to use when copying
lists, dicts and objects
| 6.979352 | 5.633157 | 1.238977 |
if self._parsed is None:
self.parse()
return self._parsed[0].dump(force=force)
|
def dump(self, force=False)
|
Encodes the value using DER
:param force:
If the encoded contents already exist, clear them and regenerate
to ensure they are in DER format instead of BER format
:return:
A byte string of the DER-encoded value
| 6.677338 | 5.422068 | 1.231511 |
if not isinstance(encoded_data, byte_cls):
raise TypeError('encoded_data must be a byte string, not %s' % type_name(encoded_data))
value, _ = _parse_build(encoded_data, spec=cls, spec_params=kwargs, strict=strict)
return value
|
def load(cls, encoded_data, strict=False, **kwargs)
|
Loads a BER/DER-encoded byte string using the current class as the spec
:param encoded_data:
A byte string of BER or DER encoded data
:param strict:
A boolean indicating if trailing data should be forbidden - if so, a
ValueError will be raised when trailing data exists
:return:
An instance of the current class
| 4.726273 | 4.575895 | 1.032863 |
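A minimal sketch of the strict flag on load():

from asn1crypto import core

encoded = core.Integer(5).dump()
print(core.Integer.load(encoded).native)   # expected: 5

try:
    # One extra byte after the DER value is rejected when strict=True
    core.Integer.load(encoded + b'\x00', strict=True)
except ValueError as e:
    print('rejected:', e)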
cls = self.__class__
cls._id_map = {}
cls._name_map = {}
for index, info in enumerate(cls._alternatives):
if len(info) < 3:
info = info + ({},)
cls._alternatives[index] = info
id_ = _build_id_tuple(info[2], info[1])
cls._id_map[id_] = index
cls._name_map[info[0]] = index
|
def _setup(self)
|
Generates _id_map from _alternatives to allow validating contents
| 3.691597 | 2.898061 | 1.273817 |
if not self._name:
self._name = self._alternatives[self._choice][0]
return self._name
|
def name(self)
|
:return:
A unicode string of the field name of the chosen alternative
| 6.087773 | 3.907107 | 1.558128 |
if self._parsed is not None:
return self._parsed
try:
_, spec, params = self._alternatives[self._choice]
self._parsed, _ = _parse_build(self._contents, spec=spec, spec_params=params)
except (ValueError, TypeError) as e:
args = e.args[1:]
e.args = (e.args[0] + '\n while parsing %s' % type_name(self),) + args
raise e
|
def parse(self)
|
Parses the detected alternative
:return:
An Asn1Value object of the chosen alternative
| 5.693285 | 4.964105 | 1.146891 |
id_ = (class_, tag)
if self.explicit is not None:
if self.explicit[-1] != id_:
raise ValueError(unwrap(
'''
%s was explicitly tagged, but the value provided does not
match the class and tag
''',
type_name(self)
))
((class_, _, tag, _, _, _), _) = _parse(contents, len(contents))
id_ = (class_, tag)
if id_ in self._id_map:
self._choice = self._id_map[id_]
return
# This means the Choice was implicitly tagged
if self.class_ is not None and self.tag is not None:
if len(self._alternatives) > 1:
raise ValueError(unwrap(
'''
%s was implicitly tagged, but more than one alternative
exists
''',
type_name(self)
))
if id_ == (self.class_, self.tag):
self._choice = 0
return
asn1 = self._format_class_tag(class_, tag)
asn1s = [self._format_class_tag(pair[0], pair[1]) for pair in self._id_map]
raise ValueError(unwrap(
'''
Value %s did not match the class and tag of any of the alternatives
in %s: %s
''',
asn1,
type_name(self),
', '.join(asn1s)
))
|
def validate(self, class_, tag, contents)
|
Ensures that the class and tag specified exist as an alternative
:param class_:
The integer class_ from the encoded value header
:param tag:
The integer tag from the encoded value header
:param contents:
A byte string of the contents of the value - used when the object
is explicitly tagged
:raises:
ValueError - when value is not a valid alternative
| 3.657295 | 3.382966 | 1.081091 |
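A hypothetical Choice showing how validate() picks an alternative from the header; the class and field names are illustrative:

from asn1crypto import core

class IntOrBytes(core.Choice):
    _alternatives = [
        ('number', core.Integer),      # universal tag 2
        ('data', core.OctetString),    # universal tag 4
    ]

value = IntOrBytes.load(core.Integer(9).dump())
print(value.name)            # expected: number
print(value.chosen.native)   # expected: 9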