diff --git a/ckpts/universal/global_step80/zero/13.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step80/zero/13.attention.dense.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..5d63a67af9dce74be7f4ec16847a3baaf175c759 --- /dev/null +++ b/ckpts/universal/global_step80/zero/13.attention.dense.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a34a2a1580c149f6417f3440f8beceffbed628b704f54e49d68fc2c77ed64cb +size 16778396 diff --git a/ckpts/universal/global_step80/zero/13.attention.dense.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/13.attention.dense.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..bd1f167cb412a752d5394b9b799a28f62b5cb4f4 --- /dev/null +++ b/ckpts/universal/global_step80/zero/13.attention.dense.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b4fafaee522fc5fcc3dfc2dabebd9466883145dd4d635290521bbba1996c1aef +size 16778411 diff --git a/ckpts/universal/global_step80/zero/16.input_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step80/zero/16.input_layernorm.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..93b4a42910e849e73fe46a80a3fa249cdb189262 --- /dev/null +++ b/ckpts/universal/global_step80/zero/16.input_layernorm.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eca5dfc3b12c771303feef09f8cd6db30d4165806844fd473e3a20dc6be0a7b1 +size 9372 diff --git a/ckpts/universal/global_step80/zero/23.post_attention_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/23.post_attention_layernorm.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..532777d855e7e0c7062e7837c8eee2174a8f437e --- /dev/null +++ b/ckpts/universal/global_step80/zero/23.post_attention_layernorm.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d394e9b1715caa08708a8828335cf2c1c88e9586f3f89d4ee0c19c78b1e40703 +size 9387 diff --git a/ckpts/universal/global_step80/zero/4.attention.query_key_value.weight/exp_avg.pt b/ckpts/universal/global_step80/zero/4.attention.query_key_value.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..70a1de82144da28809c48432bd853ec9c3b948db --- /dev/null +++ b/ckpts/universal/global_step80/zero/4.attention.query_key_value.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa5fddb0f52cd4a6f1e3468729a05e715060803e77ea16f8121fc3797f137f14 +size 50332828 diff --git a/ckpts/universal/global_step80/zero/4.attention.query_key_value.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/4.attention.query_key_value.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..f21d0a501ed0329ece30596a6e00798cb84283f3 --- /dev/null +++ b/ckpts/universal/global_step80/zero/4.attention.query_key_value.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f43dec9500120252ac82fd3d09f23fbfe9c57a4c4ce593fbb88bd5b7d33f268c +size 50332843 diff --git a/ckpts/universal/global_step80/zero/4.attention.query_key_value.weight/fp32.pt b/ckpts/universal/global_step80/zero/4.attention.query_key_value.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..8950953c618252c55a1f4b5993581f0e715ee14b --- /dev/null +++ 
b/ckpts/universal/global_step80/zero/4.attention.query_key_value.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e5344df11a25c90747bd21f03b7b51c5486d64382559e526f6e13caf0724e1d8 +size 50332749 diff --git a/ckpts/universal/global_step80/zero/9.input_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step80/zero/9.input_layernorm.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..034bb15b4e78776f61a298ddb52c85cd1f1510b2 --- /dev/null +++ b/ckpts/universal/global_step80/zero/9.input_layernorm.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:23fde5177554e6fda2b0638bd17ad1560f943b9a1b74fe3f4ccbf0646e95a8c0 +size 9372 diff --git a/ckpts/universal/global_step80/zero/9.input_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/9.input_layernorm.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..10e0fd14345d2eeb42a77dd6efc90e960f821637 --- /dev/null +++ b/ckpts/universal/global_step80/zero/9.input_layernorm.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4306607fd45995d8104ed028e9c67223da4e014b7a1232986ef7e0bdc4413d40 +size 9387 diff --git a/ckpts/universal/global_step80/zero/9.input_layernorm.weight/fp32.pt b/ckpts/universal/global_step80/zero/9.input_layernorm.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..0126f621b104c75c77b4f09da7ba87cd0181ced2 --- /dev/null +++ b/ckpts/universal/global_step80/zero/9.input_layernorm.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:11a0f04e1a72a1746f7fe46065aaae43d7bb5fd62891637081595b5e45b55fa6 +size 9293 diff --git a/venv/lib/python3.10/site-packages/_yaml/__init__.py b/venv/lib/python3.10/site-packages/_yaml/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7baa8c4b68127d5cdf0be9a799429e61347c2694 --- /dev/null +++ b/venv/lib/python3.10/site-packages/_yaml/__init__.py @@ -0,0 +1,33 @@ +# This is a stub package designed to roughly emulate the _yaml +# extension module, which previously existed as a standalone module +# and has been moved into the `yaml` package namespace. +# It does not perfectly mimic its old counterpart, but should get +# close enough for anyone who's relying on it even when they shouldn't. +import yaml + +# in some circumstances, the yaml module we imported may be from a different version, so we need +# to tread carefully when poking at it here (it may not have the attributes we expect) +if not getattr(yaml, '__with_libyaml__', False): + from sys import version_info + + exc = ModuleNotFoundError if version_info >= (3, 6) else ImportError + raise exc("No module named '_yaml'") +else: + from yaml._yaml import * + import warnings + warnings.warn( + 'The _yaml extension module is now located at yaml._yaml' + ' and its location is subject to change. To use the' + ' LibYAML-based parser and emitter, import from `yaml`:' + ' `from yaml import CLoader as Loader, CDumper as Dumper`.', + DeprecationWarning + ) + del warnings + # Don't `del yaml` here because yaml is actually an existing + # namespace member of _yaml. + +__name__ = '_yaml' +# If the module is top-level (i.e. not a part of any specific package) +# then the attribute should be set to ''. 
+# https://docs.python.org/3.8/library/types.html +__package__ = '' diff --git a/venv/lib/python3.10/site-packages/_yaml/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/_yaml/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f82bbc5bfd86b878d3b588a6b0e2522d926c5a4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/_yaml/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/cpuinfo/__init__.py b/venv/lib/python3.10/site-packages/cpuinfo/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f0ae1cedef2202096faadc3ff0dd0614f8adf394 --- /dev/null +++ b/venv/lib/python3.10/site-packages/cpuinfo/__init__.py @@ -0,0 +1,5 @@ + +import sys +from cpuinfo.cpuinfo import * + + diff --git a/venv/lib/python3.10/site-packages/cpuinfo/__main__.py b/venv/lib/python3.10/site-packages/cpuinfo/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..09ebf764121cc9bae49421535e56d0c5a9394290 --- /dev/null +++ b/venv/lib/python3.10/site-packages/cpuinfo/__main__.py @@ -0,0 +1,5 @@ + +import cpuinfo + +cpuinfo.main() + diff --git a/venv/lib/python3.10/site-packages/cpuinfo/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/cpuinfo/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e73f14afca4c2fb72fb3399d4e58a5f898a749c Binary files /dev/null and b/venv/lib/python3.10/site-packages/cpuinfo/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/cpuinfo/__pycache__/__main__.cpython-310.pyc b/venv/lib/python3.10/site-packages/cpuinfo/__pycache__/__main__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ee62b1bbf18979f15e07efc5768e71b9b14c957 Binary files /dev/null and b/venv/lib/python3.10/site-packages/cpuinfo/__pycache__/__main__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/cpuinfo/__pycache__/cpuinfo.cpython-310.pyc b/venv/lib/python3.10/site-packages/cpuinfo/__pycache__/cpuinfo.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c593693efa35ce9830258d0cb5410bd551f95c9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/cpuinfo/__pycache__/cpuinfo.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/cpuinfo/cpuinfo.py b/venv/lib/python3.10/site-packages/cpuinfo/cpuinfo.py new file mode 100644 index 0000000000000000000000000000000000000000..ea2f90e397404bb200d8c74a0331b9ede5c5b106 --- /dev/null +++ b/venv/lib/python3.10/site-packages/cpuinfo/cpuinfo.py @@ -0,0 +1,2827 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 -*- + +# Copyright (c) 2014-2022 Matthew Brennan Jones +# Py-cpuinfo gets CPU info with pure Python +# It uses the MIT License +# It is hosted at: https://github.com/workhorsy/py-cpuinfo +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +CPUINFO_VERSION = (9, 0, 0) +CPUINFO_VERSION_STRING = '.'.join([str(n) for n in CPUINFO_VERSION]) + +import os, sys +import platform +import multiprocessing +import ctypes + + +CAN_CALL_CPUID_IN_SUBPROCESS = True + +g_trace = None + + +class Trace(object): + def __init__(self, is_active, is_stored_in_string): + self._is_active = is_active + if not self._is_active: + return + + from datetime import datetime + from io import StringIO + + if is_stored_in_string: + self._output = StringIO() + else: + date = datetime.now().strftime("%Y-%m-%d_%H-%M-%S-%f") + self._output = open('cpuinfo_trace_{0}.trace'.format(date), 'w') + + self._stdout = StringIO() + self._stderr = StringIO() + self._err = None + + def header(self, msg): + if not self._is_active: return + + from inspect import stack + frame = stack()[1] + file = frame[1] + line = frame[2] + self._output.write("{0} ({1} {2})\n".format(msg, file, line)) + self._output.flush() + + def success(self): + if not self._is_active: return + + from inspect import stack + frame = stack()[1] + file = frame[1] + line = frame[2] + + self._output.write("Success ... ({0} {1})\n\n".format(file, line)) + self._output.flush() + + def fail(self, msg): + if not self._is_active: return + + from inspect import stack + frame = stack()[1] + file = frame[1] + line = frame[2] + + if isinstance(msg, str): + msg = ''.join(['\t' + line for line in msg.split('\n')]) + '\n' + + self._output.write(msg) + self._output.write("Failed ... ({0} {1})\n\n".format(file, line)) + self._output.flush() + elif isinstance(msg, Exception): + from traceback import format_exc + err_string = format_exc() + self._output.write("\tFailed ... 
({0} {1})\n".format(file, line)) + self._output.write(''.join(['\t\t{0}\n'.format(n) for n in err_string.split('\n')]) + '\n') + self._output.flush() + + def command_header(self, msg): + if not self._is_active: return + + from inspect import stack + frame = stack()[3] + file = frame[1] + line = frame[2] + self._output.write("\t{0} ({1} {2})\n".format(msg, file, line)) + self._output.flush() + + def command_output(self, msg, output): + if not self._is_active: return + + self._output.write("\t\t{0}\n".format(msg)) + self._output.write(''.join(['\t\t\t{0}\n'.format(n) for n in output.split('\n')]) + '\n') + self._output.flush() + + def keys(self, keys, info, new_info): + if not self._is_active: return + + from inspect import stack + frame = stack()[2] + file = frame[1] + line = frame[2] + + # List updated keys + self._output.write("\tChanged keys ({0} {1})\n".format(file, line)) + changed_keys = [key for key in keys if key in info and key in new_info and info[key] != new_info[key]] + if changed_keys: + for key in changed_keys: + self._output.write('\t\t{0}: {1} to {2}\n'.format(key, info[key], new_info[key])) + else: + self._output.write('\t\tNone\n') + + # List new keys + self._output.write("\tNew keys ({0} {1})\n".format(file, line)) + new_keys = [key for key in keys if key in new_info and key not in info] + if new_keys: + for key in new_keys: + self._output.write('\t\t{0}: {1}\n'.format(key, new_info[key])) + else: + self._output.write('\t\tNone\n') + + self._output.write('\n') + self._output.flush() + + def write(self, msg): + if not self._is_active: return + + self._output.write(msg + '\n') + self._output.flush() + + def to_dict(self, info, is_fail): + return { + 'output' : self._output.getvalue(), + 'stdout' : self._stdout.getvalue(), + 'stderr' : self._stderr.getvalue(), + 'info' : info, + 'err' : self._err, + 'is_fail' : is_fail + } + +class DataSource(object): + bits = platform.architecture()[0] + cpu_count = multiprocessing.cpu_count() + is_windows = platform.system().lower() == 'windows' + arch_string_raw = platform.machine() + uname_string_raw = platform.uname()[5] + can_cpuid = True + + @staticmethod + def has_proc_cpuinfo(): + return os.path.exists('/proc/cpuinfo') + + @staticmethod + def has_dmesg(): + return len(_program_paths('dmesg')) > 0 + + @staticmethod + def has_var_run_dmesg_boot(): + uname = platform.system().strip().strip('"').strip("'").strip().lower() + return 'linux' in uname and os.path.exists('/var/run/dmesg.boot') + + @staticmethod + def has_cpufreq_info(): + return len(_program_paths('cpufreq-info')) > 0 + + @staticmethod + def has_sestatus(): + return len(_program_paths('sestatus')) > 0 + + @staticmethod + def has_sysctl(): + return len(_program_paths('sysctl')) > 0 + + @staticmethod + def has_isainfo(): + return len(_program_paths('isainfo')) > 0 + + @staticmethod + def has_kstat(): + return len(_program_paths('kstat')) > 0 + + @staticmethod + def has_sysinfo(): + uname = platform.system().strip().strip('"').strip("'").strip().lower() + is_beos = 'beos' in uname or 'haiku' in uname + return is_beos and len(_program_paths('sysinfo')) > 0 + + @staticmethod + def has_lscpu(): + return len(_program_paths('lscpu')) > 0 + + @staticmethod + def has_ibm_pa_features(): + return len(_program_paths('lsprop')) > 0 + + @staticmethod + def has_wmic(): + returncode, output = _run_and_get_stdout(['wmic', 'os', 'get', 'Version']) + return returncode == 0 and len(output) > 0 + + @staticmethod + def cat_proc_cpuinfo(): + return _run_and_get_stdout(['cat', '/proc/cpuinfo']) + + 
@staticmethod + def cpufreq_info(): + return _run_and_get_stdout(['cpufreq-info']) + + @staticmethod + def sestatus_b(): + return _run_and_get_stdout(['sestatus', '-b']) + + @staticmethod + def dmesg_a(): + return _run_and_get_stdout(['dmesg', '-a']) + + @staticmethod + def cat_var_run_dmesg_boot(): + return _run_and_get_stdout(['cat', '/var/run/dmesg.boot']) + + @staticmethod + def sysctl_machdep_cpu_hw_cpufrequency(): + return _run_and_get_stdout(['sysctl', 'machdep.cpu', 'hw.cpufrequency']) + + @staticmethod + def isainfo_vb(): + return _run_and_get_stdout(['isainfo', '-vb']) + + @staticmethod + def kstat_m_cpu_info(): + return _run_and_get_stdout(['kstat', '-m', 'cpu_info']) + + @staticmethod + def sysinfo_cpu(): + return _run_and_get_stdout(['sysinfo', '-cpu']) + + @staticmethod + def lscpu(): + return _run_and_get_stdout(['lscpu']) + + @staticmethod + def ibm_pa_features(): + import glob + + ibm_features = glob.glob('/proc/device-tree/cpus/*/ibm,pa-features') + if ibm_features: + return _run_and_get_stdout(['lsprop', ibm_features[0]]) + + @staticmethod + def wmic_cpu(): + return _run_and_get_stdout(['wmic', 'cpu', 'get', 'Name,CurrentClockSpeed,L2CacheSize,L3CacheSize,Description,Caption,Manufacturer', '/format:list']) + + @staticmethod + def winreg_processor_brand(): + processor_brand = _read_windows_registry_key(r"Hardware\Description\System\CentralProcessor\0", "ProcessorNameString") + return processor_brand.strip() + + @staticmethod + def winreg_vendor_id_raw(): + vendor_id_raw = _read_windows_registry_key(r"Hardware\Description\System\CentralProcessor\0", "VendorIdentifier") + return vendor_id_raw + + @staticmethod + def winreg_arch_string_raw(): + arch_string_raw = _read_windows_registry_key(r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment", "PROCESSOR_ARCHITECTURE") + return arch_string_raw + + @staticmethod + def winreg_hz_actual(): + hz_actual = _read_windows_registry_key(r"Hardware\Description\System\CentralProcessor\0", "~Mhz") + hz_actual = _to_decimal_string(hz_actual) + return hz_actual + + @staticmethod + def winreg_feature_bits(): + feature_bits = _read_windows_registry_key(r"Hardware\Description\System\CentralProcessor\0", "FeatureSet") + return feature_bits + + +def _program_paths(program_name): + paths = [] + exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep)) + for p in os.environ['PATH'].split(os.pathsep): + p = os.path.join(p, program_name) + if os.access(p, os.X_OK): + paths.append(p) + for e in exts: + pext = p + e + if os.access(pext, os.X_OK): + paths.append(pext) + return paths + +def _run_and_get_stdout(command, pipe_command=None): + from subprocess import Popen, PIPE + + g_trace.command_header('Running command "' + ' '.join(command) + '" ...') + + # Run the command normally + if not pipe_command: + p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE) + # Run the command and pipe it into another command + else: + p2 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE) + p1 = Popen(pipe_command, stdin=p2.stdout, stdout=PIPE, stderr=PIPE) + p2.stdout.close() + + # Get the stdout and stderr + stdout_output, stderr_output = p1.communicate() + stdout_output = stdout_output.decode(encoding='UTF-8') + stderr_output = stderr_output.decode(encoding='UTF-8') + + # Send the result to the logger + g_trace.command_output('return code:', str(p1.returncode)) + g_trace.command_output('stdout:', stdout_output) + + # Return the return code and stdout + return p1.returncode, stdout_output + +def _read_windows_registry_key(key_name, 
field_name): + g_trace.command_header('Reading Registry key "{0}" field "{1}" ...'.format(key_name, field_name)) + + try: + import _winreg as winreg + except ImportError as err: + try: + import winreg + except ImportError as err: + pass + + key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, key_name) + value = winreg.QueryValueEx(key, field_name)[0] + winreg.CloseKey(key) + g_trace.command_output('value:', str(value)) + return value + +# Make sure we are running on a supported system +def _check_arch(): + arch, bits = _parse_arch(DataSource.arch_string_raw) + if not arch in ['X86_32', 'X86_64', 'ARM_7', 'ARM_8', + 'PPC_64', 'S390X', 'MIPS_32', 'MIPS_64', + "RISCV_32", "RISCV_64"]: + raise Exception("py-cpuinfo currently only works on X86 " + "and some ARM/PPC/S390X/MIPS/RISCV CPUs.") + +def _obj_to_b64(thing): + import pickle + import base64 + + a = thing + b = pickle.dumps(a) + c = base64.b64encode(b) + d = c.decode('utf8') + return d + +def _b64_to_obj(thing): + import pickle + import base64 + + try: + a = base64.b64decode(thing) + b = pickle.loads(a) + return b + except Exception: + return {} + +def _utf_to_str(input): + if isinstance(input, list): + return [_utf_to_str(element) for element in input] + elif isinstance(input, dict): + return {_utf_to_str(key): _utf_to_str(value) + for key, value in input.items()} + else: + return input + +def _copy_new_fields(info, new_info): + keys = [ + 'vendor_id_raw', 'hardware_raw', 'brand_raw', 'hz_advertised_friendly', 'hz_actual_friendly', + 'hz_advertised', 'hz_actual', 'arch', 'bits', 'count', + 'arch_string_raw', 'uname_string_raw', + 'l2_cache_size', 'l2_cache_line_size', 'l2_cache_associativity', + 'stepping', 'model', 'family', + 'processor_type', 'flags', + 'l3_cache_size', 'l1_data_cache_size', 'l1_instruction_cache_size' + ] + + g_trace.keys(keys, info, new_info) + + # Update the keys with new values + for key in keys: + if new_info.get(key, None) and not info.get(key, None): + info[key] = new_info[key] + elif key == 'flags' and new_info.get('flags'): + for f in new_info['flags']: + if f not in info['flags']: info['flags'].append(f) + info['flags'].sort() + +def _get_field_actual(cant_be_number, raw_string, field_names): + for line in raw_string.splitlines(): + for field_name in field_names: + field_name = field_name.lower() + if ':' in line: + left, right = line.split(':', 1) + left = left.strip().lower() + right = right.strip() + if left == field_name and len(right) > 0: + if cant_be_number: + if not right.isdigit(): + return right + else: + return right + + return None + +def _get_field(cant_be_number, raw_string, convert_to, default_value, *field_names): + retval = _get_field_actual(cant_be_number, raw_string, field_names) + + # Convert the return value + if retval and convert_to: + try: + retval = convert_to(retval) + except Exception: + retval = default_value + + # Return the default if there is no return value + if retval is None: + retval = default_value + + return retval + +def _to_decimal_string(ticks): + try: + # Convert to string + ticks = '{0}'.format(ticks) + # Sometimes ',' is used as a decimal separator + ticks = ticks.replace(',', '.') + + # Strip off non numbers and decimal places + ticks = "".join(n for n in ticks if n.isdigit() or n=='.').strip() + if ticks == '': + ticks = '0' + + # Add decimal if missing + if '.' 
not in ticks: + ticks = '{0}.0'.format(ticks) + + # Remove trailing zeros + ticks = ticks.rstrip('0') + + # Add one trailing zero for empty right side + if ticks.endswith('.'): + ticks = '{0}0'.format(ticks) + + # Make sure the number can be converted to a float + ticks = float(ticks) + ticks = '{0}'.format(ticks) + return ticks + except Exception: + return '0.0' + +def _hz_short_to_full(ticks, scale): + try: + # Make sure the number can be converted to a float + ticks = float(ticks) + ticks = '{0}'.format(ticks) + + # Scale the numbers + hz = ticks.lstrip('0') + old_index = hz.index('.') + hz = hz.replace('.', '') + hz = hz.ljust(scale + old_index+1, '0') + new_index = old_index + scale + hz = '{0}.{1}'.format(hz[:new_index], hz[new_index:]) + left, right = hz.split('.') + left, right = int(left), int(right) + return (left, right) + except Exception: + return (0, 0) + +def _hz_friendly_to_full(hz_string): + try: + hz_string = hz_string.strip().lower() + hz, scale = (None, None) + + if hz_string.endswith('ghz'): + scale = 9 + elif hz_string.endswith('mhz'): + scale = 6 + elif hz_string.endswith('hz'): + scale = 0 + + hz = "".join(n for n in hz_string if n.isdigit() or n=='.').strip() + if not '.' in hz: + hz += '.0' + + hz, scale = _hz_short_to_full(hz, scale) + + return (hz, scale) + except Exception: + return (0, 0) + +def _hz_short_to_friendly(ticks, scale): + try: + # Get the raw Hz as a string + left, right = _hz_short_to_full(ticks, scale) + result = '{0}.{1}'.format(left, right) + + # Get the location of the dot, and remove said dot + dot_index = result.index('.') + result = result.replace('.', '') + + # Get the Hz symbol and scale + symbol = "Hz" + scale = 0 + if dot_index > 9: + symbol = "GHz" + scale = 9 + elif dot_index > 6: + symbol = "MHz" + scale = 6 + elif dot_index > 3: + symbol = "KHz" + scale = 3 + + # Get the Hz with the dot at the new scaled point + result = '{0}.{1}'.format(result[:-scale-1], result[-scale-1:]) + + # Format the ticks to have 4 numbers after the decimal + # and remove any superfluous zeroes. 
+ result = '{0:.4f} {1}'.format(float(result), symbol) + result = result.rstrip('0') + return result + except Exception: + return '0.0000 Hz' + +def _to_friendly_bytes(input): + import re + + if not input: + return input + input = "{0}".format(input) + + formats = { + r"^[0-9]+B$" : 'B', + r"^[0-9]+K$" : 'KB', + r"^[0-9]+M$" : 'MB', + r"^[0-9]+G$" : 'GB' + } + + for pattern, friendly_size in formats.items(): + if re.match(pattern, input): + return "{0} {1}".format(input[ : -1].strip(), friendly_size) + + return input + +def _friendly_bytes_to_int(friendly_bytes): + input = friendly_bytes.lower() + + formats = [ + {'gib' : 1024 * 1024 * 1024}, + {'mib' : 1024 * 1024}, + {'kib' : 1024}, + + {'gb' : 1024 * 1024 * 1024}, + {'mb' : 1024 * 1024}, + {'kb' : 1024}, + + {'g' : 1024 * 1024 * 1024}, + {'m' : 1024 * 1024}, + {'k' : 1024}, + {'b' : 1}, + ] + + try: + for entry in formats: + pattern = list(entry.keys())[0] + multiplier = list(entry.values())[0] + if input.endswith(pattern): + return int(input.split(pattern)[0].strip()) * multiplier + + except Exception as err: + pass + + return friendly_bytes + +def _parse_cpu_brand_string(cpu_string): + # Just return 0 if the processor brand does not have the Hz + if not 'hz' in cpu_string.lower(): + return ('0.0', 0) + + hz = cpu_string.lower() + scale = 0 + + if hz.endswith('mhz'): + scale = 6 + elif hz.endswith('ghz'): + scale = 9 + if '@' in hz: + hz = hz.split('@')[1] + else: + hz = hz.rsplit(None, 1)[1] + + hz = hz.rstrip('mhz').rstrip('ghz').strip() + hz = _to_decimal_string(hz) + + return (hz, scale) + +def _parse_cpu_brand_string_dx(cpu_string): + import re + + # Find all the strings inside brackets () + starts = [m.start() for m in re.finditer(r"\(", cpu_string)] + ends = [m.start() for m in re.finditer(r"\)", cpu_string)] + insides = {k: v for k, v in zip(starts, ends)} + insides = [cpu_string[start+1 : end] for start, end in insides.items()] + + # Find all the fields + vendor_id, stepping, model, family = (None, None, None, None) + for inside in insides: + for pair in inside.split(','): + pair = [n.strip() for n in pair.split(':')] + if len(pair) > 1: + name, value = pair[0], pair[1] + if name == 'origin': + vendor_id = value.strip('"') + elif name == 'stepping': + stepping = int(value.lstrip('0x'), 16) + elif name == 'model': + model = int(value.lstrip('0x'), 16) + elif name in ['fam', 'family']: + family = int(value.lstrip('0x'), 16) + + # Find the Processor Brand + # Strip off extra strings in brackets at end + brand = cpu_string.strip() + is_working = True + while is_working: + is_working = False + for inside in insides: + full = "({0})".format(inside) + if brand.endswith(full): + brand = brand[ :-len(full)].strip() + is_working = True + + # Find the Hz in the brand string + hz_brand, scale = _parse_cpu_brand_string(brand) + + # Find Hz inside brackets () after the brand string + if hz_brand == '0.0': + for inside in insides: + hz = inside + for entry in ['GHz', 'MHz', 'Hz']: + if entry in hz: + hz = "CPU @ " + hz[ : hz.find(entry) + len(entry)] + hz_brand, scale = _parse_cpu_brand_string(hz) + break + + return (hz_brand, scale, brand, vendor_id, stepping, model, family) + +def _parse_dmesg_output(output): + try: + # Get all the dmesg lines that might contain a CPU string + lines = output.split(' CPU0:')[1:] + \ + output.split(' CPU1:')[1:] + \ + output.split(' CPU:')[1:] + \ + output.split('\nCPU0:')[1:] + \ + output.split('\nCPU1:')[1:] + \ + output.split('\nCPU:')[1:] + lines = [l.split('\n')[0].strip() for l in lines] + + # Convert 
the lines to CPU strings + cpu_strings = [_parse_cpu_brand_string_dx(l) for l in lines] + + # Find the CPU string that has the most fields + best_string = None + highest_count = 0 + for cpu_string in cpu_strings: + count = sum([n is not None for n in cpu_string]) + if count > highest_count: + highest_count = count + best_string = cpu_string + + # If no CPU string was found, return {} + if not best_string: + return {} + + hz_actual, scale, processor_brand, vendor_id, stepping, model, family = best_string + + # Origin + if ' Origin=' in output: + fields = output[output.find(' Origin=') : ].split('\n')[0] + fields = fields.strip().split() + fields = [n.strip().split('=') for n in fields] + fields = [{n[0].strip().lower() : n[1].strip()} for n in fields] + + for field in fields: + name = list(field.keys())[0] + value = list(field.values())[0] + + if name == 'origin': + vendor_id = value.strip('"') + elif name == 'stepping': + stepping = int(value.lstrip('0x'), 16) + elif name == 'model': + model = int(value.lstrip('0x'), 16) + elif name in ['fam', 'family']: + family = int(value.lstrip('0x'), 16) + + # Features + flag_lines = [] + for category in [' Features=', ' Features2=', ' AMD Features=', ' AMD Features2=']: + if category in output: + flag_lines.append(output.split(category)[1].split('\n')[0]) + + flags = [] + for line in flag_lines: + line = line.split('<')[1].split('>')[0].lower() + for flag in line.split(','): + flags.append(flag) + flags.sort() + + # Convert from GHz/MHz string to Hz + hz_advertised, scale = _parse_cpu_brand_string(processor_brand) + + # If advertised hz not found, use the actual hz + if hz_advertised == '0.0': + scale = 6 + hz_advertised = _to_decimal_string(hz_actual) + + info = { + 'vendor_id_raw' : vendor_id, + 'brand_raw' : processor_brand, + + 'stepping' : stepping, + 'model' : model, + 'family' : family, + 'flags' : flags + } + + if hz_advertised and hz_advertised != '0.0': + info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale) + info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, scale) + + if hz_advertised and hz_advertised != '0.0': + info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale) + info['hz_actual'] = _hz_short_to_full(hz_actual, scale) + + return {k: v for k, v in info.items() if v} + except Exception as err: + g_trace.fail(err) + #raise + + return {} + +def _parse_arch(arch_string_raw): + import re + + arch, bits = None, None + arch_string_raw = arch_string_raw.lower() + + # X86 + if re.match(r'^i\d86$|^x86$|^x86_32$|^i86pc$|^ia32$|^ia-32$|^bepc$', arch_string_raw): + arch = 'X86_32' + bits = 32 + elif re.match(r'^x64$|^x86_64$|^x86_64t$|^i686-64$|^amd64$|^ia64$|^ia-64$', arch_string_raw): + arch = 'X86_64' + bits = 64 + # ARM + elif re.match(r'^armv8-a|aarch64|arm64$', arch_string_raw): + arch = 'ARM_8' + bits = 64 + elif re.match(r'^armv7$|^armv7[a-z]$|^armv7-[a-z]$|^armv6[a-z]$', arch_string_raw): + arch = 'ARM_7' + bits = 32 + elif re.match(r'^armv8$|^armv8[a-z]$|^armv8-[a-z]$', arch_string_raw): + arch = 'ARM_8' + bits = 32 + # PPC + elif re.match(r'^ppc32$|^prep$|^pmac$|^powermac$', arch_string_raw): + arch = 'PPC_32' + bits = 32 + elif re.match(r'^powerpc$|^ppc64$|^ppc64le$', arch_string_raw): + arch = 'PPC_64' + bits = 64 + # SPARC + elif re.match(r'^sparc32$|^sparc$', arch_string_raw): + arch = 'SPARC_32' + bits = 32 + elif re.match(r'^sparc64$|^sun4u$|^sun4v$', arch_string_raw): + arch = 'SPARC_64' + bits = 64 + # S390X + elif re.match(r'^s390x$', arch_string_raw): + arch = 'S390X' + bits = 64 + 
elif arch_string_raw == 'mips': + arch = 'MIPS_32' + bits = 32 + elif arch_string_raw == 'mips64': + arch = 'MIPS_64' + bits = 64 + # RISCV + elif re.match(r'^riscv$|^riscv32$|^riscv32be$', arch_string_raw): + arch = 'RISCV_32' + bits = 32 + elif re.match(r'^riscv64$|^riscv64be$', arch_string_raw): + arch = 'RISCV_64' + bits = 64 + + return (arch, bits) + +def _is_bit_set(reg, bit): + mask = 1 << bit + is_set = reg & mask > 0 + return is_set + + +def _is_selinux_enforcing(trace): + # Just return if the SE Linux Status Tool is not installed + if not DataSource.has_sestatus(): + trace.fail('Failed to find sestatus.') + return False + + # Run the sestatus, and just return if it failed to run + returncode, output = DataSource.sestatus_b() + if returncode != 0: + trace.fail('Failed to run sestatus. Skipping ...') + return False + + # Figure out if explicitly in enforcing mode + for line in output.splitlines(): + line = line.strip().lower() + if line.startswith("current mode:"): + if line.endswith("enforcing"): + return True + else: + return False + + # Figure out if we can execute heap and execute memory + can_selinux_exec_heap = False + can_selinux_exec_memory = False + for line in output.splitlines(): + line = line.strip().lower() + if line.startswith("allow_execheap") and line.endswith("on"): + can_selinux_exec_heap = True + elif line.startswith("allow_execmem") and line.endswith("on"): + can_selinux_exec_memory = True + + trace.command_output('can_selinux_exec_heap:', can_selinux_exec_heap) + trace.command_output('can_selinux_exec_memory:', can_selinux_exec_memory) + + return (not can_selinux_exec_heap or not can_selinux_exec_memory) + +def _filter_dict_keys_with_empty_values(info, acceptable_values = {}): + filtered_info = {} + for key in info: + value = info[key] + + # Keep if value is acceptable + if key in acceptable_values: + if acceptable_values[key] == value: + filtered_info[key] = value + continue + + # Filter out None, 0, "", (), {}, [] + if not value: + continue + + # Filter out (0, 0) + if value == (0, 0): + continue + + # Filter out -1 + if value == -1: + continue + + # Filter out strings that start with "0.0" + if type(value) == str and value.startswith('0.0'): + continue + + filtered_info[key] = value + + return filtered_info + +class ASM(object): + def __init__(self, restype=None, argtypes=(), machine_code=[]): + self.restype = restype + self.argtypes = argtypes + self.machine_code = machine_code + self.prochandle = None + self.mm = None + self.func = None + self.address = None + self.size = 0 + + def compile(self): + machine_code = bytes.join(b'', self.machine_code) + self.size = ctypes.c_size_t(len(machine_code)) + + if DataSource.is_windows: + # Allocate a memory segment the size of the machine code, and make it executable + size = len(machine_code) + # Alloc at least 1 page to ensure we own all pages that we want to change protection on + if size < 0x1000: size = 0x1000 + MEM_COMMIT = ctypes.c_ulong(0x1000) + PAGE_READWRITE = ctypes.c_ulong(0x4) + pfnVirtualAlloc = ctypes.windll.kernel32.VirtualAlloc + pfnVirtualAlloc.restype = ctypes.c_void_p + self.address = pfnVirtualAlloc(None, ctypes.c_size_t(size), MEM_COMMIT, PAGE_READWRITE) + if not self.address: + raise Exception("Failed to VirtualAlloc") + + # Copy the machine code into the memory segment + memmove = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t)(ctypes._memmove_addr) + if memmove(self.address, machine_code, size) < 0: + raise Exception("Failed to memmove") + + # Enable 
execute permissions + PAGE_EXECUTE = ctypes.c_ulong(0x10) + old_protect = ctypes.c_ulong(0) + pfnVirtualProtect = ctypes.windll.kernel32.VirtualProtect + res = pfnVirtualProtect(ctypes.c_void_p(self.address), ctypes.c_size_t(size), PAGE_EXECUTE, ctypes.byref(old_protect)) + if not res: + raise Exception("Failed VirtualProtect") + + # Flush Instruction Cache + # First, get process Handle + if not self.prochandle: + pfnGetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess + pfnGetCurrentProcess.restype = ctypes.c_void_p + self.prochandle = ctypes.c_void_p(pfnGetCurrentProcess()) + # Actually flush cache + res = ctypes.windll.kernel32.FlushInstructionCache(self.prochandle, ctypes.c_void_p(self.address), ctypes.c_size_t(size)) + if not res: + raise Exception("Failed FlushInstructionCache") + else: + from mmap import mmap, MAP_PRIVATE, MAP_ANONYMOUS, PROT_WRITE, PROT_READ, PROT_EXEC + + # Allocate a private and executable memory segment the size of the machine code + machine_code = bytes.join(b'', self.machine_code) + self.size = len(machine_code) + self.mm = mmap(-1, self.size, flags=MAP_PRIVATE | MAP_ANONYMOUS, prot=PROT_WRITE | PROT_READ | PROT_EXEC) + + # Copy the machine code into the memory segment + self.mm.write(machine_code) + self.address = ctypes.addressof(ctypes.c_int.from_buffer(self.mm)) + + # Cast the memory segment into a function + functype = ctypes.CFUNCTYPE(self.restype, *self.argtypes) + self.func = functype(self.address) + + def run(self): + # Call the machine code like a function + retval = self.func() + + return retval + + def free(self): + # Free the function memory segment + if DataSource.is_windows: + MEM_RELEASE = ctypes.c_ulong(0x8000) + ctypes.windll.kernel32.VirtualFree(ctypes.c_void_p(self.address), ctypes.c_size_t(0), MEM_RELEASE) + else: + self.mm.close() + + self.prochandle = None + self.mm = None + self.func = None + self.address = None + self.size = 0 + + +class CPUID(object): + def __init__(self, trace=None): + if trace is None: + trace = Trace(False, False) + + # Figure out if SE Linux is on and in enforcing mode + self.is_selinux_enforcing = _is_selinux_enforcing(trace) + + def _asm_func(self, restype=None, argtypes=(), machine_code=[]): + asm = ASM(restype, argtypes, machine_code) + asm.compile() + return asm + + def _run_asm(self, *machine_code): + asm = ASM(ctypes.c_uint32, (), machine_code) + asm.compile() + retval = asm.run() + asm.free() + return retval + + # http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID + def get_vendor_id(self): + # EBX + ebx = self._run_asm( + b"\x31\xC0", # xor eax,eax + b"\x0F\xA2" # cpuid + b"\x89\xD8" # mov ax,bx + b"\xC3" # ret + ) + + # ECX + ecx = self._run_asm( + b"\x31\xC0", # xor eax,eax + b"\x0f\xa2" # cpuid + b"\x89\xC8" # mov ax,cx + b"\xC3" # ret + ) + + # EDX + edx = self._run_asm( + b"\x31\xC0", # xor eax,eax + b"\x0f\xa2" # cpuid + b"\x89\xD0" # mov ax,dx + b"\xC3" # ret + ) + + # Each 8 bits is an ASCII letter in the name + vendor_id = [] + for reg in [ebx, edx, ecx]: + for n in [0, 8, 16, 24]: + vendor_id.append(chr((reg >> n) & 0xFF)) + vendor_id = ''.join(vendor_id) + + return vendor_id + + # http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits + def get_info(self): + # EAX + eax = self._run_asm( + b"\xB8\x01\x00\x00\x00", # mov eax,0x1" + b"\x0f\xa2" # cpuid + b"\xC3" # ret + ) + + # Get the CPU info + stepping_id = (eax >> 0) & 0xF # 4 bits + model = (eax >> 4) & 0xF # 4 bits + family_id = (eax >> 8) & 0xF # 4 bits + processor_type = (eax >> 12) & 0x3 # 2 bits + 
extended_model_id = (eax >> 16) & 0xF # 4 bits + extended_family_id = (eax >> 20) & 0xFF # 8 bits + family = 0 + + if family_id in [15]: + family = extended_family_id + family_id + else: + family = family_id + + if family_id in [6, 15]: + model = (extended_model_id << 4) + model + + return { + 'stepping' : stepping_id, + 'model' : model, + 'family' : family, + 'processor_type' : processor_type + } + + # http://en.wikipedia.org/wiki/CPUID#EAX.3D80000000h:_Get_Highest_Extended_Function_Supported + def get_max_extension_support(self): + # Check for extension support + max_extension_support = self._run_asm( + b"\xB8\x00\x00\x00\x80" # mov ax,0x80000000 + b"\x0f\xa2" # cpuid + b"\xC3" # ret + ) + + return max_extension_support + + # http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits + def get_flags(self, max_extension_support): + # EDX + edx = self._run_asm( + b"\xB8\x01\x00\x00\x00", # mov eax,0x1" + b"\x0f\xa2" # cpuid + b"\x89\xD0" # mov ax,dx + b"\xC3" # ret + ) + + # ECX + ecx = self._run_asm( + b"\xB8\x01\x00\x00\x00", # mov eax,0x1" + b"\x0f\xa2" # cpuid + b"\x89\xC8" # mov ax,cx + b"\xC3" # ret + ) + + # Get the CPU flags + flags = { + 'fpu' : _is_bit_set(edx, 0), + 'vme' : _is_bit_set(edx, 1), + 'de' : _is_bit_set(edx, 2), + 'pse' : _is_bit_set(edx, 3), + 'tsc' : _is_bit_set(edx, 4), + 'msr' : _is_bit_set(edx, 5), + 'pae' : _is_bit_set(edx, 6), + 'mce' : _is_bit_set(edx, 7), + 'cx8' : _is_bit_set(edx, 8), + 'apic' : _is_bit_set(edx, 9), + #'reserved1' : _is_bit_set(edx, 10), + 'sep' : _is_bit_set(edx, 11), + 'mtrr' : _is_bit_set(edx, 12), + 'pge' : _is_bit_set(edx, 13), + 'mca' : _is_bit_set(edx, 14), + 'cmov' : _is_bit_set(edx, 15), + 'pat' : _is_bit_set(edx, 16), + 'pse36' : _is_bit_set(edx, 17), + 'pn' : _is_bit_set(edx, 18), + 'clflush' : _is_bit_set(edx, 19), + #'reserved2' : _is_bit_set(edx, 20), + 'dts' : _is_bit_set(edx, 21), + 'acpi' : _is_bit_set(edx, 22), + 'mmx' : _is_bit_set(edx, 23), + 'fxsr' : _is_bit_set(edx, 24), + 'sse' : _is_bit_set(edx, 25), + 'sse2' : _is_bit_set(edx, 26), + 'ss' : _is_bit_set(edx, 27), + 'ht' : _is_bit_set(edx, 28), + 'tm' : _is_bit_set(edx, 29), + 'ia64' : _is_bit_set(edx, 30), + 'pbe' : _is_bit_set(edx, 31), + + 'pni' : _is_bit_set(ecx, 0), + 'pclmulqdq' : _is_bit_set(ecx, 1), + 'dtes64' : _is_bit_set(ecx, 2), + 'monitor' : _is_bit_set(ecx, 3), + 'ds_cpl' : _is_bit_set(ecx, 4), + 'vmx' : _is_bit_set(ecx, 5), + 'smx' : _is_bit_set(ecx, 6), + 'est' : _is_bit_set(ecx, 7), + 'tm2' : _is_bit_set(ecx, 8), + 'ssse3' : _is_bit_set(ecx, 9), + 'cid' : _is_bit_set(ecx, 10), + #'reserved3' : _is_bit_set(ecx, 11), + 'fma' : _is_bit_set(ecx, 12), + 'cx16' : _is_bit_set(ecx, 13), + 'xtpr' : _is_bit_set(ecx, 14), + 'pdcm' : _is_bit_set(ecx, 15), + #'reserved4' : _is_bit_set(ecx, 16), + 'pcid' : _is_bit_set(ecx, 17), + 'dca' : _is_bit_set(ecx, 18), + 'sse4_1' : _is_bit_set(ecx, 19), + 'sse4_2' : _is_bit_set(ecx, 20), + 'x2apic' : _is_bit_set(ecx, 21), + 'movbe' : _is_bit_set(ecx, 22), + 'popcnt' : _is_bit_set(ecx, 23), + 'tscdeadline' : _is_bit_set(ecx, 24), + 'aes' : _is_bit_set(ecx, 25), + 'xsave' : _is_bit_set(ecx, 26), + 'osxsave' : _is_bit_set(ecx, 27), + 'avx' : _is_bit_set(ecx, 28), + 'f16c' : _is_bit_set(ecx, 29), + 'rdrnd' : _is_bit_set(ecx, 30), + 'hypervisor' : _is_bit_set(ecx, 31) + } + + # Get a list of only the flags that are true + flags = [k for k, v in flags.items() if v] + + # http://en.wikipedia.org/wiki/CPUID#EAX.3D7.2C_ECX.3D0:_Extended_Features + if max_extension_support >= 7: + # EBX + ebx = self._run_asm( + 
b"\x31\xC9", # xor ecx,ecx + b"\xB8\x07\x00\x00\x00" # mov eax,7 + b"\x0f\xa2" # cpuid + b"\x89\xD8" # mov ax,bx + b"\xC3" # ret + ) + + # ECX + ecx = self._run_asm( + b"\x31\xC9", # xor ecx,ecx + b"\xB8\x07\x00\x00\x00" # mov eax,7 + b"\x0f\xa2" # cpuid + b"\x89\xC8" # mov ax,cx + b"\xC3" # ret + ) + + # Get the extended CPU flags + extended_flags = { + #'fsgsbase' : _is_bit_set(ebx, 0), + #'IA32_TSC_ADJUST' : _is_bit_set(ebx, 1), + 'sgx' : _is_bit_set(ebx, 2), + 'bmi1' : _is_bit_set(ebx, 3), + 'hle' : _is_bit_set(ebx, 4), + 'avx2' : _is_bit_set(ebx, 5), + #'reserved' : _is_bit_set(ebx, 6), + 'smep' : _is_bit_set(ebx, 7), + 'bmi2' : _is_bit_set(ebx, 8), + 'erms' : _is_bit_set(ebx, 9), + 'invpcid' : _is_bit_set(ebx, 10), + 'rtm' : _is_bit_set(ebx, 11), + 'pqm' : _is_bit_set(ebx, 12), + #'FPU CS and FPU DS deprecated' : _is_bit_set(ebx, 13), + 'mpx' : _is_bit_set(ebx, 14), + 'pqe' : _is_bit_set(ebx, 15), + 'avx512f' : _is_bit_set(ebx, 16), + 'avx512dq' : _is_bit_set(ebx, 17), + 'rdseed' : _is_bit_set(ebx, 18), + 'adx' : _is_bit_set(ebx, 19), + 'smap' : _is_bit_set(ebx, 20), + 'avx512ifma' : _is_bit_set(ebx, 21), + 'pcommit' : _is_bit_set(ebx, 22), + 'clflushopt' : _is_bit_set(ebx, 23), + 'clwb' : _is_bit_set(ebx, 24), + 'intel_pt' : _is_bit_set(ebx, 25), + 'avx512pf' : _is_bit_set(ebx, 26), + 'avx512er' : _is_bit_set(ebx, 27), + 'avx512cd' : _is_bit_set(ebx, 28), + 'sha' : _is_bit_set(ebx, 29), + 'avx512bw' : _is_bit_set(ebx, 30), + 'avx512vl' : _is_bit_set(ebx, 31), + + 'prefetchwt1' : _is_bit_set(ecx, 0), + 'avx512vbmi' : _is_bit_set(ecx, 1), + 'umip' : _is_bit_set(ecx, 2), + 'pku' : _is_bit_set(ecx, 3), + 'ospke' : _is_bit_set(ecx, 4), + #'reserved' : _is_bit_set(ecx, 5), + 'avx512vbmi2' : _is_bit_set(ecx, 6), + #'reserved' : _is_bit_set(ecx, 7), + 'gfni' : _is_bit_set(ecx, 8), + 'vaes' : _is_bit_set(ecx, 9), + 'vpclmulqdq' : _is_bit_set(ecx, 10), + 'avx512vnni' : _is_bit_set(ecx, 11), + 'avx512bitalg' : _is_bit_set(ecx, 12), + #'reserved' : _is_bit_set(ecx, 13), + 'avx512vpopcntdq' : _is_bit_set(ecx, 14), + #'reserved' : _is_bit_set(ecx, 15), + #'reserved' : _is_bit_set(ecx, 16), + #'mpx0' : _is_bit_set(ecx, 17), + #'mpx1' : _is_bit_set(ecx, 18), + #'mpx2' : _is_bit_set(ecx, 19), + #'mpx3' : _is_bit_set(ecx, 20), + #'mpx4' : _is_bit_set(ecx, 21), + 'rdpid' : _is_bit_set(ecx, 22), + #'reserved' : _is_bit_set(ecx, 23), + #'reserved' : _is_bit_set(ecx, 24), + #'reserved' : _is_bit_set(ecx, 25), + #'reserved' : _is_bit_set(ecx, 26), + #'reserved' : _is_bit_set(ecx, 27), + #'reserved' : _is_bit_set(ecx, 28), + #'reserved' : _is_bit_set(ecx, 29), + 'sgx_lc' : _is_bit_set(ecx, 30), + #'reserved' : _is_bit_set(ecx, 31) + } + + # Get a list of only the flags that are true + extended_flags = [k for k, v in extended_flags.items() if v] + flags += extended_flags + + # http://en.wikipedia.org/wiki/CPUID#EAX.3D80000001h:_Extended_Processor_Info_and_Feature_Bits + if max_extension_support >= 0x80000001: + # EBX + ebx = self._run_asm( + b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001 + b"\x0f\xa2" # cpuid + b"\x89\xD8" # mov ax,bx + b"\xC3" # ret + ) + + # ECX + ecx = self._run_asm( + b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001 + b"\x0f\xa2" # cpuid + b"\x89\xC8" # mov ax,cx + b"\xC3" # ret + ) + + # Get the extended CPU flags + extended_flags = { + 'fpu' : _is_bit_set(ebx, 0), + 'vme' : _is_bit_set(ebx, 1), + 'de' : _is_bit_set(ebx, 2), + 'pse' : _is_bit_set(ebx, 3), + 'tsc' : _is_bit_set(ebx, 4), + 'msr' : _is_bit_set(ebx, 5), + 'pae' : _is_bit_set(ebx, 6), + 'mce' : _is_bit_set(ebx, 7), + 'cx8' : 
_is_bit_set(ebx, 8), + 'apic' : _is_bit_set(ebx, 9), + #'reserved' : _is_bit_set(ebx, 10), + 'syscall' : _is_bit_set(ebx, 11), + 'mtrr' : _is_bit_set(ebx, 12), + 'pge' : _is_bit_set(ebx, 13), + 'mca' : _is_bit_set(ebx, 14), + 'cmov' : _is_bit_set(ebx, 15), + 'pat' : _is_bit_set(ebx, 16), + 'pse36' : _is_bit_set(ebx, 17), + #'reserved' : _is_bit_set(ebx, 18), + 'mp' : _is_bit_set(ebx, 19), + 'nx' : _is_bit_set(ebx, 20), + #'reserved' : _is_bit_set(ebx, 21), + 'mmxext' : _is_bit_set(ebx, 22), + 'mmx' : _is_bit_set(ebx, 23), + 'fxsr' : _is_bit_set(ebx, 24), + 'fxsr_opt' : _is_bit_set(ebx, 25), + 'pdpe1gp' : _is_bit_set(ebx, 26), + 'rdtscp' : _is_bit_set(ebx, 27), + #'reserved' : _is_bit_set(ebx, 28), + 'lm' : _is_bit_set(ebx, 29), + '3dnowext' : _is_bit_set(ebx, 30), + '3dnow' : _is_bit_set(ebx, 31), + + 'lahf_lm' : _is_bit_set(ecx, 0), + 'cmp_legacy' : _is_bit_set(ecx, 1), + 'svm' : _is_bit_set(ecx, 2), + 'extapic' : _is_bit_set(ecx, 3), + 'cr8_legacy' : _is_bit_set(ecx, 4), + 'abm' : _is_bit_set(ecx, 5), + 'sse4a' : _is_bit_set(ecx, 6), + 'misalignsse' : _is_bit_set(ecx, 7), + '3dnowprefetch' : _is_bit_set(ecx, 8), + 'osvw' : _is_bit_set(ecx, 9), + 'ibs' : _is_bit_set(ecx, 10), + 'xop' : _is_bit_set(ecx, 11), + 'skinit' : _is_bit_set(ecx, 12), + 'wdt' : _is_bit_set(ecx, 13), + #'reserved' : _is_bit_set(ecx, 14), + 'lwp' : _is_bit_set(ecx, 15), + 'fma4' : _is_bit_set(ecx, 16), + 'tce' : _is_bit_set(ecx, 17), + #'reserved' : _is_bit_set(ecx, 18), + 'nodeid_msr' : _is_bit_set(ecx, 19), + #'reserved' : _is_bit_set(ecx, 20), + 'tbm' : _is_bit_set(ecx, 21), + 'topoext' : _is_bit_set(ecx, 22), + 'perfctr_core' : _is_bit_set(ecx, 23), + 'perfctr_nb' : _is_bit_set(ecx, 24), + #'reserved' : _is_bit_set(ecx, 25), + 'dbx' : _is_bit_set(ecx, 26), + 'perftsc' : _is_bit_set(ecx, 27), + 'pci_l2i' : _is_bit_set(ecx, 28), + #'reserved' : _is_bit_set(ecx, 29), + #'reserved' : _is_bit_set(ecx, 30), + #'reserved' : _is_bit_set(ecx, 31) + } + + # Get a list of only the flags that are true + extended_flags = [k for k, v in extended_flags.items() if v] + flags += extended_flags + + flags.sort() + return flags + + # http://en.wikipedia.org/wiki/CPUID#EAX.3D80000002h.2C80000003h.2C80000004h:_Processor_Brand_String + def get_processor_brand(self, max_extension_support): + processor_brand = "" + + # Processor brand string + if max_extension_support >= 0x80000004: + instructions = [ + b"\xB8\x02\x00\x00\x80", # mov ax,0x80000002 + b"\xB8\x03\x00\x00\x80", # mov ax,0x80000003 + b"\xB8\x04\x00\x00\x80" # mov ax,0x80000004 + ] + for instruction in instructions: + # EAX + eax = self._run_asm( + instruction, # mov ax,0x8000000? + b"\x0f\xa2" # cpuid + b"\x89\xC0" # mov ax,ax + b"\xC3" # ret + ) + + # EBX + ebx = self._run_asm( + instruction, # mov ax,0x8000000? + b"\x0f\xa2" # cpuid + b"\x89\xD8" # mov ax,bx + b"\xC3" # ret + ) + + # ECX + ecx = self._run_asm( + instruction, # mov ax,0x8000000? + b"\x0f\xa2" # cpuid + b"\x89\xC8" # mov ax,cx + b"\xC3" # ret + ) + + # EDX + edx = self._run_asm( + instruction, # mov ax,0x8000000? 
+ b"\x0f\xa2" # cpuid + b"\x89\xD0" # mov ax,dx + b"\xC3" # ret + ) + + # Combine each of the 4 bytes in each register into the string + for reg in [eax, ebx, ecx, edx]: + for n in [0, 8, 16, 24]: + processor_brand += chr((reg >> n) & 0xFF) + + # Strip off any trailing NULL terminators and white space + processor_brand = processor_brand.strip("\0").strip() + + return processor_brand + + # http://en.wikipedia.org/wiki/CPUID#EAX.3D80000006h:_Extended_L2_Cache_Features + def get_cache(self, max_extension_support): + cache_info = {} + + # Just return if the cache feature is not supported + if max_extension_support < 0x80000006: + return cache_info + + # ECX + ecx = self._run_asm( + b"\xB8\x06\x00\x00\x80" # mov ax,0x80000006 + b"\x0f\xa2" # cpuid + b"\x89\xC8" # mov ax,cx + b"\xC3" # ret + ) + + cache_info = { + 'size_b' : (ecx & 0xFF) * 1024, + 'associativity' : (ecx >> 12) & 0xF, + 'line_size_b' : (ecx >> 16) & 0xFFFF + } + + return cache_info + + def get_ticks_func(self): + retval = None + + if DataSource.bits == '32bit': + # Works on x86_32 + restype = None + argtypes = (ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint)) + get_ticks_x86_32 = self._asm_func(restype, argtypes, + [ + b"\x55", # push bp + b"\x89\xE5", # mov bp,sp + b"\x31\xC0", # xor ax,ax + b"\x0F\xA2", # cpuid + b"\x0F\x31", # rdtsc + b"\x8B\x5D\x08", # mov bx,[di+0x8] + b"\x8B\x4D\x0C", # mov cx,[di+0xc] + b"\x89\x13", # mov [bp+di],dx + b"\x89\x01", # mov [bx+di],ax + b"\x5D", # pop bp + b"\xC3" # ret + ] + ) + + # Monkey patch func to combine high and low args into one return + old_func = get_ticks_x86_32.func + def new_func(): + # Pass two uint32s into function + high = ctypes.c_uint32(0) + low = ctypes.c_uint32(0) + old_func(ctypes.byref(high), ctypes.byref(low)) + + # Shift the two uint32s into one uint64 + retval = ((high.value << 32) & 0xFFFFFFFF00000000) | low.value + return retval + get_ticks_x86_32.func = new_func + + retval = get_ticks_x86_32 + elif DataSource.bits == '64bit': + # Works on x86_64 + restype = ctypes.c_uint64 + argtypes = () + get_ticks_x86_64 = self._asm_func(restype, argtypes, + [ + b"\x48", # dec ax + b"\x31\xC0", # xor ax,ax + b"\x0F\xA2", # cpuid + b"\x0F\x31", # rdtsc + b"\x48", # dec ax + b"\xC1\xE2\x20", # shl dx,byte 0x20 + b"\x48", # dec ax + b"\x09\xD0", # or ax,dx + b"\xC3", # ret + ] + ) + + retval = get_ticks_x86_64 + return retval + + def get_raw_hz(self): + from time import sleep + + ticks_fn = self.get_ticks_func() + + start = ticks_fn.func() + sleep(1) + end = ticks_fn.func() + + ticks = (end - start) + ticks_fn.free() + + return ticks + +def _get_cpu_info_from_cpuid_actual(): + ''' + Warning! This function has the potential to crash the Python runtime. + Do not call it directly. Use the _get_cpu_info_from_cpuid function instead. + It will safely call this function in another process. + ''' + + from io import StringIO + + trace = Trace(True, True) + info = {} + + # Pipe stdout and stderr to strings + sys.stdout = trace._stdout + sys.stderr = trace._stderr + + try: + # Get the CPU arch and bits + arch, bits = _parse_arch(DataSource.arch_string_raw) + + # Return none if this is not an X86 CPU + if not arch in ['X86_32', 'X86_64']: + trace.fail('Not running on X86_32 or X86_64. Skipping ...') + return trace.to_dict(info, True) + + # Return none if SE Linux is in enforcing mode + cpuid = CPUID(trace) + if cpuid.is_selinux_enforcing: + trace.fail('SELinux is enforcing. 
Skipping ...') + return trace.to_dict(info, True) + + # Get the cpu info from the CPUID register + max_extension_support = cpuid.get_max_extension_support() + cache_info = cpuid.get_cache(max_extension_support) + info = cpuid.get_info() + + processor_brand = cpuid.get_processor_brand(max_extension_support) + + # Get the Hz and scale + hz_actual = cpuid.get_raw_hz() + hz_actual = _to_decimal_string(hz_actual) + + # Get the Hz and scale + hz_advertised, scale = _parse_cpu_brand_string(processor_brand) + info = { + 'vendor_id_raw' : cpuid.get_vendor_id(), + 'hardware_raw' : '', + 'brand_raw' : processor_brand, + + 'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale), + 'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0), + 'hz_advertised' : _hz_short_to_full(hz_advertised, scale), + 'hz_actual' : _hz_short_to_full(hz_actual, 0), + + 'l2_cache_size' : cache_info['size_b'], + 'l2_cache_line_size' : cache_info['line_size_b'], + 'l2_cache_associativity' : cache_info['associativity'], + + 'stepping' : info['stepping'], + 'model' : info['model'], + 'family' : info['family'], + 'processor_type' : info['processor_type'], + 'flags' : cpuid.get_flags(max_extension_support) + } + + info = _filter_dict_keys_with_empty_values(info) + trace.success() + except Exception as err: + from traceback import format_exc + err_string = format_exc() + trace._err = ''.join(['\t\t{0}\n'.format(n) for n in err_string.split('\n')]) + '\n' + return trace.to_dict(info, True) + + return trace.to_dict(info, False) + +def _get_cpu_info_from_cpuid_subprocess_wrapper(queue): + orig_stdout = sys.stdout + orig_stderr = sys.stderr + + output = _get_cpu_info_from_cpuid_actual() + + sys.stdout = orig_stdout + sys.stderr = orig_stderr + + queue.put(_obj_to_b64(output)) + +def _get_cpu_info_from_cpuid(): + ''' + Returns the CPU info gathered by querying the X86 cpuid register in a new process. + Returns {} on non X86 cpus. + Returns {} if SELinux is in enforcing mode. + ''' + + g_trace.header('Trying to get info from CPUID ...') + + from multiprocessing import Process, Queue + + # Return {} if can't cpuid + if not DataSource.can_cpuid: + g_trace.fail('Can\'t CPUID. Skipping ...') + return {} + + # Get the CPU arch and bits + arch, bits = _parse_arch(DataSource.arch_string_raw) + + # Return {} if this is not an X86 CPU + if not arch in ['X86_32', 'X86_64']: + g_trace.fail('Not running on X86_32 or X86_64. Skipping ...') + return {} + + try: + if CAN_CALL_CPUID_IN_SUBPROCESS: + # Start running the function in a subprocess + queue = Queue() + p = Process(target=_get_cpu_info_from_cpuid_subprocess_wrapper, args=(queue,)) + p.start() + + # Wait for the process to end, while it is still alive + while p.is_alive(): + p.join(0) + + # Return {} if it failed + if p.exitcode != 0: + g_trace.fail('Failed to run CPUID in process. Skipping ...') + return {} + + # Return {} if no results + if queue.empty(): + g_trace.fail('Failed to get anything from CPUID process. 
Skipping ...') + return {} + # Return the result, only if there is something to read + else: + output = _b64_to_obj(queue.get()) + import pprint + pp = pprint.PrettyPrinter(indent=4) + #pp.pprint(output) + + if 'output' in output and output['output']: + g_trace.write(output['output']) + + if 'stdout' in output and output['stdout']: + sys.stdout.write('{0}\n'.format(output['stdout'])) + sys.stdout.flush() + + if 'stderr' in output and output['stderr']: + sys.stderr.write('{0}\n'.format(output['stderr'])) + sys.stderr.flush() + + if 'is_fail' not in output: + g_trace.fail('Failed to get is_fail from CPUID process. Skipping ...') + return {} + + # Fail if there was an exception + if 'err' in output and output['err']: + g_trace.fail('Failed to run CPUID in process. Skipping ...') + g_trace.write(output['err']) + g_trace.write('Failed ...') + return {} + + if 'is_fail' in output and output['is_fail']: + g_trace.write('Failed ...') + return {} + + if 'info' not in output or not output['info']: + g_trace.fail('Failed to get return info from CPUID process. Skipping ...') + return {} + + return output['info'] + else: + # FIXME: This should write the values like in the above call to actual + orig_stdout = sys.stdout + orig_stderr = sys.stderr + + output = _get_cpu_info_from_cpuid_actual() + + sys.stdout = orig_stdout + sys.stderr = orig_stderr + + g_trace.success() + return output['info'] + except Exception as err: + g_trace.fail(err) + + # Return {} if everything failed + return {} + +def _get_cpu_info_from_proc_cpuinfo(): + ''' + Returns the CPU info gathered from /proc/cpuinfo. + Returns {} if /proc/cpuinfo is not found. + ''' + + g_trace.header('Trying to get info from /proc/cpuinfo ...') + + try: + # Just return {} if there is no cpuinfo + if not DataSource.has_proc_cpuinfo(): + g_trace.fail('Failed to find /proc/cpuinfo. Skipping ...') + return {} + + returncode, output = DataSource.cat_proc_cpuinfo() + if returncode != 0: + g_trace.fail('Failed to run cat /proc/cpuinfo. 
Skipping ...') + return {} + + # Various fields + vendor_id = _get_field(False, output, None, '', 'vendor_id', 'vendor id', 'vendor') + processor_brand = _get_field(True, output, None, None, 'model name', 'cpu', 'processor', 'uarch') + cache_size = _get_field(False, output, None, '', 'cache size') + stepping = _get_field(False, output, int, -1, 'stepping') + model = _get_field(False, output, int, -1, 'model') + family = _get_field(False, output, int, -1, 'cpu family') + hardware = _get_field(False, output, None, '', 'Hardware') + + # Flags + flags = _get_field(False, output, None, None, 'flags', 'Features', 'ASEs implemented') + if flags: + flags = flags.split() + flags.sort() + + # Check for other cache format + if not cache_size: + try: + for i in range(0, 10): + name = "cache{0}".format(i) + value = _get_field(False, output, None, None, name) + if value: + value = [entry.split('=') for entry in value.split(' ')] + value = dict(value) + if 'level' in value and value['level'] == '3' and 'size' in value: + cache_size = value['size'] + break + except Exception: + pass + + # Convert from MHz string to Hz + hz_actual = _get_field(False, output, None, '', 'cpu MHz', 'cpu speed', 'clock', 'cpu MHz dynamic', 'cpu MHz static') + hz_actual = hz_actual.lower().rstrip('mhz').strip() + hz_actual = _to_decimal_string(hz_actual) + + # Convert from GHz/MHz string to Hz + hz_advertised, scale = (None, 0) + try: + hz_advertised, scale = _parse_cpu_brand_string(processor_brand) + except Exception: + pass + + info = { + 'hardware_raw' : hardware, + 'brand_raw' : processor_brand, + + 'l3_cache_size' : _friendly_bytes_to_int(cache_size), + 'flags' : flags, + 'vendor_id_raw' : vendor_id, + 'stepping' : stepping, + 'model' : model, + 'family' : family, + } + + # Make the Hz the same for actual and advertised if missing any + if not hz_advertised or hz_advertised == '0.0': + hz_advertised = hz_actual + scale = 6 + elif not hz_actual or hz_actual == '0.0': + hz_actual = hz_advertised + + # Add the Hz if there is one + if _hz_short_to_full(hz_advertised, scale) > (0, 0): + info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale) + info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale) + if _hz_short_to_full(hz_actual, scale) > (0, 0): + info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, 6) + info['hz_actual'] = _hz_short_to_full(hz_actual, 6) + + info = _filter_dict_keys_with_empty_values(info, {'stepping':0, 'model':0, 'family':0}) + g_trace.success() + return info + except Exception as err: + g_trace.fail(err) + #raise # NOTE: To have this throw on error, uncomment this line + return {} + +def _get_cpu_info_from_cpufreq_info(): + ''' + Returns the CPU info gathered from cpufreq-info. + Returns {} if cpufreq-info is not found. + ''' + + g_trace.header('Trying to get info from cpufreq-info ...') + + try: + hz_brand, scale = '0.0', 0 + + if not DataSource.has_cpufreq_info(): + g_trace.fail('Failed to find cpufreq-info. Skipping ...') + return {} + + returncode, output = DataSource.cpufreq_info() + if returncode != 0: + g_trace.fail('Failed to run cpufreq-info. 
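+ # [Editor's sketch, not upstream code] cpufreq-info prints a line such as "current CPU frequency is 2.80 GHz."; the slicing below isolates the number and unit. A regex equivalent, shown only to make the expected input format concrete: + # + #     import re + #     m = re.search(r'current CPU frequency is\s*([\d.]+)\s*([MG])Hz', output) + #     if m: + #         hz = float(m.group(1)) * 10 ** (6 if m.group(2) == 'M' else 9)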
Skipping ...') + return {} + + hz_brand = output.split('current CPU frequency is')[1].split('\n')[0] + i = hz_brand.find('Hz') + assert(i != -1) + hz_brand = hz_brand[0 : i+2].strip().lower() + + if hz_brand.endswith('mhz'): + scale = 6 + elif hz_brand.endswith('ghz'): + scale = 9 + hz_brand = hz_brand.rstrip('mhz').rstrip('ghz').strip() + hz_brand = _to_decimal_string(hz_brand) + + info = { + 'hz_advertised_friendly' : _hz_short_to_friendly(hz_brand, scale), + 'hz_actual_friendly' : _hz_short_to_friendly(hz_brand, scale), + 'hz_advertised' : _hz_short_to_full(hz_brand, scale), + 'hz_actual' : _hz_short_to_full(hz_brand, scale), + } + + info = _filter_dict_keys_with_empty_values(info) + g_trace.success() + return info + except Exception as err: + g_trace.fail(err) + #raise # NOTE: To have this throw on error, uncomment this line + return {} + +def _get_cpu_info_from_lscpu(): + ''' + Returns the CPU info gathered from lscpu. + Returns {} if lscpu is not found. + ''' + + g_trace.header('Trying to get info from lscpu ...') + + try: + if not DataSource.has_lscpu(): + g_trace.fail('Failed to find lscpu. Skipping ...') + return {} + + returncode, output = DataSource.lscpu() + if returncode != 0: + g_trace.fail('Failed to run lscpu. Skipping ...') + return {} + + info = {} + + new_hz = _get_field(False, output, None, None, 'CPU max MHz', 'CPU MHz') + if new_hz: + new_hz = _to_decimal_string(new_hz) + scale = 6 + info['hz_advertised_friendly'] = _hz_short_to_friendly(new_hz, scale) + info['hz_actual_friendly'] = _hz_short_to_friendly(new_hz, scale) + info['hz_advertised'] = _hz_short_to_full(new_hz, scale) + info['hz_actual'] = _hz_short_to_full(new_hz, scale) + + new_hz = _get_field(False, output, None, None, 'CPU dynamic MHz', 'CPU static MHz') + if new_hz: + new_hz = _to_decimal_string(new_hz) + scale = 6 + info['hz_advertised_friendly'] = _hz_short_to_friendly(new_hz, scale) + info['hz_actual_friendly'] = _hz_short_to_friendly(new_hz, scale) + info['hz_advertised'] = _hz_short_to_full(new_hz, scale) + info['hz_actual'] = _hz_short_to_full(new_hz, scale) + + vendor_id = _get_field(False, output, None, None, 'Vendor ID') + if vendor_id: + info['vendor_id_raw'] = vendor_id + + brand = _get_field(False, output, None, None, 'Model name') + if brand: + info['brand_raw'] = brand + else: + brand = _get_field(False, output, None, None, 'Model') + if brand and not brand.isdigit(): + info['brand_raw'] = brand + + family = _get_field(False, output, None, None, 'CPU family') + if family and family.isdigit(): + info['family'] = int(family) + + stepping = _get_field(False, output, None, None, 'Stepping') + if stepping and stepping.isdigit(): + info['stepping'] = int(stepping) + + model = _get_field(False, output, None, None, 'Model') + if model and model.isdigit(): + info['model'] = int(model) + + l1_data_cache_size = _get_field(False, output, None, None, 'L1d cache') + if l1_data_cache_size: + l1_data_cache_size = l1_data_cache_size.split('(')[0].strip() + info['l1_data_cache_size'] = _friendly_bytes_to_int(l1_data_cache_size) + + l1_instruction_cache_size = _get_field(False, output, None, None, 'L1i cache') + if l1_instruction_cache_size: + l1_instruction_cache_size = l1_instruction_cache_size.split('(')[0].strip() + info['l1_instruction_cache_size'] = _friendly_bytes_to_int(l1_instruction_cache_size) + + l2_cache_size = _get_field(False, output, None, None, 'L2 cache', 'L2d cache') + if l2_cache_size: + l2_cache_size = l2_cache_size.split('(')[0].strip() + info['l2_cache_size'] = 
_friendly_bytes_to_int(l2_cache_size) + + l3_cache_size = _get_field(False, output, None, None, 'L3 cache') + if l3_cache_size: + l3_cache_size = l3_cache_size.split('(')[0].strip() + info['l3_cache_size'] = _friendly_bytes_to_int(l3_cache_size) + + # Flags + flags = _get_field(False, output, None, None, 'flags', 'Features', 'ASEs implemented') + if flags: + flags = flags.split() + flags.sort() + info['flags'] = flags + + info = _filter_dict_keys_with_empty_values(info, {'stepping':0, 'model':0, 'family':0}) + g_trace.success() + return info + except Exception as err: + g_trace.fail(err) + #raise # NOTE: To have this throw on error, uncomment this line + return {} + +def _get_cpu_info_from_dmesg(): + ''' + Returns the CPU info gathered from dmesg. + Returns {} if dmesg is not found or does not have the desired info. + ''' + + g_trace.header('Trying to get info from dmesg ...') + + # Just return {} if this arch has an unreliable dmesg log + arch, bits = _parse_arch(DataSource.arch_string_raw) + if arch in ['S390X']: + g_trace.fail('Running on S390X. Skipping ...') + return {} + + # Just return {} if there is no dmesg + if not DataSource.has_dmesg(): + g_trace.fail('Failed to find dmesg. Skipping ...') + return {} + + # If dmesg fails return {} + returncode, output = DataSource.dmesg_a() + if output is None or returncode != 0: + g_trace.fail('Failed to run \"dmesg -a\". Skipping ...') + return {} + + info = _parse_dmesg_output(output) + g_trace.success() + return info + + +# https://openpowerfoundation.org/wp-content/uploads/2016/05/LoPAPR_DRAFT_v11_24March2016_cmt1.pdf +# page 767 +def _get_cpu_info_from_ibm_pa_features(): + ''' + Returns the CPU info gathered from lsprop /proc/device-tree/cpus/*/ibm,pa-features + Returns {} if lsprop is not found or ibm,pa-features does not have the desired info. + ''' + + g_trace.header('Trying to get info from lsprop ...') + + try: + # Just return {} if there is no lsprop + if not DataSource.has_ibm_pa_features(): + g_trace.fail('Failed to find lsprop. Skipping ...') + return {} + + # If ibm,pa-features fails return {} + returncode, output = DataSource.ibm_pa_features() + if output is None or returncode != 0: + g_trace.fail('Failed to glob /proc/device-tree/cpus/*/ibm,pa-features. 
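+ # [Editor's sketch, not upstream code] The property value is a binary blob printed by lsprop; the code below keeps only hex digits and then reads two 32-bit words whose bits, counted from the MOST significant end, are the LoPAPR feature flags. A hedged illustration of the bit convention _is_bit_set is assumed to implement: + # + #     def is_bit_set_msb_first(word, bit):  # illustrative helper + #         return (word & (1 << (31 - bit))) != 0 + # + #     left = int('f63fc700', 16)            # made-up example value + #     assert is_bit_set_msb_first(left, 0)  # bit 0 -> 'mmu' in the table below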
Skipping ...') + return {} + + # Filter out invalid characters from output + value = output.split("ibm,pa-features")[1].lower() + value = [s for s in value if s in list('0123456789abcfed')] + value = ''.join(value) + + # Get data converted to Uint32 chunks + left = int(value[0 : 8], 16) + right = int(value[8 : 16], 16) + + # Get the CPU flags + flags = { + # Byte 0 + 'mmu' : _is_bit_set(left, 0), + 'fpu' : _is_bit_set(left, 1), + 'slb' : _is_bit_set(left, 2), + 'run' : _is_bit_set(left, 3), + #'reserved' : _is_bit_set(left, 4), + 'dabr' : _is_bit_set(left, 5), + 'ne' : _is_bit_set(left, 6), + 'wtr' : _is_bit_set(left, 7), + + # Byte 1 + 'mcr' : _is_bit_set(left, 8), + 'dsisr' : _is_bit_set(left, 9), + 'lp' : _is_bit_set(left, 10), + 'ri' : _is_bit_set(left, 11), + 'dabrx' : _is_bit_set(left, 12), + 'sprg3' : _is_bit_set(left, 13), + 'rislb' : _is_bit_set(left, 14), + 'pp' : _is_bit_set(left, 15), + + # Byte 2 + 'vpm' : _is_bit_set(left, 16), + 'dss_2.05' : _is_bit_set(left, 17), + #'reserved' : _is_bit_set(left, 18), + 'dar' : _is_bit_set(left, 19), + #'reserved' : _is_bit_set(left, 20), + 'ppr' : _is_bit_set(left, 21), + 'dss_2.02' : _is_bit_set(left, 22), + 'dss_2.06' : _is_bit_set(left, 23), + + # Byte 3 + 'lsd_in_dscr' : _is_bit_set(left, 24), + 'ugr_in_dscr' : _is_bit_set(left, 25), + #'reserved' : _is_bit_set(left, 26), + #'reserved' : _is_bit_set(left, 27), + #'reserved' : _is_bit_set(left, 28), + #'reserved' : _is_bit_set(left, 29), + #'reserved' : _is_bit_set(left, 30), + #'reserved' : _is_bit_set(left, 31), + + # Byte 4 + 'sso_2.06' : _is_bit_set(right, 0), + #'reserved' : _is_bit_set(right, 1), + #'reserved' : _is_bit_set(right, 2), + #'reserved' : _is_bit_set(right, 3), + #'reserved' : _is_bit_set(right, 4), + #'reserved' : _is_bit_set(right, 5), + #'reserved' : _is_bit_set(right, 6), + #'reserved' : _is_bit_set(right, 7), + + # Byte 5 + 'le' : _is_bit_set(right, 8), + 'cfar' : _is_bit_set(right, 9), + 'eb' : _is_bit_set(right, 10), + 'lsq_2.07' : _is_bit_set(right, 11), + #'reserved' : _is_bit_set(right, 12), + #'reserved' : _is_bit_set(right, 13), + #'reserved' : _is_bit_set(right, 14), + #'reserved' : _is_bit_set(right, 15), + + # Byte 6 + 'dss_2.07' : _is_bit_set(right, 16), + #'reserved' : _is_bit_set(right, 17), + #'reserved' : _is_bit_set(right, 18), + #'reserved' : _is_bit_set(right, 19), + #'reserved' : _is_bit_set(right, 20), + #'reserved' : _is_bit_set(right, 21), + #'reserved' : _is_bit_set(right, 22), + #'reserved' : _is_bit_set(right, 23), + + # Byte 7 + #'reserved' : _is_bit_set(right, 24), + #'reserved' : _is_bit_set(right, 25), + #'reserved' : _is_bit_set(right, 26), + #'reserved' : _is_bit_set(right, 27), + #'reserved' : _is_bit_set(right, 28), + #'reserved' : _is_bit_set(right, 29), + #'reserved' : _is_bit_set(right, 30), + #'reserved' : _is_bit_set(right, 31), + } + + # Get a list of only the flags that are true + flags = [k for k, v in flags.items() if v] + flags.sort() + + info = { + 'flags' : flags + } + info = _filter_dict_keys_with_empty_values(info) + g_trace.success() + return info + except Exception as err: + g_trace.fail(err) + return {} + + +def _get_cpu_info_from_cat_var_run_dmesg_boot(): + ''' + Returns the CPU info gathered from /var/run/dmesg.boot. + Returns {} if dmesg is not found or does not have the desired info. 
+ ''' + + g_trace.header('Trying to get info from the /var/run/dmesg.boot log ...') + + # Just return {} if there is no /var/run/dmesg.boot + if not DataSource.has_var_run_dmesg_boot(): + g_trace.fail('Failed to find /var/run/dmesg.boot file. Skipping ...') + return {} + + # If dmesg.boot fails return {} + returncode, output = DataSource.cat_var_run_dmesg_boot() + if output is None or returncode != 0: + g_trace.fail('Failed to run \"cat /var/run/dmesg.boot\". Skipping ...') + return {} + + info = _parse_dmesg_output(output) + g_trace.success() + return info + + +def _get_cpu_info_from_sysctl(): + ''' + Returns the CPU info gathered from sysctl. + Returns {} if sysctl is not found. + ''' + + g_trace.header('Trying to get info from sysctl ...') + + try: + # Just return {} if there is no sysctl + if not DataSource.has_sysctl(): + g_trace.fail('Failed to find sysctl. Skipping ...') + return {} + + # If sysctl fails return {} + returncode, output = DataSource.sysctl_machdep_cpu_hw_cpufrequency() + if output is None or returncode != 0: + g_trace.fail('Failed to run \"sysctl machdep.cpu hw.cpufrequency\". Skipping ...') + return {} + + # Various fields + vendor_id = _get_field(False, output, None, None, 'machdep.cpu.vendor') + processor_brand = _get_field(True, output, None, None, 'machdep.cpu.brand_string') + cache_size = _get_field(False, output, int, 0, 'machdep.cpu.cache.size') + stepping = _get_field(False, output, int, 0, 'machdep.cpu.stepping') + model = _get_field(False, output, int, 0, 'machdep.cpu.model') + family = _get_field(False, output, int, 0, 'machdep.cpu.family') + + # Flags + flags = _get_field(False, output, None, '', 'machdep.cpu.features').lower().split() + flags.extend(_get_field(False, output, None, '', 'machdep.cpu.leaf7_features').lower().split()) + flags.extend(_get_field(False, output, None, '', 'machdep.cpu.extfeatures').lower().split()) + flags.sort() + + # Convert from GHz/MHz string to Hz + hz_advertised, scale = _parse_cpu_brand_string(processor_brand) + hz_actual = _get_field(False, output, None, None, 'hw.cpufrequency') + hz_actual = _to_decimal_string(hz_actual) + + info = { + 'vendor_id_raw' : vendor_id, + 'brand_raw' : processor_brand, + + 'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale), + 'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0), + 'hz_advertised' : _hz_short_to_full(hz_advertised, scale), + 'hz_actual' : _hz_short_to_full(hz_actual, 0), + + 'l2_cache_size' : int(cache_size) * 1024, + + 'stepping' : stepping, + 'model' : model, + 'family' : family, + 'flags' : flags + } + + info = _filter_dict_keys_with_empty_values(info) + g_trace.success() + return info + except Exception as err: + g_trace.fail(err) + return {} + + +def _get_cpu_info_from_sysinfo(): + ''' + Returns the CPU info gathered from sysinfo. + Returns {} if sysinfo is not found. + ''' + + info = _get_cpu_info_from_sysinfo_v1() + info.update(_get_cpu_info_from_sysinfo_v2()) + return info + +def _get_cpu_info_from_sysinfo_v1(): + ''' + Returns the CPU info gathered from sysinfo. + Returns {} if sysinfo is not found. + ''' + + g_trace.header('Trying to get info from sysinfo version 1 ...') + + try: + # Just return {} if there is no sysinfo + if not DataSource.has_sysinfo(): + g_trace.fail('Failed to find sysinfo. Skipping ...') + return {} + + # If sysinfo fails return {} + returncode, output = DataSource.sysinfo_cpu() + if output is None or returncode != 0: + g_trace.fail('Failed to run \"sysinfo -cpu\". 
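+ # [Editor's note] The sysctl path above reads flat "name: value" pairs from one combined query; a single key can also be fetched directly on macOS, e.g. this hedged stdlib-only sketch: + # + #     import subprocess + #     brand = subprocess.run(['sysctl', '-n', 'machdep.cpu.brand_string'], + #                            capture_output=True, text=True).stdout.strip()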
Skipping ...') + return {} + + # Various fields + vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ') + processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip() + cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size') + stepping = int(output.split(', stepping ')[1].split(',')[0].strip()) + model = int(output.split(', model ')[1].split(',')[0].strip()) + family = int(output.split(', family ')[1].split(',')[0].strip()) + + # Flags + flags = [] + for line in output.split('\n'): + if line.startswith('\t\t'): + for flag in line.strip().lower().split(): + flags.append(flag) + flags.sort() + + # Convert from GHz/MHz string to Hz + hz_advertised, scale = _parse_cpu_brand_string(processor_brand) + hz_actual = hz_advertised + + info = { + 'vendor_id_raw' : vendor_id, + 'brand_raw' : processor_brand, + + 'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale), + 'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale), + 'hz_advertised' : _hz_short_to_full(hz_advertised, scale), + 'hz_actual' : _hz_short_to_full(hz_actual, scale), + + 'l2_cache_size' : _to_friendly_bytes(cache_size), + + 'stepping' : stepping, + 'model' : model, + 'family' : family, + 'flags' : flags + } + + info = _filter_dict_keys_with_empty_values(info) + g_trace.success() + return info + except Exception as err: + g_trace.fail(err) + #raise # NOTE: To have this throw on error, uncomment this line + return {} + +def _get_cpu_info_from_sysinfo_v2(): + ''' + Returns the CPU info gathered from sysinfo. + Returns {} if sysinfo is not found. + ''' + + g_trace.header('Trying to get info from sysinfo version 2 ...') + + try: + # Just return {} if there is no sysinfo + if not DataSource.has_sysinfo(): + g_trace.fail('Failed to find sysinfo. Skipping ...') + return {} + + # If sysinfo fails return {} + returncode, output = DataSource.sysinfo_cpu() + if output is None or returncode != 0: + g_trace.fail('Failed to run \"sysinfo -cpu\". 
Skipping ...') + return {} + + # Various fields + vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ') + processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip() + cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size') + signature = output.split('Signature:')[1].split('\n')[0].strip() + # + stepping = int(signature.split('stepping ')[1].split(',')[0].strip()) + model = int(signature.split('model ')[1].split(',')[0].strip()) + family = int(signature.split('family ')[1].split(',')[0].strip()) + + # Flags + def get_subsection_flags(output): + retval = [] + for line in output.split('\n')[1:]: + if not line.startswith(' ') and not line.startswith(' '): break + for entry in line.strip().lower().split(' '): + retval.append(entry) + return retval + + flags = get_subsection_flags(output.split('Features: ')[1]) + \ + get_subsection_flags(output.split('Extended Features (0x00000001): ')[1]) + \ + get_subsection_flags(output.split('Extended Features (0x80000001): ')[1]) + flags.sort() + + # Convert from GHz/MHz string to Hz + lines = [n for n in output.split('\n') if n] + raw_hz = lines[0].split('running at ')[1].strip().lower() + hz_advertised = raw_hz.rstrip('mhz').rstrip('ghz').strip() + hz_advertised = _to_decimal_string(hz_advertised) + hz_actual = hz_advertised + + scale = 0 + if raw_hz.endswith('mhz'): + scale = 6 + elif raw_hz.endswith('ghz'): + scale = 9 + + info = { + 'vendor_id_raw' : vendor_id, + 'brand_raw' : processor_brand, + + 'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale), + 'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale), + 'hz_advertised' : _hz_short_to_full(hz_advertised, scale), + 'hz_actual' : _hz_short_to_full(hz_actual, scale), + + 'l2_cache_size' : _to_friendly_bytes(cache_size), + + 'stepping' : stepping, + 'model' : model, + 'family' : family, + 'flags' : flags + } + + info = _filter_dict_keys_with_empty_values(info) + g_trace.success() + return info + except Exception as err: + g_trace.fail(err) + #raise # NOTE: To have this throw on error, uncomment this line + return {} + +def _get_cpu_info_from_wmic(): + ''' + Returns the CPU info gathered from WMI. + Returns {} if not on Windows, or wmic is not installed. + ''' + g_trace.header('Trying to get info from wmic ...') + + try: + # Just return {} if not Windows or there is no wmic + if not DataSource.is_windows or not DataSource.has_wmic(): + g_trace.fail('Failed to find WMIC, or not on Windows. Skipping ...') + return {} + + returncode, output = DataSource.wmic_cpu() + if output is None or returncode != 0: + g_trace.fail('Failed to run wmic. 
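+ # [Editor's note] Throughout this module a frequency travels as a decimal string plus a power-of-ten scale, e.g. ('3.2', 9) for 3.2 GHz; _hz_short_to_full and _hz_short_to_friendly (defined elsewhere in this file) consume that pair. A hedged stand-in showing the arithmetic only, NOT the real helpers: + # + #     from decimal import Decimal + #     def hz_str_to_int(dec_str, scale):  # illustrative only + #         return int(Decimal(dec_str) * 10 ** scale) + #     hz_str_to_int('3.2', 9)  # -> 3200000000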
Skipping ...') + return {} + + # Break the list into key value pairs + value = output.split("\n") + value = [s.rstrip().split('=') for s in value if '=' in s] + value = {k: v for k, v in value if v} + + # Get the advertised MHz + processor_brand = value.get('Name') + hz_advertised, scale_advertised = _parse_cpu_brand_string(processor_brand) + + # Get the actual MHz + hz_actual = value.get('CurrentClockSpeed') + scale_actual = 6 + if hz_actual: + hz_actual = _to_decimal_string(hz_actual) + + # Get cache sizes + l2_cache_size = value.get('L2CacheSize') # NOTE: L2CacheSize is in kilobytes + if l2_cache_size: + l2_cache_size = int(l2_cache_size) * 1024 + + l3_cache_size = value.get('L3CacheSize') # NOTE: L3CacheSize is in kilobytes + if l3_cache_size: + l3_cache_size = int(l3_cache_size) * 1024 + + # Get family, model, and stepping + family, model, stepping = '', '', '' + description = value.get('Description') or value.get('Caption') + entries = description.split(' ') + + if 'Family' in entries and entries.index('Family') < len(entries)-1: + i = entries.index('Family') + family = int(entries[i + 1]) + + if 'Model' in entries and entries.index('Model') < len(entries)-1: + i = entries.index('Model') + model = int(entries[i + 1]) + + if 'Stepping' in entries and entries.index('Stepping') < len(entries)-1: + i = entries.index('Stepping') + stepping = int(entries[i + 1]) + + info = { + 'vendor_id_raw' : value.get('Manufacturer'), + 'brand_raw' : processor_brand, + + 'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale_advertised), + 'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale_actual), + 'hz_advertised' : _hz_short_to_full(hz_advertised, scale_advertised), + 'hz_actual' : _hz_short_to_full(hz_actual, scale_actual), + + 'l2_cache_size' : l2_cache_size, + 'l3_cache_size' : l3_cache_size, + + 'stepping' : stepping, + 'model' : model, + 'family' : family, + } + + info = _filter_dict_keys_with_empty_values(info) + g_trace.success() + return info + except Exception as err: + g_trace.fail(err) + #raise # NOTE: To have this throw on error, uncomment this line + return {} + +def _get_cpu_info_from_registry(): + ''' + Returns the CPU info gathered from the Windows Registry. + Returns {} if not on Windows. + ''' + + g_trace.header('Trying to get info from Windows registry ...') + + try: + # Just return {} if not on Windows + if not DataSource.is_windows: + g_trace.fail('Not running on Windows. 
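+ # [Editor's note] The wmic path above recovers family/model/stepping from a description such as "Intel64 Family 6 Model 158 Stepping 10" by finding each keyword and converting the following token. A hedged one-screen equivalent: + # + #     tokens = 'Intel64 Family 6 Model 158 Stepping 10'.split() + #     fms = {k.lower(): int(tokens[i + 1]) for i, k in enumerate(tokens[:-1]) + #            if k in ('Family', 'Model', 'Stepping')} + #     # fms == {'family': 6, 'model': 158, 'stepping': 10}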
Skipping ...') + return {} + + # Get the CPU name + processor_brand = DataSource.winreg_processor_brand().strip() + + # Get the CPU vendor id + vendor_id = DataSource.winreg_vendor_id_raw() + + # Get the CPU arch and bits + arch_string_raw = DataSource.winreg_arch_string_raw() + arch, bits = _parse_arch(arch_string_raw) + + # Get the actual CPU Hz + hz_actual = DataSource.winreg_hz_actual() + hz_actual = _to_decimal_string(hz_actual) + + # Get the advertised CPU Hz + hz_advertised, scale = _parse_cpu_brand_string(processor_brand) + + # If advertised hz not found, use the actual hz + if hz_advertised == '0.0': + scale = 6 + hz_advertised = _to_decimal_string(hz_actual) + + # Get the CPU features + feature_bits = DataSource.winreg_feature_bits() + + def is_set(bit): + mask = 0x80000000 >> bit + retval = mask & feature_bits > 0 + return retval + + # http://en.wikipedia.org/wiki/CPUID + # http://unix.stackexchange.com/questions/43539/what-do-the-flags-in-proc-cpuinfo-mean + # http://www.lohninger.com/helpcsuite/public_constants_cpuid.htm + flags = { + 'fpu' : is_set(0), # Floating Point Unit + 'vme' : is_set(1), # V86 Mode Extensions + 'de' : is_set(2), # Debug Extensions - I/O breakpoints supported + 'pse' : is_set(3), # Page Size Extensions (4 MB pages supported) + 'tsc' : is_set(4), # Time Stamp Counter and RDTSC instruction are available + 'msr' : is_set(5), # Model Specific Registers + 'pae' : is_set(6), # Physical Address Extensions (36 bit address, 2MB pages) + 'mce' : is_set(7), # Machine Check Exception supported + 'cx8' : is_set(8), # Compare Exchange Eight Byte instruction available + 'apic' : is_set(9), # Local APIC present (multiprocessor operation support) + 'sepamd' : is_set(10), # Fast system calls (AMD only) + 'sep' : is_set(11), # Fast system calls + 'mtrr' : is_set(12), # Memory Type Range Registers + 'pge' : is_set(13), # Page Global Enable + 'mca' : is_set(14), # Machine Check Architecture + 'cmov' : is_set(15), # Conditional MOVe instructions + 'pat' : is_set(16), # Page Attribute Table + 'pse36' : is_set(17), # 36 bit Page Size Extensions + 'serial' : is_set(18), # Processor Serial Number + 'clflush' : is_set(19), # Cache Flush + #'reserved1' : is_set(20), # reserved + 'dts' : is_set(21), # Debug Trace Store + 'acpi' : is_set(22), # ACPI support + 'mmx' : is_set(23), # MultiMedia Extensions + 'fxsr' : is_set(24), # FXSAVE and FXRSTOR instructions + 'sse' : is_set(25), # SSE instructions + 'sse2' : is_set(26), # SSE2 (WNI) instructions + 'ss' : is_set(27), # self snoop + #'reserved2' : is_set(28), # reserved + 'tm' : is_set(29), # Automatic clock control + 'ia64' : is_set(30), # IA64 instructions + '3dnow' : is_set(31) # 3DNow! instructions available + } + + # Get a list of only the flags that are true + flags = [k for k, v in flags.items() if v] + flags.sort() + + info = { + 'vendor_id_raw' : vendor_id, + 'brand_raw' : processor_brand, + + 'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale), + 'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 6), + 'hz_advertised' : _hz_short_to_full(hz_advertised, scale), + 'hz_actual' : _hz_short_to_full(hz_actual, 6), + + 'flags' : flags + } + + info = _filter_dict_keys_with_empty_values(info) + g_trace.success() + return info + except Exception as err: + g_trace.fail(err) + return {} + +def _get_cpu_info_from_kstat(): + ''' + Returns the CPU info gathered from isainfo and kstat. + Returns {} if isainfo or kstat are not found. 
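+ # [Editor's note] The registry path above tests feature bits with mask = 0x80000000 >> bit, i.e. bit 0 names the MOST significant bit of the feature DWORD. A self-contained check of that convention (feature_bits value is made up): + # + #     feature_bits = 0x80000001  # bits 0 and 31 set under this convention + #     is_set = lambda bit: bool((0x80000000 >> bit) & feature_bits) + #     assert is_set(0) and is_set(31) and not is_set(1)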
+ ''' + + g_trace.header('Trying to get info from kstat ...') + + try: + # Just return {} if there is no isainfo or kstat + if not DataSource.has_isainfo() or not DataSource.has_kstat(): + g_trace.fail('Failed to find isainfo or kstat. Skipping ...') + return {} + + # If isainfo fails return {} + returncode, flag_output = DataSource.isainfo_vb() + if flag_output is None or returncode != 0: + g_trace.fail('Failed to run \"isainfo -vb\". Skipping ...') + return {} + + # If kstat fails return {} + returncode, kstat = DataSource.kstat_m_cpu_info() + if kstat is None or returncode != 0: + g_trace.fail('Failed to run \"kstat -m cpu_info\". Skipping ...') + return {} + + # Various fields + vendor_id = kstat.split('\tvendor_id ')[1].split('\n')[0].strip() + processor_brand = kstat.split('\tbrand ')[1].split('\n')[0].strip() + stepping = int(kstat.split('\tstepping ')[1].split('\n')[0].strip()) + model = int(kstat.split('\tmodel ')[1].split('\n')[0].strip()) + family = int(kstat.split('\tfamily ')[1].split('\n')[0].strip()) + + # Flags + flags = flag_output.strip().split('\n')[-1].strip().lower().split() + flags.sort() + + # Convert from MHz string to Hz + scale = 6 + hz_advertised = kstat.split('\tclock_MHz ')[1].split('\n')[0].strip() + hz_advertised = _to_decimal_string(hz_advertised) + + # The current clock is already reported in Hz + hz_actual = kstat.split('\tcurrent_clock_Hz ')[1].split('\n')[0].strip() + hz_actual = _to_decimal_string(hz_actual) + + info = { + 'vendor_id_raw' : vendor_id, + 'brand_raw' : processor_brand, + + 'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale), + 'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0), + 'hz_advertised' : _hz_short_to_full(hz_advertised, scale), + 'hz_actual' : _hz_short_to_full(hz_actual, 0), + + 'stepping' : stepping, + 'model' : model, + 'family' : family, + 'flags' : flags + } + + info = _filter_dict_keys_with_empty_values(info) + g_trace.success() + return info + except Exception as err: + g_trace.fail(err) + return {} + +def _get_cpu_info_from_platform_uname(): + + g_trace.header('Trying to get info from platform.uname ...') + + try: + uname = DataSource.uname_string_raw.split(',')[0] + + family, model, stepping = (None, None, None) + entries = uname.split(' ') + + if 'Family' in entries and entries.index('Family') < len(entries)-1: + i = entries.index('Family') + family = int(entries[i + 1]) + + if 'Model' in entries and entries.index('Model') < len(entries)-1: + i = entries.index('Model') + model = int(entries[i + 1]) + + if 'Stepping' in entries and entries.index('Stepping') < len(entries)-1: + i = entries.index('Stepping') + stepping = int(entries[i + 1]) + + info = { + 'family' : family, + 'model' : model, + 'stepping' : stepping + } + info = _filter_dict_keys_with_empty_values(info) + g_trace.success() + return info + except Exception as err: + g_trace.fail(err) + return {} + +def _get_cpu_info_internal(): + ''' + Returns the CPU info by using the best sources of information for your OS. + Returns {} if nothing is found. + ''' + + g_trace.write('!' 
* 80) + + # Get the CPU arch and bits + arch, bits = _parse_arch(DataSource.arch_string_raw) + + friendly_maxsize = { 2**31-1: '32 bit', 2**63-1: '64 bit' }.get(sys.maxsize) or 'unknown bits' + friendly_version = "{0}.{1}.{2}.{3}.{4}".format(*sys.version_info) + PYTHON_VERSION = "{0} ({1})".format(friendly_version, friendly_maxsize) + + info = { + 'python_version' : PYTHON_VERSION, + 'cpuinfo_version' : CPUINFO_VERSION, + 'cpuinfo_version_string' : CPUINFO_VERSION_STRING, + 'arch' : arch, + 'bits' : bits, + 'count' : DataSource.cpu_count, + 'arch_string_raw' : DataSource.arch_string_raw, + } + + g_trace.write("python_version: {0}".format(info['python_version'])) + g_trace.write("cpuinfo_version: {0}".format(info['cpuinfo_version'])) + g_trace.write("arch: {0}".format(info['arch'])) + g_trace.write("bits: {0}".format(info['bits'])) + g_trace.write("count: {0}".format(info['count'])) + g_trace.write("arch_string_raw: {0}".format(info['arch_string_raw'])) + + # Try the Windows wmic + _copy_new_fields(info, _get_cpu_info_from_wmic()) + + # Try the Windows registry + _copy_new_fields(info, _get_cpu_info_from_registry()) + + # Try /proc/cpuinfo + _copy_new_fields(info, _get_cpu_info_from_proc_cpuinfo()) + + # Try cpufreq-info + _copy_new_fields(info, _get_cpu_info_from_cpufreq_info()) + + # Try LSCPU + _copy_new_fields(info, _get_cpu_info_from_lscpu()) + + # Try sysctl + _copy_new_fields(info, _get_cpu_info_from_sysctl()) + + # Try kstat + _copy_new_fields(info, _get_cpu_info_from_kstat()) + + # Try dmesg + _copy_new_fields(info, _get_cpu_info_from_dmesg()) + + # Try /var/run/dmesg.boot + _copy_new_fields(info, _get_cpu_info_from_cat_var_run_dmesg_boot()) + + # Try lsprop ibm,pa-features + _copy_new_fields(info, _get_cpu_info_from_ibm_pa_features()) + + # Try sysinfo + _copy_new_fields(info, _get_cpu_info_from_sysinfo()) + + # Try querying the CPU cpuid register + # FIXME: This should print stdout and stderr to trace log + _copy_new_fields(info, _get_cpu_info_from_cpuid()) + + # Try platform.uname + _copy_new_fields(info, _get_cpu_info_from_platform_uname()) + + g_trace.write('!' * 80) + + return info + +def get_cpu_info_json(): + ''' + Returns the CPU info by using the best sources of information for your OS. + Returns the result in a JSON string + ''' + + import json + + output = None + + # If running under pyinstaller, run normally + if getattr(sys, 'frozen', False): + info = _get_cpu_info_internal() + output = json.dumps(info) + output = "{0}".format(output) + # If not running under pyinstaller, run in another process. + # This is done because multiprocessing has a design flaw that + # causes non-main programs to run multiple times on Windows. + else: + from subprocess import Popen, PIPE + + command = [sys.executable, __file__, '--json'] + p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE) + output = p1.communicate()[0] + + if p1.returncode != 0: + return "{}" + + output = output.decode(encoding='UTF-8') + + return output + +def get_cpu_info(): + ''' + Returns the CPU info by using the best sources of information for your OS. 
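+ + Example usage (illustrative; available keys vary by platform): + + >>> from cpuinfo import get_cpu_info + >>> info = get_cpu_info() + >>> info['arch']   # doctest: +SKIP + 'X86_64'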
+ Returns the result in a dict + ''' + + import json + + output = get_cpu_info_json() + + # Convert JSON to Python with non unicode strings + output = json.loads(output, object_hook = _utf_to_str) + + return output + +def main(): + from argparse import ArgumentParser + import json + + # Parse args + parser = ArgumentParser(description='Gets CPU info with pure Python') + parser.add_argument('--json', action='store_true', help='Return the info in JSON format') + parser.add_argument('--version', action='store_true', help='Return the version of py-cpuinfo') + parser.add_argument('--trace', action='store_true', help='Traces code paths used to find CPU info to file') + args = parser.parse_args() + + global g_trace + g_trace = Trace(args.trace, False) + + try: + _check_arch() + except Exception as err: + sys.stderr.write(str(err) + "\n") + sys.exit(1) + + info = _get_cpu_info_internal() + + if not info: + sys.stderr.write("Failed to find cpu info\n") + sys.exit(1) + + if args.json: + print(json.dumps(info)) + elif args.version: + print(CPUINFO_VERSION_STRING) + else: + print('Python Version: {0}'.format(info.get('python_version', ''))) + print('Cpuinfo Version: {0}'.format(info.get('cpuinfo_version_string', ''))) + print('Vendor ID Raw: {0}'.format(info.get('vendor_id_raw', ''))) + print('Hardware Raw: {0}'.format(info.get('hardware_raw', ''))) + print('Brand Raw: {0}'.format(info.get('brand_raw', ''))) + print('Hz Advertised Friendly: {0}'.format(info.get('hz_advertised_friendly', ''))) + print('Hz Actual Friendly: {0}'.format(info.get('hz_actual_friendly', ''))) + print('Hz Advertised: {0}'.format(info.get('hz_advertised', ''))) + print('Hz Actual: {0}'.format(info.get('hz_actual', ''))) + print('Arch: {0}'.format(info.get('arch', ''))) + print('Bits: {0}'.format(info.get('bits', ''))) + print('Count: {0}'.format(info.get('count', ''))) + print('Arch String Raw: {0}'.format(info.get('arch_string_raw', ''))) + print('L1 Data Cache Size: {0}'.format(info.get('l1_data_cache_size', ''))) + print('L1 Instruction Cache Size: {0}'.format(info.get('l1_instruction_cache_size', ''))) + print('L2 Cache Size: {0}'.format(info.get('l2_cache_size', ''))) + print('L2 Cache Line Size: {0}'.format(info.get('l2_cache_line_size', ''))) + print('L2 Cache Associativity: {0}'.format(info.get('l2_cache_associativity', ''))) + print('L3 Cache Size: {0}'.format(info.get('l3_cache_size', ''))) + print('Stepping: {0}'.format(info.get('stepping', ''))) + print('Model: {0}'.format(info.get('model', ''))) + print('Family: {0}'.format(info.get('family', ''))) + print('Processor Type: {0}'.format(info.get('processor_type', ''))) + print('Flags: {0}'.format(', '.join(info.get('flags', '')))) + + +if __name__ == '__main__': + main() +else: + g_trace = Trace(False, False) + _check_arch() diff --git a/venv/lib/python3.10/site-packages/dateutil/__init__.py b/venv/lib/python3.10/site-packages/dateutil/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a2c19c06fe14476a9bfa4f1f60de7a997a41191c --- /dev/null +++ b/venv/lib/python3.10/site-packages/dateutil/__init__.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- +import sys + +try: + from ._version import version as __version__ +except ImportError: + __version__ = 'unknown' + +__all__ = ['easter', 'parser', 'relativedelta', 'rrule', 'tz', + 'utils', 'zoneinfo'] + +def __getattr__(name): + import importlib + + if name in __all__: + return importlib.import_module("." 
+ name, __name__) + raise AttributeError( + "module {!r} has no attribute {!r}".format(__name__, name) + ) + + +def __dir__(): + # __dir__ should include all the lazy-importable modules as well. + return [x for x in globals() if x not in sys.modules] + __all__ diff --git a/venv/lib/python3.10/site-packages/dateutil/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/dateutil/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d60d20069fc194c6c0cb95e88cd4d4a329ff2318 Binary files /dev/null and b/venv/lib/python3.10/site-packages/dateutil/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dateutil/__pycache__/relativedelta.cpython-310.pyc b/venv/lib/python3.10/site-packages/dateutil/__pycache__/relativedelta.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f403517d76e6040cd4d16f23a9d686d45d08d636 Binary files /dev/null and b/venv/lib/python3.10/site-packages/dateutil/__pycache__/relativedelta.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dateutil/__pycache__/tzwin.cpython-310.pyc b/venv/lib/python3.10/site-packages/dateutil/__pycache__/tzwin.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c559d0421bbd9bb7f4a42e47c51d4468f2d30bc3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/dateutil/__pycache__/tzwin.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dateutil/_common.py b/venv/lib/python3.10/site-packages/dateutil/_common.py new file mode 100644 index 0000000000000000000000000000000000000000..4eb2659bd2986125fcfb4afea5bae9efc2dcd1a0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/dateutil/_common.py @@ -0,0 +1,43 @@ +""" +Common code used in multiple modules. +""" + + +class weekday(object): + __slots__ = ["weekday", "n"] + + def __init__(self, weekday, n=None): + self.weekday = weekday + self.n = n + + def __call__(self, n): + if n == self.n: + return self + else: + return self.__class__(self.weekday, n) + + def __eq__(self, other): + try: + if self.weekday != other.weekday or self.n != other.n: + return False + except AttributeError: + return False + return True + + def __hash__(self): + return hash(( + self.weekday, + self.n, + )) + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday] + if not self.n: + return s + else: + return "%s(%+d)" % (s, self.n) + +# vim:ts=4:sw=4:et diff --git a/venv/lib/python3.10/site-packages/dateutil/_version.py b/venv/lib/python3.10/site-packages/dateutil/_version.py new file mode 100644 index 0000000000000000000000000000000000000000..ddda98098527a73348e694c2edb691fd625475fc --- /dev/null +++ b/venv/lib/python3.10/site-packages/dateutil/_version.py @@ -0,0 +1,4 @@ +# file generated by setuptools_scm +# don't change, don't track in version control +__version__ = version = '2.9.0.post0' +__version_tuple__ = version_tuple = (2, 9, 0) diff --git a/venv/lib/python3.10/site-packages/dateutil/easter.py b/venv/lib/python3.10/site-packages/dateutil/easter.py new file mode 100644 index 0000000000000000000000000000000000000000..f74d1f7442473997245ac683b8a269a3574d1ba4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/dateutil/easter.py @@ -0,0 +1,89 @@ +# -*- coding: utf-8 -*- +""" +This module offers a generic Easter computing method for any given year, using +Western, Orthodox or Julian algorithms. 
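+ + Example (illustrative; Western Easter in 2024 falls on 31 March): + + >>> from dateutil.easter import easter + >>> easter(2024) + datetime.date(2024, 3, 31)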
+""" + +import datetime + +__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"] + +EASTER_JULIAN = 1 +EASTER_ORTHODOX = 2 +EASTER_WESTERN = 3 + + +def easter(year, method=EASTER_WESTERN): + """ + This method was ported from the work done by GM Arts, + on top of the algorithm by Claus Tondering, which was + based in part on the algorithm of Ouding (1940), as + quoted in "Explanatory Supplement to the Astronomical + Almanac", P. Kenneth Seidelmann, editor. + + This algorithm implements three different Easter + calculation methods: + + 1. Original calculation in Julian calendar, valid in + dates after 326 AD + 2. Original method, with date converted to Gregorian + calendar, valid in years 1583 to 4099 + 3. Revised method, in Gregorian calendar, valid in + years 1583 to 4099 as well + + These methods are represented by the constants: + + * ``EASTER_JULIAN = 1`` + * ``EASTER_ORTHODOX = 2`` + * ``EASTER_WESTERN = 3`` + + The default method is method 3. + + More about the algorithm may be found at: + + `GM Arts: Easter Algorithms `_ + + and + + `The Calendar FAQ: Easter `_ + + """ + + if not (1 <= method <= 3): + raise ValueError("invalid method") + + # g - Golden year - 1 + # c - Century + # h - (23 - Epact) mod 30 + # i - Number of days from March 21 to Paschal Full Moon + # j - Weekday for PFM (0=Sunday, etc) + # p - Number of days from March 21 to Sunday on or before PFM + # (-6 to 28 methods 1 & 3, to 56 for method 2) + # e - Extra days to add for method 2 (converting Julian + # date to Gregorian date) + + y = year + g = y % 19 + e = 0 + if method < 3: + # Old method + i = (19*g + 15) % 30 + j = (y + y//4 + i) % 7 + if method == 2: + # Extra dates to convert Julian to Gregorian date + e = 10 + if y > 1600: + e = e + y//100 - 16 - (y//100 - 16)//4 + else: + # New method + c = y//100 + h = (c - c//4 - (8*c + 13)//25 + 19*g + 15) % 30 + i = h - (h//28)*(1 - (h//28)*(29//(h + 1))*((21 - g)//11)) + j = (y + y//4 + i + 2 - c + c//4) % 7 + + # p can be from -6 to 56 corresponding to dates 22 March to 23 May + # (later dates apply to method 2, although 23 May never actually occurs) + p = i - j + e + d = 1 + (p + 27 + (p + 6)//40) % 31 + m = 3 + (p + 26)//30 + return datetime.date(int(y), int(m), int(d)) diff --git a/venv/lib/python3.10/site-packages/dateutil/relativedelta.py b/venv/lib/python3.10/site-packages/dateutil/relativedelta.py new file mode 100644 index 0000000000000000000000000000000000000000..cd323a549e0f182541ebcde2d2ea1adfbbd9701e --- /dev/null +++ b/venv/lib/python3.10/site-packages/dateutil/relativedelta.py @@ -0,0 +1,599 @@ +# -*- coding: utf-8 -*- +import datetime +import calendar + +import operator +from math import copysign + +from six import integer_types +from warnings import warn + +from ._common import weekday + +MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7)) + +__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"] + + +class relativedelta(object): + """ + The relativedelta type is designed to be applied to an existing datetime and + can replace specific components of that datetime, or represents an interval + of time. + + It is based on the specification of the excellent work done by M.-A. Lemburg + in his + `mx.DateTime `_ extension. + However, notice that this type does *NOT* implement the same algorithm as + his work. Do *NOT* expect it to behave like mx.DateTime's counterpart. + + There are two different ways to build a relativedelta instance. 
The + first one is passing it two date/datetime classes:: + + relativedelta(datetime1, datetime2) + + The second one is passing it any number of the following keyword arguments:: + + relativedelta(arg1=x,arg2=y,arg3=z...) + + year, month, day, hour, minute, second, microsecond: + Absolute information (argument is singular); adding or subtracting a + relativedelta with absolute information does not perform an arithmetic + operation, but rather REPLACES the corresponding value in the + original datetime with the value(s) in relativedelta. + + years, months, weeks, days, hours, minutes, seconds, microseconds: + Relative information, may be negative (argument is plural); adding + or subtracting a relativedelta with relative information performs + the corresponding arithmetic operation on the original datetime value + with the information in the relativedelta. + + weekday: + One of the weekday instances (MO, TU, etc) available in the + relativedelta module. These instances may receive a parameter N, + specifying the Nth weekday, which could be positive or negative + (like MO(+1) or MO(-2)). Not specifying it is the same as specifying + +1. You can also use an integer, where 0=MO. This argument is always + relative e.g. if the calculated date is already Monday, using MO(1) + or MO(-1) won't change the day. To effectively make it absolute, use + it in combination with the day argument (e.g. day=1, MO(1) for first + Monday of the month). + + leapdays: + Will add given days to the date found, if year is a leap + year, and the date found is post 28 of february. + + yearday, nlyearday: + Set the yearday or the non-leap year day (jump leap days). + These are converted to day/month/leapdays information. + + There are relative and absolute forms of the keyword + arguments. The plural is relative, and the singular is + absolute. For each argument in the order below, the absolute form + is applied first (by setting each attribute to that value) and + then the relative form (by adding the value to the attribute). + + The order of attributes considered when this relativedelta is + added to a datetime is: + + 1. Year + 2. Month + 3. Day + 4. Hours + 5. Minutes + 6. Seconds + 7. Microseconds + + Finally, weekday is applied, using the rule described above. + + For example + + >>> from datetime import datetime + >>> from dateutil.relativedelta import relativedelta, MO + >>> dt = datetime(2018, 4, 9, 13, 37, 0) + >>> delta = relativedelta(hours=25, day=1, weekday=MO(1)) + >>> dt + delta + datetime.datetime(2018, 4, 2, 14, 37) + + First, the day is set to 1 (the first of the month), then 25 hours + are added, to get to the 2nd day and 14th hour, finally the + weekday is applied, but since the 2nd is already a Monday there is + no effect. + + """ + + def __init__(self, dt1=None, dt2=None, + years=0, months=0, days=0, leapdays=0, weeks=0, + hours=0, minutes=0, seconds=0, microseconds=0, + year=None, month=None, day=None, weekday=None, + yearday=None, nlyearday=None, + hour=None, minute=None, second=None, microsecond=None): + + if dt1 and dt2: + # datetime is a subclass of date. 
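+ # [Editor's note] Hedged examples of the two construction modes the docstring above describes (values checked by hand): + # + #     from datetime import date + #     from dateutil.relativedelta import relativedelta + #     relativedelta(date(2000, 3, 15), date(2000, 1, 1)) + #     # -> relativedelta(months=+2, days=+14)        (diff of two dates) + #     date(2000, 1, 1) + relativedelta(months=+2, days=+14) + #     # -> datetime.date(2000, 3, 15)                (keyword form applied)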
So both must be date + if not (isinstance(dt1, datetime.date) and + isinstance(dt2, datetime.date)): + raise TypeError("relativedelta only diffs datetime/date") + + # We allow two dates, or two datetimes, so we coerce them to be + # of the same type + if (isinstance(dt1, datetime.datetime) != + isinstance(dt2, datetime.datetime)): + if not isinstance(dt1, datetime.datetime): + dt1 = datetime.datetime.fromordinal(dt1.toordinal()) + elif not isinstance(dt2, datetime.datetime): + dt2 = datetime.datetime.fromordinal(dt2.toordinal()) + + self.years = 0 + self.months = 0 + self.days = 0 + self.leapdays = 0 + self.hours = 0 + self.minutes = 0 + self.seconds = 0 + self.microseconds = 0 + self.year = None + self.month = None + self.day = None + self.weekday = None + self.hour = None + self.minute = None + self.second = None + self.microsecond = None + self._has_time = 0 + + # Get year / month delta between the two + months = (dt1.year - dt2.year) * 12 + (dt1.month - dt2.month) + self._set_months(months) + + # Remove the year/month delta so the timedelta is just well-defined + # time units (seconds, days and microseconds) + dtm = self.__radd__(dt2) + + # If we've overshot our target, make an adjustment + if dt1 < dt2: + compare = operator.gt + increment = 1 + else: + compare = operator.lt + increment = -1 + + while compare(dt1, dtm): + months += increment + self._set_months(months) + dtm = self.__radd__(dt2) + + # Get the timedelta between the "months-adjusted" date and dt1 + delta = dt1 - dtm + self.seconds = delta.seconds + delta.days * 86400 + self.microseconds = delta.microseconds + else: + # Check for non-integer values in integer-only quantities + if any(x is not None and x != int(x) for x in (years, months)): + raise ValueError("Non-integer years and months are " + "ambiguous and not currently supported.") + + # Relative information + self.years = int(years) + self.months = int(months) + self.days = days + weeks * 7 + self.leapdays = leapdays + self.hours = hours + self.minutes = minutes + self.seconds = seconds + self.microseconds = microseconds + + # Absolute information + self.year = year + self.month = month + self.day = day + self.hour = hour + self.minute = minute + self.second = second + self.microsecond = microsecond + + if any(x is not None and int(x) != x + for x in (year, month, day, hour, + minute, second, microsecond)): + # For now we'll deprecate floats - later it'll be an error. + warn("Non-integer value passed as absolute information. 
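+ # [Editor's note] Hedged worked example of the yearday handling below: yearday=60 becomes month=3, day=1 with leapdays=-1, so the -1 day correction fires only in leap years and both cases land on the 60th day of the year: + # + #     from datetime import date + #     from dateutil.relativedelta import relativedelta + #     date(2024, 1, 1) + relativedelta(yearday=60)  # -> date(2024, 2, 29) + #     date(2023, 1, 1) + relativedelta(yearday=60)  # -> date(2023, 3, 1)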
" + + "This is not a well-defined condition and will raise " + + "errors in future versions.", DeprecationWarning) + + if isinstance(weekday, integer_types): + self.weekday = weekdays[weekday] + else: + self.weekday = weekday + + yday = 0 + if nlyearday: + yday = nlyearday + elif yearday: + yday = yearday + if yearday > 59: + self.leapdays = -1 + if yday: + ydayidx = [31, 59, 90, 120, 151, 181, 212, + 243, 273, 304, 334, 366] + for idx, ydays in enumerate(ydayidx): + if yday <= ydays: + self.month = idx+1 + if idx == 0: + self.day = yday + else: + self.day = yday-ydayidx[idx-1] + break + else: + raise ValueError("invalid year day (%d)" % yday) + + self._fix() + + def _fix(self): + if abs(self.microseconds) > 999999: + s = _sign(self.microseconds) + div, mod = divmod(self.microseconds * s, 1000000) + self.microseconds = mod * s + self.seconds += div * s + if abs(self.seconds) > 59: + s = _sign(self.seconds) + div, mod = divmod(self.seconds * s, 60) + self.seconds = mod * s + self.minutes += div * s + if abs(self.minutes) > 59: + s = _sign(self.minutes) + div, mod = divmod(self.minutes * s, 60) + self.minutes = mod * s + self.hours += div * s + if abs(self.hours) > 23: + s = _sign(self.hours) + div, mod = divmod(self.hours * s, 24) + self.hours = mod * s + self.days += div * s + if abs(self.months) > 11: + s = _sign(self.months) + div, mod = divmod(self.months * s, 12) + self.months = mod * s + self.years += div * s + if (self.hours or self.minutes or self.seconds or self.microseconds + or self.hour is not None or self.minute is not None or + self.second is not None or self.microsecond is not None): + self._has_time = 1 + else: + self._has_time = 0 + + @property + def weeks(self): + return int(self.days / 7.0) + + @weeks.setter + def weeks(self, value): + self.days = self.days - (self.weeks * 7) + value * 7 + + def _set_months(self, months): + self.months = months + if abs(self.months) > 11: + s = _sign(self.months) + div, mod = divmod(self.months * s, 12) + self.months = mod * s + self.years = div * s + else: + self.years = 0 + + def normalized(self): + """ + Return a version of this object represented entirely using integer + values for the relative attributes. + + >>> relativedelta(days=1.5, hours=2).normalized() + relativedelta(days=+1, hours=+14) + + :return: + Returns a :class:`dateutil.relativedelta.relativedelta` object. 
+ """ + # Cascade remainders down (rounding each to roughly nearest microsecond) + days = int(self.days) + + hours_f = round(self.hours + 24 * (self.days - days), 11) + hours = int(hours_f) + + minutes_f = round(self.minutes + 60 * (hours_f - hours), 10) + minutes = int(minutes_f) + + seconds_f = round(self.seconds + 60 * (minutes_f - minutes), 8) + seconds = int(seconds_f) + + microseconds = round(self.microseconds + 1e6 * (seconds_f - seconds)) + + # Constructor carries overflow back up with call to _fix() + return self.__class__(years=self.years, months=self.months, + days=days, hours=hours, minutes=minutes, + seconds=seconds, microseconds=microseconds, + leapdays=self.leapdays, year=self.year, + month=self.month, day=self.day, + weekday=self.weekday, hour=self.hour, + minute=self.minute, second=self.second, + microsecond=self.microsecond) + + def __add__(self, other): + if isinstance(other, relativedelta): + return self.__class__(years=other.years + self.years, + months=other.months + self.months, + days=other.days + self.days, + hours=other.hours + self.hours, + minutes=other.minutes + self.minutes, + seconds=other.seconds + self.seconds, + microseconds=(other.microseconds + + self.microseconds), + leapdays=other.leapdays or self.leapdays, + year=(other.year if other.year is not None + else self.year), + month=(other.month if other.month is not None + else self.month), + day=(other.day if other.day is not None + else self.day), + weekday=(other.weekday if other.weekday is not None + else self.weekday), + hour=(other.hour if other.hour is not None + else self.hour), + minute=(other.minute if other.minute is not None + else self.minute), + second=(other.second if other.second is not None + else self.second), + microsecond=(other.microsecond if other.microsecond + is not None else + self.microsecond)) + if isinstance(other, datetime.timedelta): + return self.__class__(years=self.years, + months=self.months, + days=self.days + other.days, + hours=self.hours, + minutes=self.minutes, + seconds=self.seconds + other.seconds, + microseconds=self.microseconds + other.microseconds, + leapdays=self.leapdays, + year=self.year, + month=self.month, + day=self.day, + weekday=self.weekday, + hour=self.hour, + minute=self.minute, + second=self.second, + microsecond=self.microsecond) + if not isinstance(other, datetime.date): + return NotImplemented + elif self._has_time and not isinstance(other, datetime.datetime): + other = datetime.datetime.fromordinal(other.toordinal()) + year = (self.year or other.year)+self.years + month = self.month or other.month + if self.months: + assert 1 <= abs(self.months) <= 12 + month += self.months + if month > 12: + year += 1 + month -= 12 + elif month < 1: + year -= 1 + month += 12 + day = min(calendar.monthrange(year, month)[1], + self.day or other.day) + repl = {"year": year, "month": month, "day": day} + for attr in ["hour", "minute", "second", "microsecond"]: + value = getattr(self, attr) + if value is not None: + repl[attr] = value + days = self.days + if self.leapdays and month > 2 and calendar.isleap(year): + days += self.leapdays + ret = (other.replace(**repl) + + datetime.timedelta(days=days, + hours=self.hours, + minutes=self.minutes, + seconds=self.seconds, + microseconds=self.microseconds)) + if self.weekday: + weekday, nth = self.weekday.weekday, self.weekday.n or 1 + jumpdays = (abs(nth) - 1) * 7 + if nth > 0: + jumpdays += (7 - ret.weekday() + weekday) % 7 + else: + jumpdays += (ret.weekday() - weekday) % 7 + jumpdays *= -1 + ret += 
datetime.timedelta(days=jumpdays) + return ret + + def __radd__(self, other): + return self.__add__(other) + + def __rsub__(self, other): + return self.__neg__().__radd__(other) + + def __sub__(self, other): + if not isinstance(other, relativedelta): + return NotImplemented # In case the other object defines __rsub__ + return self.__class__(years=self.years - other.years, + months=self.months - other.months, + days=self.days - other.days, + hours=self.hours - other.hours, + minutes=self.minutes - other.minutes, + seconds=self.seconds - other.seconds, + microseconds=self.microseconds - other.microseconds, + leapdays=self.leapdays or other.leapdays, + year=(self.year if self.year is not None + else other.year), + month=(self.month if self.month is not None else + other.month), + day=(self.day if self.day is not None else + other.day), + weekday=(self.weekday if self.weekday is not None else + other.weekday), + hour=(self.hour if self.hour is not None else + other.hour), + minute=(self.minute if self.minute is not None else + other.minute), + second=(self.second if self.second is not None else + other.second), + microsecond=(self.microsecond if self.microsecond + is not None else + other.microsecond)) + + def __abs__(self): + return self.__class__(years=abs(self.years), + months=abs(self.months), + days=abs(self.days), + hours=abs(self.hours), + minutes=abs(self.minutes), + seconds=abs(self.seconds), + microseconds=abs(self.microseconds), + leapdays=self.leapdays, + year=self.year, + month=self.month, + day=self.day, + weekday=self.weekday, + hour=self.hour, + minute=self.minute, + second=self.second, + microsecond=self.microsecond) + + def __neg__(self): + return self.__class__(years=-self.years, + months=-self.months, + days=-self.days, + hours=-self.hours, + minutes=-self.minutes, + seconds=-self.seconds, + microseconds=-self.microseconds, + leapdays=self.leapdays, + year=self.year, + month=self.month, + day=self.day, + weekday=self.weekday, + hour=self.hour, + minute=self.minute, + second=self.second, + microsecond=self.microsecond) + + def __bool__(self): + return not (not self.years and + not self.months and + not self.days and + not self.hours and + not self.minutes and + not self.seconds and + not self.microseconds and + not self.leapdays and + self.year is None and + self.month is None and + self.day is None and + self.weekday is None and + self.hour is None and + self.minute is None and + self.second is None and + self.microsecond is None) + # Compatibility with Python 2.x + __nonzero__ = __bool__ + + def __mul__(self, other): + try: + f = float(other) + except TypeError: + return NotImplemented + + return self.__class__(years=int(self.years * f), + months=int(self.months * f), + days=int(self.days * f), + hours=int(self.hours * f), + minutes=int(self.minutes * f), + seconds=int(self.seconds * f), + microseconds=int(self.microseconds * f), + leapdays=self.leapdays, + year=self.year, + month=self.month, + day=self.day, + weekday=self.weekday, + hour=self.hour, + minute=self.minute, + second=self.second, + microsecond=self.microsecond) + + __rmul__ = __mul__ + + def __eq__(self, other): + if not isinstance(other, relativedelta): + return NotImplemented + if self.weekday or other.weekday: + if not self.weekday or not other.weekday: + return False + if self.weekday.weekday != other.weekday.weekday: + return False + n1, n2 = self.weekday.n, other.weekday.n + if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)): + return False + return (self.years == other.years and + 
self.months == other.months and + self.days == other.days and + self.hours == other.hours and + self.minutes == other.minutes and + self.seconds == other.seconds and + self.microseconds == other.microseconds and + self.leapdays == other.leapdays and + self.year == other.year and + self.month == other.month and + self.day == other.day and + self.hour == other.hour and + self.minute == other.minute and + self.second == other.second and + self.microsecond == other.microsecond) + + def __hash__(self): + return hash(( + self.weekday, + self.years, + self.months, + self.days, + self.hours, + self.minutes, + self.seconds, + self.microseconds, + self.leapdays, + self.year, + self.month, + self.day, + self.hour, + self.minute, + self.second, + self.microsecond, + )) + + def __ne__(self, other): + return not self.__eq__(other) + + def __div__(self, other): + try: + reciprocal = 1 / float(other) + except TypeError: + return NotImplemented + + return self.__mul__(reciprocal) + + __truediv__ = __div__ + + def __repr__(self): + l = [] + for attr in ["years", "months", "days", "leapdays", + "hours", "minutes", "seconds", "microseconds"]: + value = getattr(self, attr) + if value: + l.append("{attr}={value:+g}".format(attr=attr, value=value)) + for attr in ["year", "month", "day", "weekday", + "hour", "minute", "second", "microsecond"]: + value = getattr(self, attr) + if value is not None: + l.append("{attr}={value}".format(attr=attr, value=repr(value))) + return "{classname}({attrs})".format(classname=self.__class__.__name__, + attrs=", ".join(l)) + + +def _sign(x): + return int(copysign(1, x)) + +# vim:ts=4:sw=4:et diff --git a/venv/lib/python3.10/site-packages/dateutil/tzwin.py b/venv/lib/python3.10/site-packages/dateutil/tzwin.py new file mode 100644 index 0000000000000000000000000000000000000000..cebc673e40fc376653ebf037e96f0a6d0b33e906 --- /dev/null +++ b/venv/lib/python3.10/site-packages/dateutil/tzwin.py @@ -0,0 +1,2 @@ +# tzwin has moved to dateutil.tz.win +from .tz.win import * diff --git a/venv/lib/python3.10/site-packages/dateutil/utils.py b/venv/lib/python3.10/site-packages/dateutil/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..dd2d245a0bebcd5fc37ac20526aabbd5358dab0e --- /dev/null +++ b/venv/lib/python3.10/site-packages/dateutil/utils.py @@ -0,0 +1,71 @@ +# -*- coding: utf-8 -*- +""" +This module offers general convenience and utility functions for dealing with +datetimes. + +.. versionadded:: 2.7.0 +""" +from __future__ import unicode_literals + +from datetime import datetime, time + + +def today(tzinfo=None): + """ + Returns a :py:class:`datetime` representing the current day at midnight + + :param tzinfo: + The time zone to attach (also used to determine the current day). + + :return: + A :py:class:`datetime.datetime` object representing the current day + at midnight. + """ + + dt = datetime.now(tzinfo) + return datetime.combine(dt.date(), time(0, tzinfo=tzinfo)) + + +def default_tzinfo(dt, tzinfo): + """ + Sets the ``tzinfo`` parameter on naive datetimes only + + This is useful for example when you are provided a datetime that may have + either an implicit or explicit time zone, such as when parsing a time zone + string. + + .. 
doctest:: + + >>> from dateutil.tz import tzoffset + >>> from dateutil.parser import parse + >>> from dateutil.utils import default_tzinfo + >>> dflt_tz = tzoffset("EST", -18000) + >>> print(default_tzinfo(parse('2014-01-01 12:30 UTC'), dflt_tz)) + 2014-01-01 12:30:00+00:00 + >>> print(default_tzinfo(parse('2014-01-01 12:30'), dflt_tz)) + 2014-01-01 12:30:00-05:00 + + :param dt: + The datetime on which to replace the time zone + + :param tzinfo: + The :py:class:`datetime.tzinfo` subclass instance to assign to + ``dt`` if (and only if) it is naive. + + :return: + Returns an aware :py:class:`datetime.datetime`. + """ + if dt.tzinfo is not None: + return dt + else: + return dt.replace(tzinfo=tzinfo) + + +def within_delta(dt1, dt2, delta): + """ + Returns True if ``dt1`` and ``dt2`` differ by no more than ``delta`` + in either direction; useful when a negligible difference between two + datetimes should be treated as equality. + """ + delta = abs(delta) + difference = dt1 - dt2 + return -delta <= difference <= delta diff --git a/venv/lib/python3.10/site-packages/dateutil/zoneinfo/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/dateutil/zoneinfo/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7391ac8000ed1fc4245ea335cd90821f9251c900 Binary files /dev/null and b/venv/lib/python3.10/site-packages/dateutil/zoneinfo/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dateutil/zoneinfo/__pycache__/rebuild.cpython-310.pyc b/venv/lib/python3.10/site-packages/dateutil/zoneinfo/__pycache__/rebuild.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a5113af5665d562082ef3ac84c014e97b73ea55 Binary files /dev/null and b/venv/lib/python3.10/site-packages/dateutil/zoneinfo/__pycache__/rebuild.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/LICENSE b/venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..9ecdc7586d08805bc984539f6672476e86e538b6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2005-2021 Fredrik Johansson and mpmath contributors + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + a. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + b. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + c. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. diff --git a/venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/METADATA b/venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..994b48acdba5cd0fdfb28cd1fbb0a84ebf81cba5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/METADATA @@ -0,0 +1,233 @@ +Metadata-Version: 2.1 +Name: mpmath +Version: 1.3.0 +Summary: Python library for arbitrary-precision floating-point arithmetic +Home-page: http://mpmath.org/ +Author: Fredrik Johansson +Author-email: fredrik.johansson@gmail.com +License: BSD +Project-URL: Source, https://github.com/fredrik-johansson/mpmath +Project-URL: Tracker, https://github.com/fredrik-johansson/mpmath/issues +Project-URL: Documentation, http://mpmath.org/doc/current/ +Classifier: License :: OSI Approved :: BSD License +Classifier: Topic :: Scientific/Engineering :: Mathematics +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +License-File: LICENSE +Provides-Extra: develop +Requires-Dist: pytest (>=4.6) ; extra == 'develop' +Requires-Dist: pycodestyle ; extra == 'develop' +Requires-Dist: pytest-cov ; extra == 'develop' +Requires-Dist: codecov ; extra == 'develop' +Requires-Dist: wheel ; extra == 'develop' +Provides-Extra: docs +Requires-Dist: sphinx ; extra == 'docs' +Provides-Extra: gmpy +Requires-Dist: gmpy2 (>=2.1.0a4) ; (platform_python_implementation != "PyPy") and extra == 'gmpy' +Provides-Extra: tests +Requires-Dist: pytest (>=4.6) ; extra == 'tests' + +mpmath +====== + +|pypi version| |Build status| |Code coverage status| |Zenodo Badge| + +.. |pypi version| image:: https://img.shields.io/pypi/v/mpmath.svg + :target: https://pypi.python.org/pypi/mpmath +.. |Build status| image:: https://github.com/fredrik-johansson/mpmath/workflows/test/badge.svg + :target: https://github.com/fredrik-johansson/mpmath/actions?workflow=test +.. |Code coverage status| image:: https://codecov.io/gh/fredrik-johansson/mpmath/branch/master/graph/badge.svg + :target: https://codecov.io/gh/fredrik-johansson/mpmath +.. |Zenodo Badge| image:: https://zenodo.org/badge/2934512.svg + :target: https://zenodo.org/badge/latestdoi/2934512 + +A Python library for arbitrary-precision floating-point arithmetic. + +Website: http://mpmath.org/ +Main author: Fredrik Johansson + +Mpmath is free software released under the New BSD License (see the +LICENSE file for details) + +0. 
History and credits +---------------------- + +The following people (among others) have contributed major patches +or new features to mpmath: + +* Pearu Peterson +* Mario Pernici +* Ondrej Certik +* Vinzent Steinberg +* Nimish Telang +* Mike Taschuk +* Case Van Horsen +* Jorn Baayen +* Chris Smith +* Juan Arias de Reyna +* Ioannis Tziakos +* Aaron Meurer +* Stefan Krastanov +* Ken Allen +* Timo Hartmann +* Sergey B Kirpichev +* Kris Kuhlman +* Paul Masson +* Michael Kagalenko +* Jonathan Warner +* Max Gaukler +* Guillermo Navas-Palencia +* Nike Dattani + +Numerous other people have contributed by reporting bugs, +requesting new features, or suggesting improvements to the +documentation. + +For a detailed changelog, including individual contributions, +see the CHANGES file. + +Fredrik's work on mpmath during summer 2008 was sponsored by Google +as part of the Google Summer of Code program. + +Fredrik's work on mpmath during summer 2009 was sponsored by the +American Institute of Mathematics under the support of the National Science +Foundation Grant No. 0757627 (FRG: L-functions and Modular Forms). + +Any opinions, findings, and conclusions or recommendations expressed in this +material are those of the author(s) and do not necessarily reflect the +views of the sponsors. + +Credit also goes to: + +* The authors of the GMP library and the Python wrapper + gmpy, enabling mpmath to become much faster at + high precision +* The authors of MPFR, pari/gp, MPFUN, and other arbitrary- + precision libraries, whose documentation has been helpful + for implementing many of the algorithms in mpmath +* Wikipedia contributors; Abramowitz & Stegun; Gradshteyn & Ryzhik; + Wolfram Research for MathWorld and the Wolfram Functions site. + These are the main references used for special functions + implementations. +* George Brandl for developing the Sphinx documentation tool + used to build mpmath's documentation + +Release history: + +* Version 1.3.0 released on March 7, 2023 +* Version 1.2.0 released on February 1, 2021 +* Version 1.1.0 released on December 11, 2018 +* Version 1.0.0 released on September 27, 2017 +* Version 0.19 released on June 10, 2014 +* Version 0.18 released on December 31, 2013 +* Version 0.17 released on February 1, 2011 +* Version 0.16 released on September 24, 2010 +* Version 0.15 released on June 6, 2010 +* Version 0.14 released on February 5, 2010 +* Version 0.13 released on August 13, 2009 +* Version 0.12 released on June 9, 2009 +* Version 0.11 released on January 26, 2009 +* Version 0.10 released on October 15, 2008 +* Version 0.9 released on August 23, 2008 +* Version 0.8 released on April 20, 2008 +* Version 0.7 released on March 12, 2008 +* Version 0.6 released on January 13, 2008 +* Version 0.5 released on November 24, 2007 +* Version 0.4 released on November 3, 2007 +* Version 0.3 released on October 5, 2007 +* Version 0.2 released on October 2, 2007 +* Version 0.1 released on September 27, 2007 + +1. Download & installation +-------------------------- + +Mpmath requires Python 2.7 or 3.5 (or later versions). It has been tested +with CPython 2.7, 3.5 through 3.7 and for PyPy. 
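+ +A quick sanity check of an installed copy (an illustrative sketch added for +this document, not part of the upstream README) is to raise the working +precision via the documented ``mp.dps`` setting and evaluate a familiar +constant:: + +    # minimal smoke test of an mpmath installation; assumes mpmath is importable +    from mpmath import mp, mpf, sqrt + +    mp.dps = 50          # work with 50 significant decimal digits +    print(sqrt(mpf(2)))  # prints sqrt(2) to roughly 50 digits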
+ +The latest release of mpmath can be downloaded from the mpmath +website and from https://github.com/fredrik-johansson/mpmath/releases + +It should also be available in the Python Package Index at +https://pypi.python.org/pypi/mpmath + +To install the latest release of mpmath with pip, simply run + +``pip install mpmath`` + +Or unpack the mpmath archive and run + +``python setup.py install`` + +Mpmath can also be installed using + +``python -m easy_install mpmath`` + +The latest development code is available from +https://github.com/fredrik-johansson/mpmath + +See the main documentation for more detailed instructions. + +2. Running tests +---------------- + +The unit tests in mpmath/tests/ can be run via the script +runtests.py, but it is recommended to run them with py.test +(https://pytest.org/), especially +to generate more useful reports in case there are failures. + +You may also want to check out the demo scripts in the demo +directory. + +The master branch is automatically tested by Travis CI. + +3. Documentation +---------------- + +Documentation in reStructuredText format is available in the +doc directory included with the source package. These files +are human-readable, but can be compiled to prettier HTML using +the build.py script (requires Sphinx, http://sphinx.pocoo.org/). + +See setup.txt in the documentation for more information. + +The most recent documentation is also available in HTML format: + +http://mpmath.org/doc/current/ + +4. Known problems +----------------- + +Mpmath is a work in progress. Major issues include: + +* Some functions may return incorrect values when given extremely + large arguments or arguments very close to singularities. + +* Directed rounding works for arithmetic operations. It is implemented + heuristically for other operations, and their results may be off by one + or two units in the last place (even if otherwise accurate). + +* Some IEEE 754 features are not available. Infinities and NaN are + partially supported; denormal rounding is currently not available + at all. + +* The interface for switching precision and rounding is not finalized. + The current method is not thread-safe. + +5. 
Help and bug reports +----------------------- + +General questions and comments can be sent to the mpmath mailing list, +mpmath@googlegroups.com + +You can also report bugs and send patches to the mpmath issue tracker, +https://github.com/fredrik-johansson/mpmath/issues diff --git a/venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/RECORD b/venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..21bdf458683495b2e0d7f52464d33fdc500333e7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/RECORD @@ -0,0 +1,180 @@ +mpmath-1.3.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +mpmath-1.3.0.dist-info/LICENSE,sha256=wmyugdpFCOXiSZhXd6M4IfGDIj67dNf4z7-Q_n7vL7c,1537 +mpmath-1.3.0.dist-info/METADATA,sha256=RLZupES5wNGa6UgV01a_BHrmtoDBkmi1wmVofNaoFAY,8630 +mpmath-1.3.0.dist-info/RECORD,, +mpmath-1.3.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92 +mpmath-1.3.0.dist-info/top_level.txt,sha256=BUVWrh8EVlkOhM1n3X9S8msTaVcC-3s6Sjt60avHYus,7 +mpmath/__init__.py,sha256=skFYTSwfwDBLChAV6pI3SdewgAQR3UBtyrfIK_Jdn-g,8765 +mpmath/__pycache__/__init__.cpython-310.pyc,, +mpmath/__pycache__/ctx_base.cpython-310.pyc,, +mpmath/__pycache__/ctx_fp.cpython-310.pyc,, +mpmath/__pycache__/ctx_iv.cpython-310.pyc,, +mpmath/__pycache__/ctx_mp.cpython-310.pyc,, +mpmath/__pycache__/ctx_mp_python.cpython-310.pyc,, +mpmath/__pycache__/function_docs.cpython-310.pyc,, +mpmath/__pycache__/identification.cpython-310.pyc,, +mpmath/__pycache__/math2.cpython-310.pyc,, +mpmath/__pycache__/rational.cpython-310.pyc,, +mpmath/__pycache__/usertools.cpython-310.pyc,, +mpmath/__pycache__/visualization.cpython-310.pyc,, +mpmath/calculus/__init__.py,sha256=UAgCIJ1YmaeyTqpNzjBlCZGeIzLtUZMEEpl99VWNjus,162 +mpmath/calculus/__pycache__/__init__.cpython-310.pyc,, +mpmath/calculus/__pycache__/approximation.cpython-310.pyc,, +mpmath/calculus/__pycache__/calculus.cpython-310.pyc,, +mpmath/calculus/__pycache__/differentiation.cpython-310.pyc,, +mpmath/calculus/__pycache__/extrapolation.cpython-310.pyc,, +mpmath/calculus/__pycache__/inverselaplace.cpython-310.pyc,, +mpmath/calculus/__pycache__/odes.cpython-310.pyc,, +mpmath/calculus/__pycache__/optimization.cpython-310.pyc,, +mpmath/calculus/__pycache__/polynomials.cpython-310.pyc,, +mpmath/calculus/__pycache__/quadrature.cpython-310.pyc,, +mpmath/calculus/approximation.py,sha256=vyzu3YI6r63Oq1KFHrQz02mGXAcH23emqNYhJuUaFZ4,8817 +mpmath/calculus/calculus.py,sha256=A0gSp0hxSyEDfugJViY3CeWalF-vK701YftzrjSQzQ4,112 +mpmath/calculus/differentiation.py,sha256=2L6CBj8xtX9iip98NPbKsLtwtRjxi571wYmTMHFeL90,20226 +mpmath/calculus/extrapolation.py,sha256=xM0rvk2DFEF4iR1Jhl-Y3aS93iW9VVJX7y9IGpmzC-A,73306 +mpmath/calculus/inverselaplace.py,sha256=5-pn8N_t0PtgBTXixsXZ4xxrihK2J5gYsVfTKfDx4gA,36056 +mpmath/calculus/odes.py,sha256=gaHiw7IJjsONNTAa6izFPZpmcg9uyTp8MULnGdzTIGo,9908 +mpmath/calculus/optimization.py,sha256=bKnShXElBOmVOIOlFeksDsYCp9fYSmYwKmXDt0z26MM,32856 +mpmath/calculus/polynomials.py,sha256=D16BhU_SHbVi06IxNwABHR-H77IylndNsN3muPTuFYs,7877 +mpmath/calculus/quadrature.py,sha256=n-avtS8E43foV-5tr5lofgOBaiMUYE8AJjQcWI9QcKk,42432 +mpmath/ctx_base.py,sha256=rfjmfMyA55x8R_cWFINUwWVTElfZmyx5erKDdauSEVw,15985 +mpmath/ctx_fp.py,sha256=ctUjx_NoU0iFWk05cXDYCL2ZtLZOlWs1n6Zao3pbG2g,6572 +mpmath/ctx_iv.py,sha256=tqdMr-GDfkZk1EhoGeCAajy7pQv-RWtrVqhYjfI8r4g,17211 +mpmath/ctx_mp.py,sha256=d3r4t7xHNqSFtmqsA9Btq1Npy3WTM-pcM2_jeCyECxY,49452 
+mpmath/ctx_mp_python.py,sha256=3olYWo4lk1SnQ0A_IaZ181qqG8u5pxGat_v-L4Qtn3Y,37815 +mpmath/function_docs.py,sha256=g4PP8n6ILXmHcLyA50sxK6Tmp_Z4_pRN-wDErU8D1i4,283512 +mpmath/functions/__init__.py,sha256=YXVdhqv-6LKm6cr5xxtTNTtuD9zDPKGQl8GmS0xz2xo,330 +mpmath/functions/__pycache__/__init__.cpython-310.pyc,, +mpmath/functions/__pycache__/bessel.cpython-310.pyc,, +mpmath/functions/__pycache__/elliptic.cpython-310.pyc,, +mpmath/functions/__pycache__/expintegrals.cpython-310.pyc,, +mpmath/functions/__pycache__/factorials.cpython-310.pyc,, +mpmath/functions/__pycache__/functions.cpython-310.pyc,, +mpmath/functions/__pycache__/hypergeometric.cpython-310.pyc,, +mpmath/functions/__pycache__/orthogonal.cpython-310.pyc,, +mpmath/functions/__pycache__/qfunctions.cpython-310.pyc,, +mpmath/functions/__pycache__/rszeta.cpython-310.pyc,, +mpmath/functions/__pycache__/signals.cpython-310.pyc,, +mpmath/functions/__pycache__/theta.cpython-310.pyc,, +mpmath/functions/__pycache__/zeta.cpython-310.pyc,, +mpmath/functions/__pycache__/zetazeros.cpython-310.pyc,, +mpmath/functions/bessel.py,sha256=dUPLu8frlK-vmf3-irX_7uvwyw4xccv6EIizmIZ88kM,37938 +mpmath/functions/elliptic.py,sha256=qz0yVMb4lWEeOTDL_DWz5u5awmGIPKAsuZFJXgwHJNU,42237 +mpmath/functions/expintegrals.py,sha256=75X_MRdYc1F_X73bgNiOJqwRlS2hqAzcFLl3RM2tCDc,11644 +mpmath/functions/factorials.py,sha256=8_6kCR7e4k1GwxiAOJu0NRadeF4jA28qx4hidhu4ILk,5273 +mpmath/functions/functions.py,sha256=ub2JExvqzCWLkm5yAm72Fr6fdWmZZUknq9_3w9MEigI,18100 +mpmath/functions/hypergeometric.py,sha256=Z0OMAMC4ylK42n_SnamyFVnUx6zHLyCLCoJDSZ1JrHY,51570 +mpmath/functions/orthogonal.py,sha256=FabkxKfBoSseA5flWu1a3re-2BYaew9augqIsT8LaLw,16097 +mpmath/functions/qfunctions.py,sha256=a3EHGKQt_jMd4x9I772Jz-TGFnGY-arWqPvZGz9QSe0,7633 +mpmath/functions/rszeta.py,sha256=yuUVp4ilIyDmXyE3WTBxDDjwfEJNypJnbPS-xPH5How,46184 +mpmath/functions/signals.py,sha256=ELotwQaW1CDpv-eeJzOZ5c23NhfaZcj9_Gkb3psvS0Q,703 +mpmath/functions/theta.py,sha256=KggOocczoMG6_HMoal4oEP7iZ4SKOou9JFE-WzY2r3M,37320 +mpmath/functions/zeta.py,sha256=ue7JY7GXA0oX8q08sQJl2CSRrZ7kOt8HsftpVjnTwrE,36410 +mpmath/functions/zetazeros.py,sha256=uq6TVyZBcY2MLX7VSdVfn0TOkowBLM9fXtnySEwaNzw,30858 +mpmath/identification.py,sha256=7aMdngRAaeL_MafDUNbmEIlGQSklHDZ8pmPFt-OLgkw,29253 +mpmath/libmp/__init__.py,sha256=UCDjLZw4brbklaCmSixCcPdLdHkz8sF_-6F_wr0duAg,3790 +mpmath/libmp/__pycache__/__init__.cpython-310.pyc,, +mpmath/libmp/__pycache__/backend.cpython-310.pyc,, +mpmath/libmp/__pycache__/gammazeta.cpython-310.pyc,, +mpmath/libmp/__pycache__/libelefun.cpython-310.pyc,, +mpmath/libmp/__pycache__/libhyper.cpython-310.pyc,, +mpmath/libmp/__pycache__/libintmath.cpython-310.pyc,, +mpmath/libmp/__pycache__/libmpc.cpython-310.pyc,, +mpmath/libmp/__pycache__/libmpf.cpython-310.pyc,, +mpmath/libmp/__pycache__/libmpi.cpython-310.pyc,, +mpmath/libmp/backend.py,sha256=26A8pUkaGov26vrrFNQVyWJ5LDtK8sl3UHrYLecaTjA,3360 +mpmath/libmp/gammazeta.py,sha256=Xqdw6PMoswDaSca_sOs-IglRuk3fb8c9p43M_lbcrlc,71469 +mpmath/libmp/libelefun.py,sha256=joBZP4FOdxPfieWso1LPtSr6dHydpG_LQiF_bYQYWMg,43861 +mpmath/libmp/libhyper.py,sha256=J9fmdDF6u27EcssEWvBuVaAa3hFjPvPN1SgRgu1dEbc,36624 +mpmath/libmp/libintmath.py,sha256=aIRT0rkUZ_sdGQf3TNCLd-pBMvtQWjssbvFLfK7U0jc,16688 +mpmath/libmp/libmpc.py,sha256=KBndUjs5YVS32-Id3fflDfYgpdW1Prx6zfo8Ez5Qbrs,26875 +mpmath/libmp/libmpf.py,sha256=vpP0kNVkScbCVoZogJ4Watl4I7Ce0d4dzHVjfVe57so,45021 +mpmath/libmp/libmpi.py,sha256=u0I5Eiwkqa-4-dXETi5k7MuaxBeZbvCAPFtl93U9YF0,27622 
+mpmath/math2.py,sha256=O5Dglg81SsW0wfHDUJcXOD8-cCaLvbVIvyw0sVmRbpI,18561 +mpmath/matrices/__init__.py,sha256=ETzGDciYbq9ftiKwaMbJ15EI-KNXHrzRb-ZHehhqFjs,94 +mpmath/matrices/__pycache__/__init__.cpython-310.pyc,, +mpmath/matrices/__pycache__/calculus.cpython-310.pyc,, +mpmath/matrices/__pycache__/eigen.cpython-310.pyc,, +mpmath/matrices/__pycache__/eigen_symmetric.cpython-310.pyc,, +mpmath/matrices/__pycache__/linalg.cpython-310.pyc,, +mpmath/matrices/__pycache__/matrices.cpython-310.pyc,, +mpmath/matrices/calculus.py,sha256=PNRq-p2nxgT-fzC54K2depi8ddhdx6Q86G8qpUiHeUY,18609 +mpmath/matrices/eigen.py,sha256=GbDXI3CixzEdXxr1G86uUWkAngAvd-05MmSQ-Tsu_5k,24394 +mpmath/matrices/eigen_symmetric.py,sha256=FPKPeQr1cGYw6Y6ea32a1YdEWQDLP6JlQHEA2WfNLYg,58534 +mpmath/matrices/linalg.py,sha256=04C3ijzMFom7ob5fXBCDfyPPdo3BIboIeE8x2A6vqF0,26958 +mpmath/matrices/matrices.py,sha256=o78Eq62EHQnxcsR0LBoWDEGREOoN4L2iDM1q3dQrw0o,32331 +mpmath/rational.py,sha256=64d56fvZXngYZT7nOAHeFRUX77eJ1A0R3rpfWBU-mSo,5976 +mpmath/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +mpmath/tests/__pycache__/__init__.cpython-310.pyc,, +mpmath/tests/__pycache__/extratest_gamma.cpython-310.pyc,, +mpmath/tests/__pycache__/extratest_zeta.cpython-310.pyc,, +mpmath/tests/__pycache__/runtests.cpython-310.pyc,, +mpmath/tests/__pycache__/test_basic_ops.cpython-310.pyc,, +mpmath/tests/__pycache__/test_bitwise.cpython-310.pyc,, +mpmath/tests/__pycache__/test_calculus.cpython-310.pyc,, +mpmath/tests/__pycache__/test_compatibility.cpython-310.pyc,, +mpmath/tests/__pycache__/test_convert.cpython-310.pyc,, +mpmath/tests/__pycache__/test_diff.cpython-310.pyc,, +mpmath/tests/__pycache__/test_division.cpython-310.pyc,, +mpmath/tests/__pycache__/test_eigen.cpython-310.pyc,, +mpmath/tests/__pycache__/test_eigen_symmetric.cpython-310.pyc,, +mpmath/tests/__pycache__/test_elliptic.cpython-310.pyc,, +mpmath/tests/__pycache__/test_fp.cpython-310.pyc,, +mpmath/tests/__pycache__/test_functions.cpython-310.pyc,, +mpmath/tests/__pycache__/test_functions2.cpython-310.pyc,, +mpmath/tests/__pycache__/test_gammazeta.cpython-310.pyc,, +mpmath/tests/__pycache__/test_hp.cpython-310.pyc,, +mpmath/tests/__pycache__/test_identify.cpython-310.pyc,, +mpmath/tests/__pycache__/test_interval.cpython-310.pyc,, +mpmath/tests/__pycache__/test_levin.cpython-310.pyc,, +mpmath/tests/__pycache__/test_linalg.cpython-310.pyc,, +mpmath/tests/__pycache__/test_matrices.cpython-310.pyc,, +mpmath/tests/__pycache__/test_mpmath.cpython-310.pyc,, +mpmath/tests/__pycache__/test_ode.cpython-310.pyc,, +mpmath/tests/__pycache__/test_pickle.cpython-310.pyc,, +mpmath/tests/__pycache__/test_power.cpython-310.pyc,, +mpmath/tests/__pycache__/test_quad.cpython-310.pyc,, +mpmath/tests/__pycache__/test_rootfinding.cpython-310.pyc,, +mpmath/tests/__pycache__/test_special.cpython-310.pyc,, +mpmath/tests/__pycache__/test_str.cpython-310.pyc,, +mpmath/tests/__pycache__/test_summation.cpython-310.pyc,, +mpmath/tests/__pycache__/test_trig.cpython-310.pyc,, +mpmath/tests/__pycache__/test_visualization.cpython-310.pyc,, +mpmath/tests/__pycache__/torture.cpython-310.pyc,, +mpmath/tests/extratest_gamma.py,sha256=xidhXUelILcxtiPGoTBHjqUOKIJzEaZ_v3nntGQyWZQ,7228 +mpmath/tests/extratest_zeta.py,sha256=sg10j9RhjBpV2EdUqyYhGV2ERWvM--EvwwGIz6HTmlw,1003 +mpmath/tests/runtests.py,sha256=7NUV82F3K_5AhU8mCLUFf5OibtT7uloFCwPyM3l71wM,5189 +mpmath/tests/test_basic_ops.py,sha256=dsB8DRG-GrPzBaZ-bIauYabaeqXbfqBo9SIP9BqcTSs,15348 
+mpmath/tests/test_bitwise.py,sha256=-nLYhgQbhDza3SQM63BhktYntACagqMYx9ib3dPnTKM,7686 +mpmath/tests/test_calculus.py,sha256=4oxtNfMpO4RLLoOzrv7r9-h8BcqfBsJIE6UpsHe7c4w,9187 +mpmath/tests/test_compatibility.py,sha256=_t3ASZ3jhfAMnN1voWX7PDNIDzn-3PokkJGIdT1x7y0,2306 +mpmath/tests/test_convert.py,sha256=JPcDcTJIWh5prIxjx5DM1aNWgqlUoF2KpHvAgK3uHi4,8834 +mpmath/tests/test_diff.py,sha256=qjiF8NxQ8vueuZ5ZHGPQ-kjcj_I7Jh_fEdFtaA8DzEI,2466 +mpmath/tests/test_division.py,sha256=6lUeZfmaBWvvszdqlWLMHgXPjVsxvW1WZpd4-jFWCpU,5340 +mpmath/tests/test_eigen.py,sha256=2mnqVATGbsJkvSVHPpitfAk881twFfb3LsO3XikV9Hs,3905 +mpmath/tests/test_eigen_symmetric.py,sha256=v0VimCicIU2owASDMBaP-t-30uq-pXcsglt95KBtNO4,8778 +mpmath/tests/test_elliptic.py,sha256=Kjiwq9Bb6N_OOzzWewGQ1M_PMa7vRs42V0t90gloZxo,26225 +mpmath/tests/test_fp.py,sha256=AJo0FTyH4BuUnUsv176LD956om308KGYndy-b54KGxM,89997 +mpmath/tests/test_functions.py,sha256=b47VywdomoOX6KmMmz9-iv2IqVIydwKSuUw2pWlFHrY,30955 +mpmath/tests/test_functions2.py,sha256=vlw2RWhL1oTcifnOMDx1a_YzN96UgNNIE5STeKRv1HY,96990 +mpmath/tests/test_gammazeta.py,sha256=AB34O0DV7AlEf9Z4brnCadeQU5-uAwhWRw5FZas65DA,27917 +mpmath/tests/test_hp.py,sha256=6hcENu6Te2klPEiTSeLBIRPlH7PADlJwFKbx8xpnOhg,10461 +mpmath/tests/test_identify.py,sha256=lGUIPfrB2paTg0cFUo64GmMzF77F9gs9FQjX7gxGHV8,692 +mpmath/tests/test_interval.py,sha256=TjYd7a9ca6iRJiLjw06isLeZTuGoGAPmgleDZ0cYfJ0,17527 +mpmath/tests/test_levin.py,sha256=P8M11yV1dj_gdSNv5xuwCzFiF86QyRDtPMjURy6wJ28,5090 +mpmath/tests/test_linalg.py,sha256=miKEnwB8iwWV13hi1bF1cg3hgB4rTKOR0fvDVfWmXds,10440 +mpmath/tests/test_matrices.py,sha256=qyA4Ml2CvNvW034lzB01G6wVgNr7UrgZqh2wkMXtpzM,7944 +mpmath/tests/test_mpmath.py,sha256=LVyJUeofiaxW-zLKWVBCz59L9UQsjlW0Ts9_oBiEv_4,196 +mpmath/tests/test_ode.py,sha256=zAxexBH4fnmFNO4bvEHbug1NJWC5zqfFaVDlYijowkY,1822 +mpmath/tests/test_pickle.py,sha256=Y8CKmDLFsJHUqG8CDaBw5ilrPP4YT1xijVduLpQ7XFE,401 +mpmath/tests/test_power.py,sha256=sz_K02SmNxpa6Kb1uJLN_N4tXTJGdQ___vPRshEN7Gk,5227 +mpmath/tests/test_quad.py,sha256=49Ltft0vZ_kdKLL5s-Kj-BzAVoF5LPVEUeNUzdOkghI,3893 +mpmath/tests/test_rootfinding.py,sha256=umQegEaKHmYOEl5jEyoD-VLKDtXsTJJkepKEr4c0dC0,3132 +mpmath/tests/test_special.py,sha256=YbMIoMIkJEvvKYIzS0CXthJFG0--j6un7-tcE6b7FPM,2848 +mpmath/tests/test_str.py,sha256=0WsGD9hMPRi8zcuYMA9Cu2mOvQiCFskPwMsMf8lBDK4,544 +mpmath/tests/test_summation.py,sha256=fdNlsvRVOsbWxbhlyDLDaEO2S8kTJrRMKIvB5-aNci0,2035 +mpmath/tests/test_trig.py,sha256=zPtkIEnZaThxcWur4k7BX8-2Jmj-AhO191Svv7ANYUU,4799 +mpmath/tests/test_visualization.py,sha256=1PqtkoUx-WsKYgTRiu5o9pBc85kwhf1lzU2eobDQCJM,944 +mpmath/tests/torture.py,sha256=LD95oES7JY2KroELK-m-jhvtbvZaKChnt0Cq7kFMNCw,7868 +mpmath/usertools.py,sha256=a-TDw7XSRsPdBEffxOooDV4WDFfuXnO58P75dcAD87I,3029 +mpmath/visualization.py,sha256=pnnbjcd9AhFVRBZavYX5gjx4ytK_kXoDDisYR6EpXhs,10627 diff --git a/venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/WHEEL b/venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..57e3d840d59a650ac5bccbad5baeec47d155f0ad --- /dev/null +++ b/venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.38.4) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..dda7c273a8dd1c6adffa9d2d9901e0ce6876f4ac --- /dev/null 
+++ b/venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/top_level.txt @@ -0,0 +1 @@ +mpmath diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/__pycache__/edgelist.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/readwrite/__pycache__/edgelist.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2bbfae718496373363cce83005d20fe37308e51d Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/readwrite/__pycache__/edgelist.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/__pycache__/graphml.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/readwrite/__pycache__/graphml.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..830854aaea13d4d45596cd190ca1439623ce9b88 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/readwrite/__pycache__/graphml.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/__pycache__/multiline_adjlist.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/readwrite/__pycache__/multiline_adjlist.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ecbfb367f2d5c63726f0a1d644587d42acba469 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/readwrite/__pycache__/multiline_adjlist.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/__pycache__/p2g.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/readwrite/__pycache__/p2g.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ebda39fcf60f9246b93cd1fb1a2d22551ab9201 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/readwrite/__pycache__/p2g.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/__pycache__/sparse6.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/readwrite/__pycache__/sparse6.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e4e077524d5640aab30b51d1a9e93a1396e483d0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/readwrite/__pycache__/sparse6.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/__init__.py b/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2ee9d1240efd4691fe52d91690e99428ac75e11c --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/__init__.py @@ -0,0 +1,18 @@ +""" +********* +JSON data +********* +Generate and parse JSON serializable data for NetworkX graphs. 
+ +These formats are suitable for use with the d3.js examples https://d3js.org/ + +The three formats that you can generate with NetworkX are: + + - node-link like in the d3.js example https://bl.ocks.org/mbostock/4062045 + - tree like in the d3.js example https://bl.ocks.org/mbostock/4063550 + - adjacency like in the d3.js example https://bost.ocks.org/mike/miserables/ +""" +from networkx.readwrite.json_graph.node_link import * +from networkx.readwrite.json_graph.adjacency import * +from networkx.readwrite.json_graph.tree import * +from networkx.readwrite.json_graph.cytoscape import * diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7fb6c5f22c30909d52a8abf8302581c58c4b3064 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/__pycache__/adjacency.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/__pycache__/adjacency.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f91ad6773f8923412fe7fca411d9dba17d4c8491 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/__pycache__/adjacency.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/__pycache__/cytoscape.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/__pycache__/cytoscape.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1c022ba8726d18bd0e3986c175c2f036c3c7cfeb Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/__pycache__/cytoscape.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/__pycache__/node_link.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/__pycache__/node_link.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6124d0d7598b452291f863db1f82f08e35e7fe55 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/__pycache__/node_link.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/__pycache__/tree.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/__pycache__/tree.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7f553ebacfbd5b962790bf9d44773f242a450d43 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/__pycache__/tree.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/adjacency.py b/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/adjacency.py new file mode 100644 index 0000000000000000000000000000000000000000..3b05747565e73388b0871fbb7daf0f85ad2ce98b --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/adjacency.py @@ -0,0 +1,156 @@ +import networkx as nx + +__all__ = ["adjacency_data", "adjacency_graph"] + +_attrs = {"id": "id", "key": "key"} + + +def adjacency_data(G, attrs=_attrs): + """Returns data in adjacency format that is suitable for JSON serialization + and use in 
JavaScript documents. + + Parameters + ---------- + G : NetworkX graph + + attrs : dict + A dictionary that contains two keys 'id' and 'key'. The corresponding + values provide the attribute names for storing NetworkX-internal graph + data. The values should be unique. Default value: + :samp:`dict(id='id', key='key')`. + + If some user-defined graph data use these attribute names as data keys, + they may be silently dropped. + + Returns + ------- + data : dict + A dictionary with adjacency formatted data. + + Raises + ------ + NetworkXError + If values in attrs are not unique. + + Examples + -------- + >>> from networkx.readwrite import json_graph + >>> G = nx.Graph([(1, 2)]) + >>> data = json_graph.adjacency_data(G) + + To serialize with json + + >>> import json + >>> s = json.dumps(data) + + Notes + ----- + Graph, node, and link attributes will be written when using this format + but attribute keys must be strings if you want to serialize the resulting + data with JSON. + + The default value of attrs will be changed in a future release of NetworkX. + + See Also + -------- + adjacency_graph, node_link_data, tree_data + """ + multigraph = G.is_multigraph() + id_ = attrs["id"] + # Allow 'key' to be omitted from attrs if the graph is not a multigraph. + key = None if not multigraph else attrs["key"] + if id_ == key: + raise nx.NetworkXError("Attribute names are not unique.") + data = {} + data["directed"] = G.is_directed() + data["multigraph"] = multigraph + data["graph"] = list(G.graph.items()) + data["nodes"] = [] + data["adjacency"] = [] + for n, nbrdict in G.adjacency(): + data["nodes"].append({**G.nodes[n], id_: n}) + adj = [] + if multigraph: + for nbr, keys in nbrdict.items(): + for k, d in keys.items(): + adj.append({**d, id_: nbr, key: k}) + else: + for nbr, d in nbrdict.items(): + adj.append({**d, id_: nbr}) + data["adjacency"].append(adj) + return data + + +@nx._dispatchable(graphs=None, returns_graph=True) +def adjacency_graph(data, directed=False, multigraph=True, attrs=_attrs): + """Returns graph from adjacency data format. + + Parameters + ---------- + data : dict + Adjacency list formatted graph data + + directed : bool + If True, and direction not specified in data, return a directed graph. + + multigraph : bool + If True, and multigraph not specified in data, return a multigraph. + + attrs : dict + A dictionary that contains two keys 'id' and 'key'. The corresponding + values provide the attribute names for storing NetworkX-internal graph + data. The values should be unique. Default value: + :samp:`dict(id='id', key='key')`. + + Returns + ------- + G : NetworkX graph + A NetworkX graph object + + Examples + -------- + >>> from networkx.readwrite import json_graph + >>> G = nx.Graph([(1, 2)]) + >>> data = json_graph.adjacency_data(G) + >>> H = json_graph.adjacency_graph(data) + + Notes + ----- + The default value of attrs will be changed in a future release of NetworkX. + + See Also + -------- + adjacency_graph, node_link_data, tree_data + """ + multigraph = data.get("multigraph", multigraph) + directed = data.get("directed", directed) + if multigraph: + graph = nx.MultiGraph() + else: + graph = nx.Graph() + if directed: + graph = graph.to_directed() + id_ = attrs["id"] + # Allow 'key' to be omitted from attrs if the graph is not a multigraph. 
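+ # Note added for clarity (not upstream NetworkX code): the loops below + # invert adjacency_data(): each dict in data["nodes"] yields one node, with + # its attrs["id"] entry popped off as the node label, and the i-th list in + # data["adjacency"] is attached as the edges of the i-th node via `mapping`.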
+ key = None if not multigraph else attrs["key"] + graph.graph = dict(data.get("graph", [])) + mapping = [] + for d in data["nodes"]: + node_data = d.copy() + node = node_data.pop(id_) + mapping.append(node) + graph.add_node(node) + graph.nodes[node].update(node_data) + for i, d in enumerate(data["adjacency"]): + source = mapping[i] + for tdata in d: + target_data = tdata.copy() + target = target_data.pop(id_) + if not multigraph: + graph.add_edge(source, target) + graph[source][target].update(target_data) + else: + ky = target_data.pop(key, None) + graph.add_edge(source, target, key=ky) + graph[source][target][ky].update(target_data) + return graph diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/cytoscape.py b/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/cytoscape.py new file mode 100644 index 0000000000000000000000000000000000000000..2f3b2176ab403fa9b85acdded5b97a6ebc728855 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/cytoscape.py @@ -0,0 +1,178 @@ +import networkx as nx + +__all__ = ["cytoscape_data", "cytoscape_graph"] + + +def cytoscape_data(G, name="name", ident="id"): + """Returns data in Cytoscape JSON format (cyjs). + + Parameters + ---------- + G : NetworkX Graph + The graph to convert to cytoscape format + name : string + A string which is mapped to the 'name' node element in cyjs format. + Must not have the same value as `ident`. + ident : string + A string which is mapped to the 'id' node element in cyjs format. + Must not have the same value as `name`. + + Returns + ------- + data: dict + A dictionary with cyjs formatted data. + + Raises + ------ + NetworkXError + If the values for `name` and `ident` are identical. + + See Also + -------- + cytoscape_graph: convert a dictionary in cyjs format to a graph + + References + ---------- + .. [1] Cytoscape user's manual: + http://manual.cytoscape.org/en/stable/index.html + + Examples + -------- + >>> G = nx.path_graph(2) + >>> nx.cytoscape_data(G) # doctest: +SKIP + {'data': [], + 'directed': False, + 'multigraph': False, + 'elements': {'nodes': [{'data': {'id': '0', 'value': 0, 'name': '0'}}, + {'data': {'id': '1', 'value': 1, 'name': '1'}}], + 'edges': [{'data': {'source': 0, 'target': 1}}]}} + """ + if name == ident: + raise nx.NetworkXError("name and ident must be different.") + + jsondata = {"data": list(G.graph.items())} + jsondata["directed"] = G.is_directed() + jsondata["multigraph"] = G.is_multigraph() + jsondata["elements"] = {"nodes": [], "edges": []} + nodes = jsondata["elements"]["nodes"] + edges = jsondata["elements"]["edges"] + + for i, j in G.nodes.items(): + n = {"data": j.copy()} + n["data"]["id"] = j.get(ident) or str(i) + n["data"]["value"] = i + n["data"]["name"] = j.get(name) or str(i) + nodes.append(n) + + if G.is_multigraph(): + for e in G.edges(keys=True): + n = {"data": G.adj[e[0]][e[1]][e[2]].copy()} + n["data"]["source"] = e[0] + n["data"]["target"] = e[1] + n["data"]["key"] = e[2] + edges.append(n) + else: + for e in G.edges(): + n = {"data": G.adj[e[0]][e[1]].copy()} + n["data"]["source"] = e[0] + n["data"]["target"] = e[1] + edges.append(n) + return jsondata + + +@nx._dispatchable(graphs=None, returns_graph=True) +def cytoscape_graph(data, name="name", ident="id"): + """ + Create a NetworkX graph from a dictionary in cytoscape JSON format. + + Parameters + ---------- + data : dict + A dictionary of data conforming to cytoscape JSON format. 
+ name : string + A string which is mapped to the 'name' node element in cyjs format. + Must not have the same value as `ident`. + ident : string + A string which is mapped to the 'id' node element in cyjs format. + Must not have the same value as `name`. + + Returns + ------- + graph : a NetworkX graph instance + The `graph` can be an instance of `Graph`, `DiGraph`, `MultiGraph`, or + `MultiDiGraph` depending on the input data. + + Raises + ------ + NetworkXError + If the `name` and `ident` attributes are identical. + + See Also + -------- + cytoscape_data: convert a NetworkX graph to a dict in cyjs format + + References + ---------- + .. [1] Cytoscape user's manual: + http://manual.cytoscape.org/en/stable/index.html + + Examples + -------- + >>> data_dict = { + ... "data": [], + ... "directed": False, + ... "multigraph": False, + ... "elements": { + ... "nodes": [ + ... {"data": {"id": "0", "value": 0, "name": "0"}}, + ... {"data": {"id": "1", "value": 1, "name": "1"}}, + ... ], + ... "edges": [{"data": {"source": 0, "target": 1}}], + ... }, + ... } + >>> G = nx.cytoscape_graph(data_dict) + >>> G.name + '' + >>> G.nodes() + NodeView((0, 1)) + >>> G.nodes(data=True)[0] + {'id': '0', 'value': 0, 'name': '0'} + >>> G.edges(data=True) + EdgeDataView([(0, 1, {'source': 0, 'target': 1})]) + """ + if name == ident: + raise nx.NetworkXError("name and ident must be different.") + + multigraph = data.get("multigraph") + directed = data.get("directed") + if multigraph: + graph = nx.MultiGraph() + else: + graph = nx.Graph() + if directed: + graph = graph.to_directed() + graph.graph = dict(data.get("data")) + for d in data["elements"]["nodes"]: + node_data = d["data"].copy() + node = d["data"]["value"] + + if d["data"].get(name): + node_data[name] = d["data"].get(name) + if d["data"].get(ident): + node_data[ident] = d["data"].get(ident) + + graph.add_node(node) + graph.nodes[node].update(node_data) + + for d in data["elements"]["edges"]: + edge_data = d["data"].copy() + sour = d["data"]["source"] + targ = d["data"]["target"] + if multigraph: + key = d["data"].get("key", 0) + graph.add_edge(sour, targ, key=key) + graph.edges[sour, targ, key].update(edge_data) + else: + graph.add_edge(sour, targ) + graph.edges[sour, targ].update(edge_data) + return graph diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/node_link.py b/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/node_link.py new file mode 100644 index 0000000000000000000000000000000000000000..e29100f448aeefd34802d42297ecd1120e1920a8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/node_link.py @@ -0,0 +1,244 @@ +from itertools import chain, count + +import networkx as nx + +__all__ = ["node_link_data", "node_link_graph"] + + +_attrs = { + "source": "source", + "target": "target", + "name": "id", + "key": "key", + "link": "links", +} + + +def _to_tuple(x): + """Converts lists to tuples, including nested lists. + + All other non-list inputs are passed through unmodified. This function is + intended to be used to convert potentially nested lists from json files + into valid nodes. + + Examples + -------- + >>> _to_tuple([1, 2, [3, 4]]) + (1, 2, (3, 4)) + """ + if not isinstance(x, tuple | list): + return x + return tuple(map(_to_tuple, x)) + + +def node_link_data( + G, + *, + source="source", + target="target", + name="id", + key="key", + link="links", +): + """Returns data in node-link format that is suitable for JSON serialization + and use in JavaScript documents. 
+ + Parameters + ---------- + G : NetworkX graph + source : string + A string that provides the 'source' attribute name for storing NetworkX-internal graph data. + target : string + A string that provides the 'target' attribute name for storing NetworkX-internal graph data. + name : string + A string that provides the 'name' attribute name for storing NetworkX-internal graph data. + key : string + A string that provides the 'key' attribute name for storing NetworkX-internal graph data. + link : string + A string that provides the 'link' attribute name for storing NetworkX-internal graph data. + + Returns + ------- + data : dict + A dictionary with node-link formatted data. + + Raises + ------ + NetworkXError + If the values of 'source', 'target' and 'key' are not unique. + + Examples + -------- + >>> G = nx.Graph([("A", "B")]) + >>> data1 = nx.node_link_data(G) + >>> data1 + {'directed': False, 'multigraph': False, 'graph': {}, 'nodes': [{'id': 'A'}, {'id': 'B'}], 'links': [{'source': 'A', 'target': 'B'}]} + + To serialize with JSON + + >>> import json + >>> s1 = json.dumps(data1) + >>> s1 + '{"directed": false, "multigraph": false, "graph": {}, "nodes": [{"id": "A"}, {"id": "B"}], "links": [{"source": "A", "target": "B"}]}' + + A graph can also be serialized by passing `node_link_data` as an encoder function. The two methods are equivalent. + + >>> s1 = json.dumps(G, default=nx.node_link_data) + >>> s1 + '{"directed": false, "multigraph": false, "graph": {}, "nodes": [{"id": "A"}, {"id": "B"}], "links": [{"source": "A", "target": "B"}]}' + + The attribute names for storing NetworkX-internal graph data can + be specified as keyword options. + + >>> H = nx.gn_graph(2) + >>> data2 = nx.node_link_data(H, link="edges", source="from", target="to") + >>> data2 + {'directed': True, 'multigraph': False, 'graph': {}, 'nodes': [{'id': 0}, {'id': 1}], 'edges': [{'from': 1, 'to': 0}]} + + Notes + ----- + Graph, node, and link attributes are stored in this format. Note that + attribute keys will be converted to strings in order to comply with JSON. + + Attribute 'key' is only used for multigraphs. + + To use `node_link_data` in conjunction with `node_link_graph`, + the keyword names for the attributes must match. + + + See Also + -------- + node_link_graph, adjacency_data, tree_data + """ + multigraph = G.is_multigraph() + + # Allow 'key' to be omitted from attrs if the graph is not a multigraph. + key = None if not multigraph else key + if len({source, target, key}) < 3: + raise nx.NetworkXError("Attribute names are not unique.") + data = { + "directed": G.is_directed(), + "multigraph": multigraph, + "graph": G.graph, + "nodes": [{**G.nodes[n], name: n} for n in G], + } + if multigraph: + data[link] = [ + {**d, source: u, target: v, key: k} + for u, v, k, d in G.edges(keys=True, data=True) + ] + else: + data[link] = [{**d, source: u, target: v} for u, v, d in G.edges(data=True)] + return data + + +@nx._dispatchable(graphs=None, returns_graph=True) +def node_link_graph( + data, + directed=False, + multigraph=True, + *, + source="source", + target="target", + name="id", + key="key", + link="links", +): + """Returns graph from node-link data format. + Useful for de-serialization from JSON. + + Parameters + ---------- + data : dict + node-link formatted graph data + + directed : bool + If True, and direction not specified in data, return a directed graph. + + multigraph : bool + If True, and multigraph not specified in data, return a multigraph. 
+ + source : string + A string that provides the 'source' attribute name for storing NetworkX-internal graph data. + target : string + A string that provides the 'target' attribute name for storing NetworkX-internal graph data. + name : string + A string that provides the 'name' attribute name for storing NetworkX-internal graph data. + key : string + A string that provides the 'key' attribute name for storing NetworkX-internal graph data. + link : string + A string that provides the 'link' attribute name for storing NetworkX-internal graph data. + + Returns + ------- + G : NetworkX graph + A NetworkX graph object + + Examples + -------- + + Create data in node-link format by converting a graph. + + >>> G = nx.Graph([("A", "B")]) + >>> data = nx.node_link_data(G) + >>> data + {'directed': False, 'multigraph': False, 'graph': {}, 'nodes': [{'id': 'A'}, {'id': 'B'}], 'links': [{'source': 'A', 'target': 'B'}]} + + Revert data in node-link format to a graph. + + >>> H = nx.node_link_graph(data) + >>> print(H.edges) + [('A', 'B')] + + To serialize and deserialize a graph with JSON, + + >>> import json + >>> d = json.dumps(node_link_data(G)) + >>> H = node_link_graph(json.loads(d)) + >>> print(G.edges, H.edges) + [('A', 'B')] [('A', 'B')] + + + Notes + ----- + Attribute 'key' is only used for multigraphs. + + To use `node_link_data` in conjunction with `node_link_graph`, + the keyword names for the attributes must match. + + See Also + -------- + node_link_data, adjacency_data, tree_data + """ + multigraph = data.get("multigraph", multigraph) + directed = data.get("directed", directed) + if multigraph: + graph = nx.MultiGraph() + else: + graph = nx.Graph() + if directed: + graph = graph.to_directed() + + # Allow 'key' to be omitted from attrs if the graph is not a multigraph. 
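+ # Note added for clarity (not upstream NetworkX code): below, nodes whose + # dicts lack the `name` field are labelled from a fresh counter, and + # _to_tuple() converts list-valued ids back into tuples (JSON has no tuple + # type), so graphs with tuple nodes survive a JSON round trip.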
+ key = None if not multigraph else key + graph.graph = data.get("graph", {}) + c = count() + for d in data["nodes"]: + node = _to_tuple(d.get(name, next(c))) + nodedata = {str(k): v for k, v in d.items() if k != name} + graph.add_node(node, **nodedata) + for d in data[link]: + src = tuple(d[source]) if isinstance(d[source], list) else d[source] + tgt = tuple(d[target]) if isinstance(d[target], list) else d[target] + if not multigraph: + edgedata = {str(k): v for k, v in d.items() if k != source and k != target} + graph.add_edge(src, tgt, **edgedata) + else: + ky = d.get(key, None) + edgedata = { + str(k): v + for k, v in d.items() + if k != source and k != target and k != key + } + graph.add_edge(src, tgt, ky, **edgedata) + return graph diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/tests/__init__.py b/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71d82f83046e242904d2985994824ebef6f6e997 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_adjacency.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_adjacency.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bd1010c1bd0d75fbfc2e00632499acf77414467e Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_adjacency.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_cytoscape.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_cytoscape.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa508445dd5f65ad25e65bd4c0ec78a86fae1a90 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_cytoscape.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_node_link.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_node_link.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1aad1520c0158c84d2b2f7cabaa831bda188b231 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_node_link.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_tree.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_tree.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff7b4df3de767bc378972f99d4409b0292340314 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_tree.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/tests/test_adjacency.py b/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/tests/test_adjacency.py new file mode 100644 index 0000000000000000000000000000000000000000..37506382c55a110b26fdba32a268545d23f4474b --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/tests/test_adjacency.py @@ -0,0 +1,78 @@ +import copy +import json + +import pytest + +import networkx as nx +from networkx.readwrite.json_graph import adjacency_data, adjacency_graph +from networkx.utils import graphs_equal + + +class TestAdjacency: + def test_graph(self): + G = nx.path_graph(4) + H = adjacency_graph(adjacency_data(G)) + assert graphs_equal(G, H) + + def test_graph_attributes(self): + G = nx.path_graph(4) + G.add_node(1, color="red") + G.add_edge(1, 2, width=7) + G.graph["foo"] = "bar" + G.graph[1] = "one" + + H = adjacency_graph(adjacency_data(G)) + assert graphs_equal(G, H) + assert H.graph["foo"] == "bar" + assert H.nodes[1]["color"] == "red" + assert H[1][2]["width"] == 7 + + d = json.dumps(adjacency_data(G)) + H = adjacency_graph(json.loads(d)) + assert graphs_equal(G, H) + assert H.graph["foo"] == "bar" + assert H.graph[1] == "one" + assert H.nodes[1]["color"] == "red" + assert H[1][2]["width"] == 7 + + def test_digraph(self): + G = nx.DiGraph() + nx.add_path(G, [1, 2, 3]) + H = adjacency_graph(adjacency_data(G)) + assert H.is_directed() + assert graphs_equal(G, H) + + def test_multidigraph(self): + G = nx.MultiDiGraph() + nx.add_path(G, [1, 2, 3]) + H = adjacency_graph(adjacency_data(G)) + assert H.is_directed() + assert H.is_multigraph() + assert graphs_equal(G, H) + + def test_multigraph(self): + G = nx.MultiGraph() + G.add_edge(1, 2, key="first") + G.add_edge(1, 2, key="second", color="blue") + H = adjacency_graph(adjacency_data(G)) + assert graphs_equal(G, H) + assert H[1][2]["second"]["color"] == "blue" + + def test_input_data_is_not_modified_when_building_graph(self): + G = nx.path_graph(4) + input_data = adjacency_data(G) + orig_data = copy.deepcopy(input_data) + # Ensure input is unmodified by deserialisation + assert graphs_equal(G, adjacency_graph(input_data)) + assert input_data == orig_data + + def test_adjacency_form_json_serialisable(self): + G = nx.path_graph(4) + H = adjacency_graph(json.loads(json.dumps(adjacency_data(G)))) + assert graphs_equal(G, H) + + def test_exception(self): + with pytest.raises(nx.NetworkXError): + G = nx.MultiDiGraph() + attrs = {"id": "node", "key": "node"} + adjacency_data(G, attrs) diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/tests/test_cytoscape.py b/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/tests/test_cytoscape.py new file mode 100644 index 0000000000000000000000000000000000000000..5d47f21f4217d1997165c4f19feb67d283d2dab2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/tests/test_cytoscape.py @@ -0,0 +1,78 @@ +import copy +import json + +import pytest + +import networkx as nx +from networkx.readwrite.json_graph import cytoscape_data, cytoscape_graph + + +def test_graph(): + G = nx.path_graph(4) + H = cytoscape_graph(cytoscape_data(G)) + assert nx.is_isomorphic(G, H) + + +def test_input_data_is_not_modified_when_building_graph(): + G = nx.path_graph(4) + input_data = cytoscape_data(G) + orig_data = copy.deepcopy(input_data) + # Ensure input is unmodified by cytoscape_graph (gh-4173) + cytoscape_graph(input_data) + assert input_data == orig_data + + +def 
test_graph_attributes(): + G = nx.path_graph(4) + G.add_node(1, color="red") + G.add_edge(1, 2, width=7) + G.graph["foo"] = "bar" + G.graph[1] = "one" + G.add_node(3, name="node", id="123") + + H = cytoscape_graph(cytoscape_data(G)) + assert H.graph["foo"] == "bar" + assert H.nodes[1]["color"] == "red" + assert H[1][2]["width"] == 7 + assert H.nodes[3]["name"] == "node" + assert H.nodes[3]["id"] == "123" + + d = json.dumps(cytoscape_data(G)) + H = cytoscape_graph(json.loads(d)) + assert H.graph["foo"] == "bar" + assert H.graph[1] == "one" + assert H.nodes[1]["color"] == "red" + assert H[1][2]["width"] == 7 + assert H.nodes[3]["name"] == "node" + assert H.nodes[3]["id"] == "123" + + +def test_digraph(): + G = nx.DiGraph() + nx.add_path(G, [1, 2, 3]) + H = cytoscape_graph(cytoscape_data(G)) + assert H.is_directed() + assert nx.is_isomorphic(G, H) + + +def test_multidigraph(): + G = nx.MultiDiGraph() + nx.add_path(G, [1, 2, 3]) + H = cytoscape_graph(cytoscape_data(G)) + assert H.is_directed() + assert H.is_multigraph() + + +def test_multigraph(): + G = nx.MultiGraph() + G.add_edge(1, 2, key="first") + G.add_edge(1, 2, key="second", color="blue") + H = cytoscape_graph(cytoscape_data(G)) + assert nx.is_isomorphic(G, H) + assert H[1][2]["second"]["color"] == "blue" + + +def test_exception(): + with pytest.raises(nx.NetworkXError): + G = nx.MultiDiGraph() + cytoscape_data(G, name="foo", ident="foo") diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/tests/test_node_link.py b/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/tests/test_node_link.py new file mode 100644 index 0000000000000000000000000000000000000000..a432666b650a44013a7b5c385d516585e4d6cab3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/tests/test_node_link.py @@ -0,0 +1,144 @@ +import json + +import pytest + +import networkx as nx +from networkx.readwrite.json_graph import node_link_data, node_link_graph + + +class TestNodeLink: + # TODO: To be removed when signature change complete + def test_custom_attrs_dep(self): + G = nx.path_graph(4) + G.add_node(1, color="red") + G.add_edge(1, 2, width=7) + G.graph[1] = "one" + G.graph["foo"] = "bar" + + attrs = { + "source": "c_source", + "target": "c_target", + "name": "c_id", + "key": "c_key", + "link": "c_links", + } + + H = node_link_graph(node_link_data(G, **attrs), multigraph=False, **attrs) + assert nx.is_isomorphic(G, H) + assert H.graph["foo"] == "bar" + assert H.nodes[1]["color"] == "red" + assert H[1][2]["width"] == 7 + + # provide only a partial dictionary of keywords. 
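+ # (an assumption worth stating: the omitted "name" and "key" entries fall
+ # back to the node_link defaults, "id" and "key").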
+ # This is similar to an example in the doc string + attrs = { + "link": "c_links", + "source": "c_source", + "target": "c_target", + } + H = node_link_graph(node_link_data(G, **attrs), multigraph=False, **attrs) + assert nx.is_isomorphic(G, H) + assert H.graph["foo"] == "bar" + assert H.nodes[1]["color"] == "red" + assert H[1][2]["width"] == 7 + + def test_exception_dep(self): + with pytest.raises(nx.NetworkXError): + G = nx.MultiDiGraph() + node_link_data(G, name="node", source="node", target="node", key="node") + + def test_graph(self): + G = nx.path_graph(4) + H = node_link_graph(node_link_data(G)) + assert nx.is_isomorphic(G, H) + + def test_graph_attributes(self): + G = nx.path_graph(4) + G.add_node(1, color="red") + G.add_edge(1, 2, width=7) + G.graph[1] = "one" + G.graph["foo"] = "bar" + + H = node_link_graph(node_link_data(G)) + assert H.graph["foo"] == "bar" + assert H.nodes[1]["color"] == "red" + assert H[1][2]["width"] == 7 + + d = json.dumps(node_link_data(G)) + H = node_link_graph(json.loads(d)) + assert H.graph["foo"] == "bar" + assert H.graph["1"] == "one" + assert H.nodes[1]["color"] == "red" + assert H[1][2]["width"] == 7 + + def test_digraph(self): + G = nx.DiGraph() + H = node_link_graph(node_link_data(G)) + assert H.is_directed() + + def test_multigraph(self): + G = nx.MultiGraph() + G.add_edge(1, 2, key="first") + G.add_edge(1, 2, key="second", color="blue") + H = node_link_graph(node_link_data(G)) + assert nx.is_isomorphic(G, H) + assert H[1][2]["second"]["color"] == "blue" + + def test_graph_with_tuple_nodes(self): + G = nx.Graph() + G.add_edge((0, 0), (1, 0), color=[255, 255, 0]) + d = node_link_data(G) + dumped_d = json.dumps(d) + dd = json.loads(dumped_d) + H = node_link_graph(dd) + assert H.nodes[(0, 0)] == G.nodes[(0, 0)] + assert H[(0, 0)][(1, 0)]["color"] == [255, 255, 0] + + def test_unicode_keys(self): + q = "qualité" + G = nx.Graph() + G.add_node(1, **{q: q}) + s = node_link_data(G) + output = json.dumps(s, ensure_ascii=False) + data = json.loads(output) + H = node_link_graph(data) + assert H.nodes[1][q] == q + + def test_exception(self): + with pytest.raises(nx.NetworkXError): + G = nx.MultiDiGraph() + attrs = {"name": "node", "source": "node", "target": "node", "key": "node"} + node_link_data(G, **attrs) + + def test_string_ids(self): + q = "qualité" + G = nx.DiGraph() + G.add_node("A") + G.add_node(q) + G.add_edge("A", q) + data = node_link_data(G) + assert data["links"][0]["source"] == "A" + assert data["links"][0]["target"] == q + H = node_link_graph(data) + assert nx.is_isomorphic(G, H) + + def test_custom_attrs(self): + G = nx.path_graph(4) + G.add_node(1, color="red") + G.add_edge(1, 2, width=7) + G.graph[1] = "one" + G.graph["foo"] = "bar" + + attrs = { + "source": "c_source", + "target": "c_target", + "name": "c_id", + "key": "c_key", + "link": "c_links", + } + + H = node_link_graph(node_link_data(G, **attrs), multigraph=False, **attrs) + assert nx.is_isomorphic(G, H) + assert H.graph["foo"] == "bar" + assert H.nodes[1]["color"] == "red" + assert H[1][2]["width"] == 7 diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/tests/test_tree.py b/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/tests/test_tree.py new file mode 100644 index 0000000000000000000000000000000000000000..643a14d89b5211f2d97b98f2e227e68361781b97 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/tests/test_tree.py @@ -0,0 +1,48 @@ +import json + +import pytest + +import networkx as nx +from 
networkx.readwrite.json_graph import tree_data, tree_graph + + +def test_graph(): + G = nx.DiGraph() + G.add_nodes_from([1, 2, 3], color="red") + G.add_edge(1, 2, foo=7) + G.add_edge(1, 3, foo=10) + G.add_edge(3, 4, foo=10) + H = tree_graph(tree_data(G, 1)) + assert nx.is_isomorphic(G, H) + + +def test_graph_attributes(): + G = nx.DiGraph() + G.add_nodes_from([1, 2, 3], color="red") + G.add_edge(1, 2, foo=7) + G.add_edge(1, 3, foo=10) + G.add_edge(3, 4, foo=10) + H = tree_graph(tree_data(G, 1)) + assert H.nodes[1]["color"] == "red" + + d = json.dumps(tree_data(G, 1)) + H = tree_graph(json.loads(d)) + assert H.nodes[1]["color"] == "red" + + +def test_exceptions(): + with pytest.raises(TypeError, match="is not a tree."): + G = nx.complete_graph(3) + tree_data(G, 0) + with pytest.raises(TypeError, match="is not directed."): + G = nx.path_graph(3) + tree_data(G, 0) + with pytest.raises(TypeError, match="is not weakly connected."): + G = nx.path_graph(3, create_using=nx.DiGraph) + G.add_edge(2, 0) + G.add_node(3) + tree_data(G, 0) + with pytest.raises(nx.NetworkXError, match="must be different."): + G = nx.MultiDiGraph() + G.add_node(0) + tree_data(G, 0, ident="node", children="node") diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/tree.py b/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/tree.py new file mode 100644 index 0000000000000000000000000000000000000000..22b07b09d277815e824b1dd8c5b82a149ed14e1b --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/tree.py @@ -0,0 +1,137 @@ +from itertools import chain + +import networkx as nx + +__all__ = ["tree_data", "tree_graph"] + + +def tree_data(G, root, ident="id", children="children"): + """Returns data in tree format that is suitable for JSON serialization + and use in JavaScript documents. + + Parameters + ---------- + G : NetworkX graph + G must be an oriented tree + + root : node + The root of the tree + + ident : string + Attribute name for storing NetworkX-internal graph data. `ident` must + have a different value than `children`. The default is 'id'. + + children : string + Attribute name for storing NetworkX-internal graph data. `children` + must have a different value than `ident`. The default is 'children'. + + Returns + ------- + data : dict + A dictionary with node-link formatted data. + + Raises + ------ + NetworkXError + If `children` and `ident` attributes are identical. + + Examples + -------- + >>> from networkx.readwrite import json_graph + >>> G = nx.DiGraph([(1, 2)]) + >>> data = json_graph.tree_data(G, root=1) + + To serialize with json + + >>> import json + >>> s = json.dumps(data) + + Notes + ----- + Node attributes are stored in this format but keys + for attributes must be strings if you want to serialize with JSON. + + Graph and edge attributes are not stored. 
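+
+ A minimal sketch of the output shape, using a two-edge tree:
+
+ >>> json_graph.tree_data(nx.DiGraph([(1, 2), (1, 3)]), root=1)
+ {'id': 1, 'children': [{'id': 2}, {'id': 3}]}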
+ + See Also + -------- + tree_graph, node_link_data, adjacency_data + """ + if G.number_of_nodes() != G.number_of_edges() + 1: + raise TypeError("G is not a tree.") + if not G.is_directed(): + raise TypeError("G is not directed.") + if not nx.is_weakly_connected(G): + raise TypeError("G is not weakly connected.") + + if ident == children: + raise nx.NetworkXError("The values for `id` and `children` must be different.") + + def add_children(n, G): + nbrs = G[n] + if len(nbrs) == 0: + return [] + children_ = [] + for child in nbrs: + d = {**G.nodes[child], ident: child} + c = add_children(child, G) + if c: + d[children] = c + children_.append(d) + return children_ + + return {**G.nodes[root], ident: root, children: add_children(root, G)} + + +@nx._dispatchable(graphs=None, returns_graph=True) +def tree_graph(data, ident="id", children="children"): + """Returns graph from tree data format. + + Parameters + ---------- + data : dict + Tree formatted graph data + + ident : string + Attribute name for storing NetworkX-internal graph data. `ident` must + have a different value than `children`. The default is 'id'. + + children : string + Attribute name for storing NetworkX-internal graph data. `children` + must have a different value than `ident`. The default is 'children'. + + Returns + ------- + G : NetworkX DiGraph + + Examples + -------- + >>> from networkx.readwrite import json_graph + >>> G = nx.DiGraph([(1, 2)]) + >>> data = json_graph.tree_data(G, root=1) + >>> H = json_graph.tree_graph(data) + + See Also + -------- + tree_data, node_link_data, adjacency_data + """ + graph = nx.DiGraph() + + def add_children(parent, children_): + for data in children_: + child = data[ident] + graph.add_edge(parent, child) + grandchildren = data.get(children, []) + if grandchildren: + add_children(child, grandchildren) + nodedata = { + str(k): v for k, v in data.items() if k != ident and k != children + } + graph.add_node(child, **nodedata) + + root = data[ident] + children_ = data.get(children, []) + nodedata = {str(k): v for k, v in data.items() if k != ident and k != children} + graph.add_node(root, **nodedata) + add_children(root, children_) + return graph diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/tests/__init__.py b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..270265b83f590e045a0e39078a6e1b556a686677 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/test_adjlist.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/test_adjlist.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6ead1139ec0678f2f199913baf967b2b93976587 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/test_adjlist.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/test_edgelist.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/test_edgelist.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2be695464470cf1f23ee39e80ab8f328da334b2a Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/test_edgelist.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/test_gexf.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/test_gexf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c0b13d260b508f6b80df2e20865d1420c054896a Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/test_gexf.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/test_gml.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/test_gml.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..99200c87fde1e7fcf10f3c9761001d21bfc7f8cf Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/test_gml.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/test_graph6.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/test_graph6.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d9877a40547a6b1301a4b16e3bb0ce168280ee70 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/test_graph6.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/test_graphml.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/test_graphml.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..def8efa0ccdfc951117a5e8d29fc45f04be30016 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/test_graphml.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/test_leda.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/test_leda.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da2e8e7a788d5b941c0fb728f150bfff3812130c Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/test_leda.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/test_p2g.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/test_p2g.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a4ff6ff4da76d5da83c9b0d33d5f296c43d72a2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/test_p2g.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/test_pajek.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/test_pajek.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3009fe4dfe0ef7029a03e8cf7b03929efe043cde Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/test_pajek.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/test_sparse6.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/test_sparse6.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ce85f81827749318e4c4a79117f831c53a1eba6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/test_sparse6.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/test_text.cpython-310.pyc b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/test_text.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d2ad34a3ad1c6be6381793751a033bfdf816ea8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/__pycache__/test_text.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/tests/test_adjlist.py b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/test_adjlist.py new file mode 100644 index 0000000000000000000000000000000000000000..c8d0f54654d936559ea02ab6227086f8dc1f293a --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/test_adjlist.py @@ -0,0 +1,240 @@ +""" + Unit tests for adjlist. +""" +import io + +import pytest + +import networkx as nx +from networkx.utils import edges_equal, graphs_equal, nodes_equal + + +class TestAdjlist: + @classmethod + def setup_class(cls): + cls.G = nx.Graph(name="test") + e = [("a", "b"), ("b", "c"), ("c", "d"), ("d", "e"), ("e", "f"), ("a", "f")] + cls.G.add_edges_from(e) + cls.G.add_node("g") + cls.DG = nx.DiGraph(cls.G) + cls.XG = nx.MultiGraph() + cls.XG.add_weighted_edges_from([(1, 2, 5), (1, 2, 5), (1, 2, 1), (3, 3, 42)]) + cls.XDG = nx.MultiDiGraph(cls.XG) + + def test_read_multiline_adjlist_1(self): + # Unit test for https://networkx.lanl.gov/trac/ticket/252 + s = b"""# comment line +1 2 +# comment line +2 +3 +""" + bytesIO = io.BytesIO(s) + G = nx.read_multiline_adjlist(bytesIO) + adj = {"1": {"3": {}, "2": {}}, "3": {"1": {}}, "2": {"1": {}}} + assert graphs_equal(G, nx.Graph(adj)) + + def test_unicode(self, tmp_path): + G = nx.Graph() + name1 = chr(2344) + chr(123) + chr(6543) + name2 = chr(5543) + chr(1543) + chr(324) + G.add_edge(name1, "Radiohead", **{name2: 3}) + + fname = tmp_path / "adjlist.txt" + nx.write_multiline_adjlist(G, fname) + H = nx.read_multiline_adjlist(fname) + assert graphs_equal(G, H) + + def test_latin1_err(self, tmp_path): + G = nx.Graph() + name1 = chr(2344) + chr(123) + chr(6543) + name2 = chr(5543) + chr(1543) + chr(324) + G.add_edge(name1, "Radiohead", **{name2: 3}) + fname = tmp_path / "adjlist.txt" + with pytest.raises(UnicodeEncodeError): + nx.write_multiline_adjlist(G, fname, encoding="latin-1") + + def test_latin1(self, tmp_path): + G = nx.Graph() + name1 = "Bj" + chr(246) + "rk" + name2 = chr(220) + "ber" + G.add_edge(name1, "Radiohead", **{name2: 3}) + fname = tmp_path / "adjlist.txt" + nx.write_multiline_adjlist(G, fname, encoding="latin-1") + H = nx.read_multiline_adjlist(fname, encoding="latin-1") + assert graphs_equal(G, H) + + def test_parse_adjlist(self): + lines = ["1 2 5", "2 3 4", "3 5", "4", "5"] + nx.parse_adjlist(lines, nodetype=int) # smoke test + with pytest.raises(TypeError): + nx.parse_adjlist(lines, nodetype="int") + lines = ["1 2 5", "2 b", "c"] + with pytest.raises(TypeError): + nx.parse_adjlist(lines, nodetype=int) + + def test_adjlist_graph(self, tmp_path): + G = self.G + fname = 
tmp_path / "adjlist.txt" + nx.write_adjlist(G, fname) + H = nx.read_adjlist(fname) + H2 = nx.read_adjlist(fname) + assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + + def test_adjlist_digraph(self, tmp_path): + G = self.DG + fname = tmp_path / "adjlist.txt" + nx.write_adjlist(G, fname) + H = nx.read_adjlist(fname, create_using=nx.DiGraph()) + H2 = nx.read_adjlist(fname, create_using=nx.DiGraph()) + assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + + def test_adjlist_integers(self, tmp_path): + fname = tmp_path / "adjlist.txt" + G = nx.convert_node_labels_to_integers(self.G) + nx.write_adjlist(G, fname) + H = nx.read_adjlist(fname, nodetype=int) + H2 = nx.read_adjlist(fname, nodetype=int) + assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + + def test_adjlist_multigraph(self, tmp_path): + G = self.XG + fname = tmp_path / "adjlist.txt" + nx.write_adjlist(G, fname) + H = nx.read_adjlist(fname, nodetype=int, create_using=nx.MultiGraph()) + H2 = nx.read_adjlist(fname, nodetype=int, create_using=nx.MultiGraph()) + assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + + def test_adjlist_multidigraph(self, tmp_path): + G = self.XDG + fname = tmp_path / "adjlist.txt" + nx.write_adjlist(G, fname) + H = nx.read_adjlist(fname, nodetype=int, create_using=nx.MultiDiGraph()) + H2 = nx.read_adjlist(fname, nodetype=int, create_using=nx.MultiDiGraph()) + assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + + def test_adjlist_delimiter(self): + fh = io.BytesIO() + G = nx.path_graph(3) + nx.write_adjlist(G, fh, delimiter=":") + fh.seek(0) + H = nx.read_adjlist(fh, nodetype=int, delimiter=":") + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + + +class TestMultilineAdjlist: + @classmethod + def setup_class(cls): + cls.G = nx.Graph(name="test") + e = [("a", "b"), ("b", "c"), ("c", "d"), ("d", "e"), ("e", "f"), ("a", "f")] + cls.G.add_edges_from(e) + cls.G.add_node("g") + cls.DG = nx.DiGraph(cls.G) + cls.DG.remove_edge("b", "a") + cls.DG.remove_edge("b", "c") + cls.XG = nx.MultiGraph() + cls.XG.add_weighted_edges_from([(1, 2, 5), (1, 2, 5), (1, 2, 1), (3, 3, 42)]) + cls.XDG = nx.MultiDiGraph(cls.XG) + + def test_parse_multiline_adjlist(self): + lines = [ + "1 2", + "b {'weight':3, 'name': 'Frodo'}", + "c {}", + "d 1", + "e {'weight':6, 'name': 'Saruman'}", + ] + nx.parse_multiline_adjlist(iter(lines)) # smoke test + with pytest.raises(TypeError): + nx.parse_multiline_adjlist(iter(lines), nodetype=int) + nx.parse_multiline_adjlist(iter(lines), edgetype=str) # smoke test + with pytest.raises(TypeError): + nx.parse_multiline_adjlist(iter(lines), nodetype=int) + lines = ["1 a"] + with pytest.raises(TypeError): + nx.parse_multiline_adjlist(iter(lines)) + lines = ["a 2"] + with pytest.raises(TypeError): + nx.parse_multiline_adjlist(iter(lines), nodetype=int) + lines = ["1 2"] + with pytest.raises(TypeError): + nx.parse_multiline_adjlist(iter(lines)) + lines = ["1 2", "2 {}"] + with pytest.raises(TypeError): + nx.parse_multiline_adjlist(iter(lines)) + + def test_multiline_adjlist_graph(self, 
tmp_path): + G = self.G + fname = tmp_path / "adjlist.txt" + nx.write_multiline_adjlist(G, fname) + H = nx.read_multiline_adjlist(fname) + H2 = nx.read_multiline_adjlist(fname) + assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + + def test_multiline_adjlist_digraph(self, tmp_path): + G = self.DG + fname = tmp_path / "adjlist.txt" + nx.write_multiline_adjlist(G, fname) + H = nx.read_multiline_adjlist(fname, create_using=nx.DiGraph()) + H2 = nx.read_multiline_adjlist(fname, create_using=nx.DiGraph()) + assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + + def test_multiline_adjlist_integers(self, tmp_path): + fname = tmp_path / "adjlist.txt" + G = nx.convert_node_labels_to_integers(self.G) + nx.write_multiline_adjlist(G, fname) + H = nx.read_multiline_adjlist(fname, nodetype=int) + H2 = nx.read_multiline_adjlist(fname, nodetype=int) + assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + + def test_multiline_adjlist_multigraph(self, tmp_path): + G = self.XG + fname = tmp_path / "adjlist.txt" + nx.write_multiline_adjlist(G, fname) + H = nx.read_multiline_adjlist(fname, nodetype=int, create_using=nx.MultiGraph()) + H2 = nx.read_multiline_adjlist( + fname, nodetype=int, create_using=nx.MultiGraph() + ) + assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + + def test_multiline_adjlist_multidigraph(self, tmp_path): + G = self.XDG + fname = tmp_path / "adjlist.txt" + nx.write_multiline_adjlist(G, fname) + H = nx.read_multiline_adjlist( + fname, nodetype=int, create_using=nx.MultiDiGraph() + ) + H2 = nx.read_multiline_adjlist( + fname, nodetype=int, create_using=nx.MultiDiGraph() + ) + assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + + def test_multiline_adjlist_delimiter(self): + fh = io.BytesIO() + G = nx.path_graph(3) + nx.write_multiline_adjlist(G, fh, delimiter=":") + fh.seek(0) + H = nx.read_multiline_adjlist(fh, nodetype=int, delimiter=":") + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/tests/test_edgelist.py b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/test_edgelist.py new file mode 100644 index 0000000000000000000000000000000000000000..29a536d53e95e3448efa5f7c65ff1fb2cb8b0ba5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/test_edgelist.py @@ -0,0 +1,296 @@ +""" + Unit tests for edgelists. 
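+
+ An edge list stores one edge per line: ``u v`` optionally followed by a
+ Python dict of edge data, or by bare values mapped to keys through the
+ ``data`` argument (see the fixtures below).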
+""" +import io +import textwrap + +import pytest + +import networkx as nx +from networkx.utils import edges_equal, graphs_equal, nodes_equal + +edges_no_data = textwrap.dedent( + """ + # comment line + 1 2 + # comment line + 2 3 + """ +) + + +edges_with_values = textwrap.dedent( + """ + # comment line + 1 2 2.0 + # comment line + 2 3 3.0 + """ +) + + +edges_with_weight = textwrap.dedent( + """ + # comment line + 1 2 {'weight':2.0} + # comment line + 2 3 {'weight':3.0} + """ +) + + +edges_with_multiple_attrs = textwrap.dedent( + """ + # comment line + 1 2 {'weight':2.0, 'color':'green'} + # comment line + 2 3 {'weight':3.0, 'color':'red'} + """ +) + + +edges_with_multiple_attrs_csv = textwrap.dedent( + """ + # comment line + 1, 2, {'weight':2.0, 'color':'green'} + # comment line + 2, 3, {'weight':3.0, 'color':'red'} + """ +) + + +_expected_edges_weights = [(1, 2, {"weight": 2.0}), (2, 3, {"weight": 3.0})] +_expected_edges_multiattr = [ + (1, 2, {"weight": 2.0, "color": "green"}), + (2, 3, {"weight": 3.0, "color": "red"}), +] + + +@pytest.mark.parametrize( + ("data", "extra_kwargs"), + ( + (edges_no_data, {}), + (edges_with_values, {}), + (edges_with_weight, {}), + (edges_with_multiple_attrs, {}), + (edges_with_multiple_attrs_csv, {"delimiter": ","}), + ), +) +def test_read_edgelist_no_data(data, extra_kwargs): + bytesIO = io.BytesIO(data.encode("utf-8")) + G = nx.read_edgelist(bytesIO, nodetype=int, data=False, **extra_kwargs) + assert edges_equal(G.edges(), [(1, 2), (2, 3)]) + + +def test_read_weighted_edgelist(): + bytesIO = io.BytesIO(edges_with_values.encode("utf-8")) + G = nx.read_weighted_edgelist(bytesIO, nodetype=int) + assert edges_equal(G.edges(data=True), _expected_edges_weights) + + +@pytest.mark.parametrize( + ("data", "extra_kwargs", "expected"), + ( + (edges_with_weight, {}, _expected_edges_weights), + (edges_with_multiple_attrs, {}, _expected_edges_multiattr), + (edges_with_multiple_attrs_csv, {"delimiter": ","}, _expected_edges_multiattr), + ), +) +def test_read_edgelist_with_data(data, extra_kwargs, expected): + bytesIO = io.BytesIO(data.encode("utf-8")) + G = nx.read_edgelist(bytesIO, nodetype=int, **extra_kwargs) + assert edges_equal(G.edges(data=True), expected) + + +@pytest.fixture +def example_graph(): + G = nx.Graph() + G.add_weighted_edges_from([(1, 2, 3.0), (2, 3, 27.0), (3, 4, 3.0)]) + return G + + +def test_parse_edgelist_no_data(example_graph): + G = example_graph + H = nx.parse_edgelist(["1 2", "2 3", "3 4"], nodetype=int) + assert nodes_equal(G.nodes, H.nodes) + assert edges_equal(G.edges, H.edges) + + +def test_parse_edgelist_with_data_dict(example_graph): + G = example_graph + H = nx.parse_edgelist( + ["1 2 {'weight': 3}", "2 3 {'weight': 27}", "3 4 {'weight': 3.0}"], nodetype=int + ) + assert nodes_equal(G.nodes, H.nodes) + assert edges_equal(G.edges(data=True), H.edges(data=True)) + + +def test_parse_edgelist_with_data_list(example_graph): + G = example_graph + H = nx.parse_edgelist( + ["1 2 3", "2 3 27", "3 4 3.0"], nodetype=int, data=(("weight", float),) + ) + assert nodes_equal(G.nodes, H.nodes) + assert edges_equal(G.edges(data=True), H.edges(data=True)) + + +def test_parse_edgelist(): + # ignore lines with less than 2 nodes + lines = ["1;2", "2 3", "3 4"] + G = nx.parse_edgelist(lines, nodetype=int) + assert list(G.edges()) == [(2, 3), (3, 4)] + # unknown nodetype + with pytest.raises(TypeError, match="Failed to convert nodes"): + lines = ["1 2", "2 3", "3 4"] + nx.parse_edgelist(lines, nodetype="nope") + # lines have invalid edge format + with 
pytest.raises(TypeError, match="Failed to convert edge data"): + lines = ["1 2 3", "2 3", "3 4"] + nx.parse_edgelist(lines, nodetype=int) + # edge data and data_keys not the same length + with pytest.raises(IndexError, match="not the same length"): + lines = ["1 2 3", "2 3 27", "3 4 3.0"] + nx.parse_edgelist( + lines, nodetype=int, data=(("weight", float), ("capacity", int)) + ) + # edge data can't be converted to edge type + with pytest.raises(TypeError, match="Failed to convert"): + lines = ["1 2 't1'", "2 3 't3'", "3 4 't3'"] + nx.parse_edgelist(lines, nodetype=int, data=(("weight", float),)) + + +def test_comments_None(): + edgelist = ["node#1 node#2", "node#2 node#3"] + # comments=None supported to ignore all comment characters + G = nx.parse_edgelist(edgelist, comments=None) + H = nx.Graph([e.split(" ") for e in edgelist]) + assert edges_equal(G.edges, H.edges) + + +class TestEdgelist: + @classmethod + def setup_class(cls): + cls.G = nx.Graph(name="test") + e = [("a", "b"), ("b", "c"), ("c", "d"), ("d", "e"), ("e", "f"), ("a", "f")] + cls.G.add_edges_from(e) + cls.G.add_node("g") + cls.DG = nx.DiGraph(cls.G) + cls.XG = nx.MultiGraph() + cls.XG.add_weighted_edges_from([(1, 2, 5), (1, 2, 5), (1, 2, 1), (3, 3, 42)]) + cls.XDG = nx.MultiDiGraph(cls.XG) + + def test_write_edgelist_1(self): + fh = io.BytesIO() + G = nx.Graph() + G.add_edges_from([(1, 2), (2, 3)]) + nx.write_edgelist(G, fh, data=False) + fh.seek(0) + assert fh.read() == b"1 2\n2 3\n" + + def test_write_edgelist_2(self): + fh = io.BytesIO() + G = nx.Graph() + G.add_edges_from([(1, 2), (2, 3)]) + nx.write_edgelist(G, fh, data=True) + fh.seek(0) + assert fh.read() == b"1 2 {}\n2 3 {}\n" + + def test_write_edgelist_3(self): + fh = io.BytesIO() + G = nx.Graph() + G.add_edge(1, 2, weight=2.0) + G.add_edge(2, 3, weight=3.0) + nx.write_edgelist(G, fh, data=True) + fh.seek(0) + assert fh.read() == b"1 2 {'weight': 2.0}\n2 3 {'weight': 3.0}\n" + + def test_write_edgelist_4(self): + fh = io.BytesIO() + G = nx.Graph() + G.add_edge(1, 2, weight=2.0) + G.add_edge(2, 3, weight=3.0) + nx.write_edgelist(G, fh, data=[("weight")]) + fh.seek(0) + assert fh.read() == b"1 2 2.0\n2 3 3.0\n" + + def test_unicode(self, tmp_path): + G = nx.Graph() + name1 = chr(2344) + chr(123) + chr(6543) + name2 = chr(5543) + chr(1543) + chr(324) + G.add_edge(name1, "Radiohead", **{name2: 3}) + fname = tmp_path / "el.txt" + nx.write_edgelist(G, fname) + H = nx.read_edgelist(fname) + assert graphs_equal(G, H) + + def test_latin1_issue(self, tmp_path): + G = nx.Graph() + name1 = chr(2344) + chr(123) + chr(6543) + name2 = chr(5543) + chr(1543) + chr(324) + G.add_edge(name1, "Radiohead", **{name2: 3}) + fname = tmp_path / "el.txt" + with pytest.raises(UnicodeEncodeError): + nx.write_edgelist(G, fname, encoding="latin-1") + + def test_latin1(self, tmp_path): + G = nx.Graph() + name1 = "Bj" + chr(246) + "rk" + name2 = chr(220) + "ber" + G.add_edge(name1, "Radiohead", **{name2: 3}) + fname = tmp_path / "el.txt" + + nx.write_edgelist(G, fname, encoding="latin-1") + H = nx.read_edgelist(fname, encoding="latin-1") + assert graphs_equal(G, H) + + def test_edgelist_graph(self, tmp_path): + G = self.G + fname = tmp_path / "el.txt" + nx.write_edgelist(G, fname) + H = nx.read_edgelist(fname) + H2 = nx.read_edgelist(fname) + assert H is not H2 # they should be different graphs + G.remove_node("g") # isolated nodes are not written in edgelist + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + + def test_edgelist_digraph(self, tmp_path): 
+ G = self.DG + fname = tmp_path / "el.txt" + nx.write_edgelist(G, fname) + H = nx.read_edgelist(fname, create_using=nx.DiGraph()) + H2 = nx.read_edgelist(fname, create_using=nx.DiGraph()) + assert H is not H2 # they should be different graphs + G.remove_node("g") # isolated nodes are not written in edgelist + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + + def test_edgelist_integers(self, tmp_path): + G = nx.convert_node_labels_to_integers(self.G) + fname = tmp_path / "el.txt" + nx.write_edgelist(G, fname) + H = nx.read_edgelist(fname, nodetype=int) + # isolated nodes are not written in edgelist + G.remove_nodes_from(list(nx.isolates(G))) + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + + def test_edgelist_multigraph(self, tmp_path): + G = self.XG + fname = tmp_path / "el.txt" + nx.write_edgelist(G, fname) + H = nx.read_edgelist(fname, nodetype=int, create_using=nx.MultiGraph()) + H2 = nx.read_edgelist(fname, nodetype=int, create_using=nx.MultiGraph()) + assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + + def test_edgelist_multidigraph(self, tmp_path): + G = self.XDG + fname = tmp_path / "el.txt" + nx.write_edgelist(G, fname) + H = nx.read_edgelist(fname, nodetype=int, create_using=nx.MultiDiGraph()) + H2 = nx.read_edgelist(fname, nodetype=int, create_using=nx.MultiDiGraph()) + assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/tests/test_gexf.py b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/test_gexf.py new file mode 100644 index 0000000000000000000000000000000000000000..6ff14c99b1d5df41003b705b840a0968e0439239 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/test_gexf.py @@ -0,0 +1,557 @@ +import io +import time + +import pytest + +import networkx as nx + + +class TestGEXF: + @classmethod + def setup_class(cls): + cls.simple_directed_data = """ + + + + + + + + + + + +""" + cls.simple_directed_graph = nx.DiGraph() + cls.simple_directed_graph.add_node("0", label="Hello") + cls.simple_directed_graph.add_node("1", label="World") + cls.simple_directed_graph.add_edge("0", "1", id="0") + + cls.simple_directed_fh = io.BytesIO(cls.simple_directed_data.encode("UTF-8")) + + cls.attribute_data = """\ + + + Gephi.org + A Web network + + + + + + + true + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +""" + cls.attribute_graph = nx.DiGraph() + cls.attribute_graph.graph["node_default"] = {"frog": True} + cls.attribute_graph.add_node( + "0", label="Gephi", url="https://gephi.org", indegree=1, frog=False + ) + cls.attribute_graph.add_node( + "1", label="Webatlas", url="http://webatlas.fr", indegree=2, frog=False + ) + cls.attribute_graph.add_node( + "2", label="RTGI", url="http://rtgi.fr", indegree=1, frog=True + ) + cls.attribute_graph.add_node( + "3", + label="BarabasiLab", + url="http://barabasilab.com", + indegree=1, + frog=True, + ) + cls.attribute_graph.add_edge("0", "1", id="0", label="foo") + cls.attribute_graph.add_edge("0", "2", id="1") + cls.attribute_graph.add_edge("1", "0", id="2") + cls.attribute_graph.add_edge("2", "1", id="3") + cls.attribute_graph.add_edge("0", "3", id="4") + cls.attribute_fh = io.BytesIO(cls.attribute_data.encode("UTF-8")) + + 
cls.simple_undirected_data = """ + + + + + + + + + + + +""" + cls.simple_undirected_graph = nx.Graph() + cls.simple_undirected_graph.add_node("0", label="Hello") + cls.simple_undirected_graph.add_node("1", label="World") + cls.simple_undirected_graph.add_edge("0", "1", id="0") + + cls.simple_undirected_fh = io.BytesIO( + cls.simple_undirected_data.encode("UTF-8") + ) + + def test_read_simple_directed_graphml(self): + G = self.simple_directed_graph + H = nx.read_gexf(self.simple_directed_fh) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(G.edges()) == sorted(H.edges()) + assert sorted(G.edges(data=True)) == sorted(H.edges(data=True)) + self.simple_directed_fh.seek(0) + + def test_write_read_simple_directed_graphml(self): + G = self.simple_directed_graph + fh = io.BytesIO() + nx.write_gexf(G, fh) + fh.seek(0) + H = nx.read_gexf(fh) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(G.edges()) == sorted(H.edges()) + assert sorted(G.edges(data=True)) == sorted(H.edges(data=True)) + self.simple_directed_fh.seek(0) + + def test_read_simple_undirected_graphml(self): + G = self.simple_undirected_graph + H = nx.read_gexf(self.simple_undirected_fh) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(sorted(e) for e in G.edges()) == sorted( + sorted(e) for e in H.edges() + ) + self.simple_undirected_fh.seek(0) + + def test_read_attribute_graphml(self): + G = self.attribute_graph + H = nx.read_gexf(self.attribute_fh) + assert sorted(G.nodes(True)) == sorted(H.nodes(data=True)) + ge = sorted(G.edges(data=True)) + he = sorted(H.edges(data=True)) + for a, b in zip(ge, he): + assert a == b + self.attribute_fh.seek(0) + + def test_directed_edge_in_undirected(self): + s = """ + + + + + + + + + + + +""" + fh = io.BytesIO(s.encode("UTF-8")) + pytest.raises(nx.NetworkXError, nx.read_gexf, fh) + + def test_undirected_edge_in_directed(self): + s = """ + + + + + + + + + + + +""" + fh = io.BytesIO(s.encode("UTF-8")) + pytest.raises(nx.NetworkXError, nx.read_gexf, fh) + + def test_key_raises(self): + s = """ + + + + + + + + + + + + + + + +""" + fh = io.BytesIO(s.encode("UTF-8")) + pytest.raises(nx.NetworkXError, nx.read_gexf, fh) + + def test_relabel(self): + s = """ + + + + + + + + + + + +""" + fh = io.BytesIO(s.encode("UTF-8")) + G = nx.read_gexf(fh, relabel=True) + assert sorted(G.nodes()) == ["Hello", "Word"] + + def test_default_attribute(self): + G = nx.Graph() + G.add_node(1, label="1", color="green") + nx.add_path(G, [0, 1, 2, 3]) + G.add_edge(1, 2, foo=3) + G.graph["node_default"] = {"color": "yellow"} + G.graph["edge_default"] = {"foo": 7} + fh = io.BytesIO() + nx.write_gexf(G, fh) + fh.seek(0) + H = nx.read_gexf(fh, node_type=int) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(sorted(e) for e in G.edges()) == sorted( + sorted(e) for e in H.edges() + ) + # Reading a gexf graph always sets mode attribute to either + # 'static' or 'dynamic'. Remove the mode attribute from the + # read graph for the sake of comparing remaining attributes. + del H.graph["mode"] + assert G.graph == H.graph + + def test_serialize_ints_to_strings(self): + G = nx.Graph() + G.add_node(1, id=7, label=77) + fh = io.BytesIO() + nx.write_gexf(G, fh) + fh.seek(0) + H = nx.read_gexf(fh, node_type=int) + assert list(H) == [7] + assert H.nodes[7]["label"] == "77" + + def test_write_with_node_attributes(self): + # Addresses #673. 
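+ # (id, pid, start and end are reserved GEXF node attributes: pid names a
+ # parent node, and start/end bound the node's lifetime in dynamic mode)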
+ G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2), (2, 3)]) + for i in range(4): + G.nodes[i]["id"] = i + G.nodes[i]["label"] = i + G.nodes[i]["pid"] = i + G.nodes[i]["start"] = i + G.nodes[i]["end"] = i + 1 + + expected = f""" + + NetworkX {nx.__version__} + + + + + + + + + + + + + + +""" + obtained = "\n".join(nx.generate_gexf(G)) + assert expected == obtained + + def test_edge_id_construct(self): + G = nx.Graph() + G.add_edges_from([(0, 1, {"id": 0}), (1, 2, {"id": 2}), (2, 3)]) + + expected = f""" + + NetworkX {nx.__version__} + + + + + + + + + + + + + + +""" + + obtained = "\n".join(nx.generate_gexf(G)) + assert expected == obtained + + def test_numpy_type(self): + np = pytest.importorskip("numpy") + G = nx.path_graph(4) + nx.set_node_attributes(G, {n: n for n in np.arange(4)}, "number") + G[0][1]["edge-number"] = np.float64(1.1) + + expected = f""" + + NetworkX {nx.__version__} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +""" + obtained = "\n".join(nx.generate_gexf(G)) + assert expected == obtained + + def test_bool(self): + G = nx.Graph() + G.add_node(1, testattr=True) + fh = io.BytesIO() + nx.write_gexf(G, fh) + fh.seek(0) + H = nx.read_gexf(fh, node_type=int) + assert H.nodes[1]["testattr"] + + # Test for NaN, INF and -INF + def test_specials(self): + from math import isnan + + inf, nan = float("inf"), float("nan") + G = nx.Graph() + G.add_node(1, testattr=inf, strdata="inf", key="a") + G.add_node(2, testattr=nan, strdata="nan", key="b") + G.add_node(3, testattr=-inf, strdata="-inf", key="c") + + fh = io.BytesIO() + nx.write_gexf(G, fh) + fh.seek(0) + filetext = fh.read() + fh.seek(0) + H = nx.read_gexf(fh, node_type=int) + + assert b"INF" in filetext + assert b"NaN" in filetext + assert b"-INF" in filetext + + assert H.nodes[1]["testattr"] == inf + assert isnan(H.nodes[2]["testattr"]) + assert H.nodes[3]["testattr"] == -inf + + assert H.nodes[1]["strdata"] == "inf" + assert H.nodes[2]["strdata"] == "nan" + assert H.nodes[3]["strdata"] == "-inf" + + assert H.nodes[1]["networkx_key"] == "a" + assert H.nodes[2]["networkx_key"] == "b" + assert H.nodes[3]["networkx_key"] == "c" + + def test_simple_list(self): + G = nx.Graph() + list_value = [(1, 2, 3), (9, 1, 2)] + G.add_node(1, key=list_value) + fh = io.BytesIO() + nx.write_gexf(G, fh) + fh.seek(0) + H = nx.read_gexf(fh, node_type=int) + assert H.nodes[1]["networkx_key"] == list_value + + def test_dynamic_mode(self): + G = nx.Graph() + G.add_node(1, label="1", color="green") + G.graph["mode"] = "dynamic" + fh = io.BytesIO() + nx.write_gexf(G, fh) + fh.seek(0) + H = nx.read_gexf(fh, node_type=int) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(sorted(e) for e in G.edges()) == sorted( + sorted(e) for e in H.edges() + ) + + def test_multigraph_with_missing_attributes(self): + G = nx.MultiGraph() + G.add_node(0, label="1", color="green") + G.add_node(1, label="2", color="green") + G.add_edge(0, 1, id="0", weight=3, type="undirected", start=0, end=1) + G.add_edge(0, 1, id="1", label="foo", start=0, end=1) + G.add_edge(0, 1) + fh = io.BytesIO() + nx.write_gexf(G, fh) + fh.seek(0) + H = nx.read_gexf(fh, node_type=int) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(sorted(e) for e in G.edges()) == sorted( + sorted(e) for e in H.edges() + ) + + def test_missing_viz_attributes(self): + G = nx.Graph() + G.add_node(0, label="1", color="green") + G.nodes[0]["viz"] = {"size": 54} + G.nodes[0]["viz"]["position"] = {"x": 0, "y": 1, "z": 0} + G.nodes[0]["viz"]["color"] = {"r": 0, 
"g": 0, "b": 256} + G.nodes[0]["viz"]["shape"] = "http://random.url" + G.nodes[0]["viz"]["thickness"] = 2 + fh = io.BytesIO() + nx.write_gexf(G, fh, version="1.1draft") + fh.seek(0) + H = nx.read_gexf(fh, node_type=int) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(sorted(e) for e in G.edges()) == sorted( + sorted(e) for e in H.edges() + ) + + # Test missing alpha value for version >draft1.1 - set default alpha value + # to 1.0 instead of `None` when writing for better general compatibility + fh = io.BytesIO() + # G.nodes[0]["viz"]["color"] does not have an alpha value explicitly defined + # so the default is used instead + nx.write_gexf(G, fh, version="1.2draft") + fh.seek(0) + H = nx.read_gexf(fh, node_type=int) + assert H.nodes[0]["viz"]["color"]["a"] == 1.0 + + # Second graph for the other branch + G = nx.Graph() + G.add_node(0, label="1", color="green") + G.nodes[0]["viz"] = {"size": 54} + G.nodes[0]["viz"]["position"] = {"x": 0, "y": 1, "z": 0} + G.nodes[0]["viz"]["color"] = {"r": 0, "g": 0, "b": 256, "a": 0.5} + G.nodes[0]["viz"]["shape"] = "ftp://random.url" + G.nodes[0]["viz"]["thickness"] = 2 + fh = io.BytesIO() + nx.write_gexf(G, fh) + fh.seek(0) + H = nx.read_gexf(fh, node_type=int) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(sorted(e) for e in G.edges()) == sorted( + sorted(e) for e in H.edges() + ) + + def test_slice_and_spell(self): + # Test spell first, so version = 1.2 + G = nx.Graph() + G.add_node(0, label="1", color="green") + G.nodes[0]["spells"] = [(1, 2)] + fh = io.BytesIO() + nx.write_gexf(G, fh) + fh.seek(0) + H = nx.read_gexf(fh, node_type=int) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(sorted(e) for e in G.edges()) == sorted( + sorted(e) for e in H.edges() + ) + + G = nx.Graph() + G.add_node(0, label="1", color="green") + G.nodes[0]["slices"] = [(1, 2)] + fh = io.BytesIO() + nx.write_gexf(G, fh, version="1.1draft") + fh.seek(0) + H = nx.read_gexf(fh, node_type=int) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(sorted(e) for e in G.edges()) == sorted( + sorted(e) for e in H.edges() + ) + + def test_add_parent(self): + G = nx.Graph() + G.add_node(0, label="1", color="green", parents=[1, 2]) + fh = io.BytesIO() + nx.write_gexf(G, fh) + fh.seek(0) + H = nx.read_gexf(fh, node_type=int) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(sorted(e) for e in G.edges()) == sorted( + sorted(e) for e in H.edges() + ) diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/tests/test_gml.py b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/test_gml.py new file mode 100644 index 0000000000000000000000000000000000000000..f575ad269cf33c940a204aed398460a420550cc7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/test_gml.py @@ -0,0 +1,744 @@ +import codecs +import io +import math +from ast import literal_eval +from contextlib import contextmanager +from textwrap import dedent + +import pytest + +import networkx as nx +from networkx.readwrite.gml import literal_destringizer, literal_stringizer + + +class TestGraph: + @classmethod + def setup_class(cls): + cls.simple_data = """Creator "me" +Version "xx" +graph [ + comment "This is a sample graph" + directed 1 + IsPlanar 1 + pos [ x 0 y 1 ] + node [ + id 1 + label "Node 1" + pos [ x 1 y 1 ] + ] + node [ + id 2 + pos [ x 1 y 2 ] + label "Node 2" + ] + node [ + id 3 + label "Node 3" + pos [ x 1 y 3 ] + ] + edge [ + source 1 + target 2 + label "Edge from node 1 to node 2" + color [line "blue" 
thickness 3] + + ] + edge [ + source 2 + target 3 + label "Edge from node 2 to node 3" + ] + edge [ + source 3 + target 1 + label "Edge from node 3 to node 1" + ] +] +""" + + def test_parse_gml_cytoscape_bug(self): + # example from issue #321, originally #324 in trac + cytoscape_example = """ +Creator "Cytoscape" +Version 1.0 +graph [ + node [ + root_index -3 + id -3 + graphics [ + x -96.0 + y -67.0 + w 40.0 + h 40.0 + fill "#ff9999" + type "ellipse" + outline "#666666" + outline_width 1.5 + ] + label "node2" + ] + node [ + root_index -2 + id -2 + graphics [ + x 63.0 + y 37.0 + w 40.0 + h 40.0 + fill "#ff9999" + type "ellipse" + outline "#666666" + outline_width 1.5 + ] + label "node1" + ] + node [ + root_index -1 + id -1 + graphics [ + x -31.0 + y -17.0 + w 40.0 + h 40.0 + fill "#ff9999" + type "ellipse" + outline "#666666" + outline_width 1.5 + ] + label "node0" + ] + edge [ + root_index -2 + target -2 + source -1 + graphics [ + width 1.5 + fill "#0000ff" + type "line" + Line [ + ] + source_arrow 0 + target_arrow 3 + ] + label "DirectedEdge" + ] + edge [ + root_index -1 + target -1 + source -3 + graphics [ + width 1.5 + fill "#0000ff" + type "line" + Line [ + ] + source_arrow 0 + target_arrow 3 + ] + label "DirectedEdge" + ] +] +""" + nx.parse_gml(cytoscape_example) + + def test_parse_gml(self): + G = nx.parse_gml(self.simple_data, label="label") + assert sorted(G.nodes()) == ["Node 1", "Node 2", "Node 3"] + assert sorted(G.edges()) == [ + ("Node 1", "Node 2"), + ("Node 2", "Node 3"), + ("Node 3", "Node 1"), + ] + + assert sorted(G.edges(data=True)) == [ + ( + "Node 1", + "Node 2", + { + "color": {"line": "blue", "thickness": 3}, + "label": "Edge from node 1 to node 2", + }, + ), + ("Node 2", "Node 3", {"label": "Edge from node 2 to node 3"}), + ("Node 3", "Node 1", {"label": "Edge from node 3 to node 1"}), + ] + + def test_read_gml(self, tmp_path): + fname = tmp_path / "test.gml" + with open(fname, "w") as fh: + fh.write(self.simple_data) + Gin = nx.read_gml(fname, label="label") + G = nx.parse_gml(self.simple_data, label="label") + assert sorted(G.nodes(data=True)) == sorted(Gin.nodes(data=True)) + assert sorted(G.edges(data=True)) == sorted(Gin.edges(data=True)) + + def test_labels_are_strings(self): + # GML requires labels to be strings (i.e., in quotes) + answer = """graph [ + node [ + id 0 + label "1203" + ] +]""" + G = nx.Graph() + G.add_node(1203) + data = "\n".join(nx.generate_gml(G, stringizer=literal_stringizer)) + assert data == answer + + def test_relabel_duplicate(self): + data = """ +graph +[ + label "" + directed 1 + node + [ + id 0 + label "same" + ] + node + [ + id 1 + label "same" + ] +] +""" + fh = io.BytesIO(data.encode("UTF-8")) + fh.seek(0) + pytest.raises(nx.NetworkXError, nx.read_gml, fh, label="label") + + @pytest.mark.parametrize("stringizer", (None, literal_stringizer)) + def test_tuplelabels(self, stringizer): + # https://github.com/networkx/networkx/pull/1048 + # Writing tuple labels to GML failed. + G = nx.Graph() + G.add_edge((0, 1), (1, 0)) + data = "\n".join(nx.generate_gml(G, stringizer=stringizer)) + answer = """graph [ + node [ + id 0 + label "(0,1)" + ] + node [ + id 1 + label "(1,0)" + ] + edge [ + source 0 + target 1 + ] +]""" + assert data == answer + + def test_quotes(self, tmp_path): + # https://github.com/networkx/networkx/issues/1061 + # Encoding quotes as HTML entities. 
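+ # (the file is written as bytes and read back with .decode("ascii"), so
+ # the quote and copyright characters must be entity-escaped on disk)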
+ G = nx.path_graph(1) + G.name = "path_graph(1)" + attr = 'This is "quoted" and this is a copyright: ' + chr(169) + G.nodes[0]["demo"] = attr + with open(tmp_path / "test.gml", "w+b") as fobj: + nx.write_gml(G, fobj) + fobj.seek(0) + # Should be bytes in 2.x and 3.x + data = fobj.read().strip().decode("ascii") + answer = """graph [ + name "path_graph(1)" + node [ + id 0 + label "0" + demo "This is "quoted" and this is a copyright: ©" + ] +]""" + assert data == answer + + def test_unicode_node(self, tmp_path): + node = "node" + chr(169) + G = nx.Graph() + G.add_node(node) + with open(tmp_path / "test.gml", "w+b") as fobj: + nx.write_gml(G, fobj) + fobj.seek(0) + # Should be bytes in 2.x and 3.x + data = fobj.read().strip().decode("ascii") + answer = """graph [ + node [ + id 0 + label "node©" + ] +]""" + assert data == answer + + def test_float_label(self, tmp_path): + node = 1.0 + G = nx.Graph() + G.add_node(node) + with open(tmp_path / "test.gml", "w+b") as fobj: + nx.write_gml(G, fobj) + fobj.seek(0) + # Should be bytes in 2.x and 3.x + data = fobj.read().strip().decode("ascii") + answer = """graph [ + node [ + id 0 + label "1.0" + ] +]""" + assert data == answer + + def test_special_float_label(self, tmp_path): + special_floats = [float("nan"), float("+inf"), float("-inf")] + try: + import numpy as np + + special_floats += [np.nan, np.inf, np.inf * -1] + except ImportError: + special_floats += special_floats + + G = nx.cycle_graph(len(special_floats)) + attrs = dict(enumerate(special_floats)) + nx.set_node_attributes(G, attrs, "nodefloat") + edges = list(G.edges) + attrs = {edges[i]: value for i, value in enumerate(special_floats)} + nx.set_edge_attributes(G, attrs, "edgefloat") + + with open(tmp_path / "test.gml", "w+b") as fobj: + nx.write_gml(G, fobj) + fobj.seek(0) + # Should be bytes in 2.x and 3.x + data = fobj.read().strip().decode("ascii") + answer = """graph [ + node [ + id 0 + label "0" + nodefloat NAN + ] + node [ + id 1 + label "1" + nodefloat +INF + ] + node [ + id 2 + label "2" + nodefloat -INF + ] + node [ + id 3 + label "3" + nodefloat NAN + ] + node [ + id 4 + label "4" + nodefloat +INF + ] + node [ + id 5 + label "5" + nodefloat -INF + ] + edge [ + source 0 + target 1 + edgefloat NAN + ] + edge [ + source 0 + target 5 + edgefloat +INF + ] + edge [ + source 1 + target 2 + edgefloat -INF + ] + edge [ + source 2 + target 3 + edgefloat NAN + ] + edge [ + source 3 + target 4 + edgefloat +INF + ] + edge [ + source 4 + target 5 + edgefloat -INF + ] +]""" + assert data == answer + + fobj.seek(0) + graph = nx.read_gml(fobj) + for indx, value in enumerate(special_floats): + node_value = graph.nodes[str(indx)]["nodefloat"] + if math.isnan(value): + assert math.isnan(node_value) + else: + assert node_value == value + + edge = edges[indx] + string_edge = (str(edge[0]), str(edge[1])) + edge_value = graph.edges[string_edge]["edgefloat"] + if math.isnan(value): + assert math.isnan(edge_value) + else: + assert edge_value == value + + def test_name(self): + G = nx.parse_gml('graph [ name "x" node [ id 0 label "x" ] ]') + assert "x" == G.graph["name"] + G = nx.parse_gml('graph [ node [ id 0 label "x" ] ]') + assert "" == G.name + assert "name" not in G.graph + + def test_graph_types(self): + for directed in [None, False, True]: + for multigraph in [None, False, True]: + gml = "graph [" + if directed is not None: + gml += " directed " + str(int(directed)) + if multigraph is not None: + gml += " multigraph " + str(int(multigraph)) + gml += ' node [ id 0 label "0" ]' + gml += " edge [ 
source 0 target 0 ]" + gml += " ]" + G = nx.parse_gml(gml) + assert bool(directed) == G.is_directed() + assert bool(multigraph) == G.is_multigraph() + gml = "graph [\n" + if directed is True: + gml += " directed 1\n" + if multigraph is True: + gml += " multigraph 1\n" + gml += """ node [ + id 0 + label "0" + ] + edge [ + source 0 + target 0 +""" + if multigraph: + gml += " key 0\n" + gml += " ]\n]" + assert gml == "\n".join(nx.generate_gml(G)) + + def test_data_types(self): + data = [ + True, + False, + 10**20, + -2e33, + "'", + '"&&&""', + [{(b"\xfd",): "\x7f", chr(0x4444): (1, 2)}, (2, "3")], + ] + data.append(chr(0x14444)) + data.append(literal_eval("{2.3j, 1 - 2.3j, ()}")) + G = nx.Graph() + G.name = data + G.graph["data"] = data + G.add_node(0, int=-1, data={"data": data}) + G.add_edge(0, 0, float=-2.5, data=data) + gml = "\n".join(nx.generate_gml(G, stringizer=literal_stringizer)) + G = nx.parse_gml(gml, destringizer=literal_destringizer) + assert data == G.name + assert {"name": data, "data": data} == G.graph + assert list(G.nodes(data=True)) == [(0, {"int": -1, "data": {"data": data}})] + assert list(G.edges(data=True)) == [(0, 0, {"float": -2.5, "data": data})] + G = nx.Graph() + G.graph["data"] = "frozenset([1, 2, 3])" + G = nx.parse_gml(nx.generate_gml(G), destringizer=literal_eval) + assert G.graph["data"] == "frozenset([1, 2, 3])" + + def test_escape_unescape(self): + gml = """graph [ + name "&"䑄��&unknown;" +]""" + G = nx.parse_gml(gml) + assert ( + '&"\x0f' + chr(0x4444) + "��&unknown;" + == G.name + ) + gml = "\n".join(nx.generate_gml(G)) + alnu = "#1234567890;&#x1234567890abcdef" + answer = ( + """graph [ + name "&"䑄&""" + + alnu + + """;&unknown;" +]""" + ) + assert answer == gml + + def test_exceptions(self, tmp_path): + pytest.raises(ValueError, literal_destringizer, "(") + pytest.raises(ValueError, literal_destringizer, "frozenset([1, 2, 3])") + pytest.raises(ValueError, literal_destringizer, literal_destringizer) + pytest.raises(ValueError, literal_stringizer, frozenset([1, 2, 3])) + pytest.raises(ValueError, literal_stringizer, literal_stringizer) + with open(tmp_path / "test.gml", "w+b") as f: + f.write(codecs.BOM_UTF8 + b"graph[]") + f.seek(0) + pytest.raises(nx.NetworkXError, nx.read_gml, f) + + def assert_parse_error(gml): + pytest.raises(nx.NetworkXError, nx.parse_gml, gml) + + assert_parse_error(["graph [\n\n", "]"]) + assert_parse_error("") + assert_parse_error('Creator ""') + assert_parse_error("0") + assert_parse_error("graph ]") + assert_parse_error("graph [ 1 ]") + assert_parse_error("graph [ 1.E+2 ]") + assert_parse_error('graph [ "A" ]') + assert_parse_error("graph [ ] graph ]") + assert_parse_error("graph [ ] graph [ ]") + assert_parse_error("graph [ data [1, 2, 3] ]") + assert_parse_error("graph [ node [ ] ]") + assert_parse_error("graph [ node [ id 0 ] ]") + nx.parse_gml('graph [ node [ id "a" ] ]', label="id") + assert_parse_error("graph [ node [ id 0 label 0 ] node [ id 0 label 1 ] ]") + assert_parse_error("graph [ node [ id 0 label 0 ] node [ id 1 label 0 ] ]") + assert_parse_error("graph [ node [ id 0 label 0 ] edge [ ] ]") + assert_parse_error("graph [ node [ id 0 label 0 ] edge [ source 0 ] ]") + nx.parse_gml("graph [edge [ source 0 target 0 ] node [ id 0 label 0 ] ]") + assert_parse_error("graph [ node [ id 0 label 0 ] edge [ source 1 target 0 ] ]") + assert_parse_error("graph [ node [ id 0 label 0 ] edge [ source 0 target 1 ] ]") + assert_parse_error( + "graph [ node [ id 0 label 0 ] node [ id 1 label 1 ] " + "edge [ source 0 target 1 ] edge [ 
source 1 target 0 ] ]" + ) + nx.parse_gml( + "graph [ node [ id 0 label 0 ] node [ id 1 label 1 ] " + "edge [ source 0 target 1 ] edge [ source 1 target 0 ] " + "directed 1 ]" + ) + nx.parse_gml( + "graph [ node [ id 0 label 0 ] node [ id 1 label 1 ] " + "edge [ source 0 target 1 ] edge [ source 0 target 1 ]" + "multigraph 1 ]" + ) + nx.parse_gml( + "graph [ node [ id 0 label 0 ] node [ id 1 label 1 ] " + "edge [ source 0 target 1 key 0 ] edge [ source 0 target 1 ]" + "multigraph 1 ]" + ) + assert_parse_error( + "graph [ node [ id 0 label 0 ] node [ id 1 label 1 ] " + "edge [ source 0 target 1 key 0 ] edge [ source 0 target 1 key 0 ]" + "multigraph 1 ]" + ) + nx.parse_gml( + "graph [ node [ id 0 label 0 ] node [ id 1 label 1 ] " + "edge [ source 0 target 1 key 0 ] edge [ source 1 target 0 key 0 ]" + "directed 1 multigraph 1 ]" + ) + + # Tests for string convertible alphanumeric id and label values + nx.parse_gml("graph [edge [ source a target a ] node [ id a label b ] ]") + nx.parse_gml( + "graph [ node [ id n42 label 0 ] node [ id x43 label 1 ]" + "edge [ source n42 target x43 key 0 ]" + "edge [ source x43 target n42 key 0 ]" + "directed 1 multigraph 1 ]" + ) + assert_parse_error( + "graph [edge [ source '\u4200' target '\u4200' ] " + + "node [ id '\u4200' label b ] ]" + ) + + def assert_generate_error(*args, **kwargs): + pytest.raises( + nx.NetworkXError, lambda: list(nx.generate_gml(*args, **kwargs)) + ) + + G = nx.Graph() + G.graph[3] = 3 + assert_generate_error(G) + G = nx.Graph() + G.graph["3"] = 3 + assert_generate_error(G) + G = nx.Graph() + G.graph["data"] = frozenset([1, 2, 3]) + assert_generate_error(G, stringizer=literal_stringizer) + + def test_label_kwarg(self): + G = nx.parse_gml(self.simple_data, label="id") + assert sorted(G.nodes) == [1, 2, 3] + labels = [G.nodes[n]["label"] for n in sorted(G.nodes)] + assert labels == ["Node 1", "Node 2", "Node 3"] + + G = nx.parse_gml(self.simple_data, label=None) + assert sorted(G.nodes) == [1, 2, 3] + labels = [G.nodes[n]["label"] for n in sorted(G.nodes)] + assert labels == ["Node 1", "Node 2", "Node 3"] + + def test_outofrange_integers(self, tmp_path): + # GML restricts integers to 32 signed bits. 
+ # Check that we honor this restriction on export + G = nx.Graph() + # Test export for numbers that barely fit or don't fit into 32 bits, + # and 3 numbers in the middle + numbers = { + "toosmall": (-(2**31)) - 1, + "small": -(2**31), + "med1": -4, + "med2": 0, + "med3": 17, + "big": (2**31) - 1, + "toobig": 2**31, + } + G.add_node("Node", **numbers) + + fname = tmp_path / "test.gml" + nx.write_gml(G, fname) + # Check that the export wrote the nonfitting numbers as strings + G2 = nx.read_gml(fname) + for attr, value in G2.nodes["Node"].items(): + if attr == "toosmall" or attr == "toobig": + assert type(value) == str + else: + assert type(value) == int + + def test_multiline(self): + # example from issue #6836 + multiline_example = """ +graph +[ + node + [ + id 0 + label "multiline node" + label2 "multiline1 + multiline2 + multiline3" + alt_name "id 0" + ] +] +""" + G = nx.parse_gml(multiline_example) + assert G.nodes["multiline node"] == { + "label2": "multiline1 multiline2 multiline3", + "alt_name": "id 0", + } + + +@contextmanager +def byte_file(): + _file_handle = io.BytesIO() + yield _file_handle + _file_handle.seek(0) + + +class TestPropertyLists: + def test_writing_graph_with_multi_element_property_list(self): + g = nx.Graph() + g.add_node("n1", properties=["element", 0, 1, 2.5, True, False]) + with byte_file() as f: + nx.write_gml(g, f) + result = f.read().decode() + + assert result == dedent( + """\ + graph [ + node [ + id 0 + label "n1" + properties "element" + properties 0 + properties 1 + properties 2.5 + properties 1 + properties 0 + ] + ] + """ + ) + + def test_writing_graph_with_one_element_property_list(self): + g = nx.Graph() + g.add_node("n1", properties=["element"]) + with byte_file() as f: + nx.write_gml(g, f) + result = f.read().decode() + + assert result == dedent( + """\ + graph [ + node [ + id 0 + label "n1" + properties "_networkx_list_start" + properties "element" + ] + ] + """ + ) + + def test_reading_graph_with_list_property(self): + with byte_file() as f: + f.write( + dedent( + """ + graph [ + node [ + id 0 + label "n1" + properties "element" + properties 0 + properties 1 + properties 2.5 + ] + ] + """ + ).encode("ascii") + ) + f.seek(0) + graph = nx.read_gml(f) + assert graph.nodes(data=True)["n1"] == {"properties": ["element", 0, 1, 2.5]} + + def test_reading_graph_with_single_element_list_property(self): + with byte_file() as f: + f.write( + dedent( + """ + graph [ + node [ + id 0 + label "n1" + properties "_networkx_list_start" + properties "element" + ] + ] + """ + ).encode("ascii") + ) + f.seek(0) + graph = nx.read_gml(f) + assert graph.nodes(data=True)["n1"] == {"properties": ["element"]} + + +@pytest.mark.parametrize("coll", ([], ())) +def test_stringize_empty_list_tuple(coll): + G = nx.path_graph(2) + G.nodes[0]["test"] = coll # test serializing an empty collection + f = io.BytesIO() + nx.write_gml(G, f) # Smoke test - should not raise + f.seek(0) + H = nx.read_gml(f) + assert H.nodes["0"]["test"] == coll # Check empty list round-trips properly + # Check full round-tripping. Note that nodes are loaded as strings by + # default, so there needs to be some remapping prior to comparison + H = nx.relabel_nodes(H, {"0": 0, "1": 1}) + assert nx.utils.graphs_equal(G, H) + # Same as above, but use destringizer for node remapping. 
Should have no + # effect on node attr + f.seek(0) + H = nx.read_gml(f, destringizer=int) + assert nx.utils.graphs_equal(G, H) diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/tests/test_graph6.py b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/test_graph6.py new file mode 100644 index 0000000000000000000000000000000000000000..a80326946c611751c1d27a3a10e74b64f2d379d4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/test_graph6.py @@ -0,0 +1,168 @@ +from io import BytesIO + +import pytest + +import networkx as nx +import networkx.readwrite.graph6 as g6 +from networkx.utils import edges_equal, nodes_equal + + +class TestGraph6Utils: + def test_n_data_n_conversion(self): + for i in [0, 1, 42, 62, 63, 64, 258047, 258048, 7744773, 68719476735]: + assert g6.data_to_n(g6.n_to_data(i))[0] == i + assert g6.data_to_n(g6.n_to_data(i))[1] == [] + assert g6.data_to_n(g6.n_to_data(i) + [42, 43])[1] == [42, 43] + + +class TestFromGraph6Bytes: + def test_from_graph6_bytes(self): + data = b"DF{" + G = nx.from_graph6_bytes(data) + assert nodes_equal(G.nodes(), [0, 1, 2, 3, 4]) + assert edges_equal( + G.edges(), [(0, 3), (0, 4), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)] + ) + + def test_read_equals_from_bytes(self): + data = b"DF{" + G = nx.from_graph6_bytes(data) + fh = BytesIO(data) + Gin = nx.read_graph6(fh) + assert nodes_equal(G.nodes(), Gin.nodes()) + assert edges_equal(G.edges(), Gin.edges()) + + +class TestReadGraph6: + def test_read_many_graph6(self): + """Test for reading many graphs from a file into a list.""" + data = b"DF{\nD`{\nDqK\nD~{\n" + fh = BytesIO(data) + glist = nx.read_graph6(fh) + assert len(glist) == 4 + for G in glist: + assert sorted(G) == list(range(5)) + + +class TestWriteGraph6: + """Unit tests for writing a graph to a file in graph6 format.""" + + def test_null_graph(self): + result = BytesIO() + nx.write_graph6(nx.null_graph(), result) + assert result.getvalue() == b">>graph6<>graph6<<@\n" + + def test_complete_graph(self): + result = BytesIO() + nx.write_graph6(nx.complete_graph(4), result) + assert result.getvalue() == b">>graph6<>graph6<>graph6<>graph6<>graph6<<@\n" + + def test_complete_graph(self): + assert g6.to_graph6_bytes(nx.complete_graph(4)) == b">>graph6<>graph6< + + + + + + + + + + + + + + + + + + + + + + + + + + +""" + cls.simple_directed_graph = nx.DiGraph() + cls.simple_directed_graph.add_node("n10") + cls.simple_directed_graph.add_edge("n0", "n2", id="foo") + cls.simple_directed_graph.add_edge("n0", "n2") + cls.simple_directed_graph.add_edges_from( + [ + ("n1", "n2"), + ("n2", "n3"), + ("n3", "n5"), + ("n3", "n4"), + ("n4", "n6"), + ("n6", "n5"), + ("n5", "n7"), + ("n6", "n8"), + ("n8", "n7"), + ("n8", "n9"), + ] + ) + cls.simple_directed_fh = io.BytesIO(cls.simple_directed_data.encode("UTF-8")) + + cls.attribute_data = """ + + + yellow + + + + + green + + + + blue + + + red + + + + turquoise + + + 1.0 + + + 1.0 + + + 2.0 + + + + + + 1.1 + + + +""" + cls.attribute_graph = nx.DiGraph(id="G") + cls.attribute_graph.graph["node_default"] = {"color": "yellow"} + cls.attribute_graph.add_node("n0", color="green") + cls.attribute_graph.add_node("n2", color="blue") + cls.attribute_graph.add_node("n3", color="red") + cls.attribute_graph.add_node("n4") + cls.attribute_graph.add_node("n5", color="turquoise") + cls.attribute_graph.add_edge("n0", "n2", id="e0", weight=1.0) + cls.attribute_graph.add_edge("n0", "n1", id="e1", weight=1.0) + cls.attribute_graph.add_edge("n1", "n3", id="e2", weight=2.0) + 
cls.attribute_graph.add_edge("n3", "n2", id="e3") + cls.attribute_graph.add_edge("n2", "n4", id="e4") + cls.attribute_graph.add_edge("n3", "n5", id="e5") + cls.attribute_graph.add_edge("n5", "n4", id="e6", weight=1.1) + cls.attribute_fh = io.BytesIO(cls.attribute_data.encode("UTF-8")) + + cls.node_attribute_default_data = """ + + false + 0 + 0 + 0.0 + 0.0 + Foo + + + + + + + """ + cls.node_attribute_default_graph = nx.DiGraph(id="G") + cls.node_attribute_default_graph.graph["node_default"] = { + "boolean_attribute": False, + "int_attribute": 0, + "long_attribute": 0, + "float_attribute": 0.0, + "double_attribute": 0.0, + "string_attribute": "Foo", + } + cls.node_attribute_default_graph.add_node("n0") + cls.node_attribute_default_graph.add_node("n1") + cls.node_attribute_default_graph.add_edge("n0", "n1", id="e0") + cls.node_attribute_default_fh = io.BytesIO( + cls.node_attribute_default_data.encode("UTF-8") + ) + + cls.attribute_named_key_ids_data = """ + + + + + + + val1 + val2 + + + val_one + val2 + + + edge_value + + + +""" + cls.attribute_named_key_ids_graph = nx.DiGraph() + cls.attribute_named_key_ids_graph.add_node("0", prop1="val1", prop2="val2") + cls.attribute_named_key_ids_graph.add_node("1", prop1="val_one", prop2="val2") + cls.attribute_named_key_ids_graph.add_edge("0", "1", edge_prop="edge_value") + fh = io.BytesIO(cls.attribute_named_key_ids_data.encode("UTF-8")) + cls.attribute_named_key_ids_fh = fh + + cls.attribute_numeric_type_data = """ + + + + + + 1 + + + 2.0 + + + 1 + + + k + + + 1.0 + + + +""" + cls.attribute_numeric_type_graph = nx.DiGraph() + cls.attribute_numeric_type_graph.add_node("n0", weight=1) + cls.attribute_numeric_type_graph.add_node("n1", weight=2.0) + cls.attribute_numeric_type_graph.add_edge("n0", "n1", weight=1) + cls.attribute_numeric_type_graph.add_edge("n1", "n1", weight=1.0) + fh = io.BytesIO(cls.attribute_numeric_type_data.encode("UTF-8")) + cls.attribute_numeric_type_fh = fh + + cls.simple_undirected_data = """ + + + + + + + + + + +""" + # + cls.simple_undirected_graph = nx.Graph() + cls.simple_undirected_graph.add_node("n10") + cls.simple_undirected_graph.add_edge("n0", "n2", id="foo") + cls.simple_undirected_graph.add_edges_from([("n1", "n2"), ("n2", "n3")]) + fh = io.BytesIO(cls.simple_undirected_data.encode("UTF-8")) + cls.simple_undirected_fh = fh + + cls.undirected_multigraph_data = """ + + + + + + + + + + +""" + cls.undirected_multigraph = nx.MultiGraph() + cls.undirected_multigraph.add_node("n10") + cls.undirected_multigraph.add_edge("n0", "n2", id="e0") + cls.undirected_multigraph.add_edge("n1", "n2", id="e1") + cls.undirected_multigraph.add_edge("n2", "n1", id="e2") + fh = io.BytesIO(cls.undirected_multigraph_data.encode("UTF-8")) + cls.undirected_multigraph_fh = fh + + cls.undirected_multigraph_no_multiedge_data = """ + + + + + + + + + + +""" + cls.undirected_multigraph_no_multiedge = nx.MultiGraph() + cls.undirected_multigraph_no_multiedge.add_node("n10") + cls.undirected_multigraph_no_multiedge.add_edge("n0", "n2", id="e0") + cls.undirected_multigraph_no_multiedge.add_edge("n1", "n2", id="e1") + cls.undirected_multigraph_no_multiedge.add_edge("n2", "n3", id="e2") + fh = io.BytesIO(cls.undirected_multigraph_no_multiedge_data.encode("UTF-8")) + cls.undirected_multigraph_no_multiedge_fh = fh + + cls.multigraph_only_ids_for_multiedges_data = """ + + + + + + + + + + +""" + cls.multigraph_only_ids_for_multiedges = nx.MultiGraph() + cls.multigraph_only_ids_for_multiedges.add_node("n10") + cls.multigraph_only_ids_for_multiedges.add_edge("n0", 
"n2") + cls.multigraph_only_ids_for_multiedges.add_edge("n1", "n2", id="e1") + cls.multigraph_only_ids_for_multiedges.add_edge("n2", "n1", id="e2") + fh = io.BytesIO(cls.multigraph_only_ids_for_multiedges_data.encode("UTF-8")) + cls.multigraph_only_ids_for_multiedges_fh = fh + + +class TestReadGraphML(BaseGraphML): + def test_read_simple_directed_graphml(self): + G = self.simple_directed_graph + H = nx.read_graphml(self.simple_directed_fh) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(G.edges()) == sorted(H.edges()) + assert sorted(G.edges(data=True)) == sorted(H.edges(data=True)) + self.simple_directed_fh.seek(0) + + PG = nx.parse_graphml(self.simple_directed_data) + assert sorted(G.nodes()) == sorted(PG.nodes()) + assert sorted(G.edges()) == sorted(PG.edges()) + assert sorted(G.edges(data=True)) == sorted(PG.edges(data=True)) + + def test_read_simple_undirected_graphml(self): + G = self.simple_undirected_graph + H = nx.read_graphml(self.simple_undirected_fh) + assert nodes_equal(G.nodes(), H.nodes()) + assert edges_equal(G.edges(), H.edges()) + self.simple_undirected_fh.seek(0) + + PG = nx.parse_graphml(self.simple_undirected_data) + assert nodes_equal(G.nodes(), PG.nodes()) + assert edges_equal(G.edges(), PG.edges()) + + def test_read_undirected_multigraph_graphml(self): + G = self.undirected_multigraph + H = nx.read_graphml(self.undirected_multigraph_fh) + assert nodes_equal(G.nodes(), H.nodes()) + assert edges_equal(G.edges(), H.edges()) + self.undirected_multigraph_fh.seek(0) + + PG = nx.parse_graphml(self.undirected_multigraph_data) + assert nodes_equal(G.nodes(), PG.nodes()) + assert edges_equal(G.edges(), PG.edges()) + + def test_read_undirected_multigraph_no_multiedge_graphml(self): + G = self.undirected_multigraph_no_multiedge + H = nx.read_graphml(self.undirected_multigraph_no_multiedge_fh) + assert nodes_equal(G.nodes(), H.nodes()) + assert edges_equal(G.edges(), H.edges()) + self.undirected_multigraph_no_multiedge_fh.seek(0) + + PG = nx.parse_graphml(self.undirected_multigraph_no_multiedge_data) + assert nodes_equal(G.nodes(), PG.nodes()) + assert edges_equal(G.edges(), PG.edges()) + + def test_read_undirected_multigraph_only_ids_for_multiedges_graphml(self): + G = self.multigraph_only_ids_for_multiedges + H = nx.read_graphml(self.multigraph_only_ids_for_multiedges_fh) + assert nodes_equal(G.nodes(), H.nodes()) + assert edges_equal(G.edges(), H.edges()) + self.multigraph_only_ids_for_multiedges_fh.seek(0) + + PG = nx.parse_graphml(self.multigraph_only_ids_for_multiedges_data) + assert nodes_equal(G.nodes(), PG.nodes()) + assert edges_equal(G.edges(), PG.edges()) + + def test_read_attribute_graphml(self): + G = self.attribute_graph + H = nx.read_graphml(self.attribute_fh) + assert nodes_equal(G.nodes(True), sorted(H.nodes(data=True))) + ge = sorted(G.edges(data=True)) + he = sorted(H.edges(data=True)) + for a, b in zip(ge, he): + assert a == b + self.attribute_fh.seek(0) + + PG = nx.parse_graphml(self.attribute_data) + assert sorted(G.nodes(True)) == sorted(PG.nodes(data=True)) + ge = sorted(G.edges(data=True)) + he = sorted(PG.edges(data=True)) + for a, b in zip(ge, he): + assert a == b + + def test_node_default_attribute_graphml(self): + G = self.node_attribute_default_graph + H = nx.read_graphml(self.node_attribute_default_fh) + assert G.graph["node_default"] == H.graph["node_default"] + + def test_directed_edge_in_undirected(self): + s = """ + + + + + + + + +""" + fh = io.BytesIO(s.encode("UTF-8")) + pytest.raises(nx.NetworkXError, nx.read_graphml, fh) + 
pytest.raises(nx.NetworkXError, nx.parse_graphml, s) + + def test_undirected_edge_in_directed(self): + s = """ + + + + + + + + +""" + fh = io.BytesIO(s.encode("UTF-8")) + pytest.raises(nx.NetworkXError, nx.read_graphml, fh) + pytest.raises(nx.NetworkXError, nx.parse_graphml, s) + + def test_key_raise(self): + s = """ + + + yellow + + + + + green + + + + blue + + + 1.0 + + + +""" + fh = io.BytesIO(s.encode("UTF-8")) + pytest.raises(nx.NetworkXError, nx.read_graphml, fh) + pytest.raises(nx.NetworkXError, nx.parse_graphml, s) + + def test_hyperedge_raise(self): + s = """ + + + yellow + + + + + green + + + + blue + + + + + + + + +""" + fh = io.BytesIO(s.encode("UTF-8")) + pytest.raises(nx.NetworkXError, nx.read_graphml, fh) + pytest.raises(nx.NetworkXError, nx.parse_graphml, s) + + def test_multigraph_keys(self): + # Test that reading multigraphs uses edge id attributes as keys + s = """ + + + + + + + + +""" + fh = io.BytesIO(s.encode("UTF-8")) + G = nx.read_graphml(fh) + expected = [("n0", "n1", "e0"), ("n0", "n1", "e1")] + assert sorted(G.edges(keys=True)) == expected + fh.seek(0) + H = nx.parse_graphml(s) + assert sorted(H.edges(keys=True)) == expected + + def test_preserve_multi_edge_data(self): + """ + Test that data and keys of edges are preserved on consequent + write and reads + """ + G = nx.MultiGraph() + G.add_node(1) + G.add_node(2) + G.add_edges_from( + [ + # edges with no data, no keys: + (1, 2), + # edges with only data: + (1, 2, {"key": "data_key1"}), + (1, 2, {"id": "data_id2"}), + (1, 2, {"key": "data_key3", "id": "data_id3"}), + # edges with both data and keys: + (1, 2, 103, {"key": "data_key4"}), + (1, 2, 104, {"id": "data_id5"}), + (1, 2, 105, {"key": "data_key6", "id": "data_id7"}), + ] + ) + fh = io.BytesIO() + nx.write_graphml(G, fh) + fh.seek(0) + H = nx.read_graphml(fh, node_type=int) + assert edges_equal(G.edges(data=True, keys=True), H.edges(data=True, keys=True)) + assert G._adj == H._adj + + Gadj = { + str(node): { + str(nbr): {str(ekey): dd for ekey, dd in key_dict.items()} + for nbr, key_dict in nbr_dict.items() + } + for node, nbr_dict in G._adj.items() + } + fh.seek(0) + HH = nx.read_graphml(fh, node_type=str, edge_key_type=str) + assert Gadj == HH._adj + + fh.seek(0) + string_fh = fh.read() + HH = nx.parse_graphml(string_fh, node_type=str, edge_key_type=str) + assert Gadj == HH._adj + + def test_yfiles_extension(self): + data = """ + + + + + + + + + + + + + + + + + + + + 1 + + + + + + + + + + + 2 + + + + + + + + + + + + 3 + + + + + + + + + + + + + + + + + + + + +""" + fh = io.BytesIO(data.encode("UTF-8")) + G = nx.read_graphml(fh, force_multigraph=True) + assert list(G.edges()) == [("n0", "n1")] + assert G.has_edge("n0", "n1", key="e0") + assert G.nodes["n0"]["label"] == "1" + assert G.nodes["n1"]["label"] == "2" + assert G.nodes["n2"]["label"] == "3" + assert G.nodes["n0"]["shape_type"] == "rectangle" + assert G.nodes["n1"]["shape_type"] == "rectangle" + assert G.nodes["n2"]["shape_type"] == "com.yworks.flowchart.terminator" + assert G.nodes["n2"]["description"] == "description\nline1\nline2" + fh.seek(0) + G = nx.read_graphml(fh) + assert list(G.edges()) == [("n0", "n1")] + assert G["n0"]["n1"]["id"] == "e0" + assert G.nodes["n0"]["label"] == "1" + assert G.nodes["n1"]["label"] == "2" + assert G.nodes["n2"]["label"] == "3" + assert G.nodes["n0"]["shape_type"] == "rectangle" + assert G.nodes["n1"]["shape_type"] == "rectangle" + assert G.nodes["n2"]["shape_type"] == "com.yworks.flowchart.terminator" + assert G.nodes["n2"]["description"] == 
"description\nline1\nline2" + + H = nx.parse_graphml(data, force_multigraph=True) + assert list(H.edges()) == [("n0", "n1")] + assert H.has_edge("n0", "n1", key="e0") + assert H.nodes["n0"]["label"] == "1" + assert H.nodes["n1"]["label"] == "2" + assert H.nodes["n2"]["label"] == "3" + + H = nx.parse_graphml(data) + assert list(H.edges()) == [("n0", "n1")] + assert H["n0"]["n1"]["id"] == "e0" + assert H.nodes["n0"]["label"] == "1" + assert H.nodes["n1"]["label"] == "2" + assert H.nodes["n2"]["label"] == "3" + + def test_bool(self): + s = """ + + + false + + + + true + + + + false + + + FaLsE + + + True + + + 0 + + + 1 + + + +""" + fh = io.BytesIO(s.encode("UTF-8")) + G = nx.read_graphml(fh) + H = nx.parse_graphml(s) + for graph in [G, H]: + assert graph.nodes["n0"]["test"] + assert not graph.nodes["n2"]["test"] + assert not graph.nodes["n3"]["test"] + assert graph.nodes["n4"]["test"] + assert not graph.nodes["n5"]["test"] + assert graph.nodes["n6"]["test"] + + def test_graphml_header_line(self): + good = """ + + + false + + + + true + + + +""" + bad = """ + + + false + + + + true + + + +""" + ugly = """ + + + false + + + + true + + + +""" + for s in (good, bad): + fh = io.BytesIO(s.encode("UTF-8")) + G = nx.read_graphml(fh) + H = nx.parse_graphml(s) + for graph in [G, H]: + assert graph.nodes["n0"]["test"] + + fh = io.BytesIO(ugly.encode("UTF-8")) + pytest.raises(nx.NetworkXError, nx.read_graphml, fh) + pytest.raises(nx.NetworkXError, nx.parse_graphml, ugly) + + def test_read_attributes_with_groups(self): + data = """\ + + + + + + + + + + + + + + + + + + + + + + + + + + + + 2 + + + + + + + + + + + + + + + + + + + + + + Group 3 + + + + + + + + + + Folder 3 + + + + + + + + + + + + + + + + + + + + + Group 1 + + + + + + + + + + Folder 1 + + + + + + + + + + + + + + + + + + 1 + + + + + + + + + + + + + + + + + + + 3 + + + + + + + + + + + + + + + + + + + + + + + + Group 2 + + + + + + + + + + Folder 2 + + + + + + + + + + + + + + + + + + 5 + + + + + + + + + + + + + + + + + + + 6 + + + + + + + + + + + + + + + + + + + + + + + 9 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +""" + # verify that nodes / attributes are correctly read when part of a group + fh = io.BytesIO(data.encode("UTF-8")) + G = nx.read_graphml(fh) + data = [x for _, x in G.nodes(data=True)] + assert len(data) == 9 + for node_data in data: + assert node_data["CustomProperty"] != "" + + def test_long_attribute_type(self): + # test that graphs with attr.type="long" (as produced by botch and + # dose3) can be parsed + s = """ + + + + + 4284 + + +""" + fh = io.BytesIO(s.encode("UTF-8")) + G = nx.read_graphml(fh) + expected = [("n1", {"cudfversion": 4284})] + assert sorted(G.nodes(data=True)) == expected + fh.seek(0) + H = nx.parse_graphml(s) + assert sorted(H.nodes(data=True)) == expected + + +class TestWriteGraphML(BaseGraphML): + writer = staticmethod(nx.write_graphml_lxml) + + @classmethod + def setup_class(cls): + BaseGraphML.setup_class() + _ = pytest.importorskip("lxml.etree") + + def test_write_interface(self): + try: + import lxml.etree + + assert nx.write_graphml == nx.write_graphml_lxml + except ImportError: + assert nx.write_graphml == nx.write_graphml_xml + + def test_write_read_simple_directed_graphml(self): + G = self.simple_directed_graph + G.graph["hi"] = "there" + fh = io.BytesIO() + self.writer(G, fh) + fh.seek(0) + H = nx.read_graphml(fh) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(G.edges()) == sorted(H.edges()) + assert 
sorted(G.edges(data=True)) == sorted(H.edges(data=True)) + self.simple_directed_fh.seek(0) + + def test_GraphMLWriter_add_graphs(self): + gmlw = GraphMLWriter() + G = self.simple_directed_graph + H = G.copy() + gmlw.add_graphs([G, H]) + + def test_write_read_simple_no_prettyprint(self): + G = self.simple_directed_graph + G.graph["hi"] = "there" + G.graph["id"] = "1" + fh = io.BytesIO() + self.writer(G, fh, prettyprint=False) + fh.seek(0) + H = nx.read_graphml(fh) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(G.edges()) == sorted(H.edges()) + assert sorted(G.edges(data=True)) == sorted(H.edges(data=True)) + self.simple_directed_fh.seek(0) + + def test_write_read_attribute_named_key_ids_graphml(self): + from xml.etree.ElementTree import parse + + G = self.attribute_named_key_ids_graph + fh = io.BytesIO() + self.writer(G, fh, named_key_ids=True) + fh.seek(0) + H = nx.read_graphml(fh) + fh.seek(0) + + assert nodes_equal(G.nodes(), H.nodes()) + assert edges_equal(G.edges(), H.edges()) + assert edges_equal(G.edges(data=True), H.edges(data=True)) + self.attribute_named_key_ids_fh.seek(0) + + xml = parse(fh) + # Children are the key elements, and the graph element + children = list(xml.getroot()) + assert len(children) == 4 + + keys = [child.items() for child in children[:3]] + + assert len(keys) == 3 + assert ("id", "edge_prop") in keys[0] + assert ("attr.name", "edge_prop") in keys[0] + assert ("id", "prop2") in keys[1] + assert ("attr.name", "prop2") in keys[1] + assert ("id", "prop1") in keys[2] + assert ("attr.name", "prop1") in keys[2] + + # Confirm the read graph nodes/edge are identical when compared to + # default writing behavior. + default_behavior_fh = io.BytesIO() + nx.write_graphml(G, default_behavior_fh) + default_behavior_fh.seek(0) + H = nx.read_graphml(default_behavior_fh) + + named_key_ids_behavior_fh = io.BytesIO() + nx.write_graphml(G, named_key_ids_behavior_fh, named_key_ids=True) + named_key_ids_behavior_fh.seek(0) + J = nx.read_graphml(named_key_ids_behavior_fh) + + assert all(n1 == n2 for (n1, n2) in zip(H.nodes, J.nodes)) + assert all(e1 == e2 for (e1, e2) in zip(H.edges, J.edges)) + + def test_write_read_attribute_numeric_type_graphml(self): + from xml.etree.ElementTree import parse + + G = self.attribute_numeric_type_graph + fh = io.BytesIO() + self.writer(G, fh, infer_numeric_types=True) + fh.seek(0) + H = nx.read_graphml(fh) + fh.seek(0) + + assert nodes_equal(G.nodes(), H.nodes()) + assert edges_equal(G.edges(), H.edges()) + assert edges_equal(G.edges(data=True), H.edges(data=True)) + self.attribute_numeric_type_fh.seek(0) + + xml = parse(fh) + # Children are the key elements, and the graph element + children = list(xml.getroot()) + assert len(children) == 3 + + keys = [child.items() for child in children[:2]] + + assert len(keys) == 2 + assert ("attr.type", "double") in keys[0] + assert ("attr.type", "double") in keys[1] + + def test_more_multigraph_keys(self, tmp_path): + """Writing keys as edge id attributes means keys become strings. + The original keys are stored as data, so read them back in + if `str(key) == edge_id` + This allows the adjacency to remain the same. 
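+ For example (an illustration of the mechanism, using the edges added
+ below): an edge stored as ("a", "b", 2) is written with edge id "2";
+ on reading, str(2) == "2" matches the stored key data, so the integer
+ key 2 is recovered and G._adj == H._adj still holds.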
+ """ + G = nx.MultiGraph() + G.add_edges_from([("a", "b", 2), ("a", "b", 3)]) + fname = tmp_path / "test.graphml" + self.writer(G, fname) + H = nx.read_graphml(fname) + assert H.is_multigraph() + assert edges_equal(G.edges(keys=True), H.edges(keys=True)) + assert G._adj == H._adj + + def test_default_attribute(self): + G = nx.Graph(name="Fred") + G.add_node(1, label=1, color="green") + nx.add_path(G, [0, 1, 2, 3]) + G.add_edge(1, 2, weight=3) + G.graph["node_default"] = {"color": "yellow"} + G.graph["edge_default"] = {"weight": 7} + fh = io.BytesIO() + self.writer(G, fh) + fh.seek(0) + H = nx.read_graphml(fh, node_type=int) + assert nodes_equal(G.nodes(), H.nodes()) + assert edges_equal(G.edges(), H.edges()) + assert G.graph == H.graph + + def test_mixed_type_attributes(self): + G = nx.MultiGraph() + G.add_node("n0", special=False) + G.add_node("n1", special=0) + G.add_edge("n0", "n1", special=False) + G.add_edge("n0", "n1", special=0) + fh = io.BytesIO() + self.writer(G, fh) + fh.seek(0) + H = nx.read_graphml(fh) + assert not H.nodes["n0"]["special"] + assert H.nodes["n1"]["special"] == 0 + assert not H.edges["n0", "n1", 0]["special"] + assert H.edges["n0", "n1", 1]["special"] == 0 + + def test_str_number_mixed_type_attributes(self): + G = nx.MultiGraph() + G.add_node("n0", special="hello") + G.add_node("n1", special=0) + G.add_edge("n0", "n1", special="hello") + G.add_edge("n0", "n1", special=0) + fh = io.BytesIO() + self.writer(G, fh) + fh.seek(0) + H = nx.read_graphml(fh) + assert H.nodes["n0"]["special"] == "hello" + assert H.nodes["n1"]["special"] == 0 + assert H.edges["n0", "n1", 0]["special"] == "hello" + assert H.edges["n0", "n1", 1]["special"] == 0 + + def test_mixed_int_type_number_attributes(self): + np = pytest.importorskip("numpy") + G = nx.MultiGraph() + G.add_node("n0", special=np.int64(0)) + G.add_node("n1", special=1) + G.add_edge("n0", "n1", special=np.int64(2)) + G.add_edge("n0", "n1", special=3) + fh = io.BytesIO() + self.writer(G, fh) + fh.seek(0) + H = nx.read_graphml(fh) + assert H.nodes["n0"]["special"] == 0 + assert H.nodes["n1"]["special"] == 1 + assert H.edges["n0", "n1", 0]["special"] == 2 + assert H.edges["n0", "n1", 1]["special"] == 3 + + def test_multigraph_to_graph(self, tmp_path): + # test converting multigraph to graph if no parallel edges found + G = nx.MultiGraph() + G.add_edges_from([("a", "b", 2), ("b", "c", 3)]) # no multiedges + fname = tmp_path / "test.graphml" + self.writer(G, fname) + H = nx.read_graphml(fname) + assert not H.is_multigraph() + H = nx.read_graphml(fname, force_multigraph=True) + assert H.is_multigraph() + + # add a multiedge + G.add_edge("a", "b", "e-id") + fname = tmp_path / "test.graphml" + self.writer(G, fname) + H = nx.read_graphml(fname) + assert H.is_multigraph() + H = nx.read_graphml(fname, force_multigraph=True) + assert H.is_multigraph() + + def test_write_generate_edge_id_from_attribute(self, tmp_path): + from xml.etree.ElementTree import parse + + G = nx.Graph() + G.add_edges_from([("a", "b"), ("b", "c"), ("a", "c")]) + edge_attributes = {e: str(e) for e in G.edges} + nx.set_edge_attributes(G, edge_attributes, "eid") + fname = tmp_path / "test.graphml" + # set edge_id_from_attribute e.g. "eid" for write_graphml() + self.writer(G, fname, edge_id_from_attribute="eid") + # set edge_id_from_attribute e.g. 
"eid" for generate_graphml() + generator = nx.generate_graphml(G, edge_id_from_attribute="eid") + + H = nx.read_graphml(fname) + assert nodes_equal(G.nodes(), H.nodes()) + assert edges_equal(G.edges(), H.edges()) + # NetworkX adds explicit edge "id" from file as attribute + nx.set_edge_attributes(G, edge_attributes, "id") + assert edges_equal(G.edges(data=True), H.edges(data=True)) + + tree = parse(fname) + children = list(tree.getroot()) + assert len(children) == 2 + edge_ids = [ + edge.attrib["id"] + for edge in tree.getroot().findall( + ".//{http://graphml.graphdrawing.org/xmlns}edge" + ) + ] + # verify edge id value is equal to specified attribute value + assert sorted(edge_ids) == sorted(edge_attributes.values()) + + # check graphml generated from generate_graphml() + data = "".join(generator) + J = nx.parse_graphml(data) + assert sorted(G.nodes()) == sorted(J.nodes()) + assert sorted(G.edges()) == sorted(J.edges()) + # NetworkX adds explicit edge "id" from file as attribute + nx.set_edge_attributes(G, edge_attributes, "id") + assert edges_equal(G.edges(data=True), J.edges(data=True)) + + def test_multigraph_write_generate_edge_id_from_attribute(self, tmp_path): + from xml.etree.ElementTree import parse + + G = nx.MultiGraph() + G.add_edges_from([("a", "b"), ("b", "c"), ("a", "c"), ("a", "b")]) + edge_attributes = {e: str(e) for e in G.edges} + nx.set_edge_attributes(G, edge_attributes, "eid") + fname = tmp_path / "test.graphml" + # set edge_id_from_attribute e.g. "eid" for write_graphml() + self.writer(G, fname, edge_id_from_attribute="eid") + # set edge_id_from_attribute e.g. "eid" for generate_graphml() + generator = nx.generate_graphml(G, edge_id_from_attribute="eid") + + H = nx.read_graphml(fname) + assert H.is_multigraph() + H = nx.read_graphml(fname, force_multigraph=True) + assert H.is_multigraph() + + assert nodes_equal(G.nodes(), H.nodes()) + assert edges_equal(G.edges(), H.edges()) + assert sorted(data.get("eid") for u, v, data in H.edges(data=True)) == sorted( + edge_attributes.values() + ) + # NetworkX uses edge_ids as keys in multigraphs if no key + assert sorted(key for u, v, key in H.edges(keys=True)) == sorted( + edge_attributes.values() + ) + + tree = parse(fname) + children = list(tree.getroot()) + assert len(children) == 2 + edge_ids = [ + edge.attrib["id"] + for edge in tree.getroot().findall( + ".//{http://graphml.graphdrawing.org/xmlns}edge" + ) + ] + # verify edge id value is equal to specified attribute value + assert sorted(edge_ids) == sorted(edge_attributes.values()) + + # check graphml generated from generate_graphml() + graphml_data = "".join(generator) + J = nx.parse_graphml(graphml_data) + assert J.is_multigraph() + + assert nodes_equal(G.nodes(), J.nodes()) + assert edges_equal(G.edges(), J.edges()) + assert sorted(data.get("eid") for u, v, data in J.edges(data=True)) == sorted( + edge_attributes.values() + ) + # NetworkX uses edge_ids as keys in multigraphs if no key + assert sorted(key for u, v, key in J.edges(keys=True)) == sorted( + edge_attributes.values() + ) + + def test_numpy_float64(self, tmp_path): + np = pytest.importorskip("numpy") + wt = np.float64(3.4) + G = nx.Graph([(1, 2, {"weight": wt})]) + fname = tmp_path / "test.graphml" + self.writer(G, fname) + H = nx.read_graphml(fname, node_type=int) + assert G.edges == H.edges + wtG = G[1][2]["weight"] + wtH = H[1][2]["weight"] + assert wtG == pytest.approx(wtH, abs=1e-6) + assert type(wtG) == np.float64 + assert type(wtH) == float + + def test_numpy_float32(self, tmp_path): + np = 
pytest.importorskip("numpy") + wt = np.float32(3.4) + G = nx.Graph([(1, 2, {"weight": wt})]) + fname = tmp_path / "test.graphml" + self.writer(G, fname) + H = nx.read_graphml(fname, node_type=int) + assert G.edges == H.edges + wtG = G[1][2]["weight"] + wtH = H[1][2]["weight"] + assert wtG == pytest.approx(wtH, abs=1e-6) + assert type(wtG) == np.float32 + assert type(wtH) == float + + def test_numpy_float64_inference(self, tmp_path): + np = pytest.importorskip("numpy") + G = self.attribute_numeric_type_graph + G.edges[("n1", "n1")]["weight"] = np.float64(1.1) + fname = tmp_path / "test.graphml" + self.writer(G, fname, infer_numeric_types=True) + H = nx.read_graphml(fname) + assert G._adj == H._adj + + def test_unicode_attributes(self, tmp_path): + G = nx.Graph() + name1 = chr(2344) + chr(123) + chr(6543) + name2 = chr(5543) + chr(1543) + chr(324) + node_type = str + G.add_edge(name1, "Radiohead", foo=name2) + fname = tmp_path / "test.graphml" + self.writer(G, fname) + H = nx.read_graphml(fname, node_type=node_type) + assert G._adj == H._adj + + def test_unicode_escape(self): + # test for handling json escaped strings in python 2 Issue #1880 + import json + + a = {"a": '{"a": "123"}'} # an object with many chars to escape + sa = json.dumps(a) + G = nx.Graph() + G.graph["test"] = sa + fh = io.BytesIO() + self.writer(G, fh) + fh.seek(0) + H = nx.read_graphml(fh) + assert G.graph["test"] == H.graph["test"] + + +class TestXMLGraphML(TestWriteGraphML): + writer = staticmethod(nx.write_graphml_xml) + + @classmethod + def setup_class(cls): + TestWriteGraphML.setup_class() + + +def test_exception_for_unsupported_datatype_node_attr(): + """Test that a detailed exception is raised when an attribute is of a type + not supported by GraphML, e.g. a list""" + pytest.importorskip("lxml.etree") + # node attribute + G = nx.Graph() + G.add_node(0, my_list_attribute=[0, 1, 2]) + fh = io.BytesIO() + with pytest.raises(TypeError, match="GraphML does not support"): + nx.write_graphml(G, fh) + + +def test_exception_for_unsupported_datatype_edge_attr(): + """Test that a detailed exception is raised when an attribute is of a type + not supported by GraphML, e.g. a list""" + pytest.importorskip("lxml.etree") + # edge attribute + G = nx.Graph() + G.add_edge(0, 1, my_list_attribute=[0, 1, 2]) + fh = io.BytesIO() + with pytest.raises(TypeError, match="GraphML does not support"): + nx.write_graphml(G, fh) + + +def test_exception_for_unsupported_datatype_graph_attr(): + """Test that a detailed exception is raised when an attribute is of a type + not supported by GraphML, e.g. 
a list""" + pytest.importorskip("lxml.etree") + # graph attribute + G = nx.Graph() + G.graph["my_list_attribute"] = [0, 1, 2] + fh = io.BytesIO() + with pytest.raises(TypeError, match="GraphML does not support"): + nx.write_graphml(G, fh) + + +def test_empty_attribute(): + """Tests that a GraphML string with an empty attribute can be parsed + correctly.""" + s = """ + + + + + + aaa + bbb + + + ccc + + + + """ + fh = io.BytesIO(s.encode("UTF-8")) + G = nx.read_graphml(fh) + assert G.nodes["0"] == {"foo": "aaa", "bar": "bbb"} + assert G.nodes["1"] == {"foo": "ccc", "bar": ""} diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/tests/test_leda.py b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/test_leda.py new file mode 100644 index 0000000000000000000000000000000000000000..8ac5ecc34bf9b42bd49e316bdc72e0e56c76a616 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/test_leda.py @@ -0,0 +1,30 @@ +import io + +import networkx as nx + + +class TestLEDA: + def test_parse_leda(self): + data = """#header section \nLEDA.GRAPH \nstring\nint\n-1\n#nodes section\n5 \n|{v1}| \n|{v2}| \n|{v3}| \n|{v4}| \n|{v5}| \n\n#edges section\n7 \n1 2 0 |{4}| \n1 3 0 |{3}| \n2 3 0 |{2}| \n3 4 0 |{3}| \n3 5 0 |{7}| \n4 5 0 |{6}| \n5 1 0 |{foo}|""" + G = nx.parse_leda(data) + G = nx.parse_leda(data.split("\n")) + assert sorted(G.nodes()) == ["v1", "v2", "v3", "v4", "v5"] + assert sorted(G.edges(data=True)) == [ + ("v1", "v2", {"label": "4"}), + ("v1", "v3", {"label": "3"}), + ("v2", "v3", {"label": "2"}), + ("v3", "v4", {"label": "3"}), + ("v3", "v5", {"label": "7"}), + ("v4", "v5", {"label": "6"}), + ("v5", "v1", {"label": "foo"}), + ] + + def test_read_LEDA(self): + fh = io.BytesIO() + data = """#header section \nLEDA.GRAPH \nstring\nint\n-1\n#nodes section\n5 \n|{v1}| \n|{v2}| \n|{v3}| \n|{v4}| \n|{v5}| \n\n#edges section\n7 \n1 2 0 |{4}| \n1 3 0 |{3}| \n2 3 0 |{2}| \n3 4 0 |{3}| \n3 5 0 |{7}| \n4 5 0 |{6}| \n5 1 0 |{foo}|""" + G = nx.parse_leda(data) + fh.write(data.encode("UTF-8")) + fh.seek(0) + Gin = nx.read_leda(fh) + assert sorted(G.nodes()) == sorted(Gin.nodes()) + assert sorted(G.edges()) == sorted(Gin.edges()) diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/tests/test_p2g.py b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/test_p2g.py new file mode 100644 index 0000000000000000000000000000000000000000..e4c50de7f382f62d4ae6e0cc0443e480487c65e2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/test_p2g.py @@ -0,0 +1,62 @@ +import io + +import networkx as nx +from networkx.readwrite.p2g import read_p2g, write_p2g +from networkx.utils import edges_equal + + +class TestP2G: + @classmethod + def setup_class(cls): + cls.G = nx.Graph(name="test") + e = [("a", "b"), ("b", "c"), ("c", "d"), ("d", "e"), ("e", "f"), ("a", "f")] + cls.G.add_edges_from(e) + cls.G.add_node("g") + cls.DG = nx.DiGraph(cls.G) + + def test_read_p2g(self): + s = b"""\ +name +3 4 +a +1 2 +b + +c +0 2 +""" + bytesIO = io.BytesIO(s) + G = read_p2g(bytesIO) + assert G.name == "name" + assert sorted(G) == ["a", "b", "c"] + edges = [(str(u), str(v)) for u, v in G.edges()] + assert edges_equal(G.edges(), [("a", "c"), ("a", "b"), ("c", "a"), ("c", "c")]) + + def test_write_p2g(self): + s = b"""foo +3 2 +1 +1 +2 +2 +3 + +""" + fh = io.BytesIO() + G = nx.DiGraph() + G.name = "foo" + G.add_edges_from([(1, 2), (2, 3)]) + write_p2g(G, fh) + fh.seek(0) + r = fh.read() + assert r == s + + def test_write_read_p2g(self): + fh = io.BytesIO() + G = 
nx.DiGraph() + G.name = "foo" + G.add_edges_from([("a", "b"), ("b", "c")]) + write_p2g(G, fh) + fh.seek(0) + H = read_p2g(fh) + assert edges_equal(G.edges(), H.edges()) diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/tests/test_pajek.py b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/test_pajek.py new file mode 100644 index 0000000000000000000000000000000000000000..6160c8bce033edd178cf02b9ce5a82ae5cd692e9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/test_pajek.py @@ -0,0 +1,125 @@ +""" +Pajek tests +""" +import networkx as nx +from networkx.utils import edges_equal, nodes_equal + + +class TestPajek: + @classmethod + def setup_class(cls): + cls.data = """*network Tralala\n*vertices 4\n 1 "A1" 0.0938 0.0896 ellipse x_fact 1 y_fact 1\n 2 "Bb" 0.8188 0.2458 ellipse x_fact 1 y_fact 1\n 3 "C" 0.3688 0.7792 ellipse x_fact 1\n 4 "D2" 0.9583 0.8563 ellipse x_fact 1\n*arcs\n1 1 1 h2 0 w 3 c Blue s 3 a1 -130 k1 0.6 a2 -130 k2 0.6 ap 0.5 l "Bezier loop" lc BlueViolet fos 20 lr 58 lp 0.3 la 360\n2 1 1 h2 0 a1 120 k1 1.3 a2 -120 k2 0.3 ap 25 l "Bezier arc" lphi 270 la 180 lr 19 lp 0.5\n1 2 1 h2 0 a1 40 k1 2.8 a2 30 k2 0.8 ap 25 l "Bezier arc" lphi 90 la 0 lp 0.65\n4 2 -1 h2 0 w 1 k1 -2 k2 250 ap 25 l "Circular arc" c Red lc OrangeRed\n3 4 1 p Dashed h2 0 w 2 c OliveGreen ap 25 l "Straight arc" lc PineGreen\n1 3 1 p Dashed h2 0 w 5 k1 -1 k2 -20 ap 25 l "Oval arc" c Brown lc Black\n3 3 -1 h1 6 w 1 h2 12 k1 -2 k2 -15 ap 0.5 l "Circular loop" c Red lc OrangeRed lphi 270 la 180""" + cls.G = nx.MultiDiGraph() + cls.G.add_nodes_from(["A1", "Bb", "C", "D2"]) + cls.G.add_edges_from( + [ + ("A1", "A1"), + ("A1", "Bb"), + ("A1", "C"), + ("Bb", "A1"), + ("C", "C"), + ("C", "D2"), + ("D2", "Bb"), + ] + ) + + cls.G.graph["name"] = "Tralala" + + def test_parse_pajek_simple(self): + # Example without node positions or shape + data = """*Vertices 2\n1 "1"\n2 "2"\n*Edges\n1 2\n2 1""" + G = nx.parse_pajek(data) + assert sorted(G.nodes()) == ["1", "2"] + assert edges_equal(G.edges(), [("1", "2"), ("1", "2")]) + + def test_parse_pajek(self): + G = nx.parse_pajek(self.data) + assert sorted(G.nodes()) == ["A1", "Bb", "C", "D2"] + assert edges_equal( + G.edges(), + [ + ("A1", "A1"), + ("A1", "Bb"), + ("A1", "C"), + ("Bb", "A1"), + ("C", "C"), + ("C", "D2"), + ("D2", "Bb"), + ], + ) + + def test_parse_pajet_mat(self): + data = """*Vertices 3\n1 "one"\n2 "two"\n3 "three"\n*Matrix\n1 1 0\n0 1 0\n0 1 0\n""" + G = nx.parse_pajek(data) + assert set(G.nodes()) == {"one", "two", "three"} + assert G.nodes["two"] == {"id": "2"} + assert edges_equal( + set(G.edges()), + {("one", "one"), ("two", "one"), ("two", "two"), ("two", "three")}, + ) + + def test_read_pajek(self, tmp_path): + G = nx.parse_pajek(self.data) + # Read data from file + fname = tmp_path / "test.pjk" + with open(fname, "wb") as fh: + fh.write(self.data.encode("UTF-8")) + + Gin = nx.read_pajek(fname) + assert sorted(G.nodes()) == sorted(Gin.nodes()) + assert edges_equal(G.edges(), Gin.edges()) + assert self.G.graph == Gin.graph + for n in G: + assert G.nodes[n] == Gin.nodes[n] + + def test_write_pajek(self): + import io + + G = nx.parse_pajek(self.data) + fh = io.BytesIO() + nx.write_pajek(G, fh) + fh.seek(0) + H = nx.read_pajek(fh) + assert nodes_equal(list(G), list(H)) + assert edges_equal(list(G.edges()), list(H.edges())) + # Graph name is left out for now, therefore it is not tested. 
+ # assert_equal(G.graph, H.graph) + + def test_ignored_attribute(self): + import io + + G = nx.Graph() + fh = io.BytesIO() + G.add_node(1, int_attr=1) + G.add_node(2, empty_attr=" ") + G.add_edge(1, 2, int_attr=2) + G.add_edge(2, 3, empty_attr=" ") + + import warnings + + with warnings.catch_warnings(record=True) as w: + nx.write_pajek(G, fh) + assert len(w) == 4 + + def test_noname(self): + # Make sure we can parse a line such as: *network + # Issue #952 + line = "*network\n" + other_lines = self.data.split("\n")[1:] + data = line + "\n".join(other_lines) + G = nx.parse_pajek(data) + + def test_unicode(self): + import io + + G = nx.Graph() + name1 = chr(2344) + chr(123) + chr(6543) + name2 = chr(5543) + chr(1543) + chr(324) + G.add_edge(name1, "Radiohead", foo=name2) + fh = io.BytesIO() + nx.write_pajek(G, fh) + fh.seek(0) + H = nx.read_pajek(fh) + assert nodes_equal(list(G), list(H)) + assert edges_equal(list(G.edges()), list(H.edges())) + assert G.graph == H.graph diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/tests/test_sparse6.py b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/test_sparse6.py new file mode 100644 index 0000000000000000000000000000000000000000..344ad0e45ff42daea5c7cf99c56411e57173ddfe --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/test_sparse6.py @@ -0,0 +1,166 @@ +from io import BytesIO + +import pytest + +import networkx as nx +from networkx.utils import edges_equal, nodes_equal + + +class TestSparseGraph6: + def test_from_sparse6_bytes(self): + data = b":Q___eDcdFcDeFcE`GaJ`IaHbKNbLM" + G = nx.from_sparse6_bytes(data) + assert nodes_equal( + sorted(G.nodes()), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17], + ) + assert edges_equal( + G.edges(), + [ + (0, 1), + (0, 2), + (0, 3), + (1, 12), + (1, 14), + (2, 13), + (2, 15), + (3, 16), + (3, 17), + (4, 7), + (4, 9), + (4, 11), + (5, 6), + (5, 8), + (5, 9), + (6, 10), + (6, 11), + (7, 8), + (7, 10), + (8, 12), + (9, 15), + (10, 14), + (11, 13), + (12, 16), + (13, 17), + (14, 17), + (15, 16), + ], + ) + + def test_from_bytes_multigraph_graph(self): + graph_data = b":An" + G = nx.from_sparse6_bytes(graph_data) + assert type(G) == nx.Graph + multigraph_data = b":Ab" + M = nx.from_sparse6_bytes(multigraph_data) + assert type(M) == nx.MultiGraph + + def test_read_sparse6(self): + data = b":Q___eDcdFcDeFcE`GaJ`IaHbKNbLM" + G = nx.from_sparse6_bytes(data) + fh = BytesIO(data) + Gin = nx.read_sparse6(fh) + assert nodes_equal(G.nodes(), Gin.nodes()) + assert edges_equal(G.edges(), Gin.edges()) + + def test_read_many_graph6(self): + # Read many graphs into list + data = b":Q___eDcdFcDeFcE`GaJ`IaHbKNbLM\n" b":Q___dCfDEdcEgcbEGbFIaJ`JaHN`IM" + fh = BytesIO(data) + glist = nx.read_sparse6(fh) + assert len(glist) == 2 + for G in glist: + assert nodes_equal( + G.nodes(), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17], + ) + + +class TestWriteSparse6: + """Unit tests for writing graphs in the sparse6 format. + + Most of the test cases were checked against the sparse6 encoder in Sage. 
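+ (As the cases below illustrate, write_sparse6 emits an optional
+ b">>sparse6<<" header followed by the ":"-prefixed encoding and a
+ trailing newline; the null graph, for instance, serializes to
+ b">>sparse6<<:?\n".)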
+ + """ + + def test_null_graph(self): + G = nx.null_graph() + result = BytesIO() + nx.write_sparse6(G, result) + assert result.getvalue() == b">>sparse6<<:?\n" + + def test_trivial_graph(self): + G = nx.trivial_graph() + result = BytesIO() + nx.write_sparse6(G, result) + assert result.getvalue() == b">>sparse6<<:@\n" + + def test_empty_graph(self): + G = nx.empty_graph(5) + result = BytesIO() + nx.write_sparse6(G, result) + assert result.getvalue() == b">>sparse6<<:D\n" + + def test_large_empty_graph(self): + G = nx.empty_graph(68) + result = BytesIO() + nx.write_sparse6(G, result) + assert result.getvalue() == b">>sparse6<<:~?@C\n" + + def test_very_large_empty_graph(self): + G = nx.empty_graph(258049) + result = BytesIO() + nx.write_sparse6(G, result) + assert result.getvalue() == b">>sparse6<<:~~???~?@\n" + + def test_complete_graph(self): + G = nx.complete_graph(4) + result = BytesIO() + nx.write_sparse6(G, result) + assert result.getvalue() == b">>sparse6<<:CcKI\n" + + def test_no_header(self): + G = nx.complete_graph(4) + result = BytesIO() + nx.write_sparse6(G, result, header=False) + assert result.getvalue() == b":CcKI\n" + + def test_padding(self): + codes = (b":Cdv", b":DaYn", b":EaYnN", b":FaYnL", b":GaYnLz") + for n, code in enumerate(codes, start=4): + G = nx.path_graph(n) + result = BytesIO() + nx.write_sparse6(G, result, header=False) + assert result.getvalue() == code + b"\n" + + def test_complete_bipartite(self): + G = nx.complete_bipartite_graph(6, 9) + result = BytesIO() + nx.write_sparse6(G, result) + # Compared with sage + expected = b">>sparse6<<:Nk" + b"?G`cJ" * 9 + b"\n" + assert result.getvalue() == expected + + def test_read_write_inverse(self): + for i in list(range(13)) + [31, 47, 62, 63, 64, 72]: + m = min(2 * i, i * i // 2) + g = nx.random_graphs.gnm_random_graph(i, m, seed=i) + gstr = BytesIO() + nx.write_sparse6(g, gstr, header=False) + # Strip the trailing newline. 
+ gstr = gstr.getvalue().rstrip() + g2 = nx.from_sparse6_bytes(gstr) + assert g2.order() == g.order() + assert edges_equal(g2.edges(), g.edges()) + + def test_no_directed_graphs(self): + with pytest.raises(nx.NetworkXNotImplemented): + nx.write_sparse6(nx.DiGraph(), BytesIO()) + + def test_write_path(self, tmp_path): + # Get a valid temporary file name + fullfilename = str(tmp_path / "test.s6") + # file should be closed now, so write_sparse6 can open it + nx.write_sparse6(nx.null_graph(), fullfilename) + with open(fullfilename, mode="rb") as fh: + assert fh.read() == b">>sparse6<<:?\n" diff --git a/venv/lib/python3.10/site-packages/networkx/readwrite/tests/test_text.py b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/test_text.py new file mode 100644 index 0000000000000000000000000000000000000000..0f788280d6d1e736910c9c9cf19abd96a48a1cba --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/readwrite/tests/test_text.py @@ -0,0 +1,1809 @@ +import random +from itertools import product +from textwrap import dedent + +import pytest + +import networkx as nx + + +def test_forest_str_directed(): + # Create a directed forest with labels + graph = nx.balanced_tree(r=2, h=2, create_using=nx.DiGraph) + for node in graph.nodes: + graph.nodes[node]["label"] = "node_" + chr(ord("a") + node) + + node_target = dedent( + """ + ╙── 0 + ├─╼ 1 + │ ├─╼ 3 + │ └─╼ 4 + └─╼ 2 + ├─╼ 5 + └─╼ 6 + """ + ).strip() + + label_target = dedent( + """ + ╙── node_a + ├─╼ node_b + │ ├─╼ node_d + │ └─╼ node_e + └─╼ node_c + ├─╼ node_f + └─╼ node_g + """ + ).strip() + + # Basic node case + ret = nx.forest_str(graph, with_labels=False) + print(ret) + assert ret == node_target + + # Basic label case + ret = nx.forest_str(graph, with_labels=True) + print(ret) + assert ret == label_target + + # Custom write function case + lines = [] + ret = nx.forest_str(graph, write=lines.append, with_labels=False) + assert ret is None + assert lines == node_target.split("\n") + + # Smoke test to ensure passing the print function works. To properly test + # this case we would need to capture stdout. 
(for potential reference + # implementation see :class:`ubelt.util_stream.CaptureStdout`) + ret = nx.forest_str(graph, write=print) + assert ret is None + + +def test_write_network_text_empty_graph(): + def _graph_str(g, **kw): + printbuf = [] + nx.write_network_text(g, printbuf.append, end="", **kw) + return "\n".join(printbuf) + + assert _graph_str(nx.DiGraph()) == "╙" + assert _graph_str(nx.Graph()) == "╙" + assert _graph_str(nx.DiGraph(), ascii_only=True) == "+" + assert _graph_str(nx.Graph(), ascii_only=True) == "+" + + +def test_write_network_text_within_forest_glyph(): + g = nx.DiGraph() + g.add_nodes_from([1, 2, 3, 4]) + g.add_edge(2, 4) + lines = [] + write = lines.append + nx.write_network_text(g, path=write, end="") + nx.write_network_text(g, path=write, ascii_only=True, end="") + text = "\n".join(lines) + print(text) + target = dedent( + """ + ╟── 1 + ╟── 2 + ╎ └─╼ 4 + ╙── 3 + +-- 1 + +-- 2 + : L-> 4 + +-- 3 + """ + ).strip() + assert text == target + + +def test_forest_str_directed_multi_tree(): + tree1 = nx.balanced_tree(r=2, h=2, create_using=nx.DiGraph) + tree2 = nx.balanced_tree(r=2, h=2, create_using=nx.DiGraph) + forest = nx.disjoint_union_all([tree1, tree2]) + ret = nx.forest_str(forest) + print(ret) + + target = dedent( + """ + ╟── 0 + ╎ ├─╼ 1 + ╎ │ ├─╼ 3 + ╎ │ └─╼ 4 + ╎ └─╼ 2 + ╎ ├─╼ 5 + ╎ └─╼ 6 + ╙── 7 + ├─╼ 8 + │ ├─╼ 10 + │ └─╼ 11 + └─╼ 9 + ├─╼ 12 + └─╼ 13 + """ + ).strip() + assert ret == target + + tree3 = nx.balanced_tree(r=2, h=2, create_using=nx.DiGraph) + forest = nx.disjoint_union_all([tree1, tree2, tree3]) + ret = nx.forest_str(forest, sources=[0, 14, 7]) + print(ret) + + target = dedent( + """ + ╟── 0 + ╎ ├─╼ 1 + ╎ │ ├─╼ 3 + ╎ │ └─╼ 4 + ╎ └─╼ 2 + ╎ ├─╼ 5 + ╎ └─╼ 6 + ╟── 14 + ╎ ├─╼ 15 + ╎ │ ├─╼ 17 + ╎ │ └─╼ 18 + ╎ └─╼ 16 + ╎ ├─╼ 19 + ╎ └─╼ 20 + ╙── 7 + ├─╼ 8 + │ ├─╼ 10 + │ └─╼ 11 + └─╼ 9 + ├─╼ 12 + └─╼ 13 + """ + ).strip() + assert ret == target + + ret = nx.forest_str(forest, sources=[0, 14, 7], ascii_only=True) + print(ret) + + target = dedent( + """ + +-- 0 + : |-> 1 + : | |-> 3 + : | L-> 4 + : L-> 2 + : |-> 5 + : L-> 6 + +-- 14 + : |-> 15 + : | |-> 17 + : | L-> 18 + : L-> 16 + : |-> 19 + : L-> 20 + +-- 7 + |-> 8 + | |-> 10 + | L-> 11 + L-> 9 + |-> 12 + L-> 13 + """ + ).strip() + assert ret == target + + +def test_forest_str_undirected_multi_tree(): + tree1 = nx.balanced_tree(r=2, h=2, create_using=nx.Graph) + tree2 = nx.balanced_tree(r=2, h=2, create_using=nx.Graph) + tree2 = nx.relabel_nodes(tree2, {n: n + len(tree1) for n in tree2.nodes}) + forest = nx.union(tree1, tree2) + ret = nx.forest_str(forest, sources=[0, 7]) + print(ret) + + target = dedent( + """ + ╟── 0 + ╎ ├── 1 + ╎ │ ├── 3 + ╎ │ └── 4 + ╎ └── 2 + ╎ ├── 5 + ╎ └── 6 + ╙── 7 + ├── 8 + │ ├── 10 + │ └── 11 + └── 9 + ├── 12 + └── 13 + """ + ).strip() + assert ret == target + + ret = nx.forest_str(forest, sources=[0, 7], ascii_only=True) + print(ret) + + target = dedent( + """ + +-- 0 + : |-- 1 + : | |-- 3 + : | L-- 4 + : L-- 2 + : |-- 5 + : L-- 6 + +-- 7 + |-- 8 + | |-- 10 + | L-- 11 + L-- 9 + |-- 12 + L-- 13 + """ + ).strip() + assert ret == target + + +def test_forest_str_undirected(): + # Create a directed forest + graph = nx.balanced_tree(r=2, h=2, create_using=nx.Graph) + + # arbitrary starting point + nx.forest_str(graph) + + node_target0 = dedent( + """ + ╙── 0 + ├── 1 + │ ├── 3 + │ └── 4 + └── 2 + ├── 5 + └── 6 + """ + ).strip() + + # defined starting point + ret = nx.forest_str(graph, sources=[0]) + print(ret) + assert ret == node_target0 + + # defined starting point + node_target2 = 
dedent( + """ + ╙── 2 + ├── 0 + │ └── 1 + │ ├── 3 + │ └── 4 + ├── 5 + └── 6 + """ + ).strip() + ret = nx.forest_str(graph, sources=[2]) + print(ret) + assert ret == node_target2 + + +def test_forest_str_errors(): + ugraph = nx.complete_graph(3, create_using=nx.Graph) + + with pytest.raises(nx.NetworkXNotImplemented): + nx.forest_str(ugraph) + + dgraph = nx.complete_graph(3, create_using=nx.DiGraph) + + with pytest.raises(nx.NetworkXNotImplemented): + nx.forest_str(dgraph) + + +def test_forest_str_overspecified_sources(): + """ + When sources are directly specified, we won't be able to determine when we + are in the last component, so there will always be a trailing, leftmost + pipe. + """ + graph = nx.disjoint_union_all( + [ + nx.balanced_tree(r=2, h=1, create_using=nx.DiGraph), + nx.balanced_tree(r=1, h=2, create_using=nx.DiGraph), + nx.balanced_tree(r=2, h=1, create_using=nx.DiGraph), + ] + ) + + # defined starting point + target1 = dedent( + """ + ╟── 0 + ╎ ├─╼ 1 + ╎ └─╼ 2 + ╟── 3 + ╎ └─╼ 4 + ╎ └─╼ 5 + ╟── 6 + ╎ ├─╼ 7 + ╎ └─╼ 8 + """ + ).strip() + + target2 = dedent( + """ + ╟── 0 + ╎ ├─╼ 1 + ╎ └─╼ 2 + ╟── 3 + ╎ └─╼ 4 + ╎ └─╼ 5 + ╙── 6 + ├─╼ 7 + └─╼ 8 + """ + ).strip() + + lines = [] + nx.forest_str(graph, write=lines.append, sources=graph.nodes) + got1 = "\n".join(lines) + print("got1: ") + print(got1) + + lines = [] + nx.forest_str(graph, write=lines.append) + got2 = "\n".join(lines) + print("got2: ") + print(got2) + + assert got1 == target1 + assert got2 == target2 + + +def test_write_network_text_iterative_add_directed_edges(): + """ + Walk through the cases going from a disconnected to fully connected graph + """ + graph = nx.DiGraph() + graph.add_nodes_from([1, 2, 3, 4]) + lines = [] + write = lines.append + write("--- initial state ---") + nx.write_network_text(graph, path=write, end="") + for i, j in product(graph.nodes, graph.nodes): + write(f"--- add_edge({i}, {j}) ---") + graph.add_edge(i, j) + nx.write_network_text(graph, path=write, end="") + text = "\n".join(lines) + print(text) + # defined starting point + target = dedent( + """ + --- initial state --- + ╟── 1 + ╟── 2 + ╟── 3 + ╙── 4 + --- add_edge(1, 1) --- + ╟── 1 ╾ 1 + ╎ └─╼ ... + ╟── 2 + ╟── 3 + ╙── 4 + --- add_edge(1, 2) --- + ╟── 1 ╾ 1 + ╎ ├─╼ 2 + ╎ └─╼ ... + ╟── 3 + ╙── 4 + --- add_edge(1, 3) --- + ╟── 1 ╾ 1 + ╎ ├─╼ 2 + ╎ ├─╼ 3 + ╎ └─╼ ... + ╙── 4 + --- add_edge(1, 4) --- + ╙── 1 ╾ 1 + ├─╼ 2 + ├─╼ 3 + ├─╼ 4 + └─╼ ... + --- add_edge(2, 1) --- + ╙── 2 ╾ 1 + └─╼ 1 ╾ 1 + ├─╼ 3 + ├─╼ 4 + └─╼ ... + --- add_edge(2, 2) --- + ╙── 1 ╾ 1, 2 + ├─╼ 2 ╾ 2 + │ └─╼ ... + ├─╼ 3 + ├─╼ 4 + └─╼ ... + --- add_edge(2, 3) --- + ╙── 1 ╾ 1, 2 + ├─╼ 2 ╾ 2 + │ ├─╼ 3 ╾ 1 + │ └─╼ ... + ├─╼ 4 + └─╼ ... + --- add_edge(2, 4) --- + ╙── 1 ╾ 1, 2 + ├─╼ 2 ╾ 2 + │ ├─╼ 3 ╾ 1 + │ ├─╼ 4 ╾ 1 + │ └─╼ ... + └─╼ ... + --- add_edge(3, 1) --- + ╙── 2 ╾ 1, 2 + ├─╼ 1 ╾ 1, 3 + │ ├─╼ 3 ╾ 2 + │ │ └─╼ ... + │ ├─╼ 4 ╾ 2 + │ └─╼ ... + └─╼ ... + --- add_edge(3, 2) --- + ╙── 3 ╾ 1, 2 + ├─╼ 1 ╾ 1, 2 + │ ├─╼ 2 ╾ 2, 3 + │ │ ├─╼ 4 ╾ 1 + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + --- add_edge(3, 3) --- + ╙── 1 ╾ 1, 2, 3 + ├─╼ 2 ╾ 2, 3 + │ ├─╼ 3 ╾ 1, 3 + │ │ └─╼ ... + │ ├─╼ 4 ╾ 1 + │ └─╼ ... + └─╼ ... + --- add_edge(3, 4) --- + ╙── 1 ╾ 1, 2, 3 + ├─╼ 2 ╾ 2, 3 + │ ├─╼ 3 ╾ 1, 3 + │ │ ├─╼ 4 ╾ 1, 2 + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + --- add_edge(4, 1) --- + ╙── 2 ╾ 1, 2, 3 + ├─╼ 1 ╾ 1, 3, 4 + │ ├─╼ 3 ╾ 2, 3 + │ │ ├─╼ 4 ╾ 1, 2 + │ │ │ └─╼ ... + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... 
+ --- add_edge(4, 2) --- + ╙── 3 ╾ 1, 2, 3 + ├─╼ 1 ╾ 1, 2, 4 + │ ├─╼ 2 ╾ 2, 3, 4 + │ │ ├─╼ 4 ╾ 1, 3 + │ │ │ └─╼ ... + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + --- add_edge(4, 3) --- + ╙── 4 ╾ 1, 2, 3 + ├─╼ 1 ╾ 1, 2, 3 + │ ├─╼ 2 ╾ 2, 3, 4 + │ │ ├─╼ 3 ╾ 1, 3, 4 + │ │ │ └─╼ ... + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + --- add_edge(4, 4) --- + ╙── 1 ╾ 1, 2, 3, 4 + ├─╼ 2 ╾ 2, 3, 4 + │ ├─╼ 3 ╾ 1, 3, 4 + │ │ ├─╼ 4 ╾ 1, 2, 4 + │ │ │ └─╼ ... + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + """ + ).strip() + assert target == text + + +def test_write_network_text_iterative_add_undirected_edges(): + """ + Walk through the cases going from a disconnected to fully connected graph + """ + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4]) + lines = [] + write = lines.append + write("--- initial state ---") + nx.write_network_text(graph, path=write, end="") + for i, j in product(graph.nodes, graph.nodes): + if i == j: + continue + write(f"--- add_edge({i}, {j}) ---") + graph.add_edge(i, j) + nx.write_network_text(graph, path=write, end="") + text = "\n".join(lines) + print(text) + target = dedent( + """ + --- initial state --- + ╟── 1 + ╟── 2 + ╟── 3 + ╙── 4 + --- add_edge(1, 2) --- + ╟── 3 + ╟── 4 + ╙── 1 + └── 2 + --- add_edge(1, 3) --- + ╟── 4 + ╙── 2 + └── 1 + └── 3 + --- add_edge(1, 4) --- + ╙── 2 + └── 1 + ├── 3 + └── 4 + --- add_edge(2, 1) --- + ╙── 2 + └── 1 + ├── 3 + └── 4 + --- add_edge(2, 3) --- + ╙── 4 + └── 1 + ├── 2 + │ └── 3 ─ 1 + └── ... + --- add_edge(2, 4) --- + ╙── 3 + ├── 1 + │ ├── 2 ─ 3 + │ │ └── 4 ─ 1 + │ └── ... + └── ... + --- add_edge(3, 1) --- + ╙── 3 + ├── 1 + │ ├── 2 ─ 3 + │ │ └── 4 ─ 1 + │ └── ... + └── ... + --- add_edge(3, 2) --- + ╙── 3 + ├── 1 + │ ├── 2 ─ 3 + │ │ └── 4 ─ 1 + │ └── ... + └── ... + --- add_edge(3, 4) --- + ╙── 1 + ├── 2 + │ ├── 3 ─ 1 + │ │ └── 4 ─ 1, 2 + │ └── ... + └── ... + --- add_edge(4, 1) --- + ╙── 1 + ├── 2 + │ ├── 3 ─ 1 + │ │ └── 4 ─ 1, 2 + │ └── ... + └── ... + --- add_edge(4, 2) --- + ╙── 1 + ├── 2 + │ ├── 3 ─ 1 + │ │ └── 4 ─ 1, 2 + │ └── ... + └── ... + --- add_edge(4, 3) --- + ╙── 1 + ├── 2 + │ ├── 3 ─ 1 + │ │ └── 4 ─ 1, 2 + │ └── ... + └── ... + """ + ).strip() + assert target == text + + +def test_write_network_text_iterative_add_random_directed_edges(): + """ + Walk through the cases going from a disconnected to fully connected graph + """ + + rng = random.Random(724466096) + graph = nx.DiGraph() + graph.add_nodes_from([1, 2, 3, 4, 5]) + possible_edges = list(product(graph.nodes, graph.nodes)) + rng.shuffle(possible_edges) + graph.add_edges_from(possible_edges[0:8]) + lines = [] + write = lines.append + write("--- initial state ---") + nx.write_network_text(graph, path=write, end="") + for i, j in possible_edges[8:12]: + write(f"--- add_edge({i}, {j}) ---") + graph.add_edge(i, j) + nx.write_network_text(graph, path=write, end="") + text = "\n".join(lines) + print(text) + target = dedent( + """ + --- initial state --- + ╙── 3 ╾ 5 + └─╼ 2 ╾ 2 + ├─╼ 4 ╾ 4 + │ ├─╼ 5 + │ │ ├─╼ 1 ╾ 1 + │ │ │ └─╼ ... + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + --- add_edge(4, 1) --- + ╙── 3 ╾ 5 + └─╼ 2 ╾ 2 + ├─╼ 4 ╾ 4 + │ ├─╼ 5 + │ │ ├─╼ 1 ╾ 1, 4 + │ │ │ └─╼ ... + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + --- add_edge(2, 1) --- + ╙── 3 ╾ 5 + └─╼ 2 ╾ 2 + ├─╼ 4 ╾ 4 + │ ├─╼ 5 + │ │ ├─╼ 1 ╾ 1, 4, 2 + │ │ │ └─╼ ... + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + --- add_edge(5, 2) --- + ╙── 3 ╾ 5 + └─╼ 2 ╾ 2, 5 + ├─╼ 4 ╾ 4 + │ ├─╼ 5 + │ │ ├─╼ 1 ╾ 1, 4, 2 + │ │ │ └─╼ ... + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... 
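# A brief note on the glyphs in the directed targets above, sketched on
# a tiny cyclic graph: "╾ a, b" after a node lists incoming edges beyond
# the parent it was reached from, and "└─╼ ..." stands for children that
# are already expanded elsewhere in the layout.
import networkx as nx

g = nx.DiGraph([(1, 2), (3, 2), (2, 1)])
nx.write_network_text(g)  # expect a "╾" backpointer on the shared node 2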
+ --- add_edge(1, 5) --- + ╙── 3 ╾ 5 + └─╼ 2 ╾ 2, 5 + ├─╼ 4 ╾ 4 + │ ├─╼ 5 ╾ 1 + │ │ ├─╼ 1 ╾ 1, 4, 2 + │ │ │ └─╼ ... + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + + """ + ).strip() + assert target == text + + +def test_write_network_text_nearly_forest(): + g = nx.DiGraph() + g.add_edge(1, 2) + g.add_edge(1, 5) + g.add_edge(2, 3) + g.add_edge(3, 4) + g.add_edge(5, 6) + g.add_edge(6, 7) + g.add_edge(6, 8) + orig = g.copy() + g.add_edge(1, 8) # forward edge + g.add_edge(4, 2) # back edge + g.add_edge(6, 3) # cross edge + lines = [] + write = lines.append + write("--- directed case ---") + nx.write_network_text(orig, path=write, end="") + write("--- add (1, 8), (4, 2), (6, 3) ---") + nx.write_network_text(g, path=write, end="") + write("--- undirected case ---") + nx.write_network_text(orig.to_undirected(), path=write, sources=[1], end="") + write("--- add (1, 8), (4, 2), (6, 3) ---") + nx.write_network_text(g.to_undirected(), path=write, sources=[1], end="") + text = "\n".join(lines) + print(text) + target = dedent( + """ + --- directed case --- + ╙── 1 + ├─╼ 2 + │ └─╼ 3 + │ └─╼ 4 + └─╼ 5 + └─╼ 6 + ├─╼ 7 + └─╼ 8 + --- add (1, 8), (4, 2), (6, 3) --- + ╙── 1 + ├─╼ 2 ╾ 4 + │ └─╼ 3 ╾ 6 + │ └─╼ 4 + │ └─╼ ... + ├─╼ 5 + │ └─╼ 6 + │ ├─╼ 7 + │ ├─╼ 8 ╾ 1 + │ └─╼ ... + └─╼ ... + --- undirected case --- + ╙── 1 + ├── 2 + │ └── 3 + │ └── 4 + └── 5 + └── 6 + ├── 7 + └── 8 + --- add (1, 8), (4, 2), (6, 3) --- + ╙── 1 + ├── 2 + │ ├── 3 + │ │ ├── 4 ─ 2 + │ │ └── 6 + │ │ ├── 5 ─ 1 + │ │ ├── 7 + │ │ └── 8 ─ 1 + │ └── ... + └── ... + """ + ).strip() + assert target == text + + +def test_write_network_text_complete_graph_ascii_only(): + graph = nx.generators.complete_graph(5, create_using=nx.DiGraph) + lines = [] + write = lines.append + write("--- directed case ---") + nx.write_network_text(graph, path=write, ascii_only=True, end="") + write("--- undirected case ---") + nx.write_network_text(graph.to_undirected(), path=write, ascii_only=True, end="") + text = "\n".join(lines) + print(text) + target = dedent( + """ + --- directed case --- + +-- 0 <- 1, 2, 3, 4 + |-> 1 <- 2, 3, 4 + | |-> 2 <- 0, 3, 4 + | | |-> 3 <- 0, 1, 4 + | | | |-> 4 <- 0, 1, 2 + | | | | L-> ... + | | | L-> ... + | | L-> ... + | L-> ... + L-> ... + --- undirected case --- + +-- 0 + |-- 1 + | |-- 2 - 0 + | | |-- 3 - 0, 1 + | | | L-- 4 - 0, 1, 2 + | | L-- ... + | L-- ... + L-- ... + """ + ).strip() + assert target == text + + +def test_write_network_text_with_labels(): + graph = nx.generators.complete_graph(5, create_using=nx.DiGraph) + for n in graph.nodes: + graph.nodes[n]["label"] = f"Node(n={n})" + lines = [] + write = lines.append + nx.write_network_text(graph, path=write, with_labels=True, ascii_only=False, end="") + text = "\n".join(lines) + print(text) + # Non trees with labels can get somewhat out of hand with network text + # because we need to immediately show every non-tree edge to the right + target = dedent( + """ + ╙── Node(n=0) ╾ Node(n=1), Node(n=2), Node(n=3), Node(n=4) + ├─╼ Node(n=1) ╾ Node(n=2), Node(n=3), Node(n=4) + │ ├─╼ Node(n=2) ╾ Node(n=0), Node(n=3), Node(n=4) + │ │ ├─╼ Node(n=3) ╾ Node(n=0), Node(n=1), Node(n=4) + │ │ │ ├─╼ Node(n=4) ╾ Node(n=0), Node(n=1), Node(n=2) + │ │ │ │ └─╼ ... + │ │ │ └─╼ ... + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... 
+ """ + ).strip() + assert target == text + + +def test_write_network_text_complete_graphs(): + lines = [] + write = lines.append + for k in [0, 1, 2, 3, 4, 5]: + g = nx.generators.complete_graph(k) + write(f"--- undirected k={k} ---") + nx.write_network_text(g, path=write, end="") + + for k in [0, 1, 2, 3, 4, 5]: + g = nx.generators.complete_graph(k, nx.DiGraph) + write(f"--- directed k={k} ---") + nx.write_network_text(g, path=write, end="") + text = "\n".join(lines) + print(text) + target = dedent( + """ + --- undirected k=0 --- + ╙ + --- undirected k=1 --- + ╙── 0 + --- undirected k=2 --- + ╙── 0 + └── 1 + --- undirected k=3 --- + ╙── 0 + ├── 1 + │ └── 2 ─ 0 + └── ... + --- undirected k=4 --- + ╙── 0 + ├── 1 + │ ├── 2 ─ 0 + │ │ └── 3 ─ 0, 1 + │ └── ... + └── ... + --- undirected k=5 --- + ╙── 0 + ├── 1 + │ ├── 2 ─ 0 + │ │ ├── 3 ─ 0, 1 + │ │ │ └── 4 ─ 0, 1, 2 + │ │ └── ... + │ └── ... + └── ... + --- directed k=0 --- + ╙ + --- directed k=1 --- + ╙── 0 + --- directed k=2 --- + ╙── 0 ╾ 1 + └─╼ 1 + └─╼ ... + --- directed k=3 --- + ╙── 0 ╾ 1, 2 + ├─╼ 1 ╾ 2 + │ ├─╼ 2 ╾ 0 + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + --- directed k=4 --- + ╙── 0 ╾ 1, 2, 3 + ├─╼ 1 ╾ 2, 3 + │ ├─╼ 2 ╾ 0, 3 + │ │ ├─╼ 3 ╾ 0, 1 + │ │ │ └─╼ ... + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + --- directed k=5 --- + ╙── 0 ╾ 1, 2, 3, 4 + ├─╼ 1 ╾ 2, 3, 4 + │ ├─╼ 2 ╾ 0, 3, 4 + │ │ ├─╼ 3 ╾ 0, 1, 4 + │ │ │ ├─╼ 4 ╾ 0, 1, 2 + │ │ │ │ └─╼ ... + │ │ │ └─╼ ... + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + """ + ).strip() + assert target == text + + +def test_write_network_text_multiple_sources(): + g = nx.DiGraph() + g.add_edge(1, 2) + g.add_edge(1, 3) + g.add_edge(2, 4) + g.add_edge(3, 5) + g.add_edge(3, 6) + g.add_edge(5, 4) + g.add_edge(4, 1) + g.add_edge(1, 5) + lines = [] + write = lines.append + # Use each node as the starting point to demonstrate how the representation + # changes. + nodes = sorted(g.nodes()) + for n in nodes: + write(f"--- source node: {n} ---") + nx.write_network_text(g, path=write, sources=[n], end="") + text = "\n".join(lines) + print(text) + target = dedent( + """ + --- source node: 1 --- + ╙── 1 ╾ 4 + ├─╼ 2 + │ └─╼ 4 ╾ 5 + │ └─╼ ... + ├─╼ 3 + │ ├─╼ 5 ╾ 1 + │ │ └─╼ ... + │ └─╼ 6 + └─╼ ... + --- source node: 2 --- + ╙── 2 ╾ 1 + └─╼ 4 ╾ 5 + └─╼ 1 + ├─╼ 3 + │ ├─╼ 5 ╾ 1 + │ │ └─╼ ... + │ └─╼ 6 + └─╼ ... + --- source node: 3 --- + ╙── 3 ╾ 1 + ├─╼ 5 ╾ 1 + │ └─╼ 4 ╾ 2 + │ └─╼ 1 + │ ├─╼ 2 + │ │ └─╼ ... + │ └─╼ ... + └─╼ 6 + --- source node: 4 --- + ╙── 4 ╾ 2, 5 + └─╼ 1 + ├─╼ 2 + │ └─╼ ... + ├─╼ 3 + │ ├─╼ 5 ╾ 1 + │ │ └─╼ ... + │ └─╼ 6 + └─╼ ... + --- source node: 5 --- + ╙── 5 ╾ 3, 1 + └─╼ 4 ╾ 2 + └─╼ 1 + ├─╼ 2 + │ └─╼ ... + ├─╼ 3 + │ ├─╼ 6 + │ └─╼ ... + └─╼ ... 
+ --- source node: 6 --- + ╙── 6 ╾ 3 + """ + ).strip() + assert target == text + + +def test_write_network_text_star_graph(): + graph = nx.star_graph(5, create_using=nx.Graph) + lines = [] + write = lines.append + nx.write_network_text(graph, path=write, end="") + text = "\n".join(lines) + print(text) + target = dedent( + """ + ╙── 1 + └── 0 + ├── 2 + ├── 3 + ├── 4 + └── 5 + """ + ).strip() + assert target == text + + +def test_write_network_text_path_graph(): + graph = nx.path_graph(3, create_using=nx.Graph) + lines = [] + write = lines.append + nx.write_network_text(graph, path=write, end="") + text = "\n".join(lines) + print(text) + target = dedent( + """ + ╙── 0 + └── 1 + └── 2 + """ + ).strip() + assert target == text + + +def test_write_network_text_lollipop_graph(): + graph = nx.lollipop_graph(4, 2, create_using=nx.Graph) + lines = [] + write = lines.append + nx.write_network_text(graph, path=write, end="") + text = "\n".join(lines) + print(text) + target = dedent( + """ + ╙── 5 + └── 4 + └── 3 + ├── 0 + │ ├── 1 ─ 3 + │ │ └── 2 ─ 0, 3 + │ └── ... + └── ... + """ + ).strip() + assert target == text + + +def test_write_network_text_wheel_graph(): + graph = nx.wheel_graph(7, create_using=nx.Graph) + lines = [] + write = lines.append + nx.write_network_text(graph, path=write, end="") + text = "\n".join(lines) + print(text) + target = dedent( + """ + ╙── 1 + ├── 0 + │ ├── 2 ─ 1 + │ │ └── 3 ─ 0 + │ │ └── 4 ─ 0 + │ │ └── 5 ─ 0 + │ │ └── 6 ─ 0, 1 + │ └── ... + └── ... + """ + ).strip() + assert target == text + + +def test_write_network_text_circular_ladder_graph(): + graph = nx.circular_ladder_graph(4, create_using=nx.Graph) + lines = [] + write = lines.append + nx.write_network_text(graph, path=write, end="") + text = "\n".join(lines) + print(text) + target = dedent( + """ + ╙── 0 + ├── 1 + │ ├── 2 + │ │ ├── 3 ─ 0 + │ │ │ └── 7 + │ │ │ ├── 6 ─ 2 + │ │ │ │ └── 5 ─ 1 + │ │ │ │ └── 4 ─ 0, 7 + │ │ │ └── ... + │ │ └── ... + │ └── ... + └── ... + """ + ).strip() + assert target == text + + +def test_write_network_text_dorogovtsev_goltsev_mendes_graph(): + graph = nx.dorogovtsev_goltsev_mendes_graph(4, create_using=nx.Graph) + lines = [] + write = lines.append + nx.write_network_text(graph, path=write, end="") + text = "\n".join(lines) + print(text) + target = dedent( + """ + ╙── 15 + ├── 0 + │ ├── 1 ─ 15 + │ │ ├── 2 ─ 0 + │ │ │ ├── 4 ─ 0 + │ │ │ │ ├── 9 ─ 0 + │ │ │ │ │ ├── 22 ─ 0 + │ │ │ │ │ └── 38 ─ 4 + │ │ │ │ ├── 13 ─ 2 + │ │ │ │ │ ├── 34 ─ 2 + │ │ │ │ │ └── 39 ─ 4 + │ │ │ │ ├── 18 ─ 0 + │ │ │ │ ├── 30 ─ 2 + │ │ │ │ └── ... + │ │ │ ├── 5 ─ 1 + │ │ │ │ ├── 12 ─ 1 + │ │ │ │ │ ├── 29 ─ 1 + │ │ │ │ │ └── 40 ─ 5 + │ │ │ │ ├── 14 ─ 2 + │ │ │ │ │ ├── 35 ─ 2 + │ │ │ │ │ └── 41 ─ 5 + │ │ │ │ ├── 25 ─ 1 + │ │ │ │ ├── 31 ─ 2 + │ │ │ │ └── ... + │ │ │ ├── 7 ─ 0 + │ │ │ │ ├── 20 ─ 0 + │ │ │ │ └── 32 ─ 2 + │ │ │ ├── 10 ─ 1 + │ │ │ │ ├── 27 ─ 1 + │ │ │ │ └── 33 ─ 2 + │ │ │ ├── 16 ─ 0 + │ │ │ ├── 23 ─ 1 + │ │ │ └── ... + │ │ ├── 3 ─ 0 + │ │ │ ├── 8 ─ 0 + │ │ │ │ ├── 21 ─ 0 + │ │ │ │ └── 36 ─ 3 + │ │ │ ├── 11 ─ 1 + │ │ │ │ ├── 28 ─ 1 + │ │ │ │ └── 37 ─ 3 + │ │ │ ├── 17 ─ 0 + │ │ │ ├── 24 ─ 1 + │ │ │ └── ... + │ │ ├── 6 ─ 0 + │ │ │ ├── 19 ─ 0 + │ │ │ └── 26 ─ 1 + │ │ └── ... + │ └── ... + └── ... 
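# A small sketch of the re-rooting behaviour demonstrated above: a
# single-element `sources` list starts the traversal at that node, so
# the same graph yields a different, but equivalent, layout per root.
import networkx as nx

g = nx.DiGraph([(1, 2), (2, 3), (3, 1)])
for n in sorted(g):
    print(f"--- source node: {n} ---")
    nx.write_network_text(g, sources=[n])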
+ """ + ).strip() + assert target == text + + +def test_write_network_text_tree_max_depth(): + orig = nx.balanced_tree(r=1, h=3, create_using=nx.DiGraph) + lines = [] + write = lines.append + write("--- directed case, max_depth=0 ---") + nx.write_network_text(orig, path=write, end="", max_depth=0) + write("--- directed case, max_depth=1 ---") + nx.write_network_text(orig, path=write, end="", max_depth=1) + write("--- directed case, max_depth=2 ---") + nx.write_network_text(orig, path=write, end="", max_depth=2) + write("--- directed case, max_depth=3 ---") + nx.write_network_text(orig, path=write, end="", max_depth=3) + write("--- directed case, max_depth=4 ---") + nx.write_network_text(orig, path=write, end="", max_depth=4) + write("--- undirected case, max_depth=0 ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=0) + write("--- undirected case, max_depth=1 ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=1) + write("--- undirected case, max_depth=2 ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=2) + write("--- undirected case, max_depth=3 ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=3) + write("--- undirected case, max_depth=4 ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=4) + text = "\n".join(lines) + print(text) + target = dedent( + """ + --- directed case, max_depth=0 --- + ╙ ... + --- directed case, max_depth=1 --- + ╙── 0 + └─╼ ... + --- directed case, max_depth=2 --- + ╙── 0 + └─╼ 1 + └─╼ ... + --- directed case, max_depth=3 --- + ╙── 0 + └─╼ 1 + └─╼ 2 + └─╼ ... + --- directed case, max_depth=4 --- + ╙── 0 + └─╼ 1 + └─╼ 2 + └─╼ 3 + --- undirected case, max_depth=0 --- + ╙ ... + --- undirected case, max_depth=1 --- + ╙── 0 ─ 1 + └── ... + --- undirected case, max_depth=2 --- + ╙── 0 + └── 1 ─ 2 + └── ... + --- undirected case, max_depth=3 --- + ╙── 0 + └── 1 + └── 2 ─ 3 + └── ... 
+ --- undirected case, max_depth=4 --- + ╙── 0 + └── 1 + └── 2 + └── 3 + """ + ).strip() + assert target == text + + +def test_write_network_text_graph_max_depth(): + orig = nx.erdos_renyi_graph(10, 0.15, directed=True, seed=40392) + lines = [] + write = lines.append + write("--- directed case, max_depth=None ---") + nx.write_network_text(orig, path=write, end="", max_depth=None) + write("--- directed case, max_depth=0 ---") + nx.write_network_text(orig, path=write, end="", max_depth=0) + write("--- directed case, max_depth=1 ---") + nx.write_network_text(orig, path=write, end="", max_depth=1) + write("--- directed case, max_depth=2 ---") + nx.write_network_text(orig, path=write, end="", max_depth=2) + write("--- directed case, max_depth=3 ---") + nx.write_network_text(orig, path=write, end="", max_depth=3) + write("--- undirected case, max_depth=None ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=None) + write("--- undirected case, max_depth=0 ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=0) + write("--- undirected case, max_depth=1 ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=1) + write("--- undirected case, max_depth=2 ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=2) + write("--- undirected case, max_depth=3 ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=3) + text = "\n".join(lines) + print(text) + target = dedent( + """ + --- directed case, max_depth=None --- + ╟── 4 + ╎ ├─╼ 0 ╾ 3 + ╎ ├─╼ 5 ╾ 7 + ╎ │ └─╼ 3 + ╎ │ ├─╼ 1 ╾ 9 + ╎ │ │ └─╼ 9 ╾ 6 + ╎ │ │ ├─╼ 6 + ╎ │ │ │ └─╼ ... + ╎ │ │ ├─╼ 7 ╾ 4 + ╎ │ │ │ ├─╼ 2 + ╎ │ │ │ └─╼ ... + ╎ │ │ └─╼ ... + ╎ │ └─╼ ... + ╎ └─╼ ... + ╙── 8 + --- directed case, max_depth=0 --- + ╙ ... + --- directed case, max_depth=1 --- + ╟── 4 + ╎ └─╼ ... + ╙── 8 + --- directed case, max_depth=2 --- + ╟── 4 + ╎ ├─╼ 0 ╾ 3 + ╎ ├─╼ 5 ╾ 7 + ╎ │ └─╼ ... + ╎ └─╼ 7 ╾ 9 + ╎ └─╼ ... + ╙── 8 + --- directed case, max_depth=3 --- + ╟── 4 + ╎ ├─╼ 0 ╾ 3 + ╎ ├─╼ 5 ╾ 7 + ╎ │ └─╼ 3 + ╎ │ └─╼ ... + ╎ └─╼ 7 ╾ 9 + ╎ ├─╼ 2 + ╎ └─╼ ... + ╙── 8 + --- undirected case, max_depth=None --- + ╟── 8 + ╙── 2 + └── 7 + ├── 4 + │ ├── 0 + │ │ └── 3 + │ │ ├── 1 + │ │ │ └── 9 ─ 7 + │ │ │ └── 6 + │ │ └── 5 ─ 4, 7 + │ └── ... + └── ... + --- undirected case, max_depth=0 --- + ╙ ... + --- undirected case, max_depth=1 --- + ╟── 8 + ╙── 2 ─ 7 + └── ... + --- undirected case, max_depth=2 --- + ╟── 8 + ╙── 2 + └── 7 ─ 4, 5, 9 + └── ... + --- undirected case, max_depth=3 --- + ╟── 8 + ╙── 2 + └── 7 + ├── 4 ─ 0, 5 + │ └── ... + ├── 5 ─ 4, 3 + │ └── ... + └── 9 ─ 1, 6 + └── ... 
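# A compact sketch of the max_depth truncation exercised above: depth 0
# collapses everything to "...", and each additional level reveals one
# more rank before the "..." continuation marker.
import networkx as nx

tree = nx.balanced_tree(r=1, h=3, create_using=nx.DiGraph)
for depth in [0, 1, 2, 3, 4]:
    print(f"--- max_depth={depth} ---")
    nx.write_network_text(tree, max_depth=depth)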
+ """ + ).strip() + assert target == text + + +def test_write_network_text_clique_max_depth(): + orig = nx.complete_graph(5, nx.DiGraph) + lines = [] + write = lines.append + write("--- directed case, max_depth=None ---") + nx.write_network_text(orig, path=write, end="", max_depth=None) + write("--- directed case, max_depth=0 ---") + nx.write_network_text(orig, path=write, end="", max_depth=0) + write("--- directed case, max_depth=1 ---") + nx.write_network_text(orig, path=write, end="", max_depth=1) + write("--- directed case, max_depth=2 ---") + nx.write_network_text(orig, path=write, end="", max_depth=2) + write("--- directed case, max_depth=3 ---") + nx.write_network_text(orig, path=write, end="", max_depth=3) + write("--- undirected case, max_depth=None ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=None) + write("--- undirected case, max_depth=0 ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=0) + write("--- undirected case, max_depth=1 ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=1) + write("--- undirected case, max_depth=2 ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=2) + write("--- undirected case, max_depth=3 ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=3) + text = "\n".join(lines) + print(text) + target = dedent( + """ + --- directed case, max_depth=None --- + ╙── 0 ╾ 1, 2, 3, 4 + ├─╼ 1 ╾ 2, 3, 4 + │ ├─╼ 2 ╾ 0, 3, 4 + │ │ ├─╼ 3 ╾ 0, 1, 4 + │ │ │ ├─╼ 4 ╾ 0, 1, 2 + │ │ │ │ └─╼ ... + │ │ │ └─╼ ... + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + --- directed case, max_depth=0 --- + ╙ ... + --- directed case, max_depth=1 --- + ╙── 0 ╾ 1, 2, 3, 4 + └─╼ ... + --- directed case, max_depth=2 --- + ╙── 0 ╾ 1, 2, 3, 4 + ├─╼ 1 ╾ 2, 3, 4 + │ └─╼ ... + ├─╼ 2 ╾ 1, 3, 4 + │ └─╼ ... + ├─╼ 3 ╾ 1, 2, 4 + │ └─╼ ... + └─╼ 4 ╾ 1, 2, 3 + └─╼ ... + --- directed case, max_depth=3 --- + ╙── 0 ╾ 1, 2, 3, 4 + ├─╼ 1 ╾ 2, 3, 4 + │ ├─╼ 2 ╾ 0, 3, 4 + │ │ └─╼ ... + │ ├─╼ 3 ╾ 0, 2, 4 + │ │ └─╼ ... + │ ├─╼ 4 ╾ 0, 2, 3 + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + --- undirected case, max_depth=None --- + ╙── 0 + ├── 1 + │ ├── 2 ─ 0 + │ │ ├── 3 ─ 0, 1 + │ │ │ └── 4 ─ 0, 1, 2 + │ │ └── ... + │ └── ... + └── ... + --- undirected case, max_depth=0 --- + ╙ ... + --- undirected case, max_depth=1 --- + ╙── 0 ─ 1, 2, 3, 4 + └── ... + --- undirected case, max_depth=2 --- + ╙── 0 + ├── 1 ─ 2, 3, 4 + │ └── ... + ├── 2 ─ 1, 3, 4 + │ └── ... + ├── 3 ─ 1, 2, 4 + │ └── ... + └── 4 ─ 1, 2, 3 + --- undirected case, max_depth=3 --- + ╙── 0 + ├── 1 + │ ├── 2 ─ 0, 3, 4 + │ │ └── ... + │ ├── 3 ─ 0, 2, 4 + │ │ └── ... + │ └── 4 ─ 0, 2, 3 + └── ... 
+ """ + ).strip() + assert target == text + + +def test_write_network_text_custom_label(): + # Create a directed forest with labels + graph = nx.erdos_renyi_graph(5, 0.4, directed=True, seed=359222358) + for node in graph.nodes: + graph.nodes[node]["label"] = f"Node({node})" + graph.nodes[node]["chr"] = chr(node + ord("a") - 1) + if node % 2 == 0: + graph.nodes[node]["part"] = chr(node + ord("a")) + + lines = [] + write = lines.append + write("--- when with_labels=True, uses the 'label' attr ---") + nx.write_network_text(graph, path=write, with_labels=True, end="", max_depth=None) + write("--- when with_labels=False, uses str(node) value ---") + nx.write_network_text(graph, path=write, with_labels=False, end="", max_depth=None) + write("--- when with_labels is a string, use that attr ---") + nx.write_network_text(graph, path=write, with_labels="chr", end="", max_depth=None) + write("--- fallback to str(node) when the attr does not exist ---") + nx.write_network_text(graph, path=write, with_labels="part", end="", max_depth=None) + + text = "\n".join(lines) + print(text) + target = dedent( + """ + --- when with_labels=True, uses the 'label' attr --- + ╙── Node(1) + └─╼ Node(3) ╾ Node(2) + ├─╼ Node(0) + │ ├─╼ Node(2) ╾ Node(3), Node(4) + │ │ └─╼ ... + │ └─╼ Node(4) + │ └─╼ ... + └─╼ ... + --- when with_labels=False, uses str(node) value --- + ╙── 1 + └─╼ 3 ╾ 2 + ├─╼ 0 + │ ├─╼ 2 ╾ 3, 4 + │ │ └─╼ ... + │ └─╼ 4 + │ └─╼ ... + └─╼ ... + --- when with_labels is a string, use that attr --- + ╙── a + └─╼ c ╾ b + ├─╼ ` + │ ├─╼ b ╾ c, d + │ │ └─╼ ... + │ └─╼ d + │ └─╼ ... + └─╼ ... + --- fallback to str(node) when the attr does not exist --- + ╙── 1 + └─╼ 3 ╾ c + ├─╼ a + │ ├─╼ c ╾ 3, e + │ │ └─╼ ... + │ └─╼ e + │ └─╼ ... + └─╼ ... + """ + ).strip() + assert target == text + + +def test_write_network_text_vertical_chains(): + graph1 = nx.lollipop_graph(4, 2, create_using=nx.Graph) + graph1.add_edge(0, -1) + graph1.add_edge(-1, -2) + graph1.add_edge(-2, -3) + + graph2 = graph1.to_directed() + graph2.remove_edges_from([(u, v) for u, v in graph2.edges if v > u]) + + lines = [] + write = lines.append + write("--- Undirected UTF ---") + nx.write_network_text(graph1, path=write, end="", vertical_chains=True) + write("--- Undirected ASCI ---") + nx.write_network_text( + graph1, path=write, end="", vertical_chains=True, ascii_only=True + ) + write("--- Directed UTF ---") + nx.write_network_text(graph2, path=write, end="", vertical_chains=True) + write("--- Directed ASCI ---") + nx.write_network_text( + graph2, path=write, end="", vertical_chains=True, ascii_only=True + ) + + text = "\n".join(lines) + print(text) + target = dedent( + """ + --- Undirected UTF --- + ╙── 5 + │ + 4 + │ + 3 + ├── 0 + │ ├── 1 ─ 3 + │ │ │ + │ │ 2 ─ 0, 3 + │ ├── -1 + │ │ │ + │ │ -2 + │ │ │ + │ │ -3 + │ └── ... + └── ... + --- Undirected ASCI --- + +-- 5 + | + 4 + | + 3 + |-- 0 + | |-- 1 - 3 + | | | + | | 2 - 0, 3 + | |-- -1 + | | | + | | -2 + | | | + | | -3 + | L-- ... + L-- ... + --- Directed UTF --- + ╙── 5 + ╽ + 4 + ╽ + 3 + ├─╼ 0 ╾ 1, 2 + │ ╽ + │ -1 + │ ╽ + │ -2 + │ ╽ + │ -3 + ├─╼ 1 ╾ 2 + │ └─╼ ... + └─╼ 2 + └─╼ ... + --- Directed ASCI --- + +-- 5 + ! + 4 + ! + 3 + |-> 0 <- 1, 2 + | ! + | -1 + | ! + | -2 + | ! + | -3 + |-> 1 <- 2 + | L-> ... + L-> 2 + L-> ... 
+ """ + ).strip() + assert target == text + + +def test_collapse_directed(): + graph = nx.balanced_tree(r=2, h=3, create_using=nx.DiGraph) + lines = [] + write = lines.append + write("--- Original ---") + nx.write_network_text(graph, path=write, end="") + graph.nodes[1]["collapse"] = True + write("--- Collapse Node 1 ---") + nx.write_network_text(graph, path=write, end="") + write("--- Add alternate path (5, 3) to collapsed zone") + graph.add_edge(5, 3) + nx.write_network_text(graph, path=write, end="") + write("--- Collapse Node 0 ---") + graph.nodes[0]["collapse"] = True + nx.write_network_text(graph, path=write, end="") + text = "\n".join(lines) + print(text) + target = dedent( + """ + --- Original --- + ╙── 0 + ├─╼ 1 + │ ├─╼ 3 + │ │ ├─╼ 7 + │ │ └─╼ 8 + │ └─╼ 4 + │ ├─╼ 9 + │ └─╼ 10 + └─╼ 2 + ├─╼ 5 + │ ├─╼ 11 + │ └─╼ 12 + └─╼ 6 + ├─╼ 13 + └─╼ 14 + --- Collapse Node 1 --- + ╙── 0 + ├─╼ 1 + │ └─╼ ... + └─╼ 2 + ├─╼ 5 + │ ├─╼ 11 + │ └─╼ 12 + └─╼ 6 + ├─╼ 13 + └─╼ 14 + --- Add alternate path (5, 3) to collapsed zone + ╙── 0 + ├─╼ 1 + │ └─╼ ... + └─╼ 2 + ├─╼ 5 + │ ├─╼ 11 + │ ├─╼ 12 + │ └─╼ 3 ╾ 1 + │ ├─╼ 7 + │ └─╼ 8 + └─╼ 6 + ├─╼ 13 + └─╼ 14 + --- Collapse Node 0 --- + ╙── 0 + └─╼ ... + """ + ).strip() + assert target == text + + +def test_collapse_undirected(): + graph = nx.balanced_tree(r=2, h=3, create_using=nx.Graph) + lines = [] + write = lines.append + write("--- Original ---") + nx.write_network_text(graph, path=write, end="", sources=[0]) + graph.nodes[1]["collapse"] = True + write("--- Collapse Node 1 ---") + nx.write_network_text(graph, path=write, end="", sources=[0]) + write("--- Add alternate path (5, 3) to collapsed zone") + graph.add_edge(5, 3) + nx.write_network_text(graph, path=write, end="", sources=[0]) + write("--- Collapse Node 0 ---") + graph.nodes[0]["collapse"] = True + nx.write_network_text(graph, path=write, end="", sources=[0]) + text = "\n".join(lines) + print(text) + target = dedent( + """ + --- Original --- + ╙── 0 + ├── 1 + │ ├── 3 + │ │ ├── 7 + │ │ └── 8 + │ └── 4 + │ ├── 9 + │ └── 10 + └── 2 + ├── 5 + │ ├── 11 + │ └── 12 + └── 6 + ├── 13 + └── 14 + --- Collapse Node 1 --- + ╙── 0 + ├── 1 ─ 3, 4 + │ └── ... + └── 2 + ├── 5 + │ ├── 11 + │ └── 12 + └── 6 + ├── 13 + └── 14 + --- Add alternate path (5, 3) to collapsed zone + ╙── 0 + ├── 1 ─ 3, 4 + │ └── ... + └── 2 + ├── 5 + │ ├── 11 + │ ├── 12 + │ └── 3 ─ 1 + │ ├── 7 + │ └── 8 + └── 6 + ├── 13 + └── 14 + --- Collapse Node 0 --- + ╙── 0 ─ 1, 2 + └── ... 
+ """ + ).strip() + assert target == text + + +def generate_test_graphs(): + """ + Generate a gauntlet of different test graphs with different properties + """ + import random + + rng = random.Random(976689776) + num_randomized = 3 + + for directed in [0, 1]: + cls = nx.DiGraph if directed else nx.Graph + + for num_nodes in range(17): + # Disconnected graph + graph = cls() + graph.add_nodes_from(range(num_nodes)) + yield graph + + # Randomize graphs + if num_nodes > 0: + for p in [0.1, 0.3, 0.5, 0.7, 0.9]: + for seed in range(num_randomized): + graph = nx.erdos_renyi_graph( + num_nodes, p, directed=directed, seed=rng + ) + yield graph + + yield nx.complete_graph(num_nodes, cls) + + yield nx.path_graph(3, create_using=cls) + yield nx.balanced_tree(r=1, h=3, create_using=cls) + if not directed: + yield nx.circular_ladder_graph(4, create_using=cls) + yield nx.star_graph(5, create_using=cls) + yield nx.lollipop_graph(4, 2, create_using=cls) + yield nx.wheel_graph(7, create_using=cls) + yield nx.dorogovtsev_goltsev_mendes_graph(4, create_using=cls) + + +@pytest.mark.parametrize( + ("vertical_chains", "ascii_only"), + tuple( + [ + (vertical_chains, ascii_only) + for vertical_chains in [0, 1] + for ascii_only in [0, 1] + ] + ), +) +def test_network_text_round_trip(vertical_chains, ascii_only): + """ + Write the graph to network text format, then parse it back in, assert it is + the same as the original graph. Passing this test is strong validation of + both the format generator and parser. + """ + from networkx.readwrite.text import _parse_network_text + + for graph in generate_test_graphs(): + graph = nx.relabel_nodes(graph, {n: str(n) for n in graph.nodes}) + lines = list( + nx.generate_network_text( + graph, vertical_chains=vertical_chains, ascii_only=ascii_only + ) + ) + new = _parse_network_text(lines) + try: + assert new.nodes == graph.nodes + assert new.edges == graph.edges + except Exception: + print("ERROR in round trip with graph") + nx.write_network_text(graph) + raise