Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- ckpts/universal/global_step80/zero/13.attention.dense.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step80/zero/13.attention.dense.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step80/zero/16.input_layernorm.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step80/zero/23.post_attention_layernorm.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step80/zero/4.attention.query_key_value.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step80/zero/4.attention.query_key_value.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step80/zero/4.attention.query_key_value.weight/fp32.pt +3 -0
- ckpts/universal/global_step80/zero/9.input_layernorm.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step80/zero/9.input_layernorm.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step80/zero/9.input_layernorm.weight/fp32.pt +3 -0
- venv/lib/python3.10/site-packages/_yaml/__init__.py +33 -0
- venv/lib/python3.10/site-packages/_yaml/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/cpuinfo/__init__.py +5 -0
- venv/lib/python3.10/site-packages/cpuinfo/__main__.py +5 -0
- venv/lib/python3.10/site-packages/cpuinfo/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/cpuinfo/__pycache__/__main__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/cpuinfo/__pycache__/cpuinfo.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/cpuinfo/cpuinfo.py +2827 -0
- venv/lib/python3.10/site-packages/dateutil/__init__.py +24 -0
- venv/lib/python3.10/site-packages/dateutil/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/dateutil/__pycache__/relativedelta.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/dateutil/__pycache__/tzwin.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/dateutil/_common.py +43 -0
- venv/lib/python3.10/site-packages/dateutil/_version.py +4 -0
- venv/lib/python3.10/site-packages/dateutil/easter.py +89 -0
- venv/lib/python3.10/site-packages/dateutil/relativedelta.py +599 -0
- venv/lib/python3.10/site-packages/dateutil/tzwin.py +2 -0
- venv/lib/python3.10/site-packages/dateutil/utils.py +71 -0
- venv/lib/python3.10/site-packages/dateutil/zoneinfo/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/dateutil/zoneinfo/__pycache__/rebuild.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/INSTALLER +1 -0
- venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/LICENSE +27 -0
- venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/METADATA +233 -0
- venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/RECORD +180 -0
- venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/WHEEL +5 -0
- venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/top_level.txt +1 -0
- venv/lib/python3.10/site-packages/networkx/readwrite/__pycache__/edgelist.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/networkx/readwrite/__pycache__/graphml.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/networkx/readwrite/__pycache__/multiline_adjlist.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/networkx/readwrite/__pycache__/p2g.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/networkx/readwrite/__pycache__/sparse6.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/__init__.py +18 -0
- venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/__pycache__/adjacency.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/__pycache__/cytoscape.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/__pycache__/node_link.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/__pycache__/tree.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/adjacency.py +156 -0
- venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/cytoscape.py +178 -0
- venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/node_link.py +244 -0
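The `ckpts/universal/global_step80/zero/...` entries in the list above are per-parameter optimizer-state shards (`exp_avg.pt`, `exp_avg_sq.pt`, and `fp32.pt` look like Adam first/second moments plus fp32 master weights), stored as Git LFS objects. As a rough, hypothetical inspection sketch (not part of this commit; it assumes the LFS blobs have been pulled and PyTorch is installed), one of these files can be loaded like this:

```python
import torch

# Hypothetical path taken from the file list above; requires `git lfs pull` first.
path = "ckpts/universal/global_step80/zero/13.attention.dense.weight/exp_avg.pt"
state = torch.load(path, map_location="cpu")

# The exact structure depends on how the checkpoint writer saved it:
# it may be a bare tensor or a small dict of tensors/metadata.
if torch.is_tensor(state):
    print(state.shape, state.dtype)
else:
    print(type(state))
```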
ckpts/universal/global_step80/zero/13.attention.dense.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3a34a2a1580c149f6417f3440f8beceffbed628b704f54e49d68fc2c77ed64cb
+size 16778396

ckpts/universal/global_step80/zero/13.attention.dense.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b4fafaee522fc5fcc3dfc2dabebd9466883145dd4d635290521bbba1996c1aef
+size 16778411

ckpts/universal/global_step80/zero/16.input_layernorm.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eca5dfc3b12c771303feef09f8cd6db30d4165806844fd473e3a20dc6be0a7b1
+size 9372

ckpts/universal/global_step80/zero/23.post_attention_layernorm.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d394e9b1715caa08708a8828335cf2c1c88e9586f3f89d4ee0c19c78b1e40703
+size 9387

ckpts/universal/global_step80/zero/4.attention.query_key_value.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aa5fddb0f52cd4a6f1e3468729a05e715060803e77ea16f8121fc3797f137f14
+size 50332828

ckpts/universal/global_step80/zero/4.attention.query_key_value.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f43dec9500120252ac82fd3d09f23fbfe9c57a4c4ce593fbb88bd5b7d33f268c
+size 50332843

ckpts/universal/global_step80/zero/4.attention.query_key_value.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e5344df11a25c90747bd21f03b7b51c5486d64382559e526f6e13caf0724e1d8
+size 50332749

ckpts/universal/global_step80/zero/9.input_layernorm.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23fde5177554e6fda2b0638bd17ad1560f943b9a1b74fe3f4ccbf0646e95a8c0
+size 9372

ckpts/universal/global_step80/zero/9.input_layernorm.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4306607fd45995d8104ed028e9c67223da4e014b7a1232986ef7e0bdc4413d40
+size 9387

ckpts/universal/global_step80/zero/9.input_layernorm.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:11a0f04e1a72a1746f7fe46065aaae43d7bb5fd62891637081595b5e45b55fa6
+size 9293
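Each checkpoint entry above is a Git LFS pointer file rather than the tensor data itself: three lines recording the pointer-spec version, the `sha256` object id, and the blob size in bytes. A minimal parsing sketch (illustrative only; `parse_lfs_pointer` is a hypothetical helper, and the sample text is the first pointer shown above):

```python
def parse_lfs_pointer(text: str) -> dict:
    """Split a 'key value' pointer file into a dict and convert size to int."""
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    fields["size"] = int(fields["size"])  # size is the real blob's byte count
    return fields

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:3a34a2a1580c149f6417f3440f8beceffbed628b704f54e49d68fc2c77ed64cb
size 16778396"""
print(parse_lfs_pointer(pointer)["size"])  # -> 16778396
```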
venv/lib/python3.10/site-packages/_yaml/__init__.py
ADDED
@@ -0,0 +1,33 @@
+# This is a stub package designed to roughly emulate the _yaml
+# extension module, which previously existed as a standalone module
+# and has been moved into the `yaml` package namespace.
+# It does not perfectly mimic its old counterpart, but should get
+# close enough for anyone who's relying on it even when they shouldn't.
+import yaml
+
+# in some circumstances, the yaml module we imported may be from a different version, so we need
+# to tread carefully when poking at it here (it may not have the attributes we expect)
+if not getattr(yaml, '__with_libyaml__', False):
+    from sys import version_info
+
+    exc = ModuleNotFoundError if version_info >= (3, 6) else ImportError
+    raise exc("No module named '_yaml'")
+else:
+    from yaml._yaml import *
+    import warnings
+    warnings.warn(
+        'The _yaml extension module is now located at yaml._yaml'
+        ' and its location is subject to change. To use the'
+        ' LibYAML-based parser and emitter, import from `yaml`:'
+        ' `from yaml import CLoader as Loader, CDumper as Dumper`.',
+        DeprecationWarning
+    )
+    del warnings
+    # Don't `del yaml` here because yaml is actually an existing
+    # namespace member of _yaml.
+
+__name__ = '_yaml'
+# If the module is top-level (i.e. not a part of any specific package)
+# then the attribute should be set to ''.
+# https://docs.python.org/3.8/library/types.html
+__package__ = ''
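The deprecation warning in the stub above tells callers to import the LibYAML-backed classes from `yaml` directly. A short usage sketch of that recommended pattern, with the standard pure-Python fallback when the C extension is not available (assumed setup, not part of this commit):

```python
import yaml

# Prefer the LibYAML-based loader/dumper when present, as the warning suggests.
try:
    from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
    from yaml import SafeLoader as Loader, SafeDumper as Dumper

data = yaml.load("a: 1\nb: [2, 3]\n", Loader=Loader)
print(yaml.dump(data, Dumper=Dumper))
```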
venv/lib/python3.10/site-packages/_yaml/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (735 Bytes)
venv/lib/python3.10/site-packages/cpuinfo/__init__.py
ADDED
@@ -0,0 +1,5 @@
+
+import sys
+from cpuinfo.cpuinfo import *
+
+
venv/lib/python3.10/site-packages/cpuinfo/__main__.py
ADDED
@@ -0,0 +1,5 @@
+
+import cpuinfo
+
+cpuinfo.main()
+
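`__main__.py` above makes the package runnable with `python -m cpuinfo`, which calls `main()` from the `cpuinfo.py` module included below. A brief usage sketch based on py-cpuinfo's documented interface (the key names come from the `_copy_new_fields` list in `cpuinfo.py`):

```python
import cpuinfo

# Returns a dict with fields such as 'brand_raw', 'arch', 'bits', 'count', 'flags'.
info = cpuinfo.get_cpu_info()
print(info.get("brand_raw"), info.get("arch"), info.get("count"))
```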
venv/lib/python3.10/site-packages/cpuinfo/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (222 Bytes)

venv/lib/python3.10/site-packages/cpuinfo/__pycache__/__main__.cpython-310.pyc
ADDED
Binary file (210 Bytes)

venv/lib/python3.10/site-packages/cpuinfo/__pycache__/cpuinfo.cpython-310.pyc
ADDED
Binary file (58.8 kB)
venv/lib/python3.10/site-packages/cpuinfo/cpuinfo.py
ADDED
@@ -0,0 +1,2827 @@
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
+
+# Copyright (c) 2014-2022 Matthew Brennan Jones <[email protected]>
+# Py-cpuinfo gets CPU info with pure Python
+# It uses the MIT License
+# It is hosted at: https://github.com/workhorsy/py-cpuinfo
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+CPUINFO_VERSION = (9, 0, 0)
+CPUINFO_VERSION_STRING = '.'.join([str(n) for n in CPUINFO_VERSION])
+
+import os, sys
+import platform
+import multiprocessing
+import ctypes
+
+
+CAN_CALL_CPUID_IN_SUBPROCESS = True
+
+g_trace = None
+
+
+class Trace(object):
+    def __init__(self, is_active, is_stored_in_string):
+        self._is_active = is_active
+        if not self._is_active:
+            return
+
+        from datetime import datetime
+        from io import StringIO
+
+        if is_stored_in_string:
+            self._output = StringIO()
+        else:
+            date = datetime.now().strftime("%Y-%m-%d_%H-%M-%S-%f")
+            self._output = open('cpuinfo_trace_{0}.trace'.format(date), 'w')
+
+        self._stdout = StringIO()
+        self._stderr = StringIO()
+        self._err = None
+
+    def header(self, msg):
+        if not self._is_active: return
+
+        from inspect import stack
+        frame = stack()[1]
+        file = frame[1]
+        line = frame[2]
+        self._output.write("{0} ({1} {2})\n".format(msg, file, line))
+        self._output.flush()
+
+    def success(self):
+        if not self._is_active: return
+
+        from inspect import stack
+        frame = stack()[1]
+        file = frame[1]
+        line = frame[2]
+
+        self._output.write("Success ... ({0} {1})\n\n".format(file, line))
+        self._output.flush()
+
+    def fail(self, msg):
+        if not self._is_active: return
+
+        from inspect import stack
+        frame = stack()[1]
+        file = frame[1]
+        line = frame[2]
+
+        if isinstance(msg, str):
+            msg = ''.join(['\t' + line for line in msg.split('\n')]) + '\n'
+
+            self._output.write(msg)
+            self._output.write("Failed ... ({0} {1})\n\n".format(file, line))
+            self._output.flush()
+        elif isinstance(msg, Exception):
+            from traceback import format_exc
+            err_string = format_exc()
+            self._output.write("\tFailed ... ({0} {1})\n".format(file, line))
+            self._output.write(''.join(['\t\t{0}\n'.format(n) for n in err_string.split('\n')]) + '\n')
+            self._output.flush()
+
+    def command_header(self, msg):
+        if not self._is_active: return
+
+        from inspect import stack
+        frame = stack()[3]
+        file = frame[1]
+        line = frame[2]
+        self._output.write("\t{0} ({1} {2})\n".format(msg, file, line))
+        self._output.flush()
+
+    def command_output(self, msg, output):
+        if not self._is_active: return
+
+        self._output.write("\t\t{0}\n".format(msg))
+        self._output.write(''.join(['\t\t\t{0}\n'.format(n) for n in output.split('\n')]) + '\n')
+        self._output.flush()
+
+    def keys(self, keys, info, new_info):
+        if not self._is_active: return
+
+        from inspect import stack
+        frame = stack()[2]
+        file = frame[1]
+        line = frame[2]
+
+        # List updated keys
+        self._output.write("\tChanged keys ({0} {1})\n".format(file, line))
+        changed_keys = [key for key in keys if key in info and key in new_info and info[key] != new_info[key]]
+        if changed_keys:
+            for key in changed_keys:
+                self._output.write('\t\t{0}: {1} to {2}\n'.format(key, info[key], new_info[key]))
+        else:
+            self._output.write('\t\tNone\n')
+
+        # List new keys
+        self._output.write("\tNew keys ({0} {1})\n".format(file, line))
+        new_keys = [key for key in keys if key in new_info and key not in info]
+        if new_keys:
+            for key in new_keys:
+                self._output.write('\t\t{0}: {1}\n'.format(key, new_info[key]))
+        else:
+            self._output.write('\t\tNone\n')
+
+        self._output.write('\n')
+        self._output.flush()
+
+    def write(self, msg):
+        if not self._is_active: return
+
+        self._output.write(msg + '\n')
+        self._output.flush()
+
+    def to_dict(self, info, is_fail):
+        return {
+            'output' : self._output.getvalue(),
+            'stdout' : self._stdout.getvalue(),
+            'stderr' : self._stderr.getvalue(),
+            'info' : info,
+            'err' : self._err,
+            'is_fail' : is_fail
+        }
+
+class DataSource(object):
+    bits = platform.architecture()[0]
+    cpu_count = multiprocessing.cpu_count()
+    is_windows = platform.system().lower() == 'windows'
+    arch_string_raw = platform.machine()
+    uname_string_raw = platform.uname()[5]
+    can_cpuid = True
+
+    @staticmethod
+    def has_proc_cpuinfo():
+        return os.path.exists('/proc/cpuinfo')
+
+    @staticmethod
+    def has_dmesg():
+        return len(_program_paths('dmesg')) > 0
+
+    @staticmethod
+    def has_var_run_dmesg_boot():
+        uname = platform.system().strip().strip('"').strip("'").strip().lower()
+        return 'linux' in uname and os.path.exists('/var/run/dmesg.boot')
+
+    @staticmethod
+    def has_cpufreq_info():
+        return len(_program_paths('cpufreq-info')) > 0
+
+    @staticmethod
+    def has_sestatus():
+        return len(_program_paths('sestatus')) > 0
+
+    @staticmethod
+    def has_sysctl():
+        return len(_program_paths('sysctl')) > 0
+
+    @staticmethod
+    def has_isainfo():
+        return len(_program_paths('isainfo')) > 0
+
+    @staticmethod
+    def has_kstat():
+        return len(_program_paths('kstat')) > 0
+
+    @staticmethod
+    def has_sysinfo():
+        uname = platform.system().strip().strip('"').strip("'").strip().lower()
+        is_beos = 'beos' in uname or 'haiku' in uname
+        return is_beos and len(_program_paths('sysinfo')) > 0
+
+    @staticmethod
+    def has_lscpu():
+        return len(_program_paths('lscpu')) > 0
+
+    @staticmethod
+    def has_ibm_pa_features():
+        return len(_program_paths('lsprop')) > 0
+
+    @staticmethod
+    def has_wmic():
+        returncode, output = _run_and_get_stdout(['wmic', 'os', 'get', 'Version'])
+        return returncode == 0 and len(output) > 0
+
+    @staticmethod
+    def cat_proc_cpuinfo():
+        return _run_and_get_stdout(['cat', '/proc/cpuinfo'])
+
+    @staticmethod
+    def cpufreq_info():
+        return _run_and_get_stdout(['cpufreq-info'])
+
+    @staticmethod
+    def sestatus_b():
+        return _run_and_get_stdout(['sestatus', '-b'])
+
+    @staticmethod
+    def dmesg_a():
+        return _run_and_get_stdout(['dmesg', '-a'])
+
+    @staticmethod
+    def cat_var_run_dmesg_boot():
+        return _run_and_get_stdout(['cat', '/var/run/dmesg.boot'])
+
+    @staticmethod
+    def sysctl_machdep_cpu_hw_cpufrequency():
+        return _run_and_get_stdout(['sysctl', 'machdep.cpu', 'hw.cpufrequency'])
+
+    @staticmethod
+    def isainfo_vb():
+        return _run_and_get_stdout(['isainfo', '-vb'])
+
+    @staticmethod
+    def kstat_m_cpu_info():
+        return _run_and_get_stdout(['kstat', '-m', 'cpu_info'])
+
+    @staticmethod
+    def sysinfo_cpu():
+        return _run_and_get_stdout(['sysinfo', '-cpu'])
+
+    @staticmethod
+    def lscpu():
+        return _run_and_get_stdout(['lscpu'])
+
+    @staticmethod
+    def ibm_pa_features():
+        import glob
+
+        ibm_features = glob.glob('/proc/device-tree/cpus/*/ibm,pa-features')
+        if ibm_features:
+            return _run_and_get_stdout(['lsprop', ibm_features[0]])
+
+    @staticmethod
+    def wmic_cpu():
+        return _run_and_get_stdout(['wmic', 'cpu', 'get', 'Name,CurrentClockSpeed,L2CacheSize,L3CacheSize,Description,Caption,Manufacturer', '/format:list'])
+
+    @staticmethod
+    def winreg_processor_brand():
+        processor_brand = _read_windows_registry_key(r"Hardware\Description\System\CentralProcessor\0", "ProcessorNameString")
+        return processor_brand.strip()
+
+    @staticmethod
+    def winreg_vendor_id_raw():
+        vendor_id_raw = _read_windows_registry_key(r"Hardware\Description\System\CentralProcessor\0", "VendorIdentifier")
+        return vendor_id_raw
+
+    @staticmethod
+    def winreg_arch_string_raw():
+        arch_string_raw = _read_windows_registry_key(r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment", "PROCESSOR_ARCHITECTURE")
+        return arch_string_raw
+
+    @staticmethod
+    def winreg_hz_actual():
+        hz_actual = _read_windows_registry_key(r"Hardware\Description\System\CentralProcessor\0", "~Mhz")
+        hz_actual = _to_decimal_string(hz_actual)
+        return hz_actual
+
+    @staticmethod
+    def winreg_feature_bits():
+        feature_bits = _read_windows_registry_key(r"Hardware\Description\System\CentralProcessor\0", "FeatureSet")
+        return feature_bits
+
+
+def _program_paths(program_name):
+    paths = []
+    exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep))
+    for p in os.environ['PATH'].split(os.pathsep):
+        p = os.path.join(p, program_name)
+        if os.access(p, os.X_OK):
+            paths.append(p)
+        for e in exts:
+            pext = p + e
+            if os.access(pext, os.X_OK):
+                paths.append(pext)
+    return paths
+
+def _run_and_get_stdout(command, pipe_command=None):
+    from subprocess import Popen, PIPE
+
+    g_trace.command_header('Running command "' + ' '.join(command) + '" ...')
+
+    # Run the command normally
+    if not pipe_command:
+        p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
+    # Run the command and pipe it into another command
+    else:
+        p2 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
+        p1 = Popen(pipe_command, stdin=p2.stdout, stdout=PIPE, stderr=PIPE)
+        p2.stdout.close()
+
+    # Get the stdout and stderr
+    stdout_output, stderr_output = p1.communicate()
+    stdout_output = stdout_output.decode(encoding='UTF-8')
+    stderr_output = stderr_output.decode(encoding='UTF-8')
+
+    # Send the result to the logger
+    g_trace.command_output('return code:', str(p1.returncode))
+    g_trace.command_output('stdout:', stdout_output)
+
+    # Return the return code and stdout
+    return p1.returncode, stdout_output
+
+def _read_windows_registry_key(key_name, field_name):
+    g_trace.command_header('Reading Registry key "{0}" field "{1}" ...'.format(key_name, field_name))
+
+    try:
+        import _winreg as winreg
+    except ImportError as err:
+        try:
+            import winreg
+        except ImportError as err:
+            pass
+
+    key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, key_name)
+    value = winreg.QueryValueEx(key, field_name)[0]
+    winreg.CloseKey(key)
+    g_trace.command_output('value:', str(value))
+    return value
+
+# Make sure we are running on a supported system
+def _check_arch():
+    arch, bits = _parse_arch(DataSource.arch_string_raw)
+    if not arch in ['X86_32', 'X86_64', 'ARM_7', 'ARM_8',
+            'PPC_64', 'S390X', 'MIPS_32', 'MIPS_64',
+            "RISCV_32", "RISCV_64"]:
+        raise Exception("py-cpuinfo currently only works on X86 "
+            "and some ARM/PPC/S390X/MIPS/RISCV CPUs.")
+
+def _obj_to_b64(thing):
+    import pickle
+    import base64
+
+    a = thing
+    b = pickle.dumps(a)
+    c = base64.b64encode(b)
+    d = c.decode('utf8')
+    return d
+
+def _b64_to_obj(thing):
+    import pickle
+    import base64
+
+    try:
+        a = base64.b64decode(thing)
+        b = pickle.loads(a)
+        return b
+    except Exception:
+        return {}
+
+def _utf_to_str(input):
+    if isinstance(input, list):
+        return [_utf_to_str(element) for element in input]
+    elif isinstance(input, dict):
+        return {_utf_to_str(key): _utf_to_str(value)
+            for key, value in input.items()}
+    else:
+        return input
+
+def _copy_new_fields(info, new_info):
+    keys = [
+        'vendor_id_raw', 'hardware_raw', 'brand_raw', 'hz_advertised_friendly', 'hz_actual_friendly',
+        'hz_advertised', 'hz_actual', 'arch', 'bits', 'count',
+        'arch_string_raw', 'uname_string_raw',
+        'l2_cache_size', 'l2_cache_line_size', 'l2_cache_associativity',
+        'stepping', 'model', 'family',
+        'processor_type', 'flags',
+        'l3_cache_size', 'l1_data_cache_size', 'l1_instruction_cache_size'
+    ]
+
+    g_trace.keys(keys, info, new_info)
+
+    # Update the keys with new values
+    for key in keys:
+        if new_info.get(key, None) and not info.get(key, None):
+            info[key] = new_info[key]
+        elif key == 'flags' and new_info.get('flags'):
+            for f in new_info['flags']:
+                if f not in info['flags']: info['flags'].append(f)
+            info['flags'].sort()
+
+def _get_field_actual(cant_be_number, raw_string, field_names):
+    for line in raw_string.splitlines():
+        for field_name in field_names:
+            field_name = field_name.lower()
+            if ':' in line:
+                left, right = line.split(':', 1)
+                left = left.strip().lower()
+                right = right.strip()
+                if left == field_name and len(right) > 0:
+                    if cant_be_number:
+                        if not right.isdigit():
+                            return right
+                    else:
+                        return right
+
+    return None
+
+def _get_field(cant_be_number, raw_string, convert_to, default_value, *field_names):
+    retval = _get_field_actual(cant_be_number, raw_string, field_names)
+
+    # Convert the return value
+    if retval and convert_to:
+        try:
+            retval = convert_to(retval)
+        except Exception:
+            retval = default_value
+
+    # Return the default if there is no return value
+    if retval is None:
+        retval = default_value
+
+    return retval
+
+def _to_decimal_string(ticks):
+    try:
+        # Convert to string
+        ticks = '{0}'.format(ticks)
+        # Sometimes ',' is used as a decimal separator
+        ticks = ticks.replace(',', '.')
+
+        # Strip off non numbers and decimal places
+        ticks = "".join(n for n in ticks if n.isdigit() or n=='.').strip()
+        if ticks == '':
+            ticks = '0'
+
+        # Add decimal if missing
+        if '.' not in ticks:
+            ticks = '{0}.0'.format(ticks)
+
+        # Remove trailing zeros
+        ticks = ticks.rstrip('0')
+
+        # Add one trailing zero for empty right side
+        if ticks.endswith('.'):
+            ticks = '{0}0'.format(ticks)
+
+        # Make sure the number can be converted to a float
+        ticks = float(ticks)
+        ticks = '{0}'.format(ticks)
+        return ticks
+    except Exception:
+        return '0.0'
+
+def _hz_short_to_full(ticks, scale):
+    try:
+        # Make sure the number can be converted to a float
+        ticks = float(ticks)
+        ticks = '{0}'.format(ticks)
+
+        # Scale the numbers
+        hz = ticks.lstrip('0')
+        old_index = hz.index('.')
+        hz = hz.replace('.', '')
+        hz = hz.ljust(scale + old_index+1, '0')
+        new_index = old_index + scale
+        hz = '{0}.{1}'.format(hz[:new_index], hz[new_index:])
+        left, right = hz.split('.')
+        left, right = int(left), int(right)
+        return (left, right)
+    except Exception:
+        return (0, 0)
+
+def _hz_friendly_to_full(hz_string):
+    try:
+        hz_string = hz_string.strip().lower()
+        hz, scale = (None, None)
+
+        if hz_string.endswith('ghz'):
+            scale = 9
+        elif hz_string.endswith('mhz'):
+            scale = 6
+        elif hz_string.endswith('hz'):
+            scale = 0
+
+        hz = "".join(n for n in hz_string if n.isdigit() or n=='.').strip()
+        if not '.' in hz:
+            hz += '.0'
+
+        hz, scale = _hz_short_to_full(hz, scale)
+
+        return (hz, scale)
+    except Exception:
+        return (0, 0)
+
+def _hz_short_to_friendly(ticks, scale):
+    try:
+        # Get the raw Hz as a string
+        left, right = _hz_short_to_full(ticks, scale)
+        result = '{0}.{1}'.format(left, right)
+
+        # Get the location of the dot, and remove said dot
+        dot_index = result.index('.')
+        result = result.replace('.', '')
+
+        # Get the Hz symbol and scale
+        symbol = "Hz"
+        scale = 0
+        if dot_index > 9:
+            symbol = "GHz"
+            scale = 9
+        elif dot_index > 6:
+            symbol = "MHz"
+            scale = 6
+        elif dot_index > 3:
+            symbol = "KHz"
+            scale = 3
+
+        # Get the Hz with the dot at the new scaled point
+        result = '{0}.{1}'.format(result[:-scale-1], result[-scale-1:])
+
+        # Format the ticks to have 4 numbers after the decimal
+        # and remove any superfluous zeroes.
+        result = '{0:.4f} {1}'.format(float(result), symbol)
+        result = result.rstrip('0')
+        return result
+    except Exception:
+        return '0.0000 Hz'
+
+def _to_friendly_bytes(input):
+    import re
+
+    if not input:
+        return input
+    input = "{0}".format(input)
+
+    formats = {
+        r"^[0-9]+B$" : 'B',
+        r"^[0-9]+K$" : 'KB',
+        r"^[0-9]+M$" : 'MB',
+        r"^[0-9]+G$" : 'GB'
+    }
+
+    for pattern, friendly_size in formats.items():
+        if re.match(pattern, input):
+            return "{0} {1}".format(input[ : -1].strip(), friendly_size)
+
+    return input
+
+def _friendly_bytes_to_int(friendly_bytes):
+    input = friendly_bytes.lower()
+
+    formats = [
+        {'gib' : 1024 * 1024 * 1024},
+        {'mib' : 1024 * 1024},
+        {'kib' : 1024},
+
+        {'gb' : 1024 * 1024 * 1024},
+        {'mb' : 1024 * 1024},
+        {'kb' : 1024},
+
+        {'g' : 1024 * 1024 * 1024},
+        {'m' : 1024 * 1024},
+        {'k' : 1024},
+        {'b' : 1},
+    ]
+
+    try:
+        for entry in formats:
+            pattern = list(entry.keys())[0]
+            multiplier = list(entry.values())[0]
+            if input.endswith(pattern):
+                return int(input.split(pattern)[0].strip()) * multiplier
+
+    except Exception as err:
+        pass
+
+    return friendly_bytes
+
+def _parse_cpu_brand_string(cpu_string):
+    # Just return 0 if the processor brand does not have the Hz
+    if not 'hz' in cpu_string.lower():
+        return ('0.0', 0)
+
+    hz = cpu_string.lower()
+    scale = 0
+
+    if hz.endswith('mhz'):
+        scale = 6
+    elif hz.endswith('ghz'):
+        scale = 9
+    if '@' in hz:
+        hz = hz.split('@')[1]
+    else:
+        hz = hz.rsplit(None, 1)[1]
+
+    hz = hz.rstrip('mhz').rstrip('ghz').strip()
+    hz = _to_decimal_string(hz)
+
+    return (hz, scale)
+
+def _parse_cpu_brand_string_dx(cpu_string):
+    import re
+
+    # Find all the strings inside brackets ()
+    starts = [m.start() for m in re.finditer(r"\(", cpu_string)]
+    ends = [m.start() for m in re.finditer(r"\)", cpu_string)]
+    insides = {k: v for k, v in zip(starts, ends)}
+    insides = [cpu_string[start+1 : end] for start, end in insides.items()]
+
+    # Find all the fields
+    vendor_id, stepping, model, family = (None, None, None, None)
+    for inside in insides:
+        for pair in inside.split(','):
+            pair = [n.strip() for n in pair.split(':')]
+            if len(pair) > 1:
+                name, value = pair[0], pair[1]
+                if name == 'origin':
+                    vendor_id = value.strip('"')
+                elif name == 'stepping':
+                    stepping = int(value.lstrip('0x'), 16)
+                elif name == 'model':
+                    model = int(value.lstrip('0x'), 16)
+                elif name in ['fam', 'family']:
+                    family = int(value.lstrip('0x'), 16)
+
+    # Find the Processor Brand
+    # Strip off extra strings in brackets at end
+    brand = cpu_string.strip()
+    is_working = True
+    while is_working:
+        is_working = False
+        for inside in insides:
+            full = "({0})".format(inside)
+            if brand.endswith(full):
+                brand = brand[ :-len(full)].strip()
+                is_working = True
+
+    # Find the Hz in the brand string
+    hz_brand, scale = _parse_cpu_brand_string(brand)
+
+    # Find Hz inside brackets () after the brand string
+    if hz_brand == '0.0':
+        for inside in insides:
+            hz = inside
+            for entry in ['GHz', 'MHz', 'Hz']:
+                if entry in hz:
+                    hz = "CPU @ " + hz[ : hz.find(entry) + len(entry)]
+                    hz_brand, scale = _parse_cpu_brand_string(hz)
+                    break
+
+    return (hz_brand, scale, brand, vendor_id, stepping, model, family)
+
+def _parse_dmesg_output(output):
+    try:
+        # Get all the dmesg lines that might contain a CPU string
+        lines = output.split(' CPU0:')[1:] + \
+                output.split(' CPU1:')[1:] + \
+                output.split(' CPU:')[1:] + \
+                output.split('\nCPU0:')[1:] + \
+                output.split('\nCPU1:')[1:] + \
+                output.split('\nCPU:')[1:]
+        lines = [l.split('\n')[0].strip() for l in lines]
+
+        # Convert the lines to CPU strings
+        cpu_strings = [_parse_cpu_brand_string_dx(l) for l in lines]
+
+        # Find the CPU string that has the most fields
+        best_string = None
+        highest_count = 0
+        for cpu_string in cpu_strings:
+            count = sum([n is not None for n in cpu_string])
+            if count > highest_count:
+                highest_count = count
+                best_string = cpu_string
+
+        # If no CPU string was found, return {}
+        if not best_string:
+            return {}
+
+        hz_actual, scale, processor_brand, vendor_id, stepping, model, family = best_string
+
+        # Origin
+        if ' Origin=' in output:
+            fields = output[output.find(' Origin=') : ].split('\n')[0]
+            fields = fields.strip().split()
+            fields = [n.strip().split('=') for n in fields]
+            fields = [{n[0].strip().lower() : n[1].strip()} for n in fields]
+
+            for field in fields:
+                name = list(field.keys())[0]
+                value = list(field.values())[0]
+
+                if name == 'origin':
+                    vendor_id = value.strip('"')
+                elif name == 'stepping':
+                    stepping = int(value.lstrip('0x'), 16)
+                elif name == 'model':
+                    model = int(value.lstrip('0x'), 16)
+                elif name in ['fam', 'family']:
+                    family = int(value.lstrip('0x'), 16)
+
+        # Features
+        flag_lines = []
+        for category in [' Features=', ' Features2=', ' AMD Features=', ' AMD Features2=']:
+            if category in output:
+                flag_lines.append(output.split(category)[1].split('\n')[0])
+
+        flags = []
+        for line in flag_lines:
+            line = line.split('<')[1].split('>')[0].lower()
+            for flag in line.split(','):
+                flags.append(flag)
+        flags.sort()
+
+        # Convert from GHz/MHz string to Hz
+        hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
+
+        # If advertised hz not found, use the actual hz
+        if hz_advertised == '0.0':
+            scale = 6
+            hz_advertised = _to_decimal_string(hz_actual)
+
+        info = {
+            'vendor_id_raw' : vendor_id,
+            'brand_raw' : processor_brand,
+
+            'stepping' : stepping,
+            'model' : model,
+            'family' : family,
+            'flags' : flags
+        }
+
+        if hz_advertised and hz_advertised != '0.0':
+            info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
+            info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, scale)
+
+        if hz_advertised and hz_advertised != '0.0':
+            info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
+            info['hz_actual'] = _hz_short_to_full(hz_actual, scale)
+
+        return {k: v for k, v in info.items() if v}
+    except Exception as err:
+        g_trace.fail(err)
+        #raise
+
+    return {}
+
+def _parse_arch(arch_string_raw):
+    import re
+
+    arch, bits = None, None
+    arch_string_raw = arch_string_raw.lower()
+
+    # X86
+    if re.match(r'^i\d86$|^x86$|^x86_32$|^i86pc$|^ia32$|^ia-32$|^bepc$', arch_string_raw):
+        arch = 'X86_32'
+        bits = 32
+    elif re.match(r'^x64$|^x86_64$|^x86_64t$|^i686-64$|^amd64$|^ia64$|^ia-64$', arch_string_raw):
+        arch = 'X86_64'
+        bits = 64
+    # ARM
+    elif re.match(r'^armv8-a|aarch64|arm64$', arch_string_raw):
+        arch = 'ARM_8'
+        bits = 64
+    elif re.match(r'^armv7$|^armv7[a-z]$|^armv7-[a-z]$|^armv6[a-z]$', arch_string_raw):
+        arch = 'ARM_7'
+        bits = 32
+    elif re.match(r'^armv8$|^armv8[a-z]$|^armv8-[a-z]$', arch_string_raw):
+        arch = 'ARM_8'
+        bits = 32
+    # PPC
+    elif re.match(r'^ppc32$|^prep$|^pmac$|^powermac$', arch_string_raw):
+        arch = 'PPC_32'
+        bits = 32
+    elif re.match(r'^powerpc$|^ppc64$|^ppc64le$', arch_string_raw):
+        arch = 'PPC_64'
+        bits = 64
+    # SPARC
+    elif re.match(r'^sparc32$|^sparc$', arch_string_raw):
+        arch = 'SPARC_32'
+        bits = 32
+    elif re.match(r'^sparc64$|^sun4u$|^sun4v$', arch_string_raw):
+        arch = 'SPARC_64'
+        bits = 64
+    # S390X
+    elif re.match(r'^s390x$', arch_string_raw):
+        arch = 'S390X'
+        bits = 64
+    elif arch_string_raw == 'mips':
+        arch = 'MIPS_32'
+        bits = 32
+    elif arch_string_raw == 'mips64':
+        arch = 'MIPS_64'
+        bits = 64
+    # RISCV
+    elif re.match(r'^riscv$|^riscv32$|^riscv32be$', arch_string_raw):
+        arch = 'RISCV_32'
+        bits = 32
+    elif re.match(r'^riscv64$|^riscv64be$', arch_string_raw):
+        arch = 'RISCV_64'
+        bits = 64
+
+    return (arch, bits)
+
+def _is_bit_set(reg, bit):
+    mask = 1 << bit
+    is_set = reg & mask > 0
+    return is_set
+
+
+def _is_selinux_enforcing(trace):
+    # Just return if the SE Linux Status Tool is not installed
+    if not DataSource.has_sestatus():
+        trace.fail('Failed to find sestatus.')
+        return False
+
+    # Run the sestatus, and just return if it failed to run
+    returncode, output = DataSource.sestatus_b()
+    if returncode != 0:
+        trace.fail('Failed to run sestatus. Skipping ...')
+        return False
+
+    # Figure out if explicitly in enforcing mode
+    for line in output.splitlines():
+        line = line.strip().lower()
+        if line.startswith("current mode:"):
+            if line.endswith("enforcing"):
+                return True
+            else:
+                return False
+
+    # Figure out if we can execute heap and execute memory
+    can_selinux_exec_heap = False
+    can_selinux_exec_memory = False
+    for line in output.splitlines():
+        line = line.strip().lower()
+        if line.startswith("allow_execheap") and line.endswith("on"):
+            can_selinux_exec_heap = True
+        elif line.startswith("allow_execmem") and line.endswith("on"):
+            can_selinux_exec_memory = True
+
+    trace.command_output('can_selinux_exec_heap:', can_selinux_exec_heap)
+    trace.command_output('can_selinux_exec_memory:', can_selinux_exec_memory)
+
+    return (not can_selinux_exec_heap or not can_selinux_exec_memory)
+
+def _filter_dict_keys_with_empty_values(info, acceptable_values = {}):
+    filtered_info = {}
+    for key in info:
+        value = info[key]
+
+        # Keep if value is acceptable
+        if key in acceptable_values:
+            if acceptable_values[key] == value:
+                filtered_info[key] = value
+                continue
+
+        # Filter out None, 0, "", (), {}, []
+        if not value:
+            continue
+
+        # Filter out (0, 0)
+        if value == (0, 0):
+            continue
+
+        # Filter out -1
+        if value == -1:
+            continue
+
+        # Filter out strings that start with "0.0"
+        if type(value) == str and value.startswith('0.0'):
+            continue
+
+        filtered_info[key] = value
+
+    return filtered_info
+
+class ASM(object):
+    def __init__(self, restype=None, argtypes=(), machine_code=[]):
+        self.restype = restype
+        self.argtypes = argtypes
+        self.machine_code = machine_code
+        self.prochandle = None
+        self.mm = None
+        self.func = None
+        self.address = None
+        self.size = 0
+
+    def compile(self):
+        machine_code = bytes.join(b'', self.machine_code)
+        self.size = ctypes.c_size_t(len(machine_code))
+
+        if DataSource.is_windows:
+            # Allocate a memory segment the size of the machine code, and make it executable
+            size = len(machine_code)
+            # Alloc at least 1 page to ensure we own all pages that we want to change protection on
+            if size < 0x1000: size = 0x1000
+            MEM_COMMIT = ctypes.c_ulong(0x1000)
+            PAGE_READWRITE = ctypes.c_ulong(0x4)
+            pfnVirtualAlloc = ctypes.windll.kernel32.VirtualAlloc
+            pfnVirtualAlloc.restype = ctypes.c_void_p
+            self.address = pfnVirtualAlloc(None, ctypes.c_size_t(size), MEM_COMMIT, PAGE_READWRITE)
+            if not self.address:
+                raise Exception("Failed to VirtualAlloc")
+
+            # Copy the machine code into the memory segment
+            memmove = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t)(ctypes._memmove_addr)
+            if memmove(self.address, machine_code, size) < 0:
+                raise Exception("Failed to memmove")
+
+            # Enable execute permissions
+            PAGE_EXECUTE = ctypes.c_ulong(0x10)
+            old_protect = ctypes.c_ulong(0)
+            pfnVirtualProtect = ctypes.windll.kernel32.VirtualProtect
+            res = pfnVirtualProtect(ctypes.c_void_p(self.address), ctypes.c_size_t(size), PAGE_EXECUTE, ctypes.byref(old_protect))
+            if not res:
+                raise Exception("Failed VirtualProtect")
+
+            # Flush Instruction Cache
+            # First, get process Handle
+            if not self.prochandle:
+                pfnGetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess
+                pfnGetCurrentProcess.restype = ctypes.c_void_p
+                self.prochandle = ctypes.c_void_p(pfnGetCurrentProcess())
+            # Actually flush cache
+            res = ctypes.windll.kernel32.FlushInstructionCache(self.prochandle, ctypes.c_void_p(self.address), ctypes.c_size_t(size))
+            if not res:
+                raise Exception("Failed FlushInstructionCache")
+        else:
+            from mmap import mmap, MAP_PRIVATE, MAP_ANONYMOUS, PROT_WRITE, PROT_READ, PROT_EXEC
+
+            # Allocate a private and executable memory segment the size of the machine code
+            machine_code = bytes.join(b'', self.machine_code)
+            self.size = len(machine_code)
+            self.mm = mmap(-1, self.size, flags=MAP_PRIVATE | MAP_ANONYMOUS, prot=PROT_WRITE | PROT_READ | PROT_EXEC)
+
+            # Copy the machine code into the memory segment
+            self.mm.write(machine_code)
+            self.address = ctypes.addressof(ctypes.c_int.from_buffer(self.mm))
+
+        # Cast the memory segment into a function
+        functype = ctypes.CFUNCTYPE(self.restype, *self.argtypes)
+        self.func = functype(self.address)
+
+    def run(self):
+        # Call the machine code like a function
+        retval = self.func()
+
+        return retval
+
+    def free(self):
+        # Free the function memory segment
+        if DataSource.is_windows:
+            MEM_RELEASE = ctypes.c_ulong(0x8000)
+            ctypes.windll.kernel32.VirtualFree(ctypes.c_void_p(self.address), ctypes.c_size_t(0), MEM_RELEASE)
+        else:
+            self.mm.close()
+
+        self.prochandle = None
+        self.mm = None
+        self.func = None
+        self.address = None
+        self.size = 0
+
+
+class CPUID(object):
+    def __init__(self, trace=None):
+        if trace is None:
+            trace = Trace(False, False)
+
+        # Figure out if SE Linux is on and in enforcing mode
+        self.is_selinux_enforcing = _is_selinux_enforcing(trace)
+
+    def _asm_func(self, restype=None, argtypes=(), machine_code=[]):
+        asm = ASM(restype, argtypes, machine_code)
+        asm.compile()
+        return asm
+
+    def _run_asm(self, *machine_code):
+        asm = ASM(ctypes.c_uint32, (), machine_code)
+        asm.compile()
+        retval = asm.run()
+        asm.free()
+        return retval
+
+    # http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
+    def get_vendor_id(self):
+        # EBX
+        ebx = self._run_asm(
+            b"\x31\xC0", # xor eax,eax
+            b"\x0F\xA2"  # cpuid
+            b"\x89\xD8"  # mov ax,bx
+            b"\xC3"      # ret
+        )
+
+        # ECX
+        ecx = self._run_asm(
+            b"\x31\xC0", # xor eax,eax
+            b"\x0f\xa2"  # cpuid
+            b"\x89\xC8"  # mov ax,cx
+            b"\xC3"      # ret
+        )
+
+        # EDX
+        edx = self._run_asm(
+            b"\x31\xC0", # xor eax,eax
+            b"\x0f\xa2"  # cpuid
+            b"\x89\xD0"  # mov ax,dx
+            b"\xC3"      # ret
+        )
+
+        # Each 4bits is a ascii letter in the name
+        vendor_id = []
+        for reg in [ebx, edx, ecx]:
+            for n in [0, 8, 16, 24]:
+                vendor_id.append(chr((reg >> n) & 0xFF))
+        vendor_id = ''.join(vendor_id)
+
+        return vendor_id
+
+    # http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
+    def get_info(self):
+        # EAX
+        eax = self._run_asm(
+            b"\xB8\x01\x00\x00\x00", # mov eax,0x1"
+            b"\x0f\xa2"              # cpuid
+            b"\xC3"                  # ret
+        )
+
+        # Get the CPU info
+        stepping_id = (eax >> 0) & 0xF # 4 bits
+        model = (eax >> 4) & 0xF # 4 bits
+        family_id = (eax >> 8) & 0xF # 4 bits
+        processor_type = (eax >> 12) & 0x3 # 2 bits
+        extended_model_id = (eax >> 16) & 0xF # 4 bits
+        extended_family_id = (eax >> 20) & 0xFF # 8 bits
+        family = 0
+
+        if family_id in [15]:
+            family = extended_family_id + family_id
+        else:
+            family = family_id
+
+        if family_id in [6, 15]:
+            model = (extended_model_id << 4) + model
+
+        return {
+            'stepping' : stepping_id,
+            'model' : model,
+            'family' : family,
+            'processor_type' : processor_type
+        }
+
+    # http://en.wikipedia.org/wiki/CPUID#EAX.3D80000000h:_Get_Highest_Extended_Function_Supported
+    def get_max_extension_support(self):
+        # Check for extension support
+        max_extension_support = self._run_asm(
+            b"\xB8\x00\x00\x00\x80" # mov ax,0x80000000
+            b"\x0f\xa2"             # cpuid
+            b"\xC3"                 # ret
+        )
+
+        return max_extension_support
+
+    # http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
+    def get_flags(self, max_extension_support):
|
1097 |
+
# EDX
|
1098 |
+
edx = self._run_asm(
|
1099 |
+
b"\xB8\x01\x00\x00\x00", # mov eax,0x1"
|
1100 |
+
b"\x0f\xa2" # cpuid
|
1101 |
+
b"\x89\xD0" # mov ax,dx
|
1102 |
+
b"\xC3" # ret
|
1103 |
+
)
|
1104 |
+
|
1105 |
+
# ECX
|
1106 |
+
ecx = self._run_asm(
|
1107 |
+
b"\xB8\x01\x00\x00\x00", # mov eax,0x1"
|
1108 |
+
b"\x0f\xa2" # cpuid
|
1109 |
+
b"\x89\xC8" # mov ax,cx
|
1110 |
+
b"\xC3" # ret
|
1111 |
+
)
|
1112 |
+
|
1113 |
+
# Get the CPU flags
|
1114 |
+
flags = {
|
1115 |
+
'fpu' : _is_bit_set(edx, 0),
|
1116 |
+
'vme' : _is_bit_set(edx, 1),
|
1117 |
+
'de' : _is_bit_set(edx, 2),
|
1118 |
+
'pse' : _is_bit_set(edx, 3),
|
1119 |
+
'tsc' : _is_bit_set(edx, 4),
|
1120 |
+
'msr' : _is_bit_set(edx, 5),
|
1121 |
+
'pae' : _is_bit_set(edx, 6),
|
1122 |
+
'mce' : _is_bit_set(edx, 7),
|
1123 |
+
'cx8' : _is_bit_set(edx, 8),
|
1124 |
+
'apic' : _is_bit_set(edx, 9),
|
1125 |
+
#'reserved1' : _is_bit_set(edx, 10),
|
1126 |
+
'sep' : _is_bit_set(edx, 11),
|
1127 |
+
'mtrr' : _is_bit_set(edx, 12),
|
1128 |
+
'pge' : _is_bit_set(edx, 13),
|
1129 |
+
'mca' : _is_bit_set(edx, 14),
|
1130 |
+
'cmov' : _is_bit_set(edx, 15),
|
1131 |
+
'pat' : _is_bit_set(edx, 16),
|
1132 |
+
'pse36' : _is_bit_set(edx, 17),
|
1133 |
+
'pn' : _is_bit_set(edx, 18),
|
1134 |
+
'clflush' : _is_bit_set(edx, 19),
|
1135 |
+
#'reserved2' : _is_bit_set(edx, 20),
|
1136 |
+
'dts' : _is_bit_set(edx, 21),
|
1137 |
+
'acpi' : _is_bit_set(edx, 22),
|
1138 |
+
'mmx' : _is_bit_set(edx, 23),
|
1139 |
+
'fxsr' : _is_bit_set(edx, 24),
|
1140 |
+
'sse' : _is_bit_set(edx, 25),
|
1141 |
+
'sse2' : _is_bit_set(edx, 26),
|
1142 |
+
'ss' : _is_bit_set(edx, 27),
|
1143 |
+
'ht' : _is_bit_set(edx, 28),
|
1144 |
+
'tm' : _is_bit_set(edx, 29),
|
1145 |
+
'ia64' : _is_bit_set(edx, 30),
|
1146 |
+
'pbe' : _is_bit_set(edx, 31),
|
1147 |
+
|
1148 |
+
'pni' : _is_bit_set(ecx, 0),
|
1149 |
+
'pclmulqdq' : _is_bit_set(ecx, 1),
|
1150 |
+
'dtes64' : _is_bit_set(ecx, 2),
|
1151 |
+
'monitor' : _is_bit_set(ecx, 3),
|
1152 |
+
'ds_cpl' : _is_bit_set(ecx, 4),
|
1153 |
+
'vmx' : _is_bit_set(ecx, 5),
|
1154 |
+
'smx' : _is_bit_set(ecx, 6),
|
1155 |
+
'est' : _is_bit_set(ecx, 7),
|
1156 |
+
'tm2' : _is_bit_set(ecx, 8),
|
1157 |
+
'ssse3' : _is_bit_set(ecx, 9),
|
1158 |
+
'cid' : _is_bit_set(ecx, 10),
|
1159 |
+
#'reserved3' : _is_bit_set(ecx, 11),
|
1160 |
+
'fma' : _is_bit_set(ecx, 12),
|
1161 |
+
'cx16' : _is_bit_set(ecx, 13),
|
1162 |
+
'xtpr' : _is_bit_set(ecx, 14),
|
1163 |
+
'pdcm' : _is_bit_set(ecx, 15),
|
1164 |
+
#'reserved4' : _is_bit_set(ecx, 16),
|
1165 |
+
'pcid' : _is_bit_set(ecx, 17),
|
1166 |
+
'dca' : _is_bit_set(ecx, 18),
|
1167 |
+
'sse4_1' : _is_bit_set(ecx, 19),
|
1168 |
+
'sse4_2' : _is_bit_set(ecx, 20),
|
1169 |
+
'x2apic' : _is_bit_set(ecx, 21),
|
1170 |
+
'movbe' : _is_bit_set(ecx, 22),
|
1171 |
+
'popcnt' : _is_bit_set(ecx, 23),
|
1172 |
+
'tscdeadline' : _is_bit_set(ecx, 24),
|
1173 |
+
'aes' : _is_bit_set(ecx, 25),
|
1174 |
+
'xsave' : _is_bit_set(ecx, 26),
|
1175 |
+
'osxsave' : _is_bit_set(ecx, 27),
|
1176 |
+
'avx' : _is_bit_set(ecx, 28),
|
1177 |
+
'f16c' : _is_bit_set(ecx, 29),
|
1178 |
+
'rdrnd' : _is_bit_set(ecx, 30),
|
1179 |
+
'hypervisor' : _is_bit_set(ecx, 31)
|
1180 |
+
}
|
1181 |
+
|
1182 |
+
# Get a list of only the flags that are true
|
1183 |
+
flags = [k for k, v in flags.items() if v]
|
1184 |
+
|
1185 |
+
# http://en.wikipedia.org/wiki/CPUID#EAX.3D7.2C_ECX.3D0:_Extended_Features
|
1186 |
+
if max_extension_support >= 7:
|
1187 |
+
# EBX
|
1188 |
+
ebx = self._run_asm(
|
1189 |
+
b"\x31\xC9", # xor ecx,ecx
|
1190 |
+
b"\xB8\x07\x00\x00\x00" # mov eax,7
|
1191 |
+
b"\x0f\xa2" # cpuid
|
1192 |
+
b"\x89\xD8" # mov ax,bx
|
1193 |
+
b"\xC3" # ret
|
1194 |
+
)
|
1195 |
+
|
1196 |
+
# ECX
|
1197 |
+
ecx = self._run_asm(
|
1198 |
+
b"\x31\xC9", # xor ecx,ecx
|
1199 |
+
b"\xB8\x07\x00\x00\x00" # mov eax,7
|
1200 |
+
b"\x0f\xa2" # cpuid
|
1201 |
+
b"\x89\xC8" # mov ax,cx
|
1202 |
+
b"\xC3" # ret
|
1203 |
+
)
|
1204 |
+
|
1205 |
+
# Get the extended CPU flags
|
1206 |
+
extended_flags = {
|
1207 |
+
#'fsgsbase' : _is_bit_set(ebx, 0),
|
1208 |
+
#'IA32_TSC_ADJUST' : _is_bit_set(ebx, 1),
|
1209 |
+
'sgx' : _is_bit_set(ebx, 2),
|
1210 |
+
'bmi1' : _is_bit_set(ebx, 3),
|
1211 |
+
'hle' : _is_bit_set(ebx, 4),
|
1212 |
+
'avx2' : _is_bit_set(ebx, 5),
|
1213 |
+
#'reserved' : _is_bit_set(ebx, 6),
|
1214 |
+
'smep' : _is_bit_set(ebx, 7),
|
1215 |
+
'bmi2' : _is_bit_set(ebx, 8),
|
1216 |
+
'erms' : _is_bit_set(ebx, 9),
|
1217 |
+
'invpcid' : _is_bit_set(ebx, 10),
|
1218 |
+
'rtm' : _is_bit_set(ebx, 11),
|
1219 |
+
'pqm' : _is_bit_set(ebx, 12),
|
1220 |
+
#'FPU CS and FPU DS deprecated' : _is_bit_set(ebx, 13),
|
1221 |
+
'mpx' : _is_bit_set(ebx, 14),
|
1222 |
+
'pqe' : _is_bit_set(ebx, 15),
|
1223 |
+
'avx512f' : _is_bit_set(ebx, 16),
|
1224 |
+
'avx512dq' : _is_bit_set(ebx, 17),
|
1225 |
+
'rdseed' : _is_bit_set(ebx, 18),
|
1226 |
+
'adx' : _is_bit_set(ebx, 19),
|
1227 |
+
'smap' : _is_bit_set(ebx, 20),
|
1228 |
+
'avx512ifma' : _is_bit_set(ebx, 21),
|
1229 |
+
'pcommit' : _is_bit_set(ebx, 22),
|
1230 |
+
'clflushopt' : _is_bit_set(ebx, 23),
|
1231 |
+
'clwb' : _is_bit_set(ebx, 24),
|
1232 |
+
'intel_pt' : _is_bit_set(ebx, 25),
|
1233 |
+
'avx512pf' : _is_bit_set(ebx, 26),
|
1234 |
+
'avx512er' : _is_bit_set(ebx, 27),
|
1235 |
+
'avx512cd' : _is_bit_set(ebx, 28),
|
1236 |
+
'sha' : _is_bit_set(ebx, 29),
|
1237 |
+
'avx512bw' : _is_bit_set(ebx, 30),
|
1238 |
+
'avx512vl' : _is_bit_set(ebx, 31),
|
1239 |
+
|
1240 |
+
'prefetchwt1' : _is_bit_set(ecx, 0),
|
1241 |
+
'avx512vbmi' : _is_bit_set(ecx, 1),
|
1242 |
+
'umip' : _is_bit_set(ecx, 2),
|
1243 |
+
'pku' : _is_bit_set(ecx, 3),
|
1244 |
+
'ospke' : _is_bit_set(ecx, 4),
|
1245 |
+
#'reserved' : _is_bit_set(ecx, 5),
|
1246 |
+
'avx512vbmi2' : _is_bit_set(ecx, 6),
|
1247 |
+
#'reserved' : _is_bit_set(ecx, 7),
|
1248 |
+
'gfni' : _is_bit_set(ecx, 8),
|
1249 |
+
'vaes' : _is_bit_set(ecx, 9),
|
1250 |
+
'vpclmulqdq' : _is_bit_set(ecx, 10),
|
1251 |
+
'avx512vnni' : _is_bit_set(ecx, 11),
|
1252 |
+
'avx512bitalg' : _is_bit_set(ecx, 12),
|
1253 |
+
#'reserved' : _is_bit_set(ecx, 13),
|
1254 |
+
'avx512vpopcntdq' : _is_bit_set(ecx, 14),
|
1255 |
+
#'reserved' : _is_bit_set(ecx, 15),
|
1256 |
+
#'reserved' : _is_bit_set(ecx, 16),
|
1257 |
+
#'mpx0' : _is_bit_set(ecx, 17),
|
1258 |
+
#'mpx1' : _is_bit_set(ecx, 18),
|
1259 |
+
#'mpx2' : _is_bit_set(ecx, 19),
|
1260 |
+
#'mpx3' : _is_bit_set(ecx, 20),
|
1261 |
+
#'mpx4' : _is_bit_set(ecx, 21),
|
1262 |
+
'rdpid' : _is_bit_set(ecx, 22),
|
1263 |
+
#'reserved' : _is_bit_set(ecx, 23),
|
1264 |
+
#'reserved' : _is_bit_set(ecx, 24),
|
1265 |
+
#'reserved' : _is_bit_set(ecx, 25),
|
1266 |
+
#'reserved' : _is_bit_set(ecx, 26),
|
1267 |
+
#'reserved' : _is_bit_set(ecx, 27),
|
1268 |
+
#'reserved' : _is_bit_set(ecx, 28),
|
1269 |
+
#'reserved' : _is_bit_set(ecx, 29),
|
1270 |
+
'sgx_lc' : _is_bit_set(ecx, 30),
|
1271 |
+
#'reserved' : _is_bit_set(ecx, 31)
|
1272 |
+
}
|
1273 |
+
|
1274 |
+
# Get a list of only the flags that are true
|
1275 |
+
extended_flags = [k for k, v in extended_flags.items() if v]
|
1276 |
+
flags += extended_flags
|
1277 |
+
|
1278 |
+
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000001h:_Extended_Processor_Info_and_Feature_Bits
|
1279 |
+
if max_extension_support >= 0x80000001:
|
1280 |
+
# EBX
|
1281 |
+
ebx = self._run_asm(
|
1282 |
+
b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
|
1283 |
+
b"\x0f\xa2" # cpuid
|
1284 |
+
b"\x89\xD8" # mov ax,bx
|
1285 |
+
b"\xC3" # ret
|
1286 |
+
)
|
1287 |
+
|
1288 |
+
# ECX
|
1289 |
+
ecx = self._run_asm(
|
1290 |
+
b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
|
1291 |
+
b"\x0f\xa2" # cpuid
|
1292 |
+
b"\x89\xC8" # mov ax,cx
|
1293 |
+
b"\xC3" # ret
|
1294 |
+
)
|
1295 |
+
|
1296 |
+
# Get the extended CPU flags
|
1297 |
+
extended_flags = {
|
1298 |
+
'fpu' : _is_bit_set(ebx, 0),
|
1299 |
+
'vme' : _is_bit_set(ebx, 1),
|
1300 |
+
'de' : _is_bit_set(ebx, 2),
|
1301 |
+
'pse' : _is_bit_set(ebx, 3),
|
1302 |
+
'tsc' : _is_bit_set(ebx, 4),
|
1303 |
+
'msr' : _is_bit_set(ebx, 5),
|
1304 |
+
'pae' : _is_bit_set(ebx, 6),
|
1305 |
+
'mce' : _is_bit_set(ebx, 7),
|
1306 |
+
'cx8' : _is_bit_set(ebx, 8),
|
1307 |
+
'apic' : _is_bit_set(ebx, 9),
|
1308 |
+
#'reserved' : _is_bit_set(ebx, 10),
|
1309 |
+
'syscall' : _is_bit_set(ebx, 11),
|
1310 |
+
'mtrr' : _is_bit_set(ebx, 12),
|
1311 |
+
'pge' : _is_bit_set(ebx, 13),
|
1312 |
+
'mca' : _is_bit_set(ebx, 14),
|
1313 |
+
'cmov' : _is_bit_set(ebx, 15),
|
1314 |
+
'pat' : _is_bit_set(ebx, 16),
|
1315 |
+
'pse36' : _is_bit_set(ebx, 17),
|
1316 |
+
#'reserved' : _is_bit_set(ebx, 18),
|
1317 |
+
'mp' : _is_bit_set(ebx, 19),
|
1318 |
+
'nx' : _is_bit_set(ebx, 20),
|
1319 |
+
#'reserved' : _is_bit_set(ebx, 21),
|
1320 |
+
'mmxext' : _is_bit_set(ebx, 22),
|
1321 |
+
'mmx' : _is_bit_set(ebx, 23),
|
1322 |
+
'fxsr' : _is_bit_set(ebx, 24),
|
1323 |
+
'fxsr_opt' : _is_bit_set(ebx, 25),
|
1324 |
+
'pdpe1gp' : _is_bit_set(ebx, 26),
|
1325 |
+
'rdtscp' : _is_bit_set(ebx, 27),
|
1326 |
+
#'reserved' : _is_bit_set(ebx, 28),
|
1327 |
+
'lm' : _is_bit_set(ebx, 29),
|
1328 |
+
'3dnowext' : _is_bit_set(ebx, 30),
|
1329 |
+
'3dnow' : _is_bit_set(ebx, 31),
|
1330 |
+
|
1331 |
+
'lahf_lm' : _is_bit_set(ecx, 0),
|
1332 |
+
'cmp_legacy' : _is_bit_set(ecx, 1),
|
1333 |
+
'svm' : _is_bit_set(ecx, 2),
|
1334 |
+
'extapic' : _is_bit_set(ecx, 3),
|
1335 |
+
'cr8_legacy' : _is_bit_set(ecx, 4),
|
1336 |
+
'abm' : _is_bit_set(ecx, 5),
|
1337 |
+
'sse4a' : _is_bit_set(ecx, 6),
|
1338 |
+
'misalignsse' : _is_bit_set(ecx, 7),
|
1339 |
+
'3dnowprefetch' : _is_bit_set(ecx, 8),
|
1340 |
+
'osvw' : _is_bit_set(ecx, 9),
|
1341 |
+
'ibs' : _is_bit_set(ecx, 10),
|
1342 |
+
'xop' : _is_bit_set(ecx, 11),
|
1343 |
+
'skinit' : _is_bit_set(ecx, 12),
|
1344 |
+
'wdt' : _is_bit_set(ecx, 13),
|
1345 |
+
#'reserved' : _is_bit_set(ecx, 14),
|
1346 |
+
'lwp' : _is_bit_set(ecx, 15),
|
1347 |
+
'fma4' : _is_bit_set(ecx, 16),
|
1348 |
+
'tce' : _is_bit_set(ecx, 17),
|
1349 |
+
#'reserved' : _is_bit_set(ecx, 18),
|
1350 |
+
'nodeid_msr' : _is_bit_set(ecx, 19),
|
1351 |
+
#'reserved' : _is_bit_set(ecx, 20),
|
1352 |
+
'tbm' : _is_bit_set(ecx, 21),
|
1353 |
+
'topoext' : _is_bit_set(ecx, 22),
|
1354 |
+
'perfctr_core' : _is_bit_set(ecx, 23),
|
1355 |
+
'perfctr_nb' : _is_bit_set(ecx, 24),
|
1356 |
+
#'reserved' : _is_bit_set(ecx, 25),
|
1357 |
+
'dbx' : _is_bit_set(ecx, 26),
|
1358 |
+
'perftsc' : _is_bit_set(ecx, 27),
|
1359 |
+
'pci_l2i' : _is_bit_set(ecx, 28),
|
1360 |
+
#'reserved' : _is_bit_set(ecx, 29),
|
1361 |
+
#'reserved' : _is_bit_set(ecx, 30),
|
1362 |
+
#'reserved' : _is_bit_set(ecx, 31)
|
1363 |
+
}
|
1364 |
+
|
1365 |
+
# Get a list of only the flags that are true
|
1366 |
+
extended_flags = [k for k, v in extended_flags.items() if v]
|
1367 |
+
flags += extended_flags
|
1368 |
+
|
1369 |
+
flags.sort()
|
1370 |
+
return flags
|
1371 |
+
|
1372 |
+
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000002h.2C80000003h.2C80000004h:_Processor_Brand_String
|
1373 |
+
def get_processor_brand(self, max_extension_support):
|
1374 |
+
processor_brand = ""
|
1375 |
+
|
1376 |
+
# Processor brand string
|
1377 |
+
if max_extension_support >= 0x80000004:
|
1378 |
+
instructions = [
|
1379 |
+
b"\xB8\x02\x00\x00\x80", # mov ax,0x80000002
|
1380 |
+
b"\xB8\x03\x00\x00\x80", # mov ax,0x80000003
|
1381 |
+
b"\xB8\x04\x00\x00\x80" # mov ax,0x80000004
|
1382 |
+
]
|
1383 |
+
for instruction in instructions:
|
1384 |
+
# EAX
|
1385 |
+
eax = self._run_asm(
|
1386 |
+
instruction, # mov ax,0x8000000?
|
1387 |
+
b"\x0f\xa2" # cpuid
|
1388 |
+
b"\x89\xC0" # mov ax,ax
|
1389 |
+
b"\xC3" # ret
|
1390 |
+
)
|
1391 |
+
|
1392 |
+
# EBX
|
1393 |
+
ebx = self._run_asm(
|
1394 |
+
instruction, # mov ax,0x8000000?
|
1395 |
+
b"\x0f\xa2" # cpuid
|
1396 |
+
b"\x89\xD8" # mov ax,bx
|
1397 |
+
b"\xC3" # ret
|
1398 |
+
)
|
1399 |
+
|
1400 |
+
# ECX
|
1401 |
+
ecx = self._run_asm(
|
1402 |
+
instruction, # mov ax,0x8000000?
|
1403 |
+
b"\x0f\xa2" # cpuid
|
1404 |
+
b"\x89\xC8" # mov ax,cx
|
1405 |
+
b"\xC3" # ret
|
1406 |
+
)
|
1407 |
+
|
1408 |
+
# EDX
|
1409 |
+
edx = self._run_asm(
|
1410 |
+
instruction, # mov ax,0x8000000?
|
1411 |
+
b"\x0f\xa2" # cpuid
|
1412 |
+
b"\x89\xD0" # mov ax,dx
|
1413 |
+
b"\xC3" # ret
|
1414 |
+
)
|
1415 |
+
|
1416 |
+
# Combine each of the 4 bytes in each register into the string
|
1417 |
+
for reg in [eax, ebx, ecx, edx]:
|
1418 |
+
for n in [0, 8, 16, 24]:
|
1419 |
+
processor_brand += chr((reg >> n) & 0xFF)
|
1420 |
+
|
1421 |
+
# Strip off any trailing NULL terminators and white space
|
1422 |
+
processor_brand = processor_brand.strip("\0").strip()
|
1423 |
+
|
1424 |
+
return processor_brand
|
1425 |
+
|
1426 |
+
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000006h:_Extended_L2_Cache_Features
|
1427 |
+
def get_cache(self, max_extension_support):
|
1428 |
+
cache_info = {}
|
1429 |
+
|
1430 |
+
# Just return if the cache feature is not supported
|
1431 |
+
if max_extension_support < 0x80000006:
|
1432 |
+
return cache_info
|
1433 |
+
|
1434 |
+
# ECX
|
1435 |
+
ecx = self._run_asm(
|
1436 |
+
b"\xB8\x06\x00\x00\x80" # mov ax,0x80000006
|
1437 |
+
b"\x0f\xa2" # cpuid
|
1438 |
+
b"\x89\xC8" # mov ax,cx
|
1439 |
+
b"\xC3" # ret
|
1440 |
+
)
|
1441 |
+
|
1442 |
+
cache_info = {
|
1443 |
+
'size_b' : (ecx & 0xFF) * 1024,
|
1444 |
+
'associativity' : (ecx >> 12) & 0xF,
|
1445 |
+
'line_size_b' : (ecx >> 16) & 0xFFFF
|
1446 |
+
}
|
1447 |
+
|
1448 |
+
return cache_info
|
1449 |
+
|
1450 |
+
def get_ticks_func(self):
|
1451 |
+
retval = None
|
1452 |
+
|
1453 |
+
if DataSource.bits == '32bit':
|
1454 |
+
# Works on x86_32
|
1455 |
+
restype = None
|
1456 |
+
argtypes = (ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
|
1457 |
+
get_ticks_x86_32 = self._asm_func(restype, argtypes,
|
1458 |
+
[
|
1459 |
+
b"\x55", # push bp
|
1460 |
+
b"\x89\xE5", # mov bp,sp
|
1461 |
+
b"\x31\xC0", # xor ax,ax
|
1462 |
+
b"\x0F\xA2", # cpuid
|
1463 |
+
b"\x0F\x31", # rdtsc
|
1464 |
+
b"\x8B\x5D\x08", # mov bx,[di+0x8]
|
1465 |
+
b"\x8B\x4D\x0C", # mov cx,[di+0xc]
|
1466 |
+
b"\x89\x13", # mov [bp+di],dx
|
1467 |
+
b"\x89\x01", # mov [bx+di],ax
|
1468 |
+
b"\x5D", # pop bp
|
1469 |
+
b"\xC3" # ret
|
1470 |
+
]
|
1471 |
+
)
|
1472 |
+
|
1473 |
+
# Monkey patch func to combine high and low args into one return
|
1474 |
+
old_func = get_ticks_x86_32.func
|
1475 |
+
def new_func():
|
1476 |
+
# Pass two uint32s into function
|
1477 |
+
high = ctypes.c_uint32(0)
|
1478 |
+
low = ctypes.c_uint32(0)
|
1479 |
+
old_func(ctypes.byref(high), ctypes.byref(low))
|
1480 |
+
|
1481 |
+
# Shift the two uint32s into one uint64
|
1482 |
+
retval = ((high.value << 32) & 0xFFFFFFFF00000000) | low.value
|
1483 |
+
return retval
|
1484 |
+
get_ticks_x86_32.func = new_func
|
1485 |
+
|
1486 |
+
retval = get_ticks_x86_32
|
1487 |
+
elif DataSource.bits == '64bit':
|
1488 |
+
# Works on x86_64
|
1489 |
+
restype = ctypes.c_uint64
|
1490 |
+
argtypes = ()
|
1491 |
+
get_ticks_x86_64 = self._asm_func(restype, argtypes,
|
1492 |
+
[
|
1493 |
+
b"\x48", # dec ax
|
1494 |
+
b"\x31\xC0", # xor ax,ax
|
1495 |
+
b"\x0F\xA2", # cpuid
|
1496 |
+
b"\x0F\x31", # rdtsc
|
1497 |
+
b"\x48", # dec ax
|
1498 |
+
b"\xC1\xE2\x20", # shl dx,byte 0x20
|
1499 |
+
b"\x48", # dec ax
|
1500 |
+
b"\x09\xD0", # or ax,dx
|
1501 |
+
b"\xC3", # ret
|
1502 |
+
]
|
1503 |
+
)
|
1504 |
+
|
1505 |
+
retval = get_ticks_x86_64
|
1506 |
+
return retval
|
1507 |
+
|
1508 |
+
def get_raw_hz(self):
|
1509 |
+
from time import sleep
|
1510 |
+
|
1511 |
+
ticks_fn = self.get_ticks_func()
|
1512 |
+
|
1513 |
+
start = ticks_fn.func()
|
1514 |
+
sleep(1)
|
1515 |
+
end = ticks_fn.func()
|
1516 |
+
|
1517 |
+
ticks = (end - start)
|
1518 |
+
ticks_fn.free()
|
1519 |
+
|
1520 |
+
return ticks
|
1521 |
+
|
1522 |
+
def _get_cpu_info_from_cpuid_actual():
|
1523 |
+
'''
|
1524 |
+
Warning! This function has the potential to crash the Python runtime.
|
1525 |
+
Do not call it directly. Use the _get_cpu_info_from_cpuid function instead.
|
1526 |
+
It will safely call this function in another process.
|
1527 |
+
'''
|
1528 |
+
|
1529 |
+
from io import StringIO
|
1530 |
+
|
1531 |
+
trace = Trace(True, True)
|
1532 |
+
info = {}
|
1533 |
+
|
1534 |
+
# Pipe stdout and stderr to strings
|
1535 |
+
sys.stdout = trace._stdout
|
1536 |
+
sys.stderr = trace._stderr
|
1537 |
+
|
1538 |
+
try:
|
1539 |
+
# Get the CPU arch and bits
|
1540 |
+
arch, bits = _parse_arch(DataSource.arch_string_raw)
|
1541 |
+
|
1542 |
+
# Return none if this is not an X86 CPU
|
1543 |
+
if not arch in ['X86_32', 'X86_64']:
|
1544 |
+
trace.fail('Not running on X86_32 or X86_64. Skipping ...')
|
1545 |
+
return trace.to_dict(info, True)
|
1546 |
+
|
1547 |
+
# Return none if SE Linux is in enforcing mode
|
1548 |
+
cpuid = CPUID(trace)
|
1549 |
+
if cpuid.is_selinux_enforcing:
|
1550 |
+
trace.fail('SELinux is enforcing. Skipping ...')
|
1551 |
+
return trace.to_dict(info, True)
|
1552 |
+
|
1553 |
+
# Get the cpu info from the CPUID register
|
1554 |
+
max_extension_support = cpuid.get_max_extension_support()
|
1555 |
+
cache_info = cpuid.get_cache(max_extension_support)
|
1556 |
+
info = cpuid.get_info()
|
1557 |
+
|
1558 |
+
processor_brand = cpuid.get_processor_brand(max_extension_support)
|
1559 |
+
|
1560 |
+
# Get the Hz and scale
|
1561 |
+
hz_actual = cpuid.get_raw_hz()
|
1562 |
+
hz_actual = _to_decimal_string(hz_actual)
|
1563 |
+
|
1564 |
+
# Get the Hz and scale
|
1565 |
+
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
|
1566 |
+
info = {
|
1567 |
+
'vendor_id_raw' : cpuid.get_vendor_id(),
|
1568 |
+
'hardware_raw' : '',
|
1569 |
+
'brand_raw' : processor_brand,
|
1570 |
+
|
1571 |
+
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
|
1572 |
+
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
|
1573 |
+
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
|
1574 |
+
'hz_actual' : _hz_short_to_full(hz_actual, 0),
|
1575 |
+
|
1576 |
+
'l2_cache_size' : cache_info['size_b'],
|
1577 |
+
'l2_cache_line_size' : cache_info['line_size_b'],
|
1578 |
+
'l2_cache_associativity' : cache_info['associativity'],
|
1579 |
+
|
1580 |
+
'stepping' : info['stepping'],
|
1581 |
+
'model' : info['model'],
|
1582 |
+
'family' : info['family'],
|
1583 |
+
'processor_type' : info['processor_type'],
|
1584 |
+
'flags' : cpuid.get_flags(max_extension_support)
|
1585 |
+
}
|
1586 |
+
|
1587 |
+
info = _filter_dict_keys_with_empty_values(info)
|
1588 |
+
trace.success()
|
1589 |
+
except Exception as err:
|
1590 |
+
from traceback import format_exc
|
1591 |
+
err_string = format_exc()
|
1592 |
+
trace._err = ''.join(['\t\t{0}\n'.format(n) for n in err_string.split('\n')]) + '\n'
|
1593 |
+
return trace.to_dict(info, True)
|
1594 |
+
|
1595 |
+
return trace.to_dict(info, False)
|
1596 |
+
|
1597 |
+
def _get_cpu_info_from_cpuid_subprocess_wrapper(queue):
|
1598 |
+
orig_stdout = sys.stdout
|
1599 |
+
orig_stderr = sys.stderr
|
1600 |
+
|
1601 |
+
output = _get_cpu_info_from_cpuid_actual()
|
1602 |
+
|
1603 |
+
sys.stdout = orig_stdout
|
1604 |
+
sys.stderr = orig_stderr
|
1605 |
+
|
1606 |
+
queue.put(_obj_to_b64(output))
|
1607 |
+
|
1608 |
+
def _get_cpu_info_from_cpuid():
|
1609 |
+
'''
|
1610 |
+
Returns the CPU info gathered by querying the X86 cpuid register in a new process.
|
1611 |
+
Returns {} on non X86 cpus.
|
1612 |
+
Returns {} if SELinux is in enforcing mode.
|
1613 |
+
'''
|
1614 |
+
|
1615 |
+
g_trace.header('Tying to get info from CPUID ...')
|
1616 |
+
|
1617 |
+
from multiprocessing import Process, Queue
|
1618 |
+
|
1619 |
+
# Return {} if can't cpuid
|
1620 |
+
if not DataSource.can_cpuid:
|
1621 |
+
g_trace.fail('Can\'t CPUID. Skipping ...')
|
1622 |
+
return {}
|
1623 |
+
|
1624 |
+
# Get the CPU arch and bits
|
1625 |
+
arch, bits = _parse_arch(DataSource.arch_string_raw)
|
1626 |
+
|
1627 |
+
# Return {} if this is not an X86 CPU
|
1628 |
+
if not arch in ['X86_32', 'X86_64']:
|
1629 |
+
g_trace.fail('Not running on X86_32 or X86_64. Skipping ...')
|
1630 |
+
return {}
|
1631 |
+
|
1632 |
+
try:
|
1633 |
+
if CAN_CALL_CPUID_IN_SUBPROCESS:
|
1634 |
+
# Start running the function in a subprocess
|
1635 |
+
queue = Queue()
|
1636 |
+
p = Process(target=_get_cpu_info_from_cpuid_subprocess_wrapper, args=(queue,))
|
1637 |
+
p.start()
|
1638 |
+
|
1639 |
+
# Wait for the process to end, while it is still alive
|
1640 |
+
while p.is_alive():
|
1641 |
+
p.join(0)
|
1642 |
+
|
1643 |
+
# Return {} if it failed
|
1644 |
+
if p.exitcode != 0:
|
1645 |
+
g_trace.fail('Failed to run CPUID in process. Skipping ...')
|
1646 |
+
return {}
|
1647 |
+
|
1648 |
+
# Return {} if no results
|
1649 |
+
if queue.empty():
|
1650 |
+
g_trace.fail('Failed to get anything from CPUID process. Skipping ...')
|
1651 |
+
return {}
|
1652 |
+
# Return the result, only if there is something to read
|
1653 |
+
else:
|
1654 |
+
output = _b64_to_obj(queue.get())
|
1655 |
+
import pprint
|
1656 |
+
pp = pprint.PrettyPrinter(indent=4)
|
1657 |
+
#pp.pprint(output)
|
1658 |
+
|
1659 |
+
if 'output' in output and output['output']:
|
1660 |
+
g_trace.write(output['output'])
|
1661 |
+
|
1662 |
+
if 'stdout' in output and output['stdout']:
|
1663 |
+
sys.stdout.write('{0}\n'.format(output['stdout']))
|
1664 |
+
sys.stdout.flush()
|
1665 |
+
|
1666 |
+
if 'stderr' in output and output['stderr']:
|
1667 |
+
sys.stderr.write('{0}\n'.format(output['stderr']))
|
1668 |
+
sys.stderr.flush()
|
1669 |
+
|
1670 |
+
if 'is_fail' not in output:
|
1671 |
+
g_trace.fail('Failed to get is_fail from CPUID process. Skipping ...')
|
1672 |
+
return {}
|
1673 |
+
|
1674 |
+
# Fail if there was an exception
|
1675 |
+
if 'err' in output and output['err']:
|
1676 |
+
g_trace.fail('Failed to run CPUID in process. Skipping ...')
|
1677 |
+
g_trace.write(output['err'])
|
1678 |
+
g_trace.write('Failed ...')
|
1679 |
+
return {}
|
1680 |
+
|
1681 |
+
if 'is_fail' in output and output['is_fail']:
|
1682 |
+
g_trace.write('Failed ...')
|
1683 |
+
return {}
|
1684 |
+
|
1685 |
+
if 'info' not in output or not output['info']:
|
1686 |
+
g_trace.fail('Failed to get return info from CPUID process. Skipping ...')
|
1687 |
+
return {}
|
1688 |
+
|
1689 |
+
return output['info']
|
1690 |
+
else:
|
1691 |
+
# FIXME: This should write the values like in the above call to actual
|
1692 |
+
orig_stdout = sys.stdout
|
1693 |
+
orig_stderr = sys.stderr
|
1694 |
+
|
1695 |
+
output = _get_cpu_info_from_cpuid_actual()
|
1696 |
+
|
1697 |
+
sys.stdout = orig_stdout
|
1698 |
+
sys.stderr = orig_stderr
|
1699 |
+
|
1700 |
+
g_trace.success()
|
1701 |
+
return output['info']
|
1702 |
+
except Exception as err:
|
1703 |
+
g_trace.fail(err)
|
1704 |
+
|
1705 |
+
# Return {} if everything failed
|
1706 |
+
return {}
|
1707 |
+
|
1708 |
+
def _get_cpu_info_from_proc_cpuinfo():
|
1709 |
+
'''
|
1710 |
+
Returns the CPU info gathered from /proc/cpuinfo.
|
1711 |
+
Returns {} if /proc/cpuinfo is not found.
|
1712 |
+
'''
|
1713 |
+
|
1714 |
+
g_trace.header('Tying to get info from /proc/cpuinfo ...')
|
1715 |
+
|
1716 |
+
try:
|
1717 |
+
# Just return {} if there is no cpuinfo
|
1718 |
+
if not DataSource.has_proc_cpuinfo():
|
1719 |
+
g_trace.fail('Failed to find /proc/cpuinfo. Skipping ...')
|
1720 |
+
return {}
|
1721 |
+
|
1722 |
+
returncode, output = DataSource.cat_proc_cpuinfo()
|
1723 |
+
if returncode != 0:
|
1724 |
+
g_trace.fail('Failed to run cat /proc/cpuinfo. Skipping ...')
|
1725 |
+
return {}
|
1726 |
+
|
1727 |
+
# Various fields
|
1728 |
+
vendor_id = _get_field(False, output, None, '', 'vendor_id', 'vendor id', 'vendor')
|
1729 |
+
processor_brand = _get_field(True, output, None, None, 'model name', 'cpu', 'processor', 'uarch')
|
1730 |
+
cache_size = _get_field(False, output, None, '', 'cache size')
|
1731 |
+
stepping = _get_field(False, output, int, -1, 'stepping')
|
1732 |
+
model = _get_field(False, output, int, -1, 'model')
|
1733 |
+
family = _get_field(False, output, int, -1, 'cpu family')
|
1734 |
+
hardware = _get_field(False, output, None, '', 'Hardware')
|
1735 |
+
|
1736 |
+
# Flags
|
1737 |
+
flags = _get_field(False, output, None, None, 'flags', 'Features', 'ASEs implemented')
|
1738 |
+
if flags:
|
1739 |
+
flags = flags.split()
|
1740 |
+
flags.sort()
|
1741 |
+
|
1742 |
+
# Check for other cache format
|
1743 |
+
if not cache_size:
|
1744 |
+
try:
|
1745 |
+
for i in range(0, 10):
|
1746 |
+
name = "cache{0}".format(i)
|
1747 |
+
value = _get_field(False, output, None, None, name)
|
1748 |
+
if value:
|
1749 |
+
value = [entry.split('=') for entry in value.split(' ')]
|
1750 |
+
value = dict(value)
|
1751 |
+
if 'level' in value and value['level'] == '3' and 'size' in value:
|
1752 |
+
cache_size = value['size']
|
1753 |
+
break
|
1754 |
+
except Exception:
|
1755 |
+
pass
|
1756 |
+
|
1757 |
+
# Convert from MHz string to Hz
|
1758 |
+
hz_actual = _get_field(False, output, None, '', 'cpu MHz', 'cpu speed', 'clock', 'cpu MHz dynamic', 'cpu MHz static')
|
1759 |
+
hz_actual = hz_actual.lower().rstrip('mhz').strip()
|
1760 |
+
hz_actual = _to_decimal_string(hz_actual)
|
1761 |
+
|
1762 |
+
# Convert from GHz/MHz string to Hz
|
1763 |
+
hz_advertised, scale = (None, 0)
|
1764 |
+
try:
|
1765 |
+
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
|
1766 |
+
except Exception:
|
1767 |
+
pass
|
1768 |
+
|
1769 |
+
info = {
|
1770 |
+
'hardware_raw' : hardware,
|
1771 |
+
'brand_raw' : processor_brand,
|
1772 |
+
|
1773 |
+
'l3_cache_size' : _friendly_bytes_to_int(cache_size),
|
1774 |
+
'flags' : flags,
|
1775 |
+
'vendor_id_raw' : vendor_id,
|
1776 |
+
'stepping' : stepping,
|
1777 |
+
'model' : model,
|
1778 |
+
'family' : family,
|
1779 |
+
}
|
1780 |
+
|
1781 |
+
# Make the Hz the same for actual and advertised if missing any
|
1782 |
+
if not hz_advertised or hz_advertised == '0.0':
|
1783 |
+
hz_advertised = hz_actual
|
1784 |
+
scale = 6
|
1785 |
+
elif not hz_actual or hz_actual == '0.0':
|
1786 |
+
hz_actual = hz_advertised
|
1787 |
+
|
1788 |
+
# Add the Hz if there is one
|
1789 |
+
if _hz_short_to_full(hz_advertised, scale) > (0, 0):
|
1790 |
+
info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
|
1791 |
+
info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
|
1792 |
+
if _hz_short_to_full(hz_actual, scale) > (0, 0):
|
1793 |
+
info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, 6)
|
1794 |
+
info['hz_actual'] = _hz_short_to_full(hz_actual, 6)
|
1795 |
+
|
1796 |
+
info = _filter_dict_keys_with_empty_values(info, {'stepping':0, 'model':0, 'family':0})
|
1797 |
+
g_trace.success()
|
1798 |
+
return info
|
1799 |
+
except Exception as err:
|
1800 |
+
g_trace.fail(err)
|
1801 |
+
#raise # NOTE: To have this throw on error, uncomment this line
|
1802 |
+
return {}
|
1803 |
+
|
1804 |
+
def _get_cpu_info_from_cpufreq_info():
|
1805 |
+
'''
|
1806 |
+
Returns the CPU info gathered from cpufreq-info.
|
1807 |
+
Returns {} if cpufreq-info is not found.
|
1808 |
+
'''
|
1809 |
+
|
1810 |
+
g_trace.header('Tying to get info from cpufreq-info ...')
|
1811 |
+
|
1812 |
+
try:
|
1813 |
+
hz_brand, scale = '0.0', 0
|
1814 |
+
|
1815 |
+
if not DataSource.has_cpufreq_info():
|
1816 |
+
g_trace.fail('Failed to find cpufreq-info. Skipping ...')
|
1817 |
+
return {}
|
1818 |
+
|
1819 |
+
returncode, output = DataSource.cpufreq_info()
|
1820 |
+
if returncode != 0:
|
1821 |
+
g_trace.fail('Failed to run cpufreq-info. Skipping ...')
|
1822 |
+
return {}
|
1823 |
+
|
1824 |
+
hz_brand = output.split('current CPU frequency is')[1].split('\n')[0]
|
1825 |
+
i = hz_brand.find('Hz')
|
1826 |
+
assert(i != -1)
|
1827 |
+
hz_brand = hz_brand[0 : i+2].strip().lower()
|
1828 |
+
|
1829 |
+
if hz_brand.endswith('mhz'):
|
1830 |
+
scale = 6
|
1831 |
+
elif hz_brand.endswith('ghz'):
|
1832 |
+
scale = 9
|
1833 |
+
hz_brand = hz_brand.rstrip('mhz').rstrip('ghz').strip()
|
1834 |
+
hz_brand = _to_decimal_string(hz_brand)
|
1835 |
+
|
1836 |
+
info = {
|
1837 |
+
'hz_advertised_friendly' : _hz_short_to_friendly(hz_brand, scale),
|
1838 |
+
'hz_actual_friendly' : _hz_short_to_friendly(hz_brand, scale),
|
1839 |
+
'hz_advertised' : _hz_short_to_full(hz_brand, scale),
|
1840 |
+
'hz_actual' : _hz_short_to_full(hz_brand, scale),
|
1841 |
+
}
|
1842 |
+
|
1843 |
+
info = _filter_dict_keys_with_empty_values(info)
|
1844 |
+
g_trace.success()
|
1845 |
+
return info
|
1846 |
+
except Exception as err:
|
1847 |
+
g_trace.fail(err)
|
1848 |
+
#raise # NOTE: To have this throw on error, uncomment this line
|
1849 |
+
return {}
|
1850 |
+
|
1851 |
+
def _get_cpu_info_from_lscpu():
|
1852 |
+
'''
|
1853 |
+
Returns the CPU info gathered from lscpu.
|
1854 |
+
Returns {} if lscpu is not found.
|
1855 |
+
'''
|
1856 |
+
|
1857 |
+
g_trace.header('Tying to get info from lscpu ...')
|
1858 |
+
|
1859 |
+
try:
|
1860 |
+
if not DataSource.has_lscpu():
|
1861 |
+
g_trace.fail('Failed to find lscpu. Skipping ...')
|
1862 |
+
return {}
|
1863 |
+
|
1864 |
+
returncode, output = DataSource.lscpu()
|
1865 |
+
if returncode != 0:
|
1866 |
+
g_trace.fail('Failed to run lscpu. Skipping ...')
|
1867 |
+
return {}
|
1868 |
+
|
1869 |
+
info = {}
|
1870 |
+
|
1871 |
+
new_hz = _get_field(False, output, None, None, 'CPU max MHz', 'CPU MHz')
|
1872 |
+
if new_hz:
|
1873 |
+
new_hz = _to_decimal_string(new_hz)
|
1874 |
+
scale = 6
|
1875 |
+
info['hz_advertised_friendly'] = _hz_short_to_friendly(new_hz, scale)
|
1876 |
+
info['hz_actual_friendly'] = _hz_short_to_friendly(new_hz, scale)
|
1877 |
+
info['hz_advertised'] = _hz_short_to_full(new_hz, scale)
|
1878 |
+
info['hz_actual'] = _hz_short_to_full(new_hz, scale)
|
1879 |
+
|
1880 |
+
new_hz = _get_field(False, output, None, None, 'CPU dynamic MHz', 'CPU static MHz')
|
1881 |
+
if new_hz:
|
1882 |
+
new_hz = _to_decimal_string(new_hz)
|
1883 |
+
scale = 6
|
1884 |
+
info['hz_advertised_friendly'] = _hz_short_to_friendly(new_hz, scale)
|
1885 |
+
info['hz_actual_friendly'] = _hz_short_to_friendly(new_hz, scale)
|
1886 |
+
info['hz_advertised'] = _hz_short_to_full(new_hz, scale)
|
1887 |
+
info['hz_actual'] = _hz_short_to_full(new_hz, scale)
|
1888 |
+
|
1889 |
+
vendor_id = _get_field(False, output, None, None, 'Vendor ID')
|
1890 |
+
if vendor_id:
|
1891 |
+
info['vendor_id_raw'] = vendor_id
|
1892 |
+
|
1893 |
+
brand = _get_field(False, output, None, None, 'Model name')
|
1894 |
+
if brand:
|
1895 |
+
info['brand_raw'] = brand
|
1896 |
+
else:
|
1897 |
+
brand = _get_field(False, output, None, None, 'Model')
|
1898 |
+
if brand and not brand.isdigit():
|
1899 |
+
info['brand_raw'] = brand
|
1900 |
+
|
1901 |
+
family = _get_field(False, output, None, None, 'CPU family')
|
1902 |
+
if family and family.isdigit():
|
1903 |
+
info['family'] = int(family)
|
1904 |
+
|
1905 |
+
stepping = _get_field(False, output, None, None, 'Stepping')
|
1906 |
+
if stepping and stepping.isdigit():
|
1907 |
+
info['stepping'] = int(stepping)
|
1908 |
+
|
1909 |
+
model = _get_field(False, output, None, None, 'Model')
|
1910 |
+
if model and model.isdigit():
|
1911 |
+
info['model'] = int(model)
|
1912 |
+
|
1913 |
+
l1_data_cache_size = _get_field(False, output, None, None, 'L1d cache')
|
1914 |
+
if l1_data_cache_size:
|
1915 |
+
l1_data_cache_size = l1_data_cache_size.split('(')[0].strip()
|
1916 |
+
info['l1_data_cache_size'] = _friendly_bytes_to_int(l1_data_cache_size)
|
1917 |
+
|
1918 |
+
l1_instruction_cache_size = _get_field(False, output, None, None, 'L1i cache')
|
1919 |
+
if l1_instruction_cache_size:
|
1920 |
+
l1_instruction_cache_size = l1_instruction_cache_size.split('(')[0].strip()
|
1921 |
+
info['l1_instruction_cache_size'] = _friendly_bytes_to_int(l1_instruction_cache_size)
|
1922 |
+
|
1923 |
+
l2_cache_size = _get_field(False, output, None, None, 'L2 cache', 'L2d cache')
|
1924 |
+
if l2_cache_size:
|
1925 |
+
l2_cache_size = l2_cache_size.split('(')[0].strip()
|
1926 |
+
info['l2_cache_size'] = _friendly_bytes_to_int(l2_cache_size)
|
1927 |
+
|
1928 |
+
l3_cache_size = _get_field(False, output, None, None, 'L3 cache')
|
1929 |
+
if l3_cache_size:
|
1930 |
+
l3_cache_size = l3_cache_size.split('(')[0].strip()
|
1931 |
+
info['l3_cache_size'] = _friendly_bytes_to_int(l3_cache_size)
|
1932 |
+
|
1933 |
+
# Flags
|
1934 |
+
flags = _get_field(False, output, None, None, 'flags', 'Features', 'ASEs implemented')
|
1935 |
+
if flags:
|
1936 |
+
flags = flags.split()
|
1937 |
+
flags.sort()
|
1938 |
+
info['flags'] = flags
|
1939 |
+
|
1940 |
+
info = _filter_dict_keys_with_empty_values(info, {'stepping':0, 'model':0, 'family':0})
|
1941 |
+
g_trace.success()
|
1942 |
+
return info
|
1943 |
+
except Exception as err:
|
1944 |
+
g_trace.fail(err)
|
1945 |
+
#raise # NOTE: To have this throw on error, uncomment this line
|
1946 |
+
return {}
|
1947 |
+
|
1948 |
+
def _get_cpu_info_from_dmesg():
|
1949 |
+
'''
|
1950 |
+
Returns the CPU info gathered from dmesg.
|
1951 |
+
Returns {} if dmesg is not found or does not have the desired info.
|
1952 |
+
'''
|
1953 |
+
|
1954 |
+
g_trace.header('Tying to get info from the dmesg ...')
|
1955 |
+
|
1956 |
+
# Just return {} if this arch has an unreliable dmesg log
|
1957 |
+
arch, bits = _parse_arch(DataSource.arch_string_raw)
|
1958 |
+
if arch in ['S390X']:
|
1959 |
+
g_trace.fail('Running on S390X. Skipping ...')
|
1960 |
+
return {}
|
1961 |
+
|
1962 |
+
# Just return {} if there is no dmesg
|
1963 |
+
if not DataSource.has_dmesg():
|
1964 |
+
g_trace.fail('Failed to find dmesg. Skipping ...')
|
1965 |
+
return {}
|
1966 |
+
|
1967 |
+
# If dmesg fails return {}
|
1968 |
+
returncode, output = DataSource.dmesg_a()
|
1969 |
+
if output is None or returncode != 0:
|
1970 |
+
g_trace.fail('Failed to run \"dmesg -a\". Skipping ...')
|
1971 |
+
return {}
|
1972 |
+
|
1973 |
+
info = _parse_dmesg_output(output)
|
1974 |
+
g_trace.success()
|
1975 |
+
return info
|
1976 |
+
|
1977 |
+
|
1978 |
+
# https://openpowerfoundation.org/wp-content/uploads/2016/05/LoPAPR_DRAFT_v11_24March2016_cmt1.pdf
|
1979 |
+
# page 767
|
1980 |
+
def _get_cpu_info_from_ibm_pa_features():
|
1981 |
+
'''
|
1982 |
+
Returns the CPU info gathered from lsprop /proc/device-tree/cpus/*/ibm,pa-features
|
1983 |
+
Returns {} if lsprop is not found or ibm,pa-features does not have the desired info.
|
1984 |
+
'''
|
1985 |
+
|
1986 |
+
g_trace.header('Tying to get info from lsprop ...')
|
1987 |
+
|
1988 |
+
try:
|
1989 |
+
# Just return {} if there is no lsprop
|
1990 |
+
if not DataSource.has_ibm_pa_features():
|
1991 |
+
g_trace.fail('Failed to find lsprop. Skipping ...')
|
1992 |
+
return {}
|
1993 |
+
|
1994 |
+
# If ibm,pa-features fails return {}
|
1995 |
+
returncode, output = DataSource.ibm_pa_features()
|
1996 |
+
if output is None or returncode != 0:
|
1997 |
+
g_trace.fail('Failed to glob /proc/device-tree/cpus/*/ibm,pa-features. Skipping ...')
|
1998 |
+
return {}
|
1999 |
+
|
2000 |
+
# Filter out invalid characters from output
|
2001 |
+
value = output.split("ibm,pa-features")[1].lower()
|
2002 |
+
value = [s for s in value if s in list('0123456789abcfed')]
|
2003 |
+
value = ''.join(value)
|
2004 |
+
|
2005 |
+
# Get data converted to Uint32 chunks
|
2006 |
+
left = int(value[0 : 8], 16)
|
2007 |
+
right = int(value[8 : 16], 16)
|
2008 |
+
|
2009 |
+
# Get the CPU flags
|
2010 |
+
flags = {
|
2011 |
+
# Byte 0
|
2012 |
+
'mmu' : _is_bit_set(left, 0),
|
2013 |
+
'fpu' : _is_bit_set(left, 1),
|
2014 |
+
'slb' : _is_bit_set(left, 2),
|
2015 |
+
'run' : _is_bit_set(left, 3),
|
2016 |
+
#'reserved' : _is_bit_set(left, 4),
|
2017 |
+
'dabr' : _is_bit_set(left, 5),
|
2018 |
+
'ne' : _is_bit_set(left, 6),
|
2019 |
+
'wtr' : _is_bit_set(left, 7),
|
2020 |
+
|
2021 |
+
# Byte 1
|
2022 |
+
'mcr' : _is_bit_set(left, 8),
|
2023 |
+
'dsisr' : _is_bit_set(left, 9),
|
2024 |
+
'lp' : _is_bit_set(left, 10),
|
2025 |
+
'ri' : _is_bit_set(left, 11),
|
2026 |
+
'dabrx' : _is_bit_set(left, 12),
|
2027 |
+
'sprg3' : _is_bit_set(left, 13),
|
2028 |
+
'rislb' : _is_bit_set(left, 14),
|
2029 |
+
'pp' : _is_bit_set(left, 15),
|
2030 |
+
|
2031 |
+
# Byte 2
|
2032 |
+
'vpm' : _is_bit_set(left, 16),
|
2033 |
+
'dss_2.05' : _is_bit_set(left, 17),
|
2034 |
+
#'reserved' : _is_bit_set(left, 18),
|
2035 |
+
'dar' : _is_bit_set(left, 19),
|
2036 |
+
#'reserved' : _is_bit_set(left, 20),
|
2037 |
+
'ppr' : _is_bit_set(left, 21),
|
2038 |
+
'dss_2.02' : _is_bit_set(left, 22),
|
2039 |
+
'dss_2.06' : _is_bit_set(left, 23),
|
2040 |
+
|
2041 |
+
# Byte 3
|
2042 |
+
'lsd_in_dscr' : _is_bit_set(left, 24),
|
2043 |
+
'ugr_in_dscr' : _is_bit_set(left, 25),
|
2044 |
+
#'reserved' : _is_bit_set(left, 26),
|
2045 |
+
#'reserved' : _is_bit_set(left, 27),
|
2046 |
+
#'reserved' : _is_bit_set(left, 28),
|
2047 |
+
#'reserved' : _is_bit_set(left, 29),
|
2048 |
+
#'reserved' : _is_bit_set(left, 30),
|
2049 |
+
#'reserved' : _is_bit_set(left, 31),
|
2050 |
+
|
2051 |
+
# Byte 4
|
2052 |
+
'sso_2.06' : _is_bit_set(right, 0),
|
2053 |
+
#'reserved' : _is_bit_set(right, 1),
|
2054 |
+
#'reserved' : _is_bit_set(right, 2),
|
2055 |
+
#'reserved' : _is_bit_set(right, 3),
|
2056 |
+
#'reserved' : _is_bit_set(right, 4),
|
2057 |
+
#'reserved' : _is_bit_set(right, 5),
|
2058 |
+
#'reserved' : _is_bit_set(right, 6),
|
2059 |
+
#'reserved' : _is_bit_set(right, 7),
|
2060 |
+
|
2061 |
+
# Byte 5
|
2062 |
+
'le' : _is_bit_set(right, 8),
|
2063 |
+
'cfar' : _is_bit_set(right, 9),
|
2064 |
+
'eb' : _is_bit_set(right, 10),
|
2065 |
+
'lsq_2.07' : _is_bit_set(right, 11),
|
2066 |
+
#'reserved' : _is_bit_set(right, 12),
|
2067 |
+
#'reserved' : _is_bit_set(right, 13),
|
2068 |
+
#'reserved' : _is_bit_set(right, 14),
|
2069 |
+
#'reserved' : _is_bit_set(right, 15),
|
2070 |
+
|
2071 |
+
# Byte 6
|
2072 |
+
'dss_2.07' : _is_bit_set(right, 16),
|
2073 |
+
#'reserved' : _is_bit_set(right, 17),
|
2074 |
+
#'reserved' : _is_bit_set(right, 18),
|
2075 |
+
#'reserved' : _is_bit_set(right, 19),
|
2076 |
+
#'reserved' : _is_bit_set(right, 20),
|
2077 |
+
#'reserved' : _is_bit_set(right, 21),
|
2078 |
+
#'reserved' : _is_bit_set(right, 22),
|
2079 |
+
#'reserved' : _is_bit_set(right, 23),
|
2080 |
+
|
2081 |
+
# Byte 7
|
2082 |
+
#'reserved' : _is_bit_set(right, 24),
|
2083 |
+
#'reserved' : _is_bit_set(right, 25),
|
2084 |
+
#'reserved' : _is_bit_set(right, 26),
|
2085 |
+
#'reserved' : _is_bit_set(right, 27),
|
2086 |
+
#'reserved' : _is_bit_set(right, 28),
|
2087 |
+
#'reserved' : _is_bit_set(right, 29),
|
2088 |
+
#'reserved' : _is_bit_set(right, 30),
|
2089 |
+
#'reserved' : _is_bit_set(right, 31),
|
2090 |
+
}
|
2091 |
+
|
2092 |
+
# Get a list of only the flags that are true
|
2093 |
+
flags = [k for k, v in flags.items() if v]
|
2094 |
+
flags.sort()
|
2095 |
+
|
2096 |
+
info = {
|
2097 |
+
'flags' : flags
|
2098 |
+
}
|
2099 |
+
info = _filter_dict_keys_with_empty_values(info)
|
2100 |
+
g_trace.success()
|
2101 |
+
return info
|
2102 |
+
except Exception as err:
|
2103 |
+
g_trace.fail(err)
|
2104 |
+
return {}
|
2105 |
+
|
2106 |
+
|
2107 |
+
def _get_cpu_info_from_cat_var_run_dmesg_boot():
|
2108 |
+
'''
|
2109 |
+
Returns the CPU info gathered from /var/run/dmesg.boot.
|
2110 |
+
Returns {} if dmesg is not found or does not have the desired info.
|
2111 |
+
'''
|
2112 |
+
|
2113 |
+
g_trace.header('Tying to get info from the /var/run/dmesg.boot log ...')
|
2114 |
+
|
2115 |
+
# Just return {} if there is no /var/run/dmesg.boot
|
2116 |
+
if not DataSource.has_var_run_dmesg_boot():
|
2117 |
+
g_trace.fail('Failed to find /var/run/dmesg.boot file. Skipping ...')
|
2118 |
+
return {}
|
2119 |
+
|
2120 |
+
# If dmesg.boot fails return {}
|
2121 |
+
returncode, output = DataSource.cat_var_run_dmesg_boot()
|
2122 |
+
if output is None or returncode != 0:
|
2123 |
+
g_trace.fail('Failed to run \"cat /var/run/dmesg.boot\". Skipping ...')
|
2124 |
+
return {}
|
2125 |
+
|
2126 |
+
info = _parse_dmesg_output(output)
|
2127 |
+
g_trace.success()
|
2128 |
+
return info
|
2129 |
+
|
2130 |
+
|
2131 |
+
def _get_cpu_info_from_sysctl():
|
2132 |
+
'''
|
2133 |
+
Returns the CPU info gathered from sysctl.
|
2134 |
+
Returns {} if sysctl is not found.
|
2135 |
+
'''
|
2136 |
+
|
2137 |
+
g_trace.header('Tying to get info from sysctl ...')
|
2138 |
+
|
2139 |
+
try:
|
2140 |
+
# Just return {} if there is no sysctl
|
2141 |
+
if not DataSource.has_sysctl():
|
2142 |
+
g_trace.fail('Failed to find sysctl. Skipping ...')
|
2143 |
+
return {}
|
2144 |
+
|
2145 |
+
# If sysctl fails return {}
|
2146 |
+
returncode, output = DataSource.sysctl_machdep_cpu_hw_cpufrequency()
|
2147 |
+
if output is None or returncode != 0:
|
2148 |
+
g_trace.fail('Failed to run \"sysctl machdep.cpu hw.cpufrequency\". Skipping ...')
|
2149 |
+
return {}
|
2150 |
+
|
2151 |
+
# Various fields
|
2152 |
+
vendor_id = _get_field(False, output, None, None, 'machdep.cpu.vendor')
|
2153 |
+
processor_brand = _get_field(True, output, None, None, 'machdep.cpu.brand_string')
|
2154 |
+
cache_size = _get_field(False, output, int, 0, 'machdep.cpu.cache.size')
|
2155 |
+
stepping = _get_field(False, output, int, 0, 'machdep.cpu.stepping')
|
2156 |
+
model = _get_field(False, output, int, 0, 'machdep.cpu.model')
|
2157 |
+
family = _get_field(False, output, int, 0, 'machdep.cpu.family')
|
2158 |
+
|
2159 |
+
# Flags
|
2160 |
+
flags = _get_field(False, output, None, '', 'machdep.cpu.features').lower().split()
|
2161 |
+
flags.extend(_get_field(False, output, None, '', 'machdep.cpu.leaf7_features').lower().split())
|
2162 |
+
flags.extend(_get_field(False, output, None, '', 'machdep.cpu.extfeatures').lower().split())
|
2163 |
+
flags.sort()
|
2164 |
+
|
2165 |
+
# Convert from GHz/MHz string to Hz
|
2166 |
+
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
|
2167 |
+
hz_actual = _get_field(False, output, None, None, 'hw.cpufrequency')
|
2168 |
+
hz_actual = _to_decimal_string(hz_actual)
|
2169 |
+
|
2170 |
+
info = {
|
2171 |
+
'vendor_id_raw' : vendor_id,
|
2172 |
+
'brand_raw' : processor_brand,
|
2173 |
+
|
2174 |
+
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
|
2175 |
+
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
|
2176 |
+
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
|
2177 |
+
'hz_actual' : _hz_short_to_full(hz_actual, 0),
|
2178 |
+
|
2179 |
+
'l2_cache_size' : int(cache_size) * 1024,
|
2180 |
+
|
2181 |
+
'stepping' : stepping,
|
2182 |
+
'model' : model,
|
2183 |
+
'family' : family,
|
2184 |
+
'flags' : flags
|
2185 |
+
}
|
2186 |
+
|
2187 |
+
info = _filter_dict_keys_with_empty_values(info)
|
2188 |
+
g_trace.success()
|
2189 |
+
return info
|
2190 |
+
except Exception as err:
|
2191 |
+
g_trace.fail(err)
|
2192 |
+
return {}
|
2193 |
+
|
2194 |
+
|
2195 |
+
def _get_cpu_info_from_sysinfo():
|
2196 |
+
'''
|
2197 |
+
Returns the CPU info gathered from sysinfo.
|
2198 |
+
Returns {} if sysinfo is not found.
|
2199 |
+
'''
|
2200 |
+
|
2201 |
+
info = _get_cpu_info_from_sysinfo_v1()
|
2202 |
+
info.update(_get_cpu_info_from_sysinfo_v2())
|
2203 |
+
return info
|
2204 |
+
|
2205 |
+
def _get_cpu_info_from_sysinfo_v1():
|
2206 |
+
'''
|
2207 |
+
Returns the CPU info gathered from sysinfo.
|
2208 |
+
Returns {} if sysinfo is not found.
|
2209 |
+
'''
|
2210 |
+
|
2211 |
+
g_trace.header('Tying to get info from sysinfo version 1 ...')
|
2212 |
+
|
2213 |
+
try:
|
2214 |
+
# Just return {} if there is no sysinfo
|
2215 |
+
if not DataSource.has_sysinfo():
|
2216 |
+
g_trace.fail('Failed to find sysinfo. Skipping ...')
|
2217 |
+
return {}
|
2218 |
+
|
2219 |
+
# If sysinfo fails return {}
|
2220 |
+
returncode, output = DataSource.sysinfo_cpu()
|
2221 |
+
if output is None or returncode != 0:
|
2222 |
+
g_trace.fail('Failed to run \"sysinfo -cpu\". Skipping ...')
|
2223 |
+
return {}
|
2224 |
+
|
2225 |
+
# Various fields
|
2226 |
+
vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
|
2227 |
+
processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
|
2228 |
+
cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
|
2229 |
+
stepping = int(output.split(', stepping ')[1].split(',')[0].strip())
|
2230 |
+
model = int(output.split(', model ')[1].split(',')[0].strip())
|
2231 |
+
family = int(output.split(', family ')[1].split(',')[0].strip())
|
2232 |
+
|
2233 |
+
# Flags
|
2234 |
+
flags = []
|
2235 |
+
for line in output.split('\n'):
|
2236 |
+
if line.startswith('\t\t'):
|
2237 |
+
for flag in line.strip().lower().split():
|
2238 |
+
flags.append(flag)
|
2239 |
+
flags.sort()
|
2240 |
+
|
2241 |
+
# Convert from GHz/MHz string to Hz
|
2242 |
+
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
|
2243 |
+
hz_actual = hz_advertised
|
2244 |
+
|
2245 |
+
info = {
|
2246 |
+
'vendor_id_raw' : vendor_id,
|
2247 |
+
'brand_raw' : processor_brand,
|
2248 |
+
|
2249 |
+
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
|
2250 |
+
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
|
2251 |
+
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
|
2252 |
+
'hz_actual' : _hz_short_to_full(hz_actual, scale),
|
2253 |
+
|
2254 |
+
'l2_cache_size' : _to_friendly_bytes(cache_size),
|
2255 |
+
|
2256 |
+
'stepping' : stepping,
|
2257 |
+
'model' : model,
|
2258 |
+
'family' : family,
|
2259 |
+
'flags' : flags
|
2260 |
+
}
|
2261 |
+
|
2262 |
+
info = _filter_dict_keys_with_empty_values(info)
|
2263 |
+
g_trace.success()
|
2264 |
+
return info
|
2265 |
+
except Exception as err:
|
2266 |
+
g_trace.fail(err)
|
2267 |
+
#raise # NOTE: To have this throw on error, uncomment this line
|
2268 |
+
return {}
|
2269 |
+
|
2270 |
+
def _get_cpu_info_from_sysinfo_v2():
|
2271 |
+
'''
|
2272 |
+
Returns the CPU info gathered from sysinfo.
|
2273 |
+
Returns {} if sysinfo is not found.
|
2274 |
+
'''
|
2275 |
+
|
2276 |
+
g_trace.header('Tying to get info from sysinfo version 2 ...')
|
2277 |
+
|
2278 |
+
try:
|
2279 |
+
# Just return {} if there is no sysinfo
|
2280 |
+
if not DataSource.has_sysinfo():
|
2281 |
+
g_trace.fail('Failed to find sysinfo. Skipping ...')
|
2282 |
+
return {}
|
2283 |
+
|
2284 |
+
# If sysinfo fails return {}
|
2285 |
+
returncode, output = DataSource.sysinfo_cpu()
|
2286 |
+
if output is None or returncode != 0:
|
2287 |
+
g_trace.fail('Failed to run \"sysinfo -cpu\". Skipping ...')
|
2288 |
+
return {}
|
2289 |
+
|
2290 |
+
# Various fields
|
2291 |
+
vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
|
2292 |
+
processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
|
2293 |
+
cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
|
2294 |
+
signature = output.split('Signature:')[1].split('\n')[0].strip()
|
2295 |
+
#
|
2296 |
+
stepping = int(signature.split('stepping ')[1].split(',')[0].strip())
|
2297 |
+
model = int(signature.split('model ')[1].split(',')[0].strip())
|
2298 |
+
family = int(signature.split('family ')[1].split(',')[0].strip())
|
2299 |
+
|
2300 |
+
# Flags
|
2301 |
+
def get_subsection_flags(output):
|
2302 |
+
retval = []
|
2303 |
+
for line in output.split('\n')[1:]:
|
2304 |
+
if not line.startswith(' ') and not line.startswith(' '): break
|
2305 |
+
for entry in line.strip().lower().split(' '):
|
2306 |
+
retval.append(entry)
|
2307 |
+
return retval
|
2308 |
+
|
2309 |
+
flags = get_subsection_flags(output.split('Features: ')[1]) + \
|
2310 |
+
get_subsection_flags(output.split('Extended Features (0x00000001): ')[1]) + \
|
2311 |
+
get_subsection_flags(output.split('Extended Features (0x80000001): ')[1])
|
2312 |
+
flags.sort()
|
2313 |
+
|
2314 |
+
# Convert from GHz/MHz string to Hz
|
2315 |
+
lines = [n for n in output.split('\n') if n]
|
2316 |
+
raw_hz = lines[0].split('running at ')[1].strip().lower()
|
2317 |
+
hz_advertised = raw_hz.rstrip('mhz').rstrip('ghz').strip()
|
2318 |
+
hz_advertised = _to_decimal_string(hz_advertised)
|
2319 |
+
hz_actual = hz_advertised
|
2320 |
+
|
2321 |
+
scale = 0
|
2322 |
+
if raw_hz.endswith('mhz'):
|
2323 |
+
scale = 6
|
2324 |
+
elif raw_hz.endswith('ghz'):
|
2325 |
+
scale = 9
|
2326 |
+
|
2327 |
+
info = {
|
2328 |
+
'vendor_id_raw' : vendor_id,
|
2329 |
+
'brand_raw' : processor_brand,
|
2330 |
+
|
2331 |
+
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
|
2332 |
+
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
|
2333 |
+
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
|
2334 |
+
'hz_actual' : _hz_short_to_full(hz_actual, scale),
|
2335 |
+
|
2336 |
+
'l2_cache_size' : _to_friendly_bytes(cache_size),
|
2337 |
+
|
2338 |
+
'stepping' : stepping,
|
2339 |
+
'model' : model,
|
2340 |
+
'family' : family,
|
2341 |
+
'flags' : flags
|
2342 |
+
}
|
2343 |
+
|
2344 |
+
info = _filter_dict_keys_with_empty_values(info)
|
2345 |
+
g_trace.success()
|
2346 |
+
return info
|
2347 |
+
except Exception as err:
|
2348 |
+
g_trace.fail(err)
|
2349 |
+
#raise # NOTE: To have this throw on error, uncomment this line
|
2350 |
+
return {}
|
2351 |
+
|
2352 |
+
def _get_cpu_info_from_wmic():
|
2353 |
+
'''
|
2354 |
+
Returns the CPU info gathered from WMI.
|
2355 |
+
Returns {} if not on Windows, or wmic is not installed.
|
2356 |
+
'''
|
2357 |
+
g_trace.header('Tying to get info from wmic ...')
|
2358 |
+
|
2359 |
+
try:
|
2360 |
+
# Just return {} if not Windows or there is no wmic
|
2361 |
+
if not DataSource.is_windows or not DataSource.has_wmic():
|
2362 |
+
g_trace.fail('Failed to find WMIC, or not on Windows. Skipping ...')
|
2363 |
+
return {}
|
2364 |
+
|
2365 |
+
returncode, output = DataSource.wmic_cpu()
|
2366 |
+
if output is None or returncode != 0:
|
2367 |
+
g_trace.fail('Failed to run wmic. Skipping ...')
|
2368 |
+
return {}
|
2369 |
+
|
2370 |
+
# Break the list into key values pairs
|
2371 |
+
value = output.split("\n")
|
2372 |
+
value = [s.rstrip().split('=') for s in value if '=' in s]
|
2373 |
+
		value = {k: v for k, v in value if v}

		# Get the advertised MHz
		processor_brand = value.get('Name')
		hz_advertised, scale_advertised = _parse_cpu_brand_string(processor_brand)

		# Get the actual MHz
		hz_actual = value.get('CurrentClockSpeed')
		scale_actual = 6
		if hz_actual:
			hz_actual = _to_decimal_string(hz_actual)

		# Get cache sizes
		l2_cache_size = value.get('L2CacheSize') # NOTE: L2CacheSize is in kilobytes
		if l2_cache_size:
			l2_cache_size = int(l2_cache_size) * 1024

		l3_cache_size = value.get('L3CacheSize') # NOTE: L3CacheSize is in kilobytes
		if l3_cache_size:
			l3_cache_size = int(l3_cache_size) * 1024

		# Get family, model, and stepping
		family, model, stepping = '', '', ''
		description = value.get('Description') or value.get('Caption')
		entries = description.split(' ')

		if 'Family' in entries and entries.index('Family') < len(entries)-1:
			i = entries.index('Family')
			family = int(entries[i + 1])

		if 'Model' in entries and entries.index('Model') < len(entries)-1:
			i = entries.index('Model')
			model = int(entries[i + 1])

		if 'Stepping' in entries and entries.index('Stepping') < len(entries)-1:
			i = entries.index('Stepping')
			stepping = int(entries[i + 1])

		info = {
		'vendor_id_raw' : value.get('Manufacturer'),
		'brand_raw' : processor_brand,

		'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale_advertised),
		'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale_actual),
		'hz_advertised' : _hz_short_to_full(hz_advertised, scale_advertised),
		'hz_actual' : _hz_short_to_full(hz_actual, scale_actual),

		'l2_cache_size' : l2_cache_size,
		'l3_cache_size' : l3_cache_size,

		'stepping' : stepping,
		'model' : model,
		'family' : family,
		}

		info = _filter_dict_keys_with_empty_values(info)
		g_trace.success()
		return info
	except Exception as err:
		g_trace.fail(err)
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}

def _get_cpu_info_from_registry():
	'''
	Returns the CPU info gathered from the Windows Registry.
	Returns {} if not on Windows.
	'''

	g_trace.header('Tying to get info from Windows registry ...')

	try:
		# Just return {} if not on Windows
		if not DataSource.is_windows:
			g_trace.fail('Not running on Windows. Skipping ...')
			return {}

		# Get the CPU name
		processor_brand = DataSource.winreg_processor_brand().strip()

		# Get the CPU vendor id
		vendor_id = DataSource.winreg_vendor_id_raw()

		# Get the CPU arch and bits
		arch_string_raw = DataSource.winreg_arch_string_raw()
		arch, bits = _parse_arch(arch_string_raw)

		# Get the actual CPU Hz
		hz_actual = DataSource.winreg_hz_actual()
		hz_actual = _to_decimal_string(hz_actual)

		# Get the advertised CPU Hz
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)

		# If advertised hz not found, use the actual hz
		if hz_advertised == '0.0':
			scale = 6
			hz_advertised = _to_decimal_string(hz_actual)

		# Get the CPU features
		feature_bits = DataSource.winreg_feature_bits()

		def is_set(bit):
			mask = 0x80000000 >> bit
			retval = mask & feature_bits > 0
			return retval

		# http://en.wikipedia.org/wiki/CPUID
		# http://unix.stackexchange.com/questions/43539/what-do-the-flags-in-proc-cpuinfo-mean
		# http://www.lohninger.com/helpcsuite/public_constants_cpuid.htm
		flags = {
		'fpu' : is_set(0), # Floating Point Unit
		'vme' : is_set(1), # V86 Mode Extensions
		'de' : is_set(2), # Debug Extensions - I/O breakpoints supported
		'pse' : is_set(3), # Page Size Extensions (4 MB pages supported)
		'tsc' : is_set(4), # Time Stamp Counter and RDTSC instruction are available
		'msr' : is_set(5), # Model Specific Registers
		'pae' : is_set(6), # Physical Address Extensions (36 bit address, 2MB pages)
		'mce' : is_set(7), # Machine Check Exception supported
		'cx8' : is_set(8), # Compare Exchange Eight Byte instruction available
		'apic' : is_set(9), # Local APIC present (multiprocessor operation support)
		'sepamd' : is_set(10), # Fast system calls (AMD only)
		'sep' : is_set(11), # Fast system calls
		'mtrr' : is_set(12), # Memory Type Range Registers
		'pge' : is_set(13), # Page Global Enable
		'mca' : is_set(14), # Machine Check Architecture
		'cmov' : is_set(15), # Conditional MOVe instructions
		'pat' : is_set(16), # Page Attribute Table
		'pse36' : is_set(17), # 36 bit Page Size Extensions
		'serial' : is_set(18), # Processor Serial Number
		'clflush' : is_set(19), # Cache Flush
		#'reserved1' : is_set(20), # reserved
		'dts' : is_set(21), # Debug Trace Store
		'acpi' : is_set(22), # ACPI support
		'mmx' : is_set(23), # MultiMedia Extensions
		'fxsr' : is_set(24), # FXSAVE and FXRSTOR instructions
		'sse' : is_set(25), # SSE instructions
		'sse2' : is_set(26), # SSE2 (WNI) instructions
		'ss' : is_set(27), # self snoop
		#'reserved2' : is_set(28), # reserved
		'tm' : is_set(29), # Automatic clock control
		'ia64' : is_set(30), # IA64 instructions
		'3dnow' : is_set(31) # 3DNow! instructions available
		}

		# Get a list of only the flags that are true
		flags = [k for k, v in flags.items() if v]
		flags.sort()

		info = {
		'vendor_id_raw' : vendor_id,
		'brand_raw' : processor_brand,

		'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
		'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 6),
		'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
		'hz_actual' : _hz_short_to_full(hz_actual, 6),

		'flags' : flags
		}

		info = _filter_dict_keys_with_empty_values(info)
		g_trace.success()
		return info
	except Exception as err:
		g_trace.fail(err)
		return {}

def _get_cpu_info_from_kstat():
	'''
	Returns the CPU info gathered from isainfo and kstat.
	Returns {} if isainfo or kstat are not found.
	'''

	g_trace.header('Tying to get info from kstat ...')

	try:
		# Just return {} if there is no isainfo or kstat
		if not DataSource.has_isainfo() or not DataSource.has_kstat():
			g_trace.fail('Failed to find isinfo or kstat. Skipping ...')
			return {}

		# If isainfo fails return {}
		returncode, flag_output = DataSource.isainfo_vb()
		if flag_output is None or returncode != 0:
			g_trace.fail('Failed to run \"isainfo -vb\". Skipping ...')
			return {}

		# If kstat fails return {}
		returncode, kstat = DataSource.kstat_m_cpu_info()
		if kstat is None or returncode != 0:
			g_trace.fail('Failed to run \"kstat -m cpu_info\". Skipping ...')
			return {}

		# Various fields
		vendor_id = kstat.split('\tvendor_id ')[1].split('\n')[0].strip()
		processor_brand = kstat.split('\tbrand ')[1].split('\n')[0].strip()
		stepping = int(kstat.split('\tstepping ')[1].split('\n')[0].strip())
		model = int(kstat.split('\tmodel ')[1].split('\n')[0].strip())
		family = int(kstat.split('\tfamily ')[1].split('\n')[0].strip())

		# Flags
		flags = flag_output.strip().split('\n')[-1].strip().lower().split()
		flags.sort()

		# Convert from GHz/MHz string to Hz
		scale = 6
		hz_advertised = kstat.split('\tclock_MHz ')[1].split('\n')[0].strip()
		hz_advertised = _to_decimal_string(hz_advertised)

		# Convert from GHz/MHz string to Hz
		hz_actual = kstat.split('\tcurrent_clock_Hz ')[1].split('\n')[0].strip()
		hz_actual = _to_decimal_string(hz_actual)

		info = {
		'vendor_id_raw' : vendor_id,
		'brand_raw' : processor_brand,

		'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
		'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
		'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
		'hz_actual' : _hz_short_to_full(hz_actual, 0),

		'stepping' : stepping,
		'model' : model,
		'family' : family,
		'flags' : flags
		}

		info = _filter_dict_keys_with_empty_values(info)
		g_trace.success()
		return info
	except Exception as err:
		g_trace.fail(err)
		return {}

def _get_cpu_info_from_platform_uname():

	g_trace.header('Tying to get info from platform.uname ...')

	try:
		uname = DataSource.uname_string_raw.split(',')[0]

		family, model, stepping = (None, None, None)
		entries = uname.split(' ')

		if 'Family' in entries and entries.index('Family') < len(entries)-1:
			i = entries.index('Family')
			family = int(entries[i + 1])

		if 'Model' in entries and entries.index('Model') < len(entries)-1:
			i = entries.index('Model')
			model = int(entries[i + 1])

		if 'Stepping' in entries and entries.index('Stepping') < len(entries)-1:
			i = entries.index('Stepping')
			stepping = int(entries[i + 1])

		info = {
		'family' : family,
		'model' : model,
		'stepping' : stepping
		}
		info = _filter_dict_keys_with_empty_values(info)
		g_trace.success()
		return info
	except Exception as err:
		g_trace.fail(err)
		return {}

def _get_cpu_info_internal():
	'''
	Returns the CPU info by using the best sources of information for your OS.
	Returns {} if nothing is found.
	'''

	g_trace.write('!' * 80)

	# Get the CPU arch and bits
	arch, bits = _parse_arch(DataSource.arch_string_raw)

	friendly_maxsize = { 2**31-1: '32 bit', 2**63-1: '64 bit' }.get(sys.maxsize) or 'unknown bits'
	friendly_version = "{0}.{1}.{2}.{3}.{4}".format(*sys.version_info)
	PYTHON_VERSION = "{0} ({1})".format(friendly_version, friendly_maxsize)

	info = {
	'python_version' : PYTHON_VERSION,
	'cpuinfo_version' : CPUINFO_VERSION,
	'cpuinfo_version_string' : CPUINFO_VERSION_STRING,
	'arch' : arch,
	'bits' : bits,
	'count' : DataSource.cpu_count,
	'arch_string_raw' : DataSource.arch_string_raw,
	}

	g_trace.write("python_version: {0}".format(info['python_version']))
	g_trace.write("cpuinfo_version: {0}".format(info['cpuinfo_version']))
	g_trace.write("arch: {0}".format(info['arch']))
	g_trace.write("bits: {0}".format(info['bits']))
	g_trace.write("count: {0}".format(info['count']))
	g_trace.write("arch_string_raw: {0}".format(info['arch_string_raw']))

	# Try the Windows wmic
	_copy_new_fields(info, _get_cpu_info_from_wmic())

	# Try the Windows registry
	_copy_new_fields(info, _get_cpu_info_from_registry())

	# Try /proc/cpuinfo
	_copy_new_fields(info, _get_cpu_info_from_proc_cpuinfo())

	# Try cpufreq-info
	_copy_new_fields(info, _get_cpu_info_from_cpufreq_info())

	# Try LSCPU
	_copy_new_fields(info, _get_cpu_info_from_lscpu())

	# Try sysctl
	_copy_new_fields(info, _get_cpu_info_from_sysctl())

	# Try kstat
	_copy_new_fields(info, _get_cpu_info_from_kstat())

	# Try dmesg
	_copy_new_fields(info, _get_cpu_info_from_dmesg())

	# Try /var/run/dmesg.boot
	_copy_new_fields(info, _get_cpu_info_from_cat_var_run_dmesg_boot())

	# Try lsprop ibm,pa-features
	_copy_new_fields(info, _get_cpu_info_from_ibm_pa_features())

	# Try sysinfo
	_copy_new_fields(info, _get_cpu_info_from_sysinfo())

	# Try querying the CPU cpuid register
	# FIXME: This should print stdout and stderr to trace log
	_copy_new_fields(info, _get_cpu_info_from_cpuid())

	# Try platform.uname
	_copy_new_fields(info, _get_cpu_info_from_platform_uname())

	g_trace.write('!' * 80)

	return info

def get_cpu_info_json():
	'''
	Returns the CPU info by using the best sources of information for your OS.
	Returns the result in a json string
	'''

	import json

	output = None

	# If running under pyinstaller, run normally
	if getattr(sys, 'frozen', False):
		info = _get_cpu_info_internal()
		output = json.dumps(info)
		output = "{0}".format(output)
	# if not running under pyinstaller, run in another process.
	# This is done because multiprocesing has a design flaw that
	# causes non main programs to run multiple times on Windows.
	else:
		from subprocess import Popen, PIPE

		command = [sys.executable, __file__, '--json']
		p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
		output = p1.communicate()[0]

		if p1.returncode != 0:
			return "{}"

		output = output.decode(encoding='UTF-8')

	return output

def get_cpu_info():
	'''
	Returns the CPU info by using the best sources of information for your OS.
	Returns the result in a dict
	'''

	import json

	output = get_cpu_info_json()

	# Convert JSON to Python with non unicode strings
	output = json.loads(output, object_hook = _utf_to_str)

	return output

def main():
	from argparse import ArgumentParser
	import json

	# Parse args
	parser = ArgumentParser(description='Gets CPU info with pure Python')
	parser.add_argument('--json', action='store_true', help='Return the info in JSON format')
	parser.add_argument('--version', action='store_true', help='Return the version of py-cpuinfo')
	parser.add_argument('--trace', action='store_true', help='Traces code paths used to find CPU info to file')
	args = parser.parse_args()

	global g_trace
	g_trace = Trace(args.trace, False)

	try:
		_check_arch()
	except Exception as err:
		sys.stderr.write(str(err) + "\n")
		sys.exit(1)

	info = _get_cpu_info_internal()

	if not info:
		sys.stderr.write("Failed to find cpu info\n")
		sys.exit(1)

	if args.json:
		print(json.dumps(info))
	elif args.version:
		print(CPUINFO_VERSION_STRING)
	else:
		print('Python Version: {0}'.format(info.get('python_version', '')))
		print('Cpuinfo Version: {0}'.format(info.get('cpuinfo_version_string', '')))
		print('Vendor ID Raw: {0}'.format(info.get('vendor_id_raw', '')))
		print('Hardware Raw: {0}'.format(info.get('hardware_raw', '')))
		print('Brand Raw: {0}'.format(info.get('brand_raw', '')))
		print('Hz Advertised Friendly: {0}'.format(info.get('hz_advertised_friendly', '')))
		print('Hz Actual Friendly: {0}'.format(info.get('hz_actual_friendly', '')))
		print('Hz Advertised: {0}'.format(info.get('hz_advertised', '')))
		print('Hz Actual: {0}'.format(info.get('hz_actual', '')))
		print('Arch: {0}'.format(info.get('arch', '')))
		print('Bits: {0}'.format(info.get('bits', '')))
		print('Count: {0}'.format(info.get('count', '')))
		print('Arch String Raw: {0}'.format(info.get('arch_string_raw', '')))
		print('L1 Data Cache Size: {0}'.format(info.get('l1_data_cache_size', '')))
		print('L1 Instruction Cache Size: {0}'.format(info.get('l1_instruction_cache_size', '')))
		print('L2 Cache Size: {0}'.format(info.get('l2_cache_size', '')))
		print('L2 Cache Line Size: {0}'.format(info.get('l2_cache_line_size', '')))
		print('L2 Cache Associativity: {0}'.format(info.get('l2_cache_associativity', '')))
		print('L3 Cache Size: {0}'.format(info.get('l3_cache_size', '')))
		print('Stepping: {0}'.format(info.get('stepping', '')))
		print('Model: {0}'.format(info.get('model', '')))
		print('Family: {0}'.format(info.get('family', '')))
		print('Processor Type: {0}'.format(info.get('processor_type', '')))
		print('Flags: {0}'.format(', '.join(info.get('flags', ''))))


if __name__ == '__main__':
	main()
else:
	g_trace = Trace(False, False)
	_check_arch()
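
For reference, a minimal usage sketch of the get_cpu_info() API defined in the file above (assumes the cpuinfo package in this venv is importable; the keys shown are those built by _get_cpu_info_internal()):

    import cpuinfo

    # get_cpu_info() runs the best available OS-specific sources and merges them into one dict.
    info = cpuinfo.get_cpu_info()
    print(info.get('brand_raw', ''))        # advertised CPU name
    print(info.get('hz_actual_friendly'))   # human-readable current clock speed
    print(info.get('flags', []))            # sorted list of CPU feature flags
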
venv/lib/python3.10/site-packages/dateutil/__init__.py
ADDED
@@ -0,0 +1,24 @@
# -*- coding: utf-8 -*-
import sys

try:
    from ._version import version as __version__
except ImportError:
    __version__ = 'unknown'

__all__ = ['easter', 'parser', 'relativedelta', 'rrule', 'tz',
           'utils', 'zoneinfo']

def __getattr__(name):
    import importlib

    if name in __all__:
        return importlib.import_module("." + name, __name__)
    raise AttributeError(
        "module {!r} has not attribute {!r}".format(__name__, name)
    )


def __dir__():
    # __dir__ should include all the lazy-importable modules as well.
    return [x for x in globals() if x not in sys.modules] + __all__
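
A short sketch of how the module-level __getattr__ above lazily imports submodules (illustrative only; the behaviour is exactly what the file implements):

    import dateutil

    # Accessing a name listed in __all__ triggers importlib.import_module("." + name, "dateutil").
    tz = dateutil.tz              # imported on first access, then cached in sys.modules
    print(dateutil.__version__)
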
venv/lib/python3.10/site-packages/dateutil/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (943 Bytes)
venv/lib/python3.10/site-packages/dateutil/__pycache__/relativedelta.cpython-310.pyc
ADDED
Binary file (15.7 kB)
venv/lib/python3.10/site-packages/dateutil/__pycache__/tzwin.cpython-310.pyc
ADDED
Binary file (196 Bytes)
venv/lib/python3.10/site-packages/dateutil/_common.py
ADDED
@@ -0,0 +1,43 @@
"""
Common code used in multiple modules.
"""


class weekday(object):
    __slots__ = ["weekday", "n"]

    def __init__(self, weekday, n=None):
        self.weekday = weekday
        self.n = n

    def __call__(self, n):
        if n == self.n:
            return self
        else:
            return self.__class__(self.weekday, n)

    def __eq__(self, other):
        try:
            if self.weekday != other.weekday or self.n != other.n:
                return False
        except AttributeError:
            return False
        return True

    def __hash__(self):
        return hash((
            self.weekday,
            self.n,
        ))

    def __ne__(self, other):
        return not (self == other)

    def __repr__(self):
        s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
        if not self.n:
            return s
        else:
            return "%s(%+d)" % (s, self.n)

# vim:ts=4:sw=4:et
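
An illustrative sketch of the weekday helper above (the MO, TU, ... constants in relativedelta are instances of this class):

    from dateutil._common import weekday

    MO = weekday(0)              # Monday, no ordinal
    second_monday = MO(+2)       # __call__ returns a new instance with n=+2
    print(repr(second_monday))   # "MO(+2)"
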
venv/lib/python3.10/site-packages/dateutil/_version.py
ADDED
@@ -0,0 +1,4 @@
# file generated by setuptools_scm
# don't change, don't track in version control
__version__ = version = '2.9.0.post0'
__version_tuple__ = version_tuple = (2, 9, 0)
venv/lib/python3.10/site-packages/dateutil/easter.py
ADDED
@@ -0,0 +1,89 @@
# -*- coding: utf-8 -*-
"""
This module offers a generic Easter computing method for any given year, using
Western, Orthodox or Julian algorithms.
"""

import datetime

__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"]

EASTER_JULIAN = 1
EASTER_ORTHODOX = 2
EASTER_WESTERN = 3


def easter(year, method=EASTER_WESTERN):
    """
    This method was ported from the work done by GM Arts,
    on top of the algorithm by Claus Tondering, which was
    based in part on the algorithm of Ouding (1940), as
    quoted in "Explanatory Supplement to the Astronomical
    Almanac", P. Kenneth Seidelmann, editor.

    This algorithm implements three different Easter
    calculation methods:

    1. Original calculation in Julian calendar, valid in
       dates after 326 AD
    2. Original method, with date converted to Gregorian
       calendar, valid in years 1583 to 4099
    3. Revised method, in Gregorian calendar, valid in
       years 1583 to 4099 as well

    These methods are represented by the constants:

    * ``EASTER_JULIAN = 1``
    * ``EASTER_ORTHODOX = 2``
    * ``EASTER_WESTERN = 3``

    The default method is method 3.

    More about the algorithm may be found at:

    `GM Arts: Easter Algorithms <http://www.gmarts.org/index.php?go=415>`_

    and

    `The Calendar FAQ: Easter <https://www.tondering.dk/claus/cal/easter.php>`_

    """

    if not (1 <= method <= 3):
        raise ValueError("invalid method")

    # g - Golden year - 1
    # c - Century
    # h - (23 - Epact) mod 30
    # i - Number of days from March 21 to Paschal Full Moon
    # j - Weekday for PFM (0=Sunday, etc)
    # p - Number of days from March 21 to Sunday on or before PFM
    #     (-6 to 28 methods 1 & 3, to 56 for method 2)
    # e - Extra days to add for method 2 (converting Julian
    #     date to Gregorian date)

    y = year
    g = y % 19
    e = 0
    if method < 3:
        # Old method
        i = (19*g + 15) % 30
        j = (y + y//4 + i) % 7
        if method == 2:
            # Extra dates to convert Julian to Gregorian date
            e = 10
            if y > 1600:
                e = e + y//100 - 16 - (y//100 - 16)//4
    else:
        # New method
        c = y//100
        h = (c - c//4 - (8*c + 13)//25 + 19*g + 15) % 30
        i = h - (h//28)*(1 - (h//28)*(29//(h + 1))*((21 - g)//11))
        j = (y + y//4 + i + 2 - c + c//4) % 7

    # p can be from -6 to 56 corresponding to dates 22 March to 23 May
    # (later dates apply to method 2, although 23 May never actually occurs)
    p = i - j + e
    d = 1 + (p + 27 + (p + 6)//40) % 31
    m = 3 + (p + 26)//30
    return datetime.date(int(y), int(m), int(d))
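
A small usage sketch of easter() as defined above (the method constants come from the same module; dates shown are for 2024):

    from dateutil.easter import easter, EASTER_ORTHODOX

    print(easter(2024))                    # Western (default): 2024-03-31
    print(easter(2024, EASTER_ORTHODOX))   # Orthodox: 2024-05-05
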
venv/lib/python3.10/site-packages/dateutil/relativedelta.py
ADDED
@@ -0,0 +1,599 @@
# -*- coding: utf-8 -*-
import datetime
import calendar

import operator
from math import copysign

from six import integer_types
from warnings import warn

from ._common import weekday

MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7))

__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"]


class relativedelta(object):
    """
    The relativedelta type is designed to be applied to an existing datetime and
    can replace specific components of that datetime, or represents an interval
    of time.

    It is based on the specification of the excellent work done by M.-A. Lemburg
    in his
    `mx.DateTime <https://www.egenix.com/products/python/mxBase/mxDateTime/>`_ extension.
    However, notice that this type does *NOT* implement the same algorithm as
    his work. Do *NOT* expect it to behave like mx.DateTime's counterpart.

    There are two different ways to build a relativedelta instance. The
    first one is passing it two date/datetime classes::

        relativedelta(datetime1, datetime2)

    The second one is passing it any number of the following keyword arguments::

        relativedelta(arg1=x,arg2=y,arg3=z...)

        year, month, day, hour, minute, second, microsecond:
            Absolute information (argument is singular); adding or subtracting a
            relativedelta with absolute information does not perform an arithmetic
            operation, but rather REPLACES the corresponding value in the
            original datetime with the value(s) in relativedelta.

        years, months, weeks, days, hours, minutes, seconds, microseconds:
            Relative information, may be negative (argument is plural); adding
            or subtracting a relativedelta with relative information performs
            the corresponding arithmetic operation on the original datetime value
            with the information in the relativedelta.

        weekday:
            One of the weekday instances (MO, TU, etc) available in the
            relativedelta module. These instances may receive a parameter N,
            specifying the Nth weekday, which could be positive or negative
            (like MO(+1) or MO(-2)). Not specifying it is the same as specifying
            +1. You can also use an integer, where 0=MO. This argument is always
            relative e.g. if the calculated date is already Monday, using MO(1)
            or MO(-1) won't change the day. To effectively make it absolute, use
            it in combination with the day argument (e.g. day=1, MO(1) for first
            Monday of the month).

        leapdays:
            Will add given days to the date found, if year is a leap
            year, and the date found is post 28 of february.

        yearday, nlyearday:
            Set the yearday or the non-leap year day (jump leap days).
            These are converted to day/month/leapdays information.

    There are relative and absolute forms of the keyword
    arguments. The plural is relative, and the singular is
    absolute. For each argument in the order below, the absolute form
    is applied first (by setting each attribute to that value) and
    then the relative form (by adding the value to the attribute).

    The order of attributes considered when this relativedelta is
    added to a datetime is:

    1. Year
    2. Month
    3. Day
    4. Hours
    5. Minutes
    6. Seconds
    7. Microseconds

    Finally, weekday is applied, using the rule described above.

    For example

    >>> from datetime import datetime
    >>> from dateutil.relativedelta import relativedelta, MO
    >>> dt = datetime(2018, 4, 9, 13, 37, 0)
    >>> delta = relativedelta(hours=25, day=1, weekday=MO(1))
    >>> dt + delta
    datetime.datetime(2018, 4, 2, 14, 37)

    First, the day is set to 1 (the first of the month), then 25 hours
    are added, to get to the 2nd day and 14th hour, finally the
    weekday is applied, but since the 2nd is already a Monday there is
    no effect.

    """

    def __init__(self, dt1=None, dt2=None,
                 years=0, months=0, days=0, leapdays=0, weeks=0,
                 hours=0, minutes=0, seconds=0, microseconds=0,
                 year=None, month=None, day=None, weekday=None,
                 yearday=None, nlyearday=None,
                 hour=None, minute=None, second=None, microsecond=None):

        if dt1 and dt2:
            # datetime is a subclass of date. So both must be date
            if not (isinstance(dt1, datetime.date) and
                    isinstance(dt2, datetime.date)):
                raise TypeError("relativedelta only diffs datetime/date")

            # We allow two dates, or two datetimes, so we coerce them to be
            # of the same type
            if (isinstance(dt1, datetime.datetime) !=
                    isinstance(dt2, datetime.datetime)):
                if not isinstance(dt1, datetime.datetime):
                    dt1 = datetime.datetime.fromordinal(dt1.toordinal())
                elif not isinstance(dt2, datetime.datetime):
                    dt2 = datetime.datetime.fromordinal(dt2.toordinal())

            self.years = 0
            self.months = 0
            self.days = 0
            self.leapdays = 0
            self.hours = 0
            self.minutes = 0
            self.seconds = 0
            self.microseconds = 0
            self.year = None
            self.month = None
            self.day = None
            self.weekday = None
            self.hour = None
            self.minute = None
            self.second = None
            self.microsecond = None
            self._has_time = 0

            # Get year / month delta between the two
            months = (dt1.year - dt2.year) * 12 + (dt1.month - dt2.month)
            self._set_months(months)

            # Remove the year/month delta so the timedelta is just well-defined
            # time units (seconds, days and microseconds)
            dtm = self.__radd__(dt2)

            # If we've overshot our target, make an adjustment
            if dt1 < dt2:
                compare = operator.gt
                increment = 1
            else:
                compare = operator.lt
                increment = -1

            while compare(dt1, dtm):
                months += increment
                self._set_months(months)
                dtm = self.__radd__(dt2)

            # Get the timedelta between the "months-adjusted" date and dt1
            delta = dt1 - dtm
            self.seconds = delta.seconds + delta.days * 86400
            self.microseconds = delta.microseconds
        else:
            # Check for non-integer values in integer-only quantities
            if any(x is not None and x != int(x) for x in (years, months)):
                raise ValueError("Non-integer years and months are "
                                 "ambiguous and not currently supported.")

            # Relative information
            self.years = int(years)
            self.months = int(months)
            self.days = days + weeks * 7
            self.leapdays = leapdays
            self.hours = hours
            self.minutes = minutes
            self.seconds = seconds
            self.microseconds = microseconds

            # Absolute information
            self.year = year
            self.month = month
            self.day = day
            self.hour = hour
            self.minute = minute
            self.second = second
            self.microsecond = microsecond

            if any(x is not None and int(x) != x
                   for x in (year, month, day, hour,
                             minute, second, microsecond)):
                # For now we'll deprecate floats - later it'll be an error.
                warn("Non-integer value passed as absolute information. " +
                     "This is not a well-defined condition and will raise " +
                     "errors in future versions.", DeprecationWarning)

            if isinstance(weekday, integer_types):
                self.weekday = weekdays[weekday]
            else:
                self.weekday = weekday

            yday = 0
            if nlyearday:
                yday = nlyearday
            elif yearday:
                yday = yearday
                if yearday > 59:
                    self.leapdays = -1
            if yday:
                ydayidx = [31, 59, 90, 120, 151, 181, 212,
                           243, 273, 304, 334, 366]
                for idx, ydays in enumerate(ydayidx):
                    if yday <= ydays:
                        self.month = idx+1
                        if idx == 0:
                            self.day = yday
                        else:
                            self.day = yday-ydayidx[idx-1]
                        break
                else:
                    raise ValueError("invalid year day (%d)" % yday)

        self._fix()

    def _fix(self):
        if abs(self.microseconds) > 999999:
            s = _sign(self.microseconds)
            div, mod = divmod(self.microseconds * s, 1000000)
            self.microseconds = mod * s
            self.seconds += div * s
        if abs(self.seconds) > 59:
            s = _sign(self.seconds)
            div, mod = divmod(self.seconds * s, 60)
            self.seconds = mod * s
            self.minutes += div * s
        if abs(self.minutes) > 59:
            s = _sign(self.minutes)
            div, mod = divmod(self.minutes * s, 60)
            self.minutes = mod * s
            self.hours += div * s
        if abs(self.hours) > 23:
            s = _sign(self.hours)
            div, mod = divmod(self.hours * s, 24)
            self.hours = mod * s
            self.days += div * s
        if abs(self.months) > 11:
            s = _sign(self.months)
            div, mod = divmod(self.months * s, 12)
            self.months = mod * s
            self.years += div * s
        if (self.hours or self.minutes or self.seconds or self.microseconds
                or self.hour is not None or self.minute is not None or
                self.second is not None or self.microsecond is not None):
            self._has_time = 1
        else:
            self._has_time = 0

    @property
    def weeks(self):
        return int(self.days / 7.0)

    @weeks.setter
    def weeks(self, value):
        self.days = self.days - (self.weeks * 7) + value * 7

    def _set_months(self, months):
        self.months = months
        if abs(self.months) > 11:
            s = _sign(self.months)
            div, mod = divmod(self.months * s, 12)
            self.months = mod * s
            self.years = div * s
        else:
            self.years = 0

    def normalized(self):
        """
        Return a version of this object represented entirely using integer
        values for the relative attributes.

        >>> relativedelta(days=1.5, hours=2).normalized()
        relativedelta(days=+1, hours=+14)

        :return:
            Returns a :class:`dateutil.relativedelta.relativedelta` object.
        """
        # Cascade remainders down (rounding each to roughly nearest microsecond)
        days = int(self.days)

        hours_f = round(self.hours + 24 * (self.days - days), 11)
        hours = int(hours_f)

        minutes_f = round(self.minutes + 60 * (hours_f - hours), 10)
        minutes = int(minutes_f)

        seconds_f = round(self.seconds + 60 * (minutes_f - minutes), 8)
        seconds = int(seconds_f)

        microseconds = round(self.microseconds + 1e6 * (seconds_f - seconds))

        # Constructor carries overflow back up with call to _fix()
        return self.__class__(years=self.years, months=self.months,
                              days=days, hours=hours, minutes=minutes,
                              seconds=seconds, microseconds=microseconds,
                              leapdays=self.leapdays, year=self.year,
                              month=self.month, day=self.day,
                              weekday=self.weekday, hour=self.hour,
                              minute=self.minute, second=self.second,
                              microsecond=self.microsecond)

    def __add__(self, other):
        if isinstance(other, relativedelta):
            return self.__class__(years=other.years + self.years,
                                  months=other.months + self.months,
                                  days=other.days + self.days,
                                  hours=other.hours + self.hours,
                                  minutes=other.minutes + self.minutes,
                                  seconds=other.seconds + self.seconds,
                                  microseconds=(other.microseconds +
                                                self.microseconds),
                                  leapdays=other.leapdays or self.leapdays,
                                  year=(other.year if other.year is not None
                                        else self.year),
                                  month=(other.month if other.month is not None
                                         else self.month),
                                  day=(other.day if other.day is not None
                                       else self.day),
                                  weekday=(other.weekday if other.weekday is not None
                                           else self.weekday),
                                  hour=(other.hour if other.hour is not None
                                        else self.hour),
                                  minute=(other.minute if other.minute is not None
                                          else self.minute),
                                  second=(other.second if other.second is not None
                                          else self.second),
                                  microsecond=(other.microsecond if other.microsecond
                                               is not None else
                                               self.microsecond))
        if isinstance(other, datetime.timedelta):
            return self.__class__(years=self.years,
                                  months=self.months,
                                  days=self.days + other.days,
                                  hours=self.hours,
                                  minutes=self.minutes,
                                  seconds=self.seconds + other.seconds,
                                  microseconds=self.microseconds + other.microseconds,
                                  leapdays=self.leapdays,
                                  year=self.year,
                                  month=self.month,
                                  day=self.day,
                                  weekday=self.weekday,
                                  hour=self.hour,
                                  minute=self.minute,
                                  second=self.second,
                                  microsecond=self.microsecond)
        if not isinstance(other, datetime.date):
            return NotImplemented
        elif self._has_time and not isinstance(other, datetime.datetime):
            other = datetime.datetime.fromordinal(other.toordinal())
        year = (self.year or other.year)+self.years
        month = self.month or other.month
        if self.months:
            assert 1 <= abs(self.months) <= 12
            month += self.months
            if month > 12:
                year += 1
                month -= 12
            elif month < 1:
                year -= 1
                month += 12
        day = min(calendar.monthrange(year, month)[1],
                  self.day or other.day)
        repl = {"year": year, "month": month, "day": day}
        for attr in ["hour", "minute", "second", "microsecond"]:
            value = getattr(self, attr)
            if value is not None:
                repl[attr] = value
        days = self.days
        if self.leapdays and month > 2 and calendar.isleap(year):
            days += self.leapdays
        ret = (other.replace(**repl)
               + datetime.timedelta(days=days,
                                    hours=self.hours,
                                    minutes=self.minutes,
                                    seconds=self.seconds,
                                    microseconds=self.microseconds))
        if self.weekday:
            weekday, nth = self.weekday.weekday, self.weekday.n or 1
            jumpdays = (abs(nth) - 1) * 7
            if nth > 0:
                jumpdays += (7 - ret.weekday() + weekday) % 7
            else:
                jumpdays += (ret.weekday() - weekday) % 7
                jumpdays *= -1
            ret += datetime.timedelta(days=jumpdays)
        return ret

    def __radd__(self, other):
        return self.__add__(other)

    def __rsub__(self, other):
        return self.__neg__().__radd__(other)

    def __sub__(self, other):
        if not isinstance(other, relativedelta):
            return NotImplemented  # In case the other object defines __rsub__
        return self.__class__(years=self.years - other.years,
                              months=self.months - other.months,
                              days=self.days - other.days,
                              hours=self.hours - other.hours,
                              minutes=self.minutes - other.minutes,
                              seconds=self.seconds - other.seconds,
                              microseconds=self.microseconds - other.microseconds,
                              leapdays=self.leapdays or other.leapdays,
                              year=(self.year if self.year is not None
                                    else other.year),
                              month=(self.month if self.month is not None else
                                     other.month),
                              day=(self.day if self.day is not None else
                                   other.day),
                              weekday=(self.weekday if self.weekday is not None else
                                       other.weekday),
                              hour=(self.hour if self.hour is not None else
                                    other.hour),
                              minute=(self.minute if self.minute is not None else
                                      other.minute),
                              second=(self.second if self.second is not None else
                                      other.second),
                              microsecond=(self.microsecond if self.microsecond
                                           is not None else
                                           other.microsecond))

    def __abs__(self):
        return self.__class__(years=abs(self.years),
                              months=abs(self.months),
                              days=abs(self.days),
                              hours=abs(self.hours),
                              minutes=abs(self.minutes),
                              seconds=abs(self.seconds),
                              microseconds=abs(self.microseconds),
                              leapdays=self.leapdays,
                              year=self.year,
                              month=self.month,
                              day=self.day,
                              weekday=self.weekday,
                              hour=self.hour,
                              minute=self.minute,
                              second=self.second,
                              microsecond=self.microsecond)

    def __neg__(self):
        return self.__class__(years=-self.years,
                              months=-self.months,
                              days=-self.days,
                              hours=-self.hours,
                              minutes=-self.minutes,
                              seconds=-self.seconds,
                              microseconds=-self.microseconds,
                              leapdays=self.leapdays,
                              year=self.year,
                              month=self.month,
                              day=self.day,
                              weekday=self.weekday,
                              hour=self.hour,
                              minute=self.minute,
                              second=self.second,
                              microsecond=self.microsecond)

    def __bool__(self):
        return not (not self.years and
                    not self.months and
                    not self.days and
                    not self.hours and
                    not self.minutes and
                    not self.seconds and
                    not self.microseconds and
                    not self.leapdays and
                    self.year is None and
                    self.month is None and
                    self.day is None and
                    self.weekday is None and
                    self.hour is None and
                    self.minute is None and
                    self.second is None and
                    self.microsecond is None)
    # Compatibility with Python 2.x
    __nonzero__ = __bool__

    def __mul__(self, other):
        try:
            f = float(other)
        except TypeError:
            return NotImplemented

        return self.__class__(years=int(self.years * f),
                              months=int(self.months * f),
                              days=int(self.days * f),
                              hours=int(self.hours * f),
                              minutes=int(self.minutes * f),
                              seconds=int(self.seconds * f),
                              microseconds=int(self.microseconds * f),
                              leapdays=self.leapdays,
                              year=self.year,
                              month=self.month,
                              day=self.day,
                              weekday=self.weekday,
                              hour=self.hour,
                              minute=self.minute,
                              second=self.second,
                              microsecond=self.microsecond)

    __rmul__ = __mul__

    def __eq__(self, other):
        if not isinstance(other, relativedelta):
            return NotImplemented
        if self.weekday or other.weekday:
            if not self.weekday or not other.weekday:
                return False
            if self.weekday.weekday != other.weekday.weekday:
                return False
            n1, n2 = self.weekday.n, other.weekday.n
            if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)):
                return False
        return (self.years == other.years and
                self.months == other.months and
                self.days == other.days and
                self.hours == other.hours and
                self.minutes == other.minutes and
                self.seconds == other.seconds and
                self.microseconds == other.microseconds and
                self.leapdays == other.leapdays and
                self.year == other.year and
                self.month == other.month and
                self.day == other.day and
                self.hour == other.hour and
                self.minute == other.minute and
                self.second == other.second and
                self.microsecond == other.microsecond)

    def __hash__(self):
        return hash((
            self.weekday,
            self.years,
            self.months,
            self.days,
            self.hours,
            self.minutes,
            self.seconds,
            self.microseconds,
            self.leapdays,
            self.year,
            self.month,
            self.day,
            self.hour,
            self.minute,
            self.second,
            self.microsecond,
        ))

    def __ne__(self, other):
        return not self.__eq__(other)

    def __div__(self, other):
        try:
            reciprocal = 1 / float(other)
        except TypeError:
            return NotImplemented

        return self.__mul__(reciprocal)

    __truediv__ = __div__

    def __repr__(self):
        l = []
        for attr in ["years", "months", "days", "leapdays",
                     "hours", "minutes", "seconds", "microseconds"]:
            value = getattr(self, attr)
            if value:
                l.append("{attr}={value:+g}".format(attr=attr, value=value))
        for attr in ["year", "month", "day", "weekday",
                     "hour", "minute", "second", "microsecond"]:
            value = getattr(self, attr)
            if value is not None:
                l.append("{attr}={value}".format(attr=attr, value=repr(value)))
        return "{classname}({attrs})".format(classname=self.__class__.__name__,
                                             attrs=", ".join(l))


def _sign(x):
    return int(copysign(1, x))

# vim:ts=4:sw=4:et
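
A brief usage sketch of relativedelta as documented in the class docstring above (the values come from the docstring's own example):

    from datetime import datetime
    from dateutil.relativedelta import relativedelta, MO

    dt = datetime(2018, 4, 9, 13, 37, 0)
    # day=1 is applied first (absolute), then hours=25 (relative), then the weekday rule.
    print(dt + relativedelta(hours=25, day=1, weekday=MO(1)))  # 2018-04-02 14:37:00
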
venv/lib/python3.10/site-packages/dateutil/tzwin.py
ADDED
@@ -0,0 +1,2 @@
# tzwin has moved to dateutil.tz.win
from .tz.win import *
venv/lib/python3.10/site-packages/dateutil/utils.py
ADDED
@@ -0,0 +1,71 @@
# -*- coding: utf-8 -*-
"""
This module offers general convenience and utility functions for dealing with
datetimes.

.. versionadded:: 2.7.0
"""
from __future__ import unicode_literals

from datetime import datetime, time


def today(tzinfo=None):
    """
    Returns a :py:class:`datetime` representing the current day at midnight

    :param tzinfo:
        The time zone to attach (also used to determine the current day).

    :return:
        A :py:class:`datetime.datetime` object representing the current day
        at midnight.
    """

    dt = datetime.now(tzinfo)
    return datetime.combine(dt.date(), time(0, tzinfo=tzinfo))


def default_tzinfo(dt, tzinfo):
    """
    Sets the ``tzinfo`` parameter on naive datetimes only

    This is useful for example when you are provided a datetime that may have
    either an implicit or explicit time zone, such as when parsing a time zone
    string.

    .. doctest::

        >>> from dateutil.tz import tzoffset
        >>> from dateutil.parser import parse
        >>> from dateutil.utils import default_tzinfo
        >>> dflt_tz = tzoffset("EST", -18000)
        >>> print(default_tzinfo(parse('2014-01-01 12:30 UTC'), dflt_tz))
        2014-01-01 12:30:00+00:00
        >>> print(default_tzinfo(parse('2014-01-01 12:30'), dflt_tz))
        2014-01-01 12:30:00-05:00

    :param dt:
        The datetime on which to replace the time zone

    :param tzinfo:
        The :py:class:`datetime.tzinfo` subclass instance to assign to
        ``dt`` if (and only if) it is naive.

    :return:
        Returns an aware :py:class:`datetime.datetime`.
    """
    if dt.tzinfo is not None:
        return dt
    else:
        return dt.replace(tzinfo=tzinfo)


def within_delta(dt1, dt2, delta):
    """
    Useful for comparing two datetimes that may have a negligible difference
    to be considered equal.
    """
    delta = abs(delta)
    difference = dt1 - dt2
    return -delta <= difference <= delta
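
A minimal sketch of the helpers above (the default_tzinfo values mirror the doctest in the file):

    from datetime import timedelta
    from dateutil.tz import tzoffset
    from dateutil.parser import parse
    from dateutil.utils import default_tzinfo, within_delta

    dflt_tz = tzoffset("EST", -18000)
    aware = default_tzinfo(parse('2014-01-01 12:30'), dflt_tz)  # naive -> EST attached
    print(aware)                                                # 2014-01-01 12:30:00-05:00
    print(within_delta(aware, aware + timedelta(seconds=1), timedelta(seconds=2)))  # True
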
venv/lib/python3.10/site-packages/dateutil/zoneinfo/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (5.76 kB)
venv/lib/python3.10/site-packages/dateutil/zoneinfo/__pycache__/rebuild.cpython-310.pyc
ADDED
Binary file (2.69 kB)
venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/INSTALLER
ADDED
@@ -0,0 +1 @@
pip
venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/LICENSE
ADDED
@@ -0,0 +1,27 @@
Copyright (c) 2005-2021 Fredrik Johansson and mpmath contributors

All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

  a. Redistributions of source code must retain the above copyright notice,
     this list of conditions and the following disclaimer.
  b. Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.
  c. Neither the name of the copyright holder nor the names of its
     contributors may be used to endorse or promote products derived
     from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/METADATA
ADDED
@@ -0,0 +1,233 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Metadata-Version: 2.1
|
2 |
+
Name: mpmath
|
3 |
+
Version: 1.3.0
|
4 |
+
Summary: Python library for arbitrary-precision floating-point arithmetic
|
5 |
+
Home-page: http://mpmath.org/
|
6 |
+
Author: Fredrik Johansson
|
7 |
+
Author-email: [email protected]
|
8 |
+
License: BSD
|
9 |
+
Project-URL: Source, https://github.com/fredrik-johansson/mpmath
|
10 |
+
Project-URL: Tracker, https://github.com/fredrik-johansson/mpmath/issues
|
11 |
+
Project-URL: Documentation, http://mpmath.org/doc/current/
|
12 |
+
Classifier: License :: OSI Approved :: BSD License
|
13 |
+
Classifier: Topic :: Scientific/Engineering :: Mathematics
|
14 |
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
15 |
+
Classifier: Programming Language :: Python
|
16 |
+
Classifier: Programming Language :: Python :: 2
|
17 |
+
Classifier: Programming Language :: Python :: 2.7
|
18 |
+
Classifier: Programming Language :: Python :: 3
|
19 |
+
Classifier: Programming Language :: Python :: 3.5
|
20 |
+
Classifier: Programming Language :: Python :: 3.6
|
21 |
+
Classifier: Programming Language :: Python :: 3.7
|
22 |
+
Classifier: Programming Language :: Python :: 3.8
|
23 |
+
Classifier: Programming Language :: Python :: 3.9
|
24 |
+
Classifier: Programming Language :: Python :: Implementation :: CPython
|
25 |
+
Classifier: Programming Language :: Python :: Implementation :: PyPy
|
26 |
+
License-File: LICENSE
|
27 |
+
Provides-Extra: develop
|
28 |
+
Requires-Dist: pytest (>=4.6) ; extra == 'develop'
|
29 |
+
Requires-Dist: pycodestyle ; extra == 'develop'
|
30 |
+
Requires-Dist: pytest-cov ; extra == 'develop'
|
31 |
+
Requires-Dist: codecov ; extra == 'develop'
|
32 |
+
Requires-Dist: wheel ; extra == 'develop'
|
33 |
+
Provides-Extra: docs
|
34 |
+
Requires-Dist: sphinx ; extra == 'docs'
|
35 |
+
Provides-Extra: gmpy
|
36 |
+
Requires-Dist: gmpy2 (>=2.1.0a4) ; (platform_python_implementation != "PyPy") and extra == 'gmpy'
|
37 |
+
Provides-Extra: tests
|
38 |
+
Requires-Dist: pytest (>=4.6) ; extra == 'tests'
|
39 |
+
|
40 |
+
mpmath
======

|pypi version| |Build status| |Code coverage status| |Zenodo Badge|

.. |pypi version| image:: https://img.shields.io/pypi/v/mpmath.svg
  :target: https://pypi.python.org/pypi/mpmath
.. |Build status| image:: https://github.com/fredrik-johansson/mpmath/workflows/test/badge.svg
  :target: https://github.com/fredrik-johansson/mpmath/actions?workflow=test
.. |Code coverage status| image:: https://codecov.io/gh/fredrik-johansson/mpmath/branch/master/graph/badge.svg
  :target: https://codecov.io/gh/fredrik-johansson/mpmath
.. |Zenodo Badge| image:: https://zenodo.org/badge/2934512.svg
  :target: https://zenodo.org/badge/latestdoi/2934512

A Python library for arbitrary-precision floating-point arithmetic.

Website: http://mpmath.org/
Main author: Fredrik Johansson <[email protected]>

Mpmath is free software released under the New BSD License (see the
LICENSE file for details)

0. History and credits
----------------------

The following people (among others) have contributed major patches
or new features to mpmath:

* Pearu Peterson <[email protected]>
* Mario Pernici <[email protected]>
* Ondrej Certik <[email protected]>
* Vinzent Steinberg <[email protected]>
* Nimish Telang <[email protected]>
* Mike Taschuk <[email protected]>
* Case Van Horsen <[email protected]>
* Jorn Baayen <[email protected]>
* Chris Smith <[email protected]>
* Juan Arias de Reyna <[email protected]>
* Ioannis Tziakos <[email protected]>
* Aaron Meurer <[email protected]>
* Stefan Krastanov <[email protected]>
* Ken Allen <[email protected]>
* Timo Hartmann <[email protected]>
* Sergey B Kirpichev <[email protected]>
* Kris Kuhlman <[email protected]>
* Paul Masson <[email protected]>
* Michael Kagalenko <[email protected]>
* Jonathan Warner <[email protected]>
* Max Gaukler <[email protected]>
* Guillermo Navas-Palencia <[email protected]>
* Nike Dattani <[email protected]>

Numerous other people have contributed by reporting bugs,
requesting new features, or suggesting improvements to the
documentation.

For a detailed changelog, including individual contributions,
see the CHANGES file.

Fredrik's work on mpmath during summer 2008 was sponsored by Google
as part of the Google Summer of Code program.

Fredrik's work on mpmath during summer 2009 was sponsored by the
American Institute of Mathematics under the support of the National Science
Foundation Grant No. 0757627 (FRG: L-functions and Modular Forms).

Any opinions, findings, and conclusions or recommendations expressed in this
material are those of the author(s) and do not necessarily reflect the
views of the sponsors.

Credit also goes to:

* The authors of the GMP library and the Python wrapper
  gmpy, enabling mpmath to become much faster at
  high precision
* The authors of MPFR, pari/gp, MPFUN, and other arbitrary-
  precision libraries, whose documentation has been helpful
  for implementing many of the algorithms in mpmath
* Wikipedia contributors; Abramowitz & Stegun; Gradshteyn & Ryzhik;
  Wolfram Research for MathWorld and the Wolfram Functions site.
  These are the main references used for special functions
  implementations.
* George Brandl for developing the Sphinx documentation tool
  used to build mpmath's documentation

Release history:

* Version 1.3.0 released on March 7, 2023
* Version 1.2.0 released on February 1, 2021
* Version 1.1.0 released on December 11, 2018
* Version 1.0.0 released on September 27, 2017
* Version 0.19 released on June 10, 2014
* Version 0.18 released on December 31, 2013
* Version 0.17 released on February 1, 2011
* Version 0.16 released on September 24, 2010
* Version 0.15 released on June 6, 2010
* Version 0.14 released on February 5, 2010
* Version 0.13 released on August 13, 2009
* Version 0.12 released on June 9, 2009
* Version 0.11 released on January 26, 2009
* Version 0.10 released on October 15, 2008
* Version 0.9 released on August 23, 2008
* Version 0.8 released on April 20, 2008
* Version 0.7 released on March 12, 2008
* Version 0.6 released on January 13, 2008
* Version 0.5 released on November 24, 2007
* Version 0.4 released on November 3, 2007
* Version 0.3 released on October 5, 2007
* Version 0.2 released on October 2, 2007
* Version 0.1 released on September 27, 2007

1. Download & installation
--------------------------

Mpmath requires Python 2.7 or 3.5 (or later versions). It has been tested
with CPython 2.7, 3.5 through 3.7 and with PyPy.

The latest release of mpmath can be downloaded from the mpmath
website and from https://github.com/fredrik-johansson/mpmath/releases

It should also be available in the Python Package Index at
https://pypi.python.org/pypi/mpmath

To install the latest release of Mpmath with pip, simply run

``pip install mpmath``

Or unpack the mpmath archive and run

``python setup.py install``

Mpmath can also be installed using

``python -m easy_install mpmath``

The latest development code is available from
https://github.com/fredrik-johansson/mpmath

See the main documentation for more detailed instructions.

2. Running tests
----------------

The unit tests in mpmath/tests/ can be run via the script
runtests.py, but it is recommended to run them with py.test
(https://pytest.org/), especially
to generate more useful reports in case there are failures.

You may also want to check out the demo scripts in the demo
directory.

The master branch is automatically tested by Travis CI.

3. Documentation
----------------

Documentation in reStructuredText format is available in the
doc directory included with the source package. These files
are human-readable, but can be compiled to prettier HTML using
the build.py script (requires Sphinx, http://sphinx.pocoo.org/).

See setup.txt in the documentation for more information.

The most recent documentation is also available in HTML format:

http://mpmath.org/doc/current/

4. Known problems
-----------------

Mpmath is a work in progress. Major issues include:

* Some functions may return incorrect values when given extremely
  large arguments or arguments very close to singularities.

* Directed rounding works for arithmetic operations. It is implemented
  heuristically for other operations, and their results may be off by one
  or two units in the last place (even if otherwise accurate).

* Some IEEE 754 features are not available. Infinities and NaN are
  partially supported; denormal rounding is currently not available
  at all.

* The interface for switching precision and rounding is not finalized.
  The current method is not threadsafe.

5. Help and bug reports
-----------------------

General questions and comments can be sent to the mpmath mailing list,

You can also report bugs and send patches to the mpmath issue tracker,
https://github.com/fredrik-johansson/mpmath/issues
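(Aside, not part of the packaged METADATA above: the installation and test-running notes in the README can be exercised with a minimal Python sketch. It assumes the mpmath 1.3.0 package vendored in this venv imports cleanly; mp, mpf, sqrt, and mp.dps are standard mpmath names.)

# Minimal usage sketch for the vendored mpmath package.
from mpmath import mp, mpf, sqrt

mp.dps = 50                  # work with 50 significant decimal digits
x = sqrt(mpf(2))
print(x)                     # square root of 2 to roughly 50 digits
print(x * x - 2)             # residual is tiny, illustrating the extended precision

The test suite mentioned in section 2 of the README can likewise be run with pytest pointed at the mpmath/tests/ directory inside site-packages.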
venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/RECORD
ADDED
@@ -0,0 +1,180 @@
mpmath-1.3.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
mpmath-1.3.0.dist-info/LICENSE,sha256=wmyugdpFCOXiSZhXd6M4IfGDIj67dNf4z7-Q_n7vL7c,1537
mpmath-1.3.0.dist-info/METADATA,sha256=RLZupES5wNGa6UgV01a_BHrmtoDBkmi1wmVofNaoFAY,8630
mpmath-1.3.0.dist-info/RECORD,,
mpmath-1.3.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
mpmath-1.3.0.dist-info/top_level.txt,sha256=BUVWrh8EVlkOhM1n3X9S8msTaVcC-3s6Sjt60avHYus,7
mpmath/__init__.py,sha256=skFYTSwfwDBLChAV6pI3SdewgAQR3UBtyrfIK_Jdn-g,8765
mpmath/__pycache__/__init__.cpython-310.pyc,,
mpmath/__pycache__/ctx_base.cpython-310.pyc,,
mpmath/__pycache__/ctx_fp.cpython-310.pyc,,
mpmath/__pycache__/ctx_iv.cpython-310.pyc,,
mpmath/__pycache__/ctx_mp.cpython-310.pyc,,
mpmath/__pycache__/ctx_mp_python.cpython-310.pyc,,
mpmath/__pycache__/function_docs.cpython-310.pyc,,
mpmath/__pycache__/identification.cpython-310.pyc,,
mpmath/__pycache__/math2.cpython-310.pyc,,
mpmath/__pycache__/rational.cpython-310.pyc,,
mpmath/__pycache__/usertools.cpython-310.pyc,,
mpmath/__pycache__/visualization.cpython-310.pyc,,
mpmath/calculus/__init__.py,sha256=UAgCIJ1YmaeyTqpNzjBlCZGeIzLtUZMEEpl99VWNjus,162
mpmath/calculus/__pycache__/__init__.cpython-310.pyc,,
mpmath/calculus/__pycache__/approximation.cpython-310.pyc,,
mpmath/calculus/__pycache__/calculus.cpython-310.pyc,,
mpmath/calculus/__pycache__/differentiation.cpython-310.pyc,,
mpmath/calculus/__pycache__/extrapolation.cpython-310.pyc,,
mpmath/calculus/__pycache__/inverselaplace.cpython-310.pyc,,
mpmath/calculus/__pycache__/odes.cpython-310.pyc,,
mpmath/calculus/__pycache__/optimization.cpython-310.pyc,,
mpmath/calculus/__pycache__/polynomials.cpython-310.pyc,,
mpmath/calculus/__pycache__/quadrature.cpython-310.pyc,,
mpmath/calculus/approximation.py,sha256=vyzu3YI6r63Oq1KFHrQz02mGXAcH23emqNYhJuUaFZ4,8817
mpmath/calculus/calculus.py,sha256=A0gSp0hxSyEDfugJViY3CeWalF-vK701YftzrjSQzQ4,112
mpmath/calculus/differentiation.py,sha256=2L6CBj8xtX9iip98NPbKsLtwtRjxi571wYmTMHFeL90,20226
mpmath/calculus/extrapolation.py,sha256=xM0rvk2DFEF4iR1Jhl-Y3aS93iW9VVJX7y9IGpmzC-A,73306
mpmath/calculus/inverselaplace.py,sha256=5-pn8N_t0PtgBTXixsXZ4xxrihK2J5gYsVfTKfDx4gA,36056
mpmath/calculus/odes.py,sha256=gaHiw7IJjsONNTAa6izFPZpmcg9uyTp8MULnGdzTIGo,9908
mpmath/calculus/optimization.py,sha256=bKnShXElBOmVOIOlFeksDsYCp9fYSmYwKmXDt0z26MM,32856
mpmath/calculus/polynomials.py,sha256=D16BhU_SHbVi06IxNwABHR-H77IylndNsN3muPTuFYs,7877
mpmath/calculus/quadrature.py,sha256=n-avtS8E43foV-5tr5lofgOBaiMUYE8AJjQcWI9QcKk,42432
mpmath/ctx_base.py,sha256=rfjmfMyA55x8R_cWFINUwWVTElfZmyx5erKDdauSEVw,15985
mpmath/ctx_fp.py,sha256=ctUjx_NoU0iFWk05cXDYCL2ZtLZOlWs1n6Zao3pbG2g,6572
mpmath/ctx_iv.py,sha256=tqdMr-GDfkZk1EhoGeCAajy7pQv-RWtrVqhYjfI8r4g,17211
mpmath/ctx_mp.py,sha256=d3r4t7xHNqSFtmqsA9Btq1Npy3WTM-pcM2_jeCyECxY,49452
mpmath/ctx_mp_python.py,sha256=3olYWo4lk1SnQ0A_IaZ181qqG8u5pxGat_v-L4Qtn3Y,37815
mpmath/function_docs.py,sha256=g4PP8n6ILXmHcLyA50sxK6Tmp_Z4_pRN-wDErU8D1i4,283512
mpmath/functions/__init__.py,sha256=YXVdhqv-6LKm6cr5xxtTNTtuD9zDPKGQl8GmS0xz2xo,330
mpmath/functions/__pycache__/__init__.cpython-310.pyc,,
mpmath/functions/__pycache__/bessel.cpython-310.pyc,,
mpmath/functions/__pycache__/elliptic.cpython-310.pyc,,
mpmath/functions/__pycache__/expintegrals.cpython-310.pyc,,
mpmath/functions/__pycache__/factorials.cpython-310.pyc,,
mpmath/functions/__pycache__/functions.cpython-310.pyc,,
mpmath/functions/__pycache__/hypergeometric.cpython-310.pyc,,
mpmath/functions/__pycache__/orthogonal.cpython-310.pyc,,
mpmath/functions/__pycache__/qfunctions.cpython-310.pyc,,
mpmath/functions/__pycache__/rszeta.cpython-310.pyc,,
mpmath/functions/__pycache__/signals.cpython-310.pyc,,
mpmath/functions/__pycache__/theta.cpython-310.pyc,,
mpmath/functions/__pycache__/zeta.cpython-310.pyc,,
mpmath/functions/__pycache__/zetazeros.cpython-310.pyc,,
mpmath/functions/bessel.py,sha256=dUPLu8frlK-vmf3-irX_7uvwyw4xccv6EIizmIZ88kM,37938
mpmath/functions/elliptic.py,sha256=qz0yVMb4lWEeOTDL_DWz5u5awmGIPKAsuZFJXgwHJNU,42237
mpmath/functions/expintegrals.py,sha256=75X_MRdYc1F_X73bgNiOJqwRlS2hqAzcFLl3RM2tCDc,11644
mpmath/functions/factorials.py,sha256=8_6kCR7e4k1GwxiAOJu0NRadeF4jA28qx4hidhu4ILk,5273
mpmath/functions/functions.py,sha256=ub2JExvqzCWLkm5yAm72Fr6fdWmZZUknq9_3w9MEigI,18100
mpmath/functions/hypergeometric.py,sha256=Z0OMAMC4ylK42n_SnamyFVnUx6zHLyCLCoJDSZ1JrHY,51570
mpmath/functions/orthogonal.py,sha256=FabkxKfBoSseA5flWu1a3re-2BYaew9augqIsT8LaLw,16097
mpmath/functions/qfunctions.py,sha256=a3EHGKQt_jMd4x9I772Jz-TGFnGY-arWqPvZGz9QSe0,7633
mpmath/functions/rszeta.py,sha256=yuUVp4ilIyDmXyE3WTBxDDjwfEJNypJnbPS-xPH5How,46184
mpmath/functions/signals.py,sha256=ELotwQaW1CDpv-eeJzOZ5c23NhfaZcj9_Gkb3psvS0Q,703
mpmath/functions/theta.py,sha256=KggOocczoMG6_HMoal4oEP7iZ4SKOou9JFE-WzY2r3M,37320
mpmath/functions/zeta.py,sha256=ue7JY7GXA0oX8q08sQJl2CSRrZ7kOt8HsftpVjnTwrE,36410
mpmath/functions/zetazeros.py,sha256=uq6TVyZBcY2MLX7VSdVfn0TOkowBLM9fXtnySEwaNzw,30858
mpmath/identification.py,sha256=7aMdngRAaeL_MafDUNbmEIlGQSklHDZ8pmPFt-OLgkw,29253
mpmath/libmp/__init__.py,sha256=UCDjLZw4brbklaCmSixCcPdLdHkz8sF_-6F_wr0duAg,3790
mpmath/libmp/__pycache__/__init__.cpython-310.pyc,,
mpmath/libmp/__pycache__/backend.cpython-310.pyc,,
mpmath/libmp/__pycache__/gammazeta.cpython-310.pyc,,
mpmath/libmp/__pycache__/libelefun.cpython-310.pyc,,
mpmath/libmp/__pycache__/libhyper.cpython-310.pyc,,
mpmath/libmp/__pycache__/libintmath.cpython-310.pyc,,
mpmath/libmp/__pycache__/libmpc.cpython-310.pyc,,
mpmath/libmp/__pycache__/libmpf.cpython-310.pyc,,
mpmath/libmp/__pycache__/libmpi.cpython-310.pyc,,
mpmath/libmp/backend.py,sha256=26A8pUkaGov26vrrFNQVyWJ5LDtK8sl3UHrYLecaTjA,3360
mpmath/libmp/gammazeta.py,sha256=Xqdw6PMoswDaSca_sOs-IglRuk3fb8c9p43M_lbcrlc,71469
mpmath/libmp/libelefun.py,sha256=joBZP4FOdxPfieWso1LPtSr6dHydpG_LQiF_bYQYWMg,43861
mpmath/libmp/libhyper.py,sha256=J9fmdDF6u27EcssEWvBuVaAa3hFjPvPN1SgRgu1dEbc,36624
mpmath/libmp/libintmath.py,sha256=aIRT0rkUZ_sdGQf3TNCLd-pBMvtQWjssbvFLfK7U0jc,16688
mpmath/libmp/libmpc.py,sha256=KBndUjs5YVS32-Id3fflDfYgpdW1Prx6zfo8Ez5Qbrs,26875
mpmath/libmp/libmpf.py,sha256=vpP0kNVkScbCVoZogJ4Watl4I7Ce0d4dzHVjfVe57so,45021
mpmath/libmp/libmpi.py,sha256=u0I5Eiwkqa-4-dXETi5k7MuaxBeZbvCAPFtl93U9YF0,27622
mpmath/math2.py,sha256=O5Dglg81SsW0wfHDUJcXOD8-cCaLvbVIvyw0sVmRbpI,18561
mpmath/matrices/__init__.py,sha256=ETzGDciYbq9ftiKwaMbJ15EI-KNXHrzRb-ZHehhqFjs,94
mpmath/matrices/__pycache__/__init__.cpython-310.pyc,,
mpmath/matrices/__pycache__/calculus.cpython-310.pyc,,
mpmath/matrices/__pycache__/eigen.cpython-310.pyc,,
mpmath/matrices/__pycache__/eigen_symmetric.cpython-310.pyc,,
mpmath/matrices/__pycache__/linalg.cpython-310.pyc,,
mpmath/matrices/__pycache__/matrices.cpython-310.pyc,,
mpmath/matrices/calculus.py,sha256=PNRq-p2nxgT-fzC54K2depi8ddhdx6Q86G8qpUiHeUY,18609
mpmath/matrices/eigen.py,sha256=GbDXI3CixzEdXxr1G86uUWkAngAvd-05MmSQ-Tsu_5k,24394
mpmath/matrices/eigen_symmetric.py,sha256=FPKPeQr1cGYw6Y6ea32a1YdEWQDLP6JlQHEA2WfNLYg,58534
mpmath/matrices/linalg.py,sha256=04C3ijzMFom7ob5fXBCDfyPPdo3BIboIeE8x2A6vqF0,26958
mpmath/matrices/matrices.py,sha256=o78Eq62EHQnxcsR0LBoWDEGREOoN4L2iDM1q3dQrw0o,32331
mpmath/rational.py,sha256=64d56fvZXngYZT7nOAHeFRUX77eJ1A0R3rpfWBU-mSo,5976
mpmath/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
mpmath/tests/__pycache__/__init__.cpython-310.pyc,,
mpmath/tests/__pycache__/extratest_gamma.cpython-310.pyc,,
mpmath/tests/__pycache__/extratest_zeta.cpython-310.pyc,,
mpmath/tests/__pycache__/runtests.cpython-310.pyc,,
mpmath/tests/__pycache__/test_basic_ops.cpython-310.pyc,,
mpmath/tests/__pycache__/test_bitwise.cpython-310.pyc,,
mpmath/tests/__pycache__/test_calculus.cpython-310.pyc,,
mpmath/tests/__pycache__/test_compatibility.cpython-310.pyc,,
mpmath/tests/__pycache__/test_convert.cpython-310.pyc,,
mpmath/tests/__pycache__/test_diff.cpython-310.pyc,,
mpmath/tests/__pycache__/test_division.cpython-310.pyc,,
mpmath/tests/__pycache__/test_eigen.cpython-310.pyc,,
mpmath/tests/__pycache__/test_eigen_symmetric.cpython-310.pyc,,
mpmath/tests/__pycache__/test_elliptic.cpython-310.pyc,,
mpmath/tests/__pycache__/test_fp.cpython-310.pyc,,
mpmath/tests/__pycache__/test_functions.cpython-310.pyc,,
mpmath/tests/__pycache__/test_functions2.cpython-310.pyc,,
mpmath/tests/__pycache__/test_gammazeta.cpython-310.pyc,,
mpmath/tests/__pycache__/test_hp.cpython-310.pyc,,
mpmath/tests/__pycache__/test_identify.cpython-310.pyc,,
mpmath/tests/__pycache__/test_interval.cpython-310.pyc,,
mpmath/tests/__pycache__/test_levin.cpython-310.pyc,,
mpmath/tests/__pycache__/test_linalg.cpython-310.pyc,,
mpmath/tests/__pycache__/test_matrices.cpython-310.pyc,,
mpmath/tests/__pycache__/test_mpmath.cpython-310.pyc,,
mpmath/tests/__pycache__/test_ode.cpython-310.pyc,,
mpmath/tests/__pycache__/test_pickle.cpython-310.pyc,,
mpmath/tests/__pycache__/test_power.cpython-310.pyc,,
mpmath/tests/__pycache__/test_quad.cpython-310.pyc,,
mpmath/tests/__pycache__/test_rootfinding.cpython-310.pyc,,
mpmath/tests/__pycache__/test_special.cpython-310.pyc,,
mpmath/tests/__pycache__/test_str.cpython-310.pyc,,
mpmath/tests/__pycache__/test_summation.cpython-310.pyc,,
mpmath/tests/__pycache__/test_trig.cpython-310.pyc,,
mpmath/tests/__pycache__/test_visualization.cpython-310.pyc,,
mpmath/tests/__pycache__/torture.cpython-310.pyc,,
mpmath/tests/extratest_gamma.py,sha256=xidhXUelILcxtiPGoTBHjqUOKIJzEaZ_v3nntGQyWZQ,7228
mpmath/tests/extratest_zeta.py,sha256=sg10j9RhjBpV2EdUqyYhGV2ERWvM--EvwwGIz6HTmlw,1003
mpmath/tests/runtests.py,sha256=7NUV82F3K_5AhU8mCLUFf5OibtT7uloFCwPyM3l71wM,5189
mpmath/tests/test_basic_ops.py,sha256=dsB8DRG-GrPzBaZ-bIauYabaeqXbfqBo9SIP9BqcTSs,15348
mpmath/tests/test_bitwise.py,sha256=-nLYhgQbhDza3SQM63BhktYntACagqMYx9ib3dPnTKM,7686
mpmath/tests/test_calculus.py,sha256=4oxtNfMpO4RLLoOzrv7r9-h8BcqfBsJIE6UpsHe7c4w,9187
mpmath/tests/test_compatibility.py,sha256=_t3ASZ3jhfAMnN1voWX7PDNIDzn-3PokkJGIdT1x7y0,2306
mpmath/tests/test_convert.py,sha256=JPcDcTJIWh5prIxjx5DM1aNWgqlUoF2KpHvAgK3uHi4,8834
mpmath/tests/test_diff.py,sha256=qjiF8NxQ8vueuZ5ZHGPQ-kjcj_I7Jh_fEdFtaA8DzEI,2466
mpmath/tests/test_division.py,sha256=6lUeZfmaBWvvszdqlWLMHgXPjVsxvW1WZpd4-jFWCpU,5340
mpmath/tests/test_eigen.py,sha256=2mnqVATGbsJkvSVHPpitfAk881twFfb3LsO3XikV9Hs,3905
mpmath/tests/test_eigen_symmetric.py,sha256=v0VimCicIU2owASDMBaP-t-30uq-pXcsglt95KBtNO4,8778
mpmath/tests/test_elliptic.py,sha256=Kjiwq9Bb6N_OOzzWewGQ1M_PMa7vRs42V0t90gloZxo,26225
mpmath/tests/test_fp.py,sha256=AJo0FTyH4BuUnUsv176LD956om308KGYndy-b54KGxM,89997
mpmath/tests/test_functions.py,sha256=b47VywdomoOX6KmMmz9-iv2IqVIydwKSuUw2pWlFHrY,30955
mpmath/tests/test_functions2.py,sha256=vlw2RWhL1oTcifnOMDx1a_YzN96UgNNIE5STeKRv1HY,96990
mpmath/tests/test_gammazeta.py,sha256=AB34O0DV7AlEf9Z4brnCadeQU5-uAwhWRw5FZas65DA,27917
mpmath/tests/test_hp.py,sha256=6hcENu6Te2klPEiTSeLBIRPlH7PADlJwFKbx8xpnOhg,10461
mpmath/tests/test_identify.py,sha256=lGUIPfrB2paTg0cFUo64GmMzF77F9gs9FQjX7gxGHV8,692
mpmath/tests/test_interval.py,sha256=TjYd7a9ca6iRJiLjw06isLeZTuGoGAPmgleDZ0cYfJ0,17527
mpmath/tests/test_levin.py,sha256=P8M11yV1dj_gdSNv5xuwCzFiF86QyRDtPMjURy6wJ28,5090
mpmath/tests/test_linalg.py,sha256=miKEnwB8iwWV13hi1bF1cg3hgB4rTKOR0fvDVfWmXds,10440
mpmath/tests/test_matrices.py,sha256=qyA4Ml2CvNvW034lzB01G6wVgNr7UrgZqh2wkMXtpzM,7944
mpmath/tests/test_mpmath.py,sha256=LVyJUeofiaxW-zLKWVBCz59L9UQsjlW0Ts9_oBiEv_4,196
mpmath/tests/test_ode.py,sha256=zAxexBH4fnmFNO4bvEHbug1NJWC5zqfFaVDlYijowkY,1822
mpmath/tests/test_pickle.py,sha256=Y8CKmDLFsJHUqG8CDaBw5ilrPP4YT1xijVduLpQ7XFE,401
mpmath/tests/test_power.py,sha256=sz_K02SmNxpa6Kb1uJLN_N4tXTJGdQ___vPRshEN7Gk,5227
mpmath/tests/test_quad.py,sha256=49Ltft0vZ_kdKLL5s-Kj-BzAVoF5LPVEUeNUzdOkghI,3893
mpmath/tests/test_rootfinding.py,sha256=umQegEaKHmYOEl5jEyoD-VLKDtXsTJJkepKEr4c0dC0,3132
mpmath/tests/test_special.py,sha256=YbMIoMIkJEvvKYIzS0CXthJFG0--j6un7-tcE6b7FPM,2848
mpmath/tests/test_str.py,sha256=0WsGD9hMPRi8zcuYMA9Cu2mOvQiCFskPwMsMf8lBDK4,544
mpmath/tests/test_summation.py,sha256=fdNlsvRVOsbWxbhlyDLDaEO2S8kTJrRMKIvB5-aNci0,2035
mpmath/tests/test_trig.py,sha256=zPtkIEnZaThxcWur4k7BX8-2Jmj-AhO191Svv7ANYUU,4799
mpmath/tests/test_visualization.py,sha256=1PqtkoUx-WsKYgTRiu5o9pBc85kwhf1lzU2eobDQCJM,944
mpmath/tests/torture.py,sha256=LD95oES7JY2KroELK-m-jhvtbvZaKChnt0Cq7kFMNCw,7868
mpmath/usertools.py,sha256=a-TDw7XSRsPdBEffxOooDV4WDFfuXnO58P75dcAD87I,3029
mpmath/visualization.py,sha256=pnnbjcd9AhFVRBZavYX5gjx4ytK_kXoDDisYR6EpXhs,10627
venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/WHEEL
ADDED
@@ -0,0 +1,5 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.38.4)
Root-Is-Purelib: true
Tag: py3-none-any
venv/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/top_level.txt
ADDED
@@ -0,0 +1 @@
mpmath
venv/lib/python3.10/site-packages/networkx/readwrite/__pycache__/edgelist.cpython-310.pyc
ADDED
Binary file (13.2 kB)
venv/lib/python3.10/site-packages/networkx/readwrite/__pycache__/graphml.cpython-310.pyc
ADDED
Binary file (28.5 kB)
venv/lib/python3.10/site-packages/networkx/readwrite/__pycache__/multiline_adjlist.cpython-310.pyc
ADDED
Binary file (9.53 kB)
venv/lib/python3.10/site-packages/networkx/readwrite/__pycache__/p2g.cpython-310.pyc
ADDED
Binary file (3.09 kB)
venv/lib/python3.10/site-packages/networkx/readwrite/__pycache__/sparse6.cpython-310.pyc
ADDED
Binary file (9.79 kB)
venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/__init__.py
ADDED
@@ -0,0 +1,18 @@
"""
*********
JSON data
*********
Generate and parse JSON serializable data for NetworkX graphs.

These formats are suitable for use with the d3.js examples https://d3js.org/

The three formats that you can generate with NetworkX are:

- node-link like in the d3.js example https://bl.ocks.org/mbostock/4062045
- tree like in the d3.js example https://bl.ocks.org/mbostock/4063550
- adjacency like in the d3.js example https://bost.ocks.org/mike/miserables/
"""
from networkx.readwrite.json_graph.node_link import *
from networkx.readwrite.json_graph.adjacency import *
from networkx.readwrite.json_graph.tree import *
from networkx.readwrite.json_graph.cytoscape import *
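(Aside: the module docstring above lists the JSON layouts this subpackage generates. A minimal sketch of producing d3.js-friendly node-link data, assuming this networkx installation is importable:)

# Sketch: JSON-serializable node-link data, as described in the docstring above.
import json
import networkx as nx

G = nx.path_graph(3)            # simple graph 0-1-2
data = nx.node_link_data(G)     # dict with 'nodes' and 'links' entries
print(json.dumps(data))         # ready for d3.js-style consumers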
venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (879 Bytes)
venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/__pycache__/adjacency.cpython-310.pyc
ADDED
Binary file (4.16 kB)
venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/__pycache__/cytoscape.cpython-310.pyc
ADDED
Binary file (4.88 kB)
venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/__pycache__/node_link.cpython-310.pyc
ADDED
Binary file (7.66 kB)
venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/__pycache__/tree.cpython-310.pyc
ADDED
Binary file (4.13 kB)
venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/adjacency.py
ADDED
@@ -0,0 +1,156 @@
import networkx as nx

__all__ = ["adjacency_data", "adjacency_graph"]

_attrs = {"id": "id", "key": "key"}


def adjacency_data(G, attrs=_attrs):
    """Returns data in adjacency format that is suitable for JSON serialization
    and use in JavaScript documents.

    Parameters
    ----------
    G : NetworkX graph

    attrs : dict
        A dictionary that contains two keys 'id' and 'key'. The corresponding
        values provide the attribute names for storing NetworkX-internal graph
        data. The values should be unique. Default value:
        :samp:`dict(id='id', key='key')`.

        If some user-defined graph data use these attribute names as data keys,
        they may be silently dropped.

    Returns
    -------
    data : dict
        A dictionary with adjacency formatted data.

    Raises
    ------
    NetworkXError
        If values in attrs are not unique.

    Examples
    --------
    >>> from networkx.readwrite import json_graph
    >>> G = nx.Graph([(1, 2)])
    >>> data = json_graph.adjacency_data(G)

    To serialize with json

    >>> import json
    >>> s = json.dumps(data)

    Notes
    -----
    Graph, node, and link attributes will be written when using this format
    but attribute keys must be strings if you want to serialize the resulting
    data with JSON.

    The default value of attrs will be changed in a future release of NetworkX.

    See Also
    --------
    adjacency_graph, node_link_data, tree_data
    """
    multigraph = G.is_multigraph()
    id_ = attrs["id"]
    # Allow 'key' to be omitted from attrs if the graph is not a multigraph.
    key = None if not multigraph else attrs["key"]
    if id_ == key:
        raise nx.NetworkXError("Attribute names are not unique.")
    data = {}
    data["directed"] = G.is_directed()
    data["multigraph"] = multigraph
    data["graph"] = list(G.graph.items())
    data["nodes"] = []
    data["adjacency"] = []
    for n, nbrdict in G.adjacency():
        data["nodes"].append({**G.nodes[n], id_: n})
        adj = []
        if multigraph:
            for nbr, keys in nbrdict.items():
                for k, d in keys.items():
                    adj.append({**d, id_: nbr, key: k})
        else:
            for nbr, d in nbrdict.items():
                adj.append({**d, id_: nbr})
        data["adjacency"].append(adj)
    return data


@nx._dispatchable(graphs=None, returns_graph=True)
def adjacency_graph(data, directed=False, multigraph=True, attrs=_attrs):
    """Returns graph from adjacency data format.

    Parameters
    ----------
    data : dict
        Adjacency list formatted graph data

    directed : bool
        If True, and direction not specified in data, return a directed graph.

    multigraph : bool
        If True, and multigraph not specified in data, return a multigraph.

    attrs : dict
        A dictionary that contains two keys 'id' and 'key'. The corresponding
        values provide the attribute names for storing NetworkX-internal graph
        data. The values should be unique. Default value:
        :samp:`dict(id='id', key='key')`.

    Returns
    -------
    G : NetworkX graph
        A NetworkX graph object

    Examples
    --------
    >>> from networkx.readwrite import json_graph
    >>> G = nx.Graph([(1, 2)])
    >>> data = json_graph.adjacency_data(G)
    >>> H = json_graph.adjacency_graph(data)

    Notes
    -----
    The default value of attrs will be changed in a future release of NetworkX.

    See Also
    --------
    adjacency_graph, node_link_data, tree_data
    """
    multigraph = data.get("multigraph", multigraph)
    directed = data.get("directed", directed)
    if multigraph:
        graph = nx.MultiGraph()
    else:
        graph = nx.Graph()
    if directed:
        graph = graph.to_directed()
    id_ = attrs["id"]
    # Allow 'key' to be omitted from attrs if the graph is not a multigraph.
    key = None if not multigraph else attrs["key"]
    graph.graph = dict(data.get("graph", []))
    mapping = []
    for d in data["nodes"]:
        node_data = d.copy()
        node = node_data.pop(id_)
        mapping.append(node)
        graph.add_node(node)
        graph.nodes[node].update(node_data)
    for i, d in enumerate(data["adjacency"]):
        source = mapping[i]
        for tdata in d:
            target_data = tdata.copy()
            target = target_data.pop(id_)
            if not multigraph:
                graph.add_edge(source, target)
                graph[source][target].update(target_data)
            else:
                ky = target_data.pop(key, None)
                graph.add_edge(source, target, key=ky)
                graph[source][target][ky].update(target_data)
    return graph
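(Aside: a short round-trip sketch mirroring the doctests in adjacency.py above, assuming networkx is importable from this venv.)

# Sketch: adjacency-format round trip through JSON.
import json
import networkx as nx
from networkx.readwrite import json_graph

G = nx.Graph([(1, 2), (2, 3)])
data = json_graph.adjacency_data(G)                    # 'nodes' plus per-node 'adjacency' lists
H = json_graph.adjacency_graph(json.loads(json.dumps(data)))
print(sorted(H.edges()) == sorted(G.edges()))          # True: edges survive the round trip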
venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/cytoscape.py
ADDED
@@ -0,0 +1,178 @@
import networkx as nx

__all__ = ["cytoscape_data", "cytoscape_graph"]


def cytoscape_data(G, name="name", ident="id"):
    """Returns data in Cytoscape JSON format (cyjs).

    Parameters
    ----------
    G : NetworkX Graph
        The graph to convert to cytoscape format
    name : string
        A string which is mapped to the 'name' node element in cyjs format.
        Must not have the same value as `ident`.
    ident : string
        A string which is mapped to the 'id' node element in cyjs format.
        Must not have the same value as `name`.

    Returns
    -------
    data: dict
        A dictionary with cyjs formatted data.

    Raises
    ------
    NetworkXError
        If the values for `name` and `ident` are identical.

    See Also
    --------
    cytoscape_graph: convert a dictionary in cyjs format to a graph

    References
    ----------
    .. [1] Cytoscape user's manual:
       http://manual.cytoscape.org/en/stable/index.html

    Examples
    --------
    >>> G = nx.path_graph(2)
    >>> nx.cytoscape_data(G)  # doctest: +SKIP
    {'data': [],
     'directed': False,
     'multigraph': False,
     'elements': {'nodes': [{'data': {'id': '0', 'value': 0, 'name': '0'}},
       {'data': {'id': '1', 'value': 1, 'name': '1'}}],
      'edges': [{'data': {'source': 0, 'target': 1}}]}}
    """
    if name == ident:
        raise nx.NetworkXError("name and ident must be different.")

    jsondata = {"data": list(G.graph.items())}
    jsondata["directed"] = G.is_directed()
    jsondata["multigraph"] = G.is_multigraph()
    jsondata["elements"] = {"nodes": [], "edges": []}
    nodes = jsondata["elements"]["nodes"]
    edges = jsondata["elements"]["edges"]

    for i, j in G.nodes.items():
        n = {"data": j.copy()}
        n["data"]["id"] = j.get(ident) or str(i)
        n["data"]["value"] = i
        n["data"]["name"] = j.get(name) or str(i)
        nodes.append(n)

    if G.is_multigraph():
        for e in G.edges(keys=True):
            n = {"data": G.adj[e[0]][e[1]][e[2]].copy()}
            n["data"]["source"] = e[0]
            n["data"]["target"] = e[1]
            n["data"]["key"] = e[2]
            edges.append(n)
    else:
        for e in G.edges():
            n = {"data": G.adj[e[0]][e[1]].copy()}
            n["data"]["source"] = e[0]
            n["data"]["target"] = e[1]
            edges.append(n)
    return jsondata


@nx._dispatchable(graphs=None, returns_graph=True)
def cytoscape_graph(data, name="name", ident="id"):
    """
    Create a NetworkX graph from a dictionary in cytoscape JSON format.

    Parameters
    ----------
    data : dict
        A dictionary of data conforming to cytoscape JSON format.
    name : string
        A string which is mapped to the 'name' node element in cyjs format.
        Must not have the same value as `ident`.
    ident : string
        A string which is mapped to the 'id' node element in cyjs format.
        Must not have the same value as `name`.

    Returns
    -------
    graph : a NetworkX graph instance
        The `graph` can be an instance of `Graph`, `DiGraph`, `MultiGraph`, or
        `MultiDiGraph` depending on the input data.

    Raises
    ------
    NetworkXError
        If the `name` and `ident` attributes are identical.

    See Also
    --------
    cytoscape_data: convert a NetworkX graph to a dict in cyjs format

    References
    ----------
    .. [1] Cytoscape user's manual:
       http://manual.cytoscape.org/en/stable/index.html

    Examples
    --------
    >>> data_dict = {
    ...     "data": [],
    ...     "directed": False,
    ...     "multigraph": False,
    ...     "elements": {
    ...         "nodes": [
    ...             {"data": {"id": "0", "value": 0, "name": "0"}},
    ...             {"data": {"id": "1", "value": 1, "name": "1"}},
    ...         ],
    ...         "edges": [{"data": {"source": 0, "target": 1}}],
    ...     },
    ... }
    >>> G = nx.cytoscape_graph(data_dict)
    >>> G.name
    ''
    >>> G.nodes()
    NodeView((0, 1))
    >>> G.nodes(data=True)[0]
    {'id': '0', 'value': 0, 'name': '0'}
    >>> G.edges(data=True)
    EdgeDataView([(0, 1, {'source': 0, 'target': 1})])
    """
    if name == ident:
        raise nx.NetworkXError("name and ident must be different.")

    multigraph = data.get("multigraph")
    directed = data.get("directed")
    if multigraph:
        graph = nx.MultiGraph()
    else:
        graph = nx.Graph()
    if directed:
        graph = graph.to_directed()
    graph.graph = dict(data.get("data"))
    for d in data["elements"]["nodes"]:
        node_data = d["data"].copy()
        node = d["data"]["value"]

        if d["data"].get(name):
            node_data[name] = d["data"].get(name)
        if d["data"].get(ident):
            node_data[ident] = d["data"].get(ident)

        graph.add_node(node)
        graph.nodes[node].update(node_data)

    for d in data["elements"]["edges"]:
        edge_data = d["data"].copy()
        sour = d["data"]["source"]
        targ = d["data"]["target"]
        if multigraph:
            key = d["data"].get("key", 0)
            graph.add_edge(sour, targ, key=key)
            graph.edges[sour, targ, key].update(edge_data)
        else:
            graph.add_edge(sour, targ)
            graph.edges[sour, targ].update(edge_data)
    return graph
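(Aside: a Cytoscape JSON (cyjs) round trip using the two functions defined above; path_graph is just a convenient example graph.)

# Sketch: cyjs round trip.
import networkx as nx

G = nx.path_graph(2)
cyjs = nx.cytoscape_data(G)          # {'data': ..., 'directed': ..., 'elements': {...}}
H = nx.cytoscape_graph(cyjs)
print(list(H.nodes(data=True)))      # nodes carry 'id', 'value' and 'name' attributes
print(list(H.edges()))               # [(0, 1)]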
venv/lib/python3.10/site-packages/networkx/readwrite/json_graph/node_link.py
ADDED
@@ -0,0 +1,244 @@
from itertools import chain, count

import networkx as nx

__all__ = ["node_link_data", "node_link_graph"]


_attrs = {
    "source": "source",
    "target": "target",
    "name": "id",
    "key": "key",
    "link": "links",
}


def _to_tuple(x):
    """Converts lists to tuples, including nested lists.

    All other non-list inputs are passed through unmodified. This function is
    intended to be used to convert potentially nested lists from json files
    into valid nodes.

    Examples
    --------
    >>> _to_tuple([1, 2, [3, 4]])
    (1, 2, (3, 4))
    """
    if not isinstance(x, tuple | list):
        return x
    return tuple(map(_to_tuple, x))


def node_link_data(
    G,
    *,
    source="source",
    target="target",
    name="id",
    key="key",
    link="links",
):
    """Returns data in node-link format that is suitable for JSON serialization
    and use in JavaScript documents.

    Parameters
    ----------
    G : NetworkX graph
    source : string
        A string that provides the 'source' attribute name for storing NetworkX-internal graph data.
    target : string
        A string that provides the 'target' attribute name for storing NetworkX-internal graph data.
    name : string
        A string that provides the 'name' attribute name for storing NetworkX-internal graph data.
    key : string
        A string that provides the 'key' attribute name for storing NetworkX-internal graph data.
    link : string
        A string that provides the 'link' attribute name for storing NetworkX-internal graph data.

    Returns
    -------
    data : dict
        A dictionary with node-link formatted data.

    Raises
    ------
    NetworkXError
        If the values of 'source', 'target' and 'key' are not unique.

    Examples
    --------
    >>> G = nx.Graph([("A", "B")])
    >>> data1 = nx.node_link_data(G)
    >>> data1
    {'directed': False, 'multigraph': False, 'graph': {}, 'nodes': [{'id': 'A'}, {'id': 'B'}], 'links': [{'source': 'A', 'target': 'B'}]}

    To serialize with JSON

    >>> import json
    >>> s1 = json.dumps(data1)
    >>> s1
    '{"directed": false, "multigraph": false, "graph": {}, "nodes": [{"id": "A"}, {"id": "B"}], "links": [{"source": "A", "target": "B"}]}'

    A graph can also be serialized by passing `node_link_data` as an encoder function. The two methods are equivalent.

    >>> s1 = json.dumps(G, default=nx.node_link_data)
    >>> s1
    '{"directed": false, "multigraph": false, "graph": {}, "nodes": [{"id": "A"}, {"id": "B"}], "links": [{"source": "A", "target": "B"}]}'

    The attribute names for storing NetworkX-internal graph data can
    be specified as keyword options.

    >>> H = nx.gn_graph(2)
    >>> data2 = nx.node_link_data(H, link="edges", source="from", target="to")
    >>> data2
    {'directed': True, 'multigraph': False, 'graph': {}, 'nodes': [{'id': 0}, {'id': 1}], 'edges': [{'from': 1, 'to': 0}]}

    Notes
    -----
    Graph, node, and link attributes are stored in this format. Note that
    attribute keys will be converted to strings in order to comply with JSON.

    Attribute 'key' is only used for multigraphs.

    To use `node_link_data` in conjunction with `node_link_graph`,
    the keyword names for the attributes must match.


    See Also
    --------
    node_link_graph, adjacency_data, tree_data
    """
    multigraph = G.is_multigraph()

    # Allow 'key' to be omitted from attrs if the graph is not a multigraph.
    key = None if not multigraph else key
    if len({source, target, key}) < 3:
        raise nx.NetworkXError("Attribute names are not unique.")
    data = {
        "directed": G.is_directed(),
        "multigraph": multigraph,
        "graph": G.graph,
        "nodes": [{**G.nodes[n], name: n} for n in G],
    }
    if multigraph:
        data[link] = [
            {**d, source: u, target: v, key: k}
            for u, v, k, d in G.edges(keys=True, data=True)
        ]
    else:
        data[link] = [{**d, source: u, target: v} for u, v, d in G.edges(data=True)]
    return data


@nx._dispatchable(graphs=None, returns_graph=True)
def node_link_graph(
    data,
    directed=False,
    multigraph=True,
    *,
    source="source",
    target="target",
    name="id",
    key="key",
    link="links",
):
    """Returns graph from node-link data format.
    Useful for de-serialization from JSON.

    Parameters
    ----------
    data : dict
        node-link formatted graph data

    directed : bool
        If True, and direction not specified in data, return a directed graph.

    multigraph : bool
        If True, and multigraph not specified in data, return a multigraph.

    source : string
        A string that provides the 'source' attribute name for storing NetworkX-internal graph data.
    target : string
        A string that provides the 'target' attribute name for storing NetworkX-internal graph data.
    name : string
        A string that provides the 'name' attribute name for storing NetworkX-internal graph data.
    key : string
        A string that provides the 'key' attribute name for storing NetworkX-internal graph data.
    link : string
        A string that provides the 'link' attribute name for storing NetworkX-internal graph data.

    Returns
    -------
    G : NetworkX graph
        A NetworkX graph object

    Examples
    --------

    Create data in node-link format by converting a graph.

    >>> G = nx.Graph([("A", "B")])
    >>> data = nx.node_link_data(G)
    >>> data
    {'directed': False, 'multigraph': False, 'graph': {}, 'nodes': [{'id': 'A'}, {'id': 'B'}], 'links': [{'source': 'A', 'target': 'B'}]}

    Revert data in node-link format to a graph.

    >>> H = nx.node_link_graph(data)
    >>> print(H.edges)
    [('A', 'B')]

    To serialize and deserialize a graph with JSON,

    >>> import json
    >>> d = json.dumps(node_link_data(G))
    >>> H = node_link_graph(json.loads(d))
    >>> print(G.edges, H.edges)
    [('A', 'B')] [('A', 'B')]


    Notes
    -----
    Attribute 'key' is only used for multigraphs.

    To use `node_link_data` in conjunction with `node_link_graph`,
    the keyword names for the attributes must match.

    See Also
    --------
    node_link_data, adjacency_data, tree_data
    """
    multigraph = data.get("multigraph", multigraph)
    directed = data.get("directed", directed)
    if multigraph:
        graph = nx.MultiGraph()
    else:
        graph = nx.Graph()
    if directed:
        graph = graph.to_directed()

    # Allow 'key' to be omitted from attrs if the graph is not a multigraph.
    key = None if not multigraph else key
    graph.graph = data.get("graph", {})
    c = count()
    for d in data["nodes"]:
        node = _to_tuple(d.get(name, next(c)))
        nodedata = {str(k): v for k, v in d.items() if k != name}
        graph.add_node(node, **nodedata)
    for d in data[link]:
        src = tuple(d[source]) if isinstance(d[source], list) else d[source]
        tgt = tuple(d[target]) if isinstance(d[target], list) else d[target]
        if not multigraph:
            edgedata = {str(k): v for k, v in d.items() if k != source and k != target}
            graph.add_edge(src, tgt, **edgedata)
        else:
            ky = d.get(key, None)
            edgedata = {
                str(k): v
                for k, v in d.items()
                if k != source and k != target and k != key
            }
            graph.add_edge(src, tgt, ky, **edgedata)
    return graph
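(Aside: node-link serialization with custom attribute names, per the keyword options documented above; the names 'edges', 'from' and 'to' are illustrative choices, and the same keywords must be passed to both functions.)

# Sketch: node-link round trip with renamed keys.
import json
import networkx as nx

G = nx.DiGraph([("a", "b")])
data = nx.node_link_data(G, link="edges", source="from", target="to")
s = json.dumps(data)
H = nx.node_link_graph(json.loads(s), link="edges", source="from", target="to")
print(list(H.edges()))               # [('a', 'b')]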