Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- llmeval-env/lib/python3.10/site-packages/nvidia_curand_cu12-10.3.2.106.dist-info/METADATA +35 -0
- llmeval-env/lib/python3.10/site-packages/pkg_resources/__init__.py +0 -0
- llmeval-env/lib/python3.10/site-packages/pkg_resources/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/__init__.py +0 -0
- llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/appdirs.py +608 -0
- llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__about__.py +26 -0
- llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__init__.py +25 -0
- llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/__about__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/_manylinux.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/_structures.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/requirements.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/specifiers.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/tags.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/utils.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/version.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_manylinux.py +301 -0
- llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_musllinux.py +136 -0
- llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_structures.py +67 -0
- llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/markers.py +304 -0
- llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/requirements.py +146 -0
- llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/specifiers.py +828 -0
- llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/tags.py +484 -0
- llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/utils.py +136 -0
- llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/version.py +504 -0
- llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing.py +0 -0
- llmeval-env/lib/python3.10/site-packages/pkg_resources/extern/__init__.py +73 -0
- llmeval-env/lib/python3.10/site-packages/pkg_resources/extern/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pkg_resources/tests/data/my-test-package-source/__pycache__/setup.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pkg_resources/tests/data/my-test-package-source/setup.py +6 -0
- llmeval-env/lib/python3.10/site-packages/sympy/physics/__init__.py +12 -0
- llmeval-env/lib/python3.10/site-packages/sympy/physics/hep/__init__.py +0 -0
- llmeval-env/lib/python3.10/site-packages/sympy/physics/hep/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/sympy/physics/hep/__pycache__/gamma_matrices.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/sympy/physics/hep/gamma_matrices.py +716 -0
- llmeval-env/lib/python3.10/site-packages/sympy/physics/matrices.py +176 -0
- llmeval-env/lib/python3.10/site-packages/sympy/physics/paulialgebra.py +231 -0
- llmeval-env/lib/python3.10/site-packages/sympy/physics/pring.py +94 -0
- llmeval-env/lib/python3.10/site-packages/sympy/physics/qho_1d.py +88 -0
- llmeval-env/lib/python3.10/site-packages/sympy/physics/secondquant.py +3114 -0
- llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__init__.py +36 -0
- llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/dyadic.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/fieldfunctions.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/frame.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/functions.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/point.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/printing.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/vector.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/dyadic.py +601 -0
llmeval-env/lib/python3.10/site-packages/nvidia_curand_cu12-10.3.2.106.dist-info/METADATA
ADDED
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Metadata-Version: 2.1
|
2 |
+
Name: nvidia-curand-cu12
|
3 |
+
Version: 10.3.2.106
|
4 |
+
Summary: CURAND native runtime libraries
|
5 |
+
Home-page: https://developer.nvidia.com/cuda-zone
|
6 |
+
Author: Nvidia CUDA Installer Team
|
7 |
+
Author-email: [email protected]
|
8 |
+
License: NVIDIA Proprietary Software
|
9 |
+
Keywords: cuda,nvidia,runtime,machine learning,deep learning
|
10 |
+
Classifier: Development Status :: 4 - Beta
|
11 |
+
Classifier: Intended Audience :: Developers
|
12 |
+
Classifier: Intended Audience :: Education
|
13 |
+
Classifier: Intended Audience :: Science/Research
|
14 |
+
Classifier: License :: Other/Proprietary License
|
15 |
+
Classifier: Natural Language :: English
|
16 |
+
Classifier: Programming Language :: Python :: 3
|
17 |
+
Classifier: Programming Language :: Python :: 3.5
|
18 |
+
Classifier: Programming Language :: Python :: 3.6
|
19 |
+
Classifier: Programming Language :: Python :: 3.7
|
20 |
+
Classifier: Programming Language :: Python :: 3.8
|
21 |
+
Classifier: Programming Language :: Python :: 3.9
|
22 |
+
Classifier: Programming Language :: Python :: 3.10
|
23 |
+
Classifier: Programming Language :: Python :: 3.11
|
24 |
+
Classifier: Programming Language :: Python :: 3 :: Only
|
25 |
+
Classifier: Topic :: Scientific/Engineering
|
26 |
+
Classifier: Topic :: Scientific/Engineering :: Mathematics
|
27 |
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
28 |
+
Classifier: Topic :: Software Development
|
29 |
+
Classifier: Topic :: Software Development :: Libraries
|
30 |
+
Classifier: Operating System :: Microsoft :: Windows
|
31 |
+
Classifier: Operating System :: POSIX :: Linux
|
32 |
+
Requires-Python: >=3
|
33 |
+
License-File: License.txt
|
34 |
+
|
35 |
+
CURAND native runtime libraries
|
llmeval-env/lib/python3.10/site-packages/pkg_resources/__init__.py
ADDED
The diff for this file is too large to render.
See raw diff
|
|
llmeval-env/lib/python3.10/site-packages/pkg_resources/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (101 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/__init__.py
ADDED
File without changes
|
llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/appdirs.py
ADDED
@@ -0,0 +1,608 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python
|
2 |
+
# -*- coding: utf-8 -*-
|
3 |
+
# Copyright (c) 2005-2010 ActiveState Software Inc.
|
4 |
+
# Copyright (c) 2013 Eddy Petrișor
|
5 |
+
|
6 |
+
"""Utilities for determining application-specific dirs.
|
7 |
+
|
8 |
+
See <http://github.com/ActiveState/appdirs> for details and usage.
|
9 |
+
"""
|
10 |
+
# Dev Notes:
|
11 |
+
# - MSDN on where to store app data files:
|
12 |
+
# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
|
13 |
+
# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
|
14 |
+
# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
|
15 |
+
|
16 |
+
__version_info__ = (1, 4, 3)
|
17 |
+
__version__ = '.'.join(map(str, __version_info__))
|
18 |
+
|
19 |
+
|
20 |
+
import sys
|
21 |
+
import os
|
22 |
+
|
23 |
+
PY3 = sys.version_info[0] == 3
|
24 |
+
|
25 |
+
if PY3:
|
26 |
+
unicode = str
|
27 |
+
|
28 |
+
if sys.platform.startswith('java'):
|
29 |
+
import platform
|
30 |
+
os_name = platform.java_ver()[3][0]
|
31 |
+
if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
|
32 |
+
system = 'win32'
|
33 |
+
elif os_name.startswith('Mac'): # "Mac OS X", etc.
|
34 |
+
system = 'darwin'
|
35 |
+
else: # "Linux", "SunOS", "FreeBSD", etc.
|
36 |
+
# Setting this to "linux2" is not ideal, but only Windows or Mac
|
37 |
+
# are actually checked for and the rest of the module expects
|
38 |
+
# *sys.platform* style strings.
|
39 |
+
system = 'linux2'
|
40 |
+
else:
|
41 |
+
system = sys.platform
|
42 |
+
|
43 |
+
|
44 |
+
|
45 |
+
def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
|
46 |
+
r"""Return full path to the user-specific data dir for this application.
|
47 |
+
|
48 |
+
"appname" is the name of application.
|
49 |
+
If None, just the system directory is returned.
|
50 |
+
"appauthor" (only used on Windows) is the name of the
|
51 |
+
appauthor or distributing body for this application. Typically
|
52 |
+
it is the owning company name. This falls back to appname. You may
|
53 |
+
pass False to disable it.
|
54 |
+
"version" is an optional version path element to append to the
|
55 |
+
path. You might want to use this if you want multiple versions
|
56 |
+
of your app to be able to run independently. If used, this
|
57 |
+
would typically be "<major>.<minor>".
|
58 |
+
Only applied when appname is present.
|
59 |
+
"roaming" (boolean, default False) can be set True to use the Windows
|
60 |
+
roaming appdata directory. That means that for users on a Windows
|
61 |
+
network setup for roaming profiles, this user data will be
|
62 |
+
sync'd on login. See
|
63 |
+
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
|
64 |
+
for a discussion of issues.
|
65 |
+
|
66 |
+
Typical user data directories are:
|
67 |
+
Mac OS X: ~/Library/Application Support/<AppName>
|
68 |
+
Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined
|
69 |
+
Win XP (not roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
|
70 |
+
Win XP (roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
|
71 |
+
Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
|
72 |
+
Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
|
73 |
+
|
74 |
+
For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
|
75 |
+
That means, by default "~/.local/share/<AppName>".
|
76 |
+
"""
|
77 |
+
if system == "win32":
|
78 |
+
if appauthor is None:
|
79 |
+
appauthor = appname
|
80 |
+
const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
|
81 |
+
path = os.path.normpath(_get_win_folder(const))
|
82 |
+
if appname:
|
83 |
+
if appauthor is not False:
|
84 |
+
path = os.path.join(path, appauthor, appname)
|
85 |
+
else:
|
86 |
+
path = os.path.join(path, appname)
|
87 |
+
elif system == 'darwin':
|
88 |
+
path = os.path.expanduser('~/Library/Application Support/')
|
89 |
+
if appname:
|
90 |
+
path = os.path.join(path, appname)
|
91 |
+
else:
|
92 |
+
path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
|
93 |
+
if appname:
|
94 |
+
path = os.path.join(path, appname)
|
95 |
+
if appname and version:
|
96 |
+
path = os.path.join(path, version)
|
97 |
+
return path
|
98 |
+
|
99 |
+
|
100 |
+
def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
|
101 |
+
r"""Return full path to the user-shared data dir for this application.
|
102 |
+
|
103 |
+
"appname" is the name of application.
|
104 |
+
If None, just the system directory is returned.
|
105 |
+
"appauthor" (only used on Windows) is the name of the
|
106 |
+
appauthor or distributing body for this application. Typically
|
107 |
+
it is the owning company name. This falls back to appname. You may
|
108 |
+
pass False to disable it.
|
109 |
+
"version" is an optional version path element to append to the
|
110 |
+
path. You might want to use this if you want multiple versions
|
111 |
+
of your app to be able to run independently. If used, this
|
112 |
+
would typically be "<major>.<minor>".
|
113 |
+
Only applied when appname is present.
|
114 |
+
"multipath" is an optional parameter only applicable to *nix
|
115 |
+
which indicates that the entire list of data dirs should be
|
116 |
+
returned. By default, the first item from XDG_DATA_DIRS is
|
117 |
+
returned, or '/usr/local/share/<AppName>',
|
118 |
+
if XDG_DATA_DIRS is not set
|
119 |
+
|
120 |
+
Typical site data directories are:
|
121 |
+
Mac OS X: /Library/Application Support/<AppName>
|
122 |
+
Unix: /usr/local/share/<AppName> or /usr/share/<AppName>
|
123 |
+
Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
|
124 |
+
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
|
125 |
+
Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7.
|
126 |
+
|
127 |
+
For Unix, this is using the $XDG_DATA_DIRS[0] default.
|
128 |
+
|
129 |
+
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
|
130 |
+
"""
|
131 |
+
if system == "win32":
|
132 |
+
if appauthor is None:
|
133 |
+
appauthor = appname
|
134 |
+
path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
|
135 |
+
if appname:
|
136 |
+
if appauthor is not False:
|
137 |
+
path = os.path.join(path, appauthor, appname)
|
138 |
+
else:
|
139 |
+
path = os.path.join(path, appname)
|
140 |
+
elif system == 'darwin':
|
141 |
+
path = os.path.expanduser('/Library/Application Support')
|
142 |
+
if appname:
|
143 |
+
path = os.path.join(path, appname)
|
144 |
+
else:
|
145 |
+
# XDG default for $XDG_DATA_DIRS
|
146 |
+
# only first, if multipath is False
|
147 |
+
path = os.getenv('XDG_DATA_DIRS',
|
148 |
+
os.pathsep.join(['/usr/local/share', '/usr/share']))
|
149 |
+
pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
|
150 |
+
if appname:
|
151 |
+
if version:
|
152 |
+
appname = os.path.join(appname, version)
|
153 |
+
pathlist = [os.sep.join([x, appname]) for x in pathlist]
|
154 |
+
|
155 |
+
if multipath:
|
156 |
+
path = os.pathsep.join(pathlist)
|
157 |
+
else:
|
158 |
+
path = pathlist[0]
|
159 |
+
return path
|
160 |
+
|
161 |
+
if appname and version:
|
162 |
+
path = os.path.join(path, version)
|
163 |
+
return path
|
164 |
+
|
165 |
+
|
166 |
+
def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
|
167 |
+
r"""Return full path to the user-specific config dir for this application.
|
168 |
+
|
169 |
+
"appname" is the name of application.
|
170 |
+
If None, just the system directory is returned.
|
171 |
+
"appauthor" (only used on Windows) is the name of the
|
172 |
+
appauthor or distributing body for this application. Typically
|
173 |
+
it is the owning company name. This falls back to appname. You may
|
174 |
+
pass False to disable it.
|
175 |
+
"version" is an optional version path element to append to the
|
176 |
+
path. You might want to use this if you want multiple versions
|
177 |
+
of your app to be able to run independently. If used, this
|
178 |
+
would typically be "<major>.<minor>".
|
179 |
+
Only applied when appname is present.
|
180 |
+
"roaming" (boolean, default False) can be set True to use the Windows
|
181 |
+
roaming appdata directory. That means that for users on a Windows
|
182 |
+
network setup for roaming profiles, this user data will be
|
183 |
+
sync'd on login. See
|
184 |
+
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
|
185 |
+
for a discussion of issues.
|
186 |
+
|
187 |
+
Typical user config directories are:
|
188 |
+
Mac OS X: same as user_data_dir
|
189 |
+
Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined
|
190 |
+
Win *: same as user_data_dir
|
191 |
+
|
192 |
+
For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
|
193 |
+
That means, by default "~/.config/<AppName>".
|
194 |
+
"""
|
195 |
+
if system in ["win32", "darwin"]:
|
196 |
+
path = user_data_dir(appname, appauthor, None, roaming)
|
197 |
+
else:
|
198 |
+
path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
|
199 |
+
if appname:
|
200 |
+
path = os.path.join(path, appname)
|
201 |
+
if appname and version:
|
202 |
+
path = os.path.join(path, version)
|
203 |
+
return path
|
204 |
+
|
205 |
+
|
206 |
+
def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
|
207 |
+
r"""Return full path to the user-shared data dir for this application.
|
208 |
+
|
209 |
+
"appname" is the name of application.
|
210 |
+
If None, just the system directory is returned.
|
211 |
+
"appauthor" (only used on Windows) is the name of the
|
212 |
+
appauthor or distributing body for this application. Typically
|
213 |
+
it is the owning company name. This falls back to appname. You may
|
214 |
+
pass False to disable it.
|
215 |
+
"version" is an optional version path element to append to the
|
216 |
+
path. You might want to use this if you want multiple versions
|
217 |
+
of your app to be able to run independently. If used, this
|
218 |
+
would typically be "<major>.<minor>".
|
219 |
+
Only applied when appname is present.
|
220 |
+
"multipath" is an optional parameter only applicable to *nix
|
221 |
+
which indicates that the entire list of config dirs should be
|
222 |
+
returned. By default, the first item from XDG_CONFIG_DIRS is
|
223 |
+
returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
|
224 |
+
|
225 |
+
Typical site config directories are:
|
226 |
+
Mac OS X: same as site_data_dir
|
227 |
+
Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
|
228 |
+
$XDG_CONFIG_DIRS
|
229 |
+
Win *: same as site_data_dir
|
230 |
+
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
|
231 |
+
|
232 |
+
For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False
|
233 |
+
|
234 |
+
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
|
235 |
+
"""
|
236 |
+
if system in ["win32", "darwin"]:
|
237 |
+
path = site_data_dir(appname, appauthor)
|
238 |
+
if appname and version:
|
239 |
+
path = os.path.join(path, version)
|
240 |
+
else:
|
241 |
+
# XDG default for $XDG_CONFIG_DIRS
|
242 |
+
# only first, if multipath is False
|
243 |
+
path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
|
244 |
+
pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
|
245 |
+
if appname:
|
246 |
+
if version:
|
247 |
+
appname = os.path.join(appname, version)
|
248 |
+
pathlist = [os.sep.join([x, appname]) for x in pathlist]
|
249 |
+
|
250 |
+
if multipath:
|
251 |
+
path = os.pathsep.join(pathlist)
|
252 |
+
else:
|
253 |
+
path = pathlist[0]
|
254 |
+
return path
|
255 |
+
|
256 |
+
|
257 |
+
def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
|
258 |
+
r"""Return full path to the user-specific cache dir for this application.
|
259 |
+
|
260 |
+
"appname" is the name of application.
|
261 |
+
If None, just the system directory is returned.
|
262 |
+
"appauthor" (only used on Windows) is the name of the
|
263 |
+
appauthor or distributing body for this application. Typically
|
264 |
+
it is the owning company name. This falls back to appname. You may
|
265 |
+
pass False to disable it.
|
266 |
+
"version" is an optional version path element to append to the
|
267 |
+
path. You might want to use this if you want multiple versions
|
268 |
+
of your app to be able to run independently. If used, this
|
269 |
+
would typically be "<major>.<minor>".
|
270 |
+
Only applied when appname is present.
|
271 |
+
"opinion" (boolean) can be False to disable the appending of
|
272 |
+
"Cache" to the base app data dir for Windows. See
|
273 |
+
discussion below.
|
274 |
+
|
275 |
+
Typical user cache directories are:
|
276 |
+
Mac OS X: ~/Library/Caches/<AppName>
|
277 |
+
Unix: ~/.cache/<AppName> (XDG default)
|
278 |
+
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
|
279 |
+
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
|
280 |
+
|
281 |
+
On Windows the only suggestion in the MSDN docs is that local settings go in
|
282 |
+
the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
|
283 |
+
app data dir (the default returned by `user_data_dir` above). Apps typically
|
284 |
+
put cache data somewhere *under* the given dir here. Some examples:
|
285 |
+
...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
|
286 |
+
...\Acme\SuperApp\Cache\1.0
|
287 |
+
OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
|
288 |
+
This can be disabled with the `opinion=False` option.
|
289 |
+
"""
|
290 |
+
if system == "win32":
|
291 |
+
if appauthor is None:
|
292 |
+
appauthor = appname
|
293 |
+
path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
|
294 |
+
if appname:
|
295 |
+
if appauthor is not False:
|
296 |
+
path = os.path.join(path, appauthor, appname)
|
297 |
+
else:
|
298 |
+
path = os.path.join(path, appname)
|
299 |
+
if opinion:
|
300 |
+
path = os.path.join(path, "Cache")
|
301 |
+
elif system == 'darwin':
|
302 |
+
path = os.path.expanduser('~/Library/Caches')
|
303 |
+
if appname:
|
304 |
+
path = os.path.join(path, appname)
|
305 |
+
else:
|
306 |
+
path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
|
307 |
+
if appname:
|
308 |
+
path = os.path.join(path, appname)
|
309 |
+
if appname and version:
|
310 |
+
path = os.path.join(path, version)
|
311 |
+
return path
|
312 |
+
|
313 |
+
|
314 |
+
def user_state_dir(appname=None, appauthor=None, version=None, roaming=False):
|
315 |
+
r"""Return full path to the user-specific state dir for this application.
|
316 |
+
|
317 |
+
"appname" is the name of application.
|
318 |
+
If None, just the system directory is returned.
|
319 |
+
"appauthor" (only used on Windows) is the name of the
|
320 |
+
appauthor or distributing body for this application. Typically
|
321 |
+
it is the owning company name. This falls back to appname. You may
|
322 |
+
pass False to disable it.
|
323 |
+
"version" is an optional version path element to append to the
|
324 |
+
path. You might want to use this if you want multiple versions
|
325 |
+
of your app to be able to run independently. If used, this
|
326 |
+
would typically be "<major>.<minor>".
|
327 |
+
Only applied when appname is present.
|
328 |
+
"roaming" (boolean, default False) can be set True to use the Windows
|
329 |
+
roaming appdata directory. That means that for users on a Windows
|
330 |
+
network setup for roaming profiles, this user data will be
|
331 |
+
sync'd on login. See
|
332 |
+
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
|
333 |
+
for a discussion of issues.
|
334 |
+
|
335 |
+
Typical user state directories are:
|
336 |
+
Mac OS X: same as user_data_dir
|
337 |
+
Unix: ~/.local/state/<AppName> # or in $XDG_STATE_HOME, if defined
|
338 |
+
Win *: same as user_data_dir
|
339 |
+
|
340 |
+
For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state>
|
341 |
+
to extend the XDG spec and support $XDG_STATE_HOME.
|
342 |
+
|
343 |
+
That means, by default "~/.local/state/<AppName>".
|
344 |
+
"""
|
345 |
+
if system in ["win32", "darwin"]:
|
346 |
+
path = user_data_dir(appname, appauthor, None, roaming)
|
347 |
+
else:
|
348 |
+
path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state"))
|
349 |
+
if appname:
|
350 |
+
path = os.path.join(path, appname)
|
351 |
+
if appname and version:
|
352 |
+
path = os.path.join(path, version)
|
353 |
+
return path
|
354 |
+
|
355 |
+
|
356 |
+
def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
|
357 |
+
r"""Return full path to the user-specific log dir for this application.
|
358 |
+
|
359 |
+
"appname" is the name of application.
|
360 |
+
If None, just the system directory is returned.
|
361 |
+
"appauthor" (only used on Windows) is the name of the
|
362 |
+
appauthor or distributing body for this application. Typically
|
363 |
+
it is the owning company name. This falls back to appname. You may
|
364 |
+
pass False to disable it.
|
365 |
+
"version" is an optional version path element to append to the
|
366 |
+
path. You might want to use this if you want multiple versions
|
367 |
+
of your app to be able to run independently. If used, this
|
368 |
+
would typically be "<major>.<minor>".
|
369 |
+
Only applied when appname is present.
|
370 |
+
"opinion" (boolean) can be False to disable the appending of
|
371 |
+
"Logs" to the base app data dir for Windows, and "log" to the
|
372 |
+
base cache dir for Unix. See discussion below.
|
373 |
+
|
374 |
+
Typical user log directories are:
|
375 |
+
Mac OS X: ~/Library/Logs/<AppName>
|
376 |
+
Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined
|
377 |
+
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
|
378 |
+
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
|
379 |
+
|
380 |
+
On Windows the only suggestion in the MSDN docs is that local settings
|
381 |
+
go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
|
382 |
+
examples of what some windows apps use for a logs dir.)
|
383 |
+
|
384 |
+
OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
|
385 |
+
value for Windows and appends "log" to the user cache dir for Unix.
|
386 |
+
This can be disabled with the `opinion=False` option.
|
387 |
+
"""
|
388 |
+
if system == "darwin":
|
389 |
+
path = os.path.join(
|
390 |
+
os.path.expanduser('~/Library/Logs'),
|
391 |
+
appname)
|
392 |
+
elif system == "win32":
|
393 |
+
path = user_data_dir(appname, appauthor, version)
|
394 |
+
version = False
|
395 |
+
if opinion:
|
396 |
+
path = os.path.join(path, "Logs")
|
397 |
+
else:
|
398 |
+
path = user_cache_dir(appname, appauthor, version)
|
399 |
+
version = False
|
400 |
+
if opinion:
|
401 |
+
path = os.path.join(path, "log")
|
402 |
+
if appname and version:
|
403 |
+
path = os.path.join(path, version)
|
404 |
+
return path
|
405 |
+
|
406 |
+
|
407 |
+
class AppDirs(object):
|
408 |
+
"""Convenience wrapper for getting application dirs."""
|
409 |
+
def __init__(self, appname=None, appauthor=None, version=None,
|
410 |
+
roaming=False, multipath=False):
|
411 |
+
self.appname = appname
|
412 |
+
self.appauthor = appauthor
|
413 |
+
self.version = version
|
414 |
+
self.roaming = roaming
|
415 |
+
self.multipath = multipath
|
416 |
+
|
417 |
+
@property
|
418 |
+
def user_data_dir(self):
|
419 |
+
return user_data_dir(self.appname, self.appauthor,
|
420 |
+
version=self.version, roaming=self.roaming)
|
421 |
+
|
422 |
+
@property
|
423 |
+
def site_data_dir(self):
|
424 |
+
return site_data_dir(self.appname, self.appauthor,
|
425 |
+
version=self.version, multipath=self.multipath)
|
426 |
+
|
427 |
+
@property
|
428 |
+
def user_config_dir(self):
|
429 |
+
return user_config_dir(self.appname, self.appauthor,
|
430 |
+
version=self.version, roaming=self.roaming)
|
431 |
+
|
432 |
+
@property
|
433 |
+
def site_config_dir(self):
|
434 |
+
return site_config_dir(self.appname, self.appauthor,
|
435 |
+
version=self.version, multipath=self.multipath)
|
436 |
+
|
437 |
+
@property
|
438 |
+
def user_cache_dir(self):
|
439 |
+
return user_cache_dir(self.appname, self.appauthor,
|
440 |
+
version=self.version)
|
441 |
+
|
442 |
+
@property
|
443 |
+
def user_state_dir(self):
|
444 |
+
return user_state_dir(self.appname, self.appauthor,
|
445 |
+
version=self.version)
|
446 |
+
|
447 |
+
@property
|
448 |
+
def user_log_dir(self):
|
449 |
+
return user_log_dir(self.appname, self.appauthor,
|
450 |
+
version=self.version)
|
451 |
+
|
452 |
+
|
453 |
+
#---- internal support stuff
|
454 |
+
|
455 |
+
def _get_win_folder_from_registry(csidl_name):
|
456 |
+
"""This is a fallback technique at best. I'm not sure if using the
|
457 |
+
registry for this guarantees us the correct answer for all CSIDL_*
|
458 |
+
names.
|
459 |
+
"""
|
460 |
+
if PY3:
|
461 |
+
import winreg as _winreg
|
462 |
+
else:
|
463 |
+
import _winreg
|
464 |
+
|
465 |
+
shell_folder_name = {
|
466 |
+
"CSIDL_APPDATA": "AppData",
|
467 |
+
"CSIDL_COMMON_APPDATA": "Common AppData",
|
468 |
+
"CSIDL_LOCAL_APPDATA": "Local AppData",
|
469 |
+
}[csidl_name]
|
470 |
+
|
471 |
+
key = _winreg.OpenKey(
|
472 |
+
_winreg.HKEY_CURRENT_USER,
|
473 |
+
r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
|
474 |
+
)
|
475 |
+
dir, type = _winreg.QueryValueEx(key, shell_folder_name)
|
476 |
+
return dir
|
477 |
+
|
478 |
+
|
479 |
+
def _get_win_folder_with_pywin32(csidl_name):
|
480 |
+
from win32com.shell import shellcon, shell
|
481 |
+
dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
|
482 |
+
# Try to make this a unicode path because SHGetFolderPath does
|
483 |
+
# not return unicode strings when there is unicode data in the
|
484 |
+
# path.
|
485 |
+
try:
|
486 |
+
dir = unicode(dir)
|
487 |
+
|
488 |
+
# Downgrade to short path name if have highbit chars. See
|
489 |
+
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
|
490 |
+
has_high_char = False
|
491 |
+
for c in dir:
|
492 |
+
if ord(c) > 255:
|
493 |
+
has_high_char = True
|
494 |
+
break
|
495 |
+
if has_high_char:
|
496 |
+
try:
|
497 |
+
import win32api
|
498 |
+
dir = win32api.GetShortPathName(dir)
|
499 |
+
except ImportError:
|
500 |
+
pass
|
501 |
+
except UnicodeError:
|
502 |
+
pass
|
503 |
+
return dir
|
504 |
+
|
505 |
+
|
506 |
+
def _get_win_folder_with_ctypes(csidl_name):
|
507 |
+
import ctypes
|
508 |
+
|
509 |
+
csidl_const = {
|
510 |
+
"CSIDL_APPDATA": 26,
|
511 |
+
"CSIDL_COMMON_APPDATA": 35,
|
512 |
+
"CSIDL_LOCAL_APPDATA": 28,
|
513 |
+
}[csidl_name]
|
514 |
+
|
515 |
+
buf = ctypes.create_unicode_buffer(1024)
|
516 |
+
ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
|
517 |
+
|
518 |
+
# Downgrade to short path name if have highbit chars. See
|
519 |
+
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
|
520 |
+
has_high_char = False
|
521 |
+
for c in buf:
|
522 |
+
if ord(c) > 255:
|
523 |
+
has_high_char = True
|
524 |
+
break
|
525 |
+
if has_high_char:
|
526 |
+
buf2 = ctypes.create_unicode_buffer(1024)
|
527 |
+
if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
|
528 |
+
buf = buf2
|
529 |
+
|
530 |
+
return buf.value
|
531 |
+
|
532 |
+
def _get_win_folder_with_jna(csidl_name):
|
533 |
+
import array
|
534 |
+
from com.sun import jna
|
535 |
+
from com.sun.jna.platform import win32
|
536 |
+
|
537 |
+
buf_size = win32.WinDef.MAX_PATH * 2
|
538 |
+
buf = array.zeros('c', buf_size)
|
539 |
+
shell = win32.Shell32.INSTANCE
|
540 |
+
shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
|
541 |
+
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
|
542 |
+
|
543 |
+
# Downgrade to short path name if have highbit chars. See
|
544 |
+
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
|
545 |
+
has_high_char = False
|
546 |
+
for c in dir:
|
547 |
+
if ord(c) > 255:
|
548 |
+
has_high_char = True
|
549 |
+
break
|
550 |
+
if has_high_char:
|
551 |
+
buf = array.zeros('c', buf_size)
|
552 |
+
kernel = win32.Kernel32.INSTANCE
|
553 |
+
if kernel.GetShortPathName(dir, buf, buf_size):
|
554 |
+
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
|
555 |
+
|
556 |
+
return dir
|
557 |
+
|
558 |
+
if system == "win32":
|
559 |
+
try:
|
560 |
+
import win32com.shell
|
561 |
+
_get_win_folder = _get_win_folder_with_pywin32
|
562 |
+
except ImportError:
|
563 |
+
try:
|
564 |
+
from ctypes import windll
|
565 |
+
_get_win_folder = _get_win_folder_with_ctypes
|
566 |
+
except ImportError:
|
567 |
+
try:
|
568 |
+
import com.sun.jna
|
569 |
+
_get_win_folder = _get_win_folder_with_jna
|
570 |
+
except ImportError:
|
571 |
+
_get_win_folder = _get_win_folder_from_registry
|
572 |
+
|
573 |
+
|
574 |
+
#---- self test code
|
575 |
+
|
576 |
+
if __name__ == "__main__":
|
577 |
+
appname = "MyApp"
|
578 |
+
appauthor = "MyCompany"
|
579 |
+
|
580 |
+
props = ("user_data_dir",
|
581 |
+
"user_config_dir",
|
582 |
+
"user_cache_dir",
|
583 |
+
"user_state_dir",
|
584 |
+
"user_log_dir",
|
585 |
+
"site_data_dir",
|
586 |
+
"site_config_dir")
|
587 |
+
|
588 |
+
print("-- app dirs %s --" % __version__)
|
589 |
+
|
590 |
+
print("-- app dirs (with optional 'version')")
|
591 |
+
dirs = AppDirs(appname, appauthor, version="1.0")
|
592 |
+
for prop in props:
|
593 |
+
print("%s: %s" % (prop, getattr(dirs, prop)))
|
594 |
+
|
595 |
+
print("\n-- app dirs (without optional 'version')")
|
596 |
+
dirs = AppDirs(appname, appauthor)
|
597 |
+
for prop in props:
|
598 |
+
print("%s: %s" % (prop, getattr(dirs, prop)))
|
599 |
+
|
600 |
+
print("\n-- app dirs (without optional 'appauthor')")
|
601 |
+
dirs = AppDirs(appname)
|
602 |
+
for prop in props:
|
603 |
+
print("%s: %s" % (prop, getattr(dirs, prop)))
|
604 |
+
|
605 |
+
print("\n-- app dirs (with disabled 'appauthor')")
|
606 |
+
dirs = AppDirs(appname, appauthor=False)
|
607 |
+
for prop in props:
|
608 |
+
print("%s: %s" % (prop, getattr(dirs, prop)))
|
llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__about__.py
ADDED
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# This file is dual licensed under the terms of the Apache License, Version
|
2 |
+
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
|
3 |
+
# for complete details.
|
4 |
+
|
5 |
+
__all__ = [
|
6 |
+
"__title__",
|
7 |
+
"__summary__",
|
8 |
+
"__uri__",
|
9 |
+
"__version__",
|
10 |
+
"__author__",
|
11 |
+
"__email__",
|
12 |
+
"__license__",
|
13 |
+
"__copyright__",
|
14 |
+
]
|
15 |
+
|
16 |
+
__title__ = "packaging"
|
17 |
+
__summary__ = "Core utilities for Python packages"
|
18 |
+
__uri__ = "https://github.com/pypa/packaging"
|
19 |
+
|
20 |
+
__version__ = "21.2"
|
21 |
+
|
22 |
+
__author__ = "Donald Stufft and individual contributors"
|
23 |
+
__email__ = "[email protected]"
|
24 |
+
|
25 |
+
__license__ = "BSD-2-Clause or Apache-2.0"
|
26 |
+
__copyright__ = "2014-2019 %s" % __author__
|
llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__init__.py
ADDED
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# This file is dual licensed under the terms of the Apache License, Version
|
2 |
+
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
|
3 |
+
# for complete details.
|
4 |
+
|
5 |
+
from .__about__ import (
|
6 |
+
__author__,
|
7 |
+
__copyright__,
|
8 |
+
__email__,
|
9 |
+
__license__,
|
10 |
+
__summary__,
|
11 |
+
__title__,
|
12 |
+
__uri__,
|
13 |
+
__version__,
|
14 |
+
)
|
15 |
+
|
16 |
+
__all__ = [
|
17 |
+
"__title__",
|
18 |
+
"__summary__",
|
19 |
+
"__uri__",
|
20 |
+
"__version__",
|
21 |
+
"__author__",
|
22 |
+
"__email__",
|
23 |
+
"__license__",
|
24 |
+
"__copyright__",
|
25 |
+
]
|
llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/__about__.cpython-310.pyc
ADDED
Binary file (601 Bytes). View file
|
|
llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (457 Bytes). View file
|
|
llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/_manylinux.cpython-310.pyc
ADDED
Binary file (7.31 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/_structures.cpython-310.pyc
ADDED
Binary file (2.98 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/requirements.cpython-310.pyc
ADDED
Binary file (4 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/specifiers.cpython-310.pyc
ADDED
Binary file (22.2 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/tags.cpython-310.pyc
ADDED
Binary file (12.2 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/utils.cpython-310.pyc
ADDED
Binary file (3.59 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/version.cpython-310.pyc
ADDED
Binary file (12.9 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_manylinux.py
ADDED
@@ -0,0 +1,301 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import collections
|
2 |
+
import functools
|
3 |
+
import os
|
4 |
+
import re
|
5 |
+
import struct
|
6 |
+
import sys
|
7 |
+
import warnings
|
8 |
+
from typing import IO, Dict, Iterator, NamedTuple, Optional, Tuple
|
9 |
+
|
10 |
+
|
11 |
+
# Python does not provide platform information at sufficient granularity to
|
12 |
+
# identify the architecture of the running executable in some cases, so we
|
13 |
+
# determine it dynamically by reading the information from the running
|
14 |
+
# process. This only applies on Linux, which uses the ELF format.
|
15 |
+
class _ELFFileHeader:
|
16 |
+
# https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header
|
17 |
+
class _InvalidELFFileHeader(ValueError):
|
18 |
+
"""
|
19 |
+
An invalid ELF file header was found.
|
20 |
+
"""
|
21 |
+
|
22 |
+
ELF_MAGIC_NUMBER = 0x7F454C46
|
23 |
+
ELFCLASS32 = 1
|
24 |
+
ELFCLASS64 = 2
|
25 |
+
ELFDATA2LSB = 1
|
26 |
+
ELFDATA2MSB = 2
|
27 |
+
EM_386 = 3
|
28 |
+
EM_S390 = 22
|
29 |
+
EM_ARM = 40
|
30 |
+
EM_X86_64 = 62
|
31 |
+
EF_ARM_ABIMASK = 0xFF000000
|
32 |
+
EF_ARM_ABI_VER5 = 0x05000000
|
33 |
+
EF_ARM_ABI_FLOAT_HARD = 0x00000400
|
34 |
+
|
35 |
+
def __init__(self, file: IO[bytes]) -> None:
|
36 |
+
def unpack(fmt: str) -> int:
|
37 |
+
try:
|
38 |
+
data = file.read(struct.calcsize(fmt))
|
39 |
+
result: Tuple[int, ...] = struct.unpack(fmt, data)
|
40 |
+
except struct.error:
|
41 |
+
raise _ELFFileHeader._InvalidELFFileHeader()
|
42 |
+
return result[0]
|
43 |
+
|
44 |
+
self.e_ident_magic = unpack(">I")
|
45 |
+
if self.e_ident_magic != self.ELF_MAGIC_NUMBER:
|
46 |
+
raise _ELFFileHeader._InvalidELFFileHeader()
|
47 |
+
self.e_ident_class = unpack("B")
|
48 |
+
if self.e_ident_class not in {self.ELFCLASS32, self.ELFCLASS64}:
|
49 |
+
raise _ELFFileHeader._InvalidELFFileHeader()
|
50 |
+
self.e_ident_data = unpack("B")
|
51 |
+
if self.e_ident_data not in {self.ELFDATA2LSB, self.ELFDATA2MSB}:
|
52 |
+
raise _ELFFileHeader._InvalidELFFileHeader()
|
53 |
+
self.e_ident_version = unpack("B")
|
54 |
+
self.e_ident_osabi = unpack("B")
|
55 |
+
self.e_ident_abiversion = unpack("B")
|
56 |
+
self.e_ident_pad = file.read(7)
|
57 |
+
format_h = "<H" if self.e_ident_data == self.ELFDATA2LSB else ">H"
|
58 |
+
format_i = "<I" if self.e_ident_data == self.ELFDATA2LSB else ">I"
|
59 |
+
format_q = "<Q" if self.e_ident_data == self.ELFDATA2LSB else ">Q"
|
60 |
+
format_p = format_i if self.e_ident_class == self.ELFCLASS32 else format_q
|
61 |
+
self.e_type = unpack(format_h)
|
62 |
+
self.e_machine = unpack(format_h)
|
63 |
+
self.e_version = unpack(format_i)
|
64 |
+
self.e_entry = unpack(format_p)
|
65 |
+
self.e_phoff = unpack(format_p)
|
66 |
+
self.e_shoff = unpack(format_p)
|
67 |
+
self.e_flags = unpack(format_i)
|
68 |
+
self.e_ehsize = unpack(format_h)
|
69 |
+
self.e_phentsize = unpack(format_h)
|
70 |
+
self.e_phnum = unpack(format_h)
|
71 |
+
self.e_shentsize = unpack(format_h)
|
72 |
+
self.e_shnum = unpack(format_h)
|
73 |
+
self.e_shstrndx = unpack(format_h)
|
74 |
+
|
75 |
+
|
76 |
+
def _get_elf_header() -> Optional[_ELFFileHeader]:
|
77 |
+
try:
|
78 |
+
with open(sys.executable, "rb") as f:
|
79 |
+
elf_header = _ELFFileHeader(f)
|
80 |
+
except (OSError, TypeError, _ELFFileHeader._InvalidELFFileHeader):
|
81 |
+
return None
|
82 |
+
return elf_header
|
83 |
+
|
84 |
+
|
85 |
+
def _is_linux_armhf() -> bool:
|
86 |
+
# hard-float ABI can be detected from the ELF header of the running
|
87 |
+
# process
|
88 |
+
# https://static.docs.arm.com/ihi0044/g/aaelf32.pdf
|
89 |
+
elf_header = _get_elf_header()
|
90 |
+
if elf_header is None:
|
91 |
+
return False
|
92 |
+
result = elf_header.e_ident_class == elf_header.ELFCLASS32
|
93 |
+
result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
|
94 |
+
result &= elf_header.e_machine == elf_header.EM_ARM
|
95 |
+
result &= (
|
96 |
+
elf_header.e_flags & elf_header.EF_ARM_ABIMASK
|
97 |
+
) == elf_header.EF_ARM_ABI_VER5
|
98 |
+
result &= (
|
99 |
+
elf_header.e_flags & elf_header.EF_ARM_ABI_FLOAT_HARD
|
100 |
+
) == elf_header.EF_ARM_ABI_FLOAT_HARD
|
101 |
+
return result
|
102 |
+
|
103 |
+
|
104 |
+
def _is_linux_i686() -> bool:
|
105 |
+
elf_header = _get_elf_header()
|
106 |
+
if elf_header is None:
|
107 |
+
return False
|
108 |
+
result = elf_header.e_ident_class == elf_header.ELFCLASS32
|
109 |
+
result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
|
110 |
+
result &= elf_header.e_machine == elf_header.EM_386
|
111 |
+
return result
|
112 |
+
|
113 |
+
|
114 |
+
def _have_compatible_abi(arch: str) -> bool:
|
115 |
+
if arch == "armv7l":
|
116 |
+
return _is_linux_armhf()
|
117 |
+
if arch == "i686":
|
118 |
+
return _is_linux_i686()
|
119 |
+
return arch in {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x"}
|
120 |
+
|
121 |
+
|
122 |
+
# If glibc ever changes its major version, we need to know what the last
|
123 |
+
# minor version was, so we can build the complete list of all versions.
|
124 |
+
# For now, guess what the highest minor version might be, assume it will
|
125 |
+
# be 50 for testing. Once this actually happens, update the dictionary
|
126 |
+
# with the actual value.
|
127 |
+
_LAST_GLIBC_MINOR: Dict[int, int] = collections.defaultdict(lambda: 50)
|
128 |
+
|
129 |
+
|
130 |
+
class _GLibCVersion(NamedTuple):
|
131 |
+
major: int
|
132 |
+
minor: int
|
133 |
+
|
134 |
+
|
135 |
+
def _glibc_version_string_confstr() -> Optional[str]:
|
136 |
+
"""
|
137 |
+
Primary implementation of glibc_version_string using os.confstr.
|
138 |
+
"""
|
139 |
+
# os.confstr is quite a bit faster than ctypes.DLL. It's also less likely
|
140 |
+
# to be broken or missing. This strategy is used in the standard library
|
141 |
+
# platform module.
|
142 |
+
# https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183
|
143 |
+
try:
|
144 |
+
# os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17".
|
145 |
+
version_string = os.confstr("CS_GNU_LIBC_VERSION")
|
146 |
+
assert version_string is not None
|
147 |
+
_, version = version_string.split()
|
148 |
+
except (AssertionError, AttributeError, OSError, ValueError):
|
149 |
+
# os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
|
150 |
+
return None
|
151 |
+
return version
|
152 |
+
|
153 |
+
|
154 |
+
def _glibc_version_string_ctypes() -> Optional[str]:
|
155 |
+
"""
|
156 |
+
Fallback implementation of glibc_version_string using ctypes.
|
157 |
+
"""
|
158 |
+
try:
|
159 |
+
import ctypes
|
160 |
+
except ImportError:
|
161 |
+
return None
|
162 |
+
|
163 |
+
# ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
|
164 |
+
# manpage says, "If filename is NULL, then the returned handle is for the
|
165 |
+
# main program". This way we can let the linker do the work to figure out
|
166 |
+
# which libc our process is actually using.
|
167 |
+
#
|
168 |
+
# We must also handle the special case where the executable is not a
|
169 |
+
# dynamically linked executable. This can occur when using musl libc,
|
170 |
+
# for example. In this situation, dlopen() will error, leading to an
|
171 |
+
# OSError. Interestingly, at least in the case of musl, there is no
|
172 |
+
# errno set on the OSError. The single string argument used to construct
|
173 |
+
# OSError comes from libc itself and is therefore not portable to
|
174 |
+
# hard code here. In any case, failure to call dlopen() means we
|
175 |
+
# can proceed, so we bail on our attempt.
|
176 |
+
try:
|
177 |
+
process_namespace = ctypes.CDLL(None)
|
178 |
+
except OSError:
|
179 |
+
return None
|
180 |
+
|
181 |
+
try:
|
182 |
+
gnu_get_libc_version = process_namespace.gnu_get_libc_version
|
183 |
+
except AttributeError:
|
184 |
+
# Symbol doesn't exist -> therefore, we are not linked to
|
185 |
+
# glibc.
|
186 |
+
return None
|
187 |
+
|
188 |
+
# Call gnu_get_libc_version, which returns a string like "2.5"
|
189 |
+
gnu_get_libc_version.restype = ctypes.c_char_p
|
190 |
+
version_str: str = gnu_get_libc_version()
|
191 |
+
# py2 / py3 compatibility:
|
192 |
+
if not isinstance(version_str, str):
|
193 |
+
version_str = version_str.decode("ascii")
|
194 |
+
|
195 |
+
return version_str
|
196 |
+
|
197 |
+
|
198 |
+
def _glibc_version_string() -> Optional[str]:
|
199 |
+
"""Returns glibc version string, or None if not using glibc."""
|
200 |
+
return _glibc_version_string_confstr() or _glibc_version_string_ctypes()
|
201 |
+
|
202 |
+
|
203 |
+
def _parse_glibc_version(version_str: str) -> Tuple[int, int]:
|
204 |
+
"""Parse glibc version.
|
205 |
+
|
206 |
+
We use a regexp instead of str.split because we want to discard any
|
207 |
+
random junk that might come after the minor version -- this might happen
|
208 |
+
in patched/forked versions of glibc (e.g. Linaro's version of glibc
|
209 |
+
uses version strings like "2.20-2014.11"). See gh-3588.
|
210 |
+
"""
|
211 |
+
m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
|
212 |
+
if not m:
|
213 |
+
warnings.warn(
|
214 |
+
"Expected glibc version with 2 components major.minor,"
|
215 |
+
" got: %s" % version_str,
|
216 |
+
RuntimeWarning,
|
217 |
+
)
|
218 |
+
return -1, -1
|
219 |
+
return int(m.group("major")), int(m.group("minor"))
|
220 |
+
|
221 |
+
|
222 |
+
@functools.lru_cache()
|
223 |
+
def _get_glibc_version() -> Tuple[int, int]:
|
224 |
+
version_str = _glibc_version_string()
|
225 |
+
if version_str is None:
|
226 |
+
return (-1, -1)
|
227 |
+
return _parse_glibc_version(version_str)
|
228 |
+
|
229 |
+
|
230 |
+
# From PEP 513, PEP 600
|
231 |
+
def _is_compatible(name: str, arch: str, version: _GLibCVersion) -> bool:
|
232 |
+
sys_glibc = _get_glibc_version()
|
233 |
+
if sys_glibc < version:
|
234 |
+
return False
|
235 |
+
# Check for presence of _manylinux module.
|
236 |
+
try:
|
237 |
+
import _manylinux # noqa
|
238 |
+
except ImportError:
|
239 |
+
return True
|
240 |
+
if hasattr(_manylinux, "manylinux_compatible"):
|
241 |
+
result = _manylinux.manylinux_compatible(version[0], version[1], arch)
|
242 |
+
if result is not None:
|
243 |
+
return bool(result)
|
244 |
+
return True
|
245 |
+
if version == _GLibCVersion(2, 5):
|
246 |
+
if hasattr(_manylinux, "manylinux1_compatible"):
|
247 |
+
return bool(_manylinux.manylinux1_compatible)
|
248 |
+
if version == _GLibCVersion(2, 12):
|
249 |
+
if hasattr(_manylinux, "manylinux2010_compatible"):
|
250 |
+
return bool(_manylinux.manylinux2010_compatible)
|
251 |
+
if version == _GLibCVersion(2, 17):
|
252 |
+
if hasattr(_manylinux, "manylinux2014_compatible"):
|
253 |
+
return bool(_manylinux.manylinux2014_compatible)
|
254 |
+
return True
|
255 |
+
|
256 |
+
|
257 |
+
_LEGACY_MANYLINUX_MAP = {
|
258 |
+
# CentOS 7 w/ glibc 2.17 (PEP 599)
|
259 |
+
(2, 17): "manylinux2014",
|
260 |
+
# CentOS 6 w/ glibc 2.12 (PEP 571)
|
261 |
+
(2, 12): "manylinux2010",
|
262 |
+
# CentOS 5 w/ glibc 2.5 (PEP 513)
|
263 |
+
(2, 5): "manylinux1",
|
264 |
+
}
|
265 |
+
|
266 |
+
|
267 |
+
def platform_tags(linux: str, arch: str) -> Iterator[str]:
|
268 |
+
if not _have_compatible_abi(arch):
|
269 |
+
return
|
270 |
+
# Oldest glibc to be supported regardless of architecture is (2, 17).
|
271 |
+
too_old_glibc2 = _GLibCVersion(2, 16)
|
272 |
+
if arch in {"x86_64", "i686"}:
|
273 |
+
# On x86/i686 also oldest glibc to be supported is (2, 5).
|
274 |
+
too_old_glibc2 = _GLibCVersion(2, 4)
|
275 |
+
current_glibc = _GLibCVersion(*_get_glibc_version())
|
276 |
+
glibc_max_list = [current_glibc]
|
277 |
+
# We can assume compatibility across glibc major versions.
|
278 |
+
# https://sourceware.org/bugzilla/show_bug.cgi?id=24636
|
279 |
+
#
|
280 |
+
# Build a list of maximum glibc versions so that we can
|
281 |
+
# output the canonical list of all glibc from current_glibc
|
282 |
+
# down to too_old_glibc2, including all intermediary versions.
|
283 |
+
for glibc_major in range(current_glibc.major - 1, 1, -1):
|
284 |
+
glibc_minor = _LAST_GLIBC_MINOR[glibc_major]
|
285 |
+
glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor))
|
286 |
+
for glibc_max in glibc_max_list:
|
287 |
+
if glibc_max.major == too_old_glibc2.major:
|
288 |
+
min_minor = too_old_glibc2.minor
|
289 |
+
else:
|
290 |
+
# For other glibc major versions oldest supported is (x, 0).
|
291 |
+
min_minor = -1
|
292 |
+
for glibc_minor in range(glibc_max.minor, min_minor, -1):
|
293 |
+
glibc_version = _GLibCVersion(glibc_max.major, glibc_minor)
|
294 |
+
tag = "manylinux_{}_{}".format(*glibc_version)
|
295 |
+
if _is_compatible(tag, arch, glibc_version):
|
296 |
+
yield linux.replace("linux", tag)
|
297 |
+
# Handle the legacy manylinux1, manylinux2010, manylinux2014 tags.
|
298 |
+
if glibc_version in _LEGACY_MANYLINUX_MAP:
|
299 |
+
legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version]
|
300 |
+
if _is_compatible(legacy_tag, arch, glibc_version):
|
301 |
+
yield linux.replace("linux", legacy_tag)
|
llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_musllinux.py
ADDED
@@ -0,0 +1,136 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""PEP 656 support.
|
2 |
+
|
3 |
+
This module implements logic to detect if the currently running Python is
|
4 |
+
linked against musl, and what musl version is used.
|
5 |
+
"""
|
6 |
+
|
7 |
+
import contextlib
|
8 |
+
import functools
|
9 |
+
import operator
|
10 |
+
import os
|
11 |
+
import re
|
12 |
+
import struct
|
13 |
+
import subprocess
|
14 |
+
import sys
|
15 |
+
from typing import IO, Iterator, NamedTuple, Optional, Tuple
|
16 |
+
|
17 |
+
|
18 |
+
def _read_unpacked(f: IO[bytes], fmt: str) -> Tuple[int, ...]:
|
19 |
+
return struct.unpack(fmt, f.read(struct.calcsize(fmt)))
|
20 |
+
|
21 |
+
|
22 |
+
def _parse_ld_musl_from_elf(f: IO[bytes]) -> Optional[str]:
|
23 |
+
"""Detect musl libc location by parsing the Python executable.
|
24 |
+
|
25 |
+
Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca
|
26 |
+
ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html
|
27 |
+
"""
|
28 |
+
f.seek(0)
|
29 |
+
try:
|
30 |
+
ident = _read_unpacked(f, "16B")
|
31 |
+
except struct.error:
|
32 |
+
return None
|
33 |
+
if ident[:4] != tuple(b"\x7fELF"): # Invalid magic, not ELF.
|
34 |
+
return None
|
35 |
+
f.seek(struct.calcsize("HHI"), 1) # Skip file type, machine, and version.
|
36 |
+
|
37 |
+
try:
|
38 |
+
# e_fmt: Format for program header.
|
39 |
+
# p_fmt: Format for section header.
|
40 |
+
# p_idx: Indexes to find p_type, p_offset, and p_filesz.
|
41 |
+
e_fmt, p_fmt, p_idx = {
|
42 |
+
1: ("IIIIHHH", "IIIIIIII", (0, 1, 4)), # 32-bit.
|
43 |
+
2: ("QQQIHHH", "IIQQQQQQ", (0, 2, 5)), # 64-bit.
|
44 |
+
}[ident[4]]
|
45 |
+
except KeyError:
|
46 |
+
return None
|
47 |
+
else:
|
48 |
+
p_get = operator.itemgetter(*p_idx)
|
49 |
+
|
50 |
+
# Find the interpreter section and return its content.
|
51 |
+
try:
|
52 |
+
_, e_phoff, _, _, _, e_phentsize, e_phnum = _read_unpacked(f, e_fmt)
|
53 |
+
except struct.error:
|
54 |
+
return None
|
55 |
+
for i in range(e_phnum + 1):
|
56 |
+
f.seek(e_phoff + e_phentsize * i)
|
57 |
+
try:
|
58 |
+
p_type, p_offset, p_filesz = p_get(_read_unpacked(f, p_fmt))
|
59 |
+
except struct.error:
|
60 |
+
return None
|
61 |
+
if p_type != 3: # Not PT_INTERP.
|
62 |
+
continue
|
63 |
+
f.seek(p_offset)
|
64 |
+
interpreter = os.fsdecode(f.read(p_filesz)).strip("\0")
|
65 |
+
if "musl" not in interpreter:
|
66 |
+
return None
|
67 |
+
return interpreter
|
68 |
+
return None
|
69 |
+
|
70 |
+
|
71 |
+
class _MuslVersion(NamedTuple):
|
72 |
+
major: int
|
73 |
+
minor: int
|
74 |
+
|
75 |
+
|
76 |
+
def _parse_musl_version(output: str) -> Optional[_MuslVersion]:
|
77 |
+
lines = [n for n in (n.strip() for n in output.splitlines()) if n]
|
78 |
+
if len(lines) < 2 or lines[0][:4] != "musl":
|
79 |
+
return None
|
80 |
+
m = re.match(r"Version (\d+)\.(\d+)", lines[1])
|
81 |
+
if not m:
|
82 |
+
return None
|
83 |
+
return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2)))
|
84 |
+
|
85 |
+
|
86 |
+
@functools.lru_cache()
|
87 |
+
def _get_musl_version(executable: str) -> Optional[_MuslVersion]:
|
88 |
+
"""Detect currently-running musl runtime version.
|
89 |
+
|
90 |
+
This is done by checking the specified executable's dynamic linking
|
91 |
+
information, and invoking the loader to parse its output for a version
|
92 |
+
string. If the loader is musl, the output would be something like::
|
93 |
+
|
94 |
+
musl libc (x86_64)
|
95 |
+
Version 1.2.2
|
96 |
+
Dynamic Program Loader
|
97 |
+
"""
|
98 |
+
with contextlib.ExitStack() as stack:
|
99 |
+
try:
|
100 |
+
f = stack.enter_context(open(executable, "rb"))
|
101 |
+
except IOError:
|
102 |
+
return None
|
103 |
+
ld = _parse_ld_musl_from_elf(f)
|
104 |
+
if not ld:
|
105 |
+
return None
|
106 |
+
proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True)
|
107 |
+
return _parse_musl_version(proc.stderr)
|
108 |
+
|
109 |
+
|
110 |
+
def platform_tags(arch: str) -> Iterator[str]:
|
111 |
+
"""Generate musllinux tags compatible to the current platform.
|
112 |
+
|
113 |
+
:param arch: Should be the part of platform tag after the ``linux_``
|
114 |
+
prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a
|
115 |
+
prerequisite for the current platform to be musllinux-compatible.
|
116 |
+
|
117 |
+
:returns: An iterator of compatible musllinux tags.
|
118 |
+
"""
|
119 |
+
sys_musl = _get_musl_version(sys.executable)
|
120 |
+
if sys_musl is None: # Python not dynamically linked against musl.
|
121 |
+
return
|
122 |
+
for minor in range(sys_musl.minor, -1, -1):
|
123 |
+
yield f"musllinux_{sys_musl.major}_{minor}_{arch}"
|
124 |
+
|
125 |
+
|
126 |
+
if __name__ == "__main__": # pragma: no cover
|
127 |
+
import sysconfig
|
128 |
+
|
129 |
+
plat = sysconfig.get_platform()
|
130 |
+
assert plat.startswith("linux-"), "not linux"
|
131 |
+
|
132 |
+
print("plat:", plat)
|
133 |
+
print("musl:", _get_musl_version(sys.executable))
|
134 |
+
print("tags:", end=" ")
|
135 |
+
for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])):
|
136 |
+
print(t, end="\n ")
|
llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_structures.py
ADDED
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# This file is dual licensed under the terms of the Apache License, Version
|
2 |
+
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
|
3 |
+
# for complete details.
|
4 |
+
|
5 |
+
|
6 |
+
class InfinityType:
|
7 |
+
def __repr__(self) -> str:
|
8 |
+
return "Infinity"
|
9 |
+
|
10 |
+
def __hash__(self) -> int:
|
11 |
+
return hash(repr(self))
|
12 |
+
|
13 |
+
def __lt__(self, other: object) -> bool:
|
14 |
+
return False
|
15 |
+
|
16 |
+
def __le__(self, other: object) -> bool:
|
17 |
+
return False
|
18 |
+
|
19 |
+
def __eq__(self, other: object) -> bool:
|
20 |
+
return isinstance(other, self.__class__)
|
21 |
+
|
22 |
+
def __ne__(self, other: object) -> bool:
|
23 |
+
return not isinstance(other, self.__class__)
|
24 |
+
|
25 |
+
def __gt__(self, other: object) -> bool:
|
26 |
+
return True
|
27 |
+
|
28 |
+
def __ge__(self, other: object) -> bool:
|
29 |
+
return True
|
30 |
+
|
31 |
+
def __neg__(self: object) -> "NegativeInfinityType":
|
32 |
+
return NegativeInfinity
|
33 |
+
|
34 |
+
|
35 |
+
Infinity = InfinityType()
|
36 |
+
|
37 |
+
|
38 |
+
class NegativeInfinityType:
|
39 |
+
def __repr__(self) -> str:
|
40 |
+
return "-Infinity"
|
41 |
+
|
42 |
+
def __hash__(self) -> int:
|
43 |
+
return hash(repr(self))
|
44 |
+
|
45 |
+
def __lt__(self, other: object) -> bool:
|
46 |
+
return True
|
47 |
+
|
48 |
+
def __le__(self, other: object) -> bool:
|
49 |
+
return True
|
50 |
+
|
51 |
+
def __eq__(self, other: object) -> bool:
|
52 |
+
return isinstance(other, self.__class__)
|
53 |
+
|
54 |
+
def __ne__(self, other: object) -> bool:
|
55 |
+
return not isinstance(other, self.__class__)
|
56 |
+
|
57 |
+
def __gt__(self, other: object) -> bool:
|
58 |
+
return False
|
59 |
+
|
60 |
+
def __ge__(self, other: object) -> bool:
|
61 |
+
return False
|
62 |
+
|
63 |
+
def __neg__(self: object) -> InfinityType:
|
64 |
+
return Infinity
|
65 |
+
|
66 |
+
|
67 |
+
NegativeInfinity = NegativeInfinityType()
|
llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/markers.py
ADDED
@@ -0,0 +1,304 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# This file is dual licensed under the terms of the Apache License, Version
|
2 |
+
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
|
3 |
+
# for complete details.
|
4 |
+
|
5 |
+
import operator
|
6 |
+
import os
|
7 |
+
import platform
|
8 |
+
import sys
|
9 |
+
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
|
10 |
+
|
11 |
+
from pkg_resources.extern.pyparsing import ( # noqa: N817
|
12 |
+
Forward,
|
13 |
+
Group,
|
14 |
+
Literal as L,
|
15 |
+
ParseException,
|
16 |
+
ParseResults,
|
17 |
+
QuotedString,
|
18 |
+
ZeroOrMore,
|
19 |
+
stringEnd,
|
20 |
+
stringStart,
|
21 |
+
)
|
22 |
+
|
23 |
+
from .specifiers import InvalidSpecifier, Specifier
|
24 |
+
|
25 |
+
__all__ = [
|
26 |
+
"InvalidMarker",
|
27 |
+
"UndefinedComparison",
|
28 |
+
"UndefinedEnvironmentName",
|
29 |
+
"Marker",
|
30 |
+
"default_environment",
|
31 |
+
]
|
32 |
+
|
33 |
+
Operator = Callable[[str, str], bool]
|
34 |
+
|
35 |
+
|
36 |
+
class InvalidMarker(ValueError):
|
37 |
+
"""
|
38 |
+
An invalid marker was found, users should refer to PEP 508.
|
39 |
+
"""
|
40 |
+
|
41 |
+
|
42 |
+
class UndefinedComparison(ValueError):
|
43 |
+
"""
|
44 |
+
An invalid operation was attempted on a value that doesn't support it.
|
45 |
+
"""
|
46 |
+
|
47 |
+
|
48 |
+
class UndefinedEnvironmentName(ValueError):
|
49 |
+
"""
|
50 |
+
A name was attempted to be used that does not exist inside of the
|
51 |
+
environment.
|
52 |
+
"""
|
53 |
+
|
54 |
+
|
55 |
+
class Node:
|
56 |
+
def __init__(self, value: Any) -> None:
|
57 |
+
self.value = value
|
58 |
+
|
59 |
+
def __str__(self) -> str:
|
60 |
+
return str(self.value)
|
61 |
+
|
62 |
+
def __repr__(self) -> str:
|
63 |
+
return f"<{self.__class__.__name__}('{self}')>"
|
64 |
+
|
65 |
+
def serialize(self) -> str:
|
66 |
+
raise NotImplementedError
|
67 |
+
|
68 |
+
|
69 |
+
class Variable(Node):
|
70 |
+
def serialize(self) -> str:
|
71 |
+
return str(self)
|
72 |
+
|
73 |
+
|
74 |
+
class Value(Node):
|
75 |
+
def serialize(self) -> str:
|
76 |
+
return f'"{self}"'
|
77 |
+
|
78 |
+
|
79 |
+
class Op(Node):
|
80 |
+
def serialize(self) -> str:
|
81 |
+
return str(self)
|
82 |
+
|
83 |
+
|
84 |
+
VARIABLE = (
|
85 |
+
L("implementation_version")
|
86 |
+
| L("platform_python_implementation")
|
87 |
+
| L("implementation_name")
|
88 |
+
| L("python_full_version")
|
89 |
+
| L("platform_release")
|
90 |
+
| L("platform_version")
|
91 |
+
| L("platform_machine")
|
92 |
+
| L("platform_system")
|
93 |
+
| L("python_version")
|
94 |
+
| L("sys_platform")
|
95 |
+
| L("os_name")
|
96 |
+
| L("os.name") # PEP-345
|
97 |
+
| L("sys.platform") # PEP-345
|
98 |
+
| L("platform.version") # PEP-345
|
99 |
+
| L("platform.machine") # PEP-345
|
100 |
+
| L("platform.python_implementation") # PEP-345
|
101 |
+
| L("python_implementation") # undocumented setuptools legacy
|
102 |
+
| L("extra") # PEP-508
|
103 |
+
)
|
104 |
+
ALIASES = {
|
105 |
+
"os.name": "os_name",
|
106 |
+
"sys.platform": "sys_platform",
|
107 |
+
"platform.version": "platform_version",
|
108 |
+
"platform.machine": "platform_machine",
|
109 |
+
"platform.python_implementation": "platform_python_implementation",
|
110 |
+
"python_implementation": "platform_python_implementation",
|
111 |
+
}
|
112 |
+
VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))
|
113 |
+
|
114 |
+
VERSION_CMP = (
|
115 |
+
L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<")
|
116 |
+
)
|
117 |
+
|
118 |
+
MARKER_OP = VERSION_CMP | L("not in") | L("in")
|
119 |
+
MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))
|
120 |
+
|
121 |
+
MARKER_VALUE = QuotedString("'") | QuotedString('"')
|
122 |
+
MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))
|
123 |
+
|
124 |
+
BOOLOP = L("and") | L("or")
|
125 |
+
|
126 |
+
MARKER_VAR = VARIABLE | MARKER_VALUE
|
127 |
+
|
128 |
+
MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
|
129 |
+
MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))
|
130 |
+
|
131 |
+
LPAREN = L("(").suppress()
|
132 |
+
RPAREN = L(")").suppress()
|
133 |
+
|
134 |
+
MARKER_EXPR = Forward()
|
135 |
+
MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
|
136 |
+
MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)
|
137 |
+
|
138 |
+
MARKER = stringStart + MARKER_EXPR + stringEnd
|
139 |
+
|
140 |
+
|
141 |
+
def _coerce_parse_result(results: Union[ParseResults, List[Any]]) -> List[Any]:
|
142 |
+
if isinstance(results, ParseResults):
|
143 |
+
return [_coerce_parse_result(i) for i in results]
|
144 |
+
else:
|
145 |
+
return results
|
146 |
+
|
147 |
+
|
148 |
+
def _format_marker(
|
149 |
+
marker: Union[List[str], Tuple[Node, ...], str], first: Optional[bool] = True
|
150 |
+
) -> str:
|
151 |
+
|
152 |
+
assert isinstance(marker, (list, tuple, str))
|
153 |
+
|
154 |
+
# Sometimes we have a structure like [[...]] which is a single item list
|
155 |
+
# where the single item is itself it's own list. In that case we want skip
|
156 |
+
# the rest of this function so that we don't get extraneous () on the
|
157 |
+
# outside.
|
158 |
+
if (
|
159 |
+
isinstance(marker, list)
|
160 |
+
and len(marker) == 1
|
161 |
+
and isinstance(marker[0], (list, tuple))
|
162 |
+
):
|
163 |
+
return _format_marker(marker[0])
|
164 |
+
|
165 |
+
if isinstance(marker, list):
|
166 |
+
inner = (_format_marker(m, first=False) for m in marker)
|
167 |
+
if first:
|
168 |
+
return " ".join(inner)
|
169 |
+
else:
|
170 |
+
return "(" + " ".join(inner) + ")"
|
171 |
+
elif isinstance(marker, tuple):
|
172 |
+
return " ".join([m.serialize() for m in marker])
|
173 |
+
else:
|
174 |
+
return marker
|
175 |
+
|
176 |
+
|
177 |
+
_operators: Dict[str, Operator] = {
|
178 |
+
"in": lambda lhs, rhs: lhs in rhs,
|
179 |
+
"not in": lambda lhs, rhs: lhs not in rhs,
|
180 |
+
"<": operator.lt,
|
181 |
+
"<=": operator.le,
|
182 |
+
"==": operator.eq,
|
183 |
+
"!=": operator.ne,
|
184 |
+
">=": operator.ge,
|
185 |
+
">": operator.gt,
|
186 |
+
}
|
187 |
+
|
188 |
+
|
189 |
+
def _eval_op(lhs: str, op: Op, rhs: str) -> bool:
|
190 |
+
try:
|
191 |
+
spec = Specifier("".join([op.serialize(), rhs]))
|
192 |
+
except InvalidSpecifier:
|
193 |
+
pass
|
194 |
+
else:
|
195 |
+
return spec.contains(lhs)
|
196 |
+
|
197 |
+
oper: Optional[Operator] = _operators.get(op.serialize())
|
198 |
+
if oper is None:
|
199 |
+
raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.")
|
200 |
+
|
201 |
+
return oper(lhs, rhs)
|
202 |
+
|
203 |
+
|
204 |
+
class Undefined:
|
205 |
+
pass
|
206 |
+
|
207 |
+
|
208 |
+
_undefined = Undefined()
|
209 |
+
|
210 |
+
|
211 |
+
def _get_env(environment: Dict[str, str], name: str) -> str:
|
212 |
+
value: Union[str, Undefined] = environment.get(name, _undefined)
|
213 |
+
|
214 |
+
if isinstance(value, Undefined):
|
215 |
+
raise UndefinedEnvironmentName(
|
216 |
+
f"{name!r} does not exist in evaluation environment."
|
217 |
+
)
|
218 |
+
|
219 |
+
return value
|
220 |
+
|
221 |
+
|
222 |
+
def _evaluate_markers(markers: List[Any], environment: Dict[str, str]) -> bool:
|
223 |
+
groups: List[List[bool]] = [[]]
|
224 |
+
|
225 |
+
for marker in markers:
|
226 |
+
assert isinstance(marker, (list, tuple, str))
|
227 |
+
|
228 |
+
if isinstance(marker, list):
|
229 |
+
groups[-1].append(_evaluate_markers(marker, environment))
|
230 |
+
elif isinstance(marker, tuple):
|
231 |
+
lhs, op, rhs = marker
|
232 |
+
|
233 |
+
if isinstance(lhs, Variable):
|
234 |
+
lhs_value = _get_env(environment, lhs.value)
|
235 |
+
rhs_value = rhs.value
|
236 |
+
else:
|
237 |
+
lhs_value = lhs.value
|
238 |
+
rhs_value = _get_env(environment, rhs.value)
|
239 |
+
|
240 |
+
groups[-1].append(_eval_op(lhs_value, op, rhs_value))
|
241 |
+
else:
|
242 |
+
assert marker in ["and", "or"]
|
243 |
+
if marker == "or":
|
244 |
+
groups.append([])
|
245 |
+
|
246 |
+
return any(all(item) for item in groups)
|
247 |
+
|
248 |
+
|
249 |
+
def format_full_version(info: "sys._version_info") -> str:
|
250 |
+
version = "{0.major}.{0.minor}.{0.micro}".format(info)
|
251 |
+
kind = info.releaselevel
|
252 |
+
if kind != "final":
|
253 |
+
version += kind[0] + str(info.serial)
|
254 |
+
return version
|
255 |
+
|
256 |
+
|
257 |
+
def default_environment() -> Dict[str, str]:
|
258 |
+
iver = format_full_version(sys.implementation.version)
|
259 |
+
implementation_name = sys.implementation.name
|
260 |
+
return {
|
261 |
+
"implementation_name": implementation_name,
|
262 |
+
"implementation_version": iver,
|
263 |
+
"os_name": os.name,
|
264 |
+
"platform_machine": platform.machine(),
|
265 |
+
"platform_release": platform.release(),
|
266 |
+
"platform_system": platform.system(),
|
267 |
+
"platform_version": platform.version(),
|
268 |
+
"python_full_version": platform.python_version(),
|
269 |
+
"platform_python_implementation": platform.python_implementation(),
|
270 |
+
"python_version": ".".join(platform.python_version_tuple()[:2]),
|
271 |
+
"sys_platform": sys.platform,
|
272 |
+
}
|
273 |
+
|
274 |
+
|
275 |
+
class Marker:
|
276 |
+
def __init__(self, marker: str) -> None:
|
277 |
+
try:
|
278 |
+
self._markers = _coerce_parse_result(MARKER.parseString(marker))
|
279 |
+
except ParseException as e:
|
280 |
+
raise InvalidMarker(
|
281 |
+
f"Invalid marker: {marker!r}, parse error at "
|
282 |
+
f"{marker[e.loc : e.loc + 8]!r}"
|
283 |
+
)
|
284 |
+
|
285 |
+
def __str__(self) -> str:
|
286 |
+
return _format_marker(self._markers)
|
287 |
+
|
288 |
+
def __repr__(self) -> str:
|
289 |
+
return f"<Marker('{self}')>"
|
290 |
+
|
291 |
+
def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool:
|
292 |
+
"""Evaluate a marker.
|
293 |
+
|
294 |
+
Return the boolean from evaluating the given marker against the
|
295 |
+
environment. environment is an optional argument to override all or
|
296 |
+
part of the determined environment.
|
297 |
+
|
298 |
+
The environment is determined from the current Python process.
|
299 |
+
"""
|
300 |
+
current_environment = default_environment()
|
301 |
+
if environment is not None:
|
302 |
+
current_environment.update(environment)
|
303 |
+
|
304 |
+
return _evaluate_markers(self._markers, current_environment)
|
llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/requirements.py
ADDED
@@ -0,0 +1,146 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# This file is dual licensed under the terms of the Apache License, Version
|
2 |
+
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
|
3 |
+
# for complete details.
|
4 |
+
|
5 |
+
import re
|
6 |
+
import string
|
7 |
+
import urllib.parse
|
8 |
+
from typing import List, Optional as TOptional, Set
|
9 |
+
|
10 |
+
from pkg_resources.extern.pyparsing import ( # noqa
|
11 |
+
Combine,
|
12 |
+
Literal as L,
|
13 |
+
Optional,
|
14 |
+
ParseException,
|
15 |
+
Regex,
|
16 |
+
Word,
|
17 |
+
ZeroOrMore,
|
18 |
+
originalTextFor,
|
19 |
+
stringEnd,
|
20 |
+
stringStart,
|
21 |
+
)
|
22 |
+
|
23 |
+
from .markers import MARKER_EXPR, Marker
|
24 |
+
from .specifiers import LegacySpecifier, Specifier, SpecifierSet
|
25 |
+
|
26 |
+
|
27 |
+
class InvalidRequirement(ValueError):
|
28 |
+
"""
|
29 |
+
An invalid requirement was found, users should refer to PEP 508.
|
30 |
+
"""
|
31 |
+
|
32 |
+
|
33 |
+
ALPHANUM = Word(string.ascii_letters + string.digits)
|
34 |
+
|
35 |
+
LBRACKET = L("[").suppress()
|
36 |
+
RBRACKET = L("]").suppress()
|
37 |
+
LPAREN = L("(").suppress()
|
38 |
+
RPAREN = L(")").suppress()
|
39 |
+
COMMA = L(",").suppress()
|
40 |
+
SEMICOLON = L(";").suppress()
|
41 |
+
AT = L("@").suppress()
|
42 |
+
|
43 |
+
PUNCTUATION = Word("-_.")
|
44 |
+
IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
|
45 |
+
IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))
|
46 |
+
|
47 |
+
NAME = IDENTIFIER("name")
|
48 |
+
EXTRA = IDENTIFIER
|
49 |
+
|
50 |
+
URI = Regex(r"[^ ]+")("url")
|
51 |
+
URL = AT + URI
|
52 |
+
|
53 |
+
EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
|
54 |
+
EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")
|
55 |
+
|
56 |
+
VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
|
57 |
+
VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)
|
58 |
+
|
59 |
+
VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
|
60 |
+
VERSION_MANY = Combine(
|
61 |
+
VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False
|
62 |
+
)("_raw_spec")
|
63 |
+
_VERSION_SPEC = Optional((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY)
|
64 |
+
_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "")
|
65 |
+
|
66 |
+
VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
|
67 |
+
VERSION_SPEC.setParseAction(lambda s, l, t: t[1])
|
68 |
+
|
69 |
+
MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
|
70 |
+
MARKER_EXPR.setParseAction(
|
71 |
+
lambda s, l, t: Marker(s[t._original_start : t._original_end])
|
72 |
+
)
|
73 |
+
MARKER_SEPARATOR = SEMICOLON
|
74 |
+
MARKER = MARKER_SEPARATOR + MARKER_EXPR
|
75 |
+
|
76 |
+
VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
|
77 |
+
URL_AND_MARKER = URL + Optional(MARKER)
|
78 |
+
|
79 |
+
NAMED_REQUIREMENT = NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)
|
80 |
+
|
81 |
+
REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd
|
82 |
+
# pkg_resources.extern.pyparsing isn't thread safe during initialization, so we do it eagerly, see
|
83 |
+
# issue #104
|
84 |
+
REQUIREMENT.parseString("x[]")
|
85 |
+
|
86 |
+
|
87 |
+
class Requirement:
|
88 |
+
"""Parse a requirement.
|
89 |
+
|
90 |
+
Parse a given requirement string into its parts, such as name, specifier,
|
91 |
+
URL, and extras. Raises InvalidRequirement on a badly-formed requirement
|
92 |
+
string.
|
93 |
+
"""
|
94 |
+
|
95 |
+
# TODO: Can we test whether something is contained within a requirement?
|
96 |
+
# If so how do we do that? Do we need to test against the _name_ of
|
97 |
+
# the thing as well as the version? What about the markers?
|
98 |
+
# TODO: Can we normalize the name and extra name?
|
99 |
+
|
100 |
+
def __init__(self, requirement_string: str) -> None:
|
101 |
+
try:
|
102 |
+
req = REQUIREMENT.parseString(requirement_string)
|
103 |
+
except ParseException as e:
|
104 |
+
raise InvalidRequirement(
|
105 |
+
f'Parse error at "{ requirement_string[e.loc : e.loc + 8]!r}": {e.msg}'
|
106 |
+
)
|
107 |
+
|
108 |
+
self.name: str = req.name
|
109 |
+
if req.url:
|
110 |
+
parsed_url = urllib.parse.urlparse(req.url)
|
111 |
+
if parsed_url.scheme == "file":
|
112 |
+
if urllib.parse.urlunparse(parsed_url) != req.url:
|
113 |
+
raise InvalidRequirement("Invalid URL given")
|
114 |
+
elif not (parsed_url.scheme and parsed_url.netloc) or (
|
115 |
+
not parsed_url.scheme and not parsed_url.netloc
|
116 |
+
):
|
117 |
+
raise InvalidRequirement(f"Invalid URL: {req.url}")
|
118 |
+
self.url: TOptional[str] = req.url
|
119 |
+
else:
|
120 |
+
self.url = None
|
121 |
+
self.extras: Set[str] = set(req.extras.asList() if req.extras else [])
|
122 |
+
self.specifier: SpecifierSet = SpecifierSet(req.specifier)
|
123 |
+
self.marker: TOptional[Marker] = req.marker if req.marker else None
|
124 |
+
|
125 |
+
def __str__(self) -> str:
|
126 |
+
parts: List[str] = [self.name]
|
127 |
+
|
128 |
+
if self.extras:
|
129 |
+
formatted_extras = ",".join(sorted(self.extras))
|
130 |
+
parts.append(f"[{formatted_extras}]")
|
131 |
+
|
132 |
+
if self.specifier:
|
133 |
+
parts.append(str(self.specifier))
|
134 |
+
|
135 |
+
if self.url:
|
136 |
+
parts.append(f"@ {self.url}")
|
137 |
+
if self.marker:
|
138 |
+
parts.append(" ")
|
139 |
+
|
140 |
+
if self.marker:
|
141 |
+
parts.append(f"; {self.marker}")
|
142 |
+
|
143 |
+
return "".join(parts)
|
144 |
+
|
145 |
+
def __repr__(self) -> str:
|
146 |
+
return f"<Requirement('{self}')>"
|
llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/specifiers.py
ADDED
@@ -0,0 +1,828 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# This file is dual licensed under the terms of the Apache License, Version
|
2 |
+
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
|
3 |
+
# for complete details.
|
4 |
+
|
5 |
+
import abc
|
6 |
+
import functools
|
7 |
+
import itertools
|
8 |
+
import re
|
9 |
+
import warnings
|
10 |
+
from typing import (
|
11 |
+
Callable,
|
12 |
+
Dict,
|
13 |
+
Iterable,
|
14 |
+
Iterator,
|
15 |
+
List,
|
16 |
+
Optional,
|
17 |
+
Pattern,
|
18 |
+
Set,
|
19 |
+
Tuple,
|
20 |
+
TypeVar,
|
21 |
+
Union,
|
22 |
+
)
|
23 |
+
|
24 |
+
from .utils import canonicalize_version
|
25 |
+
from .version import LegacyVersion, Version, parse
|
26 |
+
|
27 |
+
ParsedVersion = Union[Version, LegacyVersion]
|
28 |
+
UnparsedVersion = Union[Version, LegacyVersion, str]
|
29 |
+
VersionTypeVar = TypeVar("VersionTypeVar", bound=UnparsedVersion)
|
30 |
+
CallableOperator = Callable[[ParsedVersion, str], bool]
|
31 |
+
|
32 |
+
|
33 |
+
class InvalidSpecifier(ValueError):
|
34 |
+
"""
|
35 |
+
An invalid specifier was found, users should refer to PEP 440.
|
36 |
+
"""
|
37 |
+
|
38 |
+
|
39 |
+
class BaseSpecifier(metaclass=abc.ABCMeta):
|
40 |
+
@abc.abstractmethod
|
41 |
+
def __str__(self) -> str:
|
42 |
+
"""
|
43 |
+
Returns the str representation of this Specifier like object. This
|
44 |
+
should be representative of the Specifier itself.
|
45 |
+
"""
|
46 |
+
|
47 |
+
@abc.abstractmethod
|
48 |
+
def __hash__(self) -> int:
|
49 |
+
"""
|
50 |
+
Returns a hash value for this Specifier like object.
|
51 |
+
"""
|
52 |
+
|
53 |
+
@abc.abstractmethod
|
54 |
+
def __eq__(self, other: object) -> bool:
|
55 |
+
"""
|
56 |
+
Returns a boolean representing whether or not the two Specifier like
|
57 |
+
objects are equal.
|
58 |
+
"""
|
59 |
+
|
60 |
+
@abc.abstractmethod
|
61 |
+
def __ne__(self, other: object) -> bool:
|
62 |
+
"""
|
63 |
+
Returns a boolean representing whether or not the two Specifier like
|
64 |
+
objects are not equal.
|
65 |
+
"""
|
66 |
+
|
67 |
+
@abc.abstractproperty
|
68 |
+
def prereleases(self) -> Optional[bool]:
|
69 |
+
"""
|
70 |
+
Returns whether or not pre-releases as a whole are allowed by this
|
71 |
+
specifier.
|
72 |
+
"""
|
73 |
+
|
74 |
+
@prereleases.setter
|
75 |
+
def prereleases(self, value: bool) -> None:
|
76 |
+
"""
|
77 |
+
Sets whether or not pre-releases as a whole are allowed by this
|
78 |
+
specifier.
|
79 |
+
"""
|
80 |
+
|
81 |
+
@abc.abstractmethod
|
82 |
+
def contains(self, item: str, prereleases: Optional[bool] = None) -> bool:
|
83 |
+
"""
|
84 |
+
Determines if the given item is contained within this specifier.
|
85 |
+
"""
|
86 |
+
|
87 |
+
@abc.abstractmethod
|
88 |
+
def filter(
|
89 |
+
self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
|
90 |
+
) -> Iterable[VersionTypeVar]:
|
91 |
+
"""
|
92 |
+
Takes an iterable of items and filters them so that only items which
|
93 |
+
are contained within this specifier are allowed in it.
|
94 |
+
"""
|
95 |
+
|
96 |
+
|
97 |
+
class _IndividualSpecifier(BaseSpecifier):
|
98 |
+
|
99 |
+
_operators: Dict[str, str] = {}
|
100 |
+
_regex: Pattern[str]
|
101 |
+
|
102 |
+
def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
|
103 |
+
match = self._regex.search(spec)
|
104 |
+
if not match:
|
105 |
+
raise InvalidSpecifier(f"Invalid specifier: '{spec}'")
|
106 |
+
|
107 |
+
self._spec: Tuple[str, str] = (
|
108 |
+
match.group("operator").strip(),
|
109 |
+
match.group("version").strip(),
|
110 |
+
)
|
111 |
+
|
112 |
+
# Store whether or not this Specifier should accept prereleases
|
113 |
+
self._prereleases = prereleases
|
114 |
+
|
115 |
+
def __repr__(self) -> str:
|
116 |
+
pre = (
|
117 |
+
f", prereleases={self.prereleases!r}"
|
118 |
+
if self._prereleases is not None
|
119 |
+
else ""
|
120 |
+
)
|
121 |
+
|
122 |
+
return "<{}({!r}{})>".format(self.__class__.__name__, str(self), pre)
|
123 |
+
|
124 |
+
def __str__(self) -> str:
|
125 |
+
return "{}{}".format(*self._spec)
|
126 |
+
|
127 |
+
@property
|
128 |
+
def _canonical_spec(self) -> Tuple[str, str]:
|
129 |
+
return self._spec[0], canonicalize_version(self._spec[1])
|
130 |
+
|
131 |
+
def __hash__(self) -> int:
|
132 |
+
return hash(self._canonical_spec)
|
133 |
+
|
134 |
+
def __eq__(self, other: object) -> bool:
|
135 |
+
if isinstance(other, str):
|
136 |
+
try:
|
137 |
+
other = self.__class__(str(other))
|
138 |
+
except InvalidSpecifier:
|
139 |
+
return NotImplemented
|
140 |
+
elif not isinstance(other, self.__class__):
|
141 |
+
return NotImplemented
|
142 |
+
|
143 |
+
return self._canonical_spec == other._canonical_spec
|
144 |
+
|
145 |
+
def __ne__(self, other: object) -> bool:
|
146 |
+
if isinstance(other, str):
|
147 |
+
try:
|
148 |
+
other = self.__class__(str(other))
|
149 |
+
except InvalidSpecifier:
|
150 |
+
return NotImplemented
|
151 |
+
elif not isinstance(other, self.__class__):
|
152 |
+
return NotImplemented
|
153 |
+
|
154 |
+
return self._spec != other._spec
|
155 |
+
|
156 |
+
def _get_operator(self, op: str) -> CallableOperator:
|
157 |
+
operator_callable: CallableOperator = getattr(
|
158 |
+
self, f"_compare_{self._operators[op]}"
|
159 |
+
)
|
160 |
+
return operator_callable
|
161 |
+
|
162 |
+
def _coerce_version(self, version: UnparsedVersion) -> ParsedVersion:
|
163 |
+
if not isinstance(version, (LegacyVersion, Version)):
|
164 |
+
version = parse(version)
|
165 |
+
return version
|
166 |
+
|
167 |
+
@property
|
168 |
+
def operator(self) -> str:
|
169 |
+
return self._spec[0]
|
170 |
+
|
171 |
+
@property
|
172 |
+
def version(self) -> str:
|
173 |
+
return self._spec[1]
|
174 |
+
|
175 |
+
@property
|
176 |
+
def prereleases(self) -> Optional[bool]:
|
177 |
+
return self._prereleases
|
178 |
+
|
179 |
+
@prereleases.setter
|
180 |
+
def prereleases(self, value: bool) -> None:
|
181 |
+
self._prereleases = value
|
182 |
+
|
183 |
+
def __contains__(self, item: str) -> bool:
|
184 |
+
return self.contains(item)
|
185 |
+
|
186 |
+
def contains(
|
187 |
+
self, item: UnparsedVersion, prereleases: Optional[bool] = None
|
188 |
+
) -> bool:
|
189 |
+
|
190 |
+
# Determine if prereleases are to be allowed or not.
|
191 |
+
if prereleases is None:
|
192 |
+
prereleases = self.prereleases
|
193 |
+
|
194 |
+
# Normalize item to a Version or LegacyVersion, this allows us to have
|
195 |
+
# a shortcut for ``"2.0" in Specifier(">=2")
|
196 |
+
normalized_item = self._coerce_version(item)
|
197 |
+
|
198 |
+
# Determine if we should be supporting prereleases in this specifier
|
199 |
+
# or not, if we do not support prereleases than we can short circuit
|
200 |
+
# logic if this version is a prereleases.
|
201 |
+
if normalized_item.is_prerelease and not prereleases:
|
202 |
+
return False
|
203 |
+
|
204 |
+
# Actually do the comparison to determine if this item is contained
|
205 |
+
# within this Specifier or not.
|
206 |
+
operator_callable: CallableOperator = self._get_operator(self.operator)
|
207 |
+
return operator_callable(normalized_item, self.version)
|
208 |
+
|
209 |
+
def filter(
|
210 |
+
self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
|
211 |
+
) -> Iterable[VersionTypeVar]:
|
212 |
+
|
213 |
+
yielded = False
|
214 |
+
found_prereleases = []
|
215 |
+
|
216 |
+
kw = {"prereleases": prereleases if prereleases is not None else True}
|
217 |
+
|
218 |
+
# Attempt to iterate over all the values in the iterable and if any of
|
219 |
+
# them match, yield them.
|
220 |
+
for version in iterable:
|
221 |
+
parsed_version = self._coerce_version(version)
|
222 |
+
|
223 |
+
if self.contains(parsed_version, **kw):
|
224 |
+
# If our version is a prerelease, and we were not set to allow
|
225 |
+
# prereleases, then we'll store it for later in case nothing
|
226 |
+
# else matches this specifier.
|
227 |
+
if parsed_version.is_prerelease and not (
|
228 |
+
prereleases or self.prereleases
|
229 |
+
):
|
230 |
+
found_prereleases.append(version)
|
231 |
+
# Either this is not a prerelease, or we should have been
|
232 |
+
# accepting prereleases from the beginning.
|
233 |
+
else:
|
234 |
+
yielded = True
|
235 |
+
yield version
|
236 |
+
|
237 |
+
# Now that we've iterated over everything, determine if we've yielded
|
238 |
+
# any values, and if we have not and we have any prereleases stored up
|
239 |
+
# then we will go ahead and yield the prereleases.
|
240 |
+
if not yielded and found_prereleases:
|
241 |
+
for version in found_prereleases:
|
242 |
+
yield version
|
243 |
+
|
244 |
+
|
245 |
+
class LegacySpecifier(_IndividualSpecifier):
|
246 |
+
|
247 |
+
_regex_str = r"""
|
248 |
+
(?P<operator>(==|!=|<=|>=|<|>))
|
249 |
+
\s*
|
250 |
+
(?P<version>
|
251 |
+
[^,;\s)]* # Since this is a "legacy" specifier, and the version
|
252 |
+
# string can be just about anything, we match everything
|
253 |
+
# except for whitespace, a semi-colon for marker support,
|
254 |
+
# a closing paren since versions can be enclosed in
|
255 |
+
# them, and a comma since it's a version separator.
|
256 |
+
)
|
257 |
+
"""
|
258 |
+
|
259 |
+
_regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
|
260 |
+
|
261 |
+
_operators = {
|
262 |
+
"==": "equal",
|
263 |
+
"!=": "not_equal",
|
264 |
+
"<=": "less_than_equal",
|
265 |
+
">=": "greater_than_equal",
|
266 |
+
"<": "less_than",
|
267 |
+
">": "greater_than",
|
268 |
+
}
|
269 |
+
|
270 |
+
def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
|
271 |
+
super().__init__(spec, prereleases)
|
272 |
+
|
273 |
+
warnings.warn(
|
274 |
+
"Creating a LegacyVersion has been deprecated and will be "
|
275 |
+
"removed in the next major release",
|
276 |
+
DeprecationWarning,
|
277 |
+
)
|
278 |
+
|
279 |
+
def _coerce_version(self, version: UnparsedVersion) -> LegacyVersion:
|
280 |
+
if not isinstance(version, LegacyVersion):
|
281 |
+
version = LegacyVersion(str(version))
|
282 |
+
return version
|
283 |
+
|
284 |
+
def _compare_equal(self, prospective: LegacyVersion, spec: str) -> bool:
|
285 |
+
return prospective == self._coerce_version(spec)
|
286 |
+
|
287 |
+
def _compare_not_equal(self, prospective: LegacyVersion, spec: str) -> bool:
|
288 |
+
return prospective != self._coerce_version(spec)
|
289 |
+
|
290 |
+
def _compare_less_than_equal(self, prospective: LegacyVersion, spec: str) -> bool:
|
291 |
+
return prospective <= self._coerce_version(spec)
|
292 |
+
|
293 |
+
def _compare_greater_than_equal(
|
294 |
+
self, prospective: LegacyVersion, spec: str
|
295 |
+
) -> bool:
|
296 |
+
return prospective >= self._coerce_version(spec)
|
297 |
+
|
298 |
+
def _compare_less_than(self, prospective: LegacyVersion, spec: str) -> bool:
|
299 |
+
return prospective < self._coerce_version(spec)
|
300 |
+
|
301 |
+
def _compare_greater_than(self, prospective: LegacyVersion, spec: str) -> bool:
|
302 |
+
return prospective > self._coerce_version(spec)
|
303 |
+
|
304 |
+
|
305 |
+
def _require_version_compare(
|
306 |
+
fn: Callable[["Specifier", ParsedVersion, str], bool]
|
307 |
+
) -> Callable[["Specifier", ParsedVersion, str], bool]:
|
308 |
+
@functools.wraps(fn)
|
309 |
+
def wrapped(self: "Specifier", prospective: ParsedVersion, spec: str) -> bool:
|
310 |
+
if not isinstance(prospective, Version):
|
311 |
+
return False
|
312 |
+
return fn(self, prospective, spec)
|
313 |
+
|
314 |
+
return wrapped
|
315 |
+
|
316 |
+
|
317 |
+
class Specifier(_IndividualSpecifier):
    """A single PEP 440 version specifier: one operator plus one version.

    Parsing and the operator -> ``_compare_*`` dispatch live in
    ``_IndividualSpecifier``; this class contributes the PEP 440 regex and
    the per-operator comparison implementations.
    """

    # Verbose regex for one specifier clause.  Each alternative uses a
    # look-behind on the operator to restrict which version syntax is allowed
    # after it.
    _regex_str = r"""
        (?P<operator>(~=|==|!=|<=|>=|<|>|===))
        (?P<version>
            (?:
                # The identity operators allow for an escape hatch that will
                # do an exact string match of the version you wish to install.
                # This will not be parsed by PEP 440 and we cannot determine
                # any semantic meaning from it. This operator is discouraged
                # but included entirely as an escape hatch.
                (?<====)  # Only match for the identity operator
                \s*
                [^\s]*    # We just match everything, except for whitespace
                          # since we are only testing for strict identity.
            )
            |
            (?:
                # The (non)equality operators allow for wild card and local
                # versions to be specified so we have to define these two
                # operators separately to enable that.
                (?<===|!=)  # Only match for equals and not equals

                \s*
                v?
                (?:[0-9]+!)?          # epoch
                [0-9]+(?:\.[0-9]+)*   # release
                (?:                   # pre release
                    [-_\.]?
                    (a|b|c|rc|alpha|beta|pre|preview)
                    [-_\.]?
                    [0-9]*
                )?
                (?:                   # post release
                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
                )?

                # You cannot use a wild card and a dev or local version
                # together so group them with a | and make them optional.
                (?:
                    (?:[-_\.]?dev[-_\.]?[0-9]*)?          # dev release
                    (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)?  # local
                    |
                    \.\*  # Wild card syntax of .*
                )?
            )
            |
            (?:
                # The compatible operator requires at least two digits in the
                # release segment.
                (?<=~=)  # Only match for the compatible operator

                \s*
                v?
                (?:[0-9]+!)?          # epoch
                [0-9]+(?:\.[0-9]+)+   # release  (We have a + instead of a *)
                (?:                   # pre release
                    [-_\.]?
                    (a|b|c|rc|alpha|beta|pre|preview)
                    [-_\.]?
                    [0-9]*
                )?
                (?:                   # post release
                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
                )?
                (?:[-_\.]?dev[-_\.]?[0-9]*)?  # dev release
            )
            |
            (?:
                # All other operators only allow a sub set of what the
                # (non)equality operators do. Specifically they do not allow
                # local versions to be specified nor do they allow the prefix
                # matching wild cards.
                (?<!==|!=|~=)  # We have special cases for these
                               # operators so we want to make sure they
                               # don't match here.

                \s*
                v?
                (?:[0-9]+!)?          # epoch
                [0-9]+(?:\.[0-9]+)*   # release
                (?:                   # pre release
                    [-_\.]?
                    (a|b|c|rc|alpha|beta|pre|preview)
                    [-_\.]?
                    [0-9]*
                )?
                (?:                   # post release
                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
                )?
                (?:[-_\.]?dev[-_\.]?[0-9]*)?  # dev release
            )
        )
        """

    _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)

    # Operator token -> suffix of the _compare_* method that implements it.
    _operators = {
        "~=": "compatible",
        "==": "equal",
        "!=": "not_equal",
        "<=": "less_than_equal",
        ">=": "greater_than_equal",
        "<": "less_than",
        ">": "greater_than",
        "===": "arbitrary",
    }

    @_require_version_compare
    def _compare_compatible(self, prospective: ParsedVersion, spec: str) -> bool:
        """Implement ``~=`` (compatible release)."""

        # Compatible releases have an equivalent combination of >= and ==. That
        # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
        # implement this in terms of the other specifiers instead of
        # implementing it ourselves. The only thing we need to do is construct
        # the other specifiers.

        # We want everything but the last item in the version, but we want to
        # ignore suffix segments.
        prefix = ".".join(
            list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1]
        )

        # Add the prefix notation to the end of our string
        prefix += ".*"

        return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
            prospective, prefix
        )

    @_require_version_compare
    def _compare_equal(self, prospective: ParsedVersion, spec: str) -> bool:
        """Implement ``==``, including ``.*`` prefix matching."""

        # We need special logic to handle prefix matching
        if spec.endswith(".*"):
            # In the case of prefix matching we want to ignore local segment.
            prospective = Version(prospective.public)
            # Split the spec out by dots, and pretend that there is an implicit
            # dot in between a release segment and a pre-release segment.
            split_spec = _version_split(spec[:-2])  # Remove the trailing .*

            # Split the prospective version out by dots, and pretend that there
            # is an implicit dot in between a release segment and a pre-release
            # segment.
            split_prospective = _version_split(str(prospective))

            # Shorten the prospective version to be the same length as the spec
            # so that we can determine if the specifier is a prefix of the
            # prospective version or not.
            shortened_prospective = split_prospective[: len(split_spec)]

            # Pad out our two sides with zeros so that they both equal the same
            # length.
            padded_spec, padded_prospective = _pad_version(
                split_spec, shortened_prospective
            )

            return padded_prospective == padded_spec
        else:
            # Convert our spec string into a Version
            spec_version = Version(spec)

            # If the specifier does not have a local segment, then we want to
            # act as if the prospective version also does not have a local
            # segment.
            if not spec_version.local:
                prospective = Version(prospective.public)

            return prospective == spec_version

    @_require_version_compare
    def _compare_not_equal(self, prospective: ParsedVersion, spec: str) -> bool:
        """Implement ``!=`` as the negation of ``==``."""
        return not self._compare_equal(prospective, spec)

    @_require_version_compare
    def _compare_less_than_equal(self, prospective: ParsedVersion, spec: str) -> bool:
        """Implement ``<=``."""

        # NB: Local version identifiers are NOT permitted in the version
        # specifier, so local version labels can be universally removed from
        # the prospective version.
        return Version(prospective.public) <= Version(spec)

    @_require_version_compare
    def _compare_greater_than_equal(
        self, prospective: ParsedVersion, spec: str
    ) -> bool:
        """Implement ``>=``."""

        # NB: Local version identifiers are NOT permitted in the version
        # specifier, so local version labels can be universally removed from
        # the prospective version.
        return Version(prospective.public) >= Version(spec)

    @_require_version_compare
    def _compare_less_than(self, prospective: ParsedVersion, spec_str: str) -> bool:
        """Implement ``<`` with the PEP 440 pre-release exclusion rule."""

        # Convert our spec to a Version instance, since we'll want to work with
        # it as a version.
        spec = Version(spec_str)

        # Check to see if the prospective version is less than the spec
        # version. If it's not we can short circuit and just return False now
        # instead of doing extra unneeded work.
        if not prospective < spec:
            return False

        # This special case is here so that, unless the specifier itself
        # includes is a pre-release version, that we do not accept pre-release
        # versions for the version mentioned in the specifier (e.g. <3.1 should
        # not match 3.1.dev0, but should match 3.0.dev0).
        if not spec.is_prerelease and prospective.is_prerelease:
            if Version(prospective.base_version) == Version(spec.base_version):
                return False

        # If we've gotten to here, it means that prospective version is both
        # less than the spec version *and* it's not a pre-release of the same
        # version in the spec.
        return True

    @_require_version_compare
    def _compare_greater_than(self, prospective: ParsedVersion, spec_str: str) -> bool:
        """Implement ``>`` with the post-release and local-version exclusions."""

        # Convert our spec to a Version instance, since we'll want to work with
        # it as a version.
        spec = Version(spec_str)

        # Check to see if the prospective version is greater than the spec
        # version. If it's not we can short circuit and just return False now
        # instead of doing extra unneeded work.
        if not prospective > spec:
            return False

        # This special case is here so that, unless the specifier itself
        # includes is a post-release version, that we do not accept
        # post-release versions for the version mentioned in the specifier
        # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
        if not spec.is_postrelease and prospective.is_postrelease:
            if Version(prospective.base_version) == Version(spec.base_version):
                return False

        # Ensure that we do not allow a local version of the version mentioned
        # in the specifier, which is technically greater than, to match.
        if prospective.local is not None:
            if Version(prospective.base_version) == Version(spec.base_version):
                return False

        # If we've gotten to here, it means that prospective version is both
        # greater than the spec version *and* it's not a pre-release of the
        # same version in the spec.
        return True

    def _compare_arbitrary(self, prospective: Version, spec: str) -> bool:
        """Implement ``===``: case-insensitive exact string identity."""
        return str(prospective).lower() == str(spec).lower()

    @property
    def prereleases(self) -> bool:
        """Whether this specifier should be considered to allow pre-releases."""

        # If there is an explicit prereleases set for this, then we'll just
        # blindly use that.
        if self._prereleases is not None:
            return self._prereleases

        # Look at all of our specifiers and determine if they are inclusive
        # operators, and if they are if they are including an explicit
        # prerelease.
        operator, version = self._spec
        if operator in ["==", ">=", "<=", "~=", "==="]:
            # The == specifier can include a trailing .*, if it does we
            # want to remove before parsing.
            if operator == "==" and version.endswith(".*"):
                version = version[:-2]

            # Parse the version, and if it is a pre-release than this
            # specifier allows pre-releases.
            if parse(version).is_prerelease:
                return True

        return False

    @prereleases.setter
    def prereleases(self, value: bool) -> None:
        self._prereleases = value
|
598 |
+
|
599 |
+
|
600 |
+
_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
|
601 |
+
|
602 |
+
|
603 |
+
def _version_split(version: str) -> List[str]:
|
604 |
+
result: List[str] = []
|
605 |
+
for item in version.split("."):
|
606 |
+
match = _prefix_regex.search(item)
|
607 |
+
if match:
|
608 |
+
result.extend(match.groups())
|
609 |
+
else:
|
610 |
+
result.append(item)
|
611 |
+
return result
|
612 |
+
|
613 |
+
|
614 |
+
def _is_not_suffix(segment: str) -> bool:
|
615 |
+
return not any(
|
616 |
+
segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post")
|
617 |
+
)
|
618 |
+
|
619 |
+
|
620 |
+
def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]:
|
621 |
+
left_split, right_split = [], []
|
622 |
+
|
623 |
+
# Get the release segment of our versions
|
624 |
+
left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
|
625 |
+
right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
|
626 |
+
|
627 |
+
# Get the rest of our versions
|
628 |
+
left_split.append(left[len(left_split[0]) :])
|
629 |
+
right_split.append(right[len(right_split[0]) :])
|
630 |
+
|
631 |
+
# Insert our padding
|
632 |
+
left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
|
633 |
+
right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))
|
634 |
+
|
635 |
+
return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split)))
|
636 |
+
|
637 |
+
|
638 |
+
class SpecifierSet(BaseSpecifier):
    """A comma-separated collection of individual specifiers, combined with
    logical AND.

    Each clause is parsed as a PEP 440 ``Specifier`` where possible, falling
    back to ``LegacySpecifier`` otherwise.
    """

    def __init__(
        self, specifiers: str = "", prereleases: Optional[bool] = None
    ) -> None:
        """Parse *specifiers* (e.g. ``">=1.0,<2.0"``) into a frozen set of
        individual specifiers; *prereleases* optionally forces pre-release
        acceptance on or off."""

        # Split on , to break each individual specifier into it's own item, and
        # strip each item to remove leading/trailing whitespace.
        split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]

        # Parsed each individual specifier, attempting first to make it a
        # Specifier and falling back to a LegacySpecifier.
        parsed: Set[_IndividualSpecifier] = set()
        for specifier in split_specifiers:
            try:
                parsed.add(Specifier(specifier))
            except InvalidSpecifier:
                parsed.add(LegacySpecifier(specifier))

        # Turn our parsed specifiers into a frozen set and save them for later.
        self._specs = frozenset(parsed)

        # Store our prereleases value so we can use it later to determine if
        # we accept prereleases or not.
        self._prereleases = prereleases

    def __repr__(self) -> str:
        pre = (
            f", prereleases={self.prereleases!r}"
            if self._prereleases is not None
            else ""
        )

        return "<SpecifierSet({!r}{})>".format(str(self), pre)

    def __str__(self) -> str:
        # Sorted so that equal sets stringify identically.
        return ",".join(sorted(str(s) for s in self._specs))

    def __hash__(self) -> int:
        return hash(self._specs)

    def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet":
        """Return the union of both sets' clauses (a logical AND of the
        constraints).  Conflicting explicit prerelease overrides raise
        ValueError."""
        if isinstance(other, str):
            other = SpecifierSet(other)
        elif not isinstance(other, SpecifierSet):
            return NotImplemented

        specifier = SpecifierSet()
        specifier._specs = frozenset(self._specs | other._specs)

        if self._prereleases is None and other._prereleases is not None:
            specifier._prereleases = other._prereleases
        elif self._prereleases is not None and other._prereleases is None:
            specifier._prereleases = self._prereleases
        elif self._prereleases == other._prereleases:
            specifier._prereleases = self._prereleases
        else:
            raise ValueError(
                "Cannot combine SpecifierSets with True and False prerelease "
                "overrides."
            )

        return specifier

    def __eq__(self, other: object) -> bool:
        if isinstance(other, (str, _IndividualSpecifier)):
            other = SpecifierSet(str(other))
        elif not isinstance(other, SpecifierSet):
            return NotImplemented

        return self._specs == other._specs

    def __ne__(self, other: object) -> bool:
        if isinstance(other, (str, _IndividualSpecifier)):
            other = SpecifierSet(str(other))
        elif not isinstance(other, SpecifierSet):
            return NotImplemented

        return self._specs != other._specs

    def __len__(self) -> int:
        return len(self._specs)

    def __iter__(self) -> Iterator[_IndividualSpecifier]:
        return iter(self._specs)

    @property
    def prereleases(self) -> Optional[bool]:
        """Tri-state pre-release policy: the explicit override if set, None
        when there are no specifiers to infer from, else whether any member
        specifier accepts pre-releases."""

        # If we have been given an explicit prerelease modifier, then we'll
        # pass that through here.
        if self._prereleases is not None:
            return self._prereleases

        # If we don't have any specifiers, and we don't have a forced value,
        # then we'll just return None since we don't know if this should have
        # pre-releases or not.
        if not self._specs:
            return None

        # Otherwise we'll see if any of the given specifiers accept
        # prereleases, if any of them do we'll return True, otherwise False.
        return any(s.prereleases for s in self._specs)

    @prereleases.setter
    def prereleases(self, value: bool) -> None:
        self._prereleases = value

    def __contains__(self, item: UnparsedVersion) -> bool:
        return self.contains(item)

    def contains(
        self, item: UnparsedVersion, prereleases: Optional[bool] = None
    ) -> bool:
        """Return True when *item* satisfies every specifier in the set."""

        # Ensure that our item is a Version or LegacyVersion instance.
        if not isinstance(item, (LegacyVersion, Version)):
            item = parse(item)

        # Determine if we're forcing a prerelease or not, if we're not forcing
        # one for this particular filter call, then we'll use whatever the
        # SpecifierSet thinks for whether or not we should support prereleases.
        if prereleases is None:
            prereleases = self.prereleases

        # We can determine if we're going to allow pre-releases by looking to
        # see if any of the underlying items supports them. If none of them do
        # and this item is a pre-release then we do not allow it and we can
        # short circuit that here.
        # Note: This means that 1.0.dev1 would not be contained in something
        # like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0
        if not prereleases and item.is_prerelease:
            return False

        # We simply dispatch to the underlying specs here to make sure that the
        # given version is contained within all of them.
        # Note: This use of all() here means that an empty set of specifiers
        # will always return True, this is an explicit design decision.
        return all(s.contains(item, prereleases=prereleases) for s in self._specs)

    def filter(
        self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
    ) -> Iterable[VersionTypeVar]:
        """Filter *iterable* down to the items accepted by every specifier,
        applying the set's pre-release policy."""

        # Determine if we're forcing a prerelease or not, if we're not forcing
        # one for this particular filter call, then we'll use whatever the
        # SpecifierSet thinks for whether or not we should support prereleases.
        if prereleases is None:
            prereleases = self.prereleases

        # If we have any specifiers, then we want to wrap our iterable in the
        # filter method for each one, this will act as a logical AND amongst
        # each specifier.
        if self._specs:
            for spec in self._specs:
                iterable = spec.filter(iterable, prereleases=bool(prereleases))
            return iterable
        # If we do not have any specifiers, then we need to have a rough filter
        # which will filter out any pre-releases, unless there are no final
        # releases, and which will filter out LegacyVersion in general.
        else:
            filtered: List[VersionTypeVar] = []
            found_prereleases: List[VersionTypeVar] = []

            item: UnparsedVersion
            parsed_version: Union[Version, LegacyVersion]

            for item in iterable:
                # Ensure that we some kind of Version class for this item.
                if not isinstance(item, (LegacyVersion, Version)):
                    parsed_version = parse(item)
                else:
                    parsed_version = item

                # Filter out any item which is parsed as a LegacyVersion
                if isinstance(parsed_version, LegacyVersion):
                    continue

                # Store any item which is a pre-release for later unless we've
                # already found a final version or we are accepting prereleases
                if parsed_version.is_prerelease and not prereleases:
                    if not filtered:
                        found_prereleases.append(item)
                else:
                    filtered.append(item)

            # If we've found no items except for pre-releases, then we'll go
            # ahead and use the pre-releases
            if not filtered and found_prereleases and prereleases is None:
                return found_prereleases

            return filtered
|
llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/tags.py
ADDED
@@ -0,0 +1,484 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# This file is dual licensed under the terms of the Apache License, Version
|
2 |
+
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
|
3 |
+
# for complete details.
|
4 |
+
|
5 |
+
import logging
|
6 |
+
import platform
|
7 |
+
import sys
|
8 |
+
import sysconfig
|
9 |
+
from importlib.machinery import EXTENSION_SUFFIXES
|
10 |
+
from typing import (
|
11 |
+
Dict,
|
12 |
+
FrozenSet,
|
13 |
+
Iterable,
|
14 |
+
Iterator,
|
15 |
+
List,
|
16 |
+
Optional,
|
17 |
+
Sequence,
|
18 |
+
Tuple,
|
19 |
+
Union,
|
20 |
+
cast,
|
21 |
+
)
|
22 |
+
|
23 |
+
from . import _manylinux, _musllinux
|
24 |
+
|
25 |
+
logger = logging.getLogger(__name__)

# A version expressed as a sequence of ints, e.g. sys.version_info[:2].
PythonVersion = Sequence[int]
# A macOS version as a (major, minor) pair.
MacVersion = Tuple[int, int]

# Interpreter implementation name -> its wheel-tag short form.
INTERPRETER_SHORT_NAMES: Dict[str, str] = {
    "python": "py",  # Generic.
    "cpython": "cp",
    "pypy": "pp",
    "ironpython": "ip",
    "jython": "jy",
}


# True when the running interpreter uses 32-bit (or smaller) pointers.
_32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32
|
40 |
+
|
41 |
+
|
42 |
+
class Tag:
    """
    A representation of the tag triple for a wheel.

    Instances are considered immutable and thus are hashable. Equality checking
    is also supported.
    """

    __slots__ = ["_interpreter", "_abi", "_platform", "_hash"]

    def __init__(self, interpreter: str, abi: str, platform: str) -> None:
        # Tags compare case-insensitively, so normalize on construction.
        self._interpreter, self._abi, self._platform = (
            interpreter.lower(),
            abi.lower(),
            platform.lower(),
        )
        # Tag objects are hashed very frequently (e.g. by set operations while
        # scanning pages of package links), so the hash is computed once here
        # rather than on every __hash__ call.
        self._hash = hash((self._interpreter, self._abi, self._platform))

    @property
    def interpreter(self) -> str:
        return self._interpreter

    @property
    def abi(self) -> str:
        return self._abi

    @property
    def platform(self) -> str:
        return self._platform

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Tag):
            return NotImplemented
        # The cached hash leads the tuple so unequal tags usually fail on the
        # first, cheapest comparison.
        return (self._hash, self._platform, self._abi, self._interpreter) == (
            other._hash,
            other._platform,
            other._abi,
            other._interpreter,
        )

    def __hash__(self) -> int:
        return self._hash

    def __str__(self) -> str:
        return f"{self._interpreter}-{self._abi}-{self._platform}"

    def __repr__(self) -> str:
        return "<{self} @ {self_id}>".format(self=self, self_id=id(self))
|
94 |
+
|
95 |
+
|
96 |
+
def parse_tag(tag: str) -> "FrozenSet[Tag]":
    """
    Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances.

    Returning a set is required due to the possibility that the tag is a
    compressed tag set.
    """
    interpreters, abis, platforms = tag.split("-")
    # Each dash-separated field may itself be a "."-compressed list; the
    # expansion is the cross product of the three fields.
    return frozenset(
        Tag(interpreter, abi, platform_)
        for interpreter in interpreters.split(".")
        for abi in abis.split(".")
        for platform_ in platforms.split(".")
    )
|
110 |
+
|
111 |
+
|
112 |
+
def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]:
|
113 |
+
value = sysconfig.get_config_var(name)
|
114 |
+
if value is None and warn:
|
115 |
+
logger.debug(
|
116 |
+
"Config variable '%s' is unset, Python ABI tag may be incorrect", name
|
117 |
+
)
|
118 |
+
return value
|
119 |
+
|
120 |
+
|
121 |
+
def _normalize_string(string: str) -> str:
|
122 |
+
return string.replace(".", "_").replace("-", "_")
|
123 |
+
|
124 |
+
|
125 |
+
def _abi3_applies(python_version: PythonVersion) -> bool:
|
126 |
+
"""
|
127 |
+
Determine if the Python version supports abi3.
|
128 |
+
|
129 |
+
PEP 384 was first implemented in Python 3.2.
|
130 |
+
"""
|
131 |
+
return len(python_version) > 1 and tuple(python_version) >= (3, 2)
|
132 |
+
|
133 |
+
|
134 |
+
def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]:
    """Return the CPython ABI tags for *py_version*, most specific first.

    The primary tag is ``cp{version}{d}{m}{u}`` where the debug, pymalloc and
    UCS-4 flags are derived from sysconfig; a plain ``cp{version}`` fallback
    is appended for debug builds on 3.8+ (which can also load normal
    extension modules).
    """
    py_version = tuple(py_version)  # To allow for version comparison.
    abis = []
    version = _version_nodot(py_version[:2])
    debug = pymalloc = ucs4 = ""
    with_debug = _get_config_var("Py_DEBUG", warn)
    has_refcount = hasattr(sys, "gettotalrefcount")
    # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled
    # extension modules is the best option.
    # https://github.com/pypa/pip/issues/3383#issuecomment-173267692
    has_ext = "_d.pyd" in EXTENSION_SUFFIXES
    if with_debug or (with_debug is None and (has_refcount or has_ext)):
        debug = "d"
    if py_version < (3, 8):
        # The "m" (pymalloc) ABI flag was dropped in 3.8.
        with_pymalloc = _get_config_var("WITH_PYMALLOC", warn)
        if with_pymalloc or with_pymalloc is None:
            pymalloc = "m"
        if py_version < (3, 3):
            # The "u" (wide-unicode) flag only existed before 3.3.
            unicode_size = _get_config_var("Py_UNICODE_SIZE", warn)
            if unicode_size == 4 or (
                unicode_size is None and sys.maxunicode == 0x10FFFF
            ):
                ucs4 = "u"
    elif debug:
        # Debug builds can also load "normal" extension modules.
        # We can also assume no UCS-4 or pymalloc requirement.
        abis.append(f"cp{version}")
    abis.insert(
        0,
        "cp{version}{debug}{pymalloc}{ucs4}".format(
            version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4
        ),
    )
    return abis
|
168 |
+
|
169 |
+
|
170 |
+
def cpython_tags(
    python_version: Optional[PythonVersion] = None,
    abis: Optional[Iterable[str]] = None,
    platforms: Optional[Iterable[str]] = None,
    *,
    warn: bool = False,
) -> Iterator[Tag]:
    """
    Yields the tags for a CPython interpreter.

    The tags consist of:
    - cp<python_version>-<abi>-<platform>
    - cp<python_version>-abi3-<platform>
    - cp<python_version>-none-<platform>
    - cp<less than python_version>-abi3-<platform>  # Older Python versions down to 3.2.

    If python_version only specifies a major version then user-provided ABIs and
    the 'none' ABI tag will be used.

    If 'abi3' or 'none' are specified in 'abis' then they will be yielded at
    their normal position and not at the beginning.
    """
    if not python_version:
        python_version = sys.version_info[:2]

    interpreter = "cp{}".format(_version_nodot(python_version[:2]))

    if abis is None:
        if len(python_version) > 1:
            abis = _cpython_abis(python_version, warn)
        else:
            # Major-only version: no specific ABI can be derived.
            abis = []
    abis = list(abis)
    # 'abi3' and 'none' are explicitly handled later.
    for explicit_abi in ("abi3", "none"):
        try:
            abis.remove(explicit_abi)
        except ValueError:
            pass

    platforms = list(platforms or platform_tags())
    for abi in abis:
        for platform_ in platforms:
            yield Tag(interpreter, abi, platform_)
    if _abi3_applies(python_version):
        yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms)
    yield from (Tag(interpreter, "none", platform_) for platform_ in platforms)

    if _abi3_applies(python_version):
        # abi3 wheels built for older minor versions (down to 3.2) remain
        # loadable, so yield those interpreter tags as well.
        for minor_version in range(python_version[1] - 1, 1, -1):
            for platform_ in platforms:
                interpreter = "cp{version}".format(
                    version=_version_nodot((python_version[0], minor_version))
                )
                yield Tag(interpreter, "abi3", platform_)
|
225 |
+
|
226 |
+
|
227 |
+
def _generic_abi() -> Iterator[str]:
    """Yield the ABI tag derived from the interpreter's SOABI, when set."""
    soabi = sysconfig.get_config_var("SOABI")
    if soabi:
        yield _normalize_string(soabi)
|
231 |
+
|
232 |
+
|
233 |
+
def generic_tags(
    interpreter: Optional[str] = None,
    abis: Optional[Iterable[str]] = None,
    platforms: Optional[Iterable[str]] = None,
    *,
    warn: bool = False,
) -> Iterator[Tag]:
    """
    Yields the tags for a generic interpreter.

    The tags consist of:
    - <interpreter>-<abi>-<platform>

    The "none" ABI will be added if it was not explicitly provided.
    """
    if not interpreter:
        interpreter = interpreter_name() + interpreter_version(warn=warn)
    abi_list = list(_generic_abi() if abis is None else abis)
    if "none" not in abi_list:
        abi_list.append("none")
    platform_list = list(platforms or platform_tags())
    for abi in abi_list:
        for plat in platform_list:
            yield Tag(interpreter, abi, plat)
|
261 |
+
|
262 |
+
|
263 |
+
def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]:
    """
    Yields Python versions in descending order.

    After the latest version, the major-only version will be yielded, and then
    all previous versions of that major version.
    """
    major = py_version[0]
    if len(py_version) > 1:
        yield "py" + _version_nodot(py_version[:2])
    yield "py{}".format(major)
    if len(py_version) > 1:
        for minor in range(py_version[1] - 1, -1, -1):
            yield "py" + _version_nodot((major, minor))
|
276 |
+
|
277 |
+
|
278 |
+
def compatible_tags(
    python_version: Optional[PythonVersion] = None,
    interpreter: Optional[str] = None,
    platforms: Optional[Iterable[str]] = None,
) -> Iterator[Tag]:
    """
    Yields the sequence of tags that are compatible with a specific version of Python.

    The tags consist of:
    - py*-none-<platform>
    - <interpreter>-none-any  # ... if `interpreter` is provided.
    - py*-none-any
    """
    version = python_version or sys.version_info[:2]
    platform_list = list(platforms or platform_tags())
    for py_tag in _py_interpreter_range(version):
        for plat in platform_list:
            yield Tag(py_tag, "none", plat)
    if interpreter:
        yield Tag(interpreter, "none", "any")
    for py_tag in _py_interpreter_range(version):
        yield Tag(py_tag, "none", "any")
|
301 |
+
|
302 |
+
|
303 |
+
def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str:
    """Map *arch* to its 32-bit equivalent when running a 32-bit interpreter."""
    if is_32bit:
        # Any PowerPC variant collapses to "ppc"; everything else to "i386".
        return "ppc" if arch.startswith("ppc") else "i386"
    return arch
|
311 |
+
|
312 |
+
|
313 |
+
def _mac_binary_formats(version: MacVersion, cpu_arch: str) -> List[str]:
    """Return the binary-format tags supported for *cpu_arch* on macOS *version*."""
    formats = [cpu_arch]

    if cpu_arch == "x86_64":
        if version < (10, 4):
            return []
        formats += ["intel", "fat64", "fat32"]
    elif cpu_arch == "i386":
        if version < (10, 4):
            return []
        formats += ["intel", "fat32", "fat"]
    elif cpu_arch == "ppc64":
        # TODO: Need to care about 32-bit PPC for ppc64 through 10.2?
        if not ((10, 4) <= version <= (10, 5)):
            return []
        formats += ["fat64"]
    elif cpu_arch == "ppc":
        if version > (10, 6):
            return []
        formats += ["fat32", "fat"]

    if cpu_arch in {"arm64", "x86_64"}:
        formats.append("universal2")

    if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}:
        formats.append("universal")

    return formats
|
343 |
+
|
344 |
+
|
345 |
+
def mac_platforms(
    version: Optional[MacVersion] = None, arch: Optional[str] = None
) -> Iterator[str]:
    """
    Yields the platform tags for a macOS system.

    The `version` parameter is a two-item tuple specifying the macOS version to
    generate platform tags for. The `arch` parameter is the CPU architecture to
    generate platform tags for. Both parameters default to the appropriate value
    for the current system.
    """
    version_str, _, cpu_arch = platform.mac_ver()
    if version is None:
        version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
    if arch is None:
        arch = _mac_arch(cpu_arch)

    if (10, 0) <= version < (11, 0):
        # Prior to Mac OS 11, each yearly release of Mac OS bumped the
        # "minor" version number. The major version was always 10.
        for minor_version in range(version[1], -1, -1):
            compat_version = 10, minor_version
            binary_formats = _mac_binary_formats(compat_version, arch)
            for binary_format in binary_formats:
                yield "macosx_{major}_{minor}_{binary_format}".format(
                    major=10, minor=minor_version, binary_format=binary_format
                )

    if version >= (11, 0):
        # Starting with Mac OS 11, each yearly release bumps the major version
        # number. The minor versions are now the midyear updates.
        for major_version in range(version[0], 10, -1):
            compat_version = major_version, 0
            binary_formats = _mac_binary_formats(compat_version, arch)
            for binary_format in binary_formats:
                yield "macosx_{major}_{minor}_{binary_format}".format(
                    major=major_version, minor=0, binary_format=binary_format
                )

        # Mac OS 11 on x86_64 is compatible with binaries from previous releases.
        # Arm64 support was introduced in 11.0, so no Arm binaries from previous
        # releases exist.
        #
        # However, the "universal2" binary format can have a
        # macOS version earlier than 11.0 when the x86_64 part of the binary supports
        # that version of macOS.
        if arch == "x86_64":
            for minor_version in range(16, 3, -1):
                compat_version = 10, minor_version
                binary_formats = _mac_binary_formats(compat_version, arch)
                for binary_format in binary_formats:
                    yield "macosx_{major}_{minor}_{binary_format}".format(
                        major=compat_version[0],
                        minor=compat_version[1],
                        binary_format=binary_format,
                    )
        else:
            for minor_version in range(16, 3, -1):
                compat_version = 10, minor_version
                binary_format = "universal2"
                yield "macosx_{major}_{minor}_{binary_format}".format(
                    major=compat_version[0],
                    minor=compat_version[1],
                    binary_format=binary_format,
                )
|
415 |
+
|
416 |
+
|
417 |
+
def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]:
    """Yield Linux platform tags: manylinux, then musllinux, then plain linux."""
    linux = _normalize_string(sysconfig.get_platform())
    if is_32bit:
        # A 32-bit interpreter on a 64-bit kernel reports the 64-bit platform;
        # downgrade it to the matching 32-bit tag.
        downgrades = {"linux_x86_64": "linux_i686", "linux_aarch64": "linux_armv7l"}
        linux = downgrades.get(linux, linux)
    _, arch = linux.split("_", 1)
    yield from _manylinux.platform_tags(linux, arch)
    yield from _musllinux.platform_tags(arch)
    yield linux
|
428 |
+
|
429 |
+
|
430 |
+
def _generic_platforms() -> Iterator[str]:
    """Yield the single normalized platform tag reported by sysconfig."""
    platform_string = sysconfig.get_platform()
    yield _normalize_string(platform_string)
|
432 |
+
|
433 |
+
|
434 |
+
def platform_tags() -> Iterator[str]:
    """
    Provides the platform tags for this installation.
    """
    system = platform.system()
    if system == "Darwin":
        return mac_platforms()
    if system == "Linux":
        return _linux_platforms()
    return _generic_platforms()
|
444 |
+
|
445 |
+
|
446 |
+
def interpreter_name() -> str:
    """
    Returns the name of the running interpreter.
    """
    name = sys.implementation.name
    short = INTERPRETER_SHORT_NAMES.get(name)
    return short or name
|
452 |
+
|
453 |
+
|
454 |
+
def interpreter_version(*, warn: bool = False) -> str:
    """
    Returns the version of the running interpreter.
    """
    raw = _get_config_var("py_version_nodot", warn=warn)
    if raw:
        return str(raw)
    return _version_nodot(sys.version_info[:2])
|
464 |
+
|
465 |
+
|
466 |
+
def _version_nodot(version: PythonVersion) -> str:
    """Join version components with no separator, e.g. (3, 10) -> "310"."""
    return "".join(str(component) for component in version)
|
468 |
+
|
469 |
+
|
470 |
+
def sys_tags(*, warn: bool = False) -> Iterator[Tag]:
    """
    Returns the sequence of tag triples for the running interpreter.

    The order of the sequence corresponds to priority order for the
    interpreter, from most to least important.
    """
    if interpreter_name() == "cp":
        yield from cpython_tags(warn=warn)
    else:
        yield from generic_tags()
    yield from compatible_tags()
|
llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/utils.py
ADDED
@@ -0,0 +1,136 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# This file is dual licensed under the terms of the Apache License, Version
|
2 |
+
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
|
3 |
+
# for complete details.
|
4 |
+
|
5 |
+
import re
|
6 |
+
from typing import FrozenSet, NewType, Tuple, Union, cast
|
7 |
+
|
8 |
+
from .tags import Tag, parse_tag
|
9 |
+
from .version import InvalidVersion, Version
|
10 |
+
|
11 |
+
BuildTag = Union[Tuple[()], Tuple[int, str]]
|
12 |
+
NormalizedName = NewType("NormalizedName", str)
|
13 |
+
|
14 |
+
|
15 |
+
class InvalidWheelFilename(ValueError):
    """Raised for wheel filenames that do not follow PEP 427."""
|
19 |
+
|
20 |
+
|
21 |
+
class InvalidSdistFilename(ValueError):
    """Raised for sdist filenames that do not follow the packaging user guide."""
|
25 |
+
|
26 |
+
|
27 |
+
# PEP 503: runs of '-', '_' and '.' in a project name are all equivalent.
_canonicalize_regex = re.compile(r"[-_.]+")
# PEP 427: The build number must start with a digit.
_build_tag_regex = re.compile(r"(\d+)(.*)")
|
30 |
+
|
31 |
+
|
32 |
+
def canonicalize_name(name: str) -> NormalizedName:
    """Normalize *name* per PEP 503: runs of ``-_.`` become ``-``, lowercased."""
    return cast(NormalizedName, _canonicalize_regex.sub("-", name).lower())
|
36 |
+
|
37 |
+
|
38 |
+
def canonicalize_version(version: Union[Version, str]) -> str:
    """
    This is very similar to Version.__str__, but has one subtle difference
    with the way it handles the release segment.
    """
    if isinstance(version, str):
        try:
            parsed = Version(version)
        except InvalidVersion:
            # Legacy versions cannot be normalized
            return version
    else:
        parsed = version

    pieces = []

    # Epoch
    if parsed.epoch != 0:
        pieces.append(f"{parsed.epoch}!")

    # Release segment
    # NB: trailing '.0' components are stripped to normalize
    release = ".".join(str(x) for x in parsed.release)
    pieces.append(re.sub(r"(\.0)+$", "", release))

    # Pre-release
    if parsed.pre is not None:
        pieces.append("".join(str(x) for x in parsed.pre))

    # Post-release
    if parsed.post is not None:
        pieces.append(f".post{parsed.post}")

    # Development release
    if parsed.dev is not None:
        pieces.append(f".dev{parsed.dev}")

    # Local version segment
    if parsed.local is not None:
        pieces.append(f"+{parsed.local}")

    return "".join(pieces)
|
79 |
+
|
80 |
+
|
81 |
+
def parse_wheel_filename(
    filename: str,
) -> Tuple[NormalizedName, Version, BuildTag, FrozenSet[Tag]]:
    """
    Parse a wheel filename into (project name, version, build tag, tags).

    Raises InvalidWheelFilename when the filename does not follow PEP 427.
    """
    if not filename.endswith(".whl"):
        raise InvalidWheelFilename(
            f"Invalid wheel filename (extension must be '.whl'): {filename}"
        )

    filename = filename[:-4]
    dashes = filename.count("-")
    if dashes not in (4, 5):
        raise InvalidWheelFilename(
            f"Invalid wheel filename (wrong number of parts): {filename}"
        )

    parts = filename.split("-", dashes - 2)
    name_part = parts[0]
    # See PEP 427 for the rules on escaping the project name
    if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None:
        raise InvalidWheelFilename(f"Invalid project name: {filename}")
    name = canonicalize_name(name_part)
    version = Version(parts[1])
    if dashes == 5:
        build_part = parts[2]
        build_match = _build_tag_regex.match(build_part)
        if build_match is None:
            raise InvalidWheelFilename(
                f"Invalid build number: {build_part} in '{filename}'"
            )
        build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2)))
    else:
        build = ()
    tags = parse_tag(parts[-1])
    return (name, version, build, tags)
|
115 |
+
|
116 |
+
|
117 |
+
def parse_sdist_filename(filename: str) -> Tuple[NormalizedName, Version]:
    """
    Parse an sdist filename into (project name, version).

    Raises InvalidSdistFilename when the filename is not a valid
    ``<name>-<version>.tar.gz`` or ``<name>-<version>.zip`` sdist name.
    """
    if filename.endswith(".tar.gz"):
        file_stem = filename[: -len(".tar.gz")]
    elif filename.endswith(".zip"):
        file_stem = filename[: -len(".zip")]
    else:
        raise InvalidSdistFilename(
            f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):"
            f" {filename}"
        )

    # We are requiring a PEP 440 version, which cannot contain dashes,
    # so we split on the last dash.
    name_part, sep, version_part = file_stem.rpartition("-")
    if not sep:
        raise InvalidSdistFilename(f"Invalid sdist filename: {filename}")

    name = canonicalize_name(name_part)
    version = Version(version_part)
    return (name, version)
|
llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/version.py
ADDED
@@ -0,0 +1,504 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# This file is dual licensed under the terms of the Apache License, Version
|
2 |
+
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
|
3 |
+
# for complete details.
|
4 |
+
|
5 |
+
import collections
|
6 |
+
import itertools
|
7 |
+
import re
|
8 |
+
import warnings
|
9 |
+
from typing import Callable, Iterator, List, Optional, SupportsInt, Tuple, Union
|
10 |
+
|
11 |
+
from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType
|
12 |
+
|
13 |
+
__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"]
|
14 |
+
|
15 |
+
# Type aliases describing the components of a parsed version and the
# totally-ordered sort keys built from them.
InfiniteTypes = Union[InfinityType, NegativeInfinityType]
PrePostDevType = Union[InfiniteTypes, Tuple[str, int]]
SubLocalType = Union[InfiniteTypes, int, str]
LocalType = Union[
    NegativeInfinityType,
    Tuple[
        Union[
            SubLocalType,
            Tuple[SubLocalType, str],
            Tuple[NegativeInfinityType, SubLocalType],
        ],
        ...,
    ],
]
CmpKey = Tuple[
    int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType
]
LegacyCmpKey = Tuple[int, Tuple[str, ...]]
VersionComparisonMethod = Callable[
    [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool
]

# Raw parsed components of a PEP 440 version string, as produced by
# Version.__init__.
_Version = collections.namedtuple(
    "_Version", ["epoch", "release", "dev", "pre", "post", "local"]
)
|
40 |
+
|
41 |
+
|
42 |
+
def parse(version: str) -> Union["LegacyVersion", "Version"]:
    """
    Parse the given version string and return either a :class:`Version` object
    or a :class:`LegacyVersion` object depending on if the given version is
    a valid PEP 440 version or a legacy version.
    """
    try:
        parsed: Union["LegacyVersion", "Version"] = Version(version)
    except InvalidVersion:
        parsed = LegacyVersion(version)
    return parsed
|
52 |
+
|
53 |
+
|
54 |
+
class InvalidVersion(ValueError):
    """Raised when a version string does not comply with PEP 440."""
|
58 |
+
|
59 |
+
|
60 |
+
class _BaseVersion:
    """Shared comparison machinery for Version and LegacyVersion.

    Subclasses populate ``_key`` with a totally-ordered sort key; hashing
    and all six rich comparisons delegate to it.
    """

    # The comparison key; set by the subclass constructor.
    _key: Union[CmpKey, LegacyCmpKey]

    def __hash__(self) -> int:
        return hash(self._key)

    # Please keep the duplicated `isinstance` check
    # in the six comparisons hereunder
    # unless you find a way to avoid adding overhead function calls.
    def __lt__(self, other: "_BaseVersion") -> bool:
        if not isinstance(other, _BaseVersion):
            return NotImplemented

        return self._key < other._key

    def __le__(self, other: "_BaseVersion") -> bool:
        if not isinstance(other, _BaseVersion):
            return NotImplemented

        return self._key <= other._key

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, _BaseVersion):
            return NotImplemented

        return self._key == other._key

    def __ge__(self, other: "_BaseVersion") -> bool:
        if not isinstance(other, _BaseVersion):
            return NotImplemented

        return self._key >= other._key

    def __gt__(self, other: "_BaseVersion") -> bool:
        if not isinstance(other, _BaseVersion):
            return NotImplemented

        return self._key > other._key

    def __ne__(self, other: object) -> bool:
        if not isinstance(other, _BaseVersion):
            return NotImplemented

        return self._key != other._key
|
104 |
+
|
105 |
+
|
106 |
+
class LegacyVersion(_BaseVersion):
    """A version that does not conform to PEP 440 (deprecated).

    All PEP 440 metadata accessors return inert defaults; the epoch of -1
    makes every legacy version sort before any PEP 440 version.
    """

    def __init__(self, version: str) -> None:
        self._version = str(version)
        self._key = _legacy_cmpkey(self._version)

        warnings.warn(
            "Creating a LegacyVersion has been deprecated and will be "
            "removed in the next major release",
            DeprecationWarning,
        )

    def __str__(self) -> str:
        return self._version

    def __repr__(self) -> str:
        return f"<LegacyVersion('{self}')>"

    @property
    def public(self) -> str:
        # Legacy versions have no local segment; the whole string is public.
        return self._version

    @property
    def base_version(self) -> str:
        return self._version

    @property
    def epoch(self) -> int:
        # -1 sorts before the minimum PEP 440 epoch (0).
        return -1

    @property
    def release(self) -> None:
        return None

    @property
    def pre(self) -> None:
        return None

    @property
    def post(self) -> None:
        return None

    @property
    def dev(self) -> None:
        return None

    @property
    def local(self) -> None:
        return None

    @property
    def is_prerelease(self) -> bool:
        return False

    @property
    def is_postrelease(self) -> bool:
        return False

    @property
    def is_devrelease(self) -> bool:
        return False
|
166 |
+
|
167 |
+
|
168 |
+
# Splits a legacy version string into numeric runs, alphabetic runs,
# '.' separators and '-' markers (capturing group keeps the separators).
_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE)

# Alternate spellings normalized before comparison; 'dev' maps to '@' so it
# sorts before any alphabetic tag, and '-' marks a final release.
_legacy_version_replacement_map = {
    "pre": "c",
    "preview": "c",
    "-": "final-",
    "rc": "c",
    "dev": "@",
}
|
177 |
+
|
178 |
+
|
179 |
+
def _parse_version_parts(s: str) -> Iterator[str]:
    """Yield normalized, comparable chunks of a legacy version string."""
    for chunk in _legacy_version_component_re.split(s):
        chunk = _legacy_version_replacement_map.get(chunk, chunk)

        if not chunk or chunk == ".":
            continue

        if chunk[:1] in "0123456789":
            # Zero-pad so numeric chunks compare correctly as strings.
            yield chunk.zfill(8)
        else:
            yield "*" + chunk

    # Ensure that alpha/beta/candidate chunks sort before the final release.
    yield "*final"
|
194 |
+
|
195 |
+
|
196 |
+
def _legacy_cmpkey(version: str) -> LegacyCmpKey:
    """Build the comparison key for a non-PEP 440 (legacy) version string."""

    # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch
    # greater than or equal to 0. This will effectively put the LegacyVersion,
    # which uses the defacto standard originally implemented by setuptools,
    # as before all PEP 440 versions.
    epoch = -1

    # This scheme is taken from pkg_resources.parse_version setuptools prior to
    # it's adoption of the packaging library.
    parts: List[str] = []
    for part in _parse_version_parts(version.lower()):
        if part.startswith("*"):
            # remove "-" before a prerelease tag
            if part < "*final":
                while parts and parts[-1] == "*final-":
                    parts.pop()

            # remove trailing zeros from each series of numeric parts
            while parts and parts[-1] == "00000000":
                parts.pop()

        parts.append(part)

    return epoch, tuple(parts)
|
221 |
+
|
222 |
+
|
223 |
+
# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
VERSION_PATTERN = r"""
    v?
    (?:
        (?:(?P<epoch>[0-9]+)!)?                           # epoch
        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
        (?P<pre>                                          # pre-release
            [-_\.]?
            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
            [-_\.]?
            (?P<pre_n>[0-9]+)?
        )?
        (?P<post>                                         # post release
            (?:-(?P<post_n1>[0-9]+))
            |
            (?:
                [-_\.]?
                (?P<post_l>post|rev|r)
                [-_\.]?
                (?P<post_n2>[0-9]+)?
            )
        )?
        (?P<dev>                                          # dev release
            [-_\.]?
            (?P<dev_l>dev)
            [-_\.]?
            (?P<dev_n>[0-9]+)?
        )?
    )
    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
"""
|
255 |
+
|
256 |
+
|
257 |
+
class Version(_BaseVersion):
    """A parsed, comparable PEP 440 compliant version."""

    # VERSION_PATTERN is deliberately unanchored; anchor it here.
    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)

    def __init__(self, version: str) -> None:
        """Parse *version*; raises InvalidVersion if it is not PEP 440."""

        # Validate the version and parse it into pieces
        match = self._regex.search(version)
        if not match:
            raise InvalidVersion(f"Invalid version: '{version}'")

        # Store the parsed out pieces of the version
        self._version = _Version(
            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
            release=tuple(int(i) for i in match.group("release").split(".")),
            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
            post=_parse_letter_version(
                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
            ),
            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
            local=_parse_local_version(match.group("local")),
        )

        # Generate a key which will be used for sorting
        self._key = _cmpkey(
            self._version.epoch,
            self._version.release,
            self._version.pre,
            self._version.post,
            self._version.dev,
            self._version.local,
        )

    def __repr__(self) -> str:
        return f"<Version('{self}')>"

    def __str__(self) -> str:
        """Render the normalized string form of the version."""
        parts = []

        # Epoch
        if self.epoch != 0:
            parts.append(f"{self.epoch}!")

        # Release segment
        parts.append(".".join(str(x) for x in self.release))

        # Pre-release
        if self.pre is not None:
            parts.append("".join(str(x) for x in self.pre))

        # Post-release
        if self.post is not None:
            parts.append(f".post{self.post}")

        # Development release
        if self.dev is not None:
            parts.append(f".dev{self.dev}")

        # Local version segment
        if self.local is not None:
            parts.append(f"+{self.local}")

        return "".join(parts)

    @property
    def epoch(self) -> int:
        _epoch: int = self._version.epoch
        return _epoch

    @property
    def release(self) -> Tuple[int, ...]:
        _release: Tuple[int, ...] = self._version.release
        return _release

    @property
    def pre(self) -> Optional[Tuple[str, int]]:
        _pre: Optional[Tuple[str, int]] = self._version.pre
        return _pre

    @property
    def post(self) -> Optional[int]:
        # Only the numeric component; the letter is always "post".
        return self._version.post[1] if self._version.post else None

    @property
    def dev(self) -> Optional[int]:
        # Only the numeric component; the letter is always "dev".
        return self._version.dev[1] if self._version.dev else None

    @property
    def local(self) -> Optional[str]:
        if self._version.local:
            return ".".join(str(x) for x in self._version.local)
        else:
            return None

    @property
    def public(self) -> str:
        # Everything up to (but excluding) the "+local" suffix.
        return str(self).split("+", 1)[0]

    @property
    def base_version(self) -> str:
        # Epoch and release only: no pre/post/dev/local markers.
        parts = []

        # Epoch
        if self.epoch != 0:
            parts.append(f"{self.epoch}!")

        # Release segment
        parts.append(".".join(str(x) for x in self.release))

        return "".join(parts)

    @property
    def is_prerelease(self) -> bool:
        return self.dev is not None or self.pre is not None

    @property
    def is_postrelease(self) -> bool:
        return self.post is not None

    @property
    def is_devrelease(self) -> bool:
        return self.dev is not None

    @property
    def major(self) -> int:
        return self.release[0] if len(self.release) >= 1 else 0

    @property
    def minor(self) -> int:
        return self.release[1] if len(self.release) >= 2 else 0

    @property
    def micro(self) -> int:
        return self.release[2] if len(self.release) >= 3 else 0
|
391 |
+
|
392 |
+
|
393 |
+
def _parse_letter_version(
|
394 |
+
letter: str, number: Union[str, bytes, SupportsInt]
|
395 |
+
) -> Optional[Tuple[str, int]]:
|
396 |
+
|
397 |
+
if letter:
|
398 |
+
# We consider there to be an implicit 0 in a pre-release if there is
|
399 |
+
# not a numeral associated with it.
|
400 |
+
if number is None:
|
401 |
+
number = 0
|
402 |
+
|
403 |
+
# We normalize any letters to their lower case form
|
404 |
+
letter = letter.lower()
|
405 |
+
|
406 |
+
# We consider some words to be alternate spellings of other words and
|
407 |
+
# in those cases we want to normalize the spellings to our preferred
|
408 |
+
# spelling.
|
409 |
+
if letter == "alpha":
|
410 |
+
letter = "a"
|
411 |
+
elif letter == "beta":
|
412 |
+
letter = "b"
|
413 |
+
elif letter in ["c", "pre", "preview"]:
|
414 |
+
letter = "rc"
|
415 |
+
elif letter in ["rev", "r"]:
|
416 |
+
letter = "post"
|
417 |
+
|
418 |
+
return letter, int(number)
|
419 |
+
if not letter and number:
|
420 |
+
# We assume if we are given a number, but we are not given a letter
|
421 |
+
# then this is using the implicit post release syntax (e.g. 1.0-1)
|
422 |
+
letter = "post"
|
423 |
+
|
424 |
+
return letter, int(number)
|
425 |
+
|
426 |
+
return None
|
427 |
+
|
428 |
+
|
429 |
+
# Local version segments may be separated by '.', '_' or '-'; all three
# normalize to '.' (PEP 440).
_local_version_separators = re.compile(r"[\._-]")
|
430 |
+
|
431 |
+
|
432 |
+
def _parse_local_version(local: str) -> Optional[LocalType]:
    """
    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
    """
    if local is None:
        return None
    return tuple(
        int(piece) if piece.isdigit() else piece.lower()
        for piece in _local_version_separators.split(local)
    )
|
442 |
+
|
443 |
+
|
444 |
+
def _cmpkey(
|
445 |
+
epoch: int,
|
446 |
+
release: Tuple[int, ...],
|
447 |
+
pre: Optional[Tuple[str, int]],
|
448 |
+
post: Optional[Tuple[str, int]],
|
449 |
+
dev: Optional[Tuple[str, int]],
|
450 |
+
local: Optional[Tuple[SubLocalType]],
|
451 |
+
) -> CmpKey:
|
452 |
+
|
453 |
+
# When we compare a release version, we want to compare it with all of the
|
454 |
+
# trailing zeros removed. So we'll use a reverse the list, drop all the now
|
455 |
+
# leading zeros until we come to something non zero, then take the rest
|
456 |
+
# re-reverse it back into the correct order and make it a tuple and use
|
457 |
+
# that for our sorting key.
|
458 |
+
_release = tuple(
|
459 |
+
reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
|
460 |
+
)
|
461 |
+
|
462 |
+
# We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
|
463 |
+
# We'll do this by abusing the pre segment, but we _only_ want to do this
|
464 |
+
# if there is not a pre or a post segment. If we have one of those then
|
465 |
+
# the normal sorting rules will handle this case correctly.
|
466 |
+
if pre is None and post is None and dev is not None:
|
467 |
+
_pre: PrePostDevType = NegativeInfinity
|
468 |
+
# Versions without a pre-release (except as noted above) should sort after
|
469 |
+
# those with one.
|
470 |
+
elif pre is None:
|
471 |
+
_pre = Infinity
|
472 |
+
else:
|
473 |
+
_pre = pre
|
474 |
+
|
475 |
+
# Versions without a post segment should sort before those with one.
|
476 |
+
if post is None:
|
477 |
+
_post: PrePostDevType = NegativeInfinity
|
478 |
+
|
479 |
+
else:
|
480 |
+
_post = post
|
481 |
+
|
482 |
+
# Versions without a development segment should sort after those with one.
|
483 |
+
if dev is None:
|
484 |
+
_dev: PrePostDevType = Infinity
|
485 |
+
|
486 |
+
else:
|
487 |
+
_dev = dev
|
488 |
+
|
489 |
+
if local is None:
|
490 |
+
# Versions without a local segment should sort before those with one.
|
491 |
+
_local: LocalType = NegativeInfinity
|
492 |
+
else:
|
493 |
+
# Versions with a local segment need that segment parsed to implement
|
494 |
+
# the sorting rules in PEP440.
|
495 |
+
# - Alpha numeric segments sort before numeric segments
|
496 |
+
# - Alpha numeric segments sort lexicographically
|
497 |
+
# - Numeric segments sort numerically
|
498 |
+
# - Shorter versions sort before longer versions when the prefixes
|
499 |
+
# match exactly
|
500 |
+
_local = tuple(
|
501 |
+
(i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
|
502 |
+
)
|
503 |
+
|
504 |
+
return epoch, _release, _pre, _post, _dev, _local
|
llmeval-env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing.py
ADDED
The diff for this file is too large to render.
See raw diff
|
|
llmeval-env/lib/python3.10/site-packages/pkg_resources/extern/__init__.py
ADDED
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import importlib.util
|
2 |
+
import sys
|
3 |
+
|
4 |
+
|
5 |
+
class VendorImporter:
|
6 |
+
"""
|
7 |
+
A PEP 302 meta path importer for finding optionally-vendored
|
8 |
+
or otherwise naturally-installed packages from root_name.
|
9 |
+
"""
|
10 |
+
|
11 |
+
def __init__(self, root_name, vendored_names=(), vendor_pkg=None):
|
12 |
+
self.root_name = root_name
|
13 |
+
self.vendored_names = set(vendored_names)
|
14 |
+
self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor')
|
15 |
+
|
16 |
+
@property
|
17 |
+
def search_path(self):
|
18 |
+
"""
|
19 |
+
Search first the vendor package then as a natural package.
|
20 |
+
"""
|
21 |
+
yield self.vendor_pkg + '.'
|
22 |
+
yield ''
|
23 |
+
|
24 |
+
def _module_matches_namespace(self, fullname):
|
25 |
+
"""Figure out if the target module is vendored."""
|
26 |
+
root, base, target = fullname.partition(self.root_name + '.')
|
27 |
+
return not root and any(map(target.startswith, self.vendored_names))
|
28 |
+
|
29 |
+
def load_module(self, fullname):
|
30 |
+
"""
|
31 |
+
Iterate over the search path to locate and load fullname.
|
32 |
+
"""
|
33 |
+
root, base, target = fullname.partition(self.root_name + '.')
|
34 |
+
for prefix in self.search_path:
|
35 |
+
try:
|
36 |
+
extant = prefix + target
|
37 |
+
__import__(extant)
|
38 |
+
mod = sys.modules[extant]
|
39 |
+
sys.modules[fullname] = mod
|
40 |
+
return mod
|
41 |
+
except ImportError:
|
42 |
+
pass
|
43 |
+
else:
|
44 |
+
raise ImportError(
|
45 |
+
"The '{target}' package is required; "
|
46 |
+
"normally this is bundled with this package so if you get "
|
47 |
+
"this warning, consult the packager of your "
|
48 |
+
"distribution.".format(**locals())
|
49 |
+
)
|
50 |
+
|
51 |
+
def create_module(self, spec):
|
52 |
+
return self.load_module(spec.name)
|
53 |
+
|
54 |
+
def exec_module(self, module):
|
55 |
+
pass
|
56 |
+
|
57 |
+
def find_spec(self, fullname, path=None, target=None):
|
58 |
+
"""Return a module spec for vendored names."""
|
59 |
+
return (
|
60 |
+
importlib.util.spec_from_loader(fullname, self)
|
61 |
+
if self._module_matches_namespace(fullname) else None
|
62 |
+
)
|
63 |
+
|
64 |
+
def install(self):
|
65 |
+
"""
|
66 |
+
Install this importer into sys.meta_path if not already present.
|
67 |
+
"""
|
68 |
+
if self not in sys.meta_path:
|
69 |
+
sys.meta_path.append(self)
|
70 |
+
|
71 |
+
|
72 |
+
names = 'packaging', 'pyparsing', 'appdirs'
|
73 |
+
VendorImporter(__name__, names).install()
|
llmeval-env/lib/python3.10/site-packages/pkg_resources/extern/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (2.9 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pkg_resources/tests/data/my-test-package-source/__pycache__/setup.cpython-310.pyc
ADDED
Binary file (322 Bytes). View file
|
|
llmeval-env/lib/python3.10/site-packages/pkg_resources/tests/data/my-test-package-source/setup.py
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import setuptools
|
2 |
+
setuptools.setup(
|
3 |
+
name="my-test-package",
|
4 |
+
version="1.0",
|
5 |
+
zip_safe=True,
|
6 |
+
)
|
llmeval-env/lib/python3.10/site-packages/sympy/physics/__init__.py
ADDED
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
A module that helps solving problems in physics.
|
3 |
+
"""
|
4 |
+
|
5 |
+
from . import units
|
6 |
+
from .matrices import mgamma, msigma, minkowski_tensor, mdft
|
7 |
+
|
8 |
+
__all__ = [
|
9 |
+
'units',
|
10 |
+
|
11 |
+
'mgamma', 'msigma', 'minkowski_tensor', 'mdft',
|
12 |
+
]
|
llmeval-env/lib/python3.10/site-packages/sympy/physics/hep/__init__.py
ADDED
File without changes
|
llmeval-env/lib/python3.10/site-packages/sympy/physics/hep/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (190 Bytes). View file
|
|
llmeval-env/lib/python3.10/site-packages/sympy/physics/hep/__pycache__/gamma_matrices.cpython-310.pyc
ADDED
Binary file (14.1 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/sympy/physics/hep/gamma_matrices.py
ADDED
@@ -0,0 +1,716 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Module to handle gamma matrices expressed as tensor objects.
|
3 |
+
|
4 |
+
Examples
|
5 |
+
========
|
6 |
+
|
7 |
+
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, LorentzIndex
|
8 |
+
>>> from sympy.tensor.tensor import tensor_indices
|
9 |
+
>>> i = tensor_indices('i', LorentzIndex)
|
10 |
+
>>> G(i)
|
11 |
+
GammaMatrix(i)
|
12 |
+
|
13 |
+
Note that there is already an instance of GammaMatrixHead in four dimensions:
|
14 |
+
GammaMatrix, which is simply declare as
|
15 |
+
|
16 |
+
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix
|
17 |
+
>>> from sympy.tensor.tensor import tensor_indices
|
18 |
+
>>> i = tensor_indices('i', LorentzIndex)
|
19 |
+
>>> GammaMatrix(i)
|
20 |
+
GammaMatrix(i)
|
21 |
+
|
22 |
+
To access the metric tensor
|
23 |
+
|
24 |
+
>>> LorentzIndex.metric
|
25 |
+
metric(LorentzIndex,LorentzIndex)
|
26 |
+
|
27 |
+
"""
|
28 |
+
from sympy.core.mul import Mul
|
29 |
+
from sympy.core.singleton import S
|
30 |
+
from sympy.matrices.dense import eye
|
31 |
+
from sympy.matrices.expressions.trace import trace
|
32 |
+
from sympy.tensor.tensor import TensorIndexType, TensorIndex,\
|
33 |
+
TensMul, TensAdd, tensor_mul, Tensor, TensorHead, TensorSymmetry
|
34 |
+
|
35 |
+
|
36 |
+
# DiracSpinorIndex = TensorIndexType('DiracSpinorIndex', dim=4, dummy_name="S")
|
37 |
+
|
38 |
+
|
39 |
+
LorentzIndex = TensorIndexType('LorentzIndex', dim=4, dummy_name="L")
|
40 |
+
|
41 |
+
|
42 |
+
GammaMatrix = TensorHead("GammaMatrix", [LorentzIndex],
|
43 |
+
TensorSymmetry.no_symmetry(1), comm=None)
|
44 |
+
|
45 |
+
|
46 |
+
def extract_type_tens(expression, component):
|
47 |
+
"""
|
48 |
+
Extract from a ``TensExpr`` all tensors with `component`.
|
49 |
+
|
50 |
+
Returns two tensor expressions:
|
51 |
+
|
52 |
+
* the first contains all ``Tensor`` of having `component`.
|
53 |
+
* the second contains all remaining.
|
54 |
+
|
55 |
+
|
56 |
+
"""
|
57 |
+
if isinstance(expression, Tensor):
|
58 |
+
sp = [expression]
|
59 |
+
elif isinstance(expression, TensMul):
|
60 |
+
sp = expression.args
|
61 |
+
else:
|
62 |
+
raise ValueError('wrong type')
|
63 |
+
|
64 |
+
# Collect all gamma matrices of the same dimension
|
65 |
+
new_expr = S.One
|
66 |
+
residual_expr = S.One
|
67 |
+
for i in sp:
|
68 |
+
if isinstance(i, Tensor) and i.component == component:
|
69 |
+
new_expr *= i
|
70 |
+
else:
|
71 |
+
residual_expr *= i
|
72 |
+
return new_expr, residual_expr
|
73 |
+
|
74 |
+
|
75 |
+
def simplify_gamma_expression(expression):
|
76 |
+
extracted_expr, residual_expr = extract_type_tens(expression, GammaMatrix)
|
77 |
+
res_expr = _simplify_single_line(extracted_expr)
|
78 |
+
return res_expr * residual_expr
|
79 |
+
|
80 |
+
|
81 |
+
def simplify_gpgp(ex, sort=True):
|
82 |
+
"""
|
83 |
+
simplify products ``G(i)*p(-i)*G(j)*p(-j) -> p(i)*p(-i)``
|
84 |
+
|
85 |
+
Examples
|
86 |
+
========
|
87 |
+
|
88 |
+
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, \
|
89 |
+
LorentzIndex, simplify_gpgp
|
90 |
+
>>> from sympy.tensor.tensor import tensor_indices, tensor_heads
|
91 |
+
>>> p, q = tensor_heads('p, q', [LorentzIndex])
|
92 |
+
>>> i0,i1,i2,i3,i4,i5 = tensor_indices('i0:6', LorentzIndex)
|
93 |
+
>>> ps = p(i0)*G(-i0)
|
94 |
+
>>> qs = q(i0)*G(-i0)
|
95 |
+
>>> simplify_gpgp(ps*qs*qs)
|
96 |
+
GammaMatrix(-L_0)*p(L_0)*q(L_1)*q(-L_1)
|
97 |
+
"""
|
98 |
+
def _simplify_gpgp(ex):
|
99 |
+
components = ex.components
|
100 |
+
a = []
|
101 |
+
comp_map = []
|
102 |
+
for i, comp in enumerate(components):
|
103 |
+
comp_map.extend([i]*comp.rank)
|
104 |
+
dum = [(i[0], i[1], comp_map[i[0]], comp_map[i[1]]) for i in ex.dum]
|
105 |
+
for i in range(len(components)):
|
106 |
+
if components[i] != GammaMatrix:
|
107 |
+
continue
|
108 |
+
for dx in dum:
|
109 |
+
if dx[2] == i:
|
110 |
+
p_pos1 = dx[3]
|
111 |
+
elif dx[3] == i:
|
112 |
+
p_pos1 = dx[2]
|
113 |
+
else:
|
114 |
+
continue
|
115 |
+
comp1 = components[p_pos1]
|
116 |
+
if comp1.comm == 0 and comp1.rank == 1:
|
117 |
+
a.append((i, p_pos1))
|
118 |
+
if not a:
|
119 |
+
return ex
|
120 |
+
elim = set()
|
121 |
+
tv = []
|
122 |
+
hit = True
|
123 |
+
coeff = S.One
|
124 |
+
ta = None
|
125 |
+
while hit:
|
126 |
+
hit = False
|
127 |
+
for i, ai in enumerate(a[:-1]):
|
128 |
+
if ai[0] in elim:
|
129 |
+
continue
|
130 |
+
if ai[0] != a[i + 1][0] - 1:
|
131 |
+
continue
|
132 |
+
if components[ai[1]] != components[a[i + 1][1]]:
|
133 |
+
continue
|
134 |
+
elim.add(ai[0])
|
135 |
+
elim.add(ai[1])
|
136 |
+
elim.add(a[i + 1][0])
|
137 |
+
elim.add(a[i + 1][1])
|
138 |
+
if not ta:
|
139 |
+
ta = ex.split()
|
140 |
+
mu = TensorIndex('mu', LorentzIndex)
|
141 |
+
hit = True
|
142 |
+
if i == 0:
|
143 |
+
coeff = ex.coeff
|
144 |
+
tx = components[ai[1]](mu)*components[ai[1]](-mu)
|
145 |
+
if len(a) == 2:
|
146 |
+
tx *= 4 # eye(4)
|
147 |
+
tv.append(tx)
|
148 |
+
break
|
149 |
+
|
150 |
+
if tv:
|
151 |
+
a = [x for j, x in enumerate(ta) if j not in elim]
|
152 |
+
a.extend(tv)
|
153 |
+
t = tensor_mul(*a)*coeff
|
154 |
+
# t = t.replace(lambda x: x.is_Matrix, lambda x: 1)
|
155 |
+
return t
|
156 |
+
else:
|
157 |
+
return ex
|
158 |
+
|
159 |
+
if sort:
|
160 |
+
ex = ex.sorted_components()
|
161 |
+
# this would be better off with pattern matching
|
162 |
+
while 1:
|
163 |
+
t = _simplify_gpgp(ex)
|
164 |
+
if t != ex:
|
165 |
+
ex = t
|
166 |
+
else:
|
167 |
+
return t
|
168 |
+
|
169 |
+
|
170 |
+
def gamma_trace(t):
|
171 |
+
"""
|
172 |
+
trace of a single line of gamma matrices
|
173 |
+
|
174 |
+
Examples
|
175 |
+
========
|
176 |
+
|
177 |
+
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, \
|
178 |
+
gamma_trace, LorentzIndex
|
179 |
+
>>> from sympy.tensor.tensor import tensor_indices, tensor_heads
|
180 |
+
>>> p, q = tensor_heads('p, q', [LorentzIndex])
|
181 |
+
>>> i0,i1,i2,i3,i4,i5 = tensor_indices('i0:6', LorentzIndex)
|
182 |
+
>>> ps = p(i0)*G(-i0)
|
183 |
+
>>> qs = q(i0)*G(-i0)
|
184 |
+
>>> gamma_trace(G(i0)*G(i1))
|
185 |
+
4*metric(i0, i1)
|
186 |
+
>>> gamma_trace(ps*ps) - 4*p(i0)*p(-i0)
|
187 |
+
0
|
188 |
+
>>> gamma_trace(ps*qs + ps*ps) - 4*p(i0)*p(-i0) - 4*p(i0)*q(-i0)
|
189 |
+
0
|
190 |
+
|
191 |
+
"""
|
192 |
+
if isinstance(t, TensAdd):
|
193 |
+
res = TensAdd(*[gamma_trace(x) for x in t.args])
|
194 |
+
return res
|
195 |
+
t = _simplify_single_line(t)
|
196 |
+
res = _trace_single_line(t)
|
197 |
+
return res
|
198 |
+
|
199 |
+
|
200 |
+
def _simplify_single_line(expression):
|
201 |
+
"""
|
202 |
+
Simplify single-line product of gamma matrices.
|
203 |
+
|
204 |
+
Examples
|
205 |
+
========
|
206 |
+
|
207 |
+
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, \
|
208 |
+
LorentzIndex, _simplify_single_line
|
209 |
+
>>> from sympy.tensor.tensor import tensor_indices, TensorHead
|
210 |
+
>>> p = TensorHead('p', [LorentzIndex])
|
211 |
+
>>> i0,i1 = tensor_indices('i0:2', LorentzIndex)
|
212 |
+
>>> _simplify_single_line(G(i0)*G(i1)*p(-i1)*G(-i0)) + 2*G(i0)*p(-i0)
|
213 |
+
0
|
214 |
+
|
215 |
+
"""
|
216 |
+
t1, t2 = extract_type_tens(expression, GammaMatrix)
|
217 |
+
if t1 != 1:
|
218 |
+
t1 = kahane_simplify(t1)
|
219 |
+
res = t1*t2
|
220 |
+
return res
|
221 |
+
|
222 |
+
|
223 |
+
def _trace_single_line(t):
|
224 |
+
"""
|
225 |
+
Evaluate the trace of a single gamma matrix line inside a ``TensExpr``.
|
226 |
+
|
227 |
+
Notes
|
228 |
+
=====
|
229 |
+
|
230 |
+
If there are ``DiracSpinorIndex.auto_left`` and ``DiracSpinorIndex.auto_right``
|
231 |
+
indices trace over them; otherwise traces are not implied (explain)
|
232 |
+
|
233 |
+
|
234 |
+
Examples
|
235 |
+
========
|
236 |
+
|
237 |
+
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, \
|
238 |
+
LorentzIndex, _trace_single_line
|
239 |
+
>>> from sympy.tensor.tensor import tensor_indices, TensorHead
|
240 |
+
>>> p = TensorHead('p', [LorentzIndex])
|
241 |
+
>>> i0,i1,i2,i3,i4,i5 = tensor_indices('i0:6', LorentzIndex)
|
242 |
+
>>> _trace_single_line(G(i0)*G(i1))
|
243 |
+
4*metric(i0, i1)
|
244 |
+
>>> _trace_single_line(G(i0)*p(-i0)*G(i1)*p(-i1)) - 4*p(i0)*p(-i0)
|
245 |
+
0
|
246 |
+
|
247 |
+
"""
|
248 |
+
def _trace_single_line1(t):
|
249 |
+
t = t.sorted_components()
|
250 |
+
components = t.components
|
251 |
+
ncomps = len(components)
|
252 |
+
g = LorentzIndex.metric
|
253 |
+
# gamma matirices are in a[i:j]
|
254 |
+
hit = 0
|
255 |
+
for i in range(ncomps):
|
256 |
+
if components[i] == GammaMatrix:
|
257 |
+
hit = 1
|
258 |
+
break
|
259 |
+
|
260 |
+
for j in range(i + hit, ncomps):
|
261 |
+
if components[j] != GammaMatrix:
|
262 |
+
break
|
263 |
+
else:
|
264 |
+
j = ncomps
|
265 |
+
numG = j - i
|
266 |
+
if numG == 0:
|
267 |
+
tcoeff = t.coeff
|
268 |
+
return t.nocoeff if tcoeff else t
|
269 |
+
if numG % 2 == 1:
|
270 |
+
return TensMul.from_data(S.Zero, [], [], [])
|
271 |
+
elif numG > 4:
|
272 |
+
# find the open matrix indices and connect them:
|
273 |
+
a = t.split()
|
274 |
+
ind1 = a[i].get_indices()[0]
|
275 |
+
ind2 = a[i + 1].get_indices()[0]
|
276 |
+
aa = a[:i] + a[i + 2:]
|
277 |
+
t1 = tensor_mul(*aa)*g(ind1, ind2)
|
278 |
+
t1 = t1.contract_metric(g)
|
279 |
+
args = [t1]
|
280 |
+
sign = 1
|
281 |
+
for k in range(i + 2, j):
|
282 |
+
sign = -sign
|
283 |
+
ind2 = a[k].get_indices()[0]
|
284 |
+
aa = a[:i] + a[i + 1:k] + a[k + 1:]
|
285 |
+
t2 = sign*tensor_mul(*aa)*g(ind1, ind2)
|
286 |
+
t2 = t2.contract_metric(g)
|
287 |
+
t2 = simplify_gpgp(t2, False)
|
288 |
+
args.append(t2)
|
289 |
+
t3 = TensAdd(*args)
|
290 |
+
t3 = _trace_single_line(t3)
|
291 |
+
return t3
|
292 |
+
else:
|
293 |
+
a = t.split()
|
294 |
+
t1 = _gamma_trace1(*a[i:j])
|
295 |
+
a2 = a[:i] + a[j:]
|
296 |
+
t2 = tensor_mul(*a2)
|
297 |
+
t3 = t1*t2
|
298 |
+
if not t3:
|
299 |
+
return t3
|
300 |
+
t3 = t3.contract_metric(g)
|
301 |
+
return t3
|
302 |
+
|
303 |
+
t = t.expand()
|
304 |
+
if isinstance(t, TensAdd):
|
305 |
+
a = [_trace_single_line1(x)*x.coeff for x in t.args]
|
306 |
+
return TensAdd(*a)
|
307 |
+
elif isinstance(t, (Tensor, TensMul)):
|
308 |
+
r = t.coeff*_trace_single_line1(t)
|
309 |
+
return r
|
310 |
+
else:
|
311 |
+
return trace(t)
|
312 |
+
|
313 |
+
|
314 |
+
def _gamma_trace1(*a):
|
315 |
+
gctr = 4 # FIXME specific for d=4
|
316 |
+
g = LorentzIndex.metric
|
317 |
+
if not a:
|
318 |
+
return gctr
|
319 |
+
n = len(a)
|
320 |
+
if n%2 == 1:
|
321 |
+
#return TensMul.from_data(S.Zero, [], [], [])
|
322 |
+
return S.Zero
|
323 |
+
if n == 2:
|
324 |
+
ind0 = a[0].get_indices()[0]
|
325 |
+
ind1 = a[1].get_indices()[0]
|
326 |
+
return gctr*g(ind0, ind1)
|
327 |
+
if n == 4:
|
328 |
+
ind0 = a[0].get_indices()[0]
|
329 |
+
ind1 = a[1].get_indices()[0]
|
330 |
+
ind2 = a[2].get_indices()[0]
|
331 |
+
ind3 = a[3].get_indices()[0]
|
332 |
+
|
333 |
+
return gctr*(g(ind0, ind1)*g(ind2, ind3) - \
|
334 |
+
g(ind0, ind2)*g(ind1, ind3) + g(ind0, ind3)*g(ind1, ind2))
|
335 |
+
|
336 |
+
|
337 |
+
def kahane_simplify(expression):
|
338 |
+
r"""
|
339 |
+
This function cancels contracted elements in a product of four
|
340 |
+
dimensional gamma matrices, resulting in an expression equal to the given
|
341 |
+
one, without the contracted gamma matrices.
|
342 |
+
|
343 |
+
Parameters
|
344 |
+
==========
|
345 |
+
|
346 |
+
`expression` the tensor expression containing the gamma matrices to simplify.
|
347 |
+
|
348 |
+
Notes
|
349 |
+
=====
|
350 |
+
|
351 |
+
If spinor indices are given, the matrices must be given in
|
352 |
+
the order given in the product.
|
353 |
+
|
354 |
+
Algorithm
|
355 |
+
=========
|
356 |
+
|
357 |
+
The idea behind the algorithm is to use some well-known identities,
|
358 |
+
i.e., for contractions enclosing an even number of `\gamma` matrices
|
359 |
+
|
360 |
+
`\gamma^\mu \gamma_{a_1} \cdots \gamma_{a_{2N}} \gamma_\mu = 2 (\gamma_{a_{2N}} \gamma_{a_1} \cdots \gamma_{a_{2N-1}} + \gamma_{a_{2N-1}} \cdots \gamma_{a_1} \gamma_{a_{2N}} )`
|
361 |
+
|
362 |
+
for an odd number of `\gamma` matrices
|
363 |
+
|
364 |
+
`\gamma^\mu \gamma_{a_1} \cdots \gamma_{a_{2N+1}} \gamma_\mu = -2 \gamma_{a_{2N+1}} \gamma_{a_{2N}} \cdots \gamma_{a_{1}}`
|
365 |
+
|
366 |
+
Instead of repeatedly applying these identities to cancel out all contracted indices,
|
367 |
+
it is possible to recognize the links that would result from such an operation,
|
368 |
+
the problem is thus reduced to a simple rearrangement of free gamma matrices.
|
369 |
+
|
370 |
+
Examples
|
371 |
+
========
|
372 |
+
|
373 |
+
When using, always remember that the original expression coefficient
|
374 |
+
has to be handled separately
|
375 |
+
|
376 |
+
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, LorentzIndex
|
377 |
+
>>> from sympy.physics.hep.gamma_matrices import kahane_simplify
|
378 |
+
>>> from sympy.tensor.tensor import tensor_indices
|
379 |
+
>>> i0, i1, i2 = tensor_indices('i0:3', LorentzIndex)
|
380 |
+
>>> ta = G(i0)*G(-i0)
|
381 |
+
>>> kahane_simplify(ta)
|
382 |
+
Matrix([
|
383 |
+
[4, 0, 0, 0],
|
384 |
+
[0, 4, 0, 0],
|
385 |
+
[0, 0, 4, 0],
|
386 |
+
[0, 0, 0, 4]])
|
387 |
+
>>> tb = G(i0)*G(i1)*G(-i0)
|
388 |
+
>>> kahane_simplify(tb)
|
389 |
+
-2*GammaMatrix(i1)
|
390 |
+
>>> t = G(i0)*G(-i0)
|
391 |
+
>>> kahane_simplify(t)
|
392 |
+
Matrix([
|
393 |
+
[4, 0, 0, 0],
|
394 |
+
[0, 4, 0, 0],
|
395 |
+
[0, 0, 4, 0],
|
396 |
+
[0, 0, 0, 4]])
|
397 |
+
>>> t = G(i0)*G(-i0)
|
398 |
+
>>> kahane_simplify(t)
|
399 |
+
Matrix([
|
400 |
+
[4, 0, 0, 0],
|
401 |
+
[0, 4, 0, 0],
|
402 |
+
[0, 0, 4, 0],
|
403 |
+
[0, 0, 0, 4]])
|
404 |
+
|
405 |
+
If there are no contractions, the same expression is returned
|
406 |
+
|
407 |
+
>>> tc = G(i0)*G(i1)
|
408 |
+
>>> kahane_simplify(tc)
|
409 |
+
GammaMatrix(i0)*GammaMatrix(i1)
|
410 |
+
|
411 |
+
References
|
412 |
+
==========
|
413 |
+
|
414 |
+
[1] Algorithm for Reducing Contracted Products of gamma Matrices,
|
415 |
+
Joseph Kahane, Journal of Mathematical Physics, Vol. 9, No. 10, October 1968.
|
416 |
+
"""
|
417 |
+
|
418 |
+
if isinstance(expression, Mul):
|
419 |
+
return expression
|
420 |
+
if isinstance(expression, TensAdd):
|
421 |
+
return TensAdd(*[kahane_simplify(arg) for arg in expression.args])
|
422 |
+
|
423 |
+
if isinstance(expression, Tensor):
|
424 |
+
return expression
|
425 |
+
|
426 |
+
assert isinstance(expression, TensMul)
|
427 |
+
|
428 |
+
gammas = expression.args
|
429 |
+
|
430 |
+
for gamma in gammas:
|
431 |
+
assert gamma.component == GammaMatrix
|
432 |
+
|
433 |
+
free = expression.free
|
434 |
+
# spinor_free = [_ for _ in expression.free_in_args if _[1] != 0]
|
435 |
+
|
436 |
+
# if len(spinor_free) == 2:
|
437 |
+
# spinor_free.sort(key=lambda x: x[2])
|
438 |
+
# assert spinor_free[0][1] == 1 and spinor_free[-1][1] == 2
|
439 |
+
# assert spinor_free[0][2] == 0
|
440 |
+
# elif spinor_free:
|
441 |
+
# raise ValueError('spinor indices do not match')
|
442 |
+
|
443 |
+
dum = []
|
444 |
+
for dum_pair in expression.dum:
|
445 |
+
if expression.index_types[dum_pair[0]] == LorentzIndex:
|
446 |
+
dum.append((dum_pair[0], dum_pair[1]))
|
447 |
+
|
448 |
+
dum = sorted(dum)
|
449 |
+
|
450 |
+
if len(dum) == 0: # or GammaMatrixHead:
|
451 |
+
# no contractions in `expression`, just return it.
|
452 |
+
return expression
|
453 |
+
|
454 |
+
# find the `first_dum_pos`, i.e. the position of the first contracted
|
455 |
+
# gamma matrix, Kahane's algorithm as described in his paper requires the
|
456 |
+
# gamma matrix expression to start with a contracted gamma matrix, this is
|
457 |
+
# a workaround which ignores possible initial free indices, and re-adds
|
458 |
+
# them later.
|
459 |
+
|
460 |
+
first_dum_pos = min(map(min, dum))
|
461 |
+
|
462 |
+
# for p1, p2, a1, a2 in expression.dum_in_args:
|
463 |
+
# if p1 != 0 or p2 != 0:
|
464 |
+
# # only Lorentz indices, skip Dirac indices:
|
465 |
+
# continue
|
466 |
+
# first_dum_pos = min(p1, p2)
|
467 |
+
# break
|
468 |
+
|
469 |
+
total_number = len(free) + len(dum)*2
|
470 |
+
number_of_contractions = len(dum)
|
471 |
+
|
472 |
+
free_pos = [None]*total_number
|
473 |
+
for i in free:
|
474 |
+
free_pos[i[1]] = i[0]
|
475 |
+
|
476 |
+
# `index_is_free` is a list of booleans, to identify index position
|
477 |
+
# and whether that index is free or dummy.
|
478 |
+
index_is_free = [False]*total_number
|
479 |
+
|
480 |
+
for i, indx in enumerate(free):
|
481 |
+
index_is_free[indx[1]] = True
|
482 |
+
|
483 |
+
# `links` is a dictionary containing the graph described in Kahane's paper,
|
484 |
+
# to every key correspond one or two values, representing the linked indices.
|
485 |
+
# All values in `links` are integers, negative numbers are used in the case
|
486 |
+
# where it is necessary to insert gamma matrices between free indices, in
|
487 |
+
# order to make Kahane's algorithm work (see paper).
|
488 |
+
links = {i: [] for i in range(first_dum_pos, total_number)}
|
489 |
+
|
490 |
+
# `cum_sign` is a step variable to mark the sign of every index, see paper.
|
491 |
+
cum_sign = -1
|
492 |
+
# `cum_sign_list` keeps storage for all `cum_sign` (every index).
|
493 |
+
cum_sign_list = [None]*total_number
|
494 |
+
block_free_count = 0
|
495 |
+
|
496 |
+
# multiply `resulting_coeff` by the coefficient parameter, the rest
|
497 |
+
# of the algorithm ignores a scalar coefficient.
|
498 |
+
resulting_coeff = S.One
|
499 |
+
|
500 |
+
# initialize a list of lists of indices. The outer list will contain all
|
501 |
+
# additive tensor expressions, while the inner list will contain the
|
502 |
+
# free indices (rearranged according to the algorithm).
|
503 |
+
resulting_indices = [[]]
|
504 |
+
|
505 |
+
# start to count the `connected_components`, which together with the number
|
506 |
+
# of contractions, determines a -1 or +1 factor to be multiplied.
|
507 |
+
connected_components = 1
|
508 |
+
|
509 |
+
# First loop: here we fill `cum_sign_list`, and draw the links
|
510 |
+
# among consecutive indices (they are stored in `links`). Links among
|
511 |
+
# non-consecutive indices will be drawn later.
|
512 |
+
for i, is_free in enumerate(index_is_free):
|
513 |
+
# if `expression` starts with free indices, they are ignored here;
|
514 |
+
# they are later added as they are to the beginning of all
|
515 |
+
# `resulting_indices` list of lists of indices.
|
516 |
+
if i < first_dum_pos:
|
517 |
+
continue
|
518 |
+
|
519 |
+
if is_free:
|
520 |
+
block_free_count += 1
|
521 |
+
# if previous index was free as well, draw an arch in `links`.
|
522 |
+
if block_free_count > 1:
|
523 |
+
links[i - 1].append(i)
|
524 |
+
links[i].append(i - 1)
|
525 |
+
else:
|
526 |
+
# Change the sign of the index (`cum_sign`) if the number of free
|
527 |
+
# indices preceding it is even.
|
528 |
+
cum_sign *= 1 if (block_free_count % 2) else -1
|
529 |
+
if block_free_count == 0 and i != first_dum_pos:
|
530 |
+
# check if there are two consecutive dummy indices:
|
531 |
+
# in this case create virtual indices with negative position,
|
532 |
+
# these "virtual" indices represent the insertion of two
|
533 |
+
# gamma^0 matrices to separate consecutive dummy indices, as
|
534 |
+
# Kahane's algorithm requires dummy indices to be separated by
|
535 |
+
# free indices. The product of two gamma^0 matrices is unity,
|
536 |
+
# so the new expression being examined is the same as the
|
537 |
+
# original one.
|
538 |
+
if cum_sign == -1:
|
539 |
+
links[-1-i] = [-1-i+1]
|
540 |
+
links[-1-i+1] = [-1-i]
|
541 |
+
if (i - cum_sign) in links:
|
542 |
+
if i != first_dum_pos:
|
543 |
+
links[i].append(i - cum_sign)
|
544 |
+
if block_free_count != 0:
|
545 |
+
if i - cum_sign < len(index_is_free):
|
546 |
+
if index_is_free[i - cum_sign]:
|
547 |
+
links[i - cum_sign].append(i)
|
548 |
+
block_free_count = 0
|
549 |
+
|
550 |
+
cum_sign_list[i] = cum_sign
|
551 |
+
|
552 |
+
# The previous loop has only created links between consecutive free indices,
|
553 |
+
# it is necessary to properly create links among dummy (contracted) indices,
|
554 |
+
# according to the rules described in Kahane's paper. There is only one exception
|
555 |
+
# to Kahane's rules: the negative indices, which handle the case of some
|
556 |
+
# consecutive free indices (Kahane's paper just describes dummy indices
|
557 |
+
# separated by free indices, hinting that free indices can be added without
|
558 |
+
# altering the expression result).
|
559 |
+
for i in dum:
|
560 |
+
# get the positions of the two contracted indices:
|
561 |
+
pos1 = i[0]
|
562 |
+
pos2 = i[1]
|
563 |
+
|
564 |
+
# create Kahane's upper links, i.e. the upper arcs between dummy
|
565 |
+
# (i.e. contracted) indices:
|
566 |
+
links[pos1].append(pos2)
|
567 |
+
links[pos2].append(pos1)
|
568 |
+
|
569 |
+
# create Kahane's lower links, this corresponds to the arcs below
|
570 |
+
# the line described in the paper:
|
571 |
+
|
572 |
+
# first we move `pos1` and `pos2` according to the sign of the indices:
|
573 |
+
linkpos1 = pos1 + cum_sign_list[pos1]
|
574 |
+
linkpos2 = pos2 + cum_sign_list[pos2]
|
575 |
+
|
576 |
+
# otherwise, perform some checks before creating the lower arcs:
|
577 |
+
|
578 |
+
# make sure we are not exceeding the total number of indices:
|
579 |
+
if linkpos1 >= total_number:
|
580 |
+
continue
|
581 |
+
if linkpos2 >= total_number:
|
582 |
+
continue
|
583 |
+
|
584 |
+
# make sure we are not below the first dummy index in `expression`:
|
585 |
+
if linkpos1 < first_dum_pos:
|
586 |
+
continue
|
587 |
+
if linkpos2 < first_dum_pos:
|
588 |
+
continue
|
589 |
+
|
590 |
+
# check if the previous loop created "virtual" indices between dummy
|
591 |
+
# indices, in such a case relink `linkpos1` and `linkpos2`:
|
592 |
+
if (-1-linkpos1) in links:
|
593 |
+
linkpos1 = -1-linkpos1
|
594 |
+
if (-1-linkpos2) in links:
|
595 |
+
linkpos2 = -1-linkpos2
|
596 |
+
|
597 |
+
# move only if not next to free index:
|
598 |
+
if linkpos1 >= 0 and not index_is_free[linkpos1]:
|
599 |
+
linkpos1 = pos1
|
600 |
+
|
601 |
+
if linkpos2 >=0 and not index_is_free[linkpos2]:
|
602 |
+
linkpos2 = pos2
|
603 |
+
|
604 |
+
# create the lower arcs:
|
605 |
+
if linkpos2 not in links[linkpos1]:
|
606 |
+
links[linkpos1].append(linkpos2)
|
607 |
+
if linkpos1 not in links[linkpos2]:
|
608 |
+
links[linkpos2].append(linkpos1)
|
609 |
+
|
610 |
+
# This loop starts from the `first_dum_pos` index (first dummy index)
|
611 |
+
# walks through the graph deleting the visited indices from `links`,
|
612 |
+
# it adds a gamma matrix for every free index in encounters, while it
|
613 |
+
# completely ignores dummy indices and virtual indices.
|
614 |
+
pointer = first_dum_pos
|
615 |
+
previous_pointer = 0
|
616 |
+
while True:
|
617 |
+
if pointer in links:
|
618 |
+
next_ones = links.pop(pointer)
|
619 |
+
else:
|
620 |
+
break
|
621 |
+
|
622 |
+
if previous_pointer in next_ones:
|
623 |
+
next_ones.remove(previous_pointer)
|
624 |
+
|
625 |
+
previous_pointer = pointer
|
626 |
+
|
627 |
+
if next_ones:
|
628 |
+
pointer = next_ones[0]
|
629 |
+
else:
|
630 |
+
break
|
631 |
+
|
632 |
+
if pointer == previous_pointer:
|
633 |
+
break
|
634 |
+
if pointer >=0 and free_pos[pointer] is not None:
|
635 |
+
for ri in resulting_indices:
|
636 |
+
ri.append(free_pos[pointer])
|
637 |
+
|
638 |
+
# The following loop removes the remaining connected components in `links`.
|
639 |
+
# If there are free indices inside a connected component, it gives a
|
640 |
+
# contribution to the resulting expression given by the factor
|
641 |
+
# `gamma_a gamma_b ... gamma_z + gamma_z ... gamma_b gamma_a`, in Kahanes's
|
642 |
+
# paper represented as {gamma_a, gamma_b, ... , gamma_z},
|
643 |
+
# virtual indices are ignored. The variable `connected_components` is
|
644 |
+
# increased by one for every connected component this loop encounters.
|
645 |
+
|
646 |
+
# If the connected component has virtual and dummy indices only
|
647 |
+
# (no free indices), it contributes to `resulting_indices` by a factor of two.
|
648 |
+
# The multiplication by two is a result of the
|
649 |
+
# factor {gamma^0, gamma^0} = 2 I, as it appears in Kahane's paper.
|
650 |
+
# Note: curly brackets are meant as in the paper, as a generalized
|
651 |
+
# multi-element anticommutator!
|
652 |
+
|
653 |
+
while links:
|
654 |
+
connected_components += 1
|
655 |
+
pointer = min(links.keys())
|
656 |
+
previous_pointer = pointer
|
657 |
+
# the inner loop erases the visited indices from `links`, and it adds
|
658 |
+
# all free indices to `prepend_indices` list, virtual indices are
|
659 |
+
# ignored.
|
660 |
+
prepend_indices = []
|
661 |
+
while True:
|
662 |
+
if pointer in links:
|
663 |
+
next_ones = links.pop(pointer)
|
664 |
+
else:
|
665 |
+
break
|
666 |
+
|
667 |
+
if previous_pointer in next_ones:
|
668 |
+
if len(next_ones) > 1:
|
669 |
+
next_ones.remove(previous_pointer)
|
670 |
+
|
671 |
+
previous_pointer = pointer
|
672 |
+
|
673 |
+
if next_ones:
|
674 |
+
pointer = next_ones[0]
|
675 |
+
|
676 |
+
if pointer >= first_dum_pos and free_pos[pointer] is not None:
|
677 |
+
prepend_indices.insert(0, free_pos[pointer])
|
678 |
+
# if `prepend_indices` is void, it means there are no free indices
|
679 |
+
# in the loop (and it can be shown that there must be a virtual index),
|
680 |
+
# loops of virtual indices only contribute by a factor of two:
|
681 |
+
if len(prepend_indices) == 0:
|
682 |
+
resulting_coeff *= 2
|
683 |
+
# otherwise, add the free indices in `prepend_indices` to
|
684 |
+
# the `resulting_indices`:
|
685 |
+
else:
|
686 |
+
expr1 = prepend_indices
|
687 |
+
expr2 = list(reversed(prepend_indices))
|
688 |
+
resulting_indices = [expri + ri for ri in resulting_indices for expri in (expr1, expr2)]
|
689 |
+
|
690 |
+
# sign correction, as described in Kahane's paper:
|
691 |
+
resulting_coeff *= -1 if (number_of_contractions - connected_components + 1) % 2 else 1
|
692 |
+
# power of two factor, as described in Kahane's paper:
|
693 |
+
resulting_coeff *= 2**(number_of_contractions)
|
694 |
+
|
695 |
+
# If `first_dum_pos` is not zero, it means that there are trailing free gamma
|
696 |
+
# matrices in front of `expression`, so multiply by them:
|
697 |
+
resulting_indices = [ free_pos[0:first_dum_pos] + ri for ri in resulting_indices ]
|
698 |
+
|
699 |
+
resulting_expr = S.Zero
|
700 |
+
for i in resulting_indices:
|
701 |
+
temp_expr = S.One
|
702 |
+
for j in i:
|
703 |
+
temp_expr *= GammaMatrix(j)
|
704 |
+
resulting_expr += temp_expr
|
705 |
+
|
706 |
+
t = resulting_coeff * resulting_expr
|
707 |
+
t1 = None
|
708 |
+
if isinstance(t, TensAdd):
|
709 |
+
t1 = t.args[0]
|
710 |
+
elif isinstance(t, TensMul):
|
711 |
+
t1 = t
|
712 |
+
if t1:
|
713 |
+
pass
|
714 |
+
else:
|
715 |
+
t = eye(4)*t
|
716 |
+
return t
|
llmeval-env/lib/python3.10/site-packages/sympy/physics/matrices.py
ADDED
@@ -0,0 +1,176 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Known matrices related to physics"""
|
2 |
+
|
3 |
+
from sympy.core.numbers import I
|
4 |
+
from sympy.matrices.dense import MutableDenseMatrix as Matrix
|
5 |
+
from sympy.utilities.decorator import deprecated
|
6 |
+
|
7 |
+
|
8 |
+
def msigma(i):
    r"""Returns a Pauli matrix `\sigma_i` with `i=1,2,3`.

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Pauli_matrices

    Examples
    ========

    >>> from sympy.physics.matrices import msigma
    >>> msigma(1)
    Matrix([
    [0, 1],
    [1, 0]])
    """
    # Table of the three Pauli matrices, keyed by index.
    pauli_entries = {
        1: ((0, 1),
            (1, 0)),
        2: ((0, -I),
            (I, 0)),
        3: ((1, 0),
            (0, -1)),
    }
    if i not in pauli_entries:
        raise IndexError("Invalid Pauli index")
    return Matrix(pauli_entries[i])
|
43 |
+
|
44 |
+
|
45 |
+
def pat_matrix(m, dx, dy, dz):
    """Returns the Parallel Axis Theorem matrix to translate the inertia
    matrix a distance of `(dx, dy, dz)` for a body of mass m.

    Examples
    ========

    To translate a body having a mass of 2 units a distance of 1 unit along
    the `x`-axis we get:

    >>> from sympy.physics.matrices import pat_matrix
    >>> pat_matrix(2, 1, 0, 0)
    Matrix([
    [0, 0, 0],
    [0, 2, 0],
    [0, 0, 2]])

    """
    # Squared displacements appear on the diagonal; the negated pairwise
    # products fill the symmetric off-diagonal entries.
    xx, yy, zz = dx**2, dy**2, dz**2
    xy, yz, zx = -dx*dy, -dy*dz, -dz*dx
    translation = (
        (yy + zz, xy, zx),
        (xy, xx + zz, yz),
        (zx, yz, yy + xx),
    )
    return m*Matrix(translation)
|
73 |
+
|
74 |
+
|
75 |
+
def mgamma(mu, lower=False):
    r"""Returns a Dirac gamma matrix `\gamma^\mu` in the standard
    (Dirac) representation.

    Explanation
    ===========

    If you want `\gamma_\mu`, use ``gamma(mu, True)``.

    We use a convention:

    `\gamma^5 = i \cdot \gamma^0 \cdot \gamma^1 \cdot \gamma^2 \cdot \gamma^3`

    `\gamma_5 = i \cdot \gamma_0 \cdot \gamma_1 \cdot \gamma_2 \cdot \gamma_3 = - \gamma^5`

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Gamma_matrices

    Examples
    ========

    >>> from sympy.physics.matrices import mgamma
    >>> mgamma(1)
    Matrix([
    [ 0, 0, 0, 1],
    [ 0, 0, 1, 0],
    [ 0, -1, 0, 0],
    [-1, 0, 0, 0]])
    """
    # Entries of each gamma matrix in the Dirac representation, keyed by
    # index; 5 denotes gamma^5 as defined in the docstring.
    gamma_entries = {
        0: ((1, 0, 0, 0),
            (0, 1, 0, 0),
            (0, 0, -1, 0),
            (0, 0, 0, -1)),
        1: ((0, 0, 0, 1),
            (0, 0, 1, 0),
            (0, -1, 0, 0),
            (-1, 0, 0, 0)),
        2: ((0, 0, 0, -I),
            (0, 0, I, 0),
            (0, I, 0, 0),
            (-I, 0, 0, 0)),
        3: ((0, 0, 1, 0),
            (0, 0, 0, -1),
            (-1, 0, 0, 0),
            (0, 1, 0, 0)),
        5: ((0, 0, 1, 0),
            (0, 0, 0, 1),
            (1, 0, 0, 0),
            (0, 1, 0, 0)),
    }
    if mu not in gamma_entries:
        raise IndexError("Invalid Dirac index")
    result = Matrix(gamma_entries[mu])
    if lower and mu != 0:
        # Lowering with the (+,-,-,-) metric flips the sign of every matrix
        # except gamma^0 (and gamma_5 = -gamma^5 by the stated convention).
        result = -result
    return result
|
148 |
+
|
149 |
+
# Minkowski metric tensor with signature (+, -, -, -), the convention
# commonly used in Quantum Field Theory.
minkowski_tensor = Matrix( (
    (1, 0, 0, 0),
    (0, -1, 0, 0),
    (0, 0, -1, 0),
    (0, 0, 0, -1)
))
|
157 |
+
|
158 |
+
|
159 |
+
@deprecated(
    """
    The sympy.physics.matrices.mdft method is deprecated. Use
    sympy.DFT(n).as_explicit() instead.
    """,
    deprecated_since_version="1.9",
    active_deprecations_target="deprecated-physics-mdft",
)
def mdft(n):
    r"""
    .. deprecated:: 1.9

       Use DFT from sympy.matrices.expressions.fourier instead.

       To get identical behavior to ``mdft(n)``, use ``DFT(n).as_explicit()``.
    """
    # Imported locally rather than at module top — presumably to avoid an
    # import cycle with sympy.matrices; confirm before moving it.
    from sympy.matrices.expressions.fourier import DFT
    # NOTE(review): this returns as_mutable() while the deprecation message
    # recommends as_explicit(); kept as-is to preserve legacy behavior.
    return DFT(n).as_mutable()
|
llmeval-env/lib/python3.10/site-packages/sympy/physics/paulialgebra.py
ADDED
@@ -0,0 +1,231 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This module implements Pauli algebra by subclassing Symbol. Only algebraic
|
3 |
+
properties of Pauli matrices are used (we do not use the Matrix class).
|
4 |
+
|
5 |
+
See the documentation to the class Pauli for examples.
|
6 |
+
|
7 |
+
References
|
8 |
+
==========
|
9 |
+
|
10 |
+
.. [1] https://en.wikipedia.org/wiki/Pauli_matrices
|
11 |
+
"""
|
12 |
+
|
13 |
+
from sympy.core.add import Add
|
14 |
+
from sympy.core.mul import Mul
|
15 |
+
from sympy.core.numbers import I
|
16 |
+
from sympy.core.power import Pow
|
17 |
+
from sympy.core.symbol import Symbol
|
18 |
+
from sympy.physics.quantum import TensorProduct
|
19 |
+
|
20 |
+
__all__ = ['evaluate_pauli_product']
|
21 |
+
|
22 |
+
|
23 |
+
def delta(i, j):
    """
    Returns 1 if ``i == j``, else 0.

    This is used in the multiplication of Pauli matrices.

    Examples
    ========

    >>> from sympy.physics.paulialgebra import delta
    >>> delta(1, 1)
    1
    >>> delta(2, 3)
    0
    """
    # Kronecker delta as a conditional expression.
    return 1 if i == j else 0
|
42 |
+
|
43 |
+
|
44 |
+
def epsilon(i, j, k):
    """
    Return 1 if i,j,k is equal to (1,2,3), (2,3,1), or (3,1,2);
    -1 if ``i``,``j``,``k`` is equal to (1,3,2), (3,2,1), or (2,1,3);
    else return 0.

    This is used in the multiplication of Pauli matrices.

    Examples
    ========

    >>> from sympy.physics.paulialgebra import epsilon
    >>> epsilon(1, 2, 3)
    1
    >>> epsilon(1, 3, 2)
    -1
    """
    # Even (cyclic) and odd permutations of (1, 2, 3); everything else
    # (including repeated indices) maps to 0.
    even_perms = ((1, 2, 3), (2, 3, 1), (3, 1, 2))
    odd_perms = ((1, 3, 2), (3, 2, 1), (2, 1, 3))
    triple = (i, j, k)
    if triple in even_perms:
        return 1
    if triple in odd_perms:
        return -1
    return 0
|
67 |
+
|
68 |
+
|
69 |
+
class Pauli(Symbol):
    """
    The class representing algebraic properties of Pauli matrices.

    Explanation
    ===========

    The symbol used to display the Pauli matrices can be changed with an
    optional parameter ``label="sigma"``. Pauli matrices with different
    ``label`` attributes cannot multiply together.

    If the left multiplication of symbol or number with Pauli matrix is needed,
    please use parentheses  to separate Pauli and symbolic multiplication
    (for example: 2*I*(Pauli(3)*Pauli(2))).

    Another variant is to use evaluate_pauli_product function to evaluate
    the product of Pauli matrices and other symbols (with commutative
    multiply rules).

    See Also
    ========

    evaluate_pauli_product

    Examples
    ========

    >>> from sympy.physics.paulialgebra import Pauli
    >>> Pauli(1)
    sigma1
    >>> Pauli(1)*Pauli(2)
    I*sigma3
    >>> Pauli(1)*Pauli(1)
    1
    >>> Pauli(3)**4
    1
    >>> Pauli(1)*Pauli(2)*Pauli(3)
    I

    >>> from sympy.physics.paulialgebra import Pauli
    >>> Pauli(1, label="tau")
    tau1
    >>> Pauli(1)*Pauli(2, label="tau")
    sigma1*tau2
    >>> Pauli(1, label="tau")*Pauli(2, label="tau")
    I*tau3

    >>> from sympy import I
    >>> I*(Pauli(2)*Pauli(3))
    -sigma1

    >>> from sympy.physics.paulialgebra import evaluate_pauli_product
    >>> f = I*Pauli(2)*Pauli(3)
    >>> f
    I*sigma2*sigma3
    >>> evaluate_pauli_product(f)
    -sigma1
    """

    # No per-instance __dict__; the only extra state beyond Symbol is the
    # matrix index ``i`` and the display ``label``.
    __slots__ = ("i", "label")

    def __new__(cls, i, label="sigma"):
        # Only the three Pauli matrices sigma1..sigma3 exist.
        if i not in [1, 2, 3]:
            raise IndexError("Invalid Pauli index")
        # Build the underlying Symbol as e.g. "sigma1"; noncommutative so
        # products keep their order, hermitian since sigma_i^dagger = sigma_i.
        obj = Symbol.__new__(cls, "%s%d" %(label,i), commutative=False, hermitian=True)
        obj.i = i
        obj.label = label
        return obj

    def __getnewargs_ex__(self):
        # Pickle support: reconstruct through __new__ with (i, label).
        return (self.i, self.label), {}

    def _hashable_content(self):
        # Distinguish Pauli(1, "sigma") from Pauli(1, "tau") in hashing and
        # structural equality.
        return (self.i, self.label)

    # FIXME don't work for -I*Pauli(2)*Pauli(3)
    def __mul__(self, other):
        # Apply the Pauli algebra identity
        #   sigma_j * sigma_k = delta(j,k) + I * sum_l epsilon(j,k,l) sigma_l
        # only when both factors carry the same label; otherwise fall back to
        # ordinary (noncommutative) symbolic multiplication.
        if isinstance(other, Pauli):
            j = self.i
            k = other.i
            jlab = self.label
            klab = other.label

            if jlab == klab:
                return delta(j, k) \
                    + I*epsilon(j, k, 1)*Pauli(1,jlab) \
                    + I*epsilon(j, k, 2)*Pauli(2,jlab) \
                    + I*epsilon(j, k, 3)*Pauli(3,jlab)
        return super().__mul__(other)

    def _eval_power(b, e):
        # sigma_i**2 == 1, so a positive integer power reduces to the parity
        # of the exponent: even -> 1, odd -> sigma_i. Other exponents return
        # None, leaving the Pow unevaluated.
        if e.is_Integer and e.is_positive:
            return super().__pow__(int(e) % 2)
|
162 |
+
|
163 |
+
|
164 |
+
def evaluate_pauli_product(arg):
    '''Help function to evaluate Pauli matrices product
    with symbolic objects.

    Parameters
    ==========

    arg: symbolic expression that contains Paulimatrices

    Examples
    ========

    >>> from sympy.physics.paulialgebra import Pauli, evaluate_pauli_product
    >>> from sympy import I
    >>> evaluate_pauli_product(I*Pauli(1)*Pauli(2))
    -sigma3

    >>> from sympy.abc import x
    >>> evaluate_pauli_product(x**2*Pauli(2)*Pauli(1))
    -I*x**2*sigma3
    '''
    # start/end track successive rewrites; the fixed-point loop below stops
    # when one pass no longer changes the expression.
    start = arg
    end = arg

    # A bare power of a Pauli matrix reduces by sigma**2 == 1.
    if isinstance(arg, Pow) and isinstance(arg.args[0], Pauli):
        if arg.args[1].is_odd:
            return arg.args[0]
        else:
            return 1

    # Distribute over sums and tensor products, recursing into each term.
    if isinstance(arg, Add):
        return Add(*[evaluate_pauli_product(part) for part in arg.args])

    if isinstance(arg, TensorProduct):
        return TensorProduct(*[evaluate_pauli_product(part) for part in arg.args])

    elif not(isinstance(arg, Mul)):
        # Anything that is not a product (and none of the above) is returned
        # unchanged.
        return arg

    # NOTE: by precedence this reads (not start == end) or (start == arg and
    # end == arg) — i.e. "changed last pass, or this is still the first pass".
    while not start == end or start == arg and end == arg:
        start = end

        # Split the commutative coefficient from the noncommutative factors.
        tmp = start.as_coeff_mul()
        # Running product of adjacent Pauli factors (collapsed via Pauli.__mul__).
        sigma_product = 1
        # Product of commutative factors.
        com_product = 1
        # Accumulates noncommutative non-Pauli factors, interleaved with the
        # Pauli runs that preceded them, preserving the original order.
        keeper = 1

        for el in tmp[1]:
            if isinstance(el, Pauli):
                sigma_product *= el
            elif not el.is_commutative:
                if isinstance(el, Pow) and isinstance(el.args[0], Pauli):
                    # Even powers vanish (sigma**2 == 1); odd keep one factor.
                    if el.args[1].is_odd:
                        sigma_product *= el.args[0]
                elif isinstance(el, TensorProduct):
                    # Flush the current Pauli run, then recurse componentwise.
                    keeper = keeper*sigma_product*\
                        TensorProduct(
                            *[evaluate_pauli_product(part) for part in el.args]
                        )
                    sigma_product = 1
                else:
                    # Other noncommutative symbol: flush the Pauli run before it.
                    keeper = keeper*sigma_product*el
                    sigma_product = 1
            else:
                com_product *= el
        # Reassemble: coefficient * interleaved factors * trailing Pauli run
        # * commutative part.
        end = tmp[0]*keeper*sigma_product*com_product
        if end == arg: break
    return end
|
llmeval-env/lib/python3.10/site-packages/sympy/physics/pring.py
ADDED
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from sympy.core.numbers import (I, pi)
|
2 |
+
from sympy.core.singleton import S
|
3 |
+
from sympy.functions.elementary.exponential import exp
|
4 |
+
from sympy.functions.elementary.miscellaneous import sqrt
|
5 |
+
from sympy.physics.quantum.constants import hbar
|
6 |
+
|
7 |
+
|
8 |
+
def wavefunction(n, x):
    """
    Returns the wavefunction for particle on ring.

    Parameters
    ==========

    n : The quantum number.
        Here ``n`` can be positive as well as negative
        which can be used to describe the direction of motion of particle.
    x :
        The angle.

    Examples
    ========

    >>> from sympy.physics.pring import wavefunction
    >>> from sympy import Symbol, integrate, pi
    >>> x=Symbol("x")
    >>> wavefunction(1, x)
    sqrt(2)*exp(I*x)/(2*sqrt(pi))
    >>> wavefunction(2, x)
    sqrt(2)*exp(2*I*x)/(2*sqrt(pi))
    >>> wavefunction(3, x)
    sqrt(2)*exp(3*I*x)/(2*sqrt(pi))

    The normalization of the wavefunction is:

    >>> integrate(wavefunction(2, x)*wavefunction(-2, x), (x, 0, 2*pi))
    1
    >>> integrate(wavefunction(4, x)*wavefunction(-4, x), (x, 0, 2*pi))
    1

    References
    ==========

    .. [1] Atkins, Peter W.; Friedman, Ronald (2005). Molecular Quantum
           Mechanics (4th ed.).  Pages 71-73.

    """
    # Sympify so numeric and symbolic arguments behave uniformly.
    n = S(n)
    x = S(x)
    # Normalized plane wave on the ring: e^{inx} / sqrt(2*pi).
    return exp(I * n * x) / sqrt(2 * pi)
|
51 |
+
|
52 |
+
|
53 |
+
def energy(n, m, r):
    """
    Returns the energy of the state corresponding to quantum number ``n``.

    E=(n**2 * (hcross)**2) / (2 * m * r**2)

    Parameters
    ==========

    n :
        The quantum number.
    m :
        Mass of the particle.
    r :
        Radius of circle.

    Examples
    ========

    >>> from sympy.physics.pring import energy
    >>> from sympy import Symbol
    >>> m=Symbol("m")
    >>> r=Symbol("r")
    >>> energy(1, m, r)
    hbar**2/(2*m*r**2)
    >>> energy(2, m, r)
    2*hbar**2/(m*r**2)
    >>> energy(-2, 2.0, 3.0)
    0.111111111111111*hbar**2

    References
    ==========

    .. [1] Atkins, Peter W.; Friedman, Ronald (2005). Molecular Quantum
           Mechanics (4th ed.).  Pages 71-73.

    """
    n, m, r = map(S, (n, m, r))
    # Guard clause: only integer quantum numbers are valid (a symbolic n
    # whose integrality is unknown is rejected as well).
    if not n.is_integer:
        raise ValueError("'n' must be integer")
    return (n**2 * hbar**2) / (2 * m * r**2)
|
llmeval-env/lib/python3.10/site-packages/sympy/physics/qho_1d.py
ADDED
@@ -0,0 +1,88 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from sympy.core import S, pi, Rational
|
2 |
+
from sympy.functions import hermite, sqrt, exp, factorial, Abs
|
3 |
+
from sympy.physics.quantum.constants import hbar
|
4 |
+
|
5 |
+
|
6 |
+
def psi_n(n, x, m, omega):
    """
    Returns the wavefunction psi_{n} for the One-dimensional harmonic oscillator.

    Parameters
    ==========

    n :
        the "nodal" quantum number.  Corresponds to the number of nodes in the
        wavefunction.  ``n >= 0``
    x :
        x coordinate.
    m :
        Mass of the particle.
    omega :
        Angular frequency of the oscillator.

    Examples
    ========

    >>> from sympy.physics.qho_1d import psi_n
    >>> from sympy.abc import m, x, omega
    >>> psi_n(0, x, m, omega)
    (m*omega)**(1/4)*exp(-m*omega*x**2/(2*hbar))/(hbar**(1/4)*pi**(1/4))

    """
    # Sympify all arguments up front.
    n, x, m, omega = map(S, [n, x, m, omega])

    # Inverse length-squared scale of the oscillator.
    nu = m * omega / hbar
    # Normalization constant ensuring <psi_n|psi_n> = 1.
    norm = (nu/pi)**Rational(1, 4) * sqrt(1/(2**n*factorial(n)))
    gaussian = exp(-nu * x**2 / 2)

    return norm * gaussian * hermite(n, sqrt(nu)*x)
|
40 |
+
|
41 |
+
|
42 |
+
def E_n(n, omega):
    """
    Returns the Energy of the One-dimensional harmonic oscillator.

    Parameters
    ==========

    n :
        The "nodal" quantum number.
    omega :
        The harmonic oscillator angular frequency.

    Notes
    =====

    The unit of the returned value matches the unit of hw, since the energy is
    calculated as:

        E_n = hbar * omega*(n + 1/2)

    Examples
    ========

    >>> from sympy.physics.qho_1d import E_n
    >>> from sympy.abc import x, omega
    >>> E_n(x, omega)
    hbar*omega*(x + 1/2)
    """
    # Equally-spaced ladder with zero-point energy hbar*omega/2.
    return (n + S.Half) * hbar * omega
|
72 |
+
|
73 |
+
|
74 |
+
def coherent_state(n, alpha):
    """
    Returns <n|alpha> for the coherent states of 1D harmonic oscillator.
    See https://en.wikipedia.org/wiki/Coherent_states

    Parameters
    ==========

    n :
        The "nodal" quantum number.
    alpha :
        The eigen value of annihilation operator.
    """
    # Poissonian amplitude alpha^n / sqrt(n!) times the overall Gaussian
    # normalization exp(-|alpha|^2 / 2).
    amplitude = alpha**n / sqrt(factorial(n))
    return exp(-Abs(alpha)**2 / 2) * amplitude
|
llmeval-env/lib/python3.10/site-packages/sympy/physics/secondquant.py
ADDED
@@ -0,0 +1,3114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Second quantization operators and states for bosons.
|
3 |
+
|
4 |
+
This follow the formulation of Fetter and Welecka, "Quantum Theory
|
5 |
+
of Many-Particle Systems."
|
6 |
+
"""
|
7 |
+
from collections import defaultdict
|
8 |
+
|
9 |
+
from sympy.core.add import Add
|
10 |
+
from sympy.core.basic import Basic
|
11 |
+
from sympy.core.cache import cacheit
|
12 |
+
from sympy.core.containers import Tuple
|
13 |
+
from sympy.core.expr import Expr
|
14 |
+
from sympy.core.function import Function
|
15 |
+
from sympy.core.mul import Mul
|
16 |
+
from sympy.core.numbers import I
|
17 |
+
from sympy.core.power import Pow
|
18 |
+
from sympy.core.singleton import S
|
19 |
+
from sympy.core.sorting import default_sort_key
|
20 |
+
from sympy.core.symbol import Dummy, Symbol
|
21 |
+
from sympy.core.sympify import sympify
|
22 |
+
from sympy.functions.elementary.miscellaneous import sqrt
|
23 |
+
from sympy.functions.special.tensor_functions import KroneckerDelta
|
24 |
+
from sympy.matrices.dense import zeros
|
25 |
+
from sympy.printing.str import StrPrinter
|
26 |
+
from sympy.utilities.iterables import has_dups
|
27 |
+
|
28 |
+
__all__ = [
|
29 |
+
'Dagger',
|
30 |
+
'KroneckerDelta',
|
31 |
+
'BosonicOperator',
|
32 |
+
'AnnihilateBoson',
|
33 |
+
'CreateBoson',
|
34 |
+
'AnnihilateFermion',
|
35 |
+
'CreateFermion',
|
36 |
+
'FockState',
|
37 |
+
'FockStateBra',
|
38 |
+
'FockStateKet',
|
39 |
+
'FockStateBosonKet',
|
40 |
+
'FockStateBosonBra',
|
41 |
+
'FockStateFermionKet',
|
42 |
+
'FockStateFermionBra',
|
43 |
+
'BBra',
|
44 |
+
'BKet',
|
45 |
+
'FBra',
|
46 |
+
'FKet',
|
47 |
+
'F',
|
48 |
+
'Fd',
|
49 |
+
'B',
|
50 |
+
'Bd',
|
51 |
+
'apply_operators',
|
52 |
+
'InnerProduct',
|
53 |
+
'BosonicBasis',
|
54 |
+
'VarBosonicBasis',
|
55 |
+
'FixedBosonicBasis',
|
56 |
+
'Commutator',
|
57 |
+
'matrix_rep',
|
58 |
+
'contraction',
|
59 |
+
'wicks',
|
60 |
+
'NO',
|
61 |
+
'evaluate_deltas',
|
62 |
+
'AntiSymmetricTensor',
|
63 |
+
'substitute_dummies',
|
64 |
+
'PermutationOperator',
|
65 |
+
'simplify_index_permutations',
|
66 |
+
]
|
67 |
+
|
68 |
+
|
69 |
+
class SecondQuantizationError(Exception):
|
70 |
+
pass
|
71 |
+
|
72 |
+
|
73 |
+
class AppliesOnlyToSymbolicIndex(SecondQuantizationError):
|
74 |
+
pass
|
75 |
+
|
76 |
+
|
77 |
+
class ContractionAppliesOnlyToFermions(SecondQuantizationError):
|
78 |
+
pass
|
79 |
+
|
80 |
+
|
81 |
+
class ViolationOfPauliPrinciple(SecondQuantizationError):
|
82 |
+
pass
|
83 |
+
|
84 |
+
|
85 |
+
class SubstitutionOfAmbigousOperatorFailed(SecondQuantizationError):
|
86 |
+
pass
|
87 |
+
|
88 |
+
|
89 |
+
class WicksTheoremDoesNotApply(SecondQuantizationError):
|
90 |
+
pass
|
91 |
+
|
92 |
+
|
93 |
+
class Dagger(Expr):
|
94 |
+
"""
|
95 |
+
Hermitian conjugate of creation/annihilation operators.
|
96 |
+
|
97 |
+
Examples
|
98 |
+
========
|
99 |
+
|
100 |
+
>>> from sympy import I
|
101 |
+
>>> from sympy.physics.secondquant import Dagger, B, Bd
|
102 |
+
>>> Dagger(2*I)
|
103 |
+
-2*I
|
104 |
+
>>> Dagger(B(0))
|
105 |
+
CreateBoson(0)
|
106 |
+
>>> Dagger(Bd(0))
|
107 |
+
AnnihilateBoson(0)
|
108 |
+
|
109 |
+
"""
|
110 |
+
|
111 |
+
def __new__(cls, arg):
|
112 |
+
arg = sympify(arg)
|
113 |
+
r = cls.eval(arg)
|
114 |
+
if isinstance(r, Basic):
|
115 |
+
return r
|
116 |
+
obj = Basic.__new__(cls, arg)
|
117 |
+
return obj
|
118 |
+
|
119 |
+
@classmethod
|
120 |
+
def eval(cls, arg):
|
121 |
+
"""
|
122 |
+
Evaluates the Dagger instance.
|
123 |
+
|
124 |
+
Examples
|
125 |
+
========
|
126 |
+
|
127 |
+
>>> from sympy import I
|
128 |
+
>>> from sympy.physics.secondquant import Dagger, B, Bd
|
129 |
+
>>> Dagger(2*I)
|
130 |
+
-2*I
|
131 |
+
>>> Dagger(B(0))
|
132 |
+
CreateBoson(0)
|
133 |
+
>>> Dagger(Bd(0))
|
134 |
+
AnnihilateBoson(0)
|
135 |
+
|
136 |
+
The eval() method is called automatically.
|
137 |
+
|
138 |
+
"""
|
139 |
+
dagger = getattr(arg, '_dagger_', None)
|
140 |
+
if dagger is not None:
|
141 |
+
return dagger()
|
142 |
+
if isinstance(arg, Basic):
|
143 |
+
if arg.is_Add:
|
144 |
+
return Add(*tuple(map(Dagger, arg.args)))
|
145 |
+
if arg.is_Mul:
|
146 |
+
return Mul(*tuple(map(Dagger, reversed(arg.args))))
|
147 |
+
if arg.is_Number:
|
148 |
+
return arg
|
149 |
+
if arg.is_Pow:
|
150 |
+
return Pow(Dagger(arg.args[0]), arg.args[1])
|
151 |
+
if arg == I:
|
152 |
+
return -arg
|
153 |
+
else:
|
154 |
+
return None
|
155 |
+
|
156 |
+
def _dagger_(self):
|
157 |
+
return self.args[0]
|
158 |
+
|
159 |
+
|
160 |
+
class TensorSymbol(Expr):
|
161 |
+
|
162 |
+
is_commutative = True
|
163 |
+
|
164 |
+
|
165 |
+
class AntiSymmetricTensor(TensorSymbol):
|
166 |
+
"""Stores upper and lower indices in separate Tuple's.
|
167 |
+
|
168 |
+
Each group of indices is assumed to be antisymmetric.
|
169 |
+
|
170 |
+
Examples
|
171 |
+
========
|
172 |
+
|
173 |
+
>>> from sympy import symbols
|
174 |
+
>>> from sympy.physics.secondquant import AntiSymmetricTensor
|
175 |
+
>>> i, j = symbols('i j', below_fermi=True)
|
176 |
+
>>> a, b = symbols('a b', above_fermi=True)
|
177 |
+
>>> AntiSymmetricTensor('v', (a, i), (b, j))
|
178 |
+
AntiSymmetricTensor(v, (a, i), (b, j))
|
179 |
+
>>> AntiSymmetricTensor('v', (i, a), (b, j))
|
180 |
+
-AntiSymmetricTensor(v, (a, i), (b, j))
|
181 |
+
|
182 |
+
As you can see, the indices are automatically sorted to a canonical form.
|
183 |
+
|
184 |
+
"""
|
185 |
+
|
186 |
+
def __new__(cls, symbol, upper, lower):
|
187 |
+
|
188 |
+
try:
|
189 |
+
upper, signu = _sort_anticommuting_fermions(
|
190 |
+
upper, key=cls._sortkey)
|
191 |
+
lower, signl = _sort_anticommuting_fermions(
|
192 |
+
lower, key=cls._sortkey)
|
193 |
+
|
194 |
+
except ViolationOfPauliPrinciple:
|
195 |
+
return S.Zero
|
196 |
+
|
197 |
+
symbol = sympify(symbol)
|
198 |
+
upper = Tuple(*upper)
|
199 |
+
lower = Tuple(*lower)
|
200 |
+
|
201 |
+
if (signu + signl) % 2:
|
202 |
+
return -TensorSymbol.__new__(cls, symbol, upper, lower)
|
203 |
+
else:
|
204 |
+
|
205 |
+
return TensorSymbol.__new__(cls, symbol, upper, lower)
|
206 |
+
|
207 |
+
@classmethod
|
208 |
+
def _sortkey(cls, index):
|
209 |
+
"""Key for sorting of indices.
|
210 |
+
|
211 |
+
particle < hole < general
|
212 |
+
|
213 |
+
FIXME: This is a bottle-neck, can we do it faster?
|
214 |
+
"""
|
215 |
+
h = hash(index)
|
216 |
+
label = str(index)
|
217 |
+
if isinstance(index, Dummy):
|
218 |
+
if index.assumptions0.get('above_fermi'):
|
219 |
+
return (20, label, h)
|
220 |
+
elif index.assumptions0.get('below_fermi'):
|
221 |
+
return (21, label, h)
|
222 |
+
else:
|
223 |
+
return (22, label, h)
|
224 |
+
|
225 |
+
if index.assumptions0.get('above_fermi'):
|
226 |
+
return (10, label, h)
|
227 |
+
elif index.assumptions0.get('below_fermi'):
|
228 |
+
return (11, label, h)
|
229 |
+
else:
|
230 |
+
return (12, label, h)
|
231 |
+
|
232 |
+
def _latex(self, printer):
|
233 |
+
return "{%s^{%s}_{%s}}" % (
|
234 |
+
self.symbol,
|
235 |
+
"".join([ i.name for i in self.args[1]]),
|
236 |
+
"".join([ i.name for i in self.args[2]])
|
237 |
+
)
|
238 |
+
|
239 |
+
@property
|
240 |
+
def symbol(self):
|
241 |
+
"""
|
242 |
+
Returns the symbol of the tensor.
|
243 |
+
|
244 |
+
Examples
|
245 |
+
========
|
246 |
+
|
247 |
+
>>> from sympy import symbols
|
248 |
+
>>> from sympy.physics.secondquant import AntiSymmetricTensor
|
249 |
+
>>> i, j = symbols('i,j', below_fermi=True)
|
250 |
+
>>> a, b = symbols('a,b', above_fermi=True)
|
251 |
+
>>> AntiSymmetricTensor('v', (a, i), (b, j))
|
252 |
+
AntiSymmetricTensor(v, (a, i), (b, j))
|
253 |
+
>>> AntiSymmetricTensor('v', (a, i), (b, j)).symbol
|
254 |
+
v
|
255 |
+
|
256 |
+
"""
|
257 |
+
return self.args[0]
|
258 |
+
|
259 |
+
@property
|
260 |
+
def upper(self):
|
261 |
+
"""
|
262 |
+
Returns the upper indices.
|
263 |
+
|
264 |
+
Examples
|
265 |
+
========
|
266 |
+
|
267 |
+
>>> from sympy import symbols
|
268 |
+
>>> from sympy.physics.secondquant import AntiSymmetricTensor
|
269 |
+
>>> i, j = symbols('i,j', below_fermi=True)
|
270 |
+
>>> a, b = symbols('a,b', above_fermi=True)
|
271 |
+
>>> AntiSymmetricTensor('v', (a, i), (b, j))
|
272 |
+
AntiSymmetricTensor(v, (a, i), (b, j))
|
273 |
+
>>> AntiSymmetricTensor('v', (a, i), (b, j)).upper
|
274 |
+
(a, i)
|
275 |
+
|
276 |
+
|
277 |
+
"""
|
278 |
+
return self.args[1]
|
279 |
+
|
280 |
+
@property
|
281 |
+
def lower(self):
|
282 |
+
"""
|
283 |
+
Returns the lower indices.
|
284 |
+
|
285 |
+
Examples
|
286 |
+
========
|
287 |
+
|
288 |
+
>>> from sympy import symbols
|
289 |
+
>>> from sympy.physics.secondquant import AntiSymmetricTensor
|
290 |
+
>>> i, j = symbols('i,j', below_fermi=True)
|
291 |
+
>>> a, b = symbols('a,b', above_fermi=True)
|
292 |
+
>>> AntiSymmetricTensor('v', (a, i), (b, j))
|
293 |
+
AntiSymmetricTensor(v, (a, i), (b, j))
|
294 |
+
>>> AntiSymmetricTensor('v', (a, i), (b, j)).lower
|
295 |
+
(b, j)
|
296 |
+
|
297 |
+
"""
|
298 |
+
return self.args[2]
|
299 |
+
|
300 |
+
def __str__(self):
|
301 |
+
return "%s(%s,%s)" % self.args
|
302 |
+
|
303 |
+
|
304 |
+
class SqOperator(Expr):
|
305 |
+
"""
|
306 |
+
Base class for Second Quantization operators.
|
307 |
+
"""
|
308 |
+
|
309 |
+
op_symbol = 'sq'
|
310 |
+
|
311 |
+
is_commutative = False
|
312 |
+
|
313 |
+
def __new__(cls, k):
|
314 |
+
obj = Basic.__new__(cls, sympify(k))
|
315 |
+
return obj
|
316 |
+
|
317 |
+
@property
|
318 |
+
def state(self):
|
319 |
+
"""
|
320 |
+
Returns the state index related to this operator.
|
321 |
+
|
322 |
+
Examples
|
323 |
+
========
|
324 |
+
|
325 |
+
>>> from sympy import Symbol
|
326 |
+
>>> from sympy.physics.secondquant import F, Fd, B, Bd
|
327 |
+
>>> p = Symbol('p')
|
328 |
+
>>> F(p).state
|
329 |
+
p
|
330 |
+
>>> Fd(p).state
|
331 |
+
p
|
332 |
+
>>> B(p).state
|
333 |
+
p
|
334 |
+
>>> Bd(p).state
|
335 |
+
p
|
336 |
+
|
337 |
+
"""
|
338 |
+
return self.args[0]
|
339 |
+
|
340 |
+
@property
|
341 |
+
def is_symbolic(self):
|
342 |
+
"""
|
343 |
+
Returns True if the state is a symbol (as opposed to a number).
|
344 |
+
|
345 |
+
Examples
|
346 |
+
========
|
347 |
+
|
348 |
+
>>> from sympy import Symbol
|
349 |
+
>>> from sympy.physics.secondquant import F
|
350 |
+
>>> p = Symbol('p')
|
351 |
+
>>> F(p).is_symbolic
|
352 |
+
True
|
353 |
+
>>> F(1).is_symbolic
|
354 |
+
False
|
355 |
+
|
356 |
+
"""
|
357 |
+
if self.state.is_Integer:
|
358 |
+
return False
|
359 |
+
else:
|
360 |
+
return True
|
361 |
+
|
362 |
+
def __repr__(self):
|
363 |
+
return NotImplemented
|
364 |
+
|
365 |
+
def __str__(self):
|
366 |
+
return "%s(%r)" % (self.op_symbol, self.state)
|
367 |
+
|
368 |
+
def apply_operator(self, state):
|
369 |
+
"""
|
370 |
+
Applies an operator to itself.
|
371 |
+
"""
|
372 |
+
raise NotImplementedError('implement apply_operator in a subclass')
|
373 |
+
|
374 |
+
|
375 |
+
class BosonicOperator(SqOperator):
|
376 |
+
pass
|
377 |
+
|
378 |
+
|
379 |
+
class Annihilator(SqOperator):
|
380 |
+
pass
|
381 |
+
|
382 |
+
|
383 |
+
class Creator(SqOperator):
|
384 |
+
pass
|
385 |
+
|
386 |
+
|
387 |
+
class AnnihilateBoson(BosonicOperator, Annihilator):
|
388 |
+
"""
|
389 |
+
Bosonic annihilation operator.
|
390 |
+
|
391 |
+
Examples
|
392 |
+
========
|
393 |
+
|
394 |
+
>>> from sympy.physics.secondquant import B
|
395 |
+
>>> from sympy.abc import x
|
396 |
+
>>> B(x)
|
397 |
+
AnnihilateBoson(x)
|
398 |
+
"""
|
399 |
+
|
400 |
+
op_symbol = 'b'
|
401 |
+
|
402 |
+
def _dagger_(self):
|
403 |
+
return CreateBoson(self.state)
|
404 |
+
|
405 |
+
def apply_operator(self, state):
|
406 |
+
"""
|
407 |
+
Apply state to self if self is not symbolic and state is a FockStateKet, else
|
408 |
+
multiply self by state.
|
409 |
+
|
410 |
+
Examples
|
411 |
+
========
|
412 |
+
|
413 |
+
>>> from sympy.physics.secondquant import B, BKet
|
414 |
+
>>> from sympy.abc import x, y, n
|
415 |
+
>>> B(x).apply_operator(y)
|
416 |
+
y*AnnihilateBoson(x)
|
417 |
+
>>> B(0).apply_operator(BKet((n,)))
|
418 |
+
sqrt(n)*FockStateBosonKet((n - 1,))
|
419 |
+
|
420 |
+
"""
|
421 |
+
if not self.is_symbolic and isinstance(state, FockStateKet):
|
422 |
+
element = self.state
|
423 |
+
amp = sqrt(state[element])
|
424 |
+
return amp*state.down(element)
|
425 |
+
else:
|
426 |
+
return Mul(self, state)
|
427 |
+
|
428 |
+
def __repr__(self):
|
429 |
+
return "AnnihilateBoson(%s)" % self.state
|
430 |
+
|
431 |
+
def _latex(self, printer):
|
432 |
+
if self.state is S.Zero:
|
433 |
+
return "b_{0}"
|
434 |
+
else:
|
435 |
+
return "b_{%s}" % self.state.name
|
436 |
+
|
437 |
+
class CreateBoson(BosonicOperator, Creator):
|
438 |
+
"""
|
439 |
+
Bosonic creation operator.
|
440 |
+
"""
|
441 |
+
|
442 |
+
op_symbol = 'b+'
|
443 |
+
|
444 |
+
def _dagger_(self):
|
445 |
+
return AnnihilateBoson(self.state)
|
446 |
+
|
447 |
+
def apply_operator(self, state):
|
448 |
+
"""
|
449 |
+
Apply state to self if self is not symbolic and state is a FockStateKet, else
|
450 |
+
multiply self by state.
|
451 |
+
|
452 |
+
Examples
|
453 |
+
========
|
454 |
+
|
455 |
+
>>> from sympy.physics.secondquant import B, Dagger, BKet
|
456 |
+
>>> from sympy.abc import x, y, n
|
457 |
+
>>> Dagger(B(x)).apply_operator(y)
|
458 |
+
y*CreateBoson(x)
|
459 |
+
>>> B(0).apply_operator(BKet((n,)))
|
460 |
+
sqrt(n)*FockStateBosonKet((n - 1,))
|
461 |
+
"""
|
462 |
+
if not self.is_symbolic and isinstance(state, FockStateKet):
|
463 |
+
element = self.state
|
464 |
+
amp = sqrt(state[element] + 1)
|
465 |
+
return amp*state.up(element)
|
466 |
+
else:
|
467 |
+
return Mul(self, state)
|
468 |
+
|
469 |
+
def __repr__(self):
|
470 |
+
return "CreateBoson(%s)" % self.state
|
471 |
+
|
472 |
+
def _latex(self, printer):
|
473 |
+
if self.state is S.Zero:
|
474 |
+
return "{b^\\dagger_{0}}"
|
475 |
+
else:
|
476 |
+
return "{b^\\dagger_{%s}}" % self.state.name
|
477 |
+
|
478 |
+
B = AnnihilateBoson
|
479 |
+
Bd = CreateBoson
|
480 |
+
|
481 |
+
|
482 |
+
class FermionicOperator(SqOperator):
|
483 |
+
|
484 |
+
@property
|
485 |
+
def is_restricted(self):
|
486 |
+
"""
|
487 |
+
Is this FermionicOperator restricted with respect to fermi level?
|
488 |
+
|
489 |
+
Returns
|
490 |
+
=======
|
491 |
+
|
492 |
+
1 : restricted to orbits above fermi
|
493 |
+
0 : no restriction
|
494 |
+
-1 : restricted to orbits below fermi
|
495 |
+
|
496 |
+
Examples
|
497 |
+
========
|
498 |
+
|
499 |
+
>>> from sympy import Symbol
|
500 |
+
>>> from sympy.physics.secondquant import F, Fd
|
501 |
+
>>> a = Symbol('a', above_fermi=True)
|
502 |
+
>>> i = Symbol('i', below_fermi=True)
|
503 |
+
>>> p = Symbol('p')
|
504 |
+
|
505 |
+
>>> F(a).is_restricted
|
506 |
+
1
|
507 |
+
>>> Fd(a).is_restricted
|
508 |
+
1
|
509 |
+
>>> F(i).is_restricted
|
510 |
+
-1
|
511 |
+
>>> Fd(i).is_restricted
|
512 |
+
-1
|
513 |
+
>>> F(p).is_restricted
|
514 |
+
0
|
515 |
+
>>> Fd(p).is_restricted
|
516 |
+
0
|
517 |
+
|
518 |
+
"""
|
519 |
+
ass = self.args[0].assumptions0
|
520 |
+
if ass.get("below_fermi"):
|
521 |
+
return -1
|
522 |
+
if ass.get("above_fermi"):
|
523 |
+
return 1
|
524 |
+
return 0
|
525 |
+
|
526 |
+
@property
|
527 |
+
def is_above_fermi(self):
|
528 |
+
"""
|
529 |
+
Does the index of this FermionicOperator allow values above fermi?
|
530 |
+
|
531 |
+
Examples
|
532 |
+
========
|
533 |
+
|
534 |
+
>>> from sympy import Symbol
|
535 |
+
>>> from sympy.physics.secondquant import F
|
536 |
+
>>> a = Symbol('a', above_fermi=True)
|
537 |
+
>>> i = Symbol('i', below_fermi=True)
|
538 |
+
>>> p = Symbol('p')
|
539 |
+
|
540 |
+
>>> F(a).is_above_fermi
|
541 |
+
True
|
542 |
+
>>> F(i).is_above_fermi
|
543 |
+
False
|
544 |
+
>>> F(p).is_above_fermi
|
545 |
+
True
|
546 |
+
|
547 |
+
Note
|
548 |
+
====
|
549 |
+
|
550 |
+
The same applies to creation operators Fd
|
551 |
+
|
552 |
+
"""
|
553 |
+
return not self.args[0].assumptions0.get("below_fermi")
|
554 |
+
|
555 |
+
@property
|
556 |
+
def is_below_fermi(self):
|
557 |
+
"""
|
558 |
+
Does the index of this FermionicOperator allow values below fermi?
|
559 |
+
|
560 |
+
Examples
|
561 |
+
========
|
562 |
+
|
563 |
+
>>> from sympy import Symbol
|
564 |
+
>>> from sympy.physics.secondquant import F
|
565 |
+
>>> a = Symbol('a', above_fermi=True)
|
566 |
+
>>> i = Symbol('i', below_fermi=True)
|
567 |
+
>>> p = Symbol('p')
|
568 |
+
|
569 |
+
>>> F(a).is_below_fermi
|
570 |
+
False
|
571 |
+
>>> F(i).is_below_fermi
|
572 |
+
True
|
573 |
+
>>> F(p).is_below_fermi
|
574 |
+
True
|
575 |
+
|
576 |
+
The same applies to creation operators Fd
|
577 |
+
|
578 |
+
"""
|
579 |
+
return not self.args[0].assumptions0.get("above_fermi")
|
580 |
+
|
581 |
+
@property
|
582 |
+
def is_only_below_fermi(self):
|
583 |
+
"""
|
584 |
+
Is the index of this FermionicOperator restricted to values below fermi?
|
585 |
+
|
586 |
+
Examples
|
587 |
+
========
|
588 |
+
|
589 |
+
>>> from sympy import Symbol
|
590 |
+
>>> from sympy.physics.secondquant import F
|
591 |
+
>>> a = Symbol('a', above_fermi=True)
|
592 |
+
>>> i = Symbol('i', below_fermi=True)
|
593 |
+
>>> p = Symbol('p')
|
594 |
+
|
595 |
+
>>> F(a).is_only_below_fermi
|
596 |
+
False
|
597 |
+
>>> F(i).is_only_below_fermi
|
598 |
+
True
|
599 |
+
>>> F(p).is_only_below_fermi
|
600 |
+
False
|
601 |
+
|
602 |
+
The same applies to creation operators Fd
|
603 |
+
"""
|
604 |
+
return self.is_below_fermi and not self.is_above_fermi
|
605 |
+
|
606 |
+
@property
|
607 |
+
def is_only_above_fermi(self):
|
608 |
+
"""
|
609 |
+
Is the index of this FermionicOperator restricted to values above fermi?
|
610 |
+
|
611 |
+
Examples
|
612 |
+
========
|
613 |
+
|
614 |
+
>>> from sympy import Symbol
|
615 |
+
>>> from sympy.physics.secondquant import F
|
616 |
+
>>> a = Symbol('a', above_fermi=True)
|
617 |
+
>>> i = Symbol('i', below_fermi=True)
|
618 |
+
>>> p = Symbol('p')
|
619 |
+
|
620 |
+
>>> F(a).is_only_above_fermi
|
621 |
+
True
|
622 |
+
>>> F(i).is_only_above_fermi
|
623 |
+
False
|
624 |
+
>>> F(p).is_only_above_fermi
|
625 |
+
False
|
626 |
+
|
627 |
+
The same applies to creation operators Fd
|
628 |
+
"""
|
629 |
+
return self.is_above_fermi and not self.is_below_fermi
|
630 |
+
|
631 |
+
def _sortkey(self):
|
632 |
+
h = hash(self)
|
633 |
+
label = str(self.args[0])
|
634 |
+
|
635 |
+
if self.is_only_q_creator:
|
636 |
+
return 1, label, h
|
637 |
+
if self.is_only_q_annihilator:
|
638 |
+
return 4, label, h
|
639 |
+
if isinstance(self, Annihilator):
|
640 |
+
return 3, label, h
|
641 |
+
if isinstance(self, Creator):
|
642 |
+
return 2, label, h
|
643 |
+
|
644 |
+
|
645 |
+
class AnnihilateFermion(FermionicOperator, Annihilator):
|
646 |
+
"""
|
647 |
+
Fermionic annihilation operator.
|
648 |
+
"""
|
649 |
+
|
650 |
+
op_symbol = 'f'
|
651 |
+
|
652 |
+
def _dagger_(self):
|
653 |
+
return CreateFermion(self.state)
|
654 |
+
|
655 |
+
def apply_operator(self, state):
|
656 |
+
"""
|
657 |
+
Apply state to self if self is not symbolic and state is a FockStateKet, else
|
658 |
+
multiply self by state.
|
659 |
+
|
660 |
+
Examples
|
661 |
+
========
|
662 |
+
|
663 |
+
>>> from sympy.physics.secondquant import B, Dagger, BKet
|
664 |
+
>>> from sympy.abc import x, y, n
|
665 |
+
>>> Dagger(B(x)).apply_operator(y)
|
666 |
+
y*CreateBoson(x)
|
667 |
+
>>> B(0).apply_operator(BKet((n,)))
|
668 |
+
sqrt(n)*FockStateBosonKet((n - 1,))
|
669 |
+
"""
|
670 |
+
if isinstance(state, FockStateFermionKet):
|
671 |
+
element = self.state
|
672 |
+
return state.down(element)
|
673 |
+
|
674 |
+
elif isinstance(state, Mul):
|
675 |
+
c_part, nc_part = state.args_cnc()
|
676 |
+
if isinstance(nc_part[0], FockStateFermionKet):
|
677 |
+
element = self.state
|
678 |
+
return Mul(*(c_part + [nc_part[0].down(element)] + nc_part[1:]))
|
679 |
+
else:
|
680 |
+
return Mul(self, state)
|
681 |
+
|
682 |
+
else:
|
683 |
+
return Mul(self, state)
|
684 |
+
|
685 |
+
@property
|
686 |
+
def is_q_creator(self):
|
687 |
+
"""
|
688 |
+
Can we create a quasi-particle? (create hole or create particle)
|
689 |
+
If so, would that be above or below the fermi surface?
|
690 |
+
|
691 |
+
Examples
|
692 |
+
========
|
693 |
+
|
694 |
+
>>> from sympy import Symbol
|
695 |
+
>>> from sympy.physics.secondquant import F
|
696 |
+
>>> a = Symbol('a', above_fermi=True)
|
697 |
+
>>> i = Symbol('i', below_fermi=True)
|
698 |
+
>>> p = Symbol('p')
|
699 |
+
|
700 |
+
>>> F(a).is_q_creator
|
701 |
+
0
|
702 |
+
>>> F(i).is_q_creator
|
703 |
+
-1
|
704 |
+
>>> F(p).is_q_creator
|
705 |
+
-1
|
706 |
+
|
707 |
+
"""
|
708 |
+
if self.is_below_fermi:
|
709 |
+
return -1
|
710 |
+
return 0
|
711 |
+
|
712 |
+
@property
|
713 |
+
def is_q_annihilator(self):
|
714 |
+
"""
|
715 |
+
Can we destroy a quasi-particle? (annihilate hole or annihilate particle)
|
716 |
+
If so, would that be above or below the fermi surface?
|
717 |
+
|
718 |
+
Examples
|
719 |
+
========
|
720 |
+
|
721 |
+
>>> from sympy import Symbol
|
722 |
+
>>> from sympy.physics.secondquant import F
|
723 |
+
>>> a = Symbol('a', above_fermi=1)
|
724 |
+
>>> i = Symbol('i', below_fermi=1)
|
725 |
+
>>> p = Symbol('p')
|
726 |
+
|
727 |
+
>>> F(a).is_q_annihilator
|
728 |
+
1
|
729 |
+
>>> F(i).is_q_annihilator
|
730 |
+
0
|
731 |
+
>>> F(p).is_q_annihilator
|
732 |
+
1
|
733 |
+
|
734 |
+
"""
|
735 |
+
if self.is_above_fermi:
|
736 |
+
return 1
|
737 |
+
return 0
|
738 |
+
|
739 |
+
@property
|
740 |
+
def is_only_q_creator(self):
|
741 |
+
"""
|
742 |
+
Always create a quasi-particle? (create hole or create particle)
|
743 |
+
|
744 |
+
Examples
|
745 |
+
========
|
746 |
+
|
747 |
+
>>> from sympy import Symbol
|
748 |
+
>>> from sympy.physics.secondquant import F
|
749 |
+
>>> a = Symbol('a', above_fermi=True)
|
750 |
+
>>> i = Symbol('i', below_fermi=True)
|
751 |
+
>>> p = Symbol('p')
|
752 |
+
|
753 |
+
>>> F(a).is_only_q_creator
|
754 |
+
False
|
755 |
+
>>> F(i).is_only_q_creator
|
756 |
+
True
|
757 |
+
>>> F(p).is_only_q_creator
|
758 |
+
False
|
759 |
+
|
760 |
+
"""
|
761 |
+
return self.is_only_below_fermi
|
762 |
+
|
763 |
+
@property
|
764 |
+
def is_only_q_annihilator(self):
|
765 |
+
"""
|
766 |
+
Always destroy a quasi-particle? (annihilate hole or annihilate particle)
|
767 |
+
|
768 |
+
Examples
|
769 |
+
========
|
770 |
+
|
771 |
+
>>> from sympy import Symbol
|
772 |
+
>>> from sympy.physics.secondquant import F
|
773 |
+
>>> a = Symbol('a', above_fermi=True)
|
774 |
+
>>> i = Symbol('i', below_fermi=True)
|
775 |
+
>>> p = Symbol('p')
|
776 |
+
|
777 |
+
>>> F(a).is_only_q_annihilator
|
778 |
+
True
|
779 |
+
>>> F(i).is_only_q_annihilator
|
780 |
+
False
|
781 |
+
>>> F(p).is_only_q_annihilator
|
782 |
+
False
|
783 |
+
|
784 |
+
"""
|
785 |
+
return self.is_only_above_fermi
|
786 |
+
|
787 |
+
def __repr__(self):
|
788 |
+
return "AnnihilateFermion(%s)" % self.state
|
789 |
+
|
790 |
+
def _latex(self, printer):
|
791 |
+
if self.state is S.Zero:
|
792 |
+
return "a_{0}"
|
793 |
+
else:
|
794 |
+
return "a_{%s}" % self.state.name
|
795 |
+
|
796 |
+
|
797 |
+
class CreateFermion(FermionicOperator, Creator):
|
798 |
+
"""
|
799 |
+
Fermionic creation operator.
|
800 |
+
"""
|
801 |
+
|
802 |
+
op_symbol = 'f+'
|
803 |
+
|
804 |
+
def _dagger_(self):
|
805 |
+
return AnnihilateFermion(self.state)
|
806 |
+
|
807 |
+
def apply_operator(self, state):
|
808 |
+
"""
|
809 |
+
Apply state to self if self is not symbolic and state is a FockStateKet, else
|
810 |
+
multiply self by state.
|
811 |
+
|
812 |
+
Examples
|
813 |
+
========
|
814 |
+
|
815 |
+
>>> from sympy.physics.secondquant import B, Dagger, BKet
|
816 |
+
>>> from sympy.abc import x, y, n
|
817 |
+
>>> Dagger(B(x)).apply_operator(y)
|
818 |
+
y*CreateBoson(x)
|
819 |
+
>>> B(0).apply_operator(BKet((n,)))
|
820 |
+
sqrt(n)*FockStateBosonKet((n - 1,))
|
821 |
+
"""
|
822 |
+
if isinstance(state, FockStateFermionKet):
|
823 |
+
element = self.state
|
824 |
+
return state.up(element)
|
825 |
+
|
826 |
+
elif isinstance(state, Mul):
|
827 |
+
c_part, nc_part = state.args_cnc()
|
828 |
+
if isinstance(nc_part[0], FockStateFermionKet):
|
829 |
+
element = self.state
|
830 |
+
return Mul(*(c_part + [nc_part[0].up(element)] + nc_part[1:]))
|
831 |
+
|
832 |
+
return Mul(self, state)
|
833 |
+
|
834 |
+
@property
|
835 |
+
def is_q_creator(self):
|
836 |
+
"""
|
837 |
+
Can we create a quasi-particle? (create hole or create particle)
|
838 |
+
If so, would that be above or below the fermi surface?
|
839 |
+
|
840 |
+
Examples
|
841 |
+
========
|
842 |
+
|
843 |
+
>>> from sympy import Symbol
|
844 |
+
>>> from sympy.physics.secondquant import Fd
|
845 |
+
>>> a = Symbol('a', above_fermi=True)
|
846 |
+
>>> i = Symbol('i', below_fermi=True)
|
847 |
+
>>> p = Symbol('p')
|
848 |
+
|
849 |
+
>>> Fd(a).is_q_creator
|
850 |
+
1
|
851 |
+
>>> Fd(i).is_q_creator
|
852 |
+
0
|
853 |
+
>>> Fd(p).is_q_creator
|
854 |
+
1
|
855 |
+
|
856 |
+
"""
|
857 |
+
if self.is_above_fermi:
|
858 |
+
return 1
|
859 |
+
return 0
|
860 |
+
|
861 |
+
@property
|
862 |
+
def is_q_annihilator(self):
|
863 |
+
"""
|
864 |
+
Can we destroy a quasi-particle? (annihilate hole or annihilate particle)
|
865 |
+
If so, would that be above or below the fermi surface?
|
866 |
+
|
867 |
+
Examples
|
868 |
+
========
|
869 |
+
|
870 |
+
>>> from sympy import Symbol
|
871 |
+
>>> from sympy.physics.secondquant import Fd
|
872 |
+
>>> a = Symbol('a', above_fermi=1)
|
873 |
+
>>> i = Symbol('i', below_fermi=1)
|
874 |
+
>>> p = Symbol('p')
|
875 |
+
|
876 |
+
>>> Fd(a).is_q_annihilator
|
877 |
+
0
|
878 |
+
>>> Fd(i).is_q_annihilator
|
879 |
+
-1
|
880 |
+
>>> Fd(p).is_q_annihilator
|
881 |
+
-1
|
882 |
+
|
883 |
+
"""
|
884 |
+
if self.is_below_fermi:
|
885 |
+
return -1
|
886 |
+
return 0
|
887 |
+
|
888 |
+
@property
|
889 |
+
def is_only_q_creator(self):
|
890 |
+
"""
|
891 |
+
Always create a quasi-particle? (create hole or create particle)
|
892 |
+
|
893 |
+
Examples
|
894 |
+
========
|
895 |
+
|
896 |
+
>>> from sympy import Symbol
|
897 |
+
>>> from sympy.physics.secondquant import Fd
|
898 |
+
>>> a = Symbol('a', above_fermi=True)
|
899 |
+
>>> i = Symbol('i', below_fermi=True)
|
900 |
+
>>> p = Symbol('p')
|
901 |
+
|
902 |
+
>>> Fd(a).is_only_q_creator
|
903 |
+
True
|
904 |
+
>>> Fd(i).is_only_q_creator
|
905 |
+
False
|
906 |
+
>>> Fd(p).is_only_q_creator
|
907 |
+
False
|
908 |
+
|
909 |
+
"""
|
910 |
+
return self.is_only_above_fermi
|
911 |
+
|
912 |
+
@property
|
913 |
+
def is_only_q_annihilator(self):
|
914 |
+
"""
|
915 |
+
Always destroy a quasi-particle? (annihilate hole or annihilate particle)
|
916 |
+
|
917 |
+
Examples
|
918 |
+
========
|
919 |
+
|
920 |
+
>>> from sympy import Symbol
|
921 |
+
>>> from sympy.physics.secondquant import Fd
|
922 |
+
>>> a = Symbol('a', above_fermi=True)
|
923 |
+
>>> i = Symbol('i', below_fermi=True)
|
924 |
+
>>> p = Symbol('p')
|
925 |
+
|
926 |
+
>>> Fd(a).is_only_q_annihilator
|
927 |
+
False
|
928 |
+
>>> Fd(i).is_only_q_annihilator
|
929 |
+
True
|
930 |
+
>>> Fd(p).is_only_q_annihilator
|
931 |
+
False
|
932 |
+
|
933 |
+
"""
|
934 |
+
return self.is_only_below_fermi
|
935 |
+
|
936 |
+
def __repr__(self):
|
937 |
+
return "CreateFermion(%s)" % self.state
|
938 |
+
|
939 |
+
def _latex(self, printer):
    """LaTeX form of the creation operator, e.g. ``{a^\\dagger_{p}}``."""
    # The vacuum label S.Zero has no .name attribute, so it is special-cased.
    if self.state is not S.Zero:
        return "{a^\\dagger_{%s}}" % self.state.name
    return "{a^\\dagger_{0}}"
|
944 |
+
|
945 |
+
# Short public aliases for the fermionic creation/annihilation operators,
# used throughout this module and in the doctests.
Fd = CreateFermion
F = AnnihilateFermion
|
947 |
+
|
948 |
+
|
949 |
+
class FockState(Expr):
    """
    Many particle Fock state with a sequence of occupation numbers.

    Anywhere you can have a FockState, you can also have S.Zero.
    All code must check for this!

    Base class to represent FockStates.
    """
    is_commutative = False

    def __new__(cls, occupations):
        """
        occupations is a list with two possible meanings:

        - For bosons it is a list of occupation numbers.
          Element i is the number of particles in state i.

        - For fermions it is a list of occupied orbits.
          Element 0 is the state that was occupied first, element i
          is the i'th occupied state.
        """
        occupations = list(map(sympify, occupations))
        # Store the occupations as a single Tuple argument so sympy's
        # args-based machinery (printing, substitution) works uniformly.
        obj = Basic.__new__(cls, Tuple(*occupations))
        return obj

    def __getitem__(self, i):
        # Index into the occupation Tuple; i may be any int-convertible.
        i = int(i)
        return self.args[0][i]

    def __repr__(self):
        # self.args is a 1-tuple, so %-formatting consumes its single
        # element (the occupation Tuple) for the lone %r.
        return ("FockState(%r)") % (self.args)

    def __str__(self):
        # Subclasses set lbracket/rbracket ('|', '>', '<'); the base class
        # has none, hence the getattr defaults.
        return "%s%r%s" % (getattr(self, 'lbracket', ""), self._labels(), getattr(self, 'rbracket', ""))

    def _labels(self):
        # Hook for subclasses (FermionState negates hole labels).
        return self.args[0]

    def __len__(self):
        return len(self.args[0])

    def _latex(self, printer):
        # Same bracket convention as __str__, using the *_latex attributes.
        return "%s%s%s" % (getattr(self, 'lbracket_latex', ""), printer._print(self._labels()), getattr(self, 'rbracket_latex', ""))
|
993 |
+
|
994 |
+
|
995 |
+
class BosonState(FockState):
    """
    Base class for FockStateBoson(Ket/Bra).
    """

    def up(self, i):
        """
        Performs the action of a creation operator.

        Examples
        ========

        >>> from sympy.physics.secondquant import BBra
        >>> b = BBra([1, 2])
        >>> b
        FockStateBosonBra((1, 2))
        >>> b.up(1)
        FockStateBosonBra((1, 3))
        """
        idx = int(i)
        occupations = list(self.args[0])
        # One more particle in orbit idx.
        occupations[idx] = occupations[idx] + S.One
        return self.__class__(occupations)

    def down(self, i):
        """
        Performs the action of an annihilation operator.

        Examples
        ========

        >>> from sympy.physics.secondquant import BBra
        >>> b = BBra([1, 2])
        >>> b
        FockStateBosonBra((1, 2))
        >>> b.down(1)
        FockStateBosonBra((1, 1))
        """
        idx = int(i)
        occupations = list(self.args[0])
        # Annihilating an empty orbit kills the state.
        if occupations[idx] == S.Zero:
            return S.Zero
        occupations[idx] = occupations[idx] - S.One
        return self.__class__(occupations)
|
1040 |
+
|
1041 |
+
|
1042 |
+
class FermionState(FockState):
    """
    Base class for FockStateFermion(Ket/Bra).
    """

    # Number of orbits below the fermi surface; caps how many hole
    # states a single FermionState may contain.
    fermi_level = 0

    def __new__(cls, occupations, fermi_level=0):
        occupations = list(map(sympify, occupations))
        if len(occupations) > 1:
            try:
                # Canonical ordering of anticommuting labels; `sign` counts
                # the transpositions performed.
                (occupations, sign) = _sort_anticommuting_fermions(
                    occupations, key=hash)
            except ViolationOfPauliPrinciple:
                # Two identical fermionic labels -> state vanishes.
                return S.Zero
        else:
            sign = 0

        # NOTE(review): this mutates the *class* attribute, so the fermi
        # level is shared by all instances of the class — the last
        # constructed state wins. Looks intentional-but-fragile; confirm
        # before relying on per-instance fermi levels.
        cls.fermi_level = fermi_level

        if cls._count_holes(occupations) > fermi_level:
            return S.Zero

        # Odd number of swaps flips the overall sign of the state.
        if sign % 2:
            return S.NegativeOne*FockState.__new__(cls, occupations)
        else:
            return FockState.__new__(cls, occupations)

    def up(self, i):
        """
        Performs the action of a creation operator.

        Explanation
        ===========

        If below fermi we try to remove a hole,
        if above fermi we try to create a particle.

        If general index p we return ``Kronecker(p,i)*self``
        where ``i`` is a new symbol with restriction above or below.

        Examples
        ========

        >>> from sympy import Symbol
        >>> from sympy.physics.secondquant import FKet
        >>> a = Symbol('a', above_fermi=True)
        >>> i = Symbol('i', below_fermi=True)
        >>> p = Symbol('p')

        >>> FKet([]).up(a)
        FockStateFermionKet((a,))

        A creator acting on vacuum below fermi vanishes

        >>> FKet([]).up(i)
        0


        """
        present = i in self.args[0]

        if self._only_above_fermi(i):
            # Creating an already-present particle violates Pauli.
            if present:
                return S.Zero
            else:
                return self._add_orbit(i)
        elif self._only_below_fermi(i):
            # Below fermi a creator fills (removes) a hole.
            if present:
                return self._remove_orbit(i)
            else:
                return S.Zero
        else:
            # General index: split into hole and particle contributions
            # via KroneckerDeltas with restricted dummy indices.
            if present:
                hole = Dummy("i", below_fermi=True)
                return KroneckerDelta(i, hole)*self._remove_orbit(i)
            else:
                particle = Dummy("a", above_fermi=True)
                return KroneckerDelta(i, particle)*self._add_orbit(i)

    def down(self, i):
        """
        Performs the action of an annihilation operator.

        Explanation
        ===========

        If below fermi we try to create a hole,
        If above fermi we try to remove a particle.

        If general index p we return ``Kronecker(p,i)*self``
        where ``i`` is a new symbol with restriction above or below.

        Examples
        ========

        >>> from sympy import Symbol
        >>> from sympy.physics.secondquant import FKet
        >>> a = Symbol('a', above_fermi=True)
        >>> i = Symbol('i', below_fermi=True)
        >>> p = Symbol('p')

        An annihilator acting on vacuum above fermi vanishes

        >>> FKet([]).down(a)
        0

        Also below fermi, it vanishes, unless we specify a fermi level > 0

        >>> FKet([]).down(i)
        0
        >>> FKet([],4).down(i)
        FockStateFermionKet((i,))

        """
        present = i in self.args[0]

        if self._only_above_fermi(i):
            if present:
                return self._remove_orbit(i)
            else:
                return S.Zero

        elif self._only_below_fermi(i):
            # Below fermi an annihilator creates a hole (if not present).
            if present:
                return S.Zero
            else:
                return self._add_orbit(i)
        else:
            if present:
                hole = Dummy("i", below_fermi=True)
                return KroneckerDelta(i, hole)*self._add_orbit(i)
            else:
                particle = Dummy("a", above_fermi=True)
                return KroneckerDelta(i, particle)*self._remove_orbit(i)

    @classmethod
    def _only_below_fermi(cls, i):
        """
        Tests if given orbit is only below fermi surface.

        If nothing can be concluded we return a conservative False.
        """
        if i.is_number:
            return i <= cls.fermi_level
        if i.assumptions0.get('below_fermi'):
            return True
        return False

    @classmethod
    def _only_above_fermi(cls, i):
        """
        Tests if given orbit is only above fermi surface.

        If fermi level has not been set we return True.
        If nothing can be concluded we return a conservative False.
        """
        if i.is_number:
            return i > cls.fermi_level
        if i.assumptions0.get('above_fermi'):
            return True
        # fermi_level == 0 means everything is above fermi.
        return not cls.fermi_level

    def _remove_orbit(self, i):
        """
        Removes particle/fills hole in orbit i. No input tests performed here.
        """
        new_occs = list(self.args[0])
        pos = new_occs.index(i)
        del new_occs[pos]
        # Removing from an odd position requires an odd number of
        # anticommutations, flipping the sign.
        if (pos) % 2:
            return S.NegativeOne*self.__class__(new_occs, self.fermi_level)
        else:
            return self.__class__(new_occs, self.fermi_level)

    def _add_orbit(self, i):
        """
        Adds particle/creates hole in orbit i. No input tests performed here.
        """
        # Prepend; __new__ re-sorts and tracks the resulting sign.
        return self.__class__((i,) + self.args[0], self.fermi_level)

    @classmethod
    def _count_holes(cls, list):
        """
        Returns the number of identified hole states in list.
        """
        return len([i for i in list if cls._only_below_fermi(i)])

    def _negate_holes(self, list):
        # Hole states are displayed with negative labels.
        return tuple([-i if i <= self.fermi_level else i for i in list])

    def __repr__(self):
        if self.fermi_level:
            return "FockStateKet(%r, fermi_level=%s)" % (self.args[0], self.fermi_level)
        else:
            return "FockStateKet(%r)" % (self.args[0],)

    def _labels(self):
        return self._negate_holes(self.args[0])
|
1241 |
+
|
1242 |
+
|
1243 |
+
class FockStateKet(FockState):
    """
    Representation of a ket.
    """
    # Delimiters consumed by FockState.__str__ and FockState._latex.
    lbracket = '|'
    rbracket = '>'
    lbracket_latex = r'\left|'
    rbracket_latex = r'\right\rangle'
|
1251 |
+
|
1252 |
+
|
1253 |
+
class FockStateBra(FockState):
    """
    Representation of a bra.
    """
    lbracket = '<'
    rbracket = '|'
    lbracket_latex = r'\left\langle'
    rbracket_latex = r'\right|'

    def __mul__(self, other):
        """Contract with a ket into an InnerProduct; otherwise defer to Expr."""
        if not isinstance(other, FockStateKet):
            return Expr.__mul__(self, other)
        return InnerProduct(self, other)
|
1267 |
+
|
1268 |
+
|
1269 |
+
class FockStateBosonKet(BosonState, FockStateKet):
    """
    Many particle Fock state with a sequence of occupation numbers.

    Occupation numbers can be any integer >= 0.

    Examples
    ========

    >>> from sympy.physics.secondquant import BKet
    >>> BKet([1, 2])
    FockStateBosonKet((1, 2))
    """
    def _dagger_(self):
        # Hermitian conjugate of a ket is the bra with the same occupations.
        return FockStateBosonBra(*self.args)
|
1284 |
+
|
1285 |
+
|
1286 |
+
class FockStateBosonBra(BosonState, FockStateBra):
    """
    Describes a collection of BosonBra particles.

    Examples
    ========

    >>> from sympy.physics.secondquant import BBra
    >>> BBra([1, 2])
    FockStateBosonBra((1, 2))
    """
    def _dagger_(self):
        # Hermitian conjugate of a bra is the ket with the same occupations.
        return FockStateBosonKet(*self.args)
|
1299 |
+
|
1300 |
+
|
1301 |
+
class FockStateFermionKet(FermionState, FockStateKet):
    """
    Many-particle Fock state with a sequence of occupied orbits.

    Explanation
    ===========

    Each state can only have one particle, so we choose to store a list of
    occupied orbits rather than a tuple with occupation numbers (zeros and ones).

    states below fermi level are holes, and are represented by negative labels
    in the occupation list.

    For symbolic state labels, the fermi_level caps the number of allowed hole-
    states.

    Examples
    ========

    >>> from sympy.physics.secondquant import FKet
    >>> FKet([1, 2])
    FockStateFermionKet((1, 2))
    """
    def _dagger_(self):
        # Hermitian conjugate of a fermion ket is the matching bra.
        return FockStateFermionBra(*self.args)
|
1326 |
+
|
1327 |
+
|
1328 |
+
class FockStateFermionBra(FermionState, FockStateBra):
    """
    Bra counterpart of a fermionic Fock state.

    See Also
    ========

    FockStateFermionKet

    Examples
    ========

    >>> from sympy.physics.secondquant import FBra
    >>> FBra([1, 2])
    FockStateFermionBra((1, 2))
    """
    def _dagger_(self):
        # Hermitian conjugate of a fermion bra is the matching ket.
        return FockStateFermionKet(*self.args)
|
1344 |
+
|
1345 |
+
# Short public aliases for the concrete Fock-state classes.
BBra = FockStateBosonBra
BKet = FockStateBosonKet
FBra = FockStateFermionBra
FKet = FockStateFermionKet
|
1349 |
+
|
1350 |
+
|
1351 |
+
def _apply_Mul(m):
    """
    Take a Mul instance with operators and apply them to states.

    Explanation
    ===========

    This method applies all operators with integer state labels
    to the actual states. For symbolic state labels, nothing is done.
    When inner products of FockStates are encountered (like <a|b>),
    they are converted to instances of InnerProduct.

    This does not currently work on double inner products like,
    <a|b><c|d>.

    If the argument is not a Mul, it is simply returned as is.
    """
    if not isinstance(m, Mul):
        return m
    # Split into commutative coefficients and the ordered noncommutative
    # factors (operators and states).
    c_part, nc_part = m.args_cnc()
    n_nc = len(nc_part)
    if n_nc in (0, 1):
        # Nothing to apply: no operator/state pair exists.
        return m
    else:
        # Work from the right: the rightmost factor must be a ket for any
        # operator application to make sense.
        last = nc_part[-1]
        next_to_last = nc_part[-2]
        if isinstance(last, FockStateKet):
            if isinstance(next_to_last, SqOperator):
                if next_to_last.is_symbolic:
                    return m
                else:
                    result = next_to_last.apply_operator(last)
                    if result == 0:
                        return S.Zero
                    else:
                        # Recurse with the pair replaced by its result.
                        return _apply_Mul(Mul(*(c_part + nc_part[:-2] + [result])))
            elif isinstance(next_to_last, Pow):
                # Integer power of an operator: apply it exp times.
                if isinstance(next_to_last.base, SqOperator) and \
                        next_to_last.exp.is_Integer:
                    if next_to_last.base.is_symbolic:
                        return m
                    else:
                        result = last
                        for i in range(next_to_last.exp):
                            result = next_to_last.base.apply_operator(result)
                            if result == 0:
                                break
                        if result == 0:
                            return S.Zero
                        else:
                            return _apply_Mul(Mul(*(c_part + nc_part[:-2] + [result])))
                else:
                    return m
            elif isinstance(next_to_last, FockStateBra):
                # <bra|ket> collapses to an InnerProduct (a commutative
                # product of KroneckerDeltas).
                result = InnerProduct(next_to_last, last)
                if result == 0:
                    return S.Zero
                else:
                    return _apply_Mul(Mul(*(c_part + nc_part[:-2] + [result])))
            else:
                return m
        else:
            return m
|
1414 |
+
|
1415 |
+
|
1416 |
+
def apply_operators(e):
    """
    Take a SymPy expression with operators and states and apply the operators.

    The expression is expanded first, then every Mul subexpression is
    processed by ``_apply_Mul`` and the results are substituted back in.

    Examples
    ========

    >>> from sympy.physics.secondquant import apply_operators
    >>> from sympy import sympify
    >>> apply_operators(sympify(3)+4)
    7
    """
    e = e.expand()
    muls = e.atoms(Mul)
    # atoms() already returns an iterable set; the previous iter() wrapper
    # was redundant and has been dropped.
    subs_list = [(m, _apply_Mul(m)) for m in muls]
    return e.subs(subs_list)
|
1432 |
+
|
1433 |
+
|
1434 |
+
class InnerProduct(Basic):
    """
    An unevaluated inner product between a bra and ket.

    Explanation
    ===========

    Currently this class just reduces things to a product of
    Kronecker Deltas. In the future, we could introduce abstract
    states like ``|a>`` and ``|b>``, and leave the inner product unevaluated as
    ``<a|b>``.

    """
    is_commutative = True

    def __new__(cls, bra, ket):
        if not isinstance(bra, FockStateBra):
            raise TypeError("must be a bra")
        if not isinstance(ket, FockStateKet):
            raise TypeError("must be a ket")
        # NOTE(review): __new__ returns the evaluated product directly, so
        # no InnerProduct instance is ever constructed via this path and
        # the bra/ket properties below appear unreachable — confirm.
        return cls.eval(bra, ket)

    @classmethod
    def eval(cls, bra, ket):
        """Reduce <bra|ket> to a product of KroneckerDeltas."""
        result = S.One
        for i, j in zip(bra.args[0], ket.args[0]):
            result *= KroneckerDelta(i, j)
            if result == 0:
                # Early exit: one vanishing delta kills the whole product.
                break
        return result

    @property
    def bra(self):
        """Returns the bra part of the state"""
        return self.args[0]

    @property
    def ket(self):
        """Returns the ket part of the state"""
        return self.args[1]

    def __repr__(self):
        # Join "<...|" and "|...>" into "<...|...>" by trimming the
        # bra's trailing '|' and the ket's leading '|'.
        sbra = repr(self.bra)
        sket = repr(self.ket)
        return "%s|%s" % (sbra[:-1], sket[1:])

    def __str__(self):
        return self.__repr__()
|
1482 |
+
|
1483 |
+
|
1484 |
+
def matrix_rep(op, basis):
    """
    Find the representation of an operator in a basis.

    Examples
    ========

    >>> from sympy.physics.secondquant import VarBosonicBasis, B, matrix_rep
    >>> b = VarBosonicBasis(5)
    >>> o = B(0)
    >>> matrix_rep(o, b)
    Matrix([
    [0, 1, 0, 0, 0],
    [0, 0, sqrt(2), 0, 0],
    [0, 0, 0, sqrt(3), 0],
    [0, 0, 0, 0, 2],
    [0, 0, 0, 0, 0]])
    """
    size = len(basis)
    rep = zeros(size)
    for row in range(size):
        # The bra is the same for the whole row, so compute it once.
        bra = Dagger(basis[row])
        for col in range(size):
            rep[row, col] = apply_operators(bra*op*basis[col])
    return rep
|
1507 |
+
|
1508 |
+
|
1509 |
+
class BosonicBasis:
    """
    Base class for a basis set of bosonic Fock states.
    """
    # Marker base class; concrete bases implement the actual state lists.
    pass
|
1514 |
+
|
1515 |
+
|
1516 |
+
class VarBosonicBasis:
    """
    A single state, variable particle number basis set.

    Examples
    ========

    >>> from sympy.physics.secondquant import VarBosonicBasis
    >>> b = VarBosonicBasis(5)
    >>> b
    [FockState((0,)), FockState((1,)), FockState((2,)),
     FockState((3,)), FockState((4,))]
    """

    def __init__(self, n_max):
        self.n_max = n_max
        self._build_states()

    def _build_states(self):
        # One single-orbit ket per occupation number 0 .. n_max-1.
        self.basis = [FockStateBosonKet([count]) for count in range(self.n_max)]
        self.n_basis = len(self.basis)

    def index(self, state):
        """
        Returns the index of state in basis.

        Examples
        ========

        >>> from sympy.physics.secondquant import VarBosonicBasis
        >>> b = VarBosonicBasis(3)
        >>> state = b.state(1)
        >>> b
        [FockState((0,)), FockState((1,)), FockState((2,))]
        >>> state
        FockStateBosonKet((1,))
        >>> b.index(state)
        1
        """
        return self.basis.index(state)

    def state(self, i):
        """
        The state of a single basis.

        Examples
        ========

        >>> from sympy.physics.secondquant import VarBosonicBasis
        >>> b = VarBosonicBasis(5)
        >>> b.state(3)
        FockStateBosonKet((3,))
        """
        return self.basis[i]

    def __getitem__(self, i):
        return self.state(i)

    def __len__(self):
        return len(self.basis)

    def __repr__(self):
        return repr(self.basis)
|
1581 |
+
|
1582 |
+
|
1583 |
+
class FixedBosonicBasis(BosonicBasis):
    """
    Fixed particle number basis set.

    Examples
    ========

    >>> from sympy.physics.secondquant import FixedBosonicBasis
    >>> b = FixedBosonicBasis(2, 2)
    >>> state = b.state(1)
    >>> b
    [FockState((2, 0)), FockState((1, 1)), FockState((0, 2))]
    >>> state
    FockStateBosonKet((1, 1))
    >>> b.index(state)
    1
    """
    def __init__(self, n_particles, n_levels):
        self.n_particles = n_particles
        self.n_levels = n_levels
        self._build_particle_locations()
        self._build_states()

    def _build_particle_locations(self):
        """Enumerate all placements of n_particles into n_levels orbits.

        Produces every tuple (i0, ..., i_{k-1}) with
        n_levels > i0 >= i1 >= ... >= i_{k-1} >= 0, in the same
        lexicographic order the original nested loops generated.
        """
        # This replaces the previous eval() of a dynamically built list
        # comprehension string — an unnecessary code smell and potential
        # security hazard. itertools.product iterates lexicographically,
        # so filtering for non-increasing tuples reproduces the exact
        # original ordering. For n_particles == 0 this now yields the
        # single vacuum placement () instead of n_levels duplicates of it.
        from itertools import product
        self.particle_locations = [
            locs
            for locs in product(range(self.n_levels), repeat=self.n_particles)
            if all(locs[j] >= locs[j + 1] for j in range(len(locs) - 1))
        ]

    def _build_states(self):
        # Convert each placement tuple into occupation numbers per level.
        self.basis = []
        for tuple_of_indices in self.particle_locations:
            occ_numbers = self.n_levels*[0]
            for level in tuple_of_indices:
                occ_numbers[level] += 1
            self.basis.append(FockStateBosonKet(occ_numbers))
        self.n_basis = len(self.basis)

    def index(self, state):
        """Returns the index of state in basis.

        Examples
        ========

        >>> from sympy.physics.secondquant import FixedBosonicBasis
        >>> b = FixedBosonicBasis(2, 3)
        >>> b.index(b.state(3))
        3
        """
        return self.basis.index(state)

    def state(self, i):
        """Returns the state that lies at index i of the basis

        Examples
        ========

        >>> from sympy.physics.secondquant import FixedBosonicBasis
        >>> b = FixedBosonicBasis(2, 3)
        >>> b.state(3)
        FockStateBosonKet((1, 0, 1))
        """
        return self.basis[i]

    def __getitem__(self, i):
        return self.state(i)

    def __len__(self):
        return len(self.basis)

    def __repr__(self):
        return repr(self.basis)
|
1663 |
+
|
1664 |
+
|
1665 |
+
class Commutator(Function):
    """
    The Commutator: [A, B] = A*B - B*A

    The arguments are ordered according to .__cmp__()

    Examples
    ========

    >>> from sympy import symbols
    >>> from sympy.physics.secondquant import Commutator
    >>> A, B = symbols('A,B', commutative=False)
    >>> Commutator(B, A)
    -Commutator(A, B)

    Evaluate the commutator with .doit()

    >>> comm = Commutator(A,B); comm
    Commutator(A, B)
    >>> comm.doit()
    A*B - B*A


    For two second quantization operators the commutator is evaluated
    immediately:

    >>> from sympy.physics.secondquant import Fd, F
    >>> a = symbols('a', above_fermi=True)
    >>> i = symbols('i', below_fermi=True)
    >>> p,q = symbols('p,q')

    >>> Commutator(Fd(a),Fd(i))
    2*NO(CreateFermion(a)*CreateFermion(i))

    But for more complicated expressions, the evaluation is triggered by
    a call to .doit()

    >>> comm = Commutator(Fd(p)*Fd(q),F(i)); comm
    Commutator(CreateFermion(p)*CreateFermion(q), AnnihilateFermion(i))
    >>> comm.doit(wicks=True)
    -KroneckerDelta(i, p)*CreateFermion(q) +
    KroneckerDelta(i, q)*CreateFermion(p)

    """

    is_commutative = False

    @classmethod
    def eval(cls, a, b):
        """
        The Commutator [A,B] is on canonical form if A < B.

        Returns None (implicitly) when the commutator should stay
        unevaluated in canonical order.

        Examples
        ========

        >>> from sympy.physics.secondquant import Commutator, F, Fd
        >>> from sympy.abc import x
        >>> c1 = Commutator(F(x), Fd(x))
        >>> c2 = Commutator(Fd(x), F(x))
        >>> Commutator.eval(c1, c2)
        0
        """
        # [A, A] = 0; also zero if either argument is falsy/zero.
        if not (a and b):
            return S.Zero
        if a == b:
            return S.Zero
        # Anything commutative commutes with everything.
        if a.is_commutative or b.is_commutative:
            return S.Zero

        #
        # [A+B,C] -> [A,C] + [B,C]
        #
        a = a.expand()
        if isinstance(a, Add):
            return Add(*[cls(term, b) for term in a.args])
        b = b.expand()
        if isinstance(b, Add):
            return Add(*[cls(a, term) for term in b.args])

        #
        # [xA,yB] -> xy*[A,B]
        #
        ca, nca = a.args_cnc()
        cb, ncb = b.args_cnc()
        c_part = list(ca) + list(cb)
        if c_part:
            return Mul(Mul(*c_part), cls(Mul._from_args(nca), Mul._from_args(ncb)))

        #
        # single second quantization operators
        #
        if isinstance(a, BosonicOperator) and isinstance(b, BosonicOperator):
            # [b, b+] = delta; [b+, b] = -delta; any other bosonic pair
            # (the dangling else below) commutes to zero.
            if isinstance(b, CreateBoson) and isinstance(a, AnnihilateBoson):
                return KroneckerDelta(a.state, b.state)
            if isinstance(a, CreateBoson) and isinstance(b, AnnihilateBoson):
                return S.NegativeOne*KroneckerDelta(a.state, b.state)
            else:
                return S.Zero
        if isinstance(a, FermionicOperator) and isinstance(b, FermionicOperator):
            # Fermionic commutators are evaluated via Wick's theorem.
            return wicks(a*b) - wicks(b*a)

        #
        # Canonical ordering of arguments
        #
        if a.sort_key() > b.sort_key():
            return S.NegativeOne*cls(b, a)
        # Falling through (returning None) leaves Commutator(a, b)
        # unevaluated in canonical order.

    def doit(self, **hints):
        """
        Enables the computation of complex expressions.

        Examples
        ========

        >>> from sympy.physics.secondquant import Commutator, F, Fd
        >>> from sympy import symbols
        >>> i, j = symbols('i,j', below_fermi=True)
        >>> a, b = symbols('a,b', above_fermi=True)
        >>> c = Commutator(Fd(a)*F(i),Fd(b)*F(j))
        >>> c.doit(wicks=True)
        0
        """
        a = self.args[0]
        b = self.args[1]

        if hints.get("wicks"):
            a = a.doit(**hints)
            b = b.doit(**hints)
            try:
                return wicks(a*b) - wicks(b*a)
            except ContractionAppliesOnlyToFermions:
                # Fall back to the plain expansion below.
                pass
            except WicksTheoremDoesNotApply:
                pass

        return (a*b - b*a).doit(**hints)

    def __repr__(self):
        return "Commutator(%s,%s)" % (self.args[0], self.args[1])

    def __str__(self):
        return "[%s,%s]" % (self.args[0], self.args[1])

    def _latex(self, printer):
        return "\\left[%s,%s\\right]" % tuple([
            printer._print(arg) for arg in self.args])
|
1811 |
+
|
1812 |
+
|
1813 |
+
class NO(Expr):
|
1814 |
+
"""
|
1815 |
+
This Object is used to represent normal ordering brackets.
|
1816 |
+
|
1817 |
+
i.e. {abcd} sometimes written :abcd:
|
1818 |
+
|
1819 |
+
Explanation
|
1820 |
+
===========
|
1821 |
+
|
1822 |
+
Applying the function NO(arg) to an argument means that all operators in
|
1823 |
+
the argument will be assumed to anticommute, and have vanishing
|
1824 |
+
contractions. This allows an immediate reordering to canonical form
|
1825 |
+
upon object creation.
|
1826 |
+
|
1827 |
+
Examples
|
1828 |
+
========
|
1829 |
+
|
1830 |
+
>>> from sympy import symbols
|
1831 |
+
>>> from sympy.physics.secondquant import NO, F, Fd
|
1832 |
+
>>> p,q = symbols('p,q')
|
1833 |
+
>>> NO(Fd(p)*F(q))
|
1834 |
+
NO(CreateFermion(p)*AnnihilateFermion(q))
|
1835 |
+
>>> NO(F(q)*Fd(p))
|
1836 |
+
-NO(CreateFermion(p)*AnnihilateFermion(q))
|
1837 |
+
|
1838 |
+
|
1839 |
+
Note
|
1840 |
+
====
|
1841 |
+
|
1842 |
+
If you want to generate a normal ordered equivalent of an expression, you
|
1843 |
+
should use the function wicks(). This class only indicates that all
|
1844 |
+
operators inside the brackets anticommute, and have vanishing contractions.
|
1845 |
+
Nothing more, nothing less.
|
1846 |
+
|
1847 |
+
"""
|
1848 |
+
is_commutative = False
|
1849 |
+
|
1850 |
+
def __new__(cls, arg):
|
1851 |
+
"""
|
1852 |
+
Use anticommutation to get canonical form of operators.
|
1853 |
+
|
1854 |
+
Explanation
|
1855 |
+
===========
|
1856 |
+
|
1857 |
+
Employ associativity of normal ordered product: {ab{cd}} = {abcd}
|
1858 |
+
but note that {ab}{cd} /= {abcd}.
|
1859 |
+
|
1860 |
+
We also employ distributivity: {ab + cd} = {ab} + {cd}.
|
1861 |
+
|
1862 |
+
Canonical form also implies expand() {ab(c+d)} = {abc} + {abd}.
|
1863 |
+
|
1864 |
+
"""
|
1865 |
+
|
1866 |
+
# {ab + cd} = {ab} + {cd}
|
1867 |
+
arg = sympify(arg)
|
1868 |
+
arg = arg.expand()
|
1869 |
+
if arg.is_Add:
|
1870 |
+
return Add(*[ cls(term) for term in arg.args])
|
1871 |
+
|
1872 |
+
if arg.is_Mul:
|
1873 |
+
|
1874 |
+
# take coefficient outside of normal ordering brackets
|
1875 |
+
c_part, seq = arg.args_cnc()
|
1876 |
+
if c_part:
|
1877 |
+
coeff = Mul(*c_part)
|
1878 |
+
if not seq:
|
1879 |
+
return coeff
|
1880 |
+
else:
|
1881 |
+
coeff = S.One
|
1882 |
+
|
1883 |
+
# {ab{cd}} = {abcd}
|
1884 |
+
newseq = []
|
1885 |
+
foundit = False
|
1886 |
+
for fac in seq:
|
1887 |
+
if isinstance(fac, NO):
|
1888 |
+
newseq.extend(fac.args)
|
1889 |
+
foundit = True
|
1890 |
+
else:
|
1891 |
+
newseq.append(fac)
|
1892 |
+
if foundit:
|
1893 |
+
return coeff*cls(Mul(*newseq))
|
1894 |
+
|
1895 |
+
# We assume that the user don't mix B and F operators
|
1896 |
+
if isinstance(seq[0], BosonicOperator):
|
1897 |
+
raise NotImplementedError
|
1898 |
+
|
1899 |
+
try:
|
1900 |
+
newseq, sign = _sort_anticommuting_fermions(seq)
|
1901 |
+
except ViolationOfPauliPrinciple:
|
1902 |
+
return S.Zero
|
1903 |
+
|
1904 |
+
if sign % 2:
|
1905 |
+
return (S.NegativeOne*coeff)*cls(Mul(*newseq))
|
1906 |
+
elif sign:
|
1907 |
+
return coeff*cls(Mul(*newseq))
|
1908 |
+
else:
|
1909 |
+
pass # since sign==0, no permutations was necessary
|
1910 |
+
|
1911 |
+
# if we couldn't do anything with Mul object, we just
|
1912 |
+
# mark it as normal ordered
|
1913 |
+
if coeff != S.One:
|
1914 |
+
return coeff*cls(Mul(*newseq))
|
1915 |
+
return Expr.__new__(cls, Mul(*newseq))
|
1916 |
+
|
1917 |
+
if isinstance(arg, NO):
|
1918 |
+
return arg
|
1919 |
+
|
1920 |
+
# if object was not Mul or Add, normal ordering does not apply
|
1921 |
+
return arg
|
1922 |
+
|
1923 |
+
@property
|
1924 |
+
def has_q_creators(self):
|
1925 |
+
"""
|
1926 |
+
Return 0 if the leftmost argument of the first argument is a not a
|
1927 |
+
q_creator, else 1 if it is above fermi or -1 if it is below fermi.
|
1928 |
+
|
1929 |
+
Examples
|
1930 |
+
========
|
1931 |
+
|
1932 |
+
>>> from sympy import symbols
|
1933 |
+
>>> from sympy.physics.secondquant import NO, F, Fd
|
1934 |
+
|
1935 |
+
>>> a = symbols('a', above_fermi=True)
|
1936 |
+
>>> i = symbols('i', below_fermi=True)
|
1937 |
+
>>> NO(Fd(a)*Fd(i)).has_q_creators
|
1938 |
+
1
|
1939 |
+
>>> NO(F(i)*F(a)).has_q_creators
|
1940 |
+
-1
|
1941 |
+
>>> NO(Fd(i)*F(a)).has_q_creators #doctest: +SKIP
|
1942 |
+
0
|
1943 |
+
|
1944 |
+
"""
|
1945 |
+
return self.args[0].args[0].is_q_creator
|
1946 |
+
|
1947 |
+
@property
|
1948 |
+
def has_q_annihilators(self):
|
1949 |
+
"""
|
1950 |
+
Return 0 if the rightmost argument of the first argument is a not a
|
1951 |
+
q_annihilator, else 1 if it is above fermi or -1 if it is below fermi.
|
1952 |
+
|
1953 |
+
Examples
|
1954 |
+
========
|
1955 |
+
|
1956 |
+
>>> from sympy import symbols
|
1957 |
+
>>> from sympy.physics.secondquant import NO, F, Fd
|
1958 |
+
|
1959 |
+
>>> a = symbols('a', above_fermi=True)
|
1960 |
+
>>> i = symbols('i', below_fermi=True)
|
1961 |
+
>>> NO(Fd(a)*Fd(i)).has_q_annihilators
|
1962 |
+
-1
|
1963 |
+
>>> NO(F(i)*F(a)).has_q_annihilators
|
1964 |
+
1
|
1965 |
+
>>> NO(Fd(a)*F(i)).has_q_annihilators
|
1966 |
+
0
|
1967 |
+
|
1968 |
+
"""
|
1969 |
+
return self.args[0].args[-1].is_q_annihilator
|
1970 |
+
|
1971 |
+
def doit(self, **hints):
|
1972 |
+
"""
|
1973 |
+
Either removes the brackets or enables complex computations
|
1974 |
+
in its arguments.
|
1975 |
+
|
1976 |
+
Examples
|
1977 |
+
========
|
1978 |
+
|
1979 |
+
>>> from sympy.physics.secondquant import NO, Fd, F
|
1980 |
+
>>> from textwrap import fill
|
1981 |
+
>>> from sympy import symbols, Dummy
|
1982 |
+
>>> p,q = symbols('p,q', cls=Dummy)
|
1983 |
+
>>> print(fill(str(NO(Fd(p)*F(q)).doit())))
|
1984 |
+
KroneckerDelta(_a, _p)*KroneckerDelta(_a,
|
1985 |
+
_q)*CreateFermion(_a)*AnnihilateFermion(_a) + KroneckerDelta(_a,
|
1986 |
+
_p)*KroneckerDelta(_i, _q)*CreateFermion(_a)*AnnihilateFermion(_i) -
|
1987 |
+
KroneckerDelta(_a, _q)*KroneckerDelta(_i,
|
1988 |
+
_p)*AnnihilateFermion(_a)*CreateFermion(_i) - KroneckerDelta(_i,
|
1989 |
+
_p)*KroneckerDelta(_i, _q)*AnnihilateFermion(_i)*CreateFermion(_i)
|
1990 |
+
"""
|
1991 |
+
if hints.get("remove_brackets", True):
|
1992 |
+
return self._remove_brackets()
|
1993 |
+
else:
|
1994 |
+
return self.__new__(type(self), self.args[0].doit(**hints))
|
1995 |
+
|
1996 |
+
    def _remove_brackets(self):
        """
        Returns the sorted string without normal order brackets.

        The returned string have the property that no nonzero
        contractions exist.
        """

        # check if any creator is also an annihilator
        subslist = []
        for i in self.iter_q_creators():
            if self[i].is_q_annihilator:
                assume = self[i].state.assumptions0

                # only operators with a dummy index can be split in two terms
                if isinstance(self[i].state, Dummy):

                    # create indices with fermi restriction
                    # (the same assumptions dict is reused for both dummies,
                    # with the fermi flags swapped in between)
                    assume.pop("above_fermi", None)
                    assume["below_fermi"] = True
                    below = Dummy('i', **assume)
                    assume.pop("below_fermi", None)
                    assume["above_fermi"] = True
                    above = Dummy('a', **assume)

                    cls = type(self[i])
                    # split the ambiguous operator into a below-fermi and an
                    # above-fermi piece, each tagged by a KroneckerDelta that
                    # restricts the original index to that range
                    split = (
                        self[i].__new__(cls, below)
                        * KroneckerDelta(below, self[i].state)
                        + self[i].__new__(cls, above)
                        * KroneckerDelta(above, self[i].state)
                    )
                    subslist.append((self[i], split))
                else:
                    # a fixed (non-dummy) index cannot be split consistently
                    raise SubstitutionOfAmbigousOperatorFailed(self[i])
        if subslist:
            result = NO(self.subs(subslist))
            if isinstance(result, Add):
                # doit() each term so the split operators are normal ordered
                return Add(*[term.doit() for term in result.args])
            # NOTE(review): if the substituted result is not an Add, control
            # falls off the end and this method returns None — confirm this
            # path is unreachable for well-formed NO arguments
        else:
            return self.args[0]
|
2037 |
+
|
2038 |
+
def _expand_operators(self):
|
2039 |
+
"""
|
2040 |
+
Returns a sum of NO objects that contain no ambiguous q-operators.
|
2041 |
+
|
2042 |
+
Explanation
|
2043 |
+
===========
|
2044 |
+
|
2045 |
+
If an index q has range both above and below fermi, the operator F(q)
|
2046 |
+
is ambiguous in the sense that it can be both a q-creator and a q-annihilator.
|
2047 |
+
If q is dummy, it is assumed to be a summation variable and this method
|
2048 |
+
rewrites it into a sum of NO terms with unambiguous operators:
|
2049 |
+
|
2050 |
+
{Fd(p)*F(q)} = {Fd(a)*F(b)} + {Fd(a)*F(i)} + {Fd(j)*F(b)} -{F(i)*Fd(j)}
|
2051 |
+
|
2052 |
+
where a,b are above and i,j are below fermi level.
|
2053 |
+
"""
|
2054 |
+
return NO(self._remove_brackets)
|
2055 |
+
|
2056 |
+
def __getitem__(self, i):
|
2057 |
+
if isinstance(i, slice):
|
2058 |
+
indices = i.indices(len(self))
|
2059 |
+
return [self.args[0].args[i] for i in range(*indices)]
|
2060 |
+
else:
|
2061 |
+
return self.args[0].args[i]
|
2062 |
+
|
2063 |
+
def __len__(self):
|
2064 |
+
return len(self.args[0].args)
|
2065 |
+
|
2066 |
+
def iter_q_annihilators(self):
|
2067 |
+
"""
|
2068 |
+
Iterates over the annihilation operators.
|
2069 |
+
|
2070 |
+
Examples
|
2071 |
+
========
|
2072 |
+
|
2073 |
+
>>> from sympy import symbols
|
2074 |
+
>>> i, j = symbols('i j', below_fermi=True)
|
2075 |
+
>>> a, b = symbols('a b', above_fermi=True)
|
2076 |
+
>>> from sympy.physics.secondquant import NO, F, Fd
|
2077 |
+
>>> no = NO(Fd(a)*F(i)*F(b)*Fd(j))
|
2078 |
+
|
2079 |
+
>>> no.iter_q_creators()
|
2080 |
+
<generator object... at 0x...>
|
2081 |
+
>>> list(no.iter_q_creators())
|
2082 |
+
[0, 1]
|
2083 |
+
>>> list(no.iter_q_annihilators())
|
2084 |
+
[3, 2]
|
2085 |
+
|
2086 |
+
"""
|
2087 |
+
ops = self.args[0].args
|
2088 |
+
iter = range(len(ops) - 1, -1, -1)
|
2089 |
+
for i in iter:
|
2090 |
+
if ops[i].is_q_annihilator:
|
2091 |
+
yield i
|
2092 |
+
else:
|
2093 |
+
break
|
2094 |
+
|
2095 |
+
def iter_q_creators(self):
|
2096 |
+
"""
|
2097 |
+
Iterates over the creation operators.
|
2098 |
+
|
2099 |
+
Examples
|
2100 |
+
========
|
2101 |
+
|
2102 |
+
>>> from sympy import symbols
|
2103 |
+
>>> i, j = symbols('i j', below_fermi=True)
|
2104 |
+
>>> a, b = symbols('a b', above_fermi=True)
|
2105 |
+
>>> from sympy.physics.secondquant import NO, F, Fd
|
2106 |
+
>>> no = NO(Fd(a)*F(i)*F(b)*Fd(j))
|
2107 |
+
|
2108 |
+
>>> no.iter_q_creators()
|
2109 |
+
<generator object... at 0x...>
|
2110 |
+
>>> list(no.iter_q_creators())
|
2111 |
+
[0, 1]
|
2112 |
+
>>> list(no.iter_q_annihilators())
|
2113 |
+
[3, 2]
|
2114 |
+
|
2115 |
+
"""
|
2116 |
+
|
2117 |
+
ops = self.args[0].args
|
2118 |
+
iter = range(0, len(ops))
|
2119 |
+
for i in iter:
|
2120 |
+
if ops[i].is_q_creator:
|
2121 |
+
yield i
|
2122 |
+
else:
|
2123 |
+
break
|
2124 |
+
|
2125 |
+
def get_subNO(self, i):
|
2126 |
+
"""
|
2127 |
+
Returns a NO() without FermionicOperator at index i.
|
2128 |
+
|
2129 |
+
Examples
|
2130 |
+
========
|
2131 |
+
|
2132 |
+
>>> from sympy import symbols
|
2133 |
+
>>> from sympy.physics.secondquant import F, NO
|
2134 |
+
>>> p, q, r = symbols('p,q,r')
|
2135 |
+
|
2136 |
+
>>> NO(F(p)*F(q)*F(r)).get_subNO(1)
|
2137 |
+
NO(AnnihilateFermion(p)*AnnihilateFermion(r))
|
2138 |
+
|
2139 |
+
"""
|
2140 |
+
arg0 = self.args[0] # it's a Mul by definition of how it's created
|
2141 |
+
mul = arg0._new_rawargs(*(arg0.args[:i] + arg0.args[i + 1:]))
|
2142 |
+
return NO(mul)
|
2143 |
+
|
2144 |
+
def _latex(self, printer):
|
2145 |
+
return "\\left\\{%s\\right\\}" % printer._print(self.args[0])
|
2146 |
+
|
2147 |
+
def __repr__(self):
|
2148 |
+
return "NO(%s)" % self.args[0]
|
2149 |
+
|
2150 |
+
def __str__(self):
|
2151 |
+
return ":%s:" % self.args[0]
|
2152 |
+
|
2153 |
+
|
2154 |
+
def contraction(a, b):
    """
    Calculates contraction of Fermionic operators a and b.

    Examples
    ========

    >>> from sympy import symbols
    >>> from sympy.physics.secondquant import F, Fd, contraction
    >>> p, q = symbols('p,q')
    >>> a, b = symbols('a,b', above_fermi=True)
    >>> i, j = symbols('i,j', below_fermi=True)

    A contraction is non-zero only if a quasi-creator is to the right of a
    quasi-annihilator:

    >>> contraction(F(a),Fd(b))
    KroneckerDelta(a, b)
    >>> contraction(Fd(i),F(j))
    KroneckerDelta(i, j)

    For general indices a non-zero result restricts the indices to below/above
    the fermi surface:

    >>> contraction(Fd(p),F(q))
    KroneckerDelta(_i, q)*KroneckerDelta(p, q)
    >>> contraction(F(p),Fd(q))
    KroneckerDelta(_a, q)*KroneckerDelta(p, q)

    Two creators or two annihilators always vanishes:

    >>> contraction(F(p),F(q))
    0
    >>> contraction(Fd(p),Fd(q))
    0

    """
    if isinstance(b, FermionicOperator) and isinstance(a, FermionicOperator):
        if isinstance(a, AnnihilateFermion) and isinstance(b, CreateFermion):
            # F(p)*Fd(q): vanishes if either index is restricted to below
            # fermi; reduces to a plain delta if either is above fermi.
            # Branch order matters: the zero checks must precede the
            # delta checks because a state may carry neither assumption.
            if b.state.assumptions0.get("below_fermi"):
                return S.Zero
            if a.state.assumptions0.get("below_fermi"):
                return S.Zero
            if b.state.assumptions0.get("above_fermi"):
                return KroneckerDelta(a.state, b.state)
            if a.state.assumptions0.get("above_fermi"):
                return KroneckerDelta(a.state, b.state)

            # general indices: the extra delta against a fresh above-fermi
            # dummy records the implied restriction of the index range
            return (KroneckerDelta(a.state, b.state)*
                    KroneckerDelta(b.state, Dummy('a', above_fermi=True)))
        if isinstance(b, AnnihilateFermion) and isinstance(a, CreateFermion):
            # Fd(p)*F(q): mirror case of the branch above, with the fermi
            # restrictions reversed.
            if b.state.assumptions0.get("above_fermi"):
                return S.Zero
            if a.state.assumptions0.get("above_fermi"):
                return S.Zero
            if b.state.assumptions0.get("below_fermi"):
                return KroneckerDelta(a.state, b.state)
            if a.state.assumptions0.get("below_fermi"):
                return KroneckerDelta(a.state, b.state)

            # general indices: restricted to below fermi
            return (KroneckerDelta(a.state, b.state)*
                    KroneckerDelta(b.state, Dummy('i', below_fermi=True)))

        # vanish if 2xAnnihilator or 2xCreator
        return S.Zero

    else:
        #not fermion operators
        # report which of the two arguments failed the type check
        t = ( isinstance(i, FermionicOperator) for i in (a, b) )
        raise ContractionAppliesOnlyToFermions(*t)
|
2224 |
+
|
2225 |
+
|
2226 |
+
def _sqkey(sq_operator):
|
2227 |
+
"""Generates key for canonical sorting of SQ operators."""
|
2228 |
+
return sq_operator._sortkey()
|
2229 |
+
|
2230 |
+
|
2231 |
+
def _sort_anticommuting_fermions(string1, key=_sqkey):
|
2232 |
+
"""Sort fermionic operators to canonical order, assuming all pairs anticommute.
|
2233 |
+
|
2234 |
+
Explanation
|
2235 |
+
===========
|
2236 |
+
|
2237 |
+
Uses a bidirectional bubble sort. Items in string1 are not referenced
|
2238 |
+
so in principle they may be any comparable objects. The sorting depends on the
|
2239 |
+
operators '>' and '=='.
|
2240 |
+
|
2241 |
+
If the Pauli principle is violated, an exception is raised.
|
2242 |
+
|
2243 |
+
Returns
|
2244 |
+
=======
|
2245 |
+
|
2246 |
+
tuple (sorted_str, sign)
|
2247 |
+
|
2248 |
+
sorted_str: list containing the sorted operators
|
2249 |
+
sign: int telling how many times the sign should be changed
|
2250 |
+
(if sign==0 the string was already sorted)
|
2251 |
+
"""
|
2252 |
+
|
2253 |
+
verified = False
|
2254 |
+
sign = 0
|
2255 |
+
rng = list(range(len(string1) - 1))
|
2256 |
+
rev = list(range(len(string1) - 3, -1, -1))
|
2257 |
+
|
2258 |
+
keys = list(map(key, string1))
|
2259 |
+
key_val = dict(list(zip(keys, string1)))
|
2260 |
+
|
2261 |
+
while not verified:
|
2262 |
+
verified = True
|
2263 |
+
for i in rng:
|
2264 |
+
left = keys[i]
|
2265 |
+
right = keys[i + 1]
|
2266 |
+
if left == right:
|
2267 |
+
raise ViolationOfPauliPrinciple([left, right])
|
2268 |
+
if left > right:
|
2269 |
+
verified = False
|
2270 |
+
keys[i:i + 2] = [right, left]
|
2271 |
+
sign = sign + 1
|
2272 |
+
if verified:
|
2273 |
+
break
|
2274 |
+
for i in rev:
|
2275 |
+
left = keys[i]
|
2276 |
+
right = keys[i + 1]
|
2277 |
+
if left == right:
|
2278 |
+
raise ViolationOfPauliPrinciple([left, right])
|
2279 |
+
if left > right:
|
2280 |
+
verified = False
|
2281 |
+
keys[i:i + 2] = [right, left]
|
2282 |
+
sign = sign + 1
|
2283 |
+
string1 = [ key_val[k] for k in keys ]
|
2284 |
+
return (string1, sign)
|
2285 |
+
|
2286 |
+
|
2287 |
+
def evaluate_deltas(e):
    """
    We evaluate KroneckerDelta symbols in the expression assuming Einstein summation.

    Explanation
    ===========

    If one index is repeated it is summed over and in effect substituted with
    the other one. If both indices are repeated we substitute according to what
    is the preferred index. this is determined by
    KroneckerDelta.preferred_index and KroneckerDelta.killable_index.

    In case there are no possible substitutions or if a substitution would
    imply a loss of information, nothing is done.

    In case an index appears in more than one KroneckerDelta, the resulting
    substitution depends on the order of the factors. Since the ordering is platform
    dependent, the literal expression resulting from this function may be hard to
    predict.

    Examples
    ========

    We assume the following:

    >>> from sympy import symbols, Function, Dummy, KroneckerDelta
    >>> from sympy.physics.secondquant import evaluate_deltas
    >>> i,j = symbols('i j', below_fermi=True, cls=Dummy)
    >>> a,b = symbols('a b', above_fermi=True, cls=Dummy)
    >>> p,q = symbols('p q', cls=Dummy)
    >>> f = Function('f')
    >>> t = Function('t')

    The order of preference for these indices according to KroneckerDelta is
    (a, b, i, j, p, q).

    Trivial cases:

    >>> evaluate_deltas(KroneckerDelta(i,j)*f(i))       # d_ij f(i) -> f(j)
    f(_j)
    >>> evaluate_deltas(KroneckerDelta(i,j)*f(j))       # d_ij f(j) -> f(i)
    f(_i)
    >>> evaluate_deltas(KroneckerDelta(i,p)*f(p))       # d_ip f(p) -> f(i)
    f(_i)
    >>> evaluate_deltas(KroneckerDelta(q,p)*f(p))       # d_qp f(p) -> f(q)
    f(_q)
    >>> evaluate_deltas(KroneckerDelta(q,p)*f(q))       # d_qp f(q) -> f(p)
    f(_p)

    More interesting cases:

    >>> evaluate_deltas(KroneckerDelta(i,p)*t(a,i)*f(p,q))
    f(_i, _q)*t(_a, _i)
    >>> evaluate_deltas(KroneckerDelta(a,p)*t(a,i)*f(p,q))
    f(_a, _q)*t(_a, _i)
    >>> evaluate_deltas(KroneckerDelta(p,q)*f(p,q))
    f(_p, _p)

    Finally, here are some cases where nothing is done, because that would
    imply a loss of information:

    >>> evaluate_deltas(KroneckerDelta(i,p)*f(q))
    f(_q)*KroneckerDelta(_i, _p)
    >>> evaluate_deltas(KroneckerDelta(i,p)*f(i))
    f(_i)*KroneckerDelta(_i, _p)
    """

    # We treat Deltas only in mul objects
    # for general function objects we don't evaluate KroneckerDeltas in arguments,
    # but here we hard code exceptions to this rule
    accepted_functions = (
        Add,
    )
    if isinstance(e, accepted_functions):
        # recurse into each term independently
        return e.func(*[evaluate_deltas(arg) for arg in e.args])

    elif isinstance(e, Mul):
        # find all occurrences of delta function and count each index present in
        # expression.
        deltas = []
        indices = {}
        for i in e.args:
            for s in i.free_symbols:
                if s in indices:
                    indices[s] += 1
                else:
                    indices[s] = 0  # geek counting simplifies logic below
                    # (an index seen exactly once ends up with count 0, i.e.
                    # falsy, so it is never substituted away)
            if isinstance(i, KroneckerDelta):
                deltas.append(i)

        for d in deltas:
            # If we do something, and there are more deltas, we should recurse
            # to treat the resulting expression properly
            if d.killable_index.is_Symbol and indices[d.killable_index]:
                # killable index is repeated: replace it by the preferred one
                e = e.subs(d.killable_index, d.preferred_index)
                if len(deltas) > 1:
                    return evaluate_deltas(e)
            elif (d.preferred_index.is_Symbol and indices[d.preferred_index]
                  and d.indices_contain_equal_information):
                # preferred index is repeated and both indices carry the same
                # range information, so the reverse substitution is safe
                e = e.subs(d.preferred_index, d.killable_index)
                if len(deltas) > 1:
                    return evaluate_deltas(e)
            else:
                # no safe substitution for this delta; keep it as-is
                pass

        return e
    # nothing to do, maybe we hit a Symbol or a number
    else:
        return e
|
2396 |
+
|
2397 |
+
|
2398 |
+
def substitute_dummies(expr, new_indices=False, pretty_indices=None):
    """
    Collect terms by substitution of dummy variables.

    Explanation
    ===========

    This routine allows simplification of Add expressions containing terms
    which differ only due to dummy variables.

    The idea is to substitute all dummy variables consistently depending on
    the structure of the term. For each term, we obtain a sequence of all
    dummy variables, where the order is determined by the index range, what
    factors the index belongs to and its position in each factor. See
    _get_ordered_dummies() for more information about the sorting of dummies.
    The index sequence is then substituted consistently in each term.

    Examples
    ========

    >>> from sympy import symbols, Function, Dummy
    >>> from sympy.physics.secondquant import substitute_dummies
    >>> a,b,c,d = symbols('a b c d', above_fermi=True, cls=Dummy)
    >>> i,j = symbols('i j', below_fermi=True, cls=Dummy)
    >>> f = Function('f')

    >>> expr = f(a,b) + f(c,d); expr
    f(_a, _b) + f(_c, _d)

    Since a, b, c and d are equivalent summation indices, the expression can be
    simplified to a single term (for which the dummy indices are still summed over)

    >>> substitute_dummies(expr)
    2*f(_a, _b)


    Controlling output:

    By default the dummy symbols that are already present in the expression
    will be reused in a different permutation. However, if new_indices=True,
    new dummies will be generated and inserted. The keyword 'pretty_indices'
    can be used to control this generation of new symbols.

    By default the new dummies will be generated on the form i_1, i_2, a_1,
    etc. If you supply a dictionary with key:value pairs in the form:

        { index_group: string_of_letters }

    The letters will be used as labels for the new dummy symbols. The
    index_groups must be one of 'above', 'below' or 'general'.

    >>> expr = f(a,b,i,j)
    >>> my_dummies = { 'above':'st', 'below':'uv' }
    >>> substitute_dummies(expr, new_indices=True, pretty_indices=my_dummies)
    f(_s, _t, _u, _v)

    If we run out of letters, or if there is no keyword for some index_group
    the default dummy generator will be used as a fallback:

    >>> p,q = symbols('p q', cls=Dummy)  # general indices
    >>> expr = f(p,q)
    >>> substitute_dummies(expr, new_indices=True, pretty_indices=my_dummies)
    f(_p_0, _p_1)

    """
    # Fix: avoid the mutable-default-argument anti-pattern.  None means
    # "no pretty labels"; behavior is unchanged for all callers since the
    # dict was only ever read, never mutated.
    if pretty_indices is None:
        pretty_indices = {}

    # setup the replacing dummies
    if new_indices:
        letters_above = pretty_indices.get('above', "")
        letters_below = pretty_indices.get('below', "")
        letters_general = pretty_indices.get('general', "")
        len_above = len(letters_above)
        len_below = len(letters_below)
        len_general = len(letters_general)

        def _i(number):
            # label for a below-fermi index; falls back to i_<n> when the
            # supplied letters run out
            try:
                return letters_below[number]
            except IndexError:
                return 'i_' + str(number - len_below)

        def _a(number):
            # label for an above-fermi index; fallback a_<n>
            try:
                return letters_above[number]
            except IndexError:
                return 'a_' + str(number - len_above)

        def _p(number):
            # label for a general index; fallback p_<n>
            try:
                return letters_general[number]
            except IndexError:
                return 'p_' + str(number - len_general)

    aboves = []
    belows = []
    generals = []

    dummies = expr.atoms(Dummy)
    if not new_indices:
        # reuse the existing dummies in a deterministic order
        dummies = sorted(dummies, key=default_sort_key)

    # generate lists with the dummies we will insert
    a = i = p = 0
    for d in dummies:
        assum = d.assumptions0

        if assum.get("above_fermi"):
            if new_indices:
                sym = _a(a)
                a += 1
            l1 = aboves
        elif assum.get("below_fermi"):
            if new_indices:
                sym = _i(i)
                i += 1
            l1 = belows
        else:
            if new_indices:
                sym = _p(p)
                p += 1
            l1 = generals

        if new_indices:
            l1.append(Dummy(sym, **assum))
        else:
            l1.append(d)

    expr = expr.expand()
    terms = Add.make_args(expr)
    new_terms = []
    for term in terms:
        # fresh iterators per term: every term consumes replacement dummies
        # from the start of each pool
        i = iter(belows)
        a = iter(aboves)
        p = iter(generals)
        ordered = _get_ordered_dummies(term)
        subsdict = {}
        for d in ordered:
            if d.assumptions0.get('below_fermi'):
                subsdict[d] = next(i)
            elif d.assumptions0.get('above_fermi'):
                subsdict[d] = next(a)
            else:
                subsdict[d] = next(p)
        subslist = []
        final_subs = []
        for k, v in subsdict.items():
            if k == v:
                continue
            if v in subsdict:
                # We check if the sequence of substitutions end quickly. In
                # that case, we can avoid temporary symbols if we ensure the
                # correct substitution order.
                if subsdict[v] in subsdict:
                    # (x, y) -> (y, x), we need a temporary variable
                    x = Dummy('x')
                    subslist.append((k, x))
                    final_subs.append((x, v))
                else:
                    # (x, y) -> (y, a), x->y must be done last
                    # but before temporary variables are resolved
                    final_subs.insert(0, (k, v))
            else:
                subslist.append((k, v))
        subslist.extend(final_subs)
        new_terms.append(term.subs(subslist))
    return Add(*new_terms)
|
2564 |
+
|
2565 |
+
|
2566 |
+
class KeyPrinter(StrPrinter):
    """Printer for which only equal objects are equal in print"""
    def _print_Dummy(self, expr):
        # Embed dummy_index so that distinct dummies sharing a name still
        # print differently (needed for unambiguous sort keys).
        name, index = expr.name, expr.dummy_index
        return "(%s_%i)" % (name, index)
|
2570 |
+
|
2571 |
+
|
2572 |
+
def __kprint(expr):
    """Render expr with KeyPrinter; used when building canonical sort keys."""
    return KeyPrinter().doprint(expr)
|
2575 |
+
|
2576 |
+
|
2577 |
+
def _get_ordered_dummies(mul, verbose=False):
    """Returns all dummies in the mul sorted in canonical order.

    Explanation
    ===========

    The purpose of the canonical ordering is that dummies can be substituted
    consistently across terms with the result that equivalent terms can be
    simplified.

    It is not possible to determine if two terms are equivalent based solely on
    the dummy order. However, a consistent substitution guided by the ordered
    dummies should lead to trivially (non-)equivalent terms, thereby revealing
    the equivalence. This also means that if two terms have identical sequences of
    dummies, the (non-)equivalence should already be apparent.

    Strategy
    --------

    The canonical order is given by an arbitrary sorting rule. A sort key
    is determined for each dummy as a tuple that depends on all factors where
    the index is present. The dummies are thereby sorted according to the
    contraction structure of the term, instead of sorting based solely on the
    dummy symbol itself.

    After all dummies in the term has been assigned a key, we check for identical
    keys, i.e. unorderable dummies. If any are found, we call a specialized
    method, _determine_ambiguous(), that will determine a unique order based
    on recursive calls to _get_ordered_dummies().

    Key description
    ---------------

    A high level description of the sort key:

        1. Range of the dummy index
        2. Relation to external (non-dummy) indices
        3. Position of the index in the first factor
        4. Position of the index in the second factor

    The sort key is a tuple with the following components:

        1. A single character indicating the range of the dummy (above, below
           or general.)
        2. A list of strings with fully masked string representations of all
           factors where the dummy is present. By masked, we mean that dummies
           are represented by a symbol to indicate either below fermi, above or
           general. No other information is displayed about the dummies at
           this point. The list is sorted stringwise.
        3. An integer number indicating the position of the index, in the first
           factor as sorted in 2.
        4. An integer number indicating the position of the index, in the second
           factor as sorted in 2.

    If a factor is either of type AntiSymmetricTensor or SqOperator, the index
    position in items 3 and 4 is indicated as 'upper' or 'lower' only.
    (Creation operators are considered upper and annihilation operators lower.)

    If the masked factors are identical, the two factors cannot be ordered
    unambiguously in item 2. In this case, items 3, 4 are left out. If several
    indices are contracted between the unorderable factors, it will be handled by
    _determine_ambiguous()


    """
    # setup dicts to avoid repeated calculations in key()
    args = Mul.make_args(mul)
    fac_dum = { fac: fac.atoms(Dummy) for fac in args }
    fac_repr = { fac: __kprint(fac) for fac in args }
    all_dums = set().union(*fac_dum.values())
    # mask characters encode the index range: '0' below, '1' above, '2' general
    mask = {}
    for d in all_dums:
        if d.assumptions0.get('below_fermi'):
            mask[d] = '0'
        elif d.assumptions0.get('above_fermi'):
            mask[d] = '1'
        else:
            mask[d] = '2'
    dum_repr = {d: __kprint(d) for d in all_dums}

    def _key(d):
        # all factors containing dummy d, and all other dummies that occur
        # in those factors
        dumstruct = [ fac for fac in fac_dum if d in fac_dum[fac] ]
        other_dums = set().union(*[fac_dum[fac] for fac in dumstruct])
        fac = dumstruct[-1]
        if other_dums is fac_dum[fac]:
            # union of a single set may return the set itself; copy before
            # mutating so fac_dum stays intact
            other_dums = fac_dum[fac].copy()
        other_dums.remove(d)
        masked_facs = [ fac_repr[fac] for fac in dumstruct ]
        # replace every other dummy by its range character only
        for d2 in other_dums:
            masked_facs = [ fac.replace(dum_repr[d2], mask[d2])
                            for fac in masked_facs ]
        # fully masked: d itself is masked as well
        all_masked = [ fac.replace(dum_repr[d], mask[d])
                       for fac in masked_facs ]
        masked_facs = dict(list(zip(dumstruct, masked_facs)))

        # dummies for which the ordering cannot be determined
        if has_dups(all_masked):
            all_masked.sort()
            return mask[d], tuple(all_masked)  # positions are ambiguous

        # sort factors according to fully masked strings
        keydict = dict(list(zip(dumstruct, all_masked)))
        dumstruct.sort(key=lambda x: keydict[x])
        all_masked.sort()

        pos_val = []
        for fac in dumstruct:
            if isinstance(fac, AntiSymmetricTensor):
                if d in fac.upper:
                    pos_val.append('u')
                if d in fac.lower:
                    pos_val.append('l')
            elif isinstance(fac, Creator):
                pos_val.append('u')
            elif isinstance(fac, Annihilator):
                pos_val.append('l')
            elif isinstance(fac, NO):
                ops = [ op for op in fac if op.has(d) ]
                for op in ops:
                    if isinstance(op, Creator):
                        pos_val.append('u')
                    else:
                        pos_val.append('l')
            else:
                # fallback to position in string representation
                facpos = -1
                while 1:
                    facpos = masked_facs[fac].find(dum_repr[d], facpos + 1)
                    if facpos == -1:
                        break
                    pos_val.append(facpos)
        return (mask[d], tuple(all_masked), pos_val[0], pos_val[-1])
    dumkey = dict(list(zip(all_dums, list(map(_key, all_dums)))))
    result = sorted(all_dums, key=lambda x: dumkey[x])
    if has_dups(iter(dumkey.values())):
        # We have ambiguities: group dummies sharing a key and resolve the
        # order of each group recursively
        unordered = defaultdict(set)
        for d, k in dumkey.items():
            unordered[k].add(d)
        for k in [ k for k in unordered if len(unordered[k]) < 2 ]:
            del unordered[k]

        unordered = [ unordered[k] for k in sorted(unordered) ]
        result = _determine_ambiguous(mul, result, unordered)
    return result
|
2722 |
+
|
2723 |
+
|
2724 |
+
def _determine_ambiguous(term, ordered, ambiguous_groups):
    """Resolve the relative order of dummies that got identical sort keys."""
    # We encountered a term for which the dummy substitution is ambiguous.
    # This happens for terms with 2 or more contractions between factors that
    # cannot be uniquely ordered independent of summation indices. For
    # example:
    #
    # Sum(p, q) v^{p, .}_{q, .}v^{q, .}_{p, .}
    #
    # Assuming that the indices represented by . are dummies with the
    # same range, the factors cannot be ordered, and there is no
    # way to determine a consistent ordering of p and q.
    #
    # The strategy employed here, is to relabel all unambiguous dummies with
    # non-dummy symbols and call _get_ordered_dummies again. This procedure is
    # applied to the entire term so there is a possibility that
    # _determine_ambiguous() is called again from a deeper recursion level.

    # break recursion if there are no ordered dummies
    all_ambiguous = set()
    for dummies in ambiguous_groups:
        all_ambiguous |= dummies
    all_ordered = set(ordered) - all_ambiguous
    if not all_ordered:
        # FIXME: If we arrive here, there are no ordered dummies. A method to
        # handle this needs to be implemented. In order to return something
        # useful nevertheless, we choose arbitrarily the first dummy and
        # determine the rest from this one. This method is dependent on the
        # actual dummy labels which violates an assumption for the
        # canonicalization procedure. A better implementation is needed.
        group = [ d for d in ordered if d in ambiguous_groups[0] ]
        d = group[0]
        all_ordered.add(d)
        ambiguous_groups[0].remove(d)

    # relabel the already-ordered dummies with factory symbols; the counter
    # is saved and restored so nested calls see a consistent symbol stream
    stored_counter = _symbol_factory._counter
    subslist = []
    for d in [ d for d in ordered if d in all_ordered ]:
        nondum = _symbol_factory._next()
        subslist.append((d, nondum))
    newterm = term.subs(subslist)
    neworder = _get_ordered_dummies(newterm)
    _symbol_factory._set_counter(stored_counter)

    # update ordered list with new information
    for group in ambiguous_groups:
        ordered_group = [ d for d in neworder if d in group ]
        ordered_group.reverse()
        result = []
        for d in ordered:
            if d in group:
                # splice the group's resolved order into the original slots
                result.append(ordered_group.pop())
            else:
                result.append(d)
        ordered = result
    return ordered
|
2779 |
+
|
2780 |
+
|
2781 |
+
class _SymbolFactory:
|
2782 |
+
def __init__(self, label):
|
2783 |
+
self._counterVar = 0
|
2784 |
+
self._label = label
|
2785 |
+
|
2786 |
+
def _set_counter(self, value):
|
2787 |
+
"""
|
2788 |
+
Sets counter to value.
|
2789 |
+
"""
|
2790 |
+
self._counterVar = value
|
2791 |
+
|
2792 |
+
@property
|
2793 |
+
def _counter(self):
|
2794 |
+
"""
|
2795 |
+
What counter is currently at.
|
2796 |
+
"""
|
2797 |
+
return self._counterVar
|
2798 |
+
|
2799 |
+
def _next(self):
|
2800 |
+
"""
|
2801 |
+
Generates the next symbols and increments counter by 1.
|
2802 |
+
"""
|
2803 |
+
s = Symbol("%s%i" % (self._label, self._counterVar))
|
2804 |
+
self._counterVar += 1
|
2805 |
+
return s
|
2806 |
+
_symbol_factory = _SymbolFactory('_]"]_') # most certainly a unique label
|
2807 |
+
|
2808 |
+
|
2809 |
+
@cacheit
|
2810 |
+
def _get_contractions(string1, keep_only_fully_contracted=False):
|
2811 |
+
"""
|
2812 |
+
Returns Add-object with contracted terms.
|
2813 |
+
|
2814 |
+
Uses recursion to find all contractions. -- Internal helper function --
|
2815 |
+
|
2816 |
+
Will find nonzero contractions in string1 between indices given in
|
2817 |
+
leftrange and rightrange.
|
2818 |
+
|
2819 |
+
"""
|
2820 |
+
|
2821 |
+
# Should we store current level of contraction?
|
2822 |
+
if keep_only_fully_contracted and string1:
|
2823 |
+
result = []
|
2824 |
+
else:
|
2825 |
+
result = [NO(Mul(*string1))]
|
2826 |
+
|
2827 |
+
for i in range(len(string1) - 1):
|
2828 |
+
for j in range(i + 1, len(string1)):
|
2829 |
+
|
2830 |
+
c = contraction(string1[i], string1[j])
|
2831 |
+
|
2832 |
+
if c:
|
2833 |
+
sign = (j - i + 1) % 2
|
2834 |
+
if sign:
|
2835 |
+
coeff = S.NegativeOne*c
|
2836 |
+
else:
|
2837 |
+
coeff = c
|
2838 |
+
|
2839 |
+
#
|
2840 |
+
# Call next level of recursion
|
2841 |
+
# ============================
|
2842 |
+
#
|
2843 |
+
# We now need to find more contractions among operators
|
2844 |
+
#
|
2845 |
+
# oplist = string1[:i]+ string1[i+1:j] + string1[j+1:]
|
2846 |
+
#
|
2847 |
+
# To prevent overcounting, we don't allow contractions
|
2848 |
+
# we have already encountered. i.e. contractions between
|
2849 |
+
# string1[:i] <---> string1[i+1:j]
|
2850 |
+
# and string1[:i] <---> string1[j+1:].
|
2851 |
+
#
|
2852 |
+
# This leaves the case:
|
2853 |
+
oplist = string1[i + 1:j] + string1[j + 1:]
|
2854 |
+
|
2855 |
+
if oplist:
|
2856 |
+
|
2857 |
+
result.append(coeff*NO(
|
2858 |
+
Mul(*string1[:i])*_get_contractions( oplist,
|
2859 |
+
keep_only_fully_contracted=keep_only_fully_contracted)))
|
2860 |
+
|
2861 |
+
else:
|
2862 |
+
result.append(coeff*NO( Mul(*string1[:i])))
|
2863 |
+
|
2864 |
+
if keep_only_fully_contracted:
|
2865 |
+
break # next iteration over i leaves leftmost operator string1[0] uncontracted
|
2866 |
+
|
2867 |
+
return Add(*result)
|
2868 |
+
|
2869 |
+
|
2870 |
+
def wicks(e, **kw_args):
|
2871 |
+
"""
|
2872 |
+
Returns the normal ordered equivalent of an expression using Wicks Theorem.
|
2873 |
+
|
2874 |
+
Examples
|
2875 |
+
========
|
2876 |
+
|
2877 |
+
>>> from sympy import symbols, Dummy
|
2878 |
+
>>> from sympy.physics.secondquant import wicks, F, Fd
|
2879 |
+
>>> p, q, r = symbols('p,q,r')
|
2880 |
+
>>> wicks(Fd(p)*F(q))
|
2881 |
+
KroneckerDelta(_i, q)*KroneckerDelta(p, q) + NO(CreateFermion(p)*AnnihilateFermion(q))
|
2882 |
+
|
2883 |
+
By default, the expression is expanded:
|
2884 |
+
|
2885 |
+
>>> wicks(F(p)*(F(q)+F(r)))
|
2886 |
+
NO(AnnihilateFermion(p)*AnnihilateFermion(q)) + NO(AnnihilateFermion(p)*AnnihilateFermion(r))
|
2887 |
+
|
2888 |
+
With the keyword 'keep_only_fully_contracted=True', only fully contracted
|
2889 |
+
terms are returned.
|
2890 |
+
|
2891 |
+
By request, the result can be simplified in the following order:
|
2892 |
+
-- KroneckerDelta functions are evaluated
|
2893 |
+
-- Dummy variables are substituted consistently across terms
|
2894 |
+
|
2895 |
+
>>> p, q, r = symbols('p q r', cls=Dummy)
|
2896 |
+
>>> wicks(Fd(p)*(F(q)+F(r)), keep_only_fully_contracted=True)
|
2897 |
+
KroneckerDelta(_i, _q)*KroneckerDelta(_p, _q) + KroneckerDelta(_i, _r)*KroneckerDelta(_p, _r)
|
2898 |
+
|
2899 |
+
"""
|
2900 |
+
|
2901 |
+
if not e:
|
2902 |
+
return S.Zero
|
2903 |
+
|
2904 |
+
opts = {
|
2905 |
+
'simplify_kronecker_deltas': False,
|
2906 |
+
'expand': True,
|
2907 |
+
'simplify_dummies': False,
|
2908 |
+
'keep_only_fully_contracted': False
|
2909 |
+
}
|
2910 |
+
opts.update(kw_args)
|
2911 |
+
|
2912 |
+
# check if we are already normally ordered
|
2913 |
+
if isinstance(e, NO):
|
2914 |
+
if opts['keep_only_fully_contracted']:
|
2915 |
+
return S.Zero
|
2916 |
+
else:
|
2917 |
+
return e
|
2918 |
+
elif isinstance(e, FermionicOperator):
|
2919 |
+
if opts['keep_only_fully_contracted']:
|
2920 |
+
return S.Zero
|
2921 |
+
else:
|
2922 |
+
return e
|
2923 |
+
|
2924 |
+
# break up any NO-objects, and evaluate commutators
|
2925 |
+
e = e.doit(wicks=True)
|
2926 |
+
|
2927 |
+
# make sure we have only one term to consider
|
2928 |
+
e = e.expand()
|
2929 |
+
if isinstance(e, Add):
|
2930 |
+
if opts['simplify_dummies']:
|
2931 |
+
return substitute_dummies(Add(*[ wicks(term, **kw_args) for term in e.args]))
|
2932 |
+
else:
|
2933 |
+
return Add(*[ wicks(term, **kw_args) for term in e.args])
|
2934 |
+
|
2935 |
+
# For Mul-objects we can actually do something
|
2936 |
+
if isinstance(e, Mul):
|
2937 |
+
|
2938 |
+
# we don't want to mess around with commuting part of Mul
|
2939 |
+
# so we factorize it out before starting recursion
|
2940 |
+
c_part = []
|
2941 |
+
string1 = []
|
2942 |
+
for factor in e.args:
|
2943 |
+
if factor.is_commutative:
|
2944 |
+
c_part.append(factor)
|
2945 |
+
else:
|
2946 |
+
string1.append(factor)
|
2947 |
+
n = len(string1)
|
2948 |
+
|
2949 |
+
# catch trivial cases
|
2950 |
+
if n == 0:
|
2951 |
+
result = e
|
2952 |
+
elif n == 1:
|
2953 |
+
if opts['keep_only_fully_contracted']:
|
2954 |
+
return S.Zero
|
2955 |
+
else:
|
2956 |
+
result = e
|
2957 |
+
|
2958 |
+
else: # non-trivial
|
2959 |
+
|
2960 |
+
if isinstance(string1[0], BosonicOperator):
|
2961 |
+
raise NotImplementedError
|
2962 |
+
|
2963 |
+
string1 = tuple(string1)
|
2964 |
+
|
2965 |
+
# recursion over higher order contractions
|
2966 |
+
result = _get_contractions(string1,
|
2967 |
+
keep_only_fully_contracted=opts['keep_only_fully_contracted'] )
|
2968 |
+
result = Mul(*c_part)*result
|
2969 |
+
|
2970 |
+
if opts['expand']:
|
2971 |
+
result = result.expand()
|
2972 |
+
if opts['simplify_kronecker_deltas']:
|
2973 |
+
result = evaluate_deltas(result)
|
2974 |
+
|
2975 |
+
return result
|
2976 |
+
|
2977 |
+
# there was nothing to do
|
2978 |
+
return e
|
2979 |
+
|
2980 |
+
|
2981 |
+
class PermutationOperator(Expr):
|
2982 |
+
"""
|
2983 |
+
Represents the index permutation operator P(ij).
|
2984 |
+
|
2985 |
+
P(ij)*f(i)*g(j) = f(i)*g(j) - f(j)*g(i)
|
2986 |
+
"""
|
2987 |
+
is_commutative = True
|
2988 |
+
|
2989 |
+
def __new__(cls, i, j):
|
2990 |
+
i, j = sorted(map(sympify, (i, j)), key=default_sort_key)
|
2991 |
+
obj = Basic.__new__(cls, i, j)
|
2992 |
+
return obj
|
2993 |
+
|
2994 |
+
def get_permuted(self, expr):
|
2995 |
+
"""
|
2996 |
+
Returns -expr with permuted indices.
|
2997 |
+
|
2998 |
+
Explanation
|
2999 |
+
===========
|
3000 |
+
|
3001 |
+
>>> from sympy import symbols, Function
|
3002 |
+
>>> from sympy.physics.secondquant import PermutationOperator
|
3003 |
+
>>> p,q = symbols('p,q')
|
3004 |
+
>>> f = Function('f')
|
3005 |
+
>>> PermutationOperator(p,q).get_permuted(f(p,q))
|
3006 |
+
-f(q, p)
|
3007 |
+
|
3008 |
+
"""
|
3009 |
+
i = self.args[0]
|
3010 |
+
j = self.args[1]
|
3011 |
+
if expr.has(i) and expr.has(j):
|
3012 |
+
tmp = Dummy()
|
3013 |
+
expr = expr.subs(i, tmp)
|
3014 |
+
expr = expr.subs(j, i)
|
3015 |
+
expr = expr.subs(tmp, j)
|
3016 |
+
return S.NegativeOne*expr
|
3017 |
+
else:
|
3018 |
+
return expr
|
3019 |
+
|
3020 |
+
def _latex(self, printer):
|
3021 |
+
return "P(%s%s)" % self.args
|
3022 |
+
|
3023 |
+
|
3024 |
+
def simplify_index_permutations(expr, permutation_operators):
|
3025 |
+
"""
|
3026 |
+
Performs simplification by introducing PermutationOperators where appropriate.
|
3027 |
+
|
3028 |
+
Explanation
|
3029 |
+
===========
|
3030 |
+
|
3031 |
+
Schematically:
|
3032 |
+
[abij] - [abji] - [baij] + [baji] -> P(ab)*P(ij)*[abij]
|
3033 |
+
|
3034 |
+
permutation_operators is a list of PermutationOperators to consider.
|
3035 |
+
|
3036 |
+
If permutation_operators=[P(ab),P(ij)] we will try to introduce the
|
3037 |
+
permutation operators P(ij) and P(ab) in the expression. If there are other
|
3038 |
+
possible simplifications, we ignore them.
|
3039 |
+
|
3040 |
+
>>> from sympy import symbols, Function
|
3041 |
+
>>> from sympy.physics.secondquant import simplify_index_permutations
|
3042 |
+
>>> from sympy.physics.secondquant import PermutationOperator
|
3043 |
+
>>> p,q,r,s = symbols('p,q,r,s')
|
3044 |
+
>>> f = Function('f')
|
3045 |
+
>>> g = Function('g')
|
3046 |
+
|
3047 |
+
>>> expr = f(p)*g(q) - f(q)*g(p); expr
|
3048 |
+
f(p)*g(q) - f(q)*g(p)
|
3049 |
+
>>> simplify_index_permutations(expr,[PermutationOperator(p,q)])
|
3050 |
+
f(p)*g(q)*PermutationOperator(p, q)
|
3051 |
+
|
3052 |
+
>>> PermutList = [PermutationOperator(p,q),PermutationOperator(r,s)]
|
3053 |
+
>>> expr = f(p,r)*g(q,s) - f(q,r)*g(p,s) + f(q,s)*g(p,r) - f(p,s)*g(q,r)
|
3054 |
+
>>> simplify_index_permutations(expr,PermutList)
|
3055 |
+
f(p, r)*g(q, s)*PermutationOperator(p, q)*PermutationOperator(r, s)
|
3056 |
+
|
3057 |
+
"""
|
3058 |
+
|
3059 |
+
def _get_indices(expr, ind):
|
3060 |
+
"""
|
3061 |
+
Collects indices recursively in predictable order.
|
3062 |
+
"""
|
3063 |
+
result = []
|
3064 |
+
for arg in expr.args:
|
3065 |
+
if arg in ind:
|
3066 |
+
result.append(arg)
|
3067 |
+
else:
|
3068 |
+
if arg.args:
|
3069 |
+
result.extend(_get_indices(arg, ind))
|
3070 |
+
return result
|
3071 |
+
|
3072 |
+
def _choose_one_to_keep(a, b, ind):
|
3073 |
+
# we keep the one where indices in ind are in order ind[0] < ind[1]
|
3074 |
+
return min(a, b, key=lambda x: default_sort_key(_get_indices(x, ind)))
|
3075 |
+
|
3076 |
+
expr = expr.expand()
|
3077 |
+
if isinstance(expr, Add):
|
3078 |
+
terms = set(expr.args)
|
3079 |
+
|
3080 |
+
for P in permutation_operators:
|
3081 |
+
new_terms = set()
|
3082 |
+
on_hold = set()
|
3083 |
+
while terms:
|
3084 |
+
term = terms.pop()
|
3085 |
+
permuted = P.get_permuted(term)
|
3086 |
+
if permuted in terms | on_hold:
|
3087 |
+
try:
|
3088 |
+
terms.remove(permuted)
|
3089 |
+
except KeyError:
|
3090 |
+
on_hold.remove(permuted)
|
3091 |
+
keep = _choose_one_to_keep(term, permuted, P.args)
|
3092 |
+
new_terms.add(P*keep)
|
3093 |
+
else:
|
3094 |
+
|
3095 |
+
# Some terms must get a second chance because the permuted
|
3096 |
+
# term may already have canonical dummy ordering. Then
|
3097 |
+
# substitute_dummies() does nothing. However, the other
|
3098 |
+
# term, if it exists, will be able to match with us.
|
3099 |
+
permuted1 = permuted
|
3100 |
+
permuted = substitute_dummies(permuted)
|
3101 |
+
if permuted1 == permuted:
|
3102 |
+
on_hold.add(term)
|
3103 |
+
elif permuted in terms | on_hold:
|
3104 |
+
try:
|
3105 |
+
terms.remove(permuted)
|
3106 |
+
except KeyError:
|
3107 |
+
on_hold.remove(permuted)
|
3108 |
+
keep = _choose_one_to_keep(term, permuted, P.args)
|
3109 |
+
new_terms.add(P*keep)
|
3110 |
+
else:
|
3111 |
+
new_terms.add(term)
|
3112 |
+
terms = new_terms | on_hold
|
3113 |
+
return Add(*terms)
|
3114 |
+
return expr
|
llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__init__.py
ADDED
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
__all__ = [
|
2 |
+
'CoordinateSym', 'ReferenceFrame',
|
3 |
+
|
4 |
+
'Dyadic',
|
5 |
+
|
6 |
+
'Vector',
|
7 |
+
|
8 |
+
'Point',
|
9 |
+
|
10 |
+
'cross', 'dot', 'express', 'time_derivative', 'outer',
|
11 |
+
'kinematic_equations', 'get_motion_params', 'partial_velocity',
|
12 |
+
'dynamicsymbols',
|
13 |
+
|
14 |
+
'vprint', 'vsstrrepr', 'vsprint', 'vpprint', 'vlatex', 'init_vprinting',
|
15 |
+
|
16 |
+
'curl', 'divergence', 'gradient', 'is_conservative', 'is_solenoidal',
|
17 |
+
'scalar_potential', 'scalar_potential_difference',
|
18 |
+
|
19 |
+
]
|
20 |
+
from .frame import CoordinateSym, ReferenceFrame
|
21 |
+
|
22 |
+
from .dyadic import Dyadic
|
23 |
+
|
24 |
+
from .vector import Vector
|
25 |
+
|
26 |
+
from .point import Point
|
27 |
+
|
28 |
+
from .functions import (cross, dot, express, time_derivative, outer,
|
29 |
+
kinematic_equations, get_motion_params, partial_velocity,
|
30 |
+
dynamicsymbols)
|
31 |
+
|
32 |
+
from .printing import (vprint, vsstrrepr, vsprint, vpprint, vlatex,
|
33 |
+
init_vprinting)
|
34 |
+
|
35 |
+
from .fieldfunctions import (curl, divergence, gradient, is_conservative,
|
36 |
+
is_solenoidal, scalar_potential, scalar_potential_difference)
|
llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.08 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/dyadic.cpython-310.pyc
ADDED
Binary file (17.5 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/fieldfunctions.cpython-310.pyc
ADDED
Binary file (7.75 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/frame.cpython-310.pyc
ADDED
Binary file (45.8 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/functions.cpython-310.pyc
ADDED
Binary file (19.4 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/point.cpython-310.pyc
ADDED
Binary file (18.7 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/printing.cpython-310.pyc
ADDED
Binary file (10.7 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/__pycache__/vector.cpython-310.pyc
ADDED
Binary file (24.8 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/sympy/physics/vector/dyadic.py
ADDED
@@ -0,0 +1,601 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from sympy.core.backend import sympify, Add, ImmutableMatrix as Matrix
|
2 |
+
from sympy.core.evalf import EvalfMixin
|
3 |
+
from sympy.printing.defaults import Printable
|
4 |
+
|
5 |
+
from mpmath.libmp.libmpf import prec_to_dps
|
6 |
+
|
7 |
+
|
8 |
+
__all__ = ['Dyadic']
|
9 |
+
|
10 |
+
|
11 |
+
class Dyadic(Printable, EvalfMixin):
|
12 |
+
"""A Dyadic object.
|
13 |
+
|
14 |
+
See:
|
15 |
+
https://en.wikipedia.org/wiki/Dyadic_tensor
|
16 |
+
Kane, T., Levinson, D. Dynamics Theory and Applications. 1985 McGraw-Hill
|
17 |
+
|
18 |
+
A more powerful way to represent a rigid body's inertia. While it is more
|
19 |
+
complex, by choosing Dyadic components to be in body fixed basis vectors,
|
20 |
+
the resulting matrix is equivalent to the inertia tensor.
|
21 |
+
|
22 |
+
"""
|
23 |
+
|
24 |
+
is_number = False
|
25 |
+
|
26 |
+
def __init__(self, inlist):
|
27 |
+
"""
|
28 |
+
Just like Vector's init, you should not call this unless creating a
|
29 |
+
zero dyadic.
|
30 |
+
|
31 |
+
zd = Dyadic(0)
|
32 |
+
|
33 |
+
Stores a Dyadic as a list of lists; the inner list has the measure
|
34 |
+
number and the two unit vectors; the outerlist holds each unique
|
35 |
+
unit vector pair.
|
36 |
+
|
37 |
+
"""
|
38 |
+
|
39 |
+
self.args = []
|
40 |
+
if inlist == 0:
|
41 |
+
inlist = []
|
42 |
+
while len(inlist) != 0:
|
43 |
+
added = 0
|
44 |
+
for i, v in enumerate(self.args):
|
45 |
+
if ((str(inlist[0][1]) == str(self.args[i][1])) and
|
46 |
+
(str(inlist[0][2]) == str(self.args[i][2]))):
|
47 |
+
self.args[i] = (self.args[i][0] + inlist[0][0],
|
48 |
+
inlist[0][1], inlist[0][2])
|
49 |
+
inlist.remove(inlist[0])
|
50 |
+
added = 1
|
51 |
+
break
|
52 |
+
if added != 1:
|
53 |
+
self.args.append(inlist[0])
|
54 |
+
inlist.remove(inlist[0])
|
55 |
+
i = 0
|
56 |
+
# This code is to remove empty parts from the list
|
57 |
+
while i < len(self.args):
|
58 |
+
if ((self.args[i][0] == 0) | (self.args[i][1] == 0) |
|
59 |
+
(self.args[i][2] == 0)):
|
60 |
+
self.args.remove(self.args[i])
|
61 |
+
i -= 1
|
62 |
+
i += 1
|
63 |
+
|
64 |
+
@property
|
65 |
+
def func(self):
|
66 |
+
"""Returns the class Dyadic. """
|
67 |
+
return Dyadic
|
68 |
+
|
69 |
+
def __add__(self, other):
|
70 |
+
"""The add operator for Dyadic. """
|
71 |
+
other = _check_dyadic(other)
|
72 |
+
return Dyadic(self.args + other.args)
|
73 |
+
|
74 |
+
def __and__(self, other):
|
75 |
+
"""The inner product operator for a Dyadic and a Dyadic or Vector.
|
76 |
+
|
77 |
+
Parameters
|
78 |
+
==========
|
79 |
+
|
80 |
+
other : Dyadic or Vector
|
81 |
+
The other Dyadic or Vector to take the inner product with
|
82 |
+
|
83 |
+
Examples
|
84 |
+
========
|
85 |
+
|
86 |
+
>>> from sympy.physics.vector import ReferenceFrame, outer
|
87 |
+
>>> N = ReferenceFrame('N')
|
88 |
+
>>> D1 = outer(N.x, N.y)
|
89 |
+
>>> D2 = outer(N.y, N.y)
|
90 |
+
>>> D1.dot(D2)
|
91 |
+
(N.x|N.y)
|
92 |
+
>>> D1.dot(N.y)
|
93 |
+
N.x
|
94 |
+
|
95 |
+
"""
|
96 |
+
from sympy.physics.vector.vector import Vector, _check_vector
|
97 |
+
if isinstance(other, Dyadic):
|
98 |
+
other = _check_dyadic(other)
|
99 |
+
ol = Dyadic(0)
|
100 |
+
for i, v in enumerate(self.args):
|
101 |
+
for i2, v2 in enumerate(other.args):
|
102 |
+
ol += v[0] * v2[0] * (v[2] & v2[1]) * (v[1] | v2[2])
|
103 |
+
else:
|
104 |
+
other = _check_vector(other)
|
105 |
+
ol = Vector(0)
|
106 |
+
for i, v in enumerate(self.args):
|
107 |
+
ol += v[0] * v[1] * (v[2] & other)
|
108 |
+
return ol
|
109 |
+
|
110 |
+
def __truediv__(self, other):
|
111 |
+
"""Divides the Dyadic by a sympifyable expression. """
|
112 |
+
return self.__mul__(1 / other)
|
113 |
+
|
114 |
+
def __eq__(self, other):
|
115 |
+
"""Tests for equality.
|
116 |
+
|
117 |
+
Is currently weak; needs stronger comparison testing
|
118 |
+
|
119 |
+
"""
|
120 |
+
|
121 |
+
if other == 0:
|
122 |
+
other = Dyadic(0)
|
123 |
+
other = _check_dyadic(other)
|
124 |
+
if (self.args == []) and (other.args == []):
|
125 |
+
return True
|
126 |
+
elif (self.args == []) or (other.args == []):
|
127 |
+
return False
|
128 |
+
return set(self.args) == set(other.args)
|
129 |
+
|
130 |
+
def __mul__(self, other):
|
131 |
+
"""Multiplies the Dyadic by a sympifyable expression.
|
132 |
+
|
133 |
+
Parameters
|
134 |
+
==========
|
135 |
+
|
136 |
+
other : Sympafiable
|
137 |
+
The scalar to multiply this Dyadic with
|
138 |
+
|
139 |
+
Examples
|
140 |
+
========
|
141 |
+
|
142 |
+
>>> from sympy.physics.vector import ReferenceFrame, outer
|
143 |
+
>>> N = ReferenceFrame('N')
|
144 |
+
>>> d = outer(N.x, N.x)
|
145 |
+
>>> 5 * d
|
146 |
+
5*(N.x|N.x)
|
147 |
+
|
148 |
+
"""
|
149 |
+
|
150 |
+
newlist = list(self.args)
|
151 |
+
other = sympify(other)
|
152 |
+
for i, v in enumerate(newlist):
|
153 |
+
newlist[i] = (other * newlist[i][0], newlist[i][1],
|
154 |
+
newlist[i][2])
|
155 |
+
return Dyadic(newlist)
|
156 |
+
|
157 |
+
def __ne__(self, other):
|
158 |
+
return not self == other
|
159 |
+
|
160 |
+
def __neg__(self):
|
161 |
+
return self * -1
|
162 |
+
|
163 |
+
def _latex(self, printer):
|
164 |
+
ar = self.args # just to shorten things
|
165 |
+
if len(ar) == 0:
|
166 |
+
return str(0)
|
167 |
+
ol = [] # output list, to be concatenated to a string
|
168 |
+
for i, v in enumerate(ar):
|
169 |
+
# if the coef of the dyadic is 1, we skip the 1
|
170 |
+
if ar[i][0] == 1:
|
171 |
+
ol.append(' + ' + printer._print(ar[i][1]) + r"\otimes " +
|
172 |
+
printer._print(ar[i][2]))
|
173 |
+
# if the coef of the dyadic is -1, we skip the 1
|
174 |
+
elif ar[i][0] == -1:
|
175 |
+
ol.append(' - ' +
|
176 |
+
printer._print(ar[i][1]) +
|
177 |
+
r"\otimes " +
|
178 |
+
printer._print(ar[i][2]))
|
179 |
+
# If the coefficient of the dyadic is not 1 or -1,
|
180 |
+
# we might wrap it in parentheses, for readability.
|
181 |
+
elif ar[i][0] != 0:
|
182 |
+
arg_str = printer._print(ar[i][0])
|
183 |
+
if isinstance(ar[i][0], Add):
|
184 |
+
arg_str = '(%s)' % arg_str
|
185 |
+
if arg_str.startswith('-'):
|
186 |
+
arg_str = arg_str[1:]
|
187 |
+
str_start = ' - '
|
188 |
+
else:
|
189 |
+
str_start = ' + '
|
190 |
+
ol.append(str_start + arg_str + printer._print(ar[i][1]) +
|
191 |
+
r"\otimes " + printer._print(ar[i][2]))
|
192 |
+
outstr = ''.join(ol)
|
193 |
+
if outstr.startswith(' + '):
|
194 |
+
outstr = outstr[3:]
|
195 |
+
elif outstr.startswith(' '):
|
196 |
+
outstr = outstr[1:]
|
197 |
+
return outstr
|
198 |
+
|
199 |
+
def _pretty(self, printer):
|
200 |
+
e = self
|
201 |
+
|
202 |
+
class Fake:
|
203 |
+
baseline = 0
|
204 |
+
|
205 |
+
def render(self, *args, **kwargs):
|
206 |
+
ar = e.args # just to shorten things
|
207 |
+
mpp = printer
|
208 |
+
if len(ar) == 0:
|
209 |
+
return str(0)
|
210 |
+
bar = "\N{CIRCLED TIMES}" if printer._use_unicode else "|"
|
211 |
+
ol = [] # output list, to be concatenated to a string
|
212 |
+
for i, v in enumerate(ar):
|
213 |
+
# if the coef of the dyadic is 1, we skip the 1
|
214 |
+
if ar[i][0] == 1:
|
215 |
+
ol.extend([" + ",
|
216 |
+
mpp.doprint(ar[i][1]),
|
217 |
+
bar,
|
218 |
+
mpp.doprint(ar[i][2])])
|
219 |
+
|
220 |
+
# if the coef of the dyadic is -1, we skip the 1
|
221 |
+
elif ar[i][0] == -1:
|
222 |
+
ol.extend([" - ",
|
223 |
+
mpp.doprint(ar[i][1]),
|
224 |
+
bar,
|
225 |
+
mpp.doprint(ar[i][2])])
|
226 |
+
|
227 |
+
# If the coefficient of the dyadic is not 1 or -1,
|
228 |
+
# we might wrap it in parentheses, for readability.
|
229 |
+
elif ar[i][0] != 0:
|
230 |
+
if isinstance(ar[i][0], Add):
|
231 |
+
arg_str = mpp._print(
|
232 |
+
ar[i][0]).parens()[0]
|
233 |
+
else:
|
234 |
+
arg_str = mpp.doprint(ar[i][0])
|
235 |
+
if arg_str.startswith("-"):
|
236 |
+
arg_str = arg_str[1:]
|
237 |
+
str_start = " - "
|
238 |
+
else:
|
239 |
+
str_start = " + "
|
240 |
+
ol.extend([str_start, arg_str, " ",
|
241 |
+
mpp.doprint(ar[i][1]),
|
242 |
+
bar,
|
243 |
+
mpp.doprint(ar[i][2])])
|
244 |
+
|
245 |
+
outstr = "".join(ol)
|
246 |
+
if outstr.startswith(" + "):
|
247 |
+
outstr = outstr[3:]
|
248 |
+
elif outstr.startswith(" "):
|
249 |
+
outstr = outstr[1:]
|
250 |
+
return outstr
|
251 |
+
return Fake()
|
252 |
+
|
253 |
+
def __rand__(self, other):
|
254 |
+
"""The inner product operator for a Vector or Dyadic, and a Dyadic
|
255 |
+
|
256 |
+
This is for: Vector dot Dyadic
|
257 |
+
|
258 |
+
Parameters
|
259 |
+
==========
|
260 |
+
|
261 |
+
other : Vector
|
262 |
+
The vector we are dotting with
|
263 |
+
|
264 |
+
Examples
|
265 |
+
========
|
266 |
+
|
267 |
+
>>> from sympy.physics.vector import ReferenceFrame, dot, outer
|
268 |
+
>>> N = ReferenceFrame('N')
|
269 |
+
>>> d = outer(N.x, N.x)
|
270 |
+
>>> dot(N.x, d)
|
271 |
+
N.x
|
272 |
+
|
273 |
+
"""
|
274 |
+
|
275 |
+
from sympy.physics.vector.vector import Vector, _check_vector
|
276 |
+
other = _check_vector(other)
|
277 |
+
ol = Vector(0)
|
278 |
+
for i, v in enumerate(self.args):
|
279 |
+
ol += v[0] * v[2] * (v[1] & other)
|
280 |
+
return ol
|
281 |
+
|
282 |
+
def __rsub__(self, other):
|
283 |
+
return (-1 * self) + other
|
284 |
+
|
285 |
+
def __rxor__(self, other):
|
286 |
+
"""For a cross product in the form: Vector x Dyadic
|
287 |
+
|
288 |
+
Parameters
|
289 |
+
==========
|
290 |
+
|
291 |
+
other : Vector
|
292 |
+
The Vector that we are crossing this Dyadic with
|
293 |
+
|
294 |
+
Examples
|
295 |
+
========
|
296 |
+
|
297 |
+
>>> from sympy.physics.vector import ReferenceFrame, outer, cross
|
298 |
+
>>> N = ReferenceFrame('N')
|
299 |
+
>>> d = outer(N.x, N.x)
|
300 |
+
>>> cross(N.y, d)
|
301 |
+
- (N.z|N.x)
|
302 |
+
|
303 |
+
"""
|
304 |
+
|
305 |
+
from sympy.physics.vector.vector import _check_vector
|
306 |
+
other = _check_vector(other)
|
307 |
+
ol = Dyadic(0)
|
308 |
+
for i, v in enumerate(self.args):
|
309 |
+
ol += v[0] * ((other ^ v[1]) | v[2])
|
310 |
+
return ol
|
311 |
+
|
312 |
+
def _sympystr(self, printer):
|
313 |
+
"""Printing method. """
|
314 |
+
ar = self.args # just to shorten things
|
315 |
+
if len(ar) == 0:
|
316 |
+
return printer._print(0)
|
317 |
+
ol = [] # output list, to be concatenated to a string
|
318 |
+
for i, v in enumerate(ar):
|
319 |
+
# if the coef of the dyadic is 1, we skip the 1
|
320 |
+
if ar[i][0] == 1:
|
321 |
+
ol.append(' + (' + printer._print(ar[i][1]) + '|' +
|
322 |
+
printer._print(ar[i][2]) + ')')
|
323 |
+
# if the coef of the dyadic is -1, we skip the 1
|
324 |
+
elif ar[i][0] == -1:
|
325 |
+
ol.append(' - (' + printer._print(ar[i][1]) + '|' +
|
326 |
+
printer._print(ar[i][2]) + ')')
|
327 |
+
# If the coefficient of the dyadic is not 1 or -1,
|
328 |
+
# we might wrap it in parentheses, for readability.
|
329 |
+
elif ar[i][0] != 0:
|
330 |
+
arg_str = printer._print(ar[i][0])
|
331 |
+
if isinstance(ar[i][0], Add):
|
332 |
+
arg_str = "(%s)" % arg_str
|
333 |
+
if arg_str[0] == '-':
|
334 |
+
arg_str = arg_str[1:]
|
335 |
+
str_start = ' - '
|
336 |
+
else:
|
337 |
+
str_start = ' + '
|
338 |
+
ol.append(str_start + arg_str + '*(' +
|
339 |
+
printer._print(ar[i][1]) +
|
340 |
+
'|' + printer._print(ar[i][2]) + ')')
|
341 |
+
outstr = ''.join(ol)
|
342 |
+
if outstr.startswith(' + '):
|
343 |
+
outstr = outstr[3:]
|
344 |
+
elif outstr.startswith(' '):
|
345 |
+
outstr = outstr[1:]
|
346 |
+
return outstr
|
347 |
+
|
348 |
+
def __sub__(self, other):
|
349 |
+
"""The subtraction operator. """
|
350 |
+
return self.__add__(other * -1)
|
351 |
+
|
352 |
+
def __xor__(self, other):
|
353 |
+
"""For a cross product in the form: Dyadic x Vector.
|
354 |
+
|
355 |
+
Parameters
|
356 |
+
==========
|
357 |
+
|
358 |
+
other : Vector
|
359 |
+
The Vector that we are crossing this Dyadic with
|
360 |
+
|
361 |
+
Examples
|
362 |
+
========
|
363 |
+
|
364 |
+
>>> from sympy.physics.vector import ReferenceFrame, outer, cross
|
365 |
+
>>> N = ReferenceFrame('N')
|
366 |
+
>>> d = outer(N.x, N.x)
|
367 |
+
>>> cross(d, N.y)
|
368 |
+
(N.x|N.z)
|
369 |
+
|
370 |
+
"""
|
371 |
+
|
372 |
+
from sympy.physics.vector.vector import _check_vector
|
373 |
+
other = _check_vector(other)
|
374 |
+
ol = Dyadic(0)
|
375 |
+
for i, v in enumerate(self.args):
|
376 |
+
ol += v[0] * (v[1] | (v[2] ^ other))
|
377 |
+
return ol
|
378 |
+
|
379 |
+
__radd__ = __add__
|
380 |
+
__rmul__ = __mul__
|
381 |
+
|
382 |
+
def express(self, frame1, frame2=None):
|
383 |
+
"""Expresses this Dyadic in alternate frame(s)
|
384 |
+
|
385 |
+
The first frame is the list side expression, the second frame is the
|
386 |
+
right side; if Dyadic is in form A.x|B.y, you can express it in two
|
387 |
+
different frames. If no second frame is given, the Dyadic is
|
388 |
+
expressed in only one frame.
|
389 |
+
|
390 |
+
Calls the global express function
|
391 |
+
|
392 |
+
Parameters
|
393 |
+
==========
|
394 |
+
|
395 |
+
frame1 : ReferenceFrame
|
396 |
+
The frame to express the left side of the Dyadic in
|
397 |
+
frame2 : ReferenceFrame
|
398 |
+
If provided, the frame to express the right side of the Dyadic in
|
399 |
+
|
400 |
+
Examples
|
401 |
+
========
|
402 |
+
|
403 |
+
>>> from sympy.physics.vector import ReferenceFrame, outer, dynamicsymbols
|
404 |
+
>>> from sympy.physics.vector import init_vprinting
|
405 |
+
>>> init_vprinting(pretty_print=False)
|
406 |
+
>>> N = ReferenceFrame('N')
|
407 |
+
>>> q = dynamicsymbols('q')
|
408 |
+
>>> B = N.orientnew('B', 'Axis', [q, N.z])
|
409 |
+
>>> d = outer(N.x, N.x)
|
410 |
+
>>> d.express(B, N)
|
411 |
+
cos(q)*(B.x|N.x) - sin(q)*(B.y|N.x)
|
412 |
+
|
413 |
+
"""
|
414 |
+
from sympy.physics.vector.functions import express
|
415 |
+
return express(self, frame1, frame2)
|
416 |
+
|
417 |
+
def to_matrix(self, reference_frame, second_reference_frame=None):
|
418 |
+
"""Returns the matrix form of the dyadic with respect to one or two
|
419 |
+
reference frames.
|
420 |
+
|
421 |
+
Parameters
|
422 |
+
----------
|
423 |
+
reference_frame : ReferenceFrame
|
424 |
+
The reference frame that the rows and columns of the matrix
|
425 |
+
correspond to. If a second reference frame is provided, this
|
426 |
+
only corresponds to the rows of the matrix.
|
427 |
+
second_reference_frame : ReferenceFrame, optional, default=None
|
428 |
+
The reference frame that the columns of the matrix correspond
|
429 |
+
to.
|
430 |
+
|
431 |
+
Returns
|
432 |
+
-------
|
433 |
+
matrix : ImmutableMatrix, shape(3,3)
|
434 |
+
The matrix that gives the 2D tensor form.
|
435 |
+
|
436 |
+
Examples
|
437 |
+
========
|
438 |
+
|
439 |
+
>>> from sympy import symbols
|
440 |
+
>>> from sympy.physics.vector import ReferenceFrame, Vector
|
441 |
+
>>> Vector.simp = True
|
442 |
+
>>> from sympy.physics.mechanics import inertia
|
443 |
+
>>> Ixx, Iyy, Izz, Ixy, Iyz, Ixz = symbols('Ixx, Iyy, Izz, Ixy, Iyz, Ixz')
|
444 |
+
>>> N = ReferenceFrame('N')
|
445 |
+
>>> inertia_dyadic = inertia(N, Ixx, Iyy, Izz, Ixy, Iyz, Ixz)
|
446 |
+
>>> inertia_dyadic.to_matrix(N)
|
447 |
+
Matrix([
|
448 |
+
[Ixx, Ixy, Ixz],
|
449 |
+
[Ixy, Iyy, Iyz],
|
450 |
+
[Ixz, Iyz, Izz]])
|
451 |
+
>>> beta = symbols('beta')
|
452 |
+
>>> A = N.orientnew('A', 'Axis', (beta, N.x))
|
453 |
+
>>> inertia_dyadic.to_matrix(A)
|
454 |
+
Matrix([
|
455 |
+
[ Ixx, Ixy*cos(beta) + Ixz*sin(beta), -Ixy*sin(beta) + Ixz*cos(beta)],
|
456 |
+
[ Ixy*cos(beta) + Ixz*sin(beta), Iyy*cos(2*beta)/2 + Iyy/2 + Iyz*sin(2*beta) - Izz*cos(2*beta)/2 + Izz/2, -Iyy*sin(2*beta)/2 + Iyz*cos(2*beta) + Izz*sin(2*beta)/2],
|
457 |
+
[-Ixy*sin(beta) + Ixz*cos(beta), -Iyy*sin(2*beta)/2 + Iyz*cos(2*beta) + Izz*sin(2*beta)/2, -Iyy*cos(2*beta)/2 + Iyy/2 - Iyz*sin(2*beta) + Izz*cos(2*beta)/2 + Izz/2]])
|
458 |
+
|
459 |
+
"""
|
460 |
+
|
461 |
+
if second_reference_frame is None:
|
462 |
+
second_reference_frame = reference_frame
|
463 |
+
|
464 |
+
return Matrix([i.dot(self).dot(j) for i in reference_frame for j in
|
465 |
+
second_reference_frame]).reshape(3, 3)
|
466 |
+
|
467 |
+
def doit(self, **hints):
    """Calls .doit() on each term in the Dyadic"""
    # Apply .doit() to every measure number, leaving the basis-vector
    # pair of each term untouched, and rebuild the Dyadic term by term.
    result = Dyadic(0)
    for measure, basis1, basis2 in self.args:
        result += Dyadic([(measure.doit(**hints), basis1, basis2)])
    return result
|
471 |
+
|
472 |
+
def dt(self, frame):
    """Take the time derivative of this Dyadic in a frame.

    This function calls the global time_derivative method

    Parameters
    ==========

    frame : ReferenceFrame
        The frame to take the time derivative in

    Examples
    ========

    >>> from sympy.physics.vector import ReferenceFrame, outer, dynamicsymbols
    >>> from sympy.physics.vector import init_vprinting
    >>> init_vprinting(pretty_print=False)
    >>> N = ReferenceFrame('N')
    >>> q = dynamicsymbols('q')
    >>> B = N.orientnew('B', 'Axis', [q, N.z])
    >>> d = outer(N.x, N.x)
    >>> d.dt(B)
    - q'*(N.y|N.x) - q'*(N.x|N.y)

    """
    # Imported locally to avoid a circular import at module load time.
    from sympy.physics.vector.functions import time_derivative
    derivative = time_derivative(self, frame)
    return derivative
|
499 |
+
|
500 |
+
def simplify(self):
    """Returns a simplified Dyadic."""
    # Simplify each term's measure number independently; the Dyadic(0)
    # start value lets sum() combine the single-term Dyadics.
    return sum((Dyadic([(measure.simplify(), basis1, basis2)])
                for measure, basis1, basis2 in self.args), Dyadic(0))
|
506 |
+
|
507 |
+
def subs(self, *args, **kwargs):
    """Substitution on the Dyadic.

    Examples
    ========

    >>> from sympy.physics.vector import ReferenceFrame
    >>> from sympy import Symbol
    >>> N = ReferenceFrame('N')
    >>> s = Symbol('s')
    >>> a = s*(N.x|N.x)
    >>> a.subs({s: 2})
    2*(N.x|N.x)

    """
    # Substitution only touches the measure numbers; the basis-vector
    # pairs of each term are carried through unchanged.
    result = Dyadic(0)
    for measure, basis1, basis2 in self.args:
        result += Dyadic([(measure.subs(*args, **kwargs), basis1, basis2)])
    return result
|
525 |
+
|
526 |
+
def applyfunc(self, f):
    """Apply a function to each component of a Dyadic."""
    if not callable(f):
        raise TypeError("`f` must be callable.")

    # Map f over every measure number, re-pairing it with the term's
    # outer product of basis vectors; Dyadic(0) seeds the summation.
    return sum((f(measure) * (basis1 | basis2)
                for measure, basis1, basis2 in self.args), Dyadic(0))
|
535 |
+
|
536 |
+
# Named aliases for the operator implementations, so callers can write
# d.dot(other) / d.cross(other) instead of d & other / d ^ other.
dot = __and__
cross = __xor__
|
538 |
+
|
539 |
+
def _eval_evalf(self, prec):
    # Numerically evaluate only the measure numbers (args[i][0]);
    # the basis vectors in each term are left symbolic.
    if not self.args:
        return self
    digits = prec_to_dps(prec)
    evaluated = [(measure.evalf(n=digits),) + tuple(rest)
                 for measure, *rest in self.args]
    return Dyadic(evaluated)
|
549 |
+
|
550 |
+
def xreplace(self, rule):
    """
    Replace occurrences of objects within the measure numbers of the
    Dyadic.

    Parameters
    ==========

    rule : dict-like
        Expresses a replacement rule.

    Returns
    =======

    Dyadic
        Result of the replacement.

    Examples
    ========

    >>> from sympy import symbols, pi
    >>> from sympy.physics.vector import ReferenceFrame, outer
    >>> N = ReferenceFrame('N')
    >>> D = outer(N.x, N.x)
    >>> x, y, z = symbols('x y z')
    >>> ((1 + x*y) * D).xreplace({x: pi})
    (pi*y + 1)*(N.x|N.x)
    >>> ((1 + x*y) * D).xreplace({x: pi, y: 2})
    (1 + 2*pi)*(N.x|N.x)

    Replacements occur only if an entire node in the expression tree is
    matched:

    >>> ((x*y + z) * D).xreplace({x*y: pi})
    (z + pi)*(N.x|N.x)
    >>> ((x*y*z) * D).xreplace({x*y: pi})
    x*y*z*(N.x|N.x)

    """
    # Only the measure number of each term is rewritten; the basis
    # vectors are structural and pass through unchanged.
    return Dyadic([(measure.xreplace(rule),) + tuple(rest)
                   for measure, *rest in self.args])
|
596 |
+
|
597 |
+
|
598 |
+
def _check_dyadic(other):
    """Return *other* unchanged if it is a Dyadic, else raise TypeError."""
    if isinstance(other, Dyadic):
        return other
    raise TypeError('A Dyadic must be supplied')
|