problem_id (string, lengths 18-22) | source (string, 1 distinct value) | task_type (string, 1 distinct value) | in_source_id (string, lengths 13-58) | prompt (string, lengths 1.1k-25.4k) | golden_diff (string, lengths 145-5.13k) | verification_info (string, lengths 582-39.1k) | num_tokens (int64, 271-4.1k) | num_tokens_diff (int64, 47-1.02k)
---|---|---|---|---|---|---|---|---|
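A minimal sketch of loading the dataset behind this preview and inspecting one row with the `datasets` library. The hub id and split below are assumptions read off the `source` column; substitute the actual values if the dataset is published under a different name.

```python
from datasets import load_dataset

# Assumed hub id (taken from the `source` column) and split; adjust if needed.
ds = load_dataset("rasdani/github-patches", split="train")

print(ds.column_names)
# expected, per the header above:
# ['problem_id', 'source', 'task_type', 'in_source_id', 'prompt',
#  'golden_diff', 'verification_info', 'num_tokens', 'num_tokens_diff']

row = ds[0]
print(row["problem_id"], row["in_source_id"], row["num_tokens"])
```

The preview rows below show what each of these cells looks like in full.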
gh_patches_debug_12862 | rasdani/github-patches | git_diff | pyro-ppl__numpyro-987 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add required packages at the top of tutorials to run on colab
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright Contributors to the Pyro project.
2 # SPDX-License-Identifier: Apache-2.0
3
4 from __future__ import absolute_import, division, print_function
5
6 import os
7 import sys
8
9 from setuptools import find_packages, setup
10
11 PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))
12
13 # Find version
14 for line in open(os.path.join(PROJECT_PATH, "numpyro", "version.py")):
15 if line.startswith("__version__ = "):
16 version = line.strip().split()[2][1:-1]
17
18 # READ README.md for long description on PyPi.
19 try:
20 long_description = open("README.md", encoding="utf-8").read()
21 except Exception as e:
22 sys.stderr.write("Failed to read README.md:\n {}\n".format(e))
23 sys.stderr.flush()
24 long_description = ""
25
26
27 setup(
28 name="numpyro",
29 version=version,
30 description="Pyro PPL on NumPy",
31 packages=find_packages(include=["numpyro", "numpyro.*"]),
32 url="https://github.com/pyro-ppl/numpyro",
33 author="Uber AI Labs",
34 install_requires=[
35 "jax>=0.2.11",
36 "jaxlib>=0.1.62",
37 "tqdm",
38 ],
39 extras_require={
40 "doc": [
41 "ipython", # sphinx needs this to render codes
42 "nbsphinx",
43 "sphinx",
44 "sphinx_rtd_theme",
45 "sphinx-gallery",
46 ],
47 "test": [
48 "black",
49 "flake8",
50 "isort>=5.0",
51 "pytest>=4.1",
52 "pyro-api>=0.1.1",
53 "scipy>=1.1",
54 ],
55 "dev": [
56 "dm-haiku",
57 "flax",
58 "funsor @ git+https://github.com/pyro-ppl/funsor.git@d5574988665dd822ec64e41f2b54b9dc929959dc",
59 "graphviz",
60 "tensorflow_probability",
61 ],
62 "examples": ["arviz", "jupyter", "matplotlib", "pandas", "seaborn"],
63 },
64 long_description=long_description,
65 long_description_content_type="text/markdown",
66 keywords="probabilistic machine learning bayesian statistics",
67 license="Apache License 2.0",
68 classifiers=[
69 "Intended Audience :: Developers",
70 "Intended Audience :: Education",
71 "Intended Audience :: Science/Research",
72 "License :: OSI Approved :: Apache Software License",
73 "Operating System :: POSIX :: Linux",
74 "Operating System :: MacOS :: MacOS X",
75 "Programming Language :: Python :: 3.6",
76 "Programming Language :: Python :: 3.7",
77 "Programming Language :: Python :: 3.8",
78 "Programming Language :: Python :: 3.9",
79 ],
80 )
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -55,9 +55,12 @@
"dev": [
"dm-haiku",
"flax",
+ # TODO: bump funsor version before the release
"funsor @ git+https://github.com/pyro-ppl/funsor.git@d5574988665dd822ec64e41f2b54b9dc929959dc",
"graphviz",
- "tensorflow_probability",
+ # TODO: change this to tensorflow_probability>0.12.1 when the next version
+ # of tfp is released. The current release is not compatible with jax>=0.2.12.
+ "tfp-nightly",
],
"examples": ["arviz", "jupyter", "matplotlib", "pandas", "seaborn"],
},
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -55,9 +55,12 @@\n \"dev\": [\n \"dm-haiku\",\n \"flax\",\n+ # TODO: bump funsor version before the release\n \"funsor @ git+https://github.com/pyro-ppl/funsor.git@d5574988665dd822ec64e41f2b54b9dc929959dc\",\n \"graphviz\",\n- \"tensorflow_probability\",\n+ # TODO: change this to tensorflow_probability>0.12.1 when the next version\n+ # of tfp is released. The current release is not compatible with jax>=0.2.12.\n+ \"tfp-nightly\",\n ],\n \"examples\": [\"arviz\", \"jupyter\", \"matplotlib\", \"pandas\", \"seaborn\"],\n },\n", "issue": "Add required packages at the top of tutorials to run on colab\n\n", "before_files": [{"content": "# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\n\nfrom setuptools import find_packages, setup\n\nPROJECT_PATH = os.path.dirname(os.path.abspath(__file__))\n\n# Find version\nfor line in open(os.path.join(PROJECT_PATH, \"numpyro\", \"version.py\")):\n if line.startswith(\"__version__ = \"):\n version = line.strip().split()[2][1:-1]\n\n# READ README.md for long description on PyPi.\ntry:\n long_description = open(\"README.md\", encoding=\"utf-8\").read()\nexcept Exception as e:\n sys.stderr.write(\"Failed to read README.md:\\n {}\\n\".format(e))\n sys.stderr.flush()\n long_description = \"\"\n\n\nsetup(\n name=\"numpyro\",\n version=version,\n description=\"Pyro PPL on NumPy\",\n packages=find_packages(include=[\"numpyro\", \"numpyro.*\"]),\n url=\"https://github.com/pyro-ppl/numpyro\",\n author=\"Uber AI Labs\",\n install_requires=[\n \"jax>=0.2.11\",\n \"jaxlib>=0.1.62\",\n \"tqdm\",\n ],\n extras_require={\n \"doc\": [\n \"ipython\", # sphinx needs this to render codes\n \"nbsphinx\",\n \"sphinx\",\n \"sphinx_rtd_theme\",\n \"sphinx-gallery\",\n ],\n \"test\": [\n \"black\",\n \"flake8\",\n \"isort>=5.0\",\n \"pytest>=4.1\",\n \"pyro-api>=0.1.1\",\n \"scipy>=1.1\",\n ],\n \"dev\": [\n \"dm-haiku\",\n \"flax\",\n \"funsor @ git+https://github.com/pyro-ppl/funsor.git@d5574988665dd822ec64e41f2b54b9dc929959dc\",\n \"graphviz\",\n \"tensorflow_probability\",\n ],\n \"examples\": [\"arviz\", \"jupyter\", \"matplotlib\", \"pandas\", \"seaborn\"],\n },\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n keywords=\"probabilistic machine learning bayesian statistics\",\n license=\"Apache License 2.0\",\n classifiers=[\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\n\nfrom setuptools import find_packages, setup\n\nPROJECT_PATH = os.path.dirname(os.path.abspath(__file__))\n\n# Find version\nfor line in open(os.path.join(PROJECT_PATH, \"numpyro\", \"version.py\")):\n if line.startswith(\"__version__ = \"):\n version = line.strip().split()[2][1:-1]\n\n# READ README.md for long description on 
PyPi.\ntry:\n long_description = open(\"README.md\", encoding=\"utf-8\").read()\nexcept Exception as e:\n sys.stderr.write(\"Failed to read README.md:\\n {}\\n\".format(e))\n sys.stderr.flush()\n long_description = \"\"\n\n\nsetup(\n name=\"numpyro\",\n version=version,\n description=\"Pyro PPL on NumPy\",\n packages=find_packages(include=[\"numpyro\", \"numpyro.*\"]),\n url=\"https://github.com/pyro-ppl/numpyro\",\n author=\"Uber AI Labs\",\n install_requires=[\n \"jax>=0.2.11\",\n \"jaxlib>=0.1.62\",\n \"tqdm\",\n ],\n extras_require={\n \"doc\": [\n \"ipython\", # sphinx needs this to render codes\n \"nbsphinx\",\n \"sphinx\",\n \"sphinx_rtd_theme\",\n \"sphinx-gallery\",\n ],\n \"test\": [\n \"black\",\n \"flake8\",\n \"isort>=5.0\",\n \"pytest>=4.1\",\n \"pyro-api>=0.1.1\",\n \"scipy>=1.1\",\n ],\n \"dev\": [\n \"dm-haiku\",\n \"flax\",\n # TODO: bump funsor version before the release\n \"funsor @ git+https://github.com/pyro-ppl/funsor.git@d5574988665dd822ec64e41f2b54b9dc929959dc\",\n \"graphviz\",\n # TODO: change this to tensorflow_probability>0.12.1 when the next version\n # of tfp is released. The current release is not compatible with jax>=0.2.12.\n \"tfp-nightly\",\n ],\n \"examples\": [\"arviz\", \"jupyter\", \"matplotlib\", \"pandas\", \"seaborn\"],\n },\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n keywords=\"probabilistic machine learning bayesian statistics\",\n license=\"Apache License 2.0\",\n classifiers=[\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n ],\n)\n", "path": "setup.py"}]} | 1,063 | 216 |
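The `verification_info` cell of the row above is a JSON string that bundles the issue text, the gold patch, and the file contents before and after the fix. A short sketch of unpacking it, assuming `row` is one record loaded as in the sketch above:

```python
import json

info = json.loads(row["verification_info"])

print(sorted(info))  # ['after_files', 'before_files', 'golden_diff', 'issue']

for f in info["before_files"]:
    print("before:", f["path"], len(f["content"]), "characters")
for f in info["after_files"]:
    print("after: ", f["path"], len(f["content"]), "characters")
```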
gh_patches_debug_18571 | rasdani/github-patches | git_diff | urllib3__urllib3-823 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cannot run tests on Ubuntu 14.04 LTS + Python 3
#### Repro Steps
```
# (Install Docker from: http://www.docker.com )
# If OS X, start with:
$ docker-machine start default; eval "$(docker-machine env default)"
# If OS X or Linux, continue with:
$ docker run -it ubuntu:trusty # Ubuntu 14.04 LTS
$$ apt-get update
$$ apt-get install git -y
$$ apt-get install python3-pip -y
$$ pip3 install virtualenv
$$ git clone https://github.com/shazow/urllib3
$$ cd urllib3/
$$ virtualenv venv
$$ source venv/bin/activate
$$ make test
```
#### Output
```
python setup.py develop
Traceback (most recent call last):
File "setup.py", line 23, in <module>
long_description=open('README.rst').read() + '\n\n' + open('CHANGES.rst').read(),
File "/urllib3/venv/lib/python3.4/encodings/ascii.py", line 26, in decode
return codecs.ascii_decode(input, self.errors)[0]
UnicodeDecodeError: 'ascii' codec can't decode byte 0xf0 in position 4597: ordinal not in range(128)
make: *** [*.egg-info] Error 1
```
#### Notes
My best guess is that the `read()`s in setup.py should have an encoding specified. Probably UTF-8.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 from setuptools import setup
4
5 import os
6 import re
7
8
9 base_path = os.path.dirname(__file__)
10
11 # Get the version (borrowed from SQLAlchemy)
12 fp = open(os.path.join(base_path, 'urllib3', '__init__.py'))
13 VERSION = re.compile(r".*__version__ = '(.*?)'",
14 re.S).match(fp.read()).group(1)
15 fp.close()
16
17
18 version = VERSION
19
20 setup(name='urllib3',
21 version=version,
22 description="HTTP library with thread-safe connection pooling, file post, and more.",
23 long_description=open('README.rst').read() + '\n\n' + open('CHANGES.rst').read(),
24 classifiers=[
25 'Environment :: Web Environment',
26 'Intended Audience :: Developers',
27 'License :: OSI Approved :: MIT License',
28 'Operating System :: OS Independent',
29 'Programming Language :: Python',
30 'Programming Language :: Python :: 2',
31 'Programming Language :: Python :: 3',
32 'Topic :: Internet :: WWW/HTTP',
33 'Topic :: Software Development :: Libraries',
34 ],
35 keywords='urllib httplib threadsafe filepost http https ssl pooling',
36 author='Andrey Petrov',
37 author_email='[email protected]',
38 url='http://urllib3.readthedocs.org/',
39 license='MIT',
40 packages=['urllib3',
41 'urllib3.packages', 'urllib3.packages.ssl_match_hostname',
42 'urllib3.contrib', 'urllib3.util',
43 ],
44 requires=[],
45 tests_require=[
46 # These are a less-specific subset of dev-requirements.txt, for the
47 # convenience of distro package maintainers.
48 'nose',
49 'mock',
50 'tornado',
51 ],
52 test_suite='test',
53 extras_require={
54 'secure': [
55 'pyOpenSSL>=0.13',
56 'ndg-httpsclient',
57 'pyasn1',
58 'certifi',
59 ],
60 'socks': [
61 'PySocks>=1.5.6,<2.0',
62 ]
63 },
64 )
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -4,7 +4,7 @@
import os
import re
-
+import codecs
base_path = os.path.dirname(__file__)
@@ -14,13 +14,14 @@
re.S).match(fp.read()).group(1)
fp.close()
-
+readme = codecs.open('README.rst', encoding='utf-8').read()
+changes = codecs.open('README.rst', encoding='utf-8').read()
version = VERSION
setup(name='urllib3',
version=version,
description="HTTP library with thread-safe connection pooling, file post, and more.",
- long_description=open('README.rst').read() + '\n\n' + open('CHANGES.rst').read(),
+ long_description=u'\n\n'.join([readme, changes]),
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -4,7 +4,7 @@\n \n import os\n import re\n-\n+import codecs\n \n base_path = os.path.dirname(__file__)\n \n@@ -14,13 +14,14 @@\n re.S).match(fp.read()).group(1)\n fp.close()\n \n-\n+readme = codecs.open('README.rst', encoding='utf-8').read()\n+changes = codecs.open('README.rst', encoding='utf-8').read()\n version = VERSION\n \n setup(name='urllib3',\n version=version,\n description=\"HTTP library with thread-safe connection pooling, file post, and more.\",\n- long_description=open('README.rst').read() + '\\n\\n' + open('CHANGES.rst').read(),\n+ long_description=u'\\n\\n'.join([readme, changes]),\n classifiers=[\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n", "issue": "Cannot run tests on Ubuntu 14.04 LTS + Python 3\n#### Repro Steps\n\n```\n# (Install Docker from: http://www.docker.com )\n\n# If OS X, start with:\n$ docker-machine start default; eval \"$(docker-machine env default)\"\n\n# If OS X or Linux, continue with:\n$ docker run -it ubuntu:trusty # Ubuntu 14.04 LTS\n$$ apt-get update\n$$ apt-get install git -y\n$$ apt-get install python3-pip -y\n$$ pip3 install virtualenv\n$$ git clone https://github.com/shazow/urllib3\n$$ cd urllib3/\n$$ virtualenv venv\n$$ source venv/bin/activate\n$$ make test\n```\n#### Output\n\n```\npython setup.py develop\nTraceback (most recent call last):\n File \"setup.py\", line 23, in <module>\n long_description=open('README.rst').read() + '\\n\\n' + open('CHANGES.rst').read(),\n File \"/urllib3/venv/lib/python3.4/encodings/ascii.py\", line 26, in decode\n return codecs.ascii_decode(input, self.errors)[0]\nUnicodeDecodeError: 'ascii' codec can't decode byte 0xf0 in position 4597: ordinal not in range(128)\nmake: *** [*.egg-info] Error 1\n```\n#### Notes\n\nMy best guess is that the `read()`s in setup.py should have an encoding specified. 
Probably UTF-8.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup\n\nimport os\nimport re\n\n\nbase_path = os.path.dirname(__file__)\n\n# Get the version (borrowed from SQLAlchemy)\nfp = open(os.path.join(base_path, 'urllib3', '__init__.py'))\nVERSION = re.compile(r\".*__version__ = '(.*?)'\",\n re.S).match(fp.read()).group(1)\nfp.close()\n\n\nversion = VERSION\n\nsetup(name='urllib3',\n version=version,\n description=\"HTTP library with thread-safe connection pooling, file post, and more.\",\n long_description=open('README.rst').read() + '\\n\\n' + open('CHANGES.rst').read(),\n classifiers=[\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries',\n ],\n keywords='urllib httplib threadsafe filepost http https ssl pooling',\n author='Andrey Petrov',\n author_email='[email protected]',\n url='http://urllib3.readthedocs.org/',\n license='MIT',\n packages=['urllib3',\n 'urllib3.packages', 'urllib3.packages.ssl_match_hostname',\n 'urllib3.contrib', 'urllib3.util',\n ],\n requires=[],\n tests_require=[\n # These are a less-specific subset of dev-requirements.txt, for the\n # convenience of distro package maintainers.\n 'nose',\n 'mock',\n 'tornado',\n ],\n test_suite='test',\n extras_require={\n 'secure': [\n 'pyOpenSSL>=0.13',\n 'ndg-httpsclient',\n 'pyasn1',\n 'certifi',\n ],\n 'socks': [\n 'PySocks>=1.5.6,<2.0',\n ]\n },\n )\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup\n\nimport os\nimport re\nimport codecs\n\nbase_path = os.path.dirname(__file__)\n\n# Get the version (borrowed from SQLAlchemy)\nfp = open(os.path.join(base_path, 'urllib3', '__init__.py'))\nVERSION = re.compile(r\".*__version__ = '(.*?)'\",\n re.S).match(fp.read()).group(1)\nfp.close()\n\nreadme = codecs.open('README.rst', encoding='utf-8').read()\nchanges = codecs.open('README.rst', encoding='utf-8').read()\nversion = VERSION\n\nsetup(name='urllib3',\n version=version,\n description=\"HTTP library with thread-safe connection pooling, file post, and more.\",\n long_description=u'\\n\\n'.join([readme, changes]),\n classifiers=[\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries',\n ],\n keywords='urllib httplib threadsafe filepost http https ssl pooling',\n author='Andrey Petrov',\n author_email='[email protected]',\n url='http://urllib3.readthedocs.org/',\n license='MIT',\n packages=['urllib3',\n 'urllib3.packages', 'urllib3.packages.ssl_match_hostname',\n 'urllib3.contrib', 'urllib3.util',\n ],\n requires=[],\n tests_require=[\n # These are a less-specific subset of dev-requirements.txt, for the\n # convenience of distro package maintainers.\n 'nose',\n 'mock',\n 'tornado',\n ],\n test_suite='test',\n extras_require={\n 'secure': [\n 'pyOpenSSL>=0.13',\n 'ndg-httpsclient',\n 'pyasn1',\n 'certifi',\n ],\n 'socks': [\n 'PySocks>=1.5.6,<2.0',\n ]\n },\n )\n", "path": "setup.py"}]} | 1,168 | 214 |
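One way to verify a patch for a row such as the one above: recreate `before_files` in a scratch git checkout, apply the diff, and compare the result against `after_files`. This is only a sketch. It assumes `git` is on PATH and that the diff's `a/` and `b/` prefixes strip with git's default `-p1` handling; the dataset's own verifier, if it ships one, may work differently.

```python
import json
import subprocess
import tempfile
from pathlib import Path

def apply_patch(row, diff=None):
    """Recreate before_files in a temp repo, apply a diff, return the patched file texts."""
    info = json.loads(row["verification_info"])
    diff = diff if diff is not None else row["golden_diff"]
    with tempfile.TemporaryDirectory() as repo:
        subprocess.run(["git", "init", "-q", repo], check=True)
        for f in info["before_files"]:
            target = Path(repo, f["path"])
            target.parent.mkdir(parents=True, exist_ok=True)
            target.write_text(f["content"], encoding="utf-8")
        # `git apply` reads the unified diff from stdin when passed "-".
        subprocess.run(["git", "-C", repo, "apply", "-"],
                       input=diff, text=True, check=True)
        return {f["path"]: Path(repo, f["path"]).read_text(encoding="utf-8")
                for f in info["after_files"]}

# Applying the gold diff should reproduce after_files exactly:
# patched = apply_patch(ds[0])
# assert patched == {f["path"]: f["content"]
#                    for f in json.loads(ds[0]["verification_info"])["after_files"]}
```

A model-generated diff can be checked the same way by passing it as the `diff` argument.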
gh_patches_debug_9009 | rasdani/github-patches | git_diff | CTFd__CTFd-2091 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Page preview doesn't consider format
Page preview needs to take into account format when previewing
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `CTFd/admin/pages.py`
Content:
```
1 from flask import render_template, request
2
3 from CTFd.admin import admin
4 from CTFd.models import Pages
5 from CTFd.schemas.pages import PageSchema
6 from CTFd.utils import markdown
7 from CTFd.utils.decorators import admins_only
8
9
10 @admin.route("/admin/pages")
11 @admins_only
12 def pages_listing():
13 pages = Pages.query.all()
14 return render_template("admin/pages.html", pages=pages)
15
16
17 @admin.route("/admin/pages/new")
18 @admins_only
19 def pages_new():
20 return render_template("admin/editor.html")
21
22
23 @admin.route("/admin/pages/preview", methods=["POST"])
24 @admins_only
25 def pages_preview():
26 # We only care about content.
27 # Loading other attributes improperly will cause Marshmallow to incorrectly return a dict
28 data = {"content": request.form.get("content")}
29 schema = PageSchema()
30 page = schema.load(data)
31 return render_template("page.html", content=page.data.html)
32
33
34 @admin.route("/admin/pages/<int:page_id>")
35 @admins_only
36 def pages_detail(page_id):
37 page = Pages.query.filter_by(id=page_id).first_or_404()
38 page_op = request.args.get("operation")
39
40 if request.method == "GET" and page_op == "preview":
41 return render_template("page.html", content=markdown(page.content))
42
43 if request.method == "GET" and page_op == "create":
44 return render_template("admin/editor.html")
45
46 return render_template("admin/editor.html", page=page)
47
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/CTFd/admin/pages.py b/CTFd/admin/pages.py
--- a/CTFd/admin/pages.py
+++ b/CTFd/admin/pages.py
@@ -25,7 +25,10 @@
def pages_preview():
# We only care about content.
# Loading other attributes improperly will cause Marshmallow to incorrectly return a dict
- data = {"content": request.form.get("content")}
+ data = {
+ "content": request.form.get("content"),
+ "format": request.form.get("format"),
+ }
schema = PageSchema()
page = schema.load(data)
return render_template("page.html", content=page.data.html)
| {"golden_diff": "diff --git a/CTFd/admin/pages.py b/CTFd/admin/pages.py\n--- a/CTFd/admin/pages.py\n+++ b/CTFd/admin/pages.py\n@@ -25,7 +25,10 @@\n def pages_preview():\n # We only care about content.\n # Loading other attributes improperly will cause Marshmallow to incorrectly return a dict\n- data = {\"content\": request.form.get(\"content\")}\n+ data = {\n+ \"content\": request.form.get(\"content\"),\n+ \"format\": request.form.get(\"format\"),\n+ }\n schema = PageSchema()\n page = schema.load(data)\n return render_template(\"page.html\", content=page.data.html)\n", "issue": "Page preview doesn't consider format\nPage preview needs to take into account format when previewing\n", "before_files": [{"content": "from flask import render_template, request\n\nfrom CTFd.admin import admin\nfrom CTFd.models import Pages\nfrom CTFd.schemas.pages import PageSchema\nfrom CTFd.utils import markdown\nfrom CTFd.utils.decorators import admins_only\n\n\[email protected](\"/admin/pages\")\n@admins_only\ndef pages_listing():\n pages = Pages.query.all()\n return render_template(\"admin/pages.html\", pages=pages)\n\n\[email protected](\"/admin/pages/new\")\n@admins_only\ndef pages_new():\n return render_template(\"admin/editor.html\")\n\n\[email protected](\"/admin/pages/preview\", methods=[\"POST\"])\n@admins_only\ndef pages_preview():\n # We only care about content.\n # Loading other attributes improperly will cause Marshmallow to incorrectly return a dict\n data = {\"content\": request.form.get(\"content\")}\n schema = PageSchema()\n page = schema.load(data)\n return render_template(\"page.html\", content=page.data.html)\n\n\[email protected](\"/admin/pages/<int:page_id>\")\n@admins_only\ndef pages_detail(page_id):\n page = Pages.query.filter_by(id=page_id).first_or_404()\n page_op = request.args.get(\"operation\")\n\n if request.method == \"GET\" and page_op == \"preview\":\n return render_template(\"page.html\", content=markdown(page.content))\n\n if request.method == \"GET\" and page_op == \"create\":\n return render_template(\"admin/editor.html\")\n\n return render_template(\"admin/editor.html\", page=page)\n", "path": "CTFd/admin/pages.py"}], "after_files": [{"content": "from flask import render_template, request\n\nfrom CTFd.admin import admin\nfrom CTFd.models import Pages\nfrom CTFd.schemas.pages import PageSchema\nfrom CTFd.utils import markdown\nfrom CTFd.utils.decorators import admins_only\n\n\[email protected](\"/admin/pages\")\n@admins_only\ndef pages_listing():\n pages = Pages.query.all()\n return render_template(\"admin/pages.html\", pages=pages)\n\n\[email protected](\"/admin/pages/new\")\n@admins_only\ndef pages_new():\n return render_template(\"admin/editor.html\")\n\n\[email protected](\"/admin/pages/preview\", methods=[\"POST\"])\n@admins_only\ndef pages_preview():\n # We only care about content.\n # Loading other attributes improperly will cause Marshmallow to incorrectly return a dict\n data = {\n \"content\": request.form.get(\"content\"),\n \"format\": request.form.get(\"format\"),\n }\n schema = PageSchema()\n page = schema.load(data)\n return render_template(\"page.html\", content=page.data.html)\n\n\[email protected](\"/admin/pages/<int:page_id>\")\n@admins_only\ndef pages_detail(page_id):\n page = Pages.query.filter_by(id=page_id).first_or_404()\n page_op = request.args.get(\"operation\")\n\n if request.method == \"GET\" and page_op == \"preview\":\n return render_template(\"page.html\", content=markdown(page.content))\n\n if request.method == \"GET\" and page_op == 
\"create\":\n return render_template(\"admin/editor.html\")\n\n return render_template(\"admin/editor.html\", page=page)\n", "path": "CTFd/admin/pages.py"}]} | 689 | 147 |
gh_patches_debug_1355 | rasdani/github-patches | git_diff | qtile__qtile-2707 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BitcoinTicker migration does not work
```
/tmp/crypto cat config.py
from libqtile.widget import BitcoinTicker
test = BitcoinTicker()
/tmp/crypto qtile migrate -c config.py
Config unchanged.
/tmp/crypto cat config.py
from libqtile.widget import BitcoinTicker
test = BitcoinTicker()
```
/cc @Graeme22
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libqtile/scripts/migrate.py`
Content:
```
1 # Copyright (c) 2021, Tycho Andersen. All rights reserved.
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a copy
4 # of this software and associated documentation files (the "Software"), to deal
5 # in the Software without restriction, including without limitation the rights
6 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 # copies of the Software, and to permit persons to whom the Software is
8 # furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19 import filecmp
20 import os
21 import os.path
22 import shutil
23 import sys
24 from glob import glob
25
26 BACKUP_SUFFIX = ".migrate.bak"
27
28 try:
29 import bowler
30 except ImportError:
31 pass
32
33
34 def rename_hook(query, fro, to):
35 # could match on dotted_name< 'hook' '.' 'subscribe' '.' '{name}' >
36 # but the replacement gets more complicated...
37 selector = "'{name}'".format(name=fro)
38 q = query.select_pattern(selector)
39 q.current.kwargs["name"] = fro
40 return q.rename(to)
41
42
43 def client_name_updated(query):
44 """ Rename window_name_change -> client_name_updated"""
45 return rename_hook(query, "window_name_change", "client_name_updated")
46
47
48 def tile_master_windows_rename(query):
49 return (
50 query
51 .select_function("Tile")
52 .modify_argument("masterWindows", "master_length")
53 )
54
55
56 def threaded_poll_text_rename(query):
57 return (
58 query
59 .select_class("ThreadedPollText")
60 .rename("ThreadPoolText")
61 )
62
63
64 def pacman_to_checkupdates(query):
65 return (
66 query
67 .select_class("Pacman")
68 .rename("CheckUpdates")
69 )
70
71
72 def bitcoin_to_crypto(query):
73 return (
74 query
75 .select_class("BitcoinTicker")
76 .rename("CryptoTicker")
77 )
78
79
80 def hook_main_function(query):
81 def modify_main(node, capture, filename):
82 main = capture.get("function_def")
83 if main.prev_sibling:
84 for leaf in main.prev_sibling.leaves():
85 if "startup" == leaf.value:
86 return
87 args = capture.get("function_arguments")
88 if args:
89 args[0].remove()
90 main.prefix += "from libqtile import hook, qtile\n"
91 main.prefix += "@hook.subscribe.startup\n"
92
93 return (
94 query
95 .select_function("main")
96 .is_def()
97 .modify(modify_main)
98 )
99
100
101 # Deprecated new_at_current key replaced by new_client_position.
102 # In the node, we want to change the key name
103 # and adapts its value depending of the previous value :
104 # new_at_current=True => new_client_position=before_current
105 # new_at_current<>True => new_client_position=after_current
106 def update_node_nac(node, capture, filename):
107 key = capture.get("k")
108 key.value = "new_client_position"
109 val = capture.get("v")
110 if val.value == "True":
111 val.value = "'before_current'"
112 else:
113 val.value = "'after_current'"
114
115
116 def new_at_current_to_new_client_position(query):
117 old_pattern = """
118 argument< k="new_at_current" "=" v=any >
119 """
120 return (
121 query
122 .select(old_pattern)
123 .modify(update_node_nac)
124 )
125
126
127 MIGRATIONS = [
128 client_name_updated,
129 tile_master_windows_rename,
130 threaded_poll_text_rename,
131 pacman_to_checkupdates,
132 hook_main_function,
133 new_at_current_to_new_client_position,
134 ]
135
136
137 MODULE_RENAMES = [
138 ("libqtile.command_graph", "libqtile.command.graph"),
139 ("libqtile.command_client", "libqtile.command.client"),
140 ("libqtile.command_interface", "libqtile.command.interface"),
141 ("libqtile.command_object", "libqtile.command.base"),
142 ("libqtile.window", "libqtile.backend.x11.window"),
143 ]
144
145 for (fro, to) in MODULE_RENAMES:
146 def f(query, fro=fro, to=to):
147 return (
148 query
149 .select_module(fro)
150 .rename(to)
151 )
152 MIGRATIONS.append(f)
153
154
155 def file_and_backup(config_dir):
156 for py in glob(os.path.join(config_dir, "*.py")):
157 backup = py + BACKUP_SUFFIX
158 yield py, backup
159
160
161 def do_migrate(args):
162 if "bowler" not in sys.modules:
163 print("bowler can't be found, not migrating config file")
164 print("install it and try again")
165 sys.exit(1)
166
167 config_dir = os.path.dirname(args.config)
168 for py, backup in file_and_backup(config_dir):
169 shutil.copyfile(py, backup)
170
171 for m in MIGRATIONS:
172 q = bowler.Query(config_dir)
173 m(q).execute(interactive=not args.yes, write=True)
174
175 changed = False
176 for py, backup in file_and_backup(config_dir):
177 backup = py + BACKUP_SUFFIX
178 if not filecmp.cmp(py, backup, shallow=False):
179 changed = True
180 break
181
182 if not changed:
183 print("Config unchanged.")
184 for _, backup in file_and_backup(config_dir):
185 os.remove(backup)
186
187
188 def add_subcommand(subparsers, parents):
189 parser = subparsers.add_parser(
190 "migrate",
191 parents=parents,
192 help="Migrate a configuration file to the current API"
193 )
194 parser.add_argument(
195 "-c",
196 "--config",
197 action="store",
198 default=os.path.expanduser(
199 os.path.join(os.getenv("XDG_CONFIG_HOME", "~/.config"), "qtile", "config.py")
200 ),
201 help="Use the specified configuration file (migrates every .py file in this directory)",
202 )
203 parser.add_argument(
204 "--yes",
205 action="store_true",
206 help="Automatically apply diffs with no confirmation",
207 )
208 parser.set_defaults(func=do_migrate)
209
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/libqtile/scripts/migrate.py b/libqtile/scripts/migrate.py
--- a/libqtile/scripts/migrate.py
+++ b/libqtile/scripts/migrate.py
@@ -129,6 +129,7 @@
tile_master_windows_rename,
threaded_poll_text_rename,
pacman_to_checkupdates,
+ bitcoin_to_crypto,
hook_main_function,
new_at_current_to_new_client_position,
]
| {"golden_diff": "diff --git a/libqtile/scripts/migrate.py b/libqtile/scripts/migrate.py\n--- a/libqtile/scripts/migrate.py\n+++ b/libqtile/scripts/migrate.py\n@@ -129,6 +129,7 @@\n tile_master_windows_rename,\n threaded_poll_text_rename,\n pacman_to_checkupdates,\n+ bitcoin_to_crypto,\n hook_main_function,\n new_at_current_to_new_client_position,\n ]\n", "issue": "BitcoinTicker migration does not work\n```\r\n/tmp/crypto cat config.py \r\nfrom libqtile.widget import BitcoinTicker\r\n\r\ntest = BitcoinTicker()\r\n/tmp/crypto qtile migrate -c config.py \r\nConfig unchanged.\r\n/tmp/crypto cat config.py \r\nfrom libqtile.widget import BitcoinTicker\r\n\r\ntest = BitcoinTicker()\r\n```\r\n\r\n/cc @Graeme22\n", "before_files": [{"content": "# Copyright (c) 2021, Tycho Andersen. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nimport filecmp\nimport os\nimport os.path\nimport shutil\nimport sys\nfrom glob import glob\n\nBACKUP_SUFFIX = \".migrate.bak\"\n\ntry:\n import bowler\nexcept ImportError:\n pass\n\n\ndef rename_hook(query, fro, to):\n # could match on dotted_name< 'hook' '.' 'subscribe' '.' 
'{name}' >\n # but the replacement gets more complicated...\n selector = \"'{name}'\".format(name=fro)\n q = query.select_pattern(selector)\n q.current.kwargs[\"name\"] = fro\n return q.rename(to)\n\n\ndef client_name_updated(query):\n \"\"\" Rename window_name_change -> client_name_updated\"\"\"\n return rename_hook(query, \"window_name_change\", \"client_name_updated\")\n\n\ndef tile_master_windows_rename(query):\n return (\n query\n .select_function(\"Tile\")\n .modify_argument(\"masterWindows\", \"master_length\")\n )\n\n\ndef threaded_poll_text_rename(query):\n return (\n query\n .select_class(\"ThreadedPollText\")\n .rename(\"ThreadPoolText\")\n )\n\n\ndef pacman_to_checkupdates(query):\n return (\n query\n .select_class(\"Pacman\")\n .rename(\"CheckUpdates\")\n )\n\n\ndef bitcoin_to_crypto(query):\n return (\n query\n .select_class(\"BitcoinTicker\")\n .rename(\"CryptoTicker\")\n )\n\n\ndef hook_main_function(query):\n def modify_main(node, capture, filename):\n main = capture.get(\"function_def\")\n if main.prev_sibling:\n for leaf in main.prev_sibling.leaves():\n if \"startup\" == leaf.value:\n return\n args = capture.get(\"function_arguments\")\n if args:\n args[0].remove()\n main.prefix += \"from libqtile import hook, qtile\\n\"\n main.prefix += \"@hook.subscribe.startup\\n\"\n\n return (\n query\n .select_function(\"main\")\n .is_def()\n .modify(modify_main)\n )\n\n\n# Deprecated new_at_current key replaced by new_client_position.\n# In the node, we want to change the key name\n# and adapts its value depending of the previous value :\n# new_at_current=True => new_client_position=before_current\n# new_at_current<>True => new_client_position=after_current\ndef update_node_nac(node, capture, filename):\n key = capture.get(\"k\")\n key.value = \"new_client_position\"\n val = capture.get(\"v\")\n if val.value == \"True\":\n val.value = \"'before_current'\"\n else:\n val.value = \"'after_current'\"\n\n\ndef new_at_current_to_new_client_position(query):\n old_pattern = \"\"\"\n argument< k=\"new_at_current\" \"=\" v=any >\n \"\"\"\n return (\n query\n .select(old_pattern)\n .modify(update_node_nac)\n )\n\n\nMIGRATIONS = [\n client_name_updated,\n tile_master_windows_rename,\n threaded_poll_text_rename,\n pacman_to_checkupdates,\n hook_main_function,\n new_at_current_to_new_client_position,\n]\n\n\nMODULE_RENAMES = [\n (\"libqtile.command_graph\", \"libqtile.command.graph\"),\n (\"libqtile.command_client\", \"libqtile.command.client\"),\n (\"libqtile.command_interface\", \"libqtile.command.interface\"),\n (\"libqtile.command_object\", \"libqtile.command.base\"),\n (\"libqtile.window\", \"libqtile.backend.x11.window\"),\n]\n\nfor (fro, to) in MODULE_RENAMES:\n def f(query, fro=fro, to=to):\n return (\n query\n .select_module(fro)\n .rename(to)\n )\n MIGRATIONS.append(f)\n\n\ndef file_and_backup(config_dir):\n for py in glob(os.path.join(config_dir, \"*.py\")):\n backup = py + BACKUP_SUFFIX\n yield py, backup\n\n\ndef do_migrate(args):\n if \"bowler\" not in sys.modules:\n print(\"bowler can't be found, not migrating config file\")\n print(\"install it and try again\")\n sys.exit(1)\n\n config_dir = os.path.dirname(args.config)\n for py, backup in file_and_backup(config_dir):\n shutil.copyfile(py, backup)\n\n for m in MIGRATIONS:\n q = bowler.Query(config_dir)\n m(q).execute(interactive=not args.yes, write=True)\n\n changed = False\n for py, backup in file_and_backup(config_dir):\n backup = py + BACKUP_SUFFIX\n if not filecmp.cmp(py, backup, shallow=False):\n changed = True\n break\n\n 
if not changed:\n print(\"Config unchanged.\")\n for _, backup in file_and_backup(config_dir):\n os.remove(backup)\n\n\ndef add_subcommand(subparsers, parents):\n parser = subparsers.add_parser(\n \"migrate\",\n parents=parents,\n help=\"Migrate a configuration file to the current API\"\n )\n parser.add_argument(\n \"-c\",\n \"--config\",\n action=\"store\",\n default=os.path.expanduser(\n os.path.join(os.getenv(\"XDG_CONFIG_HOME\", \"~/.config\"), \"qtile\", \"config.py\")\n ),\n help=\"Use the specified configuration file (migrates every .py file in this directory)\",\n )\n parser.add_argument(\n \"--yes\",\n action=\"store_true\",\n help=\"Automatically apply diffs with no confirmation\",\n )\n parser.set_defaults(func=do_migrate)\n", "path": "libqtile/scripts/migrate.py"}], "after_files": [{"content": "# Copyright (c) 2021, Tycho Andersen. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nimport filecmp\nimport os\nimport os.path\nimport shutil\nimport sys\nfrom glob import glob\n\nBACKUP_SUFFIX = \".migrate.bak\"\n\ntry:\n import bowler\nexcept ImportError:\n pass\n\n\ndef rename_hook(query, fro, to):\n # could match on dotted_name< 'hook' '.' 'subscribe' '.' 
'{name}' >\n # but the replacement gets more complicated...\n selector = \"'{name}'\".format(name=fro)\n q = query.select_pattern(selector)\n q.current.kwargs[\"name\"] = fro\n return q.rename(to)\n\n\ndef client_name_updated(query):\n \"\"\" Rename window_name_change -> client_name_updated\"\"\"\n return rename_hook(query, \"window_name_change\", \"client_name_updated\")\n\n\ndef tile_master_windows_rename(query):\n return (\n query\n .select_function(\"Tile\")\n .modify_argument(\"masterWindows\", \"master_length\")\n )\n\n\ndef threaded_poll_text_rename(query):\n return (\n query\n .select_class(\"ThreadedPollText\")\n .rename(\"ThreadPoolText\")\n )\n\n\ndef pacman_to_checkupdates(query):\n return (\n query\n .select_class(\"Pacman\")\n .rename(\"CheckUpdates\")\n )\n\n\ndef bitcoin_to_crypto(query):\n return (\n query\n .select_class(\"BitcoinTicker\")\n .rename(\"CryptoTicker\")\n )\n\n\ndef hook_main_function(query):\n def modify_main(node, capture, filename):\n main = capture.get(\"function_def\")\n if main.prev_sibling:\n for leaf in main.prev_sibling.leaves():\n if \"startup\" == leaf.value:\n return\n args = capture.get(\"function_arguments\")\n if args:\n args[0].remove()\n main.prefix += \"from libqtile import hook, qtile\\n\"\n main.prefix += \"@hook.subscribe.startup\\n\"\n\n return (\n query\n .select_function(\"main\")\n .is_def()\n .modify(modify_main)\n )\n\n\n# Deprecated new_at_current key replaced by new_client_position.\n# In the node, we want to change the key name\n# and adapts its value depending of the previous value :\n# new_at_current=True => new_client_position=before_current\n# new_at_current<>True => new_client_position=after_current\ndef update_node_nac(node, capture, filename):\n key = capture.get(\"k\")\n key.value = \"new_client_position\"\n val = capture.get(\"v\")\n if val.value == \"True\":\n val.value = \"'before_current'\"\n else:\n val.value = \"'after_current'\"\n\n\ndef new_at_current_to_new_client_position(query):\n old_pattern = \"\"\"\n argument< k=\"new_at_current\" \"=\" v=any >\n \"\"\"\n return (\n query\n .select(old_pattern)\n .modify(update_node_nac)\n )\n\n\nMIGRATIONS = [\n client_name_updated,\n tile_master_windows_rename,\n threaded_poll_text_rename,\n pacman_to_checkupdates,\n bitcoin_to_crypto,\n hook_main_function,\n new_at_current_to_new_client_position,\n]\n\n\nMODULE_RENAMES = [\n (\"libqtile.command_graph\", \"libqtile.command.graph\"),\n (\"libqtile.command_client\", \"libqtile.command.client\"),\n (\"libqtile.command_interface\", \"libqtile.command.interface\"),\n (\"libqtile.command_object\", \"libqtile.command.base\"),\n (\"libqtile.window\", \"libqtile.backend.x11.window\"),\n]\n\nfor (fro, to) in MODULE_RENAMES:\n def f(query, fro=fro, to=to):\n return (\n query\n .select_module(fro)\n .rename(to)\n )\n MIGRATIONS.append(f)\n\n\ndef file_and_backup(config_dir):\n for py in glob(os.path.join(config_dir, \"*.py\")):\n backup = py + BACKUP_SUFFIX\n yield py, backup\n\n\ndef do_migrate(args):\n if \"bowler\" not in sys.modules:\n print(\"bowler can't be found, not migrating config file\")\n print(\"install it and try again\")\n sys.exit(1)\n\n config_dir = os.path.dirname(args.config)\n for py, backup in file_and_backup(config_dir):\n shutil.copyfile(py, backup)\n\n for m in MIGRATIONS:\n q = bowler.Query(config_dir)\n m(q).execute(interactive=not args.yes, write=True)\n\n changed = False\n for py, backup in file_and_backup(config_dir):\n backup = py + BACKUP_SUFFIX\n if not filecmp.cmp(py, backup, shallow=False):\n changed 
= True\n break\n\n if not changed:\n print(\"Config unchanged.\")\n for _, backup in file_and_backup(config_dir):\n os.remove(backup)\n\n\ndef add_subcommand(subparsers, parents):\n parser = subparsers.add_parser(\n \"migrate\",\n parents=parents,\n help=\"Migrate a configuration file to the current API\"\n )\n parser.add_argument(\n \"-c\",\n \"--config\",\n action=\"store\",\n default=os.path.expanduser(\n os.path.join(os.getenv(\"XDG_CONFIG_HOME\", \"~/.config\"), \"qtile\", \"config.py\")\n ),\n help=\"Use the specified configuration file (migrates every .py file in this directory)\",\n )\n parser.add_argument(\n \"--yes\",\n action=\"store_true\",\n help=\"Automatically apply diffs with no confirmation\",\n )\n parser.set_defaults(func=do_migrate)\n", "path": "libqtile/scripts/migrate.py"}]} | 2,270 | 93 |
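Every `prompt` cell in this preview follows the same template (the BEGIN/END ISSUE and BEGIN/END FILES markers plus fixed instructions and a worked diff example), so the issue text and the file listing can be recovered by slicing on those markers. A sketch, assuming all rows keep exactly these marker strings:

```python
def split_prompt(prompt: str):
    """Recover the issue text and the file listing from a prompt cell."""
    issue = prompt.split("--- BEGIN ISSUE ---")[1].split("--- END ISSUE ---")[0].strip()
    files = prompt.split("--- BEGIN FILES ---")[1].split("--- END FILES ---")[0].strip()
    return issue, files

# issue, files = split_prompt(ds[0]["prompt"])
```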
gh_patches_debug_29099 | rasdani/github-patches | git_diff | Kinto__kinto-1556 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add creation of buckets in permissions endpoint
Currently with these settings:
```ini
kinto.experimental_permissions_endpoint = true
kinto.bucket_create_principals = system.Everyone # or whatever
```
I should see the permission to create buckets in http://localhost:8888/v1/permissions
Something like:
```js
{
"data": [
{
"uri": "/buckets",
"resource_name": "bucket",
"permissions": [
"create",
],
}
]
}
```
This is because creating bucket is a specific case: it does not involve parents (buckets are top level) and does not involve the permission backend (permission can only be set in `.ini` settings).
Currently we only iterate on existing buckets https://github.com/Kinto/kinto/blob/bf60689ae016a31b8c5bb1955397be46d7b4acca/kinto/views/permissions.py#L91
Would allow to fix https://github.com/Kinto/kinto-admin/issues/401
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/views/permissions.py`
Content:
```
1 import colander
2 from pyramid.security import NO_PERMISSION_REQUIRED
3 from pyramid.settings import aslist
4
5 from kinto.authorization import PERMISSIONS_INHERITANCE_TREE
6 from kinto.core import utils as core_utils, resource
7 from kinto.core.storage import Sort
8 from kinto.core.storage.memory import extract_record_set
9
10
11 def allowed_from_settings(settings, principals):
12 """Returns every permissions allowed from settings for the current user.
13 :param settings dict: app settings
14 :param principals list: list of principals of current user
15 :rtype: dict
16
17 Result example::
18
19 {
20 "bucket": {"write", "collection:create"},
21 "collection": {"read"}
22 }
23
24 XXX: This helper will be useful for Kinto/kinto#894
25 """
26 perms_settings = {k: aslist(v) for k, v in settings.items()
27 if k.endswith('_principals')}
28 from_settings = {}
29 for key, allowed_principals in perms_settings.items():
30 resource_name, permission, _ = key.split('_')
31 # Keep the known permissions only.
32 if resource_name not in PERMISSIONS_INHERITANCE_TREE.keys():
33 continue
34 # Keep the permissions of the current user only.
35 if not bool(set(principals) & set(allowed_principals)):
36 continue
37 # ``collection_create_principals`` means ``collection:create`` in bucket.
38 if permission == 'create':
39 permission = '{resource_name}:{permission}'.format(
40 resource_name=resource_name,
41 permission=permission)
42 resource_name = { # resource parents.
43 'collection': 'bucket',
44 'group': 'bucket',
45 'record': 'collection'}.get(resource_name, '')
46 # Store them in a convenient way.
47 from_settings.setdefault(resource_name, set()).add(permission)
48 return from_settings
49
50
51 class PermissionsModel:
52 id_field = 'id'
53 modified_field = 'last_modified'
54 deleted_field = 'deleted'
55
56 def __init__(self, request):
57 self.request = request
58
59 def timestamp(self, parent_id=None):
60 return 0
61
62 def get_records(self, filters=None, sorting=None, pagination_rules=None,
63 limit=None, include_deleted=False, parent_id=None):
64 # Invert the permissions inheritance tree.
65 perms_descending_tree = {}
66 for on_resource, tree in PERMISSIONS_INHERITANCE_TREE.items():
67 for obtained_perm, obtained_from in tree.items():
68 for from_resource, perms in obtained_from.items():
69 for perm in perms:
70 perms_descending_tree.setdefault(from_resource, {})\
71 .setdefault(perm, {})\
72 .setdefault(on_resource, set())\
73 .add(obtained_perm)
74
75 # Obtain current principals.
76 principals = self.request.prefixed_principals
77
78 # Query every possible permission of the current user from backend.
79 backend = self.request.registry.permission
80 perms_by_object_uri = backend.get_accessible_objects(principals)
81
82 # Check settings for every allowed resources.
83 from_settings = allowed_from_settings(self.request.registry.settings, principals)
84
85 # Expand permissions obtained from backend with the object URIs that
86 # correspond to permissions allowed from settings.
87 allowed_resources = {'bucket', 'collection', 'group'} & set(from_settings.keys())
88 if allowed_resources:
89 storage = self.request.registry.storage
90 every_bucket, _ = storage.get_all(parent_id='', collection_id='bucket')
91 for bucket in every_bucket:
92 bucket_uri = '/buckets/{id}'.format_map(bucket)
93 for res in allowed_resources:
94 resource_perms = from_settings[res]
95 # Bucket is always fetched.
96 if res == 'bucket':
97 perms_by_object_uri.setdefault(bucket_uri, set()).update(resource_perms)
98 continue
99 # Fetch bucket collections and groups.
100 # XXX: wrong approach: query in a loop!
101 every_subobjects, _ = storage.get_all(parent_id=bucket_uri,
102 collection_id=res)
103 for subobject in every_subobjects:
104 subobj_uri = bucket_uri + '/{0}s/{1}'.format(res, subobject['id'])
105 perms_by_object_uri.setdefault(subobj_uri, set()).update(resource_perms)
106
107 entries = []
108 for object_uri, perms in perms_by_object_uri.items():
109 try:
110 # Obtain associated res from object URI
111 resource_name, matchdict = core_utils.view_lookup(self.request,
112 object_uri)
113 except ValueError:
114 # Skip permissions entries that are not linked to an object URI
115 continue
116
117 # For consistency with event payloads, prefix id with resource name
118 matchdict[resource_name + '_id'] = matchdict.get('id')
119
120 # Expand implicit permissions using descending tree.
121 permissions = set(perms)
122 for perm in perms:
123 obtained = perms_descending_tree[resource_name][perm]
124 # Related to same resource only and not every sub-objects.
125 # (e.g "bucket:write" gives "bucket:read" but not "group:read")
126 permissions |= obtained[resource_name]
127
128 entry = dict(uri=object_uri,
129 resource_name=resource_name,
130 permissions=list(permissions),
131 **matchdict)
132 entries.append(entry)
133
134 return extract_record_set(entries, filters=filters, sorting=sorting,
135 pagination_rules=pagination_rules,
136 limit=limit)
137
138
139 class PermissionsSchema(resource.ResourceSchema):
140 uri = colander.SchemaNode(colander.String())
141 resource_name = colander.SchemaNode(colander.String())
142 permissions = colander.Sequence(colander.SchemaNode(colander.String()))
143 bucket_id = colander.SchemaNode(colander.String())
144 collection_id = colander.SchemaNode(colander.String(),
145 missing=colander.drop)
146 group_id = colander.SchemaNode(colander.String(),
147 missing=colander.drop)
148 record_id = colander.SchemaNode(colander.String(),
149 missing=colander.drop)
150
151 class Options:
152 preserve_unknown = False
153
154
155 @resource.register(name='permissions',
156 description='List of user permissions',
157 collection_path='/permissions',
158 record_path=None,
159 collection_methods=('GET',),
160 permission=NO_PERMISSION_REQUIRED)
161 class Permissions(resource.ShareableResource):
162
163 schema = PermissionsSchema
164
165 def __init__(self, request, context=None):
166 super().__init__(request, context)
167 self.model = PermissionsModel(request)
168
169 def _extract_sorting(self, limit):
170 # Permissions entries are not stored with timestamp, so do not
171 # force it.
172 result = super()._extract_sorting(limit)
173 without_last_modified = [s for s in result
174 if s.field != self.model.modified_field]
175 # For pagination, there must be at least one sort criteria.
176 # We use ``uri`` because its values are unique.
177 if 'uri' not in [s.field for s in without_last_modified]:
178 without_last_modified.append(Sort('uri', -1))
179 return without_last_modified
180
181 def _extract_filters(self):
182 result = super()._extract_filters()
183 without_last_modified = [s for s in result
184 if s.field != self.model.modified_field]
185 return without_last_modified
186
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kinto/views/permissions.py b/kinto/views/permissions.py
--- a/kinto/views/permissions.py
+++ b/kinto/views/permissions.py
@@ -82,6 +82,14 @@
# Check settings for every allowed resources.
from_settings = allowed_from_settings(self.request.registry.settings, principals)
+ # Add additional resources and permissions defined in settings/plugins
+ for root_perm in from_settings.get('', []):
+ resource_name, _ = root_perm.split(':')
+ uri = core_utils.strip_uri_prefix(
+ self.request.route_path('{0}-collection'.format(resource_name)))
+ perms_by_object_uri[uri] = {root_perm}
+ perms_descending_tree[resource_name].update({root_perm: {resource_name: {root_perm}}})
+
# Expand permissions obtained from backend with the object URIs that
# correspond to permissions allowed from settings.
allowed_resources = {'bucket', 'collection', 'group'} & set(from_settings.keys())
@@ -114,8 +122,10 @@
# Skip permissions entries that are not linked to an object URI
continue
- # For consistency with event payloads, prefix id with resource name
- matchdict[resource_name + '_id'] = matchdict.get('id')
+ # For consistency with event payloads, if resource has an id,
+ # prefix it with its resource name
+ if "id" in matchdict:
+ matchdict[resource_name + '_id'] = matchdict['id']
# Expand implicit permissions using descending tree.
permissions = set(perms)
| {"golden_diff": "diff --git a/kinto/views/permissions.py b/kinto/views/permissions.py\n--- a/kinto/views/permissions.py\n+++ b/kinto/views/permissions.py\n@@ -82,6 +82,14 @@\n # Check settings for every allowed resources.\n from_settings = allowed_from_settings(self.request.registry.settings, principals)\n \n+ # Add additional resources and permissions defined in settings/plugins\n+ for root_perm in from_settings.get('', []):\n+ resource_name, _ = root_perm.split(':')\n+ uri = core_utils.strip_uri_prefix(\n+ self.request.route_path('{0}-collection'.format(resource_name)))\n+ perms_by_object_uri[uri] = {root_perm}\n+ perms_descending_tree[resource_name].update({root_perm: {resource_name: {root_perm}}})\n+\n # Expand permissions obtained from backend with the object URIs that\n # correspond to permissions allowed from settings.\n allowed_resources = {'bucket', 'collection', 'group'} & set(from_settings.keys())\n@@ -114,8 +122,10 @@\n # Skip permissions entries that are not linked to an object URI\n continue\n \n- # For consistency with event payloads, prefix id with resource name\n- matchdict[resource_name + '_id'] = matchdict.get('id')\n+ # For consistency with event payloads, if resource has an id,\n+ # prefix it with its resource name\n+ if \"id\" in matchdict:\n+ matchdict[resource_name + '_id'] = matchdict['id']\n \n # Expand implicit permissions using descending tree.\n permissions = set(perms)\n", "issue": "Add creation of buckets in permissions endpoint\nCurrently with these settings:\r\n\r\n```ini\r\nkinto.experimental_permissions_endpoint = true\r\nkinto.bucket_create_principals = system.Everyone # or whatever \r\n```\r\n\r\nI should see the permission to create buckets in http://localhost:8888/v1/permissions\r\n\r\nSomething like:\r\n```js\r\n{\r\n \"data\": [\r\n {\r\n \"uri\": \"/buckets\",\r\n \"resource_name\": \"bucket\",\r\n \"permissions\": [\r\n \"create\",\r\n ],\r\n }\r\n ]\r\n}\r\n```\r\n\r\nThis is because creating bucket is a specific case: it does not involve parents (buckets are top level) and does not involve the permission backend (permission can only be set in `.ini` settings).\r\nCurrently we only iterate on existing buckets https://github.com/Kinto/kinto/blob/bf60689ae016a31b8c5bb1955397be46d7b4acca/kinto/views/permissions.py#L91\r\n\r\nWould allow to fix https://github.com/Kinto/kinto-admin/issues/401\n", "before_files": [{"content": "import colander\nfrom pyramid.security import NO_PERMISSION_REQUIRED\nfrom pyramid.settings import aslist\n\nfrom kinto.authorization import PERMISSIONS_INHERITANCE_TREE\nfrom kinto.core import utils as core_utils, resource\nfrom kinto.core.storage import Sort\nfrom kinto.core.storage.memory import extract_record_set\n\n\ndef allowed_from_settings(settings, principals):\n \"\"\"Returns every permissions allowed from settings for the current user.\n :param settings dict: app settings\n :param principals list: list of principals of current user\n :rtype: dict\n\n Result example::\n\n {\n \"bucket\": {\"write\", \"collection:create\"},\n \"collection\": {\"read\"}\n }\n\n XXX: This helper will be useful for Kinto/kinto#894\n \"\"\"\n perms_settings = {k: aslist(v) for k, v in settings.items()\n if k.endswith('_principals')}\n from_settings = {}\n for key, allowed_principals in perms_settings.items():\n resource_name, permission, _ = key.split('_')\n # Keep the known permissions only.\n if resource_name not in PERMISSIONS_INHERITANCE_TREE.keys():\n continue\n # Keep the permissions of the current user only.\n if not 
bool(set(principals) & set(allowed_principals)):\n continue\n # ``collection_create_principals`` means ``collection:create`` in bucket.\n if permission == 'create':\n permission = '{resource_name}:{permission}'.format(\n resource_name=resource_name,\n permission=permission)\n resource_name = { # resource parents.\n 'collection': 'bucket',\n 'group': 'bucket',\n 'record': 'collection'}.get(resource_name, '')\n # Store them in a convenient way.\n from_settings.setdefault(resource_name, set()).add(permission)\n return from_settings\n\n\nclass PermissionsModel:\n id_field = 'id'\n modified_field = 'last_modified'\n deleted_field = 'deleted'\n\n def __init__(self, request):\n self.request = request\n\n def timestamp(self, parent_id=None):\n return 0\n\n def get_records(self, filters=None, sorting=None, pagination_rules=None,\n limit=None, include_deleted=False, parent_id=None):\n # Invert the permissions inheritance tree.\n perms_descending_tree = {}\n for on_resource, tree in PERMISSIONS_INHERITANCE_TREE.items():\n for obtained_perm, obtained_from in tree.items():\n for from_resource, perms in obtained_from.items():\n for perm in perms:\n perms_descending_tree.setdefault(from_resource, {})\\\n .setdefault(perm, {})\\\n .setdefault(on_resource, set())\\\n .add(obtained_perm)\n\n # Obtain current principals.\n principals = self.request.prefixed_principals\n\n # Query every possible permission of the current user from backend.\n backend = self.request.registry.permission\n perms_by_object_uri = backend.get_accessible_objects(principals)\n\n # Check settings for every allowed resources.\n from_settings = allowed_from_settings(self.request.registry.settings, principals)\n\n # Expand permissions obtained from backend with the object URIs that\n # correspond to permissions allowed from settings.\n allowed_resources = {'bucket', 'collection', 'group'} & set(from_settings.keys())\n if allowed_resources:\n storage = self.request.registry.storage\n every_bucket, _ = storage.get_all(parent_id='', collection_id='bucket')\n for bucket in every_bucket:\n bucket_uri = '/buckets/{id}'.format_map(bucket)\n for res in allowed_resources:\n resource_perms = from_settings[res]\n # Bucket is always fetched.\n if res == 'bucket':\n perms_by_object_uri.setdefault(bucket_uri, set()).update(resource_perms)\n continue\n # Fetch bucket collections and groups.\n # XXX: wrong approach: query in a loop!\n every_subobjects, _ = storage.get_all(parent_id=bucket_uri,\n collection_id=res)\n for subobject in every_subobjects:\n subobj_uri = bucket_uri + '/{0}s/{1}'.format(res, subobject['id'])\n perms_by_object_uri.setdefault(subobj_uri, set()).update(resource_perms)\n\n entries = []\n for object_uri, perms in perms_by_object_uri.items():\n try:\n # Obtain associated res from object URI\n resource_name, matchdict = core_utils.view_lookup(self.request,\n object_uri)\n except ValueError:\n # Skip permissions entries that are not linked to an object URI\n continue\n\n # For consistency with event payloads, prefix id with resource name\n matchdict[resource_name + '_id'] = matchdict.get('id')\n\n # Expand implicit permissions using descending tree.\n permissions = set(perms)\n for perm in perms:\n obtained = perms_descending_tree[resource_name][perm]\n # Related to same resource only and not every sub-objects.\n # (e.g \"bucket:write\" gives \"bucket:read\" but not \"group:read\")\n permissions |= obtained[resource_name]\n\n entry = dict(uri=object_uri,\n resource_name=resource_name,\n permissions=list(permissions),\n **matchdict)\n 
entries.append(entry)\n\n return extract_record_set(entries, filters=filters, sorting=sorting,\n pagination_rules=pagination_rules,\n limit=limit)\n\n\nclass PermissionsSchema(resource.ResourceSchema):\n uri = colander.SchemaNode(colander.String())\n resource_name = colander.SchemaNode(colander.String())\n permissions = colander.Sequence(colander.SchemaNode(colander.String()))\n bucket_id = colander.SchemaNode(colander.String())\n collection_id = colander.SchemaNode(colander.String(),\n missing=colander.drop)\n group_id = colander.SchemaNode(colander.String(),\n missing=colander.drop)\n record_id = colander.SchemaNode(colander.String(),\n missing=colander.drop)\n\n class Options:\n preserve_unknown = False\n\n\[email protected](name='permissions',\n description='List of user permissions',\n collection_path='/permissions',\n record_path=None,\n collection_methods=('GET',),\n permission=NO_PERMISSION_REQUIRED)\nclass Permissions(resource.ShareableResource):\n\n schema = PermissionsSchema\n\n def __init__(self, request, context=None):\n super().__init__(request, context)\n self.model = PermissionsModel(request)\n\n def _extract_sorting(self, limit):\n # Permissions entries are not stored with timestamp, so do not\n # force it.\n result = super()._extract_sorting(limit)\n without_last_modified = [s for s in result\n if s.field != self.model.modified_field]\n # For pagination, there must be at least one sort criteria.\n # We use ``uri`` because its values are unique.\n if 'uri' not in [s.field for s in without_last_modified]:\n without_last_modified.append(Sort('uri', -1))\n return without_last_modified\n\n def _extract_filters(self):\n result = super()._extract_filters()\n without_last_modified = [s for s in result\n if s.field != self.model.modified_field]\n return without_last_modified\n", "path": "kinto/views/permissions.py"}], "after_files": [{"content": "import colander\nfrom pyramid.security import NO_PERMISSION_REQUIRED\nfrom pyramid.settings import aslist\n\nfrom kinto.authorization import PERMISSIONS_INHERITANCE_TREE\nfrom kinto.core import utils as core_utils, resource\nfrom kinto.core.storage import Sort\nfrom kinto.core.storage.memory import extract_record_set\n\n\ndef allowed_from_settings(settings, principals):\n \"\"\"Returns every permissions allowed from settings for the current user.\n :param settings dict: app settings\n :param principals list: list of principals of current user\n :rtype: dict\n\n Result example::\n\n {\n \"bucket\": {\"write\", \"collection:create\"},\n \"collection\": {\"read\"}\n }\n\n XXX: This helper will be useful for Kinto/kinto#894\n \"\"\"\n perms_settings = {k: aslist(v) for k, v in settings.items()\n if k.endswith('_principals')}\n from_settings = {}\n for key, allowed_principals in perms_settings.items():\n resource_name, permission, _ = key.split('_')\n # Keep the known permissions only.\n if resource_name not in PERMISSIONS_INHERITANCE_TREE.keys():\n continue\n # Keep the permissions of the current user only.\n if not bool(set(principals) & set(allowed_principals)):\n continue\n # ``collection_create_principals`` means ``collection:create`` in bucket.\n if permission == 'create':\n permission = '{resource_name}:{permission}'.format(\n resource_name=resource_name,\n permission=permission)\n resource_name = { # resource parents.\n 'collection': 'bucket',\n 'group': 'bucket',\n 'record': 'collection'}.get(resource_name, '')\n # Store them in a convenient way.\n from_settings.setdefault(resource_name, set()).add(permission)\n return 
from_settings\n\n\nclass PermissionsModel:\n id_field = 'id'\n modified_field = 'last_modified'\n deleted_field = 'deleted'\n\n def __init__(self, request):\n self.request = request\n\n def timestamp(self, parent_id=None):\n return 0\n\n def get_records(self, filters=None, sorting=None, pagination_rules=None,\n limit=None, include_deleted=False, parent_id=None):\n # Invert the permissions inheritance tree.\n perms_descending_tree = {}\n for on_resource, tree in PERMISSIONS_INHERITANCE_TREE.items():\n for obtained_perm, obtained_from in tree.items():\n for from_resource, perms in obtained_from.items():\n for perm in perms:\n perms_descending_tree.setdefault(from_resource, {})\\\n .setdefault(perm, {})\\\n .setdefault(on_resource, set())\\\n .add(obtained_perm)\n\n # Obtain current principals.\n principals = self.request.prefixed_principals\n\n # Query every possible permission of the current user from backend.\n backend = self.request.registry.permission\n perms_by_object_uri = backend.get_accessible_objects(principals)\n\n # Check settings for every allowed resources.\n from_settings = allowed_from_settings(self.request.registry.settings, principals)\n\n # Add additional resources and permissions defined in settings/plugins\n for root_perm in from_settings.get('', []):\n resource_name, _ = root_perm.split(':')\n uri = core_utils.strip_uri_prefix(\n self.request.route_path('{0}-collection'.format(resource_name)))\n perms_by_object_uri[uri] = {root_perm}\n perms_descending_tree[resource_name].update({root_perm: {resource_name: {root_perm}}})\n\n # Expand permissions obtained from backend with the object URIs that\n # correspond to permissions allowed from settings.\n allowed_resources = {'bucket', 'collection', 'group'} & set(from_settings.keys())\n if allowed_resources:\n storage = self.request.registry.storage\n every_bucket, _ = storage.get_all(parent_id='', collection_id='bucket')\n for bucket in every_bucket:\n bucket_uri = '/buckets/{id}'.format_map(bucket)\n for res in allowed_resources:\n resource_perms = from_settings[res]\n # Bucket is always fetched.\n if res == 'bucket':\n perms_by_object_uri.setdefault(bucket_uri, set()).update(resource_perms)\n continue\n # Fetch bucket collections and groups.\n # XXX: wrong approach: query in a loop!\n every_subobjects, _ = storage.get_all(parent_id=bucket_uri,\n collection_id=res)\n for subobject in every_subobjects:\n subobj_uri = bucket_uri + '/{0}s/{1}'.format(res, subobject['id'])\n perms_by_object_uri.setdefault(subobj_uri, set()).update(resource_perms)\n\n entries = []\n for object_uri, perms in perms_by_object_uri.items():\n try:\n # Obtain associated res from object URI\n resource_name, matchdict = core_utils.view_lookup(self.request,\n object_uri)\n except ValueError:\n # Skip permissions entries that are not linked to an object URI\n continue\n\n # For consistency with event payloads, if resource has an id,\n # prefix it with its resource name\n if \"id\" in matchdict:\n matchdict[resource_name + '_id'] = matchdict['id']\n\n # Expand implicit permissions using descending tree.\n permissions = set(perms)\n for perm in perms:\n obtained = perms_descending_tree[resource_name][perm]\n # Related to same resource only and not every sub-objects.\n # (e.g \"bucket:write\" gives \"bucket:read\" but not \"group:read\")\n permissions |= obtained[resource_name]\n\n entry = dict(uri=object_uri,\n resource_name=resource_name,\n permissions=list(permissions),\n **matchdict)\n entries.append(entry)\n\n return extract_record_set(entries, 
filters=filters, sorting=sorting,\n pagination_rules=pagination_rules,\n limit=limit)\n\n\nclass PermissionsSchema(resource.ResourceSchema):\n uri = colander.SchemaNode(colander.String())\n resource_name = colander.SchemaNode(colander.String())\n permissions = colander.Sequence(colander.SchemaNode(colander.String()))\n bucket_id = colander.SchemaNode(colander.String())\n collection_id = colander.SchemaNode(colander.String(),\n missing=colander.drop)\n group_id = colander.SchemaNode(colander.String(),\n missing=colander.drop)\n record_id = colander.SchemaNode(colander.String(),\n missing=colander.drop)\n\n class Options:\n preserve_unknown = False\n\n\[email protected](name='permissions',\n description='List of user permissions',\n collection_path='/permissions',\n record_path=None,\n collection_methods=('GET',),\n permission=NO_PERMISSION_REQUIRED)\nclass Permissions(resource.ShareableResource):\n\n schema = PermissionsSchema\n\n def __init__(self, request, context=None):\n super().__init__(request, context)\n self.model = PermissionsModel(request)\n\n def _extract_sorting(self, limit):\n # Permissions entries are not stored with timestamp, so do not\n # force it.\n result = super()._extract_sorting(limit)\n without_last_modified = [s for s in result\n if s.field != self.model.modified_field]\n # For pagination, there must be at least one sort criteria.\n # We use ``uri`` because its values are unique.\n if 'uri' not in [s.field for s in without_last_modified]:\n without_last_modified.append(Sort('uri', -1))\n return without_last_modified\n\n def _extract_filters(self):\n result = super()._extract_filters()\n without_last_modified = [s for s in result\n if s.field != self.model.modified_field]\n return without_last_modified\n", "path": "kinto/views/permissions.py"}]} | 2,450 | 350 |
gh_patches_debug_23957 | rasdani/github-patches | git_diff | crytic__slither-601 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AssertionError parsing function-type return values
The following contract causes an assertion error:
```
contract FunctionReturningFunction {
function f() public { }
function g() internal returns (function() internal) {
return f;
}
}
```
Output:
```
ERROR:root:Error in .\function_returning_function.sol
ERROR:root:Traceback (most recent call last):
File "c:\users\x\documents\github\slither\slither\__main__.py", line 610, in main_impl
(slither_instances, results_detectors, results_printers, number_contracts) = process_all(filename, args,
File "c:\users\x\documents\github\slither\slither\__main__.py", line 67, in process_all
(slither, current_results_detectors, current_results_printers, current_analyzed_count) = process_single(
File "c:\users\x\documents\github\slither\slither\__main__.py", line 53, in process_single
slither = Slither(target,
File "c:\users\x\documents\github\slither\slither\slither.py", line 86, in __init__
self._parser.analyze_contracts()
File "c:\users\x\documents\github\slither\slither\solc_parsing\slitherSolc.py", line 345, in analyze_contracts
self._convert_to_slithir()
File "c:\users\x\documents\github\slither\slither\solc_parsing\slitherSolc.py", line 489, in _convert_to_slithir
func.generate_slithir_and_analyze()
File "c:\users\x\documents\github\slither\slither\core\declarations\function.py", line 1652, in generate_slithir_and_analyze
node.slithir_generation()
File "c:\users\x\documents\github\slither\slither\core\cfg\node.py", line 702, in slithir_generation
self._irs = convert_expression(expression, self)
File "c:\users\x\documents\github\slither\slither\slithir\convert.py", line 64, in convert_expression
visitor = ExpressionToSlithIR(expression, node)
File "c:\users\x\documents\github\slither\slither\visitors\slithir\expression_to_slithir.py", line 105, in __init__
r = Return(get(self.expression))
File "c:\users\x\documents\github\slither\slither\slithir\operations\return_operation.py", line 18, in __init__
assert is_valid_rvalue(values) or isinstance(values, TupleVariable) or values is None
AssertionError
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `slither/slithir/operations/return_operation.py`
Content:
```
1 from slither.slithir.operations.operation import Operation
2
3 from slither.slithir.variables.tuple import TupleVariable
4 from slither.slithir.utils.utils import is_valid_rvalue
5
6
7 class Return(Operation):
8 """
9 Return
10 Only present as last operation in RETURN node
11 """
12
13 def __init__(self, values):
14 # Note: Can return None
15 # ex: return call()
16 # where call() dont return
17 if not isinstance(values, list):
18 assert is_valid_rvalue(values) or isinstance(values, TupleVariable) or values is None
19 if values is None:
20 values = []
21 else:
22 values = [values]
23 else:
24 # Remove None
25 # Prior Solidity 0.5
26 # return (0,)
27 # was valid for returns(uint)
28 values = [v for v in values if not v is None]
29 self._valid_value(values)
30 super(Return, self).__init__()
31 self._values = values
32
33 def _valid_value(self, value):
34 if isinstance(value, list):
35 assert all(self._valid_value(v) for v in value)
36 else:
37 assert is_valid_rvalue(value) or isinstance(value, TupleVariable)
38 return True
39
40 @property
41 def read(self):
42 return self._unroll(self.values)
43
44 @property
45 def values(self):
46 return self._unroll(self._values)
47
48 def __str__(self):
49 return "RETURN {}".format(','.join(['{}'.format(x) for x in self.values]))
50
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/slither/slithir/operations/return_operation.py b/slither/slithir/operations/return_operation.py
--- a/slither/slithir/operations/return_operation.py
+++ b/slither/slithir/operations/return_operation.py
@@ -1,3 +1,4 @@
+from slither.core.declarations import Function
from slither.slithir.operations.operation import Operation
from slither.slithir.variables.tuple import TupleVariable
@@ -15,7 +16,7 @@
# ex: return call()
# where call() dont return
if not isinstance(values, list):
- assert is_valid_rvalue(values) or isinstance(values, TupleVariable) or values is None
+ assert is_valid_rvalue(values) or isinstance(values, (TupleVariable, Function)) or values is None
if values is None:
values = []
else:
@@ -34,7 +35,7 @@
if isinstance(value, list):
assert all(self._valid_value(v) for v in value)
else:
- assert is_valid_rvalue(value) or isinstance(value, TupleVariable)
+ assert is_valid_rvalue(value) or isinstance(value, (TupleVariable, Function))
return True
@property
| {"golden_diff": "diff --git a/slither/slithir/operations/return_operation.py b/slither/slithir/operations/return_operation.py\n--- a/slither/slithir/operations/return_operation.py\n+++ b/slither/slithir/operations/return_operation.py\n@@ -1,3 +1,4 @@\n+from slither.core.declarations import Function\n from slither.slithir.operations.operation import Operation\n \n from slither.slithir.variables.tuple import TupleVariable\n@@ -15,7 +16,7 @@\n # ex: return call()\n # where call() dont return\n if not isinstance(values, list):\n- assert is_valid_rvalue(values) or isinstance(values, TupleVariable) or values is None\n+ assert is_valid_rvalue(values) or isinstance(values, (TupleVariable, Function)) or values is None\n if values is None:\n values = []\n else:\n@@ -34,7 +35,7 @@\n if isinstance(value, list):\n assert all(self._valid_value(v) for v in value)\n else:\n- assert is_valid_rvalue(value) or isinstance(value, TupleVariable)\n+ assert is_valid_rvalue(value) or isinstance(value, (TupleVariable, Function))\n return True\n \n @property\n", "issue": "AssertionError parsing function-type return values\nThe following contract causes an assertion error:\r\n```\r\ncontract FunctionReturningFunction {\r\n function f() public { }\r\n\r\n function g() internal returns (function() internal) {\r\n return f;\r\n }\r\n}\r\n```\r\n\r\nOutput:\r\n```\r\nERROR:root:Error in .\\function_returning_function.sol\r\nERROR:root:Traceback (most recent call last):\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\__main__.py\", line 610, in main_impl\r\n (slither_instances, results_detectors, results_printers, number_contracts) = process_all(filename, args,\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\__main__.py\", line 67, in process_all\r\n (slither, current_results_detectors, current_results_printers, current_analyzed_count) = process_single(\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\__main__.py\", line 53, in process_single\r\n slither = Slither(target,\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\slither.py\", line 86, in __init__\r\n self._parser.analyze_contracts()\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\solc_parsing\\slitherSolc.py\", line 345, in analyze_contracts\r\n self._convert_to_slithir()\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\solc_parsing\\slitherSolc.py\", line 489, in _convert_to_slithir\r\n func.generate_slithir_and_analyze()\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\core\\declarations\\function.py\", line 1652, in generate_slithir_and_analyze\r\n node.slithir_generation()\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\core\\cfg\\node.py\", line 702, in slithir_generation\r\n self._irs = convert_expression(expression, self)\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\slithir\\convert.py\", line 64, in convert_expression\r\n visitor = ExpressionToSlithIR(expression, node)\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\visitors\\slithir\\expression_to_slithir.py\", line 105, in __init__\r\n r = Return(get(self.expression))\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\slithir\\operations\\return_operation.py\", line 18, in __init__\r\n assert is_valid_rvalue(values) or isinstance(values, TupleVariable) or values is None\r\nAssertionError\r\n```\nAssertionError parsing function-type return values\nThe following contract causes an assertion error:\r\n```\r\ncontract FunctionReturningFunction {\r\n function 
f() public { }\r\n\r\n function g() internal returns (function() internal) {\r\n return f;\r\n }\r\n}\r\n```\r\n\r\nOutput:\r\n```\r\nERROR:root:Error in .\\function_returning_function.sol\r\nERROR:root:Traceback (most recent call last):\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\__main__.py\", line 610, in main_impl\r\n (slither_instances, results_detectors, results_printers, number_contracts) = process_all(filename, args,\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\__main__.py\", line 67, in process_all\r\n (slither, current_results_detectors, current_results_printers, current_analyzed_count) = process_single(\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\__main__.py\", line 53, in process_single\r\n slither = Slither(target,\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\slither.py\", line 86, in __init__\r\n self._parser.analyze_contracts()\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\solc_parsing\\slitherSolc.py\", line 345, in analyze_contracts\r\n self._convert_to_slithir()\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\solc_parsing\\slitherSolc.py\", line 489, in _convert_to_slithir\r\n func.generate_slithir_and_analyze()\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\core\\declarations\\function.py\", line 1652, in generate_slithir_and_analyze\r\n node.slithir_generation()\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\core\\cfg\\node.py\", line 702, in slithir_generation\r\n self._irs = convert_expression(expression, self)\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\slithir\\convert.py\", line 64, in convert_expression\r\n visitor = ExpressionToSlithIR(expression, node)\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\visitors\\slithir\\expression_to_slithir.py\", line 105, in __init__\r\n r = Return(get(self.expression))\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\slithir\\operations\\return_operation.py\", line 18, in __init__\r\n assert is_valid_rvalue(values) or isinstance(values, TupleVariable) or values is None\r\nAssertionError\r\n```\n", "before_files": [{"content": "from slither.slithir.operations.operation import Operation\n\nfrom slither.slithir.variables.tuple import TupleVariable\nfrom slither.slithir.utils.utils import is_valid_rvalue\n\n\nclass Return(Operation):\n \"\"\"\n Return\n Only present as last operation in RETURN node\n \"\"\"\n\n def __init__(self, values):\n # Note: Can return None \n # ex: return call()\n # where call() dont return\n if not isinstance(values, list):\n assert is_valid_rvalue(values) or isinstance(values, TupleVariable) or values is None\n if values is None:\n values = []\n else:\n values = [values]\n else:\n # Remove None\n # Prior Solidity 0.5\n # return (0,)\n # was valid for returns(uint)\n values = [v for v in values if not v is None]\n self._valid_value(values)\n super(Return, self).__init__()\n self._values = values\n\n def _valid_value(self, value):\n if isinstance(value, list):\n assert all(self._valid_value(v) for v in value)\n else:\n assert is_valid_rvalue(value) or isinstance(value, TupleVariable)\n return True\n\n @property\n def read(self):\n return self._unroll(self.values)\n\n @property\n def values(self):\n return self._unroll(self._values)\n\n def __str__(self):\n return \"RETURN {}\".format(','.join(['{}'.format(x) for x in self.values]))\n", "path": "slither/slithir/operations/return_operation.py"}], "after_files": [{"content": "from 
slither.core.declarations import Function\nfrom slither.slithir.operations.operation import Operation\n\nfrom slither.slithir.variables.tuple import TupleVariable\nfrom slither.slithir.utils.utils import is_valid_rvalue\n\n\nclass Return(Operation):\n \"\"\"\n Return\n Only present as last operation in RETURN node\n \"\"\"\n\n def __init__(self, values):\n # Note: Can return None \n # ex: return call()\n # where call() dont return\n if not isinstance(values, list):\n assert is_valid_rvalue(values) or isinstance(values, (TupleVariable, Function)) or values is None\n if values is None:\n values = []\n else:\n values = [values]\n else:\n # Remove None\n # Prior Solidity 0.5\n # return (0,)\n # was valid for returns(uint)\n values = [v for v in values if not v is None]\n self._valid_value(values)\n super(Return, self).__init__()\n self._values = values\n\n def _valid_value(self, value):\n if isinstance(value, list):\n assert all(self._valid_value(v) for v in value)\n else:\n assert is_valid_rvalue(value) or isinstance(value, (TupleVariable, Function))\n return True\n\n @property\n def read(self):\n return self._unroll(self.values)\n\n @property\n def values(self):\n return self._unroll(self._values)\n\n def __str__(self):\n return \"RETURN {}\".format(','.join(['{}'.format(x) for x in self.values]))\n", "path": "slither/slithir/operations/return_operation.py"}]} | 1,947 | 272 |
gh_patches_debug_21974 | rasdani/github-patches | git_diff | CTPUG__wafer-678 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Custom talk deletion logic breaks with Django >= 4.0
The custom logic to turn talk deletions into withdrawals doesn't work with Django >= 4.0
Quoting from the Django 4 release notes
```
In accordance with FormMixin, object deletion for POST requests is handled in form_valid(). Custom delete logic in delete() handlers should be moved to form_valid(), or a shared helper method, as needed.
```
The change is mechanically fairly simple, but I still need to look at how this works with the older Django releases we support.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wafer/talks/views.py`
Content:
```
1 from itertools import groupby
2
3 from django.conf import settings
4 from django.contrib.auth.mixins import (
5 LoginRequiredMixin, PermissionRequiredMixin)
6 from django.contrib.auth.models import AnonymousUser
7 from django.core.exceptions import PermissionDenied, ValidationError
8 from django.db.models import Q, F
9 from django.http import Http404
10 from django.http import HttpResponseRedirect
11 from django.urls import reverse_lazy
12 from django.views.generic.edit import CreateView, UpdateView, DeleteView
13
14 from bakery.views import BuildableDetailView, BuildableListView
15 from rest_framework import viewsets
16 from rest_framework.permissions import (
17 DjangoModelPermissions, DjangoModelPermissionsOrAnonReadOnly,
18 BasePermission)
19 from rest_framework_extensions.mixins import NestedViewSetMixin
20 from reversion import revisions
21 from reversion.models import Version
22
23 from wafer.talks.models import (
24 Review, Talk, TalkType, TalkUrl, Track,
25 ACCEPTED, CANCELLED, SUBMITTED, UNDER_CONSIDERATION, WITHDRAWN)
26 from wafer.talks.forms import ReviewForm, get_talk_form_class
27 from wafer.talks.serializers import TalkSerializer, TalkUrlSerializer
28 from wafer.users.models import UserProfile
29 from wafer.utils import order_results_by, PaginatedBuildableListView
30
31
32 class EditOwnTalksMixin(object):
33 '''Users can edit their own talks as long as the talk is
34 "Under Consideration"'''
35 def get_object(self, *args, **kwargs):
36 object_ = super().get_object(*args, **kwargs)
37 if object_.can_edit(self.request.user):
38 return object_
39 else:
40 raise PermissionDenied
41
42
43 class UsersTalks(PaginatedBuildableListView):
44 template_name = 'wafer.talks/talks.html'
45 build_prefix = 'talks'
46 paginate_by = 100
47
48 @order_results_by('talk_type', 'talk_id')
49 def get_queryset(self):
50 # self.request will be None when we come here via the static site
51 # renderer
52 if self.request and Talk.can_view_all(self.request.user):
53 talks = Talk.objects.all()
54 else:
55 talks = Talk.objects.filter(Q(status=ACCEPTED) | Q(status=CANCELLED))
56 return talks.prefetch_related(
57 "talk_type", "corresponding_author", "authors", "authors__userprofile"
58 )
59
60 def get_context_data(self, **kwargs):
61 context = super().get_context_data(**kwargs)
62 context["languages"] = Talk.LANGUAGES
63 context["see_all"] = Talk.can_view_all(self.request.user)
64 return context
65
66
67 class TalkView(BuildableDetailView):
68 template_name = 'wafer.talks/talk.html'
69 model = Talk
70
71 # Needed so django-bakery only renders public talks
72 def build_object(self, obj):
73 """Override django-bakery to skip talks that raise 403"""
74 try:
75 super().build_object(obj)
76 except PermissionDenied:
77 # We cleanup the directory created
78 self.unbuild_object(obj)
79
80 def create_request(self, path):
81 request = super().create_request(path)
82 request.user = AnonymousUser()
83 return request
84
85 def get_object(self, *args, **kwargs):
86 '''Only talk owners can see talks, unless they've been accepted'''
87 object_ = super().get_object(*args, **kwargs)
88 if not object_.can_view(self.request.user):
89 raise PermissionDenied
90 return object_
91
92 def canonical_url(self):
93 '''Return the canonical URL for this view'''
94 return self.object.get_absolute_url()
95
96 def render_to_response(self, *args, **kwargs):
97 '''Canonicalize the URL if the slug changed'''
98 canonical_url = self.canonical_url()
99 if self.request.path != canonical_url:
100 return HttpResponseRedirect(canonical_url)
101 return super().render_to_response(*args, **kwargs)
102
103 def get_context_data(self, **kwargs):
104 context = super().get_context_data(**kwargs)
105 talk = self.object
106 user = self.request.user
107
108 context['can_edit'] = talk.can_edit(user)
109
110 can_review = talk.can_review(user)
111 context['can_review'] = can_review
112 if can_review:
113 review = talk.reviews.filter(reviewer=user).first()
114 context['review_form'] = ReviewForm(
115 instance=review, talk=talk, user=user)
116
117 return context
118
119
120 class TalkCreate(LoginRequiredMixin, CreateView):
121 model = Talk
122 template_name = 'wafer.talks/talk_form.html'
123
124 def get_form_class(self):
125 return get_talk_form_class()
126
127 def get_form_kwargs(self):
128 kwargs = super().get_form_kwargs()
129 kwargs['user'] = self.request.user
130 return kwargs
131
132 def get_context_data(self, **kwargs):
133 context = super().get_context_data(**kwargs)
134 can_submit = getattr(settings, 'WAFER_TALKS_OPEN', True)
135 if can_submit and TalkType.objects.exists():
136 # Check for all talk types being disabled
137 can_submit = TalkType.objects.open_for_submission().count() > 0
138 context['can_submit'] = can_submit
139 return context
140
141 @revisions.create_revision()
142 def form_valid(self, form):
143 if not getattr(settings, 'WAFER_TALKS_OPEN', True):
144 # Should this be SuspiciousOperation?
145 raise ValidationError("Talk submission isn't open")
146 # Eaaargh we have to do the work of CreateView if we want to set values
147 # before saving
148 self.object = form.save(commit=False)
149 self.object.corresponding_author = self.request.user
150 self.object.save()
151 revisions.set_user(self.request.user)
152 revisions.set_comment("Talk Created")
153 # Save the author information as well (many-to-many fun)
154 form.save_m2m()
155 return HttpResponseRedirect(self.get_success_url())
156
157
158 class TalkUpdate(EditOwnTalksMixin, UpdateView):
159 model = Talk
160 template_name = 'wafer.talks/talk_form.html'
161
162 def get_form_class(self):
163 return get_talk_form_class()
164
165 def get_form_kwargs(self):
166 kwargs = super().get_form_kwargs()
167 kwargs['user'] = self.request.user
168 return kwargs
169
170 def get_context_data(self, **kwargs):
171 context = super().get_context_data(**kwargs)
172 context['can_edit'] = self.object.can_edit(self.request.user)
173 return context
174
175 @revisions.create_revision()
176 def form_valid(self, form):
177 revisions.set_user(self.request.user)
178 revisions.set_comment("Talk Modified")
179 return super().form_valid(form)
180
181
182 class TalkWithdraw(EditOwnTalksMixin, DeleteView):
183 model = Talk
184 template_name = 'wafer.talks/talk_withdraw.html'
185 success_url = reverse_lazy('wafer_page')
186
187 @revisions.create_revision()
188 def delete(self, request, *args, **kwargs):
189 """Override delete to only withdraw"""
190 talk = self.get_object()
191 talk.status = WITHDRAWN
192 talk.save()
193 revisions.set_user(self.request.user)
194 revisions.set_comment("Talk Withdrawn")
195 return HttpResponseRedirect(self.success_url)
196
197
198 class TalkReview(PermissionRequiredMixin, CreateView):
199 model = Review
200 form_class = ReviewForm
201 permission_required = 'talks.add_review'
202 template_name = 'wafer.talks/review_talk.html'
203
204 def get_form_kwargs(self):
205 kwargs = super().get_form_kwargs()
206 kwargs['talk'] = Talk.objects.get(pk=self.kwargs['pk'])
207 kwargs['instance'] = self.get_object()
208 kwargs['user'] = self.request.user
209 return kwargs
210
211 def get_object(self):
212 try:
213 return Review.objects.get(
214 talk_id=self.kwargs['pk'], reviewer=self.request.user)
215 except Review.DoesNotExist:
216 return None
217
218 def form_valid(self, form):
219 existing = self.get_object()
220 response = super().form_valid(form)
221
222 review = self.get_object()
223 # Update the talk to 'under consideration' if a review is
224 # added.
225 talk = review.talk
226 if talk.status == SUBMITTED:
227 talk.status = UNDER_CONSIDERATION
228 with revisions.create_revision():
229 revisions.set_user(self.request.user)
230 revisions.set_comment("Status changed by review process")
231 talk.save()
232
233 # Create the revision
234 # Note that we do this after the review has been saved
235 # (without a revision) in the super().form_valid call and
236 # after the scores have been added, so that the object_repr
237 # is correct
238 # We also do this after potentially updating the talk, so
239 # that the review revision time is correct for is_current
240 with revisions.create_revision():
241 revisions.set_user(self.request.user)
242 if existing:
243 revisions.set_comment("Review Modified")
244 else:
245 revisions.set_comment("Review Created")
246 review.save()
247
248 return response
249
250 def get_success_url(self):
251 return self.get_object().talk.get_absolute_url()
252
253
254 class Speakers(BuildableListView):
255 model = Talk
256 template_name = 'wafer.talks/speakers.html'
257 build_path = 'talks/speakers/index.html'
258
259 def _by_row(self, speakers, n):
260 return [speakers[i:i + n] for i in range(0, len(speakers), n)]
261
262 def get_context_data(self, **kwargs):
263 context = super().get_context_data(**kwargs)
264 speakers = UserProfile.objects.filter(
265 user__talks__status='A').distinct().prefetch_related(
266 'user').order_by('user__talks__talk_type',
267 'user__first_name',
268 'user__last_name',
269 'user__username').annotate(
270 talk_type=F('user__talks__talk_type__name'),
271 show_speakers=F('user__talks__talk_type__show_speakers'))
272 bytype = groupby(speakers, lambda x: x.talk_type)
273 context['speaker_rows'] = {}
274 for talk_type, type_speakers in bytype:
275 type_speakers = list(type_speakers)
276 # We explicitly check for False, as no talk type will give us None for
277 # show_speakers and we want to default to including that
278 if type_speakers and type_speakers[0].show_speakers is not False:
279 context["speaker_rows"][talk_type] = self._by_row(type_speakers, 4)
280 return context
281
282
283 class TracksView(BuildableListView):
284 model = Track
285 template_name = 'wafer.talks/talk_tracks.html'
286 build_path = 'talks/tracks/index.html'
287
288
289 class TalkTypesView(BuildableListView):
290 model = TalkType
291 template_name = 'wafer.talks/talk_types.html'
292 build_path = 'talks/types/index.html'
293
294
295 class TalksViewSet(NestedViewSetMixin, viewsets.ModelViewSet):
296 """API endpoint that allows talks to be viewed or edited."""
297 queryset = Talk.objects.none() # Needed for the REST Permissions
298 serializer_class = TalkSerializer
299 # XXX: Do we want to allow authors to edit talks via the API?
300 permission_classes = (DjangoModelPermissionsOrAnonReadOnly, )
301
302 @order_results_by('talk_type', 'talk_id')
303 def get_queryset(self):
304 # We override the default implementation to only show accepted talks
305 # to people who aren't part of the management group
306 if self.request.user.id is None:
307 # Anonymous user, so just accepted or cancelled talks
308 return Talk.objects.filter(Q(status=ACCEPTED) |
309 Q(status=CANCELLED))
310 elif Talk.can_view_all(self.request.user):
311 return Talk.objects.all()
312 else:
313 # Also include talks owned by the user
314 # XXX: Should this be all authors rather than just
315 # the corresponding author?
316 return Talk.objects.filter(
317 Q(status=ACCEPTED) |
318 Q(status=CANCELLED) |
319 Q(corresponding_author=self.request.user))
320
321
322 class TalkExistsPermission(BasePermission):
323 def has_permission(self, request, view):
324 talk_id = view.get_parents_query_dict()['talk']
325 if not Talk.objects.filter(pk=talk_id).exists():
326 raise Http404
327 return True
328
329
330 class TalkUrlsViewSet(NestedViewSetMixin, viewsets.ModelViewSet):
331 """API endpoint that allows talks to be viewed or edited."""
332 queryset = TalkUrl.objects.all().order_by('id')
333 serializer_class = TalkUrlSerializer
334 permission_classes = (DjangoModelPermissions, TalkExistsPermission)
335
336 def create(self, request, *args, **kw):
337 request.data['talk'] = self.get_parents_query_dict()['talk']
338 return super().create(request, *args, **kw)
339
340 def update(self, request, *args, **kw):
341 request.data['talk'] = self.get_parents_query_dict()['talk']
342 return super().update(request, *args, **kw)
343
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wafer/talks/views.py b/wafer/talks/views.py
--- a/wafer/talks/views.py
+++ b/wafer/talks/views.py
@@ -184,9 +184,10 @@
template_name = 'wafer.talks/talk_withdraw.html'
success_url = reverse_lazy('wafer_page')
+
@revisions.create_revision()
- def delete(self, request, *args, **kwargs):
- """Override delete to only withdraw"""
+ def withdraw_helper(self, request):
+ """Handle the logic for withdrawing a talk"""
talk = self.get_object()
talk.status = WITHDRAWN
talk.save()
@@ -194,6 +195,14 @@
revisions.set_comment("Talk Withdrawn")
return HttpResponseRedirect(self.success_url)
+ def delete(self, request, *args, **kwargs):
+ """Override delete to only withdraw for Django < 4"""
+ return self.withdraw_helper(request)
+
+ def form_valid(self, request, *args, **kwargs):
+ """Override delete to only withdraw for Django >= 4.0"""
+ return self.withdraw_helper(request)
+
class TalkReview(PermissionRequiredMixin, CreateView):
model = Review
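The fix keeps one helper and routes both entry points through it: `delete()` for Django < 4.0 and `form_valid()` for Django >= 4.0. A standalone sketch of the same pattern in a plain Django project (app, model, and URL names are placeholders; note that in stock Django 4 `form_valid` receives the confirmation form instance):

```python
from django.http import HttpResponseRedirect
from django.urls import reverse_lazy
from django.views.generic.edit import DeleteView

from myapp.models import Talk, WITHDRAWN  # placeholder app and status constant


class TalkWithdraw(DeleteView):
    model = Talk
    success_url = reverse_lazy("talk-list")  # placeholder URL name

    def withdraw_helper(self, request):
        # Shared logic: mark the object withdrawn instead of deleting the row.
        talk = self.get_object()
        talk.status = WITHDRAWN
        talk.save()
        return HttpResponseRedirect(self.success_url)

    def delete(self, request, *args, **kwargs):
        # Django < 4.0 sends POST deletions here.
        return self.withdraw_helper(request)

    def form_valid(self, form):
        # Django >= 4.0 sends POST deletions here via FormMixin.
        return self.withdraw_helper(self.request)
```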
| {"golden_diff": "diff --git a/wafer/talks/views.py b/wafer/talks/views.py\n--- a/wafer/talks/views.py\n+++ b/wafer/talks/views.py\n@@ -184,9 +184,10 @@\n template_name = 'wafer.talks/talk_withdraw.html'\n success_url = reverse_lazy('wafer_page')\n \n+\n @revisions.create_revision()\n- def delete(self, request, *args, **kwargs):\n- \"\"\"Override delete to only withdraw\"\"\"\n+ def withdraw_helper(self, request):\n+ \"\"\"Handle the logic for withdrawing a talk\"\"\"\n talk = self.get_object()\n talk.status = WITHDRAWN\n talk.save()\n@@ -194,6 +195,14 @@\n revisions.set_comment(\"Talk Withdrawn\")\n return HttpResponseRedirect(self.success_url)\n \n+ def delete(self, request, *args, **kwargs):\n+ \"\"\"Override delete to only withdraw for Django < 4\"\"\"\n+ return self.withdraw_helper(request)\n+\n+ def form_valid(self, request, *args, **kwargs):\n+ \"\"\"Override delete to only withdraw for Django >= 4.0\"\"\"\n+ return self.withdraw_helper(request)\n+\n \n class TalkReview(PermissionRequiredMixin, CreateView):\n model = Review\n", "issue": "Custom talk deletion logic breaks with Django >= 4.0\nThe custom logic to turn talk deletions into withdrawals doesn't work with Django >= 4.0\r\n\r\nQuoting from the Django 4 release notes\r\n\r\n```In accordance with FormMixin, object deletion for POST requests is handled in form_valid(). Custom delete logic in delete() handlers should be moved to form_valid(), or a shared helper method, as needed.```\r\n\r\nThe change is mechanically fairly simple, but I still need to look how this works the older Dango releases we support.\n", "before_files": [{"content": "from itertools import groupby\n\nfrom django.conf import settings\nfrom django.contrib.auth.mixins import (\n LoginRequiredMixin, PermissionRequiredMixin)\nfrom django.contrib.auth.models import AnonymousUser\nfrom django.core.exceptions import PermissionDenied, ValidationError\nfrom django.db.models import Q, F\nfrom django.http import Http404\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse_lazy\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\n\nfrom bakery.views import BuildableDetailView, BuildableListView\nfrom rest_framework import viewsets\nfrom rest_framework.permissions import (\n DjangoModelPermissions, DjangoModelPermissionsOrAnonReadOnly,\n BasePermission)\nfrom rest_framework_extensions.mixins import NestedViewSetMixin\nfrom reversion import revisions\nfrom reversion.models import Version\n\nfrom wafer.talks.models import (\n Review, Talk, TalkType, TalkUrl, Track,\n ACCEPTED, CANCELLED, SUBMITTED, UNDER_CONSIDERATION, WITHDRAWN)\nfrom wafer.talks.forms import ReviewForm, get_talk_form_class\nfrom wafer.talks.serializers import TalkSerializer, TalkUrlSerializer\nfrom wafer.users.models import UserProfile\nfrom wafer.utils import order_results_by, PaginatedBuildableListView\n\n\nclass EditOwnTalksMixin(object):\n '''Users can edit their own talks as long as the talk is\n \"Under Consideration\"'''\n def get_object(self, *args, **kwargs):\n object_ = super().get_object(*args, **kwargs)\n if object_.can_edit(self.request.user):\n return object_\n else:\n raise PermissionDenied\n\n\nclass UsersTalks(PaginatedBuildableListView):\n template_name = 'wafer.talks/talks.html'\n build_prefix = 'talks'\n paginate_by = 100\n\n @order_results_by('talk_type', 'talk_id')\n def get_queryset(self):\n # self.request will be None when we come here via the static site\n # renderer\n if self.request and Talk.can_view_all(self.request.user):\n 
talks = Talk.objects.all()\n else:\n talks = Talk.objects.filter(Q(status=ACCEPTED) | Q(status=CANCELLED))\n return talks.prefetch_related(\n \"talk_type\", \"corresponding_author\", \"authors\", \"authors__userprofile\"\n )\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"languages\"] = Talk.LANGUAGES\n context[\"see_all\"] = Talk.can_view_all(self.request.user)\n return context\n\n\nclass TalkView(BuildableDetailView):\n template_name = 'wafer.talks/talk.html'\n model = Talk\n\n # Needed so django-bakery only renders public talks\n def build_object(self, obj):\n \"\"\"Override django-bakery to skip talks that raise 403\"\"\"\n try:\n super().build_object(obj)\n except PermissionDenied:\n # We cleanup the directory created\n self.unbuild_object(obj)\n\n def create_request(self, path):\n request = super().create_request(path)\n request.user = AnonymousUser()\n return request\n\n def get_object(self, *args, **kwargs):\n '''Only talk owners can see talks, unless they've been accepted'''\n object_ = super().get_object(*args, **kwargs)\n if not object_.can_view(self.request.user):\n raise PermissionDenied\n return object_\n\n def canonical_url(self):\n '''Return the canonical URL for this view'''\n return self.object.get_absolute_url()\n\n def render_to_response(self, *args, **kwargs):\n '''Canonicalize the URL if the slug changed'''\n canonical_url = self.canonical_url()\n if self.request.path != canonical_url:\n return HttpResponseRedirect(canonical_url)\n return super().render_to_response(*args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n talk = self.object\n user = self.request.user\n\n context['can_edit'] = talk.can_edit(user)\n\n can_review = talk.can_review(user)\n context['can_review'] = can_review\n if can_review:\n review = talk.reviews.filter(reviewer=user).first()\n context['review_form'] = ReviewForm(\n instance=review, talk=talk, user=user)\n\n return context\n\n\nclass TalkCreate(LoginRequiredMixin, CreateView):\n model = Talk\n template_name = 'wafer.talks/talk_form.html'\n\n def get_form_class(self):\n return get_talk_form_class()\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['user'] = self.request.user\n return kwargs\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n can_submit = getattr(settings, 'WAFER_TALKS_OPEN', True)\n if can_submit and TalkType.objects.exists():\n # Check for all talk types being disabled\n can_submit = TalkType.objects.open_for_submission().count() > 0\n context['can_submit'] = can_submit\n return context\n\n @revisions.create_revision()\n def form_valid(self, form):\n if not getattr(settings, 'WAFER_TALKS_OPEN', True):\n # Should this be SuspiciousOperation?\n raise ValidationError(\"Talk submission isn't open\")\n # Eaaargh we have to do the work of CreateView if we want to set values\n # before saving\n self.object = form.save(commit=False)\n self.object.corresponding_author = self.request.user\n self.object.save()\n revisions.set_user(self.request.user)\n revisions.set_comment(\"Talk Created\")\n # Save the author information as well (many-to-many fun)\n form.save_m2m()\n return HttpResponseRedirect(self.get_success_url())\n\n\nclass TalkUpdate(EditOwnTalksMixin, UpdateView):\n model = Talk\n template_name = 'wafer.talks/talk_form.html'\n\n def get_form_class(self):\n return get_talk_form_class()\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n 
kwargs['user'] = self.request.user\n return kwargs\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['can_edit'] = self.object.can_edit(self.request.user)\n return context\n\n @revisions.create_revision()\n def form_valid(self, form):\n revisions.set_user(self.request.user)\n revisions.set_comment(\"Talk Modified\")\n return super().form_valid(form)\n\n\nclass TalkWithdraw(EditOwnTalksMixin, DeleteView):\n model = Talk\n template_name = 'wafer.talks/talk_withdraw.html'\n success_url = reverse_lazy('wafer_page')\n\n @revisions.create_revision()\n def delete(self, request, *args, **kwargs):\n \"\"\"Override delete to only withdraw\"\"\"\n talk = self.get_object()\n talk.status = WITHDRAWN\n talk.save()\n revisions.set_user(self.request.user)\n revisions.set_comment(\"Talk Withdrawn\")\n return HttpResponseRedirect(self.success_url)\n\n\nclass TalkReview(PermissionRequiredMixin, CreateView):\n model = Review\n form_class = ReviewForm\n permission_required = 'talks.add_review'\n template_name = 'wafer.talks/review_talk.html'\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['talk'] = Talk.objects.get(pk=self.kwargs['pk'])\n kwargs['instance'] = self.get_object()\n kwargs['user'] = self.request.user\n return kwargs\n\n def get_object(self):\n try:\n return Review.objects.get(\n talk_id=self.kwargs['pk'], reviewer=self.request.user)\n except Review.DoesNotExist:\n return None\n\n def form_valid(self, form):\n existing = self.get_object()\n response = super().form_valid(form)\n\n review = self.get_object()\n # Update the talk to 'under consideration' if a review is\n # added.\n talk = review.talk\n if talk.status == SUBMITTED:\n talk.status = UNDER_CONSIDERATION\n with revisions.create_revision():\n revisions.set_user(self.request.user)\n revisions.set_comment(\"Status changed by review process\")\n talk.save()\n\n # Create the revision\n # Note that we do this after the review has been saved\n # (without a revision) in the super().form_valid call and\n # after the scores have been added, so that the object_repr\n # is correct\n # We also do this after potentially updating the talk, so\n # that the review revision time is correct for is_current\n with revisions.create_revision():\n revisions.set_user(self.request.user)\n if existing:\n revisions.set_comment(\"Review Modified\")\n else:\n revisions.set_comment(\"Review Created\")\n review.save()\n\n return response\n\n def get_success_url(self):\n return self.get_object().talk.get_absolute_url()\n\n\nclass Speakers(BuildableListView):\n model = Talk\n template_name = 'wafer.talks/speakers.html'\n build_path = 'talks/speakers/index.html'\n\n def _by_row(self, speakers, n):\n return [speakers[i:i + n] for i in range(0, len(speakers), n)]\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n speakers = UserProfile.objects.filter(\n user__talks__status='A').distinct().prefetch_related(\n 'user').order_by('user__talks__talk_type',\n 'user__first_name',\n 'user__last_name',\n 'user__username').annotate(\n talk_type=F('user__talks__talk_type__name'),\n show_speakers=F('user__talks__talk_type__show_speakers'))\n bytype = groupby(speakers, lambda x: x.talk_type)\n context['speaker_rows'] = {}\n for talk_type, type_speakers in bytype:\n type_speakers = list(type_speakers)\n # We explicitly check for False, as no talk type will give us None for\n # show_speakers and we want to default to including that\n if type_speakers and 
type_speakers[0].show_speakers is not False:\n context[\"speaker_rows\"][talk_type] = self._by_row(type_speakers, 4)\n return context\n\n\nclass TracksView(BuildableListView):\n model = Track\n template_name = 'wafer.talks/talk_tracks.html'\n build_path = 'talks/tracks/index.html'\n\n\nclass TalkTypesView(BuildableListView):\n model = TalkType\n template_name = 'wafer.talks/talk_types.html'\n build_path = 'talks/types/index.html'\n\n\nclass TalksViewSet(NestedViewSetMixin, viewsets.ModelViewSet):\n \"\"\"API endpoint that allows talks to be viewed or edited.\"\"\"\n queryset = Talk.objects.none() # Needed for the REST Permissions\n serializer_class = TalkSerializer\n # XXX: Do we want to allow authors to edit talks via the API?\n permission_classes = (DjangoModelPermissionsOrAnonReadOnly, )\n\n @order_results_by('talk_type', 'talk_id')\n def get_queryset(self):\n # We override the default implementation to only show accepted talks\n # to people who aren't part of the management group\n if self.request.user.id is None:\n # Anonymous user, so just accepted or cancelled talks\n return Talk.objects.filter(Q(status=ACCEPTED) |\n Q(status=CANCELLED))\n elif Talk.can_view_all(self.request.user):\n return Talk.objects.all()\n else:\n # Also include talks owned by the user\n # XXX: Should this be all authors rather than just\n # the corresponding author?\n return Talk.objects.filter(\n Q(status=ACCEPTED) |\n Q(status=CANCELLED) |\n Q(corresponding_author=self.request.user))\n\n\nclass TalkExistsPermission(BasePermission):\n def has_permission(self, request, view):\n talk_id = view.get_parents_query_dict()['talk']\n if not Talk.objects.filter(pk=talk_id).exists():\n raise Http404\n return True\n\n\nclass TalkUrlsViewSet(NestedViewSetMixin, viewsets.ModelViewSet):\n \"\"\"API endpoint that allows talks to be viewed or edited.\"\"\"\n queryset = TalkUrl.objects.all().order_by('id')\n serializer_class = TalkUrlSerializer\n permission_classes = (DjangoModelPermissions, TalkExistsPermission)\n\n def create(self, request, *args, **kw):\n request.data['talk'] = self.get_parents_query_dict()['talk']\n return super().create(request, *args, **kw)\n\n def update(self, request, *args, **kw):\n request.data['talk'] = self.get_parents_query_dict()['talk']\n return super().update(request, *args, **kw)\n", "path": "wafer/talks/views.py"}], "after_files": [{"content": "from itertools import groupby\n\nfrom django.conf import settings\nfrom django.contrib.auth.mixins import (\n LoginRequiredMixin, PermissionRequiredMixin)\nfrom django.contrib.auth.models import AnonymousUser\nfrom django.core.exceptions import PermissionDenied, ValidationError\nfrom django.db.models import Q, F\nfrom django.http import Http404\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse_lazy\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\n\nfrom bakery.views import BuildableDetailView, BuildableListView\nfrom rest_framework import viewsets\nfrom rest_framework.permissions import (\n DjangoModelPermissions, DjangoModelPermissionsOrAnonReadOnly,\n BasePermission)\nfrom rest_framework_extensions.mixins import NestedViewSetMixin\nfrom reversion import revisions\nfrom reversion.models import Version\n\nfrom wafer.talks.models import (\n Review, Talk, TalkType, TalkUrl, Track,\n ACCEPTED, CANCELLED, SUBMITTED, UNDER_CONSIDERATION, WITHDRAWN)\nfrom wafer.talks.forms import ReviewForm, get_talk_form_class\nfrom wafer.talks.serializers import TalkSerializer, TalkUrlSerializer\nfrom 
wafer.users.models import UserProfile\nfrom wafer.utils import order_results_by, PaginatedBuildableListView\n\n\nclass EditOwnTalksMixin(object):\n '''Users can edit their own talks as long as the talk is\n \"Under Consideration\"'''\n def get_object(self, *args, **kwargs):\n object_ = super().get_object(*args, **kwargs)\n if object_.can_edit(self.request.user):\n return object_\n else:\n raise PermissionDenied\n\n\nclass UsersTalks(PaginatedBuildableListView):\n template_name = 'wafer.talks/talks.html'\n build_prefix = 'talks'\n paginate_by = 100\n\n @order_results_by('talk_type', 'talk_id')\n def get_queryset(self):\n # self.request will be None when we come here via the static site\n # renderer\n if self.request and Talk.can_view_all(self.request.user):\n talks = Talk.objects.all()\n else:\n talks = Talk.objects.filter(Q(status=ACCEPTED) | Q(status=CANCELLED))\n return talks.prefetch_related(\n \"talk_type\", \"corresponding_author\", \"authors\", \"authors__userprofile\"\n )\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"languages\"] = Talk.LANGUAGES\n context[\"see_all\"] = Talk.can_view_all(self.request.user)\n return context\n\n\nclass TalkView(BuildableDetailView):\n template_name = 'wafer.talks/talk.html'\n model = Talk\n\n # Needed so django-bakery only renders public talks\n def build_object(self, obj):\n \"\"\"Override django-bakery to skip talks that raise 403\"\"\"\n try:\n super().build_object(obj)\n except PermissionDenied:\n # We cleanup the directory created\n self.unbuild_object(obj)\n\n def create_request(self, path):\n request = super().create_request(path)\n request.user = AnonymousUser()\n return request\n\n def get_object(self, *args, **kwargs):\n '''Only talk owners can see talks, unless they've been accepted'''\n object_ = super().get_object(*args, **kwargs)\n if not object_.can_view(self.request.user):\n raise PermissionDenied\n return object_\n\n def canonical_url(self):\n '''Return the canonical URL for this view'''\n return self.object.get_absolute_url()\n\n def render_to_response(self, *args, **kwargs):\n '''Canonicalize the URL if the slug changed'''\n canonical_url = self.canonical_url()\n if self.request.path != canonical_url:\n return HttpResponseRedirect(canonical_url)\n return super().render_to_response(*args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n talk = self.object\n user = self.request.user\n\n context['can_edit'] = talk.can_edit(user)\n\n can_review = talk.can_review(user)\n context['can_review'] = can_review\n if can_review:\n review = talk.reviews.filter(reviewer=user).first()\n context['review_form'] = ReviewForm(\n instance=review, talk=talk, user=user)\n\n return context\n\n\nclass TalkCreate(LoginRequiredMixin, CreateView):\n model = Talk\n template_name = 'wafer.talks/talk_form.html'\n\n def get_form_class(self):\n return get_talk_form_class()\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['user'] = self.request.user\n return kwargs\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n can_submit = getattr(settings, 'WAFER_TALKS_OPEN', True)\n if can_submit and TalkType.objects.exists():\n # Check for all talk types being disabled\n can_submit = TalkType.objects.open_for_submission().count() > 0\n context['can_submit'] = can_submit\n return context\n\n @revisions.create_revision()\n def form_valid(self, form):\n if not getattr(settings, 'WAFER_TALKS_OPEN', 
True):\n # Should this be SuspiciousOperation?\n raise ValidationError(\"Talk submission isn't open\")\n # Eaaargh we have to do the work of CreateView if we want to set values\n # before saving\n self.object = form.save(commit=False)\n self.object.corresponding_author = self.request.user\n self.object.save()\n revisions.set_user(self.request.user)\n revisions.set_comment(\"Talk Created\")\n # Save the author information as well (many-to-many fun)\n form.save_m2m()\n return HttpResponseRedirect(self.get_success_url())\n\n\nclass TalkUpdate(EditOwnTalksMixin, UpdateView):\n model = Talk\n template_name = 'wafer.talks/talk_form.html'\n\n def get_form_class(self):\n return get_talk_form_class()\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['user'] = self.request.user\n return kwargs\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['can_edit'] = self.object.can_edit(self.request.user)\n return context\n\n @revisions.create_revision()\n def form_valid(self, form):\n revisions.set_user(self.request.user)\n revisions.set_comment(\"Talk Modified\")\n return super().form_valid(form)\n\n\nclass TalkWithdraw(EditOwnTalksMixin, DeleteView):\n model = Talk\n template_name = 'wafer.talks/talk_withdraw.html'\n success_url = reverse_lazy('wafer_page')\n\n\n @revisions.create_revision()\n def withdraw_helper(self, request):\n \"\"\"Handle the logic for withdrawing a talk\"\"\"\n talk = self.get_object()\n talk.status = WITHDRAWN\n talk.save()\n revisions.set_user(self.request.user)\n revisions.set_comment(\"Talk Withdrawn\")\n return HttpResponseRedirect(self.success_url)\n\n def delete(self, request, *args, **kwargs):\n \"\"\"Override delete to only withdraw for Django < 4\"\"\"\n return self.withdraw_helper(request)\n\n def form_valid(self, request, *args, **kwargs):\n \"\"\"Override delete to only withdraw for Django >= 4.0\"\"\"\n return self.withdraw_helper(request)\n\n\nclass TalkReview(PermissionRequiredMixin, CreateView):\n model = Review\n form_class = ReviewForm\n permission_required = 'talks.add_review'\n template_name = 'wafer.talks/review_talk.html'\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['talk'] = Talk.objects.get(pk=self.kwargs['pk'])\n kwargs['instance'] = self.get_object()\n kwargs['user'] = self.request.user\n return kwargs\n\n def get_object(self):\n try:\n return Review.objects.get(\n talk_id=self.kwargs['pk'], reviewer=self.request.user)\n except Review.DoesNotExist:\n return None\n\n def form_valid(self, form):\n existing = self.get_object()\n response = super().form_valid(form)\n\n review = self.get_object()\n # Update the talk to 'under consideration' if a review is\n # added.\n talk = review.talk\n if talk.status == SUBMITTED:\n talk.status = UNDER_CONSIDERATION\n with revisions.create_revision():\n revisions.set_user(self.request.user)\n revisions.set_comment(\"Status changed by review process\")\n talk.save()\n\n # Create the revision\n # Note that we do this after the review has been saved\n # (without a revision) in the super().form_valid call and\n # after the scores have been added, so that the object_repr\n # is correct\n # We also do this after potentially updating the talk, so\n # that the review revision time is correct for is_current\n with revisions.create_revision():\n revisions.set_user(self.request.user)\n if existing:\n revisions.set_comment(\"Review Modified\")\n else:\n revisions.set_comment(\"Review Created\")\n review.save()\n\n return 
response\n\n def get_success_url(self):\n return self.get_object().talk.get_absolute_url()\n\n\nclass Speakers(BuildableListView):\n model = Talk\n template_name = 'wafer.talks/speakers.html'\n build_path = 'talks/speakers/index.html'\n\n def _by_row(self, speakers, n):\n return [speakers[i:i + n] for i in range(0, len(speakers), n)]\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n speakers = UserProfile.objects.filter(\n user__talks__status='A').distinct().prefetch_related(\n 'user').order_by('user__talks__talk_type',\n 'user__first_name',\n 'user__last_name',\n 'user__username').annotate(\n talk_type=F('user__talks__talk_type__name'),\n show_speakers=F('user__talks__talk_type__show_speakers'))\n bytype = groupby(speakers, lambda x: x.talk_type)\n context['speaker_rows'] = {}\n for talk_type, type_speakers in bytype:\n type_speakers = list(type_speakers)\n # We explicitly check for False, as no talk type will give us None for\n # show_speakers and we want to default to including that\n if type_speakers and type_speakers[0].show_speakers is not False:\n context[\"speaker_rows\"][talk_type] = self._by_row(type_speakers, 4)\n return context\n\n\nclass TracksView(BuildableListView):\n model = Track\n template_name = 'wafer.talks/talk_tracks.html'\n build_path = 'talks/tracks/index.html'\n\n\nclass TalkTypesView(BuildableListView):\n model = TalkType\n template_name = 'wafer.talks/talk_types.html'\n build_path = 'talks/types/index.html'\n\n\nclass TalksViewSet(NestedViewSetMixin, viewsets.ModelViewSet):\n \"\"\"API endpoint that allows talks to be viewed or edited.\"\"\"\n queryset = Talk.objects.none() # Needed for the REST Permissions\n serializer_class = TalkSerializer\n # XXX: Do we want to allow authors to edit talks via the API?\n permission_classes = (DjangoModelPermissionsOrAnonReadOnly, )\n\n @order_results_by('talk_type', 'talk_id')\n def get_queryset(self):\n # We override the default implementation to only show accepted talks\n # to people who aren't part of the management group\n if self.request.user.id is None:\n # Anonymous user, so just accepted or cancelled talks\n return Talk.objects.filter(Q(status=ACCEPTED) |\n Q(status=CANCELLED))\n elif Talk.can_view_all(self.request.user):\n return Talk.objects.all()\n else:\n # Also include talks owned by the user\n # XXX: Should this be all authors rather than just\n # the corresponding author?\n return Talk.objects.filter(\n Q(status=ACCEPTED) |\n Q(status=CANCELLED) |\n Q(corresponding_author=self.request.user))\n\n\nclass TalkExistsPermission(BasePermission):\n def has_permission(self, request, view):\n talk_id = view.get_parents_query_dict()['talk']\n if not Talk.objects.filter(pk=talk_id).exists():\n raise Http404\n return True\n\n\nclass TalkUrlsViewSet(NestedViewSetMixin, viewsets.ModelViewSet):\n \"\"\"API endpoint that allows talks to be viewed or edited.\"\"\"\n queryset = TalkUrl.objects.all().order_by('id')\n serializer_class = TalkUrlSerializer\n permission_classes = (DjangoModelPermissions, TalkExistsPermission)\n\n def create(self, request, *args, **kw):\n request.data['talk'] = self.get_parents_query_dict()['talk']\n return super().create(request, *args, **kw)\n\n def update(self, request, *args, **kw):\n request.data['talk'] = self.get_parents_query_dict()['talk']\n return super().update(request, *args, **kw)\n", "path": "wafer/talks/views.py"}]} | 4,089 | 284 |
gh_patches_debug_25284 | rasdani/github-patches | git_diff | pydantic__pydantic-740 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
__post_init__ is not triggered for descendant dataclass
The `__post_init__` method of parent is not called in child classes.
* OS: **Ubuntu**
* Python version `import sys; print(sys.version)`: **3.7.2 (default, Mar 25 2019, 19:29:53) **
* Pydantic version `import pydantic; print(pydantic.VERSION)`: **0.32.1**
```py
from dataclasses import dataclass, field
from typing import Any
# comment the pydantic import below to see the expected result
from pydantic.dataclasses import dataclass
@dataclass
class Base:
x: float
y: float
def __post_init__(self):
print('Called!')
self.z = self.x + self.y
@dataclass
class Child(Base):
a: int
obj = Child(a=0, x=1.5, y=2.5)
print(obj.z)
```
Everything works fine when using the `dataclass` from the standard library. After looking through the code, I expected that an easy workaround would be to add a simple `__post_init__` in the child:
```python
@dataclass
class Child(Base):
a: int
def __post_init__(self):
super().__post_init__()
```
Now I do get the 'Called!' message, but the 'z' attribute is not preserved anyway...
Do I miss something obvious in the usage of pydantic?
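A minimal sketch of the mechanism (single field, pydantic 0.32.1 assumed; names are illustrative): the wrapper that pydantic installs on `Base` is what the attribute lookup on `Child` finds, and the `__name__ == '_pydantic_post_init'` check in `_process_class` (shown in the files below) then discards it, so the parent's hook never runs for `Child`:
```python
from pydantic.dataclasses import dataclass  # pydantic 0.32.1 assumed


@dataclass
class Base:
    x: float

    def __post_init__(self):
        print('Called!')


# After processing, Base.__post_init__ is pydantic's wrapper function.
print(Base.__post_init__.__name__)  # '_pydantic_post_init'


@dataclass
class Child(Base):
    a: int


# _process_class looks up '__post_init__' on Child, finds the inherited
# wrapper, and the name check sets post_init_original to None, so the
# original Base.__post_init__ is never invoked for Child instances.
Child(a=0, x=1.5)  # prints nothing on 0.32.1
```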
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pydantic/dataclasses.py`
Content:
```
1 import dataclasses
2 from typing import TYPE_CHECKING, Any, Callable, Dict, Generator, Optional, Type, Union
3
4 from .class_validators import gather_validators
5 from .error_wrappers import ValidationError
6 from .errors import DataclassTypeError
7 from .fields import Required
8 from .main import create_model, validate_model
9 from .utils import AnyType
10
11 if TYPE_CHECKING: # pragma: no cover
12 from .main import BaseConfig, BaseModel # noqa: F401
13
14 class DataclassType:
15 __pydantic_model__: Type[BaseModel]
16 __initialised__: bool
17
18 def __init__(self, *args: Any, **kwargs: Any) -> None:
19 pass
20
21 @classmethod
22 def __validate__(cls, v: Any) -> 'DataclassType':
23 pass
24
25
26 def _validate_dataclass(cls: Type['DataclassType'], v: Any) -> 'DataclassType':
27 if isinstance(v, cls):
28 return v
29 elif isinstance(v, (list, tuple)):
30 return cls(*v)
31 elif isinstance(v, dict):
32 return cls(**v)
33 else:
34 raise DataclassTypeError(class_name=cls.__name__)
35
36
37 def _get_validators(cls: Type['DataclassType']) -> Generator[Any, None, None]:
38 yield cls.__validate__
39
40
41 def setattr_validate_assignment(self: 'DataclassType', name: str, value: Any) -> None:
42 if self.__initialised__:
43 d = dict(self.__dict__)
44 d.pop(name, None)
45 known_field = self.__pydantic_model__.__fields__.get(name, None)
46 if known_field:
47 value, error_ = known_field.validate(value, d, loc=name, cls=self.__class__)
48 if error_:
49 raise ValidationError([error_], type(self))
50
51 object.__setattr__(self, name, value)
52
53
54 def _process_class(
55 _cls: AnyType,
56 init: bool,
57 repr: bool,
58 eq: bool,
59 order: bool,
60 unsafe_hash: bool,
61 frozen: bool,
62 config: Type['BaseConfig'],
63 ) -> 'DataclassType':
64 post_init_original = getattr(_cls, '__post_init__', None)
65 post_init_post_parse = getattr(_cls, '__post_init_post_parse__', None)
66 if post_init_original and post_init_original.__name__ == '_pydantic_post_init':
67 post_init_original = None
68
69 def _pydantic_post_init(self: 'DataclassType', *initvars: Any) -> None:
70 if post_init_original is not None:
71 post_init_original(self, *initvars)
72 d = validate_model(self.__pydantic_model__, self.__dict__, cls=self.__class__)[0]
73 object.__setattr__(self, '__dict__', d)
74 object.__setattr__(self, '__initialised__', True)
75 if post_init_post_parse is not None:
76 post_init_post_parse(self)
77
78 _cls.__post_init__ = _pydantic_post_init
79 cls = dataclasses._process_class(_cls, init, repr, eq, order, unsafe_hash, frozen) # type: ignore
80
81 fields: Dict[str, Any] = {
82 field.name: (field.type, field.default if field.default != dataclasses.MISSING else Required)
83 for field in dataclasses.fields(cls)
84 }
85
86 validators = gather_validators(cls)
87 cls.__pydantic_model__ = create_model(
88 cls.__name__, __config__=config, __module__=_cls.__module__, __validators__=validators, **fields
89 )
90
91 cls.__initialised__ = False
92 cls.__validate__ = classmethod(_validate_dataclass)
93 cls.__get_validators__ = classmethod(_get_validators)
94
95 if cls.__pydantic_model__.__config__.validate_assignment and not frozen:
96 cls.__setattr__ = setattr_validate_assignment
97
98 return cls
99
100
101 if TYPE_CHECKING: # pragma: no cover
102 # see https://github.com/python/mypy/issues/6239 for explanation of why we do this
103 from dataclasses import dataclass
104 else:
105
106 def dataclass(
107 _cls: Optional[AnyType] = None,
108 *,
109 init: bool = True,
110 repr: bool = True,
111 eq: bool = True,
112 order: bool = False,
113 unsafe_hash: bool = False,
114 frozen: bool = False,
115 config: Type['BaseConfig'] = None,
116 ) -> Union[Callable[[AnyType], 'DataclassType'], 'DataclassType']:
117 """
118 Like the python standard lib dataclasses but with type validation.
119
120 Arguments are the same as for standard dataclasses, except for validate_assignment which has the same meaning
121 as Config.validate_assignment.
122 """
123
124 def wrap(cls: AnyType) -> 'DataclassType':
125 return _process_class(cls, init, repr, eq, order, unsafe_hash, frozen, config)
126
127 if _cls is None:
128 return wrap
129
130 return wrap(_cls)
131
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pydantic/dataclasses.py b/pydantic/dataclasses.py
--- a/pydantic/dataclasses.py
+++ b/pydantic/dataclasses.py
@@ -62,9 +62,12 @@
config: Type['BaseConfig'],
) -> 'DataclassType':
post_init_original = getattr(_cls, '__post_init__', None)
- post_init_post_parse = getattr(_cls, '__post_init_post_parse__', None)
if post_init_original and post_init_original.__name__ == '_pydantic_post_init':
post_init_original = None
+ if not post_init_original:
+ post_init_original = getattr(_cls, '__post_init_original__', None)
+
+ post_init_post_parse = getattr(_cls, '__post_init_post_parse__', None)
def _pydantic_post_init(self: 'DataclassType', *initvars: Any) -> None:
if post_init_original is not None:
@@ -91,6 +94,8 @@
cls.__initialised__ = False
cls.__validate__ = classmethod(_validate_dataclass)
cls.__get_validators__ = classmethod(_get_validators)
+ if post_init_original:
+ cls.__post_init_original__ = post_init_original
if cls.__pydantic_model__.__config__.validate_assignment and not frozen:
cls.__setattr__ = setattr_validate_assignment
| {"golden_diff": "diff --git a/pydantic/dataclasses.py b/pydantic/dataclasses.py\n--- a/pydantic/dataclasses.py\n+++ b/pydantic/dataclasses.py\n@@ -62,9 +62,12 @@\n config: Type['BaseConfig'],\n ) -> 'DataclassType':\n post_init_original = getattr(_cls, '__post_init__', None)\n- post_init_post_parse = getattr(_cls, '__post_init_post_parse__', None)\n if post_init_original and post_init_original.__name__ == '_pydantic_post_init':\n post_init_original = None\n+ if not post_init_original:\n+ post_init_original = getattr(_cls, '__post_init_original__', None)\n+\n+ post_init_post_parse = getattr(_cls, '__post_init_post_parse__', None)\n \n def _pydantic_post_init(self: 'DataclassType', *initvars: Any) -> None:\n if post_init_original is not None:\n@@ -91,6 +94,8 @@\n cls.__initialised__ = False\n cls.__validate__ = classmethod(_validate_dataclass)\n cls.__get_validators__ = classmethod(_get_validators)\n+ if post_init_original:\n+ cls.__post_init_original__ = post_init_original\n \n if cls.__pydantic_model__.__config__.validate_assignment and not frozen:\n cls.__setattr__ = setattr_validate_assignment\n", "issue": "__post_init__ is not triggered for descendant dataclass \nThe `__post_init__` method of parent is not called in child classes.\r\n\r\n* OS: **Ubuntu**\r\n* Python version `import sys; print(sys.version)`: **3.7.2 (default, Mar 25 2019, 19:29:53) **\r\n* Pydantic version `import pydantic; print(pydantic.VERSION)`: **0.32.1**\r\n\r\n\r\n```py\r\nfrom dataclasses import dataclass, field\r\nfrom typing import Any\r\n\r\n# comment the pydantinc import below to see the expected result\r\nfrom pydantic.dataclasses import dataclass\r\n\r\n@dataclass\r\nclass Base:\r\n x: float\r\n y: float\r\n\r\n def __post_init__(self):\r\n print('Called!')\r\n self.z = self.x + self.y\r\n\r\n@dataclass\r\nclass Child(Base):\r\n a: int\r\n\r\nobj = Child(a=0, x=1.5, y=2.5)\r\nprint(obj.z)\r\n```\r\n\r\nEverything works fine when using the `dataclass` from standard library. After looking through the code I expected that an easy workaround would be to add simple `__post_init__` in child:\r\n\r\n```python\r\n@dataclass\r\nclass Child(Base):\r\n a: int\r\n\r\n def __post_init__(self):\r\n super().__post_init__()\r\n```\r\n\r\nNow I do get 'Called!' 
message, but the 'z' attribute is not preserved anyway...\r\n\r\nDo I miss something obvious in the usage of pydantic?\n", "before_files": [{"content": "import dataclasses\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, Generator, Optional, Type, Union\n\nfrom .class_validators import gather_validators\nfrom .error_wrappers import ValidationError\nfrom .errors import DataclassTypeError\nfrom .fields import Required\nfrom .main import create_model, validate_model\nfrom .utils import AnyType\n\nif TYPE_CHECKING: # pragma: no cover\n from .main import BaseConfig, BaseModel # noqa: F401\n\n class DataclassType:\n __pydantic_model__: Type[BaseModel]\n __initialised__: bool\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n pass\n\n @classmethod\n def __validate__(cls, v: Any) -> 'DataclassType':\n pass\n\n\ndef _validate_dataclass(cls: Type['DataclassType'], v: Any) -> 'DataclassType':\n if isinstance(v, cls):\n return v\n elif isinstance(v, (list, tuple)):\n return cls(*v)\n elif isinstance(v, dict):\n return cls(**v)\n else:\n raise DataclassTypeError(class_name=cls.__name__)\n\n\ndef _get_validators(cls: Type['DataclassType']) -> Generator[Any, None, None]:\n yield cls.__validate__\n\n\ndef setattr_validate_assignment(self: 'DataclassType', name: str, value: Any) -> None:\n if self.__initialised__:\n d = dict(self.__dict__)\n d.pop(name, None)\n known_field = self.__pydantic_model__.__fields__.get(name, None)\n if known_field:\n value, error_ = known_field.validate(value, d, loc=name, cls=self.__class__)\n if error_:\n raise ValidationError([error_], type(self))\n\n object.__setattr__(self, name, value)\n\n\ndef _process_class(\n _cls: AnyType,\n init: bool,\n repr: bool,\n eq: bool,\n order: bool,\n unsafe_hash: bool,\n frozen: bool,\n config: Type['BaseConfig'],\n) -> 'DataclassType':\n post_init_original = getattr(_cls, '__post_init__', None)\n post_init_post_parse = getattr(_cls, '__post_init_post_parse__', None)\n if post_init_original and post_init_original.__name__ == '_pydantic_post_init':\n post_init_original = None\n\n def _pydantic_post_init(self: 'DataclassType', *initvars: Any) -> None:\n if post_init_original is not None:\n post_init_original(self, *initvars)\n d = validate_model(self.__pydantic_model__, self.__dict__, cls=self.__class__)[0]\n object.__setattr__(self, '__dict__', d)\n object.__setattr__(self, '__initialised__', True)\n if post_init_post_parse is not None:\n post_init_post_parse(self)\n\n _cls.__post_init__ = _pydantic_post_init\n cls = dataclasses._process_class(_cls, init, repr, eq, order, unsafe_hash, frozen) # type: ignore\n\n fields: Dict[str, Any] = {\n field.name: (field.type, field.default if field.default != dataclasses.MISSING else Required)\n for field in dataclasses.fields(cls)\n }\n\n validators = gather_validators(cls)\n cls.__pydantic_model__ = create_model(\n cls.__name__, __config__=config, __module__=_cls.__module__, __validators__=validators, **fields\n )\n\n cls.__initialised__ = False\n cls.__validate__ = classmethod(_validate_dataclass)\n cls.__get_validators__ = classmethod(_get_validators)\n\n if cls.__pydantic_model__.__config__.validate_assignment and not frozen:\n cls.__setattr__ = setattr_validate_assignment\n\n return cls\n\n\nif TYPE_CHECKING: # pragma: no cover\n # see https://github.com/python/mypy/issues/6239 for explanation of why we do this\n from dataclasses import dataclass\nelse:\n\n def dataclass(\n _cls: Optional[AnyType] = None,\n *,\n init: bool = True,\n repr: bool = True,\n eq: bool = True,\n order: 
bool = False,\n unsafe_hash: bool = False,\n frozen: bool = False,\n config: Type['BaseConfig'] = None,\n ) -> Union[Callable[[AnyType], 'DataclassType'], 'DataclassType']:\n \"\"\"\n Like the python standard lib dataclasses but with type validation.\n\n Arguments are the same as for standard dataclasses, except for validate_assignment which has the same meaning\n as Config.validate_assignment.\n \"\"\"\n\n def wrap(cls: AnyType) -> 'DataclassType':\n return _process_class(cls, init, repr, eq, order, unsafe_hash, frozen, config)\n\n if _cls is None:\n return wrap\n\n return wrap(_cls)\n", "path": "pydantic/dataclasses.py"}], "after_files": [{"content": "import dataclasses\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, Generator, Optional, Type, Union\n\nfrom .class_validators import gather_validators\nfrom .error_wrappers import ValidationError\nfrom .errors import DataclassTypeError\nfrom .fields import Required\nfrom .main import create_model, validate_model\nfrom .utils import AnyType\n\nif TYPE_CHECKING: # pragma: no cover\n from .main import BaseConfig, BaseModel # noqa: F401\n\n class DataclassType:\n __pydantic_model__: Type[BaseModel]\n __initialised__: bool\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n pass\n\n @classmethod\n def __validate__(cls, v: Any) -> 'DataclassType':\n pass\n\n\ndef _validate_dataclass(cls: Type['DataclassType'], v: Any) -> 'DataclassType':\n if isinstance(v, cls):\n return v\n elif isinstance(v, (list, tuple)):\n return cls(*v)\n elif isinstance(v, dict):\n return cls(**v)\n else:\n raise DataclassTypeError(class_name=cls.__name__)\n\n\ndef _get_validators(cls: Type['DataclassType']) -> Generator[Any, None, None]:\n yield cls.__validate__\n\n\ndef setattr_validate_assignment(self: 'DataclassType', name: str, value: Any) -> None:\n if self.__initialised__:\n d = dict(self.__dict__)\n d.pop(name, None)\n known_field = self.__pydantic_model__.__fields__.get(name, None)\n if known_field:\n value, error_ = known_field.validate(value, d, loc=name, cls=self.__class__)\n if error_:\n raise ValidationError([error_], type(self))\n\n object.__setattr__(self, name, value)\n\n\ndef _process_class(\n _cls: AnyType,\n init: bool,\n repr: bool,\n eq: bool,\n order: bool,\n unsafe_hash: bool,\n frozen: bool,\n config: Type['BaseConfig'],\n) -> 'DataclassType':\n post_init_original = getattr(_cls, '__post_init__', None)\n if post_init_original and post_init_original.__name__ == '_pydantic_post_init':\n post_init_original = None\n if not post_init_original:\n post_init_original = getattr(_cls, '__post_init_original__', None)\n\n post_init_post_parse = getattr(_cls, '__post_init_post_parse__', None)\n\n def _pydantic_post_init(self: 'DataclassType', *initvars: Any) -> None:\n if post_init_original is not None:\n post_init_original(self, *initvars)\n d = validate_model(self.__pydantic_model__, self.__dict__, cls=self.__class__)[0]\n object.__setattr__(self, '__dict__', d)\n object.__setattr__(self, '__initialised__', True)\n if post_init_post_parse is not None:\n post_init_post_parse(self)\n\n _cls.__post_init__ = _pydantic_post_init\n cls = dataclasses._process_class(_cls, init, repr, eq, order, unsafe_hash, frozen) # type: ignore\n\n fields: Dict[str, Any] = {\n field.name: (field.type, field.default if field.default != dataclasses.MISSING else Required)\n for field in dataclasses.fields(cls)\n }\n\n validators = gather_validators(cls)\n cls.__pydantic_model__ = create_model(\n cls.__name__, __config__=config, __module__=_cls.__module__, 
__validators__=validators, **fields\n )\n\n cls.__initialised__ = False\n cls.__validate__ = classmethod(_validate_dataclass)\n cls.__get_validators__ = classmethod(_get_validators)\n if post_init_original:\n cls.__post_init_original__ = post_init_original\n\n if cls.__pydantic_model__.__config__.validate_assignment and not frozen:\n cls.__setattr__ = setattr_validate_assignment\n\n return cls\n\n\nif TYPE_CHECKING: # pragma: no cover\n # see https://github.com/python/mypy/issues/6239 for explanation of why we do this\n from dataclasses import dataclass\nelse:\n\n def dataclass(\n _cls: Optional[AnyType] = None,\n *,\n init: bool = True,\n repr: bool = True,\n eq: bool = True,\n order: bool = False,\n unsafe_hash: bool = False,\n frozen: bool = False,\n config: Type['BaseConfig'] = None,\n ) -> Union[Callable[[AnyType], 'DataclassType'], 'DataclassType']:\n \"\"\"\n Like the python standard lib dataclasses but with type validation.\n\n Arguments are the same as for standard dataclasses, except for validate_assignment which has the same meaning\n as Config.validate_assignment.\n \"\"\"\n\n def wrap(cls: AnyType) -> 'DataclassType':\n return _process_class(cls, init, repr, eq, order, unsafe_hash, frozen, config)\n\n if _cls is None:\n return wrap\n\n return wrap(_cls)\n", "path": "pydantic/dataclasses.py"}]} | 1,969 | 300 |
gh_patches_debug_36954 | rasdani/github-patches | git_diff | huggingface__dataset-viewer-2423 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve the message in Parquet conversion discussions
see https://huggingface.slack.com/archives/C02V51Q3800/p1707388935912969 (internal)
> Would it make sense to add some message in Parquet PRs (e.g. https://huggingface.co/datasets/huggan/smithsonian_butterflies_subset/discussions/2#65c41c1580497543ca3f8a5a) for less technical people?
> As an aside, I don't think the docs really mention the advantages of Parquet even if we link there for advantages (https://huggingface.co/docs/datasets-server/parquet)
> For example
> ```
> [bot] [No action needed] Conversion to Parquet
>
> The parquet-converter bot has created a version of this dataset in the Parquet format in the `refs/convert/parquet` branch. This will allow X, Y, Z.
>
> You can learn more about the advantages associated with this format in the documentation.
> ```
related to https://github.com/huggingface/datasets-server/issues/2349#issuecomment-1916292176
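For illustration only (repository id and wording are taken from the example above; endpoint and token are placeholders), such a message could be posted through the same `HfApi.create_discussion` call that the maintenance job already uses in `cache_maintenance/discussions.py`:
```python
from huggingface_hub import HfApi

DESCRIPTION = (
    "The parquet-converter bot has created a version of this dataset in the Parquet "
    "format in the `refs/convert/parquet` branch. No action is needed on your side.\n\n"
    "You can learn more about the advantages associated with this format in the "
    "documentation: https://huggingface.co/docs/datasets-server/parquet"
)

hf_api = HfApi(endpoint="https://huggingface.co", token="hf_xxx")  # placeholder token
hf_api.create_discussion(
    repo_id="huggan/smithsonian_butterflies_subset",  # example dataset from the thread
    repo_type="dataset",
    title="[bot] [No action needed] Conversion to Parquet",
    description=DESCRIPTION,
)
```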
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `jobs/cache_maintenance/src/cache_maintenance/discussions.py`
Content:
```
1 # SPDX-License-Identifier: Apache-2.0
2 # Copyright 2023 The HuggingFace Authors.
3
4 import logging
5 from dataclasses import dataclass
6 from typing import Literal, Optional
7 from urllib import parse
8
9 from huggingface_hub import HfApi
10 from huggingface_hub.constants import REPO_TYPE_DATASET
11 from libcommon.simple_cache import get_datasets_with_last_updated_kind
12
13 PARQUET_CACHE_KIND = "config-parquet"
14 DAYS = 1
15
16
17 @dataclass
18 class ParquetCounters:
19 datasets: int = 0
20 new_discussions: int = 0
21 dismissed_discussions: int = 0
22 errors: int = 0
23
24
25 @dataclass
26 class Counters:
27 parquet: ParquetCounters
28
29
30 def post_messages(
31 hf_endpoint: str, bot_associated_user_name: Optional[str], bot_token: Optional[str], parquet_revision: str
32 ) -> Counters:
33 """
34 Post messages in Hub discussions to notify users.
35 """
36 if not (bot_associated_user_name and bot_token):
37 raise Exception("No bot token or user name provided, skipping posting messages.")
38
39 return Counters(
40 parquet=post_messages_on_parquet_conversion(
41 hf_endpoint=hf_endpoint,
42 bot_associated_user_name=bot_associated_user_name,
43 bot_token=bot_token,
44 parquet_revision=parquet_revision,
45 )
46 )
47
48
49 def post_messages_on_parquet_conversion(
50 hf_endpoint: str,
51 bot_associated_user_name: str,
52 bot_token: str,
53 parquet_revision: str,
54 ) -> ParquetCounters:
55 logging.info("Create a Hub discussion to notify about parquet conversion")
56 datasets = limit_to_one_dataset_per_namespace(
57 get_datasets_with_last_updated_kind(kind=PARQUET_CACHE_KIND, days=DAYS)
58 )
59
60 logging.info(f"Creating discussions for {len(datasets)} datasets")
61 log_batch = 100
62 counters = ParquetCounters()
63
64 def get_log() -> str:
65 return (
66 f" [{counters.datasets}/{len(datasets)}] {counters.new_discussions} discussions"
67 f" have been opened, {counters.dismissed_discussions} datasets"
68 f" already had a discussion (open or closed). {counters.errors} errors."
69 )
70
71 hf_api = HfApi(endpoint=hf_endpoint, token=bot_token)
72
73 for dataset in datasets:
74 counters.datasets += 1
75 prefix = f"[{counters.datasets}/{len(datasets)}]"
76 logging.info(f"{prefix} Processing dataset {dataset}")
77 try:
78 try:
79 next(
80 hf_api.get_repo_discussions(
81 repo_id=dataset, repo_type=REPO_TYPE_DATASET, token=bot_token, author=bot_associated_user_name
82 )
83 )
84 # if we get here, the bot has already opened a discussion for this dataset
85 counters.dismissed_discussions += 1
86 logging.info(f"{prefix} [dismissed] Dataset {dataset} already has a discussion, skipping")
87 except StopIteration:
88 hf_api.create_discussion(
89 repo_id=dataset,
90 repo_type=REPO_TYPE_DATASET,
91 title="[bot] Conversion to Parquet",
92 description=create_discussion_description(
93 dataset=dataset,
94 hf_endpoint=hf_endpoint,
95 parquet_revision=parquet_revision,
96 bot_associated_user_name=bot_associated_user_name,
97 ),
98 token=bot_token,
99 )
100 counters.new_discussions += 1
101 logging.info(f"{prefix} [new] Dataset {dataset} has a new discussion")
102 except Exception as e:
103 counters.errors += 1
104 logging.warning(f"{prefix} [error] Failed to process dataset {dataset}: {e}")
105
106 logging.debug(get_log())
107 if (counters.datasets) % log_batch == 0:
108 logging.info(get_log())
109
110 logging.info(get_log())
111 logging.info("All the messages about parquet conversion have been posted.")
112
113 return counters
114
115
116 def create_discussion_description(
117 dataset: str, hf_endpoint: str, parquet_revision: str, bot_associated_user_name: str
118 ) -> str:
119 link_parquet = create_link(
120 text=parquet_revision,
121 dataset=dataset,
122 hf_endpoint=hf_endpoint,
123 revision_type="tree",
124 revision=parquet_revision,
125 )
126 return (
127 f"The {bot_associated_user_name} bot has created a version of this dataset in the [Parquet"
128 " format](https://parquet.apache.org/). You can learn more about the advantages associated with this format"
129 f""" in the [documentation](https://huggingface.co/docs/datasets-server/parquet).
130
131 The Parquet files are published in the {link_parquet} branch."""
132 )
133
134
135 def create_link(
136 text: str, dataset: str, hf_endpoint: str, revision_type: Literal["commit", "tree"], revision: str
137 ) -> str:
138 return f"[`{text}`]({hf_endpoint}/datasets/{dataset}/{revision_type}/{parse.quote(revision, safe='')})"
139
140
141 def limit_to_one_dataset_per_namespace(datasets: list[str]) -> list[str]:
142 """
143 Limit the number of datasets to one per namespace.
144
145 For instance, if we have `a/b` and `a/c`, we will only keep one of them.
146 The choice is arbitrary. The filtered list has no particular order.
147
148 Args:
149 datasets (`list[str]`): The list of datasets to filter.
150
151 Returns:
152 `list[str]`: The filtered list of datasets.
153 """
154 namespaces: set[str] = set()
155 selected_datasets: list[str] = []
156 for dataset in datasets:
157 namespace = get_namespace(dataset)
158 if (namespace is None) or (namespace in namespaces):
159 continue
160 namespaces.add(namespace)
161 selected_datasets.append(dataset)
162 return selected_datasets
163
164
165 def get_namespace(dataset: str) -> Optional[str]:
166 splits = dataset.split("/")
167 return splits[0] if len(splits) == 2 else None
168
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/jobs/cache_maintenance/src/cache_maintenance/discussions.py b/jobs/cache_maintenance/src/cache_maintenance/discussions.py
--- a/jobs/cache_maintenance/src/cache_maintenance/discussions.py
+++ b/jobs/cache_maintenance/src/cache_maintenance/discussions.py
@@ -13,6 +13,35 @@
PARQUET_CACHE_KIND = "config-parquet"
DAYS = 1
+DISCUSSION_TITLE = "[bot] [No action needed] Conversion to Parquet"
+DISCUSSION_DESCRIPTION = """The {bot_name} bot has created a version of this dataset in the Parquet format in the {parquet_link} branch.
+
+## What is Parquet?
+
+Apache Parquet is a popular columnar storage format known for:
+
+- reduced memory requirement,
+- fast data retrieval and filtering,
+- efficient storage.
+
+It powers the Dataset Viewer (random access, search, filter) and every dataset on the Hub can be accessed with the same code (use HF Datasets, ClickHouse, DuckDB, Pandas or Polars, [you tell](https://huggingface.co/docs/datasets-server/parquet_process)).
+
+You can learn more about the advantages associated with Parquet in the [documentation](https://huggingface.co/docs/datasets-server/parquet).
+
+## How to access the Parquet version of the dataset?
+
+You can access the Parquet version of the dataset by following this link: {parquet_link}
+
+## What if my dataset was already in Parquet?
+
+When the dataset is already in Parquet format, the data are not converted and the files in `refs/convert/parquet` are links to the original files. This rule suffers an exception to ensure the Datasets Server API to stay fast: if the row group size of the original Parquet files is too big, new Parquet files are generated.
+
+## What should I do?
+
+You don't need to do anything. The Parquet version of the dataset is available for you to use. Refer to the [documentation](https://huggingface.co/docs/datasets-server/parquet_process) for examples and code snippets on how to query the Parquet files with ClickHouse, DuckDB, Pandas or Polars.
+
+If you have any questions or concerns, feel free to ask in the discussion below. You can also close the discussion if you don't have any questions."""
+
@dataclass
class ParquetCounters:
@@ -116,20 +145,14 @@
def create_discussion_description(
dataset: str, hf_endpoint: str, parquet_revision: str, bot_associated_user_name: str
) -> str:
- link_parquet = create_link(
+ parquet_link = create_link(
text=parquet_revision,
dataset=dataset,
hf_endpoint=hf_endpoint,
revision_type="tree",
revision=parquet_revision,
)
- return (
- f"The {bot_associated_user_name} bot has created a version of this dataset in the [Parquet"
- " format](https://parquet.apache.org/). You can learn more about the advantages associated with this format"
- f""" in the [documentation](https://huggingface.co/docs/datasets-server/parquet).
-
-The Parquet files are published in the {link_parquet} branch."""
- )
+ return DISCUSSION_DESCRIPTION.format(bot_name=bot_associated_user_name, parquet_link=parquet_link)
def create_link(
| {"golden_diff": "diff --git a/jobs/cache_maintenance/src/cache_maintenance/discussions.py b/jobs/cache_maintenance/src/cache_maintenance/discussions.py\n--- a/jobs/cache_maintenance/src/cache_maintenance/discussions.py\n+++ b/jobs/cache_maintenance/src/cache_maintenance/discussions.py\n@@ -13,6 +13,35 @@\n PARQUET_CACHE_KIND = \"config-parquet\"\n DAYS = 1\n \n+DISCUSSION_TITLE = \"[bot] [No action needed] Conversion to Parquet\"\n+DISCUSSION_DESCRIPTION = \"\"\"The {bot_name} bot has created a version of this dataset in the Parquet format in the {parquet_link} branch.\n+\n+## What is Parquet?\n+\n+Apache Parquet is a popular columnar storage format known for:\n+\n+- reduced memory requirement,\n+- fast data retrieval and filtering,\n+- efficient storage.\n+\n+It powers the Dataset Viewer (random access, search, filter) and every dataset on the Hub can be accessed with the same code (use HF Datasets, ClickHouse, DuckDB, Pandas or Polars, [you tell](https://huggingface.co/docs/datasets-server/parquet_process)). \n+\n+You can learn more about the advantages associated with Parquet in the [documentation](https://huggingface.co/docs/datasets-server/parquet).\n+\n+## How to access the Parquet version of the dataset?\n+\n+You can access the Parquet version of the dataset by following this link: {parquet_link}\n+\n+## What if my dataset was already in Parquet?\n+\n+When the dataset is already in Parquet format, the data are not converted and the files in `refs/convert/parquet` are links to the original files. This rule suffers an exception to ensure the Datasets Server API to stay fast: if the row group size of the original Parquet files is too big, new Parquet files are generated.\n+\n+## What should I do?\n+\n+You don't need to do anything. The Parquet version of the dataset is available for you to use. Refer to the [documentation](https://huggingface.co/docs/datasets-server/parquet_process) for examples and code snippets on how to query the Parquet files with ClickHouse, DuckDB, Pandas or Polars.\n+\n+If you have any questions or concerns, feel free to ask in the discussion below. You can also close the discussion if you don't have any questions.\"\"\"\n+\n \n @dataclass\n class ParquetCounters:\n@@ -116,20 +145,14 @@\n def create_discussion_description(\n dataset: str, hf_endpoint: str, parquet_revision: str, bot_associated_user_name: str\n ) -> str:\n- link_parquet = create_link(\n+ parquet_link = create_link(\n text=parquet_revision,\n dataset=dataset,\n hf_endpoint=hf_endpoint,\n revision_type=\"tree\",\n revision=parquet_revision,\n )\n- return (\n- f\"The {bot_associated_user_name} bot has created a version of this dataset in the [Parquet\"\n- \" format](https://parquet.apache.org/). You can learn more about the advantages associated with this format\"\n- f\"\"\" in the [documentation](https://huggingface.co/docs/datasets-server/parquet).\n-\n-The Parquet files are published in the {link_parquet} branch.\"\"\"\n- )\n+ return DISCUSSION_DESCRIPTION.format(bot_name=bot_associated_user_name, parquet_link=parquet_link)\n \n \n def create_link(\n", "issue": "Improve the message in Parquet conversion discussions\nsee https://huggingface.slack.com/archives/C02V51Q3800/p1707388935912969 (internal)\r\n\r\n> Would it make sense to add some message in Parquet PRs (e.g. 
https://huggingface.co/datasets/huggan/smithsonian_butterflies_subset/discussions/2#65c41c1580497543ca3f8a5a) for less technical people?\r\n> As an aside, I don't think the docs really mention the advantages of Parquet even if we link there for advantages (https://huggingface.co/docs/datasets-server/parquet)\r\n> For example\r\n> ```\r\n> [bot] [No action needed] Conversion to Parquet\r\n> \r\n> The parquet-converter bot has created a version of this dataset in the Parquet format in the `refs/convert/parquet` branch. This will allow X, Y, Z.\r\n> \r\n> You can learn more about the advantages associated with this format in the documentation.\r\n> ```\r\n\r\nrelated to https://github.com/huggingface/datasets-server/issues/2349#issuecomment-1916292176\r\n\n", "before_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n# Copyright 2023 The HuggingFace Authors.\n\nimport logging\nfrom dataclasses import dataclass\nfrom typing import Literal, Optional\nfrom urllib import parse\n\nfrom huggingface_hub import HfApi\nfrom huggingface_hub.constants import REPO_TYPE_DATASET\nfrom libcommon.simple_cache import get_datasets_with_last_updated_kind\n\nPARQUET_CACHE_KIND = \"config-parquet\"\nDAYS = 1\n\n\n@dataclass\nclass ParquetCounters:\n datasets: int = 0\n new_discussions: int = 0\n dismissed_discussions: int = 0\n errors: int = 0\n\n\n@dataclass\nclass Counters:\n parquet: ParquetCounters\n\n\ndef post_messages(\n hf_endpoint: str, bot_associated_user_name: Optional[str], bot_token: Optional[str], parquet_revision: str\n) -> Counters:\n \"\"\"\n Post messages in Hub discussions to notify users.\n \"\"\"\n if not (bot_associated_user_name and bot_token):\n raise Exception(\"No bot token or user name provided, skipping posting messages.\")\n\n return Counters(\n parquet=post_messages_on_parquet_conversion(\n hf_endpoint=hf_endpoint,\n bot_associated_user_name=bot_associated_user_name,\n bot_token=bot_token,\n parquet_revision=parquet_revision,\n )\n )\n\n\ndef post_messages_on_parquet_conversion(\n hf_endpoint: str,\n bot_associated_user_name: str,\n bot_token: str,\n parquet_revision: str,\n) -> ParquetCounters:\n logging.info(\"Create a Hub discussion to notify about parquet conversion\")\n datasets = limit_to_one_dataset_per_namespace(\n get_datasets_with_last_updated_kind(kind=PARQUET_CACHE_KIND, days=DAYS)\n )\n\n logging.info(f\"Creating discussions for {len(datasets)} datasets\")\n log_batch = 100\n counters = ParquetCounters()\n\n def get_log() -> str:\n return (\n f\" [{counters.datasets}/{len(datasets)}] {counters.new_discussions} discussions\"\n f\" have been opened, {counters.dismissed_discussions} datasets\"\n f\" already had a discussion (open or closed). 
{counters.errors} errors.\"\n )\n\n hf_api = HfApi(endpoint=hf_endpoint, token=bot_token)\n\n for dataset in datasets:\n counters.datasets += 1\n prefix = f\"[{counters.datasets}/{len(datasets)}]\"\n logging.info(f\"{prefix} Processing dataset {dataset}\")\n try:\n try:\n next(\n hf_api.get_repo_discussions(\n repo_id=dataset, repo_type=REPO_TYPE_DATASET, token=bot_token, author=bot_associated_user_name\n )\n )\n # if we get here, the bot has already opened a discussion for this dataset\n counters.dismissed_discussions += 1\n logging.info(f\"{prefix} [dismissed] Dataset {dataset} already has a discussion, skipping\")\n except StopIteration:\n hf_api.create_discussion(\n repo_id=dataset,\n repo_type=REPO_TYPE_DATASET,\n title=\"[bot] Conversion to Parquet\",\n description=create_discussion_description(\n dataset=dataset,\n hf_endpoint=hf_endpoint,\n parquet_revision=parquet_revision,\n bot_associated_user_name=bot_associated_user_name,\n ),\n token=bot_token,\n )\n counters.new_discussions += 1\n logging.info(f\"{prefix} [new] Dataset {dataset} has a new discussion\")\n except Exception as e:\n counters.errors += 1\n logging.warning(f\"{prefix} [error] Failed to process dataset {dataset}: {e}\")\n\n logging.debug(get_log())\n if (counters.datasets) % log_batch == 0:\n logging.info(get_log())\n\n logging.info(get_log())\n logging.info(\"All the messages about parquet conversion have been posted.\")\n\n return counters\n\n\ndef create_discussion_description(\n dataset: str, hf_endpoint: str, parquet_revision: str, bot_associated_user_name: str\n) -> str:\n link_parquet = create_link(\n text=parquet_revision,\n dataset=dataset,\n hf_endpoint=hf_endpoint,\n revision_type=\"tree\",\n revision=parquet_revision,\n )\n return (\n f\"The {bot_associated_user_name} bot has created a version of this dataset in the [Parquet\"\n \" format](https://parquet.apache.org/). You can learn more about the advantages associated with this format\"\n f\"\"\" in the [documentation](https://huggingface.co/docs/datasets-server/parquet).\n\nThe Parquet files are published in the {link_parquet} branch.\"\"\"\n )\n\n\ndef create_link(\n text: str, dataset: str, hf_endpoint: str, revision_type: Literal[\"commit\", \"tree\"], revision: str\n) -> str:\n return f\"[`{text}`]({hf_endpoint}/datasets/{dataset}/{revision_type}/{parse.quote(revision, safe='')})\"\n\n\ndef limit_to_one_dataset_per_namespace(datasets: list[str]) -> list[str]:\n \"\"\"\n Limit the number of datasets to one per namespace.\n\n For instance, if we have `a/b` and `a/c`, we will only keep one of them.\n The choice is arbitrary. 
The filtered list has no particular order.\n\n Args:\n datasets (`list[str]`): The list of datasets to filter.\n\n Returns:\n `list[str]`: The filtered list of datasets.\n \"\"\"\n namespaces: set[str] = set()\n selected_datasets: list[str] = []\n for dataset in datasets:\n namespace = get_namespace(dataset)\n if (namespace is None) or (namespace in namespaces):\n continue\n namespaces.add(namespace)\n selected_datasets.append(dataset)\n return selected_datasets\n\n\ndef get_namespace(dataset: str) -> Optional[str]:\n splits = dataset.split(\"/\")\n return splits[0] if len(splits) == 2 else None\n", "path": "jobs/cache_maintenance/src/cache_maintenance/discussions.py"}], "after_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n# Copyright 2023 The HuggingFace Authors.\n\nimport logging\nfrom dataclasses import dataclass\nfrom typing import Literal, Optional\nfrom urllib import parse\n\nfrom huggingface_hub import HfApi\nfrom huggingface_hub.constants import REPO_TYPE_DATASET\nfrom libcommon.simple_cache import get_datasets_with_last_updated_kind\n\nPARQUET_CACHE_KIND = \"config-parquet\"\nDAYS = 1\n\nDISCUSSION_TITLE = \"[bot] [No action needed] Conversion to Parquet\"\nDISCUSSION_DESCRIPTION = \"\"\"The {bot_name} bot has created a version of this dataset in the Parquet format in the {parquet_link} branch.\n\n## What is Parquet?\n\nApache Parquet is a popular columnar storage format known for:\n\n- reduced memory requirement,\n- fast data retrieval and filtering,\n- efficient storage.\n\nIt powers the Dataset Viewer (random access, search, filter) and every dataset on the Hub can be accessed with the same code (use HF Datasets, ClickHouse, DuckDB, Pandas or Polars, [you tell](https://huggingface.co/docs/datasets-server/parquet_process)). \n\nYou can learn more about the advantages associated with Parquet in the [documentation](https://huggingface.co/docs/datasets-server/parquet).\n\n## How to access the Parquet version of the dataset?\n\nYou can access the Parquet version of the dataset by following this link: {parquet_link}\n\n## What if my dataset was already in Parquet?\n\nWhen the dataset is already in Parquet format, the data are not converted and the files in `refs/convert/parquet` are links to the original files. This rule suffers an exception to ensure the Datasets Server API to stay fast: if the row group size of the original Parquet files is too big, new Parquet files are generated.\n\n## What should I do?\n\nYou don't need to do anything. The Parquet version of the dataset is available for you to use. Refer to the [documentation](https://huggingface.co/docs/datasets-server/parquet_process) for examples and code snippets on how to query the Parquet files with ClickHouse, DuckDB, Pandas or Polars.\n\nIf you have any questions or concerns, feel free to ask in the discussion below. 
You can also close the discussion if you don't have any questions.\"\"\"\n\n\n@dataclass\nclass ParquetCounters:\n datasets: int = 0\n new_discussions: int = 0\n dismissed_discussions: int = 0\n errors: int = 0\n\n\n@dataclass\nclass Counters:\n parquet: ParquetCounters\n\n\ndef post_messages(\n hf_endpoint: str, bot_associated_user_name: Optional[str], bot_token: Optional[str], parquet_revision: str\n) -> Counters:\n \"\"\"\n Post messages in Hub discussions to notify users.\n \"\"\"\n if not (bot_associated_user_name and bot_token):\n raise Exception(\"No bot token or user name provided, skipping posting messages.\")\n\n return Counters(\n parquet=post_messages_on_parquet_conversion(\n hf_endpoint=hf_endpoint,\n bot_associated_user_name=bot_associated_user_name,\n bot_token=bot_token,\n parquet_revision=parquet_revision,\n )\n )\n\n\ndef post_messages_on_parquet_conversion(\n hf_endpoint: str,\n bot_associated_user_name: str,\n bot_token: str,\n parquet_revision: str,\n) -> ParquetCounters:\n logging.info(\"Create a Hub discussion to notify about parquet conversion\")\n datasets = limit_to_one_dataset_per_namespace(\n get_datasets_with_last_updated_kind(kind=PARQUET_CACHE_KIND, days=DAYS)\n )\n\n logging.info(f\"Creating discussions for {len(datasets)} datasets\")\n log_batch = 100\n counters = ParquetCounters()\n\n def get_log() -> str:\n return (\n f\" [{counters.datasets}/{len(datasets)}] {counters.new_discussions} discussions\"\n f\" have been opened, {counters.dismissed_discussions} datasets\"\n f\" already had a discussion (open or closed). {counters.errors} errors.\"\n )\n\n hf_api = HfApi(endpoint=hf_endpoint, token=bot_token)\n\n for dataset in datasets:\n counters.datasets += 1\n prefix = f\"[{counters.datasets}/{len(datasets)}]\"\n logging.info(f\"{prefix} Processing dataset {dataset}\")\n try:\n try:\n next(\n hf_api.get_repo_discussions(\n repo_id=dataset, repo_type=REPO_TYPE_DATASET, token=bot_token, author=bot_associated_user_name\n )\n )\n # if we get here, the bot has already opened a discussion for this dataset\n counters.dismissed_discussions += 1\n logging.info(f\"{prefix} [dismissed] Dataset {dataset} already has a discussion, skipping\")\n except StopIteration:\n hf_api.create_discussion(\n repo_id=dataset,\n repo_type=REPO_TYPE_DATASET,\n title=\"[bot] Conversion to Parquet\",\n description=create_discussion_description(\n dataset=dataset,\n hf_endpoint=hf_endpoint,\n parquet_revision=parquet_revision,\n bot_associated_user_name=bot_associated_user_name,\n ),\n token=bot_token,\n )\n counters.new_discussions += 1\n logging.info(f\"{prefix} [new] Dataset {dataset} has a new discussion\")\n except Exception as e:\n counters.errors += 1\n logging.warning(f\"{prefix} [error] Failed to process dataset {dataset}: {e}\")\n\n logging.debug(get_log())\n if (counters.datasets) % log_batch == 0:\n logging.info(get_log())\n\n logging.info(get_log())\n logging.info(\"All the messages about parquet conversion have been posted.\")\n\n return counters\n\n\ndef create_discussion_description(\n dataset: str, hf_endpoint: str, parquet_revision: str, bot_associated_user_name: str\n) -> str:\n parquet_link = create_link(\n text=parquet_revision,\n dataset=dataset,\n hf_endpoint=hf_endpoint,\n revision_type=\"tree\",\n revision=parquet_revision,\n )\n return DISCUSSION_DESCRIPTION.format(bot_name=bot_associated_user_name, parquet_link=parquet_link)\n\n\ndef create_link(\n text: str, dataset: str, hf_endpoint: str, revision_type: Literal[\"commit\", \"tree\"], revision: str\n) -> str:\n 
return f\"[`{text}`]({hf_endpoint}/datasets/{dataset}/{revision_type}/{parse.quote(revision, safe='')})\"\n\n\ndef limit_to_one_dataset_per_namespace(datasets: list[str]) -> list[str]:\n \"\"\"\n Limit the number of datasets to one per namespace.\n\n For instance, if we have `a/b` and `a/c`, we will only keep one of them.\n The choice is arbitrary. The filtered list has no particular order.\n\n Args:\n datasets (`list[str]`): The list of datasets to filter.\n\n Returns:\n `list[str]`: The filtered list of datasets.\n \"\"\"\n namespaces: set[str] = set()\n selected_datasets: list[str] = []\n for dataset in datasets:\n namespace = get_namespace(dataset)\n if (namespace is None) or (namespace in namespaces):\n continue\n namespaces.add(namespace)\n selected_datasets.append(dataset)\n return selected_datasets\n\n\ndef get_namespace(dataset: str) -> Optional[str]:\n splits = dataset.split(\"/\")\n return splits[0] if len(splits) == 2 else None\n", "path": "jobs/cache_maintenance/src/cache_maintenance/discussions.py"}]} | 2,246 | 755 |
gh_patches_debug_23042 | rasdani/github-patches | git_diff | NVIDIA__apex-553 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
'function' object has no attribute '__self__' in PyTorch 1.3
After upgrading to PyTorch 1.3.0, my training script crashes with `AttributeError`. Relevant lines from the stack:
```
Traceback (most recent call last):
...
File "/usr/lib/python3.7/site-packages/torch/optim/lr_scheduler.py", line 299, in __init__
super(CosineAnnealingLR, self).__init__(optimizer, last_epoch)
File "/usr/lib/python3.7/site-packages/torch/optim/lr_scheduler.py", line 58, in __init__
self.optimizer.step = with_counter(self.optimizer.step)
File "/usr/lib/python3.7/site-packages/torch/optim/lr_scheduler.py", line 40, in with_counter
instance_ref = weakref.ref(method.__self__)
AttributeError: 'function' object has no attribute '__self__'
```
The `step` method of the Amp optimizer is simply patched with a function, but is not made a method again in https://github.com/NVIDIA/apex/blob/088985936518be7e25795a30d8ab33affa9db6ed/apex/amp/_initialize.py#L235-L245, therefore it does not have a `__self__`.
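A standalone sketch of the failure mode (plain classes, not the actual apex/torch objects): an instance attribute that holds a bare function has no `__self__`, which is exactly what `torch.optim.lr_scheduler`'s `weakref.ref(method.__self__)` trips over, while a bound method does:
```python
import types
import weakref


class Optimizer:
    def step(self, closure=None):
        pass


opt = Optimizer()


def patched_step(self, closure=None):
    pass


# Overwriting step with a bare function leaves a plain function attribute,
# so there is no __self__ for weakref.ref(method.__self__) to grab.
opt.step = patched_step
print(hasattr(opt.step, '__self__'))  # False -> AttributeError in PyTorch 1.3

# Re-binding the function to the instance restores __self__.
opt.step = types.MethodType(patched_step, opt)
print(opt.step.__self__ is opt)       # True
weakref.ref(opt.step.__self__)        # what the LR scheduler expects
```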
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apex/amp/_initialize.py`
Content:
```
1 import torch
2 from torch._six import string_classes
3 import functools
4 import numpy as np
5 import sys
6 import warnings
7 from ._amp_state import _amp_state, warn_or_err, container_abcs
8 from .handle import disable_casts
9 from .scaler import LossScaler
10 from ._process_optimizer import _process_optimizer
11 from apex.fp16_utils import convert_network
12 from ..fp16_utils import FP16_Optimizer as FP16_Optimizer_general
13 from ..contrib.optimizers import FP16_Optimizer as FP16_Optimizer_for_fused
14
15 if torch.distributed.is_available():
16 from ..parallel import DistributedDataParallel as apex_DDP
17 from ..parallel.LARC import LARC
18
19
20 def to_type(dtype, t):
21 if isinstance(t, torch.Tensor):
22 if not t.is_cuda:
23 # This should not be a hard error, since it may be legitimate.
24 warnings.warn("An input tensor was not cuda.")
25 # GANs require this.
26 # if t.requires_grad:
27 # warn_or_err("input data requires grad. Since input data is not a model parameter,\n"
28 # "its gradients will not be properly allreduced by DDP.")
29 if t.is_floating_point():
30 return t.to(dtype)
31 return t
32 else:
33 # Trust the user's custom batch type, that's all I can do here.
34 return t.to(dtype)
35
36
37 # Modified from torch.optim.optimizer.py. This is a bit more general than casted_args in utils.py.
38 def applier(value, fn):
39 if isinstance(value, torch.Tensor):
40 return fn(value)
41 elif isinstance(value, string_classes):
42 return value
43 elif isinstance(value, np.ndarray):
44 return value
45 elif hasattr(value, "to"): # Allow handling of custom batch classes
46 return fn(value)
47 elif isinstance(value, container_abcs.Mapping):
48 return {applier(k, fn) : applier(v, fn) for k, v in value.items()}
49 elif isinstance(value, container_abcs.Iterable):
50 return type(value)(applier(v, fn) for v in value)
51 else:
52 # Do I want this to fire off even if someone chooses to pass something ordinary like
53 # an int or float? May be more annoying than it's worth.
54 # print("Warning: unrecognized type in applier. If your input data is a custom class, "
55 # "provide it with a .to(dtype) method which converts its floating-point Tensors to dtype. "
56 # "Amp will check for your custom to() and invoke it to cast the batch's "
57 # "floating-point Tensors to the appropriate type. "
58 # "Also, if your data is a custom class, it is your responsibility to ensure that "
59 # "any Tensors you want to be cuda are already cuda."
60 return value
61
62
63 def check_models(models):
64 for model in models:
65 parallel_type = None
66 if isinstance(model, torch.nn.parallel.DistributedDataParallel):
67 parallel_type = "torch.nn.parallel.DistributedDataParallel"
68 if ('apex_DDP' in sys.modules) and isinstance(model, apex_DDP):
69 parallel_type = "apex.parallel.DistributedDataParallel"
70 if isinstance(model, torch.nn.parallel.DataParallel):
71 parallel_type = "torch.nn.parallel.DataParallel"
72 if parallel_type is not None:
73 raise RuntimeError("Incoming model is an instance of {}. ".format(parallel_type) +
74 "Parallel wrappers should only be applied to the model(s) AFTER \n"
75 "the model(s) have been returned from amp.initialize.")
76
77
78 def check_params_fp32(models):
79 for model in models:
80 for name, param in model.named_parameters():
81 if param.is_floating_point():
82 if 'Half' in param.type():
83 warn_or_err("Found param {} with type {}, expected torch.cuda.FloatTensor.\n"
84 "When using amp.initialize, you do not need to call .half() on your model\n"
85 "before passing it, no matter what optimization level you choose.".format(
86 name, param.type()))
87 elif not param.is_cuda:
88 warn_or_err("Found param {} with type {}, expected torch.cuda.FloatTensor.\n"
89 "When using amp.initialize, you need to provide a model with parameters\n"
90 "located on a CUDA device before passing it no matter what optimization level\n"
91 "you chose. Use model.to('cuda') to use the default device.".format(
92 name, param.type()))
93
94 # Backward compatibility for PyTorch 0.4
95 if hasattr(model, 'named_buffers'):
96 buf_iter = model.named_buffers()
97 else:
98 buf_iter = model._buffers
99 for obj in buf_iter:
100 if type(obj)==tuple:
101 name, buf = obj
102 else:
103 name, buf = obj, buf_iter[obj]
104 if buf.is_floating_point():
105 if 'Half' in buf.type():
106 warn_or_err("Found buffer {} with type {}, expected torch.cuda.FloatTensor.\n"
107 "When using amp.initialize, you do not need to call .half() on your model\n"
108 "before passing it, no matter what optimization level you choose.".format(
109 name, buf.type()))
110 elif not buf.is_cuda:
111 warn_or_err("Found buffer {} with type {}, expected torch.cuda.FloatTensor.\n"
112 "When using amp.initialize, you need to provide a model with buffers\n"
113 "located on a CUDA device before passing it no matter what optimization level\n"
114 "you chose. Use model.to('cuda') to use the default device.".format(
115 name, buf.type()))
116
117
118 def check_optimizers(optimizers):
119 for optim in optimizers:
120 bad_optim_type = None
121 if isinstance(optim, FP16_Optimizer_general):
122 bad_optim_type = "apex.fp16_utils.FP16_Optimizer"
123 if isinstance(optim, FP16_Optimizer_for_fused):
124 bad_optim_type = "apex.optimizers.FP16_Optimizer"
125 if bad_optim_type is not None:
126 raise RuntimeError("An incoming optimizer is an instance of {}. ".format(bad_optim_type) +
127 "The optimizer(s) passed to amp.initialize() must be bare \n"
128 "instances of either ordinary Pytorch optimizers, or Apex fused \n"
129 "optimizers.\n")
130
131
132 class O2StateDictHook(object):
133 def __init__(self, fn):
134 self.fn = fn
135
136 def __call__(self, module, state_dict, prefix, local_metadata):
137 for key in state_dict:
138 param = state_dict[key]
139 if 'Half' in param.type():
140 param = param.to(torch.float32)
141 state_dict[key] = param
142
143
144 def _initialize(models, optimizers, properties, num_losses=1, cast_model_outputs=None):
145 from .amp import init as amp_init
146
147 optimizers_was_list = False
148 if isinstance(optimizers, torch.optim.Optimizer) or ('LARC' in sys.modules and isinstance(optimizers, LARC)):
149 optimizers = [optimizers]
150 elif optimizers is None:
151 optimizers = []
152 elif isinstance(optimizers, list):
153 optimizers_was_list = True
154 check_optimizers(optimizers)
155 else:
156 check_optimizers([optimizers])
157 raise TypeError("optimizers must be either a single optimizer or a list of optimizers.")
158
159 if isinstance(models, torch.nn.Module):
160 models_was_list = False
161 models = [models]
162 elif isinstance(models, list):
163 models_was_list = True
164 else:
165 raise TypeError("models must be either a single model or a list of models.")
166
167 check_models(models)
168
169 if not _amp_state.allow_incoming_model_not_fp32:
170 check_params_fp32(models)
171
172 # In the future, when FP16_Optimizer can be deprecated and master weights can
173 # become an attribute, remember to stash master weights before casting the model.
174
175 if properties.cast_model_type:
176 if properties.keep_batchnorm_fp32:
177 for model in models:
178 convert_network(model, properties.cast_model_type)
179 else:
180 for model in models:
181 model.to(properties.cast_model_type)
182
183 input_caster = functools.partial(to_type, properties.cast_model_type)
184 if cast_model_outputs is not None:
185 output_caster = functools.partial(to_type, cast_model_outputs)
186 else:
187 output_caster = functools.partial(to_type, torch.float32)
188
189 for model in models:
190 # Patch the forward method to cast incoming data to the correct type, and
191 # outgoing data to float32, so "the user never needs to call .half()."
192 # I like writing things explicitly more than decorators.
193 def patch_forward(old_fwd):
194 def new_fwd(*args, **kwargs):
195 output = old_fwd(*applier(args, input_caster),
196 **applier(kwargs, input_caster))
197 return applier(output, output_caster)
198 return new_fwd
199
200 model.forward = patch_forward(model.forward)
201
202 # State dict trick to recast any preexisting per-param state tensors
203 for optimizer in optimizers:
204 optimizer.load_state_dict(optimizer.state_dict())
205
206 # patch model.state_dict() to return float32 params
207 for model in models:
208 for module in model.modules():
209 module._register_state_dict_hook(O2StateDictHook(functools.partial(to_type, torch.float32)))
210
211 elif cast_model_outputs is not None:
212 output_caster = functools.partial(to_type, cast_model_outputs)
213
214 for model in models:
215 def patch_forward(old_fwd):
216 def new_fwd(*args, **kwargs):
217 output = old_fwd(*args, **kwargs)
218 return applier(output, output_caster)
219 return new_fwd
220
221 model.forward = patch_forward(model.forward)
222
223 for i, optimizer in enumerate(optimizers):
224 optimizers[i] = _process_optimizer(optimizer, properties)
225
226 _amp_state.loss_scalers = []
227 for _ in range(num_losses):
228 _amp_state.loss_scalers.append(LossScaler(properties.loss_scale,
229 min_loss_scale=_amp_state.min_loss_scale,
230 max_loss_scale=_amp_state.max_loss_scale))
231
232 if properties.patch_torch_functions:
233 # handle is unused here. It's accessible later through a global value anyway.
234 handle = amp_init(loss_scale=properties.loss_scale, verbose=(_amp_state.verbosity == 2))
235 for optimizer in optimizers:
236 # Disable Amp casting for the optimizer step, because it should only be
237 # applied to FP32 master params anyway.
238 def patch_step(old_step):
239 def new_step(*args, **kwargs):
240 with disable_casts():
241 output = old_step(*args, **kwargs)
242 return output
243 return new_step
244
245 optimizer.step = patch_step(optimizer.step)
246
247 if optimizers_was_list:
248 if models_was_list:
249 return models, optimizers
250 else:
251 return models[0], optimizers
252 else:
253 if models_was_list:
254 if len(optimizers) == 0:
255 return models
256 else:
257 return models, optimizers[0]
258 else:
259 if len(optimizers) == 0:
260 return models[0]
261 else:
262 return models[0], optimizers[0]
263
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/apex/amp/_initialize.py b/apex/amp/_initialize.py
--- a/apex/amp/_initialize.py
+++ b/apex/amp/_initialize.py
@@ -3,6 +3,7 @@
import functools
import numpy as np
import sys
+from types import MethodType
import warnings
from ._amp_state import _amp_state, warn_or_err, container_abcs
from .handle import disable_casts
@@ -236,13 +237,13 @@
# Disable Amp casting for the optimizer step, because it should only be
# applied to FP32 master params anyway.
def patch_step(old_step):
- def new_step(*args, **kwargs):
+ def new_step(self, *args, **kwargs):
with disable_casts():
output = old_step(*args, **kwargs)
return output
return new_step
- optimizer.step = patch_step(optimizer.step)
+ optimizer.step = MethodType(patch_step(optimizer.step), optimizer)
if optimizers_was_list:
if models_was_list:
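The effect of this patch, sketched against a bare `torch.optim` optimizer (assumes PyTorch is installed; illustrative only, the real code also disables Amp casting inside the wrapper):
```python
import weakref
import torch
from types import MethodType

opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.1)

def patch_step(old_step):
    def new_step(self, *args, **kwargs):
        return old_step(*args, **kwargs)   # casting would be disabled here in apex
    return new_step

opt.step = MethodType(patch_step(opt.step), opt)
print(opt.step.__self__ is opt)            # True
weakref.ref(opt.step.__self__)             # exactly what lr_scheduler's with_counter does
```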
| {"golden_diff": "diff --git a/apex/amp/_initialize.py b/apex/amp/_initialize.py\n--- a/apex/amp/_initialize.py\n+++ b/apex/amp/_initialize.py\n@@ -3,6 +3,7 @@\n import functools\n import numpy as np\n import sys\n+from types import MethodType\n import warnings\n from ._amp_state import _amp_state, warn_or_err, container_abcs\n from .handle import disable_casts\n@@ -236,13 +237,13 @@\n # Disable Amp casting for the optimizer step, because it should only be\n # applied to FP32 master params anyway.\n def patch_step(old_step):\n- def new_step(*args, **kwargs):\n+ def new_step(self, *args, **kwargs):\n with disable_casts():\n output = old_step(*args, **kwargs)\n return output\n return new_step\n \n- optimizer.step = patch_step(optimizer.step)\n+ optimizer.step = MethodType(patch_step(optimizer.step), optimizer)\n \n if optimizers_was_list:\n if models_was_list:\n", "issue": "'function' object has no attribute '__self__' in Pytorch 1.3\nAfter upgrading to Pytorch 1.3.0, my training script crashes with `AttributeError`. Relevant lines from the stack:\r\n```\r\nTraceback (most recent call last):\r\n...\r\n File \"/usr/lib/python3.7/site-packages/torch/optim/lr_scheduler.py\", line 299, in __init__\r\n super(CosineAnnealingLR, self).__init__(optimizer, last_epoch)\r\n File \"/usr/lib/python3.7/site-packages/torch/optim/lr_scheduler.py\", line 58, in __init__\r\n self.optimizer.step = with_counter(self.optimizer.step)\r\n File \"/usr/lib/python3.7/site-packages/torch/optim/lr_scheduler.py\", line 40, in with_counter\r\n instance_ref = weakref.ref(method.__self__)\r\nAttributeError: 'function' object has no attribute '__self__'\r\n```\r\n\r\nThe `step` method of the Amp optimizer is simply patched with a function, but is not made a method again in https://github.com/NVIDIA/apex/blob/088985936518be7e25795a30d8ab33affa9db6ed/apex/amp/_initialize.py#L235-L245, therefore it does not have a `__self__`.\n", "before_files": [{"content": "import torch\nfrom torch._six import string_classes\nimport functools\nimport numpy as np\nimport sys\nimport warnings\nfrom ._amp_state import _amp_state, warn_or_err, container_abcs\nfrom .handle import disable_casts\nfrom .scaler import LossScaler\nfrom ._process_optimizer import _process_optimizer\nfrom apex.fp16_utils import convert_network\nfrom ..fp16_utils import FP16_Optimizer as FP16_Optimizer_general\nfrom ..contrib.optimizers import FP16_Optimizer as FP16_Optimizer_for_fused\n\nif torch.distributed.is_available():\n from ..parallel import DistributedDataParallel as apex_DDP\n from ..parallel.LARC import LARC\n\n\ndef to_type(dtype, t):\n if isinstance(t, torch.Tensor):\n if not t.is_cuda:\n # This should not be a hard error, since it may be legitimate.\n warnings.warn(\"An input tensor was not cuda.\")\n # GANs require this.\n # if t.requires_grad:\n # warn_or_err(\"input data requires grad. Since input data is not a model parameter,\\n\"\n # \"its gradients will not be properly allreduced by DDP.\")\n if t.is_floating_point():\n return t.to(dtype)\n return t\n else:\n # Trust the user's custom batch type, that's all I can do here.\n return t.to(dtype)\n\n\n# Modified from torch.optim.optimizer.py. 
This is a bit more general than casted_args in utils.py.\ndef applier(value, fn):\n if isinstance(value, torch.Tensor):\n return fn(value)\n elif isinstance(value, string_classes):\n return value\n elif isinstance(value, np.ndarray):\n return value\n elif hasattr(value, \"to\"): # Allow handling of custom batch classes\n return fn(value)\n elif isinstance(value, container_abcs.Mapping):\n return {applier(k, fn) : applier(v, fn) for k, v in value.items()}\n elif isinstance(value, container_abcs.Iterable):\n return type(value)(applier(v, fn) for v in value)\n else:\n # Do I want this to fire off even if someone chooses to pass something ordinary like\n # an int or float? May be more annoying than it's worth.\n # print(\"Warning: unrecognized type in applier. If your input data is a custom class, \"\n # \"provide it with a .to(dtype) method which converts its floating-point Tensors to dtype. \"\n # \"Amp will check for your custom to() and invoke it to cast the batch's \"\n # \"floating-point Tensors to the appropriate type. \"\n # \"Also, if your data is a custom class, it is your responsibility to ensure that \"\n # \"any Tensors you want to be cuda are already cuda.\"\n return value\n\n\ndef check_models(models):\n for model in models:\n parallel_type = None\n if isinstance(model, torch.nn.parallel.DistributedDataParallel):\n parallel_type = \"torch.nn.parallel.DistributedDataParallel\"\n if ('apex_DDP' in sys.modules) and isinstance(model, apex_DDP):\n parallel_type = \"apex.parallel.DistributedDataParallel\"\n if isinstance(model, torch.nn.parallel.DataParallel):\n parallel_type = \"torch.nn.parallel.DataParallel\"\n if parallel_type is not None:\n raise RuntimeError(\"Incoming model is an instance of {}. \".format(parallel_type) +\n \"Parallel wrappers should only be applied to the model(s) AFTER \\n\"\n \"the model(s) have been returned from amp.initialize.\")\n\n\ndef check_params_fp32(models):\n for model in models:\n for name, param in model.named_parameters():\n if param.is_floating_point():\n if 'Half' in param.type():\n warn_or_err(\"Found param {} with type {}, expected torch.cuda.FloatTensor.\\n\"\n \"When using amp.initialize, you do not need to call .half() on your model\\n\"\n \"before passing it, no matter what optimization level you choose.\".format(\n name, param.type()))\n elif not param.is_cuda:\n warn_or_err(\"Found param {} with type {}, expected torch.cuda.FloatTensor.\\n\"\n \"When using amp.initialize, you need to provide a model with parameters\\n\"\n \"located on a CUDA device before passing it no matter what optimization level\\n\"\n \"you chose. 
Use model.to('cuda') to use the default device.\".format(\n name, param.type()))\n\n # Backward compatibility for PyTorch 0.4\n if hasattr(model, 'named_buffers'):\n buf_iter = model.named_buffers()\n else:\n buf_iter = model._buffers\n for obj in buf_iter:\n if type(obj)==tuple:\n name, buf = obj\n else:\n name, buf = obj, buf_iter[obj]\n if buf.is_floating_point():\n if 'Half' in buf.type():\n warn_or_err(\"Found buffer {} with type {}, expected torch.cuda.FloatTensor.\\n\"\n \"When using amp.initialize, you do not need to call .half() on your model\\n\"\n \"before passing it, no matter what optimization level you choose.\".format(\n name, buf.type()))\n elif not buf.is_cuda:\n warn_or_err(\"Found buffer {} with type {}, expected torch.cuda.FloatTensor.\\n\"\n \"When using amp.initialize, you need to provide a model with buffers\\n\"\n \"located on a CUDA device before passing it no matter what optimization level\\n\"\n \"you chose. Use model.to('cuda') to use the default device.\".format(\n name, buf.type()))\n\n\ndef check_optimizers(optimizers):\n for optim in optimizers:\n bad_optim_type = None\n if isinstance(optim, FP16_Optimizer_general):\n bad_optim_type = \"apex.fp16_utils.FP16_Optimizer\"\n if isinstance(optim, FP16_Optimizer_for_fused):\n bad_optim_type = \"apex.optimizers.FP16_Optimizer\"\n if bad_optim_type is not None:\n raise RuntimeError(\"An incoming optimizer is an instance of {}. \".format(bad_optim_type) +\n \"The optimizer(s) passed to amp.initialize() must be bare \\n\"\n \"instances of either ordinary Pytorch optimizers, or Apex fused \\n\"\n \"optimizers.\\n\")\n\n\nclass O2StateDictHook(object):\n def __init__(self, fn):\n self.fn = fn\n\n def __call__(self, module, state_dict, prefix, local_metadata):\n for key in state_dict:\n param = state_dict[key]\n if 'Half' in param.type():\n param = param.to(torch.float32)\n state_dict[key] = param\n\n\ndef _initialize(models, optimizers, properties, num_losses=1, cast_model_outputs=None):\n from .amp import init as amp_init\n\n optimizers_was_list = False\n if isinstance(optimizers, torch.optim.Optimizer) or ('LARC' in sys.modules and isinstance(optimizers, LARC)):\n optimizers = [optimizers]\n elif optimizers is None:\n optimizers = []\n elif isinstance(optimizers, list):\n optimizers_was_list = True\n check_optimizers(optimizers)\n else:\n check_optimizers([optimizers])\n raise TypeError(\"optimizers must be either a single optimizer or a list of optimizers.\")\n\n if isinstance(models, torch.nn.Module):\n models_was_list = False\n models = [models]\n elif isinstance(models, list):\n models_was_list = True\n else:\n raise TypeError(\"models must be either a single model or a list of models.\")\n\n check_models(models)\n\n if not _amp_state.allow_incoming_model_not_fp32:\n check_params_fp32(models)\n\n # In the future, when FP16_Optimizer can be deprecated and master weights can\n # become an attribute, remember to stash master weights before casting the model.\n\n if properties.cast_model_type:\n if properties.keep_batchnorm_fp32:\n for model in models:\n convert_network(model, properties.cast_model_type)\n else:\n for model in models:\n model.to(properties.cast_model_type)\n\n input_caster = functools.partial(to_type, properties.cast_model_type)\n if cast_model_outputs is not None:\n output_caster = functools.partial(to_type, cast_model_outputs)\n else:\n output_caster = functools.partial(to_type, torch.float32)\n\n for model in models:\n # Patch the forward method to cast incoming data to the correct type, and\n # 
outgoing data to float32, so \"the user never needs to call .half().\"\n # I like writing things explicitly more than decorators.\n def patch_forward(old_fwd):\n def new_fwd(*args, **kwargs):\n output = old_fwd(*applier(args, input_caster),\n **applier(kwargs, input_caster))\n return applier(output, output_caster)\n return new_fwd\n\n model.forward = patch_forward(model.forward)\n\n # State dict trick to recast any preexisting per-param state tensors\n for optimizer in optimizers:\n optimizer.load_state_dict(optimizer.state_dict())\n\n # patch model.state_dict() to return float32 params\n for model in models:\n for module in model.modules():\n module._register_state_dict_hook(O2StateDictHook(functools.partial(to_type, torch.float32)))\n\n elif cast_model_outputs is not None:\n output_caster = functools.partial(to_type, cast_model_outputs)\n\n for model in models:\n def patch_forward(old_fwd):\n def new_fwd(*args, **kwargs):\n output = old_fwd(*args, **kwargs)\n return applier(output, output_caster)\n return new_fwd\n\n model.forward = patch_forward(model.forward)\n\n for i, optimizer in enumerate(optimizers):\n optimizers[i] = _process_optimizer(optimizer, properties)\n\n _amp_state.loss_scalers = []\n for _ in range(num_losses):\n _amp_state.loss_scalers.append(LossScaler(properties.loss_scale,\n min_loss_scale=_amp_state.min_loss_scale,\n max_loss_scale=_amp_state.max_loss_scale))\n\n if properties.patch_torch_functions:\n # handle is unused here. It's accessible later through a global value anyway.\n handle = amp_init(loss_scale=properties.loss_scale, verbose=(_amp_state.verbosity == 2))\n for optimizer in optimizers:\n # Disable Amp casting for the optimizer step, because it should only be\n # applied to FP32 master params anyway.\n def patch_step(old_step):\n def new_step(*args, **kwargs):\n with disable_casts():\n output = old_step(*args, **kwargs)\n return output\n return new_step\n\n optimizer.step = patch_step(optimizer.step)\n\n if optimizers_was_list:\n if models_was_list:\n return models, optimizers\n else:\n return models[0], optimizers\n else:\n if models_was_list:\n if len(optimizers) == 0:\n return models\n else:\n return models, optimizers[0]\n else:\n if len(optimizers) == 0:\n return models[0]\n else:\n return models[0], optimizers[0]\n", "path": "apex/amp/_initialize.py"}], "after_files": [{"content": "import torch\nfrom torch._six import string_classes\nimport functools\nimport numpy as np\nimport sys\nfrom types import MethodType\nimport warnings\nfrom ._amp_state import _amp_state, warn_or_err, container_abcs\nfrom .handle import disable_casts\nfrom .scaler import LossScaler\nfrom ._process_optimizer import _process_optimizer\nfrom apex.fp16_utils import convert_network\nfrom ..fp16_utils import FP16_Optimizer as FP16_Optimizer_general\nfrom ..contrib.optimizers import FP16_Optimizer as FP16_Optimizer_for_fused\n\nif torch.distributed.is_available():\n from ..parallel import DistributedDataParallel as apex_DDP\n from ..parallel.LARC import LARC\n\n\ndef to_type(dtype, t):\n if isinstance(t, torch.Tensor):\n if not t.is_cuda:\n # This should not be a hard error, since it may be legitimate.\n warnings.warn(\"An input tensor was not cuda.\")\n # GANs require this.\n # if t.requires_grad:\n # warn_or_err(\"input data requires grad. 
Since input data is not a model parameter,\\n\"\n # \"its gradients will not be properly allreduced by DDP.\")\n if t.is_floating_point():\n return t.to(dtype)\n return t\n else:\n # Trust the user's custom batch type, that's all I can do here.\n return t.to(dtype)\n\n\n# Modified from torch.optim.optimizer.py. This is a bit more general than casted_args in utils.py.\ndef applier(value, fn):\n if isinstance(value, torch.Tensor):\n return fn(value)\n elif isinstance(value, string_classes):\n return value\n elif isinstance(value, np.ndarray):\n return value\n elif hasattr(value, \"to\"): # Allow handling of custom batch classes\n return fn(value)\n elif isinstance(value, container_abcs.Mapping):\n return {applier(k, fn) : applier(v, fn) for k, v in value.items()}\n elif isinstance(value, container_abcs.Iterable):\n return type(value)(applier(v, fn) for v in value)\n else:\n # Do I want this to fire off even if someone chooses to pass something ordinary like\n # an int or float? May be more annoying than it's worth.\n # print(\"Warning: unrecognized type in applier. If your input data is a custom class, \"\n # \"provide it with a .to(dtype) method which converts its floating-point Tensors to dtype. \"\n # \"Amp will check for your custom to() and invoke it to cast the batch's \"\n # \"floating-point Tensors to the appropriate type. \"\n # \"Also, if your data is a custom class, it is your responsibility to ensure that \"\n # \"any Tensors you want to be cuda are already cuda.\"\n return value\n\n\ndef check_models(models):\n for model in models:\n parallel_type = None\n if isinstance(model, torch.nn.parallel.DistributedDataParallel):\n parallel_type = \"torch.nn.parallel.DistributedDataParallel\"\n if ('apex_DDP' in sys.modules) and isinstance(model, apex_DDP):\n parallel_type = \"apex.parallel.DistributedDataParallel\"\n if isinstance(model, torch.nn.parallel.DataParallel):\n parallel_type = \"torch.nn.parallel.DataParallel\"\n if parallel_type is not None:\n raise RuntimeError(\"Incoming model is an instance of {}. \".format(parallel_type) +\n \"Parallel wrappers should only be applied to the model(s) AFTER \\n\"\n \"the model(s) have been returned from amp.initialize.\")\n\n\ndef check_params_fp32(models):\n for model in models:\n for name, param in model.named_parameters():\n if param.is_floating_point():\n if 'Half' in param.type():\n warn_or_err(\"Found param {} with type {}, expected torch.cuda.FloatTensor.\\n\"\n \"When using amp.initialize, you do not need to call .half() on your model\\n\"\n \"before passing it, no matter what optimization level you choose.\".format(\n name, param.type()))\n elif not param.is_cuda:\n warn_or_err(\"Found param {} with type {}, expected torch.cuda.FloatTensor.\\n\"\n \"When using amp.initialize, you need to provide a model with parameters\\n\"\n \"located on a CUDA device before passing it no matter what optimization level\\n\"\n \"you chose. 
Use model.to('cuda') to use the default device.\".format(\n name, param.type()))\n\n # Backward compatibility for PyTorch 0.4\n if hasattr(model, 'named_buffers'):\n buf_iter = model.named_buffers()\n else:\n buf_iter = model._buffers\n for obj in buf_iter:\n if type(obj)==tuple:\n name, buf = obj\n else:\n name, buf = obj, buf_iter[obj]\n if buf.is_floating_point():\n if 'Half' in buf.type():\n warn_or_err(\"Found buffer {} with type {}, expected torch.cuda.FloatTensor.\\n\"\n \"When using amp.initialize, you do not need to call .half() on your model\\n\"\n \"before passing it, no matter what optimization level you choose.\".format(\n name, buf.type()))\n elif not buf.is_cuda:\n warn_or_err(\"Found buffer {} with type {}, expected torch.cuda.FloatTensor.\\n\"\n \"When using amp.initialize, you need to provide a model with buffers\\n\"\n \"located on a CUDA device before passing it no matter what optimization level\\n\"\n \"you chose. Use model.to('cuda') to use the default device.\".format(\n name, buf.type()))\n\n\ndef check_optimizers(optimizers):\n for optim in optimizers:\n bad_optim_type = None\n if isinstance(optim, FP16_Optimizer_general):\n bad_optim_type = \"apex.fp16_utils.FP16_Optimizer\"\n if isinstance(optim, FP16_Optimizer_for_fused):\n bad_optim_type = \"apex.optimizers.FP16_Optimizer\"\n if bad_optim_type is not None:\n raise RuntimeError(\"An incoming optimizer is an instance of {}. \".format(bad_optim_type) +\n \"The optimizer(s) passed to amp.initialize() must be bare \\n\"\n \"instances of either ordinary Pytorch optimizers, or Apex fused \\n\"\n \"optimizers.\\n\")\n\n\nclass O2StateDictHook(object):\n def __init__(self, fn):\n self.fn = fn\n\n def __call__(self, module, state_dict, prefix, local_metadata):\n for key in state_dict:\n param = state_dict[key]\n if 'Half' in param.type():\n param = param.to(torch.float32)\n state_dict[key] = param\n\n\ndef _initialize(models, optimizers, properties, num_losses=1, cast_model_outputs=None):\n from .amp import init as amp_init\n\n optimizers_was_list = False\n if isinstance(optimizers, torch.optim.Optimizer) or ('LARC' in sys.modules and isinstance(optimizers, LARC)):\n optimizers = [optimizers]\n elif optimizers is None:\n optimizers = []\n elif isinstance(optimizers, list):\n optimizers_was_list = True\n check_optimizers(optimizers)\n else:\n check_optimizers([optimizers])\n raise TypeError(\"optimizers must be either a single optimizer or a list of optimizers.\")\n\n if isinstance(models, torch.nn.Module):\n models_was_list = False\n models = [models]\n elif isinstance(models, list):\n models_was_list = True\n else:\n raise TypeError(\"models must be either a single model or a list of models.\")\n\n check_models(models)\n\n if not _amp_state.allow_incoming_model_not_fp32:\n check_params_fp32(models)\n\n # In the future, when FP16_Optimizer can be deprecated and master weights can\n # become an attribute, remember to stash master weights before casting the model.\n\n if properties.cast_model_type:\n if properties.keep_batchnorm_fp32:\n for model in models:\n convert_network(model, properties.cast_model_type)\n else:\n for model in models:\n model.to(properties.cast_model_type)\n\n input_caster = functools.partial(to_type, properties.cast_model_type)\n if cast_model_outputs is not None:\n output_caster = functools.partial(to_type, cast_model_outputs)\n else:\n output_caster = functools.partial(to_type, torch.float32)\n\n for model in models:\n # Patch the forward method to cast incoming data to the correct type, and\n # 
outgoing data to float32, so \"the user never needs to call .half().\"\n # I like writing things explicitly more than decorators.\n def patch_forward(old_fwd):\n def new_fwd(*args, **kwargs):\n output = old_fwd(*applier(args, input_caster),\n **applier(kwargs, input_caster))\n return applier(output, output_caster)\n return new_fwd\n\n model.forward = patch_forward(model.forward)\n\n # State dict trick to recast any preexisting per-param state tensors\n for optimizer in optimizers:\n optimizer.load_state_dict(optimizer.state_dict())\n\n # patch model.state_dict() to return float32 params\n for model in models:\n for module in model.modules():\n module._register_state_dict_hook(O2StateDictHook(functools.partial(to_type, torch.float32)))\n\n elif cast_model_outputs is not None:\n output_caster = functools.partial(to_type, cast_model_outputs)\n\n for model in models:\n def patch_forward(old_fwd):\n def new_fwd(*args, **kwargs):\n output = old_fwd(*args, **kwargs)\n return applier(output, output_caster)\n return new_fwd\n\n model.forward = patch_forward(model.forward)\n\n for i, optimizer in enumerate(optimizers):\n optimizers[i] = _process_optimizer(optimizer, properties)\n\n _amp_state.loss_scalers = []\n for _ in range(num_losses):\n _amp_state.loss_scalers.append(LossScaler(properties.loss_scale,\n min_loss_scale=_amp_state.min_loss_scale,\n max_loss_scale=_amp_state.max_loss_scale))\n\n if properties.patch_torch_functions:\n # handle is unused here. It's accessible later through a global value anyway.\n handle = amp_init(loss_scale=properties.loss_scale, verbose=(_amp_state.verbosity == 2))\n for optimizer in optimizers:\n # Disable Amp casting for the optimizer step, because it should only be\n # applied to FP32 master params anyway.\n def patch_step(old_step):\n def new_step(self, *args, **kwargs):\n with disable_casts():\n output = old_step(*args, **kwargs)\n return output\n return new_step\n\n optimizer.step = MethodType(patch_step(optimizer.step), optimizer)\n\n if optimizers_was_list:\n if models_was_list:\n return models, optimizers\n else:\n return models[0], optimizers\n else:\n if models_was_list:\n if len(optimizers) == 0:\n return models\n else:\n return models, optimizers[0]\n else:\n if len(optimizers) == 0:\n return models[0]\n else:\n return models[0], optimizers[0]\n", "path": "apex/amp/_initialize.py"}]} | 3,688 | 234 |
gh_patches_debug_27303 | rasdani/github-patches | git_diff | qutebrowser__qutebrowser-3179 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Typing space after completing full match does nothing
When typing `:set`, then tabbing to select `:set` and then pressing space, the completion doesn't change (it only does so when typing another space).
This also seems to happen with v0.11.0 (with the old completion code), but tagging @rcorre anyways in case you want to take a look :wink:
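A stripped-down sketch of the flag-based pattern involved (invented names, not qutebrowser's API; compare `_ignore_change` in the file below):
```python
class ToyCompleter:
    """Mimics the 'ignore the next update' flag the old code relied on."""

    def __init__(self):
        self._ignore_change = False

    def on_selection_changed(self, text):
        # Quick-completing a full match sets the flag before rewriting the command.
        self._ignore_change = True
        print("completed:", text)

    def update_completion(self, command_text):
        # The next scheduled update is swallowed, even if the user typed a space.
        if self._ignore_change:
            self._ignore_change = False
            print("update ignored for", repr(command_text))
            return
        print("updating completions for", repr(command_text))

c = ToyCompleter()
c.on_selection_changed("set")
c.update_completion(":set ")    # ignored -> nothing happens after the first space
c.update_completion(":set  ")   # only the second space triggers an update
```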
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qutebrowser/completion/completer.py`
Content:
```
1 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
2
3 # Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>
4 #
5 # This file is part of qutebrowser.
6 #
7 # qutebrowser is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # qutebrowser is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
19
20 """Completer attached to a CompletionView."""
21
22 import attr
23 from PyQt5.QtCore import pyqtSlot, QObject, QTimer
24
25 from qutebrowser.config import config
26 from qutebrowser.commands import cmdutils, runners
27 from qutebrowser.utils import log, utils, debug
28 from qutebrowser.completion.models import miscmodels
29
30
31 @attr.s
32 class CompletionInfo:
33
34 """Context passed into all completion functions."""
35
36 config = attr.ib()
37 keyconf = attr.ib()
38
39
40 class Completer(QObject):
41
42 """Completer which manages completions in a CompletionView.
43
44 Attributes:
45 _cmd: The statusbar Command object this completer belongs to.
46 _ignore_change: Whether to ignore the next completion update.
47 _timer: The timer used to trigger the completion update.
48 _last_cursor_pos: The old cursor position so we avoid double completion
49 updates.
50 _last_text: The old command text so we avoid double completion updates.
51 _last_completion_func: The completion function used for the last text.
52 """
53
54 def __init__(self, cmd, parent=None):
55 super().__init__(parent)
56 self._cmd = cmd
57 self._ignore_change = False
58 self._timer = QTimer()
59 self._timer.setSingleShot(True)
60 self._timer.setInterval(0)
61 self._timer.timeout.connect(self._update_completion)
62 self._last_cursor_pos = None
63 self._last_text = None
64 self._last_completion_func = None
65 self._cmd.update_completion.connect(self.schedule_completion_update)
66
67 def __repr__(self):
68 return utils.get_repr(self)
69
70 def _model(self):
71 """Convenience method to get the current completion model."""
72 completion = self.parent()
73 return completion.model()
74
75 def _get_new_completion(self, before_cursor, under_cursor):
76 """Get the completion function based on the current command text.
77
78 Args:
79 before_cursor: The command chunks before the cursor.
80 under_cursor: The command chunk under the cursor.
81
82 Return:
83 A completion model.
84 """
85 if '--' in before_cursor or under_cursor.startswith('-'):
86 # cursor on a flag or after an explicit split (--)
87 return None
88 log.completion.debug("Before removing flags: {}".format(before_cursor))
89 before_cursor = [x for x in before_cursor if not x.startswith('-')]
90 log.completion.debug("After removing flags: {}".format(before_cursor))
91 if not before_cursor:
92 # '|' or 'set|'
93 log.completion.debug('Starting command completion')
94 return miscmodels.command
95 try:
96 cmd = cmdutils.cmd_dict[before_cursor[0]]
97 except KeyError:
98 log.completion.debug("No completion for unknown command: {}"
99 .format(before_cursor[0]))
100 return None
101 argpos = len(before_cursor) - 1
102 try:
103 func = cmd.get_pos_arg_info(argpos).completion
104 except IndexError:
105 log.completion.debug("No completion in position {}".format(argpos))
106 return None
107 return func
108
109 def _quote(self, s):
110 """Quote s if it needs quoting for the commandline.
111
112 Note we don't use shlex.quote because that quotes a lot of shell
113 metachars we don't need to have quoted.
114 """
115 if not s:
116 return "''"
117 elif any(c in s for c in ' "\'\t\n\\'):
118 # use single quotes, and put single quotes into double quotes
119 # the string $'b is then quoted as '$'"'"'b'
120 return "'" + s.replace("'", "'\"'\"'") + "'"
121 else:
122 return s
123
124 def _partition(self):
125 """Divide the commandline text into chunks around the cursor position.
126
127 Return:
128 ([parts_before_cursor], 'part_under_cursor', [parts_after_cursor])
129 """
130 text = self._cmd.text()[len(self._cmd.prefix()):]
131 if not text or not text.strip():
132 # Only ":", empty part under the cursor with nothing before/after
133 return [], '', []
134 parser = runners.CommandParser()
135 result = parser.parse(text, fallback=True, keep=True)
136 # pylint: disable=not-an-iterable
137 parts = [x for x in result.cmdline if x]
138 # pylint: enable=not-an-iterable
139 pos = self._cmd.cursorPosition() - len(self._cmd.prefix())
140 pos = min(pos, len(text)) # Qt treats 2-byte UTF-16 chars as 2 chars
141 log.completion.debug('partitioning {} around position {}'.format(parts,
142 pos))
143 for i, part in enumerate(parts):
144 pos -= len(part)
145 if pos <= 0:
146 if part[pos-1:pos+1].isspace():
147 # cursor is in a space between two existing words
148 parts.insert(i, '')
149 prefix = [x.strip() for x in parts[:i]]
150 center = parts[i].strip()
151 # strip trailing whitepsace included as a separate token
152 postfix = [x.strip() for x in parts[i+1:] if not x.isspace()]
153 log.completion.debug(
154 "partitioned: {} '{}' {}".format(prefix, center, postfix))
155 return prefix, center, postfix
156
157 # We should always return above
158 assert False, parts
159
160 @pyqtSlot(str)
161 def on_selection_changed(self, text):
162 """Change the completed part if a new item was selected.
163
164 Called from the views selectionChanged method.
165
166 Args:
167 text: Newly selected text.
168 """
169 if text is None:
170 return
171 before, center, after = self._partition()
172 log.completion.debug("Changing {} to '{}'".format(center, text))
173 try:
174 maxsplit = cmdutils.cmd_dict[before[0]].maxsplit
175 except (KeyError, IndexError):
176 maxsplit = None
177 if maxsplit is None:
178 text = self._quote(text)
179 model = self._model()
180 if model.count() == 1 and config.val.completion.quick:
181 # If we only have one item, we want to apply it immediately
182 # and go on to the next part.
183 self._change_completed_part(text, before, after, immediate=True)
184 if maxsplit is not None and maxsplit < len(before):
185 # If we are quick-completing the part after maxsplit, don't
186 # keep offering completions (see issue #1519)
187 self._ignore_change = True
188 else:
189 log.completion.debug("Will ignore next completion update.")
190 self._ignore_change = True
191 self._change_completed_part(text, before, after)
192
193 @pyqtSlot()
194 def schedule_completion_update(self):
195 """Schedule updating/enabling completion.
196
197 For performance reasons we don't want to block here, instead we do this
198 in the background.
199
200 We delay the update only if we've already input some text and ignore
201 updates if the text is shorter than completion.min_chars (unless we're
202 hitting backspace in which case updates won't be ignored).
203 """
204 _cmd, _sep, rest = self._cmd.text().partition(' ')
205 input_length = len(rest)
206 if (0 < input_length < config.val.completion.min_chars and
207 self._cmd.cursorPosition() > self._last_cursor_pos):
208 log.completion.debug("Ignoring update because the length of "
209 "the text is less than completion.min_chars.")
210 elif (self._cmd.cursorPosition() == self._last_cursor_pos and
211 self._cmd.text() == self._last_text):
212 log.completion.debug("Ignoring update because there were no "
213 "changes.")
214 else:
215 log.completion.debug("Scheduling completion update.")
216 start_delay = config.val.completion.delay if self._last_text else 0
217 self._timer.start(start_delay)
218 self._last_cursor_pos = self._cmd.cursorPosition()
219 self._last_text = self._cmd.text()
220
221 @pyqtSlot()
222 def _update_completion(self):
223 """Check if completions are available and activate them."""
224 if self._ignore_change:
225 log.completion.debug("Ignoring completion update because "
226 "ignore_change is True.")
227 self._ignore_change = False
228 return
229
230 completion = self.parent()
231
232 if self._cmd.prefix() != ':':
233 # This is a search or gibberish, so we don't need to complete
234 # anything (yet)
235 # FIXME complete searches
236 # https://github.com/qutebrowser/qutebrowser/issues/32
237 completion.set_model(None)
238 self._last_completion_func = None
239 return
240
241 before_cursor, pattern, after_cursor = self._partition()
242
243 log.completion.debug("Updating completion: {} {} {}".format(
244 before_cursor, pattern, after_cursor))
245
246 pattern = pattern.strip("'\"")
247 func = self._get_new_completion(before_cursor, pattern)
248
249 if func is None:
250 log.completion.debug('Clearing completion')
251 completion.set_model(None)
252 self._last_completion_func = None
253 return
254
255 if func != self._last_completion_func:
256 self._last_completion_func = func
257 args = (x for x in before_cursor[1:] if not x.startswith('-'))
258 with debug.log_time(log.completion,
259 'Starting {} completion'.format(func.__name__)):
260 info = CompletionInfo(config=config.instance,
261 keyconf=config.key_instance)
262 model = func(*args, info=info)
263 with debug.log_time(log.completion, 'Set completion model'):
264 completion.set_model(model)
265
266 completion.set_pattern(pattern)
267
268 def _change_completed_part(self, newtext, before, after, immediate=False):
269 """Change the part we're currently completing in the commandline.
270
271 Args:
272 text: The text to set (string) for the token under the cursor.
273 before: Commandline tokens before the token under the cursor.
274 after: Commandline tokens after the token under the cursor.
275 immediate: True if the text should be completed immediately
276 including a trailing space and we shouldn't continue
277 completing the current item.
278 """
279 text = self._cmd.prefix() + ' '.join(before + [newtext])
280 pos = len(text) + (1 if immediate else 0)
281 if after:
282 text += ' ' + ' '.join(after)
283 elif immediate:
284 # pad with a space if quick-completing the last entry
285 text += ' '
286 log.completion.debug("setting text = '{}', pos = {}".format(text, pos))
287 self._cmd.setText(text)
288 self._cmd.setCursorPosition(pos)
289 self._cmd.setFocus()
290 self._cmd.show_cmd.emit()
291
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/qutebrowser/completion/completer.py b/qutebrowser/completion/completer.py
--- a/qutebrowser/completion/completer.py
+++ b/qutebrowser/completion/completer.py
@@ -186,8 +186,6 @@
# keep offering completions (see issue #1519)
self._ignore_change = True
else:
- log.completion.debug("Will ignore next completion update.")
- self._ignore_change = True
self._change_completed_part(text, before, after)
@pyqtSlot()
@@ -284,7 +282,20 @@
# pad with a space if quick-completing the last entry
text += ' '
log.completion.debug("setting text = '{}', pos = {}".format(text, pos))
+
+ # generally, we don't want to let self._cmd emit cursorPositionChanged,
+ # because that'll schedule a completion update. That happens when
+ # tabbing through the completions, and we want to change the command
+ # text but we also want to keep the original completion list for the
+ # command the user manually entered. The exception is when we're
+ # immediately completing, in which case we *do* want to update the
+ # completion view so that we can start completing the next part
+ if not immediate:
+ self._cmd.blockSignals(True)
+
self._cmd.setText(text)
self._cmd.setCursorPosition(pos)
self._cmd.setFocus()
+
+ self._cmd.blockSignals(False)
self._cmd.show_cmd.emit()
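For context, a self-contained sketch of the `blockSignals()` pattern this fix relies on (assumes PyQt5; may need the offscreen Qt platform when run headless; not qutebrowser code):
```python
import sys
from PyQt5.QtWidgets import QApplication, QLineEdit

app = QApplication(sys.argv)
line_edit = QLineEdit()

# Stand-in for the slot that would schedule a completion update.
line_edit.cursorPositionChanged.connect(
    lambda old, new: print("cursor moved {} -> {}".format(old, new)))

# Programmatic rewrite of the text without notifying listeners.
line_edit.blockSignals(True)
line_edit.setText(":set completion.quick")
line_edit.setCursorPosition(len(line_edit.text()))
line_edit.blockSignals(False)

# Signals flow again for later changes.
line_edit.setCursorPosition(0)   # now the slot fires
```
Qt also ships QSignalBlocker for a scoped version of the same idea, which avoids forgetting the matching `blockSignals(False)` call.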
| {"golden_diff": "diff --git a/qutebrowser/completion/completer.py b/qutebrowser/completion/completer.py\n--- a/qutebrowser/completion/completer.py\n+++ b/qutebrowser/completion/completer.py\n@@ -186,8 +186,6 @@\n # keep offering completions (see issue #1519)\n self._ignore_change = True\n else:\n- log.completion.debug(\"Will ignore next completion update.\")\n- self._ignore_change = True\n self._change_completed_part(text, before, after)\n \n @pyqtSlot()\n@@ -284,7 +282,20 @@\n # pad with a space if quick-completing the last entry\n text += ' '\n log.completion.debug(\"setting text = '{}', pos = {}\".format(text, pos))\n+\n+ # generally, we don't want to let self._cmd emit cursorPositionChanged,\n+ # because that'll schedule a completion update. That happens when\n+ # tabbing through the completions, and we want to change the command\n+ # text but we also want to keep the original completion list for the\n+ # command the user manually entered. The exception is when we're\n+ # immediately completing, in which case we *do* want to update the\n+ # completion view so that we can start completing the next part\n+ if not immediate:\n+ self._cmd.blockSignals(True)\n+\n self._cmd.setText(text)\n self._cmd.setCursorPosition(pos)\n self._cmd.setFocus()\n+\n+ self._cmd.blockSignals(False)\n self._cmd.show_cmd.emit()\n", "issue": "Typing space after completing full match does nothing\nWhen typing `:set`, then tabbing to select `:set` and then pressing space, the completion doesn't change (it only does so when typing another space).\r\n\r\nThis also seems to happen with v0.11.0 (with the old completion code), but tagging @rcorre anyways in case you want to take a look :wink:\n", "before_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Completer attached to a CompletionView.\"\"\"\n\nimport attr\nfrom PyQt5.QtCore import pyqtSlot, QObject, QTimer\n\nfrom qutebrowser.config import config\nfrom qutebrowser.commands import cmdutils, runners\nfrom qutebrowser.utils import log, utils, debug\nfrom qutebrowser.completion.models import miscmodels\n\n\[email protected]\nclass CompletionInfo:\n\n \"\"\"Context passed into all completion functions.\"\"\"\n\n config = attr.ib()\n keyconf = attr.ib()\n\n\nclass Completer(QObject):\n\n \"\"\"Completer which manages completions in a CompletionView.\n\n Attributes:\n _cmd: The statusbar Command object this completer belongs to.\n _ignore_change: Whether to ignore the next completion update.\n _timer: The timer used to trigger the completion update.\n _last_cursor_pos: The old cursor position so we avoid double completion\n updates.\n _last_text: The old command text so we avoid double completion updates.\n _last_completion_func: The completion function used for the last text.\n \"\"\"\n\n def __init__(self, cmd, parent=None):\n super().__init__(parent)\n self._cmd = cmd\n self._ignore_change = False\n self._timer = QTimer()\n self._timer.setSingleShot(True)\n self._timer.setInterval(0)\n self._timer.timeout.connect(self._update_completion)\n self._last_cursor_pos = None\n self._last_text = None\n self._last_completion_func = None\n self._cmd.update_completion.connect(self.schedule_completion_update)\n\n def __repr__(self):\n return utils.get_repr(self)\n\n def _model(self):\n \"\"\"Convenience method to get the current completion model.\"\"\"\n completion = self.parent()\n return completion.model()\n\n def _get_new_completion(self, before_cursor, under_cursor):\n \"\"\"Get the completion function based on the current command text.\n\n Args:\n before_cursor: The command chunks before the cursor.\n under_cursor: The command chunk under the cursor.\n\n Return:\n A completion model.\n \"\"\"\n if '--' in before_cursor or under_cursor.startswith('-'):\n # cursor on a flag or after an explicit split (--)\n return None\n log.completion.debug(\"Before removing flags: {}\".format(before_cursor))\n before_cursor = [x for x in before_cursor if not x.startswith('-')]\n log.completion.debug(\"After removing flags: {}\".format(before_cursor))\n if not before_cursor:\n # '|' or 'set|'\n log.completion.debug('Starting command completion')\n return miscmodels.command\n try:\n cmd = cmdutils.cmd_dict[before_cursor[0]]\n except KeyError:\n log.completion.debug(\"No completion for unknown command: {}\"\n .format(before_cursor[0]))\n return None\n argpos = len(before_cursor) - 1\n try:\n func = cmd.get_pos_arg_info(argpos).completion\n except IndexError:\n log.completion.debug(\"No completion in position {}\".format(argpos))\n return None\n return func\n\n def _quote(self, s):\n \"\"\"Quote s if it needs quoting for the commandline.\n\n Note we don't use shlex.quote because that quotes a lot of shell\n metachars we don't need to have quoted.\n \"\"\"\n if not s:\n return \"''\"\n elif any(c in s for c in ' \"\\'\\t\\n\\\\'):\n # use single quotes, and put single quotes into double quotes\n # the string $'b is then quoted as '$'\"'\"'b'\n return \"'\" + s.replace(\"'\", \"'\\\"'\\\"'\") + \"'\"\n else:\n return s\n\n def _partition(self):\n \"\"\"Divide the commandline text into chunks around the cursor position.\n\n Return:\n ([parts_before_cursor], 'part_under_cursor', [parts_after_cursor])\n \"\"\"\n text = self._cmd.text()[len(self._cmd.prefix()):]\n 
if not text or not text.strip():\n # Only \":\", empty part under the cursor with nothing before/after\n return [], '', []\n parser = runners.CommandParser()\n result = parser.parse(text, fallback=True, keep=True)\n # pylint: disable=not-an-iterable\n parts = [x for x in result.cmdline if x]\n # pylint: enable=not-an-iterable\n pos = self._cmd.cursorPosition() - len(self._cmd.prefix())\n pos = min(pos, len(text)) # Qt treats 2-byte UTF-16 chars as 2 chars\n log.completion.debug('partitioning {} around position {}'.format(parts,\n pos))\n for i, part in enumerate(parts):\n pos -= len(part)\n if pos <= 0:\n if part[pos-1:pos+1].isspace():\n # cursor is in a space between two existing words\n parts.insert(i, '')\n prefix = [x.strip() for x in parts[:i]]\n center = parts[i].strip()\n # strip trailing whitepsace included as a separate token\n postfix = [x.strip() for x in parts[i+1:] if not x.isspace()]\n log.completion.debug(\n \"partitioned: {} '{}' {}\".format(prefix, center, postfix))\n return prefix, center, postfix\n\n # We should always return above\n assert False, parts\n\n @pyqtSlot(str)\n def on_selection_changed(self, text):\n \"\"\"Change the completed part if a new item was selected.\n\n Called from the views selectionChanged method.\n\n Args:\n text: Newly selected text.\n \"\"\"\n if text is None:\n return\n before, center, after = self._partition()\n log.completion.debug(\"Changing {} to '{}'\".format(center, text))\n try:\n maxsplit = cmdutils.cmd_dict[before[0]].maxsplit\n except (KeyError, IndexError):\n maxsplit = None\n if maxsplit is None:\n text = self._quote(text)\n model = self._model()\n if model.count() == 1 and config.val.completion.quick:\n # If we only have one item, we want to apply it immediately\n # and go on to the next part.\n self._change_completed_part(text, before, after, immediate=True)\n if maxsplit is not None and maxsplit < len(before):\n # If we are quick-completing the part after maxsplit, don't\n # keep offering completions (see issue #1519)\n self._ignore_change = True\n else:\n log.completion.debug(\"Will ignore next completion update.\")\n self._ignore_change = True\n self._change_completed_part(text, before, after)\n\n @pyqtSlot()\n def schedule_completion_update(self):\n \"\"\"Schedule updating/enabling completion.\n\n For performance reasons we don't want to block here, instead we do this\n in the background.\n\n We delay the update only if we've already input some text and ignore\n updates if the text is shorter than completion.min_chars (unless we're\n hitting backspace in which case updates won't be ignored).\n \"\"\"\n _cmd, _sep, rest = self._cmd.text().partition(' ')\n input_length = len(rest)\n if (0 < input_length < config.val.completion.min_chars and\n self._cmd.cursorPosition() > self._last_cursor_pos):\n log.completion.debug(\"Ignoring update because the length of \"\n \"the text is less than completion.min_chars.\")\n elif (self._cmd.cursorPosition() == self._last_cursor_pos and\n self._cmd.text() == self._last_text):\n log.completion.debug(\"Ignoring update because there were no \"\n \"changes.\")\n else:\n log.completion.debug(\"Scheduling completion update.\")\n start_delay = config.val.completion.delay if self._last_text else 0\n self._timer.start(start_delay)\n self._last_cursor_pos = self._cmd.cursorPosition()\n self._last_text = self._cmd.text()\n\n @pyqtSlot()\n def _update_completion(self):\n \"\"\"Check if completions are available and activate them.\"\"\"\n if self._ignore_change:\n log.completion.debug(\"Ignoring 
completion update because \"\n \"ignore_change is True.\")\n self._ignore_change = False\n return\n\n completion = self.parent()\n\n if self._cmd.prefix() != ':':\n # This is a search or gibberish, so we don't need to complete\n # anything (yet)\n # FIXME complete searches\n # https://github.com/qutebrowser/qutebrowser/issues/32\n completion.set_model(None)\n self._last_completion_func = None\n return\n\n before_cursor, pattern, after_cursor = self._partition()\n\n log.completion.debug(\"Updating completion: {} {} {}\".format(\n before_cursor, pattern, after_cursor))\n\n pattern = pattern.strip(\"'\\\"\")\n func = self._get_new_completion(before_cursor, pattern)\n\n if func is None:\n log.completion.debug('Clearing completion')\n completion.set_model(None)\n self._last_completion_func = None\n return\n\n if func != self._last_completion_func:\n self._last_completion_func = func\n args = (x for x in before_cursor[1:] if not x.startswith('-'))\n with debug.log_time(log.completion,\n 'Starting {} completion'.format(func.__name__)):\n info = CompletionInfo(config=config.instance,\n keyconf=config.key_instance)\n model = func(*args, info=info)\n with debug.log_time(log.completion, 'Set completion model'):\n completion.set_model(model)\n\n completion.set_pattern(pattern)\n\n def _change_completed_part(self, newtext, before, after, immediate=False):\n \"\"\"Change the part we're currently completing in the commandline.\n\n Args:\n text: The text to set (string) for the token under the cursor.\n before: Commandline tokens before the token under the cursor.\n after: Commandline tokens after the token under the cursor.\n immediate: True if the text should be completed immediately\n including a trailing space and we shouldn't continue\n completing the current item.\n \"\"\"\n text = self._cmd.prefix() + ' '.join(before + [newtext])\n pos = len(text) + (1 if immediate else 0)\n if after:\n text += ' ' + ' '.join(after)\n elif immediate:\n # pad with a space if quick-completing the last entry\n text += ' '\n log.completion.debug(\"setting text = '{}', pos = {}\".format(text, pos))\n self._cmd.setText(text)\n self._cmd.setCursorPosition(pos)\n self._cmd.setFocus()\n self._cmd.show_cmd.emit()\n", "path": "qutebrowser/completion/completer.py"}], "after_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Completer attached to a CompletionView.\"\"\"\n\nimport attr\nfrom PyQt5.QtCore import pyqtSlot, QObject, QTimer\n\nfrom qutebrowser.config import config\nfrom qutebrowser.commands import cmdutils, runners\nfrom qutebrowser.utils import log, utils, debug\nfrom qutebrowser.completion.models import miscmodels\n\n\[email protected]\nclass CompletionInfo:\n\n \"\"\"Context passed into all completion functions.\"\"\"\n\n config = attr.ib()\n keyconf = attr.ib()\n\n\nclass Completer(QObject):\n\n \"\"\"Completer which manages completions in a CompletionView.\n\n Attributes:\n _cmd: The statusbar Command object this completer belongs to.\n _ignore_change: Whether to ignore the next completion update.\n _timer: The timer used to trigger the completion update.\n _last_cursor_pos: The old cursor position so we avoid double completion\n updates.\n _last_text: The old command text so we avoid double completion updates.\n _last_completion_func: The completion function used for the last text.\n \"\"\"\n\n def __init__(self, cmd, parent=None):\n super().__init__(parent)\n self._cmd = cmd\n self._ignore_change = False\n self._timer = QTimer()\n self._timer.setSingleShot(True)\n self._timer.setInterval(0)\n self._timer.timeout.connect(self._update_completion)\n self._last_cursor_pos = None\n self._last_text = None\n self._last_completion_func = None\n self._cmd.update_completion.connect(self.schedule_completion_update)\n\n def __repr__(self):\n return utils.get_repr(self)\n\n def _model(self):\n \"\"\"Convenience method to get the current completion model.\"\"\"\n completion = self.parent()\n return completion.model()\n\n def _get_new_completion(self, before_cursor, under_cursor):\n \"\"\"Get the completion function based on the current command text.\n\n Args:\n before_cursor: The command chunks before the cursor.\n under_cursor: The command chunk under the cursor.\n\n Return:\n A completion model.\n \"\"\"\n if '--' in before_cursor or under_cursor.startswith('-'):\n # cursor on a flag or after an explicit split (--)\n return None\n log.completion.debug(\"Before removing flags: {}\".format(before_cursor))\n before_cursor = [x for x in before_cursor if not x.startswith('-')]\n log.completion.debug(\"After removing flags: {}\".format(before_cursor))\n if not before_cursor:\n # '|' or 'set|'\n log.completion.debug('Starting command completion')\n return miscmodels.command\n try:\n cmd = cmdutils.cmd_dict[before_cursor[0]]\n except KeyError:\n log.completion.debug(\"No completion for unknown command: {}\"\n .format(before_cursor[0]))\n return None\n argpos = len(before_cursor) - 1\n try:\n func = cmd.get_pos_arg_info(argpos).completion\n except IndexError:\n log.completion.debug(\"No completion in position {}\".format(argpos))\n return None\n return func\n\n def _quote(self, s):\n \"\"\"Quote s if it needs quoting for the commandline.\n\n Note we don't use shlex.quote because that quotes a lot of shell\n metachars we don't need to have quoted.\n \"\"\"\n if not s:\n return \"''\"\n elif any(c in s for c in ' \"\\'\\t\\n\\\\'):\n # use single quotes, and put single quotes into double quotes\n # the string $'b is then quoted as '$'\"'\"'b'\n return \"'\" + s.replace(\"'\", \"'\\\"'\\\"'\") + \"'\"\n else:\n return s\n\n def _partition(self):\n \"\"\"Divide the commandline text into chunks around the cursor position.\n\n Return:\n ([parts_before_cursor], 'part_under_cursor', [parts_after_cursor])\n \"\"\"\n text = self._cmd.text()[len(self._cmd.prefix()):]\n 
if not text or not text.strip():\n # Only \":\", empty part under the cursor with nothing before/after\n return [], '', []\n parser = runners.CommandParser()\n result = parser.parse(text, fallback=True, keep=True)\n # pylint: disable=not-an-iterable\n parts = [x for x in result.cmdline if x]\n # pylint: enable=not-an-iterable\n pos = self._cmd.cursorPosition() - len(self._cmd.prefix())\n pos = min(pos, len(text)) # Qt treats 2-byte UTF-16 chars as 2 chars\n log.completion.debug('partitioning {} around position {}'.format(parts,\n pos))\n for i, part in enumerate(parts):\n pos -= len(part)\n if pos <= 0:\n if part[pos-1:pos+1].isspace():\n # cursor is in a space between two existing words\n parts.insert(i, '')\n prefix = [x.strip() for x in parts[:i]]\n center = parts[i].strip()\n # strip trailing whitepsace included as a separate token\n postfix = [x.strip() for x in parts[i+1:] if not x.isspace()]\n log.completion.debug(\n \"partitioned: {} '{}' {}\".format(prefix, center, postfix))\n return prefix, center, postfix\n\n # We should always return above\n assert False, parts\n\n @pyqtSlot(str)\n def on_selection_changed(self, text):\n \"\"\"Change the completed part if a new item was selected.\n\n Called from the views selectionChanged method.\n\n Args:\n text: Newly selected text.\n \"\"\"\n if text is None:\n return\n before, center, after = self._partition()\n log.completion.debug(\"Changing {} to '{}'\".format(center, text))\n try:\n maxsplit = cmdutils.cmd_dict[before[0]].maxsplit\n except (KeyError, IndexError):\n maxsplit = None\n if maxsplit is None:\n text = self._quote(text)\n model = self._model()\n if model.count() == 1 and config.val.completion.quick:\n # If we only have one item, we want to apply it immediately\n # and go on to the next part.\n self._change_completed_part(text, before, after, immediate=True)\n if maxsplit is not None and maxsplit < len(before):\n # If we are quick-completing the part after maxsplit, don't\n # keep offering completions (see issue #1519)\n self._ignore_change = True\n else:\n self._change_completed_part(text, before, after)\n\n @pyqtSlot()\n def schedule_completion_update(self):\n \"\"\"Schedule updating/enabling completion.\n\n For performance reasons we don't want to block here, instead we do this\n in the background.\n\n We delay the update only if we've already input some text and ignore\n updates if the text is shorter than completion.min_chars (unless we're\n hitting backspace in which case updates won't be ignored).\n \"\"\"\n _cmd, _sep, rest = self._cmd.text().partition(' ')\n input_length = len(rest)\n if (0 < input_length < config.val.completion.min_chars and\n self._cmd.cursorPosition() > self._last_cursor_pos):\n log.completion.debug(\"Ignoring update because the length of \"\n \"the text is less than completion.min_chars.\")\n elif (self._cmd.cursorPosition() == self._last_cursor_pos and\n self._cmd.text() == self._last_text):\n log.completion.debug(\"Ignoring update because there were no \"\n \"changes.\")\n else:\n log.completion.debug(\"Scheduling completion update.\")\n start_delay = config.val.completion.delay if self._last_text else 0\n self._timer.start(start_delay)\n self._last_cursor_pos = self._cmd.cursorPosition()\n self._last_text = self._cmd.text()\n\n @pyqtSlot()\n def _update_completion(self):\n \"\"\"Check if completions are available and activate them.\"\"\"\n if self._ignore_change:\n log.completion.debug(\"Ignoring completion update because \"\n \"ignore_change is True.\")\n self._ignore_change = False\n 
return\n\n completion = self.parent()\n\n if self._cmd.prefix() != ':':\n # This is a search or gibberish, so we don't need to complete\n # anything (yet)\n # FIXME complete searches\n # https://github.com/qutebrowser/qutebrowser/issues/32\n completion.set_model(None)\n self._last_completion_func = None\n return\n\n before_cursor, pattern, after_cursor = self._partition()\n\n log.completion.debug(\"Updating completion: {} {} {}\".format(\n before_cursor, pattern, after_cursor))\n\n pattern = pattern.strip(\"'\\\"\")\n func = self._get_new_completion(before_cursor, pattern)\n\n if func is None:\n log.completion.debug('Clearing completion')\n completion.set_model(None)\n self._last_completion_func = None\n return\n\n if func != self._last_completion_func:\n self._last_completion_func = func\n args = (x for x in before_cursor[1:] if not x.startswith('-'))\n with debug.log_time(log.completion,\n 'Starting {} completion'.format(func.__name__)):\n info = CompletionInfo(config=config.instance,\n keyconf=config.key_instance)\n model = func(*args, info=info)\n with debug.log_time(log.completion, 'Set completion model'):\n completion.set_model(model)\n\n completion.set_pattern(pattern)\n\n def _change_completed_part(self, newtext, before, after, immediate=False):\n \"\"\"Change the part we're currently completing in the commandline.\n\n Args:\n text: The text to set (string) for the token under the cursor.\n before: Commandline tokens before the token under the cursor.\n after: Commandline tokens after the token under the cursor.\n immediate: True if the text should be completed immediately\n including a trailing space and we shouldn't continue\n completing the current item.\n \"\"\"\n text = self._cmd.prefix() + ' '.join(before + [newtext])\n pos = len(text) + (1 if immediate else 0)\n if after:\n text += ' ' + ' '.join(after)\n elif immediate:\n # pad with a space if quick-completing the last entry\n text += ' '\n log.completion.debug(\"setting text = '{}', pos = {}\".format(text, pos))\n\n # generally, we don't want to let self._cmd emit cursorPositionChanged,\n # because that'll schedule a completion update. That happens when\n # tabbing through the completions, and we want to change the command\n # text but we also want to keep the original completion list for the\n # command the user manually entered. The exception is when we're\n # immediately completing, in which case we *do* want to update the\n # completion view so that we can start completing the next part\n if not immediate:\n self._cmd.blockSignals(True)\n\n self._cmd.setText(text)\n self._cmd.setCursorPosition(pos)\n self._cmd.setFocus()\n\n self._cmd.blockSignals(False)\n self._cmd.show_cmd.emit()\n", "path": "qutebrowser/completion/completer.py"}]} | 3,615 | 357 |
gh_patches_debug_15520 | rasdani/github-patches | git_diff | CTFd__CTFd-2539 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Users cant solve challenge after changing to user mode
**Environment**:
- CTFd Version/Commit: 3.7.0
- Operating System: Ubuntu 22.04.3 LTS
- Web Browser and Version: The bug occurs in the newest Chrome, Chromium, and Firefox; not tested further.
**What happened?**
Some of our users were not able to submit the correct flag to solve a challenge. This occurred after we swapped to user mode for one of our events; our CTFd instance is usually in team mode. We narrowed it down to the teams still existing, which leaves some users unable to get a solve because they are in a "hidden" team where one of their team members has already solved the challenge.
**What did you expect to happen?**
Expected to be able to swap to user mode and have all users able to solve all challenges, so we don't have to create new teams and users every time we want to swap modes.
**How to reproduce your issue**
1. Create a few users in the same team.
2. Swap to user mode.
3. Have one of the users solve a challenge.
4. Try to solve the same challenge with one of the other users.
**Any associated stack traces or error logs**
n/a
--- END ISSUE ---
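For context, the fix accepted for this issue (shown in the diff further below) detaches users from their old teams and clears cached sessions when resetting into user mode. A minimal sketch of that cleanup, reusing the model and cache helper names from that patch; this is illustrative, not a drop-in admin view:

```python
# Sketch of the stale-team cleanup, following the accepted patch below.
# Users, Teams, db and the two cache helpers are the CTFd objects that
# the patch imports.
from CTFd.cache import clear_all_team_sessions, clear_all_user_sessions
from CTFd.models import Teams, Users, db


def detach_users_from_teams():
    # Remove the team association first so deleting Teams does not leave
    # dangling foreign keys, then drop the teams themselves.
    db.session.query(Users).update({Users.team_id: None})
    Teams.query.delete()
    db.session.commit()

    # Invalidate cached user/team sessions so solve checks stop seeing
    # the old "hidden" team membership immediately.
    clear_all_user_sessions()
    clear_all_team_sessions()
```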
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `CTFd/admin/__init__.py`
Content:
```
1 import csv # noqa: I001
2 import datetime
3 import os
4 from io import StringIO
5
6 from flask import Blueprint, abort
7 from flask import current_app as app
8 from flask import (
9 jsonify,
10 redirect,
11 render_template,
12 render_template_string,
13 request,
14 send_file,
15 url_for,
16 )
17
18 admin = Blueprint("admin", __name__)
19
20 # isort:imports-firstparty
21 from CTFd.admin import challenges # noqa: F401,I001
22 from CTFd.admin import notifications # noqa: F401,I001
23 from CTFd.admin import pages # noqa: F401,I001
24 from CTFd.admin import scoreboard # noqa: F401,I001
25 from CTFd.admin import statistics # noqa: F401,I001
26 from CTFd.admin import submissions # noqa: F401,I001
27 from CTFd.admin import teams # noqa: F401,I001
28 from CTFd.admin import users # noqa: F401,I001
29 from CTFd.cache import (
30 cache,
31 clear_challenges,
32 clear_config,
33 clear_pages,
34 clear_standings,
35 )
36 from CTFd.models import (
37 Awards,
38 Challenges,
39 Configs,
40 Notifications,
41 Pages,
42 Solves,
43 Submissions,
44 Teams,
45 Tracking,
46 Unlocks,
47 Users,
48 db,
49 )
50 from CTFd.utils import config as ctf_config
51 from CTFd.utils import get_app_config, get_config, set_config
52 from CTFd.utils.csv import dump_csv, load_challenges_csv, load_teams_csv, load_users_csv
53 from CTFd.utils.decorators import admins_only
54 from CTFd.utils.exports import background_import_ctf
55 from CTFd.utils.exports import export_ctf as export_ctf_util
56 from CTFd.utils.security.auth import logout_user
57 from CTFd.utils.uploads import delete_file
58 from CTFd.utils.user import is_admin
59
60
61 @admin.route("/admin", methods=["GET"])
62 def view():
63 if is_admin():
64 return redirect(url_for("admin.statistics"))
65 return redirect(url_for("auth.login"))
66
67
68 @admin.route("/admin/plugins/<plugin>", methods=["GET", "POST"])
69 @admins_only
70 def plugin(plugin):
71 if request.method == "GET":
72 plugins_path = os.path.join(app.root_path, "plugins")
73
74 config_html_plugins = [
75 name
76 for name in os.listdir(plugins_path)
77 if os.path.isfile(os.path.join(plugins_path, name, "config.html"))
78 ]
79
80 if plugin in config_html_plugins:
81 config_html = open(
82 os.path.join(app.root_path, "plugins", plugin, "config.html")
83 ).read()
84 return render_template_string(config_html)
85 abort(404)
86 elif request.method == "POST":
87 for k, v in request.form.items():
88 if k == "nonce":
89 continue
90 set_config(k, v)
91 with app.app_context():
92 clear_config()
93 return "1"
94
95
96 @admin.route("/admin/import", methods=["GET", "POST"])
97 @admins_only
98 def import_ctf():
99 if request.method == "GET":
100 start_time = cache.get("import_start_time")
101 end_time = cache.get("import_end_time")
102 import_status = cache.get("import_status")
103 import_error = cache.get("import_error")
104 return render_template(
105 "admin/import.html",
106 start_time=start_time,
107 end_time=end_time,
108 import_status=import_status,
109 import_error=import_error,
110 )
111 elif request.method == "POST":
112 backup = request.files["backup"]
113 background_import_ctf(backup)
114 return redirect(url_for("admin.import_ctf"))
115
116
117 @admin.route("/admin/export", methods=["GET", "POST"])
118 @admins_only
119 def export_ctf():
120 backup = export_ctf_util()
121 ctf_name = ctf_config.ctf_name()
122 day = datetime.datetime.now().strftime("%Y-%m-%d_%T")
123 full_name = "{}.{}.zip".format(ctf_name, day)
124 return send_file(
125 backup, cache_timeout=-1, as_attachment=True, attachment_filename=full_name
126 )
127
128
129 @admin.route("/admin/import/csv", methods=["POST"])
130 @admins_only
131 def import_csv():
132 csv_type = request.form["csv_type"]
133 # Try really hard to load data in properly no matter what nonsense Excel gave you
134 raw = request.files["csv_file"].stream.read()
135 try:
136 csvdata = raw.decode("utf-8-sig")
137 except UnicodeDecodeError:
138 try:
139 csvdata = raw.decode("cp1252")
140 except UnicodeDecodeError:
141 csvdata = raw.decode("latin-1")
142 csvfile = StringIO(csvdata)
143
144 loaders = {
145 "challenges": load_challenges_csv,
146 "users": load_users_csv,
147 "teams": load_teams_csv,
148 }
149
150 loader = loaders[csv_type]
151 reader = csv.DictReader(csvfile)
152 success = loader(reader)
153 if success is True:
154 return redirect(url_for("admin.config"))
155 else:
156 return jsonify(success), 500
157
158
159 @admin.route("/admin/export/csv")
160 @admins_only
161 def export_csv():
162 table = request.args.get("table")
163
164 output = dump_csv(name=table)
165
166 return send_file(
167 output,
168 as_attachment=True,
169 max_age=-1,
170 download_name="{name}-{table}.csv".format(
171 name=ctf_config.ctf_name(), table=table
172 ),
173 )
174
175
176 @admin.route("/admin/config", methods=["GET", "POST"])
177 @admins_only
178 def config():
179 # Clear the config cache so that we don't get stale values
180 clear_config()
181
182 configs = Configs.query.all()
183 configs = {c.key: get_config(c.key) for c in configs}
184
185 themes = ctf_config.get_themes()
186
187 # Remove current theme but ignore failure
188 try:
189 themes.remove(get_config("ctf_theme"))
190 except ValueError:
191 pass
192
193 force_html_sanitization = get_app_config("HTML_SANITIZATION")
194
195 return render_template(
196 "admin/config.html",
197 themes=themes,
198 **configs,
199 force_html_sanitization=force_html_sanitization
200 )
201
202
203 @admin.route("/admin/reset", methods=["GET", "POST"])
204 @admins_only
205 def reset():
206 if request.method == "POST":
207 require_setup = False
208 logout = False
209 next_url = url_for("admin.statistics")
210
211 data = request.form
212
213 if data.get("pages"):
214 _pages = Pages.query.all()
215 for p in _pages:
216 for f in p.files:
217 delete_file(file_id=f.id)
218
219 Pages.query.delete()
220
221 if data.get("notifications"):
222 Notifications.query.delete()
223
224 if data.get("challenges"):
225 _challenges = Challenges.query.all()
226 for c in _challenges:
227 for f in c.files:
228 delete_file(file_id=f.id)
229 Challenges.query.delete()
230
231 if data.get("accounts"):
232 Users.query.delete()
233 Teams.query.delete()
234 require_setup = True
235 logout = True
236
237 if data.get("submissions"):
238 Solves.query.delete()
239 Submissions.query.delete()
240 Awards.query.delete()
241 Unlocks.query.delete()
242 Tracking.query.delete()
243
244 if require_setup:
245 set_config("setup", False)
246 cache.clear()
247 logout_user()
248 next_url = url_for("views.setup")
249
250 db.session.commit()
251
252 clear_pages()
253 clear_standings()
254 clear_challenges()
255 clear_config()
256
257 if logout is True:
258 cache.clear()
259 logout_user()
260
261 db.session.close()
262 return redirect(next_url)
263
264 return render_template("admin/reset.html")
265
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/CTFd/admin/__init__.py b/CTFd/admin/__init__.py
--- a/CTFd/admin/__init__.py
+++ b/CTFd/admin/__init__.py
@@ -28,6 +28,8 @@
from CTFd.admin import users # noqa: F401,I001
from CTFd.cache import (
cache,
+ clear_all_team_sessions,
+ clear_all_user_sessions,
clear_challenges,
clear_config,
clear_pages,
@@ -241,6 +243,13 @@
Unlocks.query.delete()
Tracking.query.delete()
+ if data.get("user_mode") == "users":
+ db.session.query(Users).update({Users.team_id: None})
+ Teams.query.delete()
+
+ clear_all_user_sessions()
+ clear_all_team_sessions()
+
if require_setup:
set_config("setup", False)
cache.clear()
| {"golden_diff": "diff --git a/CTFd/admin/__init__.py b/CTFd/admin/__init__.py\n--- a/CTFd/admin/__init__.py\n+++ b/CTFd/admin/__init__.py\n@@ -28,6 +28,8 @@\n from CTFd.admin import users # noqa: F401,I001\n from CTFd.cache import (\n cache,\n+ clear_all_team_sessions,\n+ clear_all_user_sessions,\n clear_challenges,\n clear_config,\n clear_pages,\n@@ -241,6 +243,13 @@\n Unlocks.query.delete()\n Tracking.query.delete()\n \n+ if data.get(\"user_mode\") == \"users\":\n+ db.session.query(Users).update({Users.team_id: None})\n+ Teams.query.delete()\n+\n+ clear_all_user_sessions()\n+ clear_all_team_sessions()\n+\n if require_setup:\n set_config(\"setup\", False)\n cache.clear()\n", "issue": "Users cant solve challenge after changing to user mode\n**Environment**:\r\n\r\n- CTFd Version/Commit: 3.7.0\r\n- Operating System: Ubuntu 22.04.3 LTS\r\n- Web Browser and Version: Bug is occurs in Newest Chrome, Chromium and Firefox. Not tested more.\r\n\r\n**What happened?**\r\nSome of our users was not able to submit correct flag to solve the challenge. This occurred for after we swapped to user mode for one of our events, our ctfd instance is usually in Team mode. We narrowed it down to being because the teams still exist, making some users unable to get a solve because they are in a \"hidden\" team where one of their team members already had solved the challenge. \r\n\r\n**What did you expect to happen?**\r\nExpected to be able to swap to user mode, and be able to have all users solve all challenges, so we dont have to create new teams and users every time we would like to swap mode.\r\n\r\n**How to reproduce your issue**\r\nCreate a few users in the same team. \r\nSwap to user mode.\r\nHave one of the users solve a challenge.\r\nTry to solve the same challenge with one of the other users.\r\n\r\n**Any associated stack traces or error logs**\r\nn/a\r\n\n", "before_files": [{"content": "import csv # noqa: I001\nimport datetime\nimport os\nfrom io import StringIO\n\nfrom flask import Blueprint, abort\nfrom flask import current_app as app\nfrom flask import (\n jsonify,\n redirect,\n render_template,\n render_template_string,\n request,\n send_file,\n url_for,\n)\n\nadmin = Blueprint(\"admin\", __name__)\n\n# isort:imports-firstparty\nfrom CTFd.admin import challenges # noqa: F401,I001\nfrom CTFd.admin import notifications # noqa: F401,I001\nfrom CTFd.admin import pages # noqa: F401,I001\nfrom CTFd.admin import scoreboard # noqa: F401,I001\nfrom CTFd.admin import statistics # noqa: F401,I001\nfrom CTFd.admin import submissions # noqa: F401,I001\nfrom CTFd.admin import teams # noqa: F401,I001\nfrom CTFd.admin import users # noqa: F401,I001\nfrom CTFd.cache import (\n cache,\n clear_challenges,\n clear_config,\n clear_pages,\n clear_standings,\n)\nfrom CTFd.models import (\n Awards,\n Challenges,\n Configs,\n Notifications,\n Pages,\n Solves,\n Submissions,\n Teams,\n Tracking,\n Unlocks,\n Users,\n db,\n)\nfrom CTFd.utils import config as ctf_config\nfrom CTFd.utils import get_app_config, get_config, set_config\nfrom CTFd.utils.csv import dump_csv, load_challenges_csv, load_teams_csv, load_users_csv\nfrom CTFd.utils.decorators import admins_only\nfrom CTFd.utils.exports import background_import_ctf\nfrom CTFd.utils.exports import export_ctf as export_ctf_util\nfrom CTFd.utils.security.auth import logout_user\nfrom CTFd.utils.uploads import delete_file\nfrom CTFd.utils.user import is_admin\n\n\[email protected](\"/admin\", methods=[\"GET\"])\ndef view():\n if is_admin():\n return 
redirect(url_for(\"admin.statistics\"))\n return redirect(url_for(\"auth.login\"))\n\n\[email protected](\"/admin/plugins/<plugin>\", methods=[\"GET\", \"POST\"])\n@admins_only\ndef plugin(plugin):\n if request.method == \"GET\":\n plugins_path = os.path.join(app.root_path, \"plugins\")\n\n config_html_plugins = [\n name\n for name in os.listdir(plugins_path)\n if os.path.isfile(os.path.join(plugins_path, name, \"config.html\"))\n ]\n\n if plugin in config_html_plugins:\n config_html = open(\n os.path.join(app.root_path, \"plugins\", plugin, \"config.html\")\n ).read()\n return render_template_string(config_html)\n abort(404)\n elif request.method == \"POST\":\n for k, v in request.form.items():\n if k == \"nonce\":\n continue\n set_config(k, v)\n with app.app_context():\n clear_config()\n return \"1\"\n\n\[email protected](\"/admin/import\", methods=[\"GET\", \"POST\"])\n@admins_only\ndef import_ctf():\n if request.method == \"GET\":\n start_time = cache.get(\"import_start_time\")\n end_time = cache.get(\"import_end_time\")\n import_status = cache.get(\"import_status\")\n import_error = cache.get(\"import_error\")\n return render_template(\n \"admin/import.html\",\n start_time=start_time,\n end_time=end_time,\n import_status=import_status,\n import_error=import_error,\n )\n elif request.method == \"POST\":\n backup = request.files[\"backup\"]\n background_import_ctf(backup)\n return redirect(url_for(\"admin.import_ctf\"))\n\n\[email protected](\"/admin/export\", methods=[\"GET\", \"POST\"])\n@admins_only\ndef export_ctf():\n backup = export_ctf_util()\n ctf_name = ctf_config.ctf_name()\n day = datetime.datetime.now().strftime(\"%Y-%m-%d_%T\")\n full_name = \"{}.{}.zip\".format(ctf_name, day)\n return send_file(\n backup, cache_timeout=-1, as_attachment=True, attachment_filename=full_name\n )\n\n\[email protected](\"/admin/import/csv\", methods=[\"POST\"])\n@admins_only\ndef import_csv():\n csv_type = request.form[\"csv_type\"]\n # Try really hard to load data in properly no matter what nonsense Excel gave you\n raw = request.files[\"csv_file\"].stream.read()\n try:\n csvdata = raw.decode(\"utf-8-sig\")\n except UnicodeDecodeError:\n try:\n csvdata = raw.decode(\"cp1252\")\n except UnicodeDecodeError:\n csvdata = raw.decode(\"latin-1\")\n csvfile = StringIO(csvdata)\n\n loaders = {\n \"challenges\": load_challenges_csv,\n \"users\": load_users_csv,\n \"teams\": load_teams_csv,\n }\n\n loader = loaders[csv_type]\n reader = csv.DictReader(csvfile)\n success = loader(reader)\n if success is True:\n return redirect(url_for(\"admin.config\"))\n else:\n return jsonify(success), 500\n\n\[email protected](\"/admin/export/csv\")\n@admins_only\ndef export_csv():\n table = request.args.get(\"table\")\n\n output = dump_csv(name=table)\n\n return send_file(\n output,\n as_attachment=True,\n max_age=-1,\n download_name=\"{name}-{table}.csv\".format(\n name=ctf_config.ctf_name(), table=table\n ),\n )\n\n\[email protected](\"/admin/config\", methods=[\"GET\", \"POST\"])\n@admins_only\ndef config():\n # Clear the config cache so that we don't get stale values\n clear_config()\n\n configs = Configs.query.all()\n configs = {c.key: get_config(c.key) for c in configs}\n\n themes = ctf_config.get_themes()\n\n # Remove current theme but ignore failure\n try:\n themes.remove(get_config(\"ctf_theme\"))\n except ValueError:\n pass\n\n force_html_sanitization = get_app_config(\"HTML_SANITIZATION\")\n\n return render_template(\n \"admin/config.html\",\n themes=themes,\n **configs,\n 
force_html_sanitization=force_html_sanitization\n )\n\n\[email protected](\"/admin/reset\", methods=[\"GET\", \"POST\"])\n@admins_only\ndef reset():\n if request.method == \"POST\":\n require_setup = False\n logout = False\n next_url = url_for(\"admin.statistics\")\n\n data = request.form\n\n if data.get(\"pages\"):\n _pages = Pages.query.all()\n for p in _pages:\n for f in p.files:\n delete_file(file_id=f.id)\n\n Pages.query.delete()\n\n if data.get(\"notifications\"):\n Notifications.query.delete()\n\n if data.get(\"challenges\"):\n _challenges = Challenges.query.all()\n for c in _challenges:\n for f in c.files:\n delete_file(file_id=f.id)\n Challenges.query.delete()\n\n if data.get(\"accounts\"):\n Users.query.delete()\n Teams.query.delete()\n require_setup = True\n logout = True\n\n if data.get(\"submissions\"):\n Solves.query.delete()\n Submissions.query.delete()\n Awards.query.delete()\n Unlocks.query.delete()\n Tracking.query.delete()\n\n if require_setup:\n set_config(\"setup\", False)\n cache.clear()\n logout_user()\n next_url = url_for(\"views.setup\")\n\n db.session.commit()\n\n clear_pages()\n clear_standings()\n clear_challenges()\n clear_config()\n\n if logout is True:\n cache.clear()\n logout_user()\n\n db.session.close()\n return redirect(next_url)\n\n return render_template(\"admin/reset.html\")\n", "path": "CTFd/admin/__init__.py"}], "after_files": [{"content": "import csv # noqa: I001\nimport datetime\nimport os\nfrom io import StringIO\n\nfrom flask import Blueprint, abort\nfrom flask import current_app as app\nfrom flask import (\n jsonify,\n redirect,\n render_template,\n render_template_string,\n request,\n send_file,\n url_for,\n)\n\nadmin = Blueprint(\"admin\", __name__)\n\n# isort:imports-firstparty\nfrom CTFd.admin import challenges # noqa: F401,I001\nfrom CTFd.admin import notifications # noqa: F401,I001\nfrom CTFd.admin import pages # noqa: F401,I001\nfrom CTFd.admin import scoreboard # noqa: F401,I001\nfrom CTFd.admin import statistics # noqa: F401,I001\nfrom CTFd.admin import submissions # noqa: F401,I001\nfrom CTFd.admin import teams # noqa: F401,I001\nfrom CTFd.admin import users # noqa: F401,I001\nfrom CTFd.cache import (\n cache,\n clear_all_team_sessions,\n clear_all_user_sessions,\n clear_challenges,\n clear_config,\n clear_pages,\n clear_standings,\n)\nfrom CTFd.models import (\n Awards,\n Challenges,\n Configs,\n Notifications,\n Pages,\n Solves,\n Submissions,\n Teams,\n Tracking,\n Unlocks,\n Users,\n db,\n)\nfrom CTFd.utils import config as ctf_config\nfrom CTFd.utils import get_app_config, get_config, set_config\nfrom CTFd.utils.csv import dump_csv, load_challenges_csv, load_teams_csv, load_users_csv\nfrom CTFd.utils.decorators import admins_only\nfrom CTFd.utils.exports import background_import_ctf\nfrom CTFd.utils.exports import export_ctf as export_ctf_util\nfrom CTFd.utils.security.auth import logout_user\nfrom CTFd.utils.uploads import delete_file\nfrom CTFd.utils.user import is_admin\n\n\[email protected](\"/admin\", methods=[\"GET\"])\ndef view():\n if is_admin():\n return redirect(url_for(\"admin.statistics\"))\n return redirect(url_for(\"auth.login\"))\n\n\[email protected](\"/admin/plugins/<plugin>\", methods=[\"GET\", \"POST\"])\n@admins_only\ndef plugin(plugin):\n if request.method == \"GET\":\n plugins_path = os.path.join(app.root_path, \"plugins\")\n\n config_html_plugins = [\n name\n for name in os.listdir(plugins_path)\n if os.path.isfile(os.path.join(plugins_path, name, \"config.html\"))\n ]\n\n if plugin in config_html_plugins:\n 
config_html = open(\n os.path.join(app.root_path, \"plugins\", plugin, \"config.html\")\n ).read()\n return render_template_string(config_html)\n abort(404)\n elif request.method == \"POST\":\n for k, v in request.form.items():\n if k == \"nonce\":\n continue\n set_config(k, v)\n with app.app_context():\n clear_config()\n return \"1\"\n\n\[email protected](\"/admin/import\", methods=[\"GET\", \"POST\"])\n@admins_only\ndef import_ctf():\n if request.method == \"GET\":\n start_time = cache.get(\"import_start_time\")\n end_time = cache.get(\"import_end_time\")\n import_status = cache.get(\"import_status\")\n import_error = cache.get(\"import_error\")\n return render_template(\n \"admin/import.html\",\n start_time=start_time,\n end_time=end_time,\n import_status=import_status,\n import_error=import_error,\n )\n elif request.method == \"POST\":\n backup = request.files[\"backup\"]\n background_import_ctf(backup)\n return redirect(url_for(\"admin.import_ctf\"))\n\n\[email protected](\"/admin/export\", methods=[\"GET\", \"POST\"])\n@admins_only\ndef export_ctf():\n backup = export_ctf_util()\n ctf_name = ctf_config.ctf_name()\n day = datetime.datetime.now().strftime(\"%Y-%m-%d_%T\")\n full_name = \"{}.{}.zip\".format(ctf_name, day)\n return send_file(\n backup, cache_timeout=-1, as_attachment=True, attachment_filename=full_name\n )\n\n\[email protected](\"/admin/import/csv\", methods=[\"POST\"])\n@admins_only\ndef import_csv():\n csv_type = request.form[\"csv_type\"]\n # Try really hard to load data in properly no matter what nonsense Excel gave you\n raw = request.files[\"csv_file\"].stream.read()\n try:\n csvdata = raw.decode(\"utf-8-sig\")\n except UnicodeDecodeError:\n try:\n csvdata = raw.decode(\"cp1252\")\n except UnicodeDecodeError:\n csvdata = raw.decode(\"latin-1\")\n csvfile = StringIO(csvdata)\n\n loaders = {\n \"challenges\": load_challenges_csv,\n \"users\": load_users_csv,\n \"teams\": load_teams_csv,\n }\n\n loader = loaders[csv_type]\n reader = csv.DictReader(csvfile)\n success = loader(reader)\n if success is True:\n return redirect(url_for(\"admin.config\"))\n else:\n return jsonify(success), 500\n\n\[email protected](\"/admin/export/csv\")\n@admins_only\ndef export_csv():\n table = request.args.get(\"table\")\n\n output = dump_csv(name=table)\n\n return send_file(\n output,\n as_attachment=True,\n max_age=-1,\n download_name=\"{name}-{table}.csv\".format(\n name=ctf_config.ctf_name(), table=table\n ),\n )\n\n\[email protected](\"/admin/config\", methods=[\"GET\", \"POST\"])\n@admins_only\ndef config():\n # Clear the config cache so that we don't get stale values\n clear_config()\n\n configs = Configs.query.all()\n configs = {c.key: get_config(c.key) for c in configs}\n\n themes = ctf_config.get_themes()\n\n # Remove current theme but ignore failure\n try:\n themes.remove(get_config(\"ctf_theme\"))\n except ValueError:\n pass\n\n force_html_sanitization = get_app_config(\"HTML_SANITIZATION\")\n\n return render_template(\n \"admin/config.html\",\n themes=themes,\n **configs,\n force_html_sanitization=force_html_sanitization\n )\n\n\[email protected](\"/admin/reset\", methods=[\"GET\", \"POST\"])\n@admins_only\ndef reset():\n if request.method == \"POST\":\n require_setup = False\n logout = False\n next_url = url_for(\"admin.statistics\")\n\n data = request.form\n\n if data.get(\"pages\"):\n _pages = Pages.query.all()\n for p in _pages:\n for f in p.files:\n delete_file(file_id=f.id)\n\n Pages.query.delete()\n\n if data.get(\"notifications\"):\n Notifications.query.delete()\n\n 
if data.get(\"challenges\"):\n _challenges = Challenges.query.all()\n for c in _challenges:\n for f in c.files:\n delete_file(file_id=f.id)\n Challenges.query.delete()\n\n if data.get(\"accounts\"):\n Users.query.delete()\n Teams.query.delete()\n require_setup = True\n logout = True\n\n if data.get(\"submissions\"):\n Solves.query.delete()\n Submissions.query.delete()\n Awards.query.delete()\n Unlocks.query.delete()\n Tracking.query.delete()\n\n if data.get(\"user_mode\") == \"users\":\n db.session.query(Users).update({Users.team_id: None})\n Teams.query.delete()\n\n clear_all_user_sessions()\n clear_all_team_sessions()\n\n if require_setup:\n set_config(\"setup\", False)\n cache.clear()\n logout_user()\n next_url = url_for(\"views.setup\")\n\n db.session.commit()\n\n clear_pages()\n clear_standings()\n clear_challenges()\n clear_config()\n\n if logout is True:\n cache.clear()\n logout_user()\n\n db.session.close()\n return redirect(next_url)\n\n return render_template(\"admin/reset.html\")\n", "path": "CTFd/admin/__init__.py"}]} | 2,912 | 208 |
gh_patches_debug_34011 | rasdani/github-patches | git_diff | statsmodels__statsmodels-5203 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
web._generate_url returns ValueError -- is this intentional?
```
return ValueError('Input not understood')
```
It seems like it should be `raise` instead of `return`. Am I missing something?
--- END ISSUE ---
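As a quick illustration of the behaviour the reporter is questioning, here is a small, hypothetical example (not statsmodels code) showing how returning an exception object differs from raising it:

```python
def lookup_returning(key, table):
    if key not in table:
        # The caller receives a ValueError instance as an ordinary value;
        # no error is actually signaled.
        return ValueError('Input not understood')
    return table[key]


def lookup_raising(key, table):
    if key not in table:
        # Control flow stops here and the caller must handle the error.
        raise ValueError('Input not understood')
    return table[key]


result = lookup_returning('missing', {})
print(isinstance(result, ValueError))  # True -- easily mistaken for a valid result
```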
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `statsmodels/tools/web.py`
Content:
```
1 """
2 Provides a function to open the system browser to either search or go directly
3 to a function's reference
4 """
5 import webbrowser
6
7 from statsmodels.compat.python import urlencode
8 from statsmodels import __version__
9
10 BASE_URL = 'https://www.statsmodels.org/'
11
12
13 def _generate_url(arg, stable):
14 """
15 Parse inputs and return a correctly formatted URL or an error if the input
16 is not understandable
17 """
18 url = BASE_URL
19 if stable:
20 url += 'stable/'
21 else:
22 url += 'devel/'
23
24 if arg is None:
25 return url
26 elif type(arg) is str:
27 url += 'search.html?'
28 url += urlencode({'q': arg})
29 url += '&check_keywords=yes&area=default'
30 else:
31 try:
32 func = arg
33 func_name = func.__name__
34 func_module = func.__module__
35 if not func_module.startswith('statsmodels.'):
36 return ValueError('Function must be from statsmodels')
37 url += 'generated/'
38 url += func_module + '.' + func_name + '.html'
39 except:
40 return ValueError('Input not understood')
41 return url
42
43
44 def webdoc(arg=None, stable=None):
45 """
46 Opens a browser and displays online documentation
47
48 Parameters
49 ----------
50 arg, optional : string or statsmodels function
51 Either a string to search the documentation or a function
52 stable, optional : bool
53 Flag indicating whether to use the stable documentation (True) or
54 the development documentation (False). If not provided, opens
55 the stable documentation if the current version of statsmodels is a
56 release
57
58 Examples
59 --------
60 >>> import statsmodels.api as sm
61 >>> sm.webdoc() # Documention site
62 >>> sm.webdoc('glm') # Search for glm in docs
63 >>> sm.webdoc(sm.OLS, stable=False) # Go to generated help for OLS, devel
64
65 Notes
66 -----
67 By default, open stable documentation if the current version of statsmodels
68 is a release. Otherwise opens the development documentation.
69
70 Uses the default system browser.
71 """
72 stable = __version__ if 'dev' not in __version__ else stable
73 url_or_error = _generate_url(arg, stable)
74 if isinstance(url_or_error, ValueError):
75 raise url_or_error
76 webbrowser.open(url_or_error)
77 return None
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/statsmodels/tools/web.py b/statsmodels/tools/web.py
--- a/statsmodels/tools/web.py
+++ b/statsmodels/tools/web.py
@@ -4,7 +4,7 @@
"""
import webbrowser
-from statsmodels.compat.python import urlencode
+from statsmodels.compat.python import urlencode, string_types
from statsmodels import __version__
BASE_URL = 'https://www.statsmodels.org/'
@@ -12,8 +12,8 @@
def _generate_url(arg, stable):
"""
- Parse inputs and return a correctly formatted URL or an error if the input
- is not understandable
+ Parse inputs and return a correctly formatted URL or raises ValueError
+ if the input is not understandable
"""
url = BASE_URL
if stable:
@@ -23,7 +23,7 @@
if arg is None:
return url
- elif type(arg) is str:
+ elif isinstance(arg, string_types):
url += 'search.html?'
url += urlencode({'q': arg})
url += '&check_keywords=yes&area=default'
@@ -33,11 +33,11 @@
func_name = func.__name__
func_module = func.__module__
if not func_module.startswith('statsmodels.'):
- return ValueError('Function must be from statsmodels')
+ raise ValueError('Function must be from statsmodels')
url += 'generated/'
url += func_module + '.' + func_name + '.html'
- except:
- return ValueError('Input not understood')
+ except AttributeError:
+ raise ValueError('Input not understood')
return url
@@ -71,7 +71,5 @@
"""
stable = __version__ if 'dev' not in __version__ else stable
url_or_error = _generate_url(arg, stable)
- if isinstance(url_or_error, ValueError):
- raise url_or_error
webbrowser.open(url_or_error)
return None
| {"golden_diff": "diff --git a/statsmodels/tools/web.py b/statsmodels/tools/web.py\n--- a/statsmodels/tools/web.py\n+++ b/statsmodels/tools/web.py\n@@ -4,7 +4,7 @@\n \"\"\"\n import webbrowser\n \n-from statsmodels.compat.python import urlencode\n+from statsmodels.compat.python import urlencode, string_types\n from statsmodels import __version__\n \n BASE_URL = 'https://www.statsmodels.org/'\n@@ -12,8 +12,8 @@\n \n def _generate_url(arg, stable):\n \"\"\"\n- Parse inputs and return a correctly formatted URL or an error if the input\n- is not understandable\n+ Parse inputs and return a correctly formatted URL or raises ValueError\n+ if the input is not understandable\n \"\"\"\n url = BASE_URL\n if stable:\n@@ -23,7 +23,7 @@\n \n if arg is None:\n return url\n- elif type(arg) is str:\n+ elif isinstance(arg, string_types):\n url += 'search.html?'\n url += urlencode({'q': arg})\n url += '&check_keywords=yes&area=default'\n@@ -33,11 +33,11 @@\n func_name = func.__name__\n func_module = func.__module__\n if not func_module.startswith('statsmodels.'):\n- return ValueError('Function must be from statsmodels')\n+ raise ValueError('Function must be from statsmodels')\n url += 'generated/'\n url += func_module + '.' + func_name + '.html'\n- except:\n- return ValueError('Input not understood')\n+ except AttributeError:\n+ raise ValueError('Input not understood')\n return url\n \n \n@@ -71,7 +71,5 @@\n \"\"\"\n stable = __version__ if 'dev' not in __version__ else stable\n url_or_error = _generate_url(arg, stable)\n- if isinstance(url_or_error, ValueError):\n- raise url_or_error\n webbrowser.open(url_or_error)\n return None\n", "issue": "web._generate_url returns ValueError--> intentional?\n```\r\n return ValueError('Input not understood')\r\n```\r\nseems like it should be `raise` instead of `return`. Am I missing something?\n", "before_files": [{"content": "\"\"\"\nProvides a function to open the system browser to either search or go directly\nto a function's reference\n\"\"\"\nimport webbrowser\n\nfrom statsmodels.compat.python import urlencode\nfrom statsmodels import __version__\n\nBASE_URL = 'https://www.statsmodels.org/'\n\n\ndef _generate_url(arg, stable):\n \"\"\"\n Parse inputs and return a correctly formatted URL or an error if the input\n is not understandable\n \"\"\"\n url = BASE_URL\n if stable:\n url += 'stable/'\n else:\n url += 'devel/'\n\n if arg is None:\n return url\n elif type(arg) is str:\n url += 'search.html?'\n url += urlencode({'q': arg})\n url += '&check_keywords=yes&area=default'\n else:\n try:\n func = arg\n func_name = func.__name__\n func_module = func.__module__\n if not func_module.startswith('statsmodels.'):\n return ValueError('Function must be from statsmodels')\n url += 'generated/'\n url += func_module + '.' + func_name + '.html'\n except:\n return ValueError('Input not understood')\n return url\n\n\ndef webdoc(arg=None, stable=None):\n \"\"\"\n Opens a browser and displays online documentation\n\n Parameters\n ----------\n arg, optional : string or statsmodels function\n Either a string to search the documentation or a function\n stable, optional : bool\n Flag indicating whether to use the stable documentation (True) or\n the development documentation (False). 
If not provided, opens\n the stable documentation if the current version of statsmodels is a\n release\n\n Examples\n --------\n >>> import statsmodels.api as sm\n >>> sm.webdoc() # Documention site\n >>> sm.webdoc('glm') # Search for glm in docs\n >>> sm.webdoc(sm.OLS, stable=False) # Go to generated help for OLS, devel\n\n Notes\n -----\n By default, open stable documentation if the current version of statsmodels\n is a release. Otherwise opens the development documentation.\n\n Uses the default system browser.\n \"\"\"\n stable = __version__ if 'dev' not in __version__ else stable\n url_or_error = _generate_url(arg, stable)\n if isinstance(url_or_error, ValueError):\n raise url_or_error\n webbrowser.open(url_or_error)\n return None\n", "path": "statsmodels/tools/web.py"}], "after_files": [{"content": "\"\"\"\nProvides a function to open the system browser to either search or go directly\nto a function's reference\n\"\"\"\nimport webbrowser\n\nfrom statsmodels.compat.python import urlencode, string_types\nfrom statsmodels import __version__\n\nBASE_URL = 'https://www.statsmodels.org/'\n\n\ndef _generate_url(arg, stable):\n \"\"\"\n Parse inputs and return a correctly formatted URL or raises ValueError\n if the input is not understandable\n \"\"\"\n url = BASE_URL\n if stable:\n url += 'stable/'\n else:\n url += 'devel/'\n\n if arg is None:\n return url\n elif isinstance(arg, string_types):\n url += 'search.html?'\n url += urlencode({'q': arg})\n url += '&check_keywords=yes&area=default'\n else:\n try:\n func = arg\n func_name = func.__name__\n func_module = func.__module__\n if not func_module.startswith('statsmodels.'):\n raise ValueError('Function must be from statsmodels')\n url += 'generated/'\n url += func_module + '.' + func_name + '.html'\n except AttributeError:\n raise ValueError('Input not understood')\n return url\n\n\ndef webdoc(arg=None, stable=None):\n \"\"\"\n Opens a browser and displays online documentation\n\n Parameters\n ----------\n arg, optional : string or statsmodels function\n Either a string to search the documentation or a function\n stable, optional : bool\n Flag indicating whether to use the stable documentation (True) or\n the development documentation (False). If not provided, opens\n the stable documentation if the current version of statsmodels is a\n release\n\n Examples\n --------\n >>> import statsmodels.api as sm\n >>> sm.webdoc() # Documention site\n >>> sm.webdoc('glm') # Search for glm in docs\n >>> sm.webdoc(sm.OLS, stable=False) # Go to generated help for OLS, devel\n\n Notes\n -----\n By default, open stable documentation if the current version of statsmodels\n is a release. Otherwise opens the development documentation.\n\n Uses the default system browser.\n \"\"\"\n stable = __version__ if 'dev' not in __version__ else stable\n url_or_error = _generate_url(arg, stable)\n webbrowser.open(url_or_error)\n return None\n", "path": "statsmodels/tools/web.py"}]} | 964 | 426 |
gh_patches_debug_30706 | rasdani/github-patches | git_diff | Lightning-Universe__lightning-flash-196 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add Pytorch-Image-Models to backbones
## 🚀 Feature
https://github.com/rwightman/pytorch-image-models#models
### Motivation
The torchvision models are very limiting. These models seem to perform best on Kaggle challenges, so it would be great to integrate them.
--- END ISSUE ---
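For reference, the pattern the eventual patch in this record uses to expose these models is timm's feature-extraction mode. A minimal sketch, assuming the `timm` package is installed; the model name is just an example:

```python
import timm

# timm ships hundreds of model names; list_models() enumerates them.
print(len(timm.list_models()))

# num_classes=0 drops the classifier head and global_pool='' keeps the
# spatial feature map, so the model can be reused as a backbone.
backbone = timm.create_model('resnet50', pretrained=True, num_classes=0, global_pool='')
num_features = backbone.num_features  # channel count of the final feature map
```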
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flash/vision/backbones.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Tuple
15
16 import torchvision
17 from pytorch_lightning.utilities import _BOLTS_AVAILABLE, rank_zero_warn
18 from pytorch_lightning.utilities.exceptions import MisconfigurationException
19 from torch import nn as nn
20 from torchvision.models.detection.backbone_utils import resnet_fpn_backbone
21
22 if _BOLTS_AVAILABLE:
23 from pl_bolts.models.self_supervised import SimCLR, SwAV
24
25 ROOT_S3_BUCKET = "https://pl-bolts-weights.s3.us-east-2.amazonaws.com"
26
27 MOBILENET_MODELS = ["mobilenet_v2"]
28 VGG_MODELS = ["vgg11", "vgg13", "vgg16", "vgg19"]
29 RESNET_MODELS = ["resnet18", "resnet34", "resnet50", "resnet101", "resnet152", "resnext50_32x4d", "resnext101_32x8d"]
30 DENSENET_MODELS = ["densenet121", "densenet169", "densenet161"]
31 TORCHVISION_MODELS = MOBILENET_MODELS + VGG_MODELS + RESNET_MODELS + DENSENET_MODELS
32
33 BOLTS_MODELS = ["simclr-imagenet", "swav-imagenet"]
34
35
36 def backbone_and_num_features(
37 model_name: str,
38 fpn: bool = False,
39 pretrained: bool = True,
40 trainable_backbone_layers: int = 3,
41 **kwargs
42 ) -> Tuple[nn.Module, int]:
43 """
44 Args:
45 model_name: backbone supported by `torchvision` and `bolts`
46 fpn: If True, creates a Feature Pyramind Network on top of Resnet based CNNs.
47 pretrained: if true, returns a model with backbone pre-trained on Imagenet
48 trainable_backbone_layers: number of trainable resnet layers starting from final block.
49
50 >>> backbone_and_num_features('mobilenet_v2') # doctest: +ELLIPSIS
51 (Sequential(...), 1280)
52 >>> backbone_and_num_features('resnet50', fpn=True) # doctest: +ELLIPSIS
53 (BackboneWithFPN(...), 256)
54 >>> backbone_and_num_features('swav-imagenet') # doctest: +ELLIPSIS
55 (Sequential(...), 2048)
56 """
57 if fpn:
58 if model_name in RESNET_MODELS:
59 backbone = resnet_fpn_backbone(
60 model_name, pretrained=pretrained, trainable_layers=trainable_backbone_layers, **kwargs
61 )
62 fpn_out_channels = 256
63 return backbone, fpn_out_channels
64 else:
65 rank_zero_warn(f"{model_name} backbone is not supported with `fpn=True`, `fpn` won't be added.")
66
67 if model_name in BOLTS_MODELS:
68 return bolts_backbone_and_num_features(model_name)
69
70 if model_name in TORCHVISION_MODELS:
71 return torchvision_backbone_and_num_features(model_name, pretrained)
72
73 raise ValueError(f"{model_name} is not supported yet.")
74
75
76 def bolts_backbone_and_num_features(model_name: str) -> Tuple[nn.Module, int]:
77 """
78 >>> bolts_backbone_and_num_features('simclr-imagenet') # doctest: +ELLIPSIS
79 (Sequential(...), 2048)
80 >>> bolts_backbone_and_num_features('swav-imagenet') # doctest: +ELLIPSIS
81 (Sequential(...), 2048)
82 """
83
84 # TODO: maybe we should plain pytorch weights so we don't need to rely on bolts to load these
85 # also mabye just use torchhub for the ssl lib
86 def load_simclr_imagenet(path_or_url: str = f"{ROOT_S3_BUCKET}/simclr/bolts_simclr_imagenet/simclr_imagenet.ckpt"):
87 simclr = SimCLR.load_from_checkpoint(path_or_url, strict=False)
88 # remove the last two layers & turn it into a Sequential model
89 backbone = nn.Sequential(*list(simclr.encoder.children())[:-2])
90 return backbone, 2048
91
92 def load_swav_imagenet(path_or_url: str = f"{ROOT_S3_BUCKET}/swav/swav_imagenet/swav_imagenet.pth.tar"):
93 swav = SwAV.load_from_checkpoint(path_or_url, strict=True)
94 # remove the last two layers & turn it into a Sequential model
95 backbone = nn.Sequential(*list(swav.model.children())[:-2])
96 return backbone, 2048
97
98 models = {
99 'simclr-imagenet': load_simclr_imagenet,
100 'swav-imagenet': load_swav_imagenet,
101 }
102 if not _BOLTS_AVAILABLE:
103 raise MisconfigurationException("Bolts isn't installed. Please, use ``pip install lightning-bolts``.")
104 if model_name in models:
105 return models[model_name]()
106
107 raise ValueError(f"{model_name} is not supported yet.")
108
109
110 def torchvision_backbone_and_num_features(model_name: str, pretrained: bool = True) -> Tuple[nn.Module, int]:
111 """
112 >>> torchvision_backbone_and_num_features('mobilenet_v2') # doctest: +ELLIPSIS
113 (Sequential(...), 1280)
114 >>> torchvision_backbone_and_num_features('resnet18') # doctest: +ELLIPSIS
115 (Sequential(...), 512)
116 >>> torchvision_backbone_and_num_features('densenet121') # doctest: +ELLIPSIS
117 (Sequential(...), 1024)
118 """
119 model = getattr(torchvision.models, model_name, None)
120 if model is None:
121 raise MisconfigurationException(f"{model_name} is not supported by torchvision")
122
123 if model_name in MOBILENET_MODELS + VGG_MODELS:
124 model = model(pretrained=pretrained)
125 backbone = model.features
126 num_features = 512 if model_name in VGG_MODELS else model.classifier[-1].in_features
127 return backbone, num_features
128
129 elif model_name in RESNET_MODELS:
130 model = model(pretrained=pretrained)
131 # remove the last two layers & turn it into a Sequential model
132 backbone = nn.Sequential(*list(model.children())[:-2])
133 num_features = model.fc.in_features
134 return backbone, num_features
135
136 elif model_name in DENSENET_MODELS:
137 model = model(pretrained=pretrained)
138 backbone = nn.Sequential(*model.features, nn.ReLU(inplace=True))
139 num_features = model.classifier.in_features
140 return backbone, num_features
141
142 raise ValueError(f"{model_name} is not supported yet.")
143
```
Path: `flash/utils/imports.py`
Content:
```
1 from pytorch_lightning.utilities.imports import _module_available
2
3 _TABNET_AVAILABLE = _module_available("pytorch_tabnet")
4 _KORNIA_AVAILABLE = _module_available("kornia")
5 _COCO_AVAILABLE = _module_available("pycocotools")
6
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/flash/utils/imports.py b/flash/utils/imports.py
--- a/flash/utils/imports.py
+++ b/flash/utils/imports.py
@@ -3,3 +3,4 @@
_TABNET_AVAILABLE = _module_available("pytorch_tabnet")
_KORNIA_AVAILABLE = _module_available("kornia")
_COCO_AVAILABLE = _module_available("pycocotools")
+_TIMM_AVAILABLE = _module_available("timm")
diff --git a/flash/vision/backbones.py b/flash/vision/backbones.py
--- a/flash/vision/backbones.py
+++ b/flash/vision/backbones.py
@@ -19,6 +19,11 @@
from torch import nn as nn
from torchvision.models.detection.backbone_utils import resnet_fpn_backbone
+from flash.utils.imports import _TIMM_AVAILABLE
+
+if _TIMM_AVAILABLE:
+ import timm
+
if _BOLTS_AVAILABLE:
from pl_bolts.models.self_supervised import SimCLR, SwAV
@@ -70,6 +75,9 @@
if model_name in TORCHVISION_MODELS:
return torchvision_backbone_and_num_features(model_name, pretrained)
+ if _TIMM_AVAILABLE and model_name in timm.list_models():
+ return timm_backbone_and_num_features(model_name, pretrained)
+
raise ValueError(f"{model_name} is not supported yet.")
@@ -140,3 +148,15 @@
return backbone, num_features
raise ValueError(f"{model_name} is not supported yet.")
+
+
+def timm_backbone_and_num_features(model_name: str, pretrained: bool = True) -> Tuple[nn.Module, int]:
+
+ if model_name in timm.list_models():
+ backbone = timm.create_model(model_name, pretrained=pretrained, num_classes=0, global_pool='')
+ num_features = backbone.num_features
+ return backbone, num_features
+
+ raise ValueError(
+ f"{model_name} is not supported in timm yet. https://rwightman.github.io/pytorch-image-models/models/"
+ )
| {"golden_diff": "diff --git a/flash/utils/imports.py b/flash/utils/imports.py\n--- a/flash/utils/imports.py\n+++ b/flash/utils/imports.py\n@@ -3,3 +3,4 @@\n _TABNET_AVAILABLE = _module_available(\"pytorch_tabnet\")\n _KORNIA_AVAILABLE = _module_available(\"kornia\")\n _COCO_AVAILABLE = _module_available(\"pycocotools\")\n+_TIMM_AVAILABLE = _module_available(\"timm\")\ndiff --git a/flash/vision/backbones.py b/flash/vision/backbones.py\n--- a/flash/vision/backbones.py\n+++ b/flash/vision/backbones.py\n@@ -19,6 +19,11 @@\n from torch import nn as nn\n from torchvision.models.detection.backbone_utils import resnet_fpn_backbone\n \n+from flash.utils.imports import _TIMM_AVAILABLE\n+\n+if _TIMM_AVAILABLE:\n+ import timm\n+\n if _BOLTS_AVAILABLE:\n from pl_bolts.models.self_supervised import SimCLR, SwAV\n \n@@ -70,6 +75,9 @@\n if model_name in TORCHVISION_MODELS:\n return torchvision_backbone_and_num_features(model_name, pretrained)\n \n+ if _TIMM_AVAILABLE and model_name in timm.list_models():\n+ return timm_backbone_and_num_features(model_name, pretrained)\n+\n raise ValueError(f\"{model_name} is not supported yet.\")\n \n \n@@ -140,3 +148,15 @@\n return backbone, num_features\n \n raise ValueError(f\"{model_name} is not supported yet.\")\n+\n+\n+def timm_backbone_and_num_features(model_name: str, pretrained: bool = True) -> Tuple[nn.Module, int]:\n+\n+ if model_name in timm.list_models():\n+ backbone = timm.create_model(model_name, pretrained=pretrained, num_classes=0, global_pool='')\n+ num_features = backbone.num_features\n+ return backbone, num_features\n+\n+ raise ValueError(\n+ f\"{model_name} is not supported in timm yet. https://rwightman.github.io/pytorch-image-models/models/\"\n+ )\n", "issue": "Add Pytorch-Image-Models to backbones\n## \ud83d\ude80 Feature\r\nhttps://github.com/rwightman/pytorch-image-models#models\r\n\r\n### Motivation\r\n\r\nThe torch vision models are very limiting these models seem to perform best on kaggle challenges would be great to integrate them \n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Tuple\n\nimport torchvision\nfrom pytorch_lightning.utilities import _BOLTS_AVAILABLE, rank_zero_warn\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom torch import nn as nn\nfrom torchvision.models.detection.backbone_utils import resnet_fpn_backbone\n\nif _BOLTS_AVAILABLE:\n from pl_bolts.models.self_supervised import SimCLR, SwAV\n\nROOT_S3_BUCKET = \"https://pl-bolts-weights.s3.us-east-2.amazonaws.com\"\n\nMOBILENET_MODELS = [\"mobilenet_v2\"]\nVGG_MODELS = [\"vgg11\", \"vgg13\", \"vgg16\", \"vgg19\"]\nRESNET_MODELS = [\"resnet18\", \"resnet34\", \"resnet50\", \"resnet101\", \"resnet152\", \"resnext50_32x4d\", \"resnext101_32x8d\"]\nDENSENET_MODELS = [\"densenet121\", \"densenet169\", \"densenet161\"]\nTORCHVISION_MODELS = MOBILENET_MODELS + VGG_MODELS + RESNET_MODELS + DENSENET_MODELS\n\nBOLTS_MODELS = [\"simclr-imagenet\", 
\"swav-imagenet\"]\n\n\ndef backbone_and_num_features(\n model_name: str,\n fpn: bool = False,\n pretrained: bool = True,\n trainable_backbone_layers: int = 3,\n **kwargs\n) -> Tuple[nn.Module, int]:\n \"\"\"\n Args:\n model_name: backbone supported by `torchvision` and `bolts`\n fpn: If True, creates a Feature Pyramind Network on top of Resnet based CNNs.\n pretrained: if true, returns a model with backbone pre-trained on Imagenet\n trainable_backbone_layers: number of trainable resnet layers starting from final block.\n\n >>> backbone_and_num_features('mobilenet_v2') # doctest: +ELLIPSIS\n (Sequential(...), 1280)\n >>> backbone_and_num_features('resnet50', fpn=True) # doctest: +ELLIPSIS\n (BackboneWithFPN(...), 256)\n >>> backbone_and_num_features('swav-imagenet') # doctest: +ELLIPSIS\n (Sequential(...), 2048)\n \"\"\"\n if fpn:\n if model_name in RESNET_MODELS:\n backbone = resnet_fpn_backbone(\n model_name, pretrained=pretrained, trainable_layers=trainable_backbone_layers, **kwargs\n )\n fpn_out_channels = 256\n return backbone, fpn_out_channels\n else:\n rank_zero_warn(f\"{model_name} backbone is not supported with `fpn=True`, `fpn` won't be added.\")\n\n if model_name in BOLTS_MODELS:\n return bolts_backbone_and_num_features(model_name)\n\n if model_name in TORCHVISION_MODELS:\n return torchvision_backbone_and_num_features(model_name, pretrained)\n\n raise ValueError(f\"{model_name} is not supported yet.\")\n\n\ndef bolts_backbone_and_num_features(model_name: str) -> Tuple[nn.Module, int]:\n \"\"\"\n >>> bolts_backbone_and_num_features('simclr-imagenet') # doctest: +ELLIPSIS\n (Sequential(...), 2048)\n >>> bolts_backbone_and_num_features('swav-imagenet') # doctest: +ELLIPSIS\n (Sequential(...), 2048)\n \"\"\"\n\n # TODO: maybe we should plain pytorch weights so we don't need to rely on bolts to load these\n # also mabye just use torchhub for the ssl lib\n def load_simclr_imagenet(path_or_url: str = f\"{ROOT_S3_BUCKET}/simclr/bolts_simclr_imagenet/simclr_imagenet.ckpt\"):\n simclr = SimCLR.load_from_checkpoint(path_or_url, strict=False)\n # remove the last two layers & turn it into a Sequential model\n backbone = nn.Sequential(*list(simclr.encoder.children())[:-2])\n return backbone, 2048\n\n def load_swav_imagenet(path_or_url: str = f\"{ROOT_S3_BUCKET}/swav/swav_imagenet/swav_imagenet.pth.tar\"):\n swav = SwAV.load_from_checkpoint(path_or_url, strict=True)\n # remove the last two layers & turn it into a Sequential model\n backbone = nn.Sequential(*list(swav.model.children())[:-2])\n return backbone, 2048\n\n models = {\n 'simclr-imagenet': load_simclr_imagenet,\n 'swav-imagenet': load_swav_imagenet,\n }\n if not _BOLTS_AVAILABLE:\n raise MisconfigurationException(\"Bolts isn't installed. 
Please, use ``pip install lightning-bolts``.\")\n if model_name in models:\n return models[model_name]()\n\n raise ValueError(f\"{model_name} is not supported yet.\")\n\n\ndef torchvision_backbone_and_num_features(model_name: str, pretrained: bool = True) -> Tuple[nn.Module, int]:\n \"\"\"\n >>> torchvision_backbone_and_num_features('mobilenet_v2') # doctest: +ELLIPSIS\n (Sequential(...), 1280)\n >>> torchvision_backbone_and_num_features('resnet18') # doctest: +ELLIPSIS\n (Sequential(...), 512)\n >>> torchvision_backbone_and_num_features('densenet121') # doctest: +ELLIPSIS\n (Sequential(...), 1024)\n \"\"\"\n model = getattr(torchvision.models, model_name, None)\n if model is None:\n raise MisconfigurationException(f\"{model_name} is not supported by torchvision\")\n\n if model_name in MOBILENET_MODELS + VGG_MODELS:\n model = model(pretrained=pretrained)\n backbone = model.features\n num_features = 512 if model_name in VGG_MODELS else model.classifier[-1].in_features\n return backbone, num_features\n\n elif model_name in RESNET_MODELS:\n model = model(pretrained=pretrained)\n # remove the last two layers & turn it into a Sequential model\n backbone = nn.Sequential(*list(model.children())[:-2])\n num_features = model.fc.in_features\n return backbone, num_features\n\n elif model_name in DENSENET_MODELS:\n model = model(pretrained=pretrained)\n backbone = nn.Sequential(*model.features, nn.ReLU(inplace=True))\n num_features = model.classifier.in_features\n return backbone, num_features\n\n raise ValueError(f\"{model_name} is not supported yet.\")\n", "path": "flash/vision/backbones.py"}, {"content": "from pytorch_lightning.utilities.imports import _module_available\n\n_TABNET_AVAILABLE = _module_available(\"pytorch_tabnet\")\n_KORNIA_AVAILABLE = _module_available(\"kornia\")\n_COCO_AVAILABLE = _module_available(\"pycocotools\")\n", "path": "flash/utils/imports.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Tuple\n\nimport torchvision\nfrom pytorch_lightning.utilities import _BOLTS_AVAILABLE, rank_zero_warn\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom torch import nn as nn\nfrom torchvision.models.detection.backbone_utils import resnet_fpn_backbone\n\nfrom flash.utils.imports import _TIMM_AVAILABLE\n\nif _TIMM_AVAILABLE:\n import timm\n\nif _BOLTS_AVAILABLE:\n from pl_bolts.models.self_supervised import SimCLR, SwAV\n\nROOT_S3_BUCKET = \"https://pl-bolts-weights.s3.us-east-2.amazonaws.com\"\n\nMOBILENET_MODELS = [\"mobilenet_v2\"]\nVGG_MODELS = [\"vgg11\", \"vgg13\", \"vgg16\", \"vgg19\"]\nRESNET_MODELS = [\"resnet18\", \"resnet34\", \"resnet50\", \"resnet101\", \"resnet152\", \"resnext50_32x4d\", \"resnext101_32x8d\"]\nDENSENET_MODELS = [\"densenet121\", \"densenet169\", \"densenet161\"]\nTORCHVISION_MODELS = MOBILENET_MODELS + VGG_MODELS + RESNET_MODELS + DENSENET_MODELS\n\nBOLTS_MODELS = [\"simclr-imagenet\", \"swav-imagenet\"]\n\n\ndef 
backbone_and_num_features(\n model_name: str,\n fpn: bool = False,\n pretrained: bool = True,\n trainable_backbone_layers: int = 3,\n **kwargs\n) -> Tuple[nn.Module, int]:\n \"\"\"\n Args:\n model_name: backbone supported by `torchvision` and `bolts`\n fpn: If True, creates a Feature Pyramind Network on top of Resnet based CNNs.\n pretrained: if true, returns a model with backbone pre-trained on Imagenet\n trainable_backbone_layers: number of trainable resnet layers starting from final block.\n\n >>> backbone_and_num_features('mobilenet_v2') # doctest: +ELLIPSIS\n (Sequential(...), 1280)\n >>> backbone_and_num_features('resnet50', fpn=True) # doctest: +ELLIPSIS\n (BackboneWithFPN(...), 256)\n >>> backbone_and_num_features('swav-imagenet') # doctest: +ELLIPSIS\n (Sequential(...), 2048)\n \"\"\"\n if fpn:\n if model_name in RESNET_MODELS:\n backbone = resnet_fpn_backbone(\n model_name, pretrained=pretrained, trainable_layers=trainable_backbone_layers, **kwargs\n )\n fpn_out_channels = 256\n return backbone, fpn_out_channels\n else:\n rank_zero_warn(f\"{model_name} backbone is not supported with `fpn=True`, `fpn` won't be added.\")\n\n if model_name in BOLTS_MODELS:\n return bolts_backbone_and_num_features(model_name)\n\n if model_name in TORCHVISION_MODELS:\n return torchvision_backbone_and_num_features(model_name, pretrained)\n\n if _TIMM_AVAILABLE and model_name in timm.list_models():\n return timm_backbone_and_num_features(model_name, pretrained)\n\n raise ValueError(f\"{model_name} is not supported yet.\")\n\n\ndef bolts_backbone_and_num_features(model_name: str) -> Tuple[nn.Module, int]:\n \"\"\"\n >>> bolts_backbone_and_num_features('simclr-imagenet') # doctest: +ELLIPSIS\n (Sequential(...), 2048)\n >>> bolts_backbone_and_num_features('swav-imagenet') # doctest: +ELLIPSIS\n (Sequential(...), 2048)\n \"\"\"\n\n # TODO: maybe we should plain pytorch weights so we don't need to rely on bolts to load these\n # also mabye just use torchhub for the ssl lib\n def load_simclr_imagenet(path_or_url: str = f\"{ROOT_S3_BUCKET}/simclr/bolts_simclr_imagenet/simclr_imagenet.ckpt\"):\n simclr = SimCLR.load_from_checkpoint(path_or_url, strict=False)\n # remove the last two layers & turn it into a Sequential model\n backbone = nn.Sequential(*list(simclr.encoder.children())[:-2])\n return backbone, 2048\n\n def load_swav_imagenet(path_or_url: str = f\"{ROOT_S3_BUCKET}/swav/swav_imagenet/swav_imagenet.pth.tar\"):\n swav = SwAV.load_from_checkpoint(path_or_url, strict=True)\n # remove the last two layers & turn it into a Sequential model\n backbone = nn.Sequential(*list(swav.model.children())[:-2])\n return backbone, 2048\n\n models = {\n 'simclr-imagenet': load_simclr_imagenet,\n 'swav-imagenet': load_swav_imagenet,\n }\n if not _BOLTS_AVAILABLE:\n raise MisconfigurationException(\"Bolts isn't installed. 
Please, use ``pip install lightning-bolts``.\")\n if model_name in models:\n return models[model_name]()\n\n raise ValueError(f\"{model_name} is not supported yet.\")\n\n\ndef torchvision_backbone_and_num_features(model_name: str, pretrained: bool = True) -> Tuple[nn.Module, int]:\n \"\"\"\n >>> torchvision_backbone_and_num_features('mobilenet_v2') # doctest: +ELLIPSIS\n (Sequential(...), 1280)\n >>> torchvision_backbone_and_num_features('resnet18') # doctest: +ELLIPSIS\n (Sequential(...), 512)\n >>> torchvision_backbone_and_num_features('densenet121') # doctest: +ELLIPSIS\n (Sequential(...), 1024)\n \"\"\"\n model = getattr(torchvision.models, model_name, None)\n if model is None:\n raise MisconfigurationException(f\"{model_name} is not supported by torchvision\")\n\n if model_name in MOBILENET_MODELS + VGG_MODELS:\n model = model(pretrained=pretrained)\n backbone = model.features\n num_features = 512 if model_name in VGG_MODELS else model.classifier[-1].in_features\n return backbone, num_features\n\n elif model_name in RESNET_MODELS:\n model = model(pretrained=pretrained)\n # remove the last two layers & turn it into a Sequential model\n backbone = nn.Sequential(*list(model.children())[:-2])\n num_features = model.fc.in_features\n return backbone, num_features\n\n elif model_name in DENSENET_MODELS:\n model = model(pretrained=pretrained)\n backbone = nn.Sequential(*model.features, nn.ReLU(inplace=True))\n num_features = model.classifier.in_features\n return backbone, num_features\n\n raise ValueError(f\"{model_name} is not supported yet.\")\n\n\ndef timm_backbone_and_num_features(model_name: str, pretrained: bool = True) -> Tuple[nn.Module, int]:\n\n if model_name in timm.list_models():\n backbone = timm.create_model(model_name, pretrained=pretrained, num_classes=0, global_pool='')\n num_features = backbone.num_features\n return backbone, num_features\n\n raise ValueError(\n f\"{model_name} is not supported in timm yet. https://rwightman.github.io/pytorch-image-models/models/\"\n )\n", "path": "flash/vision/backbones.py"}, {"content": "from pytorch_lightning.utilities.imports import _module_available\n\n_TABNET_AVAILABLE = _module_available(\"pytorch_tabnet\")\n_KORNIA_AVAILABLE = _module_available(\"kornia\")\n_COCO_AVAILABLE = _module_available(\"pycocotools\")\n_TIMM_AVAILABLE = _module_available(\"timm\")\n", "path": "flash/utils/imports.py"}]} | 2,345 | 467 |
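For context, the (truncated) row above wires a timm-backed backbone loader into Flash via `timm.create_model(..., num_classes=0, global_pool='')`. A minimal, hypothetical sketch of what that call yields — the model name, shapes, and printouts are illustrative assumptions, not taken from the dataset:

```python
# Illustrative sketch only (not part of the dataset row); assumes `timm` and `torch`
# are installed and uses "resnet34" purely as an example model name.
import timm
import torch

# num_classes=0 + global_pool="" return the bare feature extractor, which is what
# the patched timm_backbone_and_num_features relies on.
backbone = timm.create_model("resnet34", pretrained=False, num_classes=0, global_pool="")
num_features = backbone.num_features                # 512 for resnet34
features = backbone(torch.randn(1, 3, 224, 224))    # unpooled feature map
print(num_features, tuple(features.shape))          # 512 (1, 512, 7, 7)
```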
gh_patches_debug_26340 | rasdani/github-patches | git_diff | RedHatInsights__insights-core-2465 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bond parser is not collecting data for currently active primary_slave
The current parser is unable to collect data for the "Primary Slave" key.
```
Bonding Mode: fault-tolerance (active-backup)
**Primary Slave: eth6 (primary_reselect failure)**
Currently Active Slave: eth7
MII Status: up
MII Polling Interval (ms): 200
Up Delay (ms): 0
Down Delay (ms): 0
```
Requested a PR: https://github.com/RedHatInsights/insights-core/pull/2465
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `insights/parsers/bond.py`
Content:
```
1 """
2 Bond - file ``/proc/net/bonding``
3 =================================
4
5 Provides plugins access to the network bonding information gathered from
6 all the files starteing with "bond." located in the
7 ``/proc/net/bonding`` directory.
8
9 Typical content of ``bond.*`` file is::
10
11 Ethernet Channel Bonding Driver: v3.2.4 (January 28, 2008)
12
13 Bonding Mode: IEEE 802.3ad Dynamic link aggregation
14 Transmit Hash Policy: layer2 (0)
15 MII Status: up
16 MII Polling Interval (ms): 500
17 Up Delay (ms): 0
18 Down Delay (ms): 0
19
20 802.3ad info
21 LACP rate: slow
22 Active Aggregator Info:
23 Aggregator ID: 3
24 Number of ports: 1
25 Actor Key: 17
26 Partner Key: 1
27 Partner Mac Address: 00:00:00:00:00:00
28
29 Slave Interface: eth1
30 MII Status: up
31 Link Failure Count: 0
32 Permanent HW addr: 00:16:35:5e:42:fc
33 Aggregator ID: 3
34
35 Slave Interface: eth2
36 MII Status: up
37 Link Failure Count: 0
38 Permanent HW addr: 00:16:35:5e:02:7e
39 Aggregator ID: 2
40
41 Data is modeled as an array of ``Bond`` objects (``bond`` being a
42 pattern file specification gathering data from files located in
43 ``/proc/net/bonding``.
44
45 Examples:
46 >>> type(bond_info)
47 <class 'insights.parsers.bond.Bond'>
48 >>> bond_info.bond_mode
49 '4'
50 >>> bond_info.partner_mac_address
51 '00:00:00:00:00:00'
52 >>> bond_info.slave_interface
53 ['eth1', 'eth2']
54 >>> bond_info.aggregator_id
55 ['3', '3', '2']
56 >>> bond_info.xmit_hash_policy
57 'layer2'
58 >>> bond_info.active_slave
59 >>> bond_info.slave_duplex
60 ['full', 'full']
61 >>> bond_info.slave_speed
62 ['1000 Mbps', '1000 Mbps']
63 """
64
65 from insights import Parser, parser, get_active_lines
66 from insights.specs import Specs
67 from insights.parsers import ParseException
68
69
70 """dict: bonding mode parameter string linked to bond type index."""
71 BOND_PREFIX_MAP = {
72 'load balancing (round-robin)': '0',
73 'fault-tolerance (active-backup)': '1',
74 'fault-tolerance (active-backup) (fail_over_mac active)': '1',
75 'load balancing (xor)': '2',
76 'fault-tolerance (broadcast)': '3',
77 'IEEE 802.3ad Dynamic link aggregation': '4',
78 'transmit load balancing': '5',
79 'adaptive load balancing': '6'
80 }
81
82
83 @parser(Specs.bond)
84 class Bond(Parser):
85 """
86 Models the ``/proc/net/bonding`` file.
87
88 Currently used information from ``/proc/net/bonding`` includes
89 the "bond mode" and "partner mac address".
90 """
91
92 def parse_content(self, content):
93 self._bond_mode = None
94 self._partner_mac_address = None
95 self._active_slave = None
96 self.xmit_hash_policy = None
97 self._arp_polling_interval = None
98 self._arp_ip_target = None
99 self._slave_interface = []
100 self._aggregator_id = []
101 self._mii_status = []
102 self._slave_link_failure_count = []
103 self._slave_speed = []
104 self._slave_duplex = []
105
106 for line in get_active_lines(content):
107 if line.startswith("Bonding Mode: "):
108 raw_mode = line.split(":", 1)[1].strip()
109 self._bond_mode = raw_mode
110 if raw_mode in BOND_PREFIX_MAP:
111 self._bond_mode = BOND_PREFIX_MAP[raw_mode]
112 else:
113 raise ParseException("Unrecognised bonding mode '{b}'".format(b=raw_mode))
114 elif line.startswith("Partner Mac Address: "):
115 self._partner_mac_address = line.split(":", 1)[1].strip()
116 elif line.startswith("Slave Interface: "):
117 self._slave_interface.append(line.split(":", 1)[1].strip())
118 elif line.strip().startswith("Aggregator ID: "):
119 self._aggregator_id.append(line.strip().split(':', 1)[1].strip())
120 elif line.strip().startswith("Transmit Hash Policy"):
121 # No need of values in bracket:
122 # Integer notification (0), (1), (2) of layer2, layer3+4, layer2+3 resp
123 self.xmit_hash_policy = line.split(":", 1)[1].split()[0]
124 elif line.strip().startswith("Currently Active Slave"):
125 self._active_slave = line.split(":", 1)[1].split()[0]
126 elif line.strip().startswith("MII Status: "):
127 self._mii_status.append(line.strip().split(':', 1)[1].strip())
128 elif line.strip().startswith("Link Failure Count: "):
129 self._slave_link_failure_count.append(line.strip().split(':', 1)[1].strip())
130 elif line.strip().startswith("Speed: "):
131 self._slave_speed.append(line.strip().split(':', 1)[1].strip())
132 elif line.strip().startswith("Duplex: "):
133 self._slave_duplex.append(line.strip().split(':', 1)[1].strip())
134 elif line.strip().startswith("ARP Polling Interval (ms):"):
135 self._arp_polling_interval = line.strip().split(':', 1)[1].strip()
136 elif line.strip().startswith("ARP IP target/s (n.n.n.n form):"):
137 self._arp_ip_target = line.strip().split(':', 1)[1].strip()
138
139 @property
140 def bond_mode(self):
141 """Returns the bond mode number as a string, or if there is no
142 known mapping to a number, the raw "Bonding Mode" value.
143 ``None`` is returned if no "Bonding Mode" key is found.
144 """
145 return self._bond_mode
146
147 @property
148 def partner_mac_address(self):
149 """Returns the value of the "Partner Mac Address" in the bond
150 file if the key/value exists. If the key is not in the bond
151 file, ``None`` is returned.
152 """
153 return self._partner_mac_address
154
155 @property
156 def slave_interface(self):
157 """Returns all the slave interfaces of in the bond file wrapped
158 a list if the key/value exists. If the key is not in the
159 bond file, ``[]`` is returned.
160 """
161 return self._slave_interface
162
163 @property
164 def aggregator_id(self):
165 """Returns all the aggregator id of in the bond file wrapped
166 a list if the key/value exists. If the key is not in the
167 bond file, ``[]`` is returned.
168 """
169 return self._aggregator_id
170
171 @property
172 def active_slave(self):
173 """Returns the active slave of the "Currently Active Slave" in the bond
174 file if key/value exists. If the key is not in the bond file, ``None``
175 is returned.
176 """
177 return self._active_slave
178
179 @property
180 def mii_status(self):
181 """Returns the master and all the slaves "MII Status" value in the bond file wrapped
182 a list if the key/value exists. If the key is not in the
183 bond file, ``[]`` is returned.
184 """
185 return self._mii_status
186
187 @property
188 def slave_link_failure_count(self):
189 """Returns all the slaves "Link Failure Count" value in the bond file wrapped
190 a list if the key/value exists. If the key is not in the
191 bond file, ``[]`` is returned.
192 """
193 return self._slave_link_failure_count
194
195 @property
196 def slave_speed(self):
197 """Returns all the slaves "Speed" value in the bond file wrapped
198 a list if the key/value exists. If the key is not in the
199 bond file, ``[]`` is returned.
200 """
201 return self._slave_speed
202
203 @property
204 def slave_duplex(self):
205 """Returns all the slave "Duplex" value in the bond file wrapped
206 a list if the key/value exists. If the key is not in the
207 bond file, ``[]`` is returned.
208 """
209 return self._slave_duplex
210
211 @property
212 def arp_polling_interval(self):
213 """Returns the arp polling interval as a string. ``None`` is returned
214 if no "ARP Polling Interval (ms)" key is found.
215 """
216 return self._arp_polling_interval
217
218 @property
219 def arp_ip_target(self):
220 """Returns the arp ip target as a string. ``None`` is returned
221 if no "ARP IP target/s (n.n.n.n form)" key is found.
222 """
223 return self._arp_ip_target
224
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/insights/parsers/bond.py b/insights/parsers/bond.py
--- a/insights/parsers/bond.py
+++ b/insights/parsers/bond.py
@@ -102,6 +102,7 @@
self._slave_link_failure_count = []
self._slave_speed = []
self._slave_duplex = []
+ self._primary_slave = None
for line in get_active_lines(content):
if line.startswith("Bonding Mode: "):
@@ -135,6 +136,8 @@
self._arp_polling_interval = line.strip().split(':', 1)[1].strip()
elif line.strip().startswith("ARP IP target/s (n.n.n.n form):"):
self._arp_ip_target = line.strip().split(':', 1)[1].strip()
+ elif line.strip().startswith("Primary Slave"):
+ self._primary_slave = line.split(":", 1)[1].split()[0]
@property
def bond_mode(self):
@@ -221,3 +224,10 @@
if no "ARP IP target/s (n.n.n.n form)" key is found.
"""
return self._arp_ip_target
+
+ @property
+ def primary_slave(self):
+ """Returns the "Primary Slave" in the bond file if key/value exists.
+ If the key is not in the bond file, ``None`` is returned.
+ """
+ return self._primary_slave
| {"golden_diff": "diff --git a/insights/parsers/bond.py b/insights/parsers/bond.py\n--- a/insights/parsers/bond.py\n+++ b/insights/parsers/bond.py\n@@ -102,6 +102,7 @@\n self._slave_link_failure_count = []\n self._slave_speed = []\n self._slave_duplex = []\n+ self._primary_slave = None\n \n for line in get_active_lines(content):\n if line.startswith(\"Bonding Mode: \"):\n@@ -135,6 +136,8 @@\n self._arp_polling_interval = line.strip().split(':', 1)[1].strip()\n elif line.strip().startswith(\"ARP IP target/s (n.n.n.n form):\"):\n self._arp_ip_target = line.strip().split(':', 1)[1].strip()\n+ elif line.strip().startswith(\"Primary Slave\"):\n+ self._primary_slave = line.split(\":\", 1)[1].split()[0]\n \n @property\n def bond_mode(self):\n@@ -221,3 +224,10 @@\n if no \"ARP IP target/s (n.n.n.n form)\" key is found.\n \"\"\"\n return self._arp_ip_target\n+\n+ @property\n+ def primary_slave(self):\n+ \"\"\"Returns the \"Primary Slave\" in the bond file if key/value exists.\n+ If the key is not in the bond file, ``None`` is returned.\n+ \"\"\"\n+ return self._primary_slave\n", "issue": "Bond parser is not collecting data for currently active primary_slave\ncurrent parser is unable to collect data for \"Primary Slave\" key.\r\n\r\n```\r\nBonding Mode: fault-tolerance (active-backup)\r\n**Primary Slave: eth6 (primary_reselect failure)**\r\nCurrently Active Slave: eth7\r\nMII Status: up\r\nMII Polling Interval (ms): 200\r\nUp Delay (ms): 0\r\nDown Delay (ms): 0\r\n```\r\n\r\nRequested a PR: https://github.com/RedHatInsights/insights-core/pull/2465\n", "before_files": [{"content": "\"\"\"\nBond - file ``/proc/net/bonding``\n=================================\n\nProvides plugins access to the network bonding information gathered from\nall the files starteing with \"bond.\" located in the\n``/proc/net/bonding`` directory.\n\nTypical content of ``bond.*`` file is::\n\n Ethernet Channel Bonding Driver: v3.2.4 (January 28, 2008)\n\n Bonding Mode: IEEE 802.3ad Dynamic link aggregation\n Transmit Hash Policy: layer2 (0)\n MII Status: up\n MII Polling Interval (ms): 500\n Up Delay (ms): 0\n Down Delay (ms): 0\n\n 802.3ad info\n LACP rate: slow\n Active Aggregator Info:\n Aggregator ID: 3\n Number of ports: 1\n Actor Key: 17\n Partner Key: 1\n Partner Mac Address: 00:00:00:00:00:00\n\n Slave Interface: eth1\n MII Status: up\n Link Failure Count: 0\n Permanent HW addr: 00:16:35:5e:42:fc\n Aggregator ID: 3\n\n Slave Interface: eth2\n MII Status: up\n Link Failure Count: 0\n Permanent HW addr: 00:16:35:5e:02:7e\n Aggregator ID: 2\n\nData is modeled as an array of ``Bond`` objects (``bond`` being a\npattern file specification gathering data from files located in\n``/proc/net/bonding``.\n\nExamples:\n >>> type(bond_info)\n <class 'insights.parsers.bond.Bond'>\n >>> bond_info.bond_mode\n '4'\n >>> bond_info.partner_mac_address\n '00:00:00:00:00:00'\n >>> bond_info.slave_interface\n ['eth1', 'eth2']\n >>> bond_info.aggregator_id\n ['3', '3', '2']\n >>> bond_info.xmit_hash_policy\n 'layer2'\n >>> bond_info.active_slave\n >>> bond_info.slave_duplex\n ['full', 'full']\n >>> bond_info.slave_speed\n ['1000 Mbps', '1000 Mbps']\n\"\"\"\n\nfrom insights import Parser, parser, get_active_lines\nfrom insights.specs import Specs\nfrom insights.parsers import ParseException\n\n\n\"\"\"dict: bonding mode parameter string linked to bond type index.\"\"\"\nBOND_PREFIX_MAP = {\n 'load balancing (round-robin)': '0',\n 'fault-tolerance (active-backup)': '1',\n 'fault-tolerance (active-backup) (fail_over_mac active)': '1',\n 'load 
balancing (xor)': '2',\n 'fault-tolerance (broadcast)': '3',\n 'IEEE 802.3ad Dynamic link aggregation': '4',\n 'transmit load balancing': '5',\n 'adaptive load balancing': '6'\n}\n\n\n@parser(Specs.bond)\nclass Bond(Parser):\n \"\"\"\n Models the ``/proc/net/bonding`` file.\n\n Currently used information from ``/proc/net/bonding`` includes\n the \"bond mode\" and \"partner mac address\".\n \"\"\"\n\n def parse_content(self, content):\n self._bond_mode = None\n self._partner_mac_address = None\n self._active_slave = None\n self.xmit_hash_policy = None\n self._arp_polling_interval = None\n self._arp_ip_target = None\n self._slave_interface = []\n self._aggregator_id = []\n self._mii_status = []\n self._slave_link_failure_count = []\n self._slave_speed = []\n self._slave_duplex = []\n\n for line in get_active_lines(content):\n if line.startswith(\"Bonding Mode: \"):\n raw_mode = line.split(\":\", 1)[1].strip()\n self._bond_mode = raw_mode\n if raw_mode in BOND_PREFIX_MAP:\n self._bond_mode = BOND_PREFIX_MAP[raw_mode]\n else:\n raise ParseException(\"Unrecognised bonding mode '{b}'\".format(b=raw_mode))\n elif line.startswith(\"Partner Mac Address: \"):\n self._partner_mac_address = line.split(\":\", 1)[1].strip()\n elif line.startswith(\"Slave Interface: \"):\n self._slave_interface.append(line.split(\":\", 1)[1].strip())\n elif line.strip().startswith(\"Aggregator ID: \"):\n self._aggregator_id.append(line.strip().split(':', 1)[1].strip())\n elif line.strip().startswith(\"Transmit Hash Policy\"):\n # No need of values in bracket:\n # Integer notification (0), (1), (2) of layer2, layer3+4, layer2+3 resp\n self.xmit_hash_policy = line.split(\":\", 1)[1].split()[0]\n elif line.strip().startswith(\"Currently Active Slave\"):\n self._active_slave = line.split(\":\", 1)[1].split()[0]\n elif line.strip().startswith(\"MII Status: \"):\n self._mii_status.append(line.strip().split(':', 1)[1].strip())\n elif line.strip().startswith(\"Link Failure Count: \"):\n self._slave_link_failure_count.append(line.strip().split(':', 1)[1].strip())\n elif line.strip().startswith(\"Speed: \"):\n self._slave_speed.append(line.strip().split(':', 1)[1].strip())\n elif line.strip().startswith(\"Duplex: \"):\n self._slave_duplex.append(line.strip().split(':', 1)[1].strip())\n elif line.strip().startswith(\"ARP Polling Interval (ms):\"):\n self._arp_polling_interval = line.strip().split(':', 1)[1].strip()\n elif line.strip().startswith(\"ARP IP target/s (n.n.n.n form):\"):\n self._arp_ip_target = line.strip().split(':', 1)[1].strip()\n\n @property\n def bond_mode(self):\n \"\"\"Returns the bond mode number as a string, or if there is no\n known mapping to a number, the raw \"Bonding Mode\" value.\n ``None`` is returned if no \"Bonding Mode\" key is found.\n \"\"\"\n return self._bond_mode\n\n @property\n def partner_mac_address(self):\n \"\"\"Returns the value of the \"Partner Mac Address\" in the bond\n file if the key/value exists. If the key is not in the bond\n file, ``None`` is returned.\n \"\"\"\n return self._partner_mac_address\n\n @property\n def slave_interface(self):\n \"\"\"Returns all the slave interfaces of in the bond file wrapped\n a list if the key/value exists. If the key is not in the\n bond file, ``[]`` is returned.\n \"\"\"\n return self._slave_interface\n\n @property\n def aggregator_id(self):\n \"\"\"Returns all the aggregator id of in the bond file wrapped\n a list if the key/value exists. 
If the key is not in the\n bond file, ``[]`` is returned.\n \"\"\"\n return self._aggregator_id\n\n @property\n def active_slave(self):\n \"\"\"Returns the active slave of the \"Currently Active Slave\" in the bond\n file if key/value exists. If the key is not in the bond file, ``None``\n is returned.\n \"\"\"\n return self._active_slave\n\n @property\n def mii_status(self):\n \"\"\"Returns the master and all the slaves \"MII Status\" value in the bond file wrapped\n a list if the key/value exists. If the key is not in the\n bond file, ``[]`` is returned.\n \"\"\"\n return self._mii_status\n\n @property\n def slave_link_failure_count(self):\n \"\"\"Returns all the slaves \"Link Failure Count\" value in the bond file wrapped\n a list if the key/value exists. If the key is not in the\n bond file, ``[]`` is returned.\n \"\"\"\n return self._slave_link_failure_count\n\n @property\n def slave_speed(self):\n \"\"\"Returns all the slaves \"Speed\" value in the bond file wrapped\n a list if the key/value exists. If the key is not in the\n bond file, ``[]`` is returned.\n \"\"\"\n return self._slave_speed\n\n @property\n def slave_duplex(self):\n \"\"\"Returns all the slave \"Duplex\" value in the bond file wrapped\n a list if the key/value exists. If the key is not in the\n bond file, ``[]`` is returned.\n \"\"\"\n return self._slave_duplex\n\n @property\n def arp_polling_interval(self):\n \"\"\"Returns the arp polling interval as a string. ``None`` is returned\n if no \"ARP Polling Interval (ms)\" key is found.\n \"\"\"\n return self._arp_polling_interval\n\n @property\n def arp_ip_target(self):\n \"\"\"Returns the arp ip target as a string. ``None`` is returned\n if no \"ARP IP target/s (n.n.n.n form)\" key is found.\n \"\"\"\n return self._arp_ip_target\n", "path": "insights/parsers/bond.py"}], "after_files": [{"content": "\"\"\"\nBond - file ``/proc/net/bonding``\n=================================\n\nProvides plugins access to the network bonding information gathered from\nall the files starteing with \"bond.\" located in the\n``/proc/net/bonding`` directory.\n\nTypical content of ``bond.*`` file is::\n\n Ethernet Channel Bonding Driver: v3.2.4 (January 28, 2008)\n\n Bonding Mode: IEEE 802.3ad Dynamic link aggregation\n Transmit Hash Policy: layer2 (0)\n MII Status: up\n MII Polling Interval (ms): 500\n Up Delay (ms): 0\n Down Delay (ms): 0\n\n 802.3ad info\n LACP rate: slow\n Active Aggregator Info:\n Aggregator ID: 3\n Number of ports: 1\n Actor Key: 17\n Partner Key: 1\n Partner Mac Address: 00:00:00:00:00:00\n\n Slave Interface: eth1\n MII Status: up\n Link Failure Count: 0\n Permanent HW addr: 00:16:35:5e:42:fc\n Aggregator ID: 3\n\n Slave Interface: eth2\n MII Status: up\n Link Failure Count: 0\n Permanent HW addr: 00:16:35:5e:02:7e\n Aggregator ID: 2\n\nData is modeled as an array of ``Bond`` objects (``bond`` being a\npattern file specification gathering data from files located in\n``/proc/net/bonding``.\n\nExamples:\n >>> type(bond_info)\n <class 'insights.parsers.bond.Bond'>\n >>> bond_info.bond_mode\n '4'\n >>> bond_info.partner_mac_address\n '00:00:00:00:00:00'\n >>> bond_info.slave_interface\n ['eth1', 'eth2']\n >>> bond_info.aggregator_id\n ['3', '3', '2']\n >>> bond_info.xmit_hash_policy\n 'layer2'\n >>> bond_info.active_slave\n >>> bond_info.slave_duplex\n ['full', 'full']\n >>> bond_info.slave_speed\n ['1000 Mbps', '1000 Mbps']\n\"\"\"\n\nfrom insights import Parser, parser, get_active_lines\nfrom insights.specs import Specs\nfrom insights.parsers import 
ParseException\n\n\n\"\"\"dict: bonding mode parameter string linked to bond type index.\"\"\"\nBOND_PREFIX_MAP = {\n 'load balancing (round-robin)': '0',\n 'fault-tolerance (active-backup)': '1',\n 'fault-tolerance (active-backup) (fail_over_mac active)': '1',\n 'load balancing (xor)': '2',\n 'fault-tolerance (broadcast)': '3',\n 'IEEE 802.3ad Dynamic link aggregation': '4',\n 'transmit load balancing': '5',\n 'adaptive load balancing': '6'\n}\n\n\n@parser(Specs.bond)\nclass Bond(Parser):\n \"\"\"\n Models the ``/proc/net/bonding`` file.\n\n Currently used information from ``/proc/net/bonding`` includes\n the \"bond mode\" and \"partner mac address\".\n \"\"\"\n\n def parse_content(self, content):\n self._bond_mode = None\n self._partner_mac_address = None\n self._active_slave = None\n self.xmit_hash_policy = None\n self._arp_polling_interval = None\n self._arp_ip_target = None\n self._slave_interface = []\n self._aggregator_id = []\n self._mii_status = []\n self._slave_link_failure_count = []\n self._slave_speed = []\n self._slave_duplex = []\n self._primary_slave = None\n\n for line in get_active_lines(content):\n if line.startswith(\"Bonding Mode: \"):\n raw_mode = line.split(\":\", 1)[1].strip()\n self._bond_mode = raw_mode\n if raw_mode in BOND_PREFIX_MAP:\n self._bond_mode = BOND_PREFIX_MAP[raw_mode]\n else:\n raise ParseException(\"Unrecognised bonding mode '{b}'\".format(b=raw_mode))\n elif line.startswith(\"Partner Mac Address: \"):\n self._partner_mac_address = line.split(\":\", 1)[1].strip()\n elif line.startswith(\"Slave Interface: \"):\n self._slave_interface.append(line.split(\":\", 1)[1].strip())\n elif line.strip().startswith(\"Aggregator ID: \"):\n self._aggregator_id.append(line.strip().split(':', 1)[1].strip())\n elif line.strip().startswith(\"Transmit Hash Policy\"):\n # No need of values in bracket:\n # Integer notification (0), (1), (2) of layer2, layer3+4, layer2+3 resp\n self.xmit_hash_policy = line.split(\":\", 1)[1].split()[0]\n elif line.strip().startswith(\"Currently Active Slave\"):\n self._active_slave = line.split(\":\", 1)[1].split()[0]\n elif line.strip().startswith(\"MII Status: \"):\n self._mii_status.append(line.strip().split(':', 1)[1].strip())\n elif line.strip().startswith(\"Link Failure Count: \"):\n self._slave_link_failure_count.append(line.strip().split(':', 1)[1].strip())\n elif line.strip().startswith(\"Speed: \"):\n self._slave_speed.append(line.strip().split(':', 1)[1].strip())\n elif line.strip().startswith(\"Duplex: \"):\n self._slave_duplex.append(line.strip().split(':', 1)[1].strip())\n elif line.strip().startswith(\"ARP Polling Interval (ms):\"):\n self._arp_polling_interval = line.strip().split(':', 1)[1].strip()\n elif line.strip().startswith(\"ARP IP target/s (n.n.n.n form):\"):\n self._arp_ip_target = line.strip().split(':', 1)[1].strip()\n elif line.strip().startswith(\"Primary Slave\"):\n self._primary_slave = line.split(\":\", 1)[1].split()[0]\n\n @property\n def bond_mode(self):\n \"\"\"Returns the bond mode number as a string, or if there is no\n known mapping to a number, the raw \"Bonding Mode\" value.\n ``None`` is returned if no \"Bonding Mode\" key is found.\n \"\"\"\n return self._bond_mode\n\n @property\n def partner_mac_address(self):\n \"\"\"Returns the value of the \"Partner Mac Address\" in the bond\n file if the key/value exists. 
If the key is not in the bond\n file, ``None`` is returned.\n \"\"\"\n return self._partner_mac_address\n\n @property\n def slave_interface(self):\n \"\"\"Returns all the slave interfaces of in the bond file wrapped\n a list if the key/value exists. If the key is not in the\n bond file, ``[]`` is returned.\n \"\"\"\n return self._slave_interface\n\n @property\n def aggregator_id(self):\n \"\"\"Returns all the aggregator id of in the bond file wrapped\n a list if the key/value exists. If the key is not in the\n bond file, ``[]`` is returned.\n \"\"\"\n return self._aggregator_id\n\n @property\n def active_slave(self):\n \"\"\"Returns the active slave of the \"Currently Active Slave\" in the bond\n file if key/value exists. If the key is not in the bond file, ``None``\n is returned.\n \"\"\"\n return self._active_slave\n\n @property\n def mii_status(self):\n \"\"\"Returns the master and all the slaves \"MII Status\" value in the bond file wrapped\n a list if the key/value exists. If the key is not in the\n bond file, ``[]`` is returned.\n \"\"\"\n return self._mii_status\n\n @property\n def slave_link_failure_count(self):\n \"\"\"Returns all the slaves \"Link Failure Count\" value in the bond file wrapped\n a list if the key/value exists. If the key is not in the\n bond file, ``[]`` is returned.\n \"\"\"\n return self._slave_link_failure_count\n\n @property\n def slave_speed(self):\n \"\"\"Returns all the slaves \"Speed\" value in the bond file wrapped\n a list if the key/value exists. If the key is not in the\n bond file, ``[]`` is returned.\n \"\"\"\n return self._slave_speed\n\n @property\n def slave_duplex(self):\n \"\"\"Returns all the slave \"Duplex\" value in the bond file wrapped\n a list if the key/value exists. If the key is not in the\n bond file, ``[]`` is returned.\n \"\"\"\n return self._slave_duplex\n\n @property\n def arp_polling_interval(self):\n \"\"\"Returns the arp polling interval as a string. ``None`` is returned\n if no \"ARP Polling Interval (ms)\" key is found.\n \"\"\"\n return self._arp_polling_interval\n\n @property\n def arp_ip_target(self):\n \"\"\"Returns the arp ip target as a string. ``None`` is returned\n if no \"ARP IP target/s (n.n.n.n form)\" key is found.\n \"\"\"\n return self._arp_ip_target\n\n @property\n def primary_slave(self):\n \"\"\"Returns the \"Primary Slave\" in the bond file if key/value exists.\n If the key is not in the bond file, ``None`` is returned.\n \"\"\"\n return self._primary_slave\n", "path": "insights/parsers/bond.py"}]} | 2,983 | 329 |
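For reference, a minimal usage sketch of the patched `Bond` parser from the row above. It assumes insights-core with this patch applied and the `context_wrap` helper from `insights.tests`; the sample content is adapted from the issue text:

```python
# Illustrative sketch only (not part of the dataset row).
from insights.parsers.bond import Bond
from insights.tests import context_wrap  # standard test helper in insights-core

BOND_0 = """
Bonding Mode: fault-tolerance (active-backup)
Primary Slave: eth6 (primary_reselect failure)
Currently Active Slave: eth7
MII Status: up
MII Polling Interval (ms): 200
""".strip()

bond = Bond(context_wrap(BOND_0, path="/proc/net/bonding/bond0"))
print(bond.bond_mode)      # '1'    -- mapped from "fault-tolerance (active-backup)"
print(bond.primary_slave)  # 'eth6' -- interface name only; "(primary_reselect failure)" is dropped
print(bond.active_slave)   # 'eth7'
```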
gh_patches_debug_44524 | rasdani/github-patches | git_diff | fal-ai__dbt-fal-28 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[DevEx] Better error messaging when a dbt run is missing
When the developer doesn't run `dbt run` but has models with fal scripts, those models don't have any run results, and the error we raise isn't very descriptive. If we don't find run results, 99% of the time it's because of a missing or wrong `dbt run`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/faldbt/parse.py`
Content:
```
1 import os
2 import json
3 import glob
4 from typing import Dict, Any
5
6 import dbt.tracking
7 from dbt.contracts.results import RunResultsArtifact
8
9 import faldbt.lib as lib
10 from faldbt.utils.yaml_helper import load_yaml_text
11 from faldbt.project import DbtProject, DbtManifest, DbtRunResult
12
13
14 class FalParseError(Exception):
15 pass
16
17
18 def _load_file_contents(path: str, strip: bool = True) -> str:
19 with open(path, "rb") as handle:
20 to_return = handle.read().decode("utf-8")
21
22 if strip:
23 to_return = to_return.strip()
24
25 return to_return
26
27
28 def _load_yaml(path):
29 contents = _load_file_contents(path)
30 return load_yaml_text(contents)
31
32
33 def _read_json(path: str) -> Dict[str, Any]:
34 return json.loads(_load_file_contents(path))
35
36
37 def _flatten(t):
38 return [item for sublist in t for item in sublist]
39
40
41 def _get_all_model_config(project_root, project_dict):
42 return _flatten(
43 map(
44 ## find one with any kind yml this doesnt need to schema
45 ## look at all of them find the ones that has model in them
46 ## and keep remembering it
47 lambda model_path: glob.glob(
48 os.path.join(project_root, model_path, "**.yml"), recursive=True
49 ),
50 project_dict["source-paths"],
51 )
52 )
53
54
55 def parse_project(project_dir: str, profiles_dir: str, keyword: str):
56 project_dict = _get_project_dict(project_dir)
57 scripts = glob.glob(os.path.join(project_dir, "**.py"), recursive=True)
58 model_config_paths = _get_all_model_config(project_dir, project_dict)
59 target_path = os.path.join(project_dir, project_dict["target-path"])
60 run_result_path = os.path.join(target_path, "run_results.json")
61
62 config = lib.get_dbt_config(project_dir)
63 lib.register_adapters(config)
64
65 # Necessary for parse_to_manifest to not fail
66 dbt.tracking.initialize_tracking(profiles_dir)
67
68 manifest = lib.parse_to_manifest(config)
69 run_result_artifact = RunResultsArtifact(**_read_json(run_result_path))
70 dbtmanifest = DbtManifest(nativeManifest=manifest)
71
72 models = dbtmanifest.get_models()
73 status_map = dict(
74 map(lambda result: [result["unique_id"], result["status"]], run_result_artifact)
75 )
76 for model in models:
77 model.status = status_map[model.unique_id]
78
79 return DbtProject(
80 name=project_dict["name"],
81 model_config_paths=list(model_config_paths),
82 models=models,
83 manifest=DbtManifest(nativeManifest=manifest),
84 keyword=keyword,
85 scripts=scripts,
86 run_result=DbtRunResult(run_result_artifact),
87 )
88
89
90 def _get_project_dict(project_dir):
91 project_yaml_filepath = os.path.join(project_dir, "dbt_project.yml")
92
93 if not os.path.lexists(project_yaml_filepath):
94 raise FalParseError(
95 "no dbt_project.yml found at expected path {}".format(project_yaml_filepath)
96 )
97
98 project_dict = _load_yaml(project_yaml_filepath)
99
100 if not isinstance(project_dict, dict):
101 raise FalParseError("dbt_project.yml does not parse to a dictionary")
102
103 return project_dict
104
```
Path: `src/faldbt/project.py`
Content:
```
1 from dataclasses import dataclass, field
2 from typing import Dict, List, List, Any, TypeVar, Sequence
3 from dbt.contracts.graph.parsed import ParsedModelNode
4 from dbt.node_types import NodeType
5 from pathlib import Path
6 from dbt.contracts.graph.manifest import Manifest
7 from dbt.contracts.results import RunResultsArtifact, RunResultOutput
8
9
10 class FalGeneralException(Exception):
11 pass
12
13
14 @dataclass
15 class DbtModel:
16 node: ParsedModelNode
17 name: str = field(init=False)
18 meta: Dict[str, Any] = field(init=False)
19 status: str = field(init=False)
20 columns: Dict[str, Any] = field(init=False)
21
22 def __post_init__(self):
23 self.name = self.node.name
24 self.meta = self.node.config.meta
25 self.columns = self.node.columns
26 self.unique_id = self.node.unique_id
27
28
29 @dataclass
30 class DbtManifest:
31 nativeManifest: Manifest
32
33 def get_models(self) -> List[DbtModel]:
34 return list(
35 filter(
36 lambda model: model.node.resource_type == NodeType.Model,
37 map(
38 lambda node: DbtModel(node=node), self.nativeManifest.nodes.values()
39 ),
40 )
41 )
42
43
44 @dataclass
45 class DbtRunResult:
46 nativeRunResult: RunResultsArtifact
47 results: Sequence[RunResultOutput] = field(init=False)
48
49 def __post_init__(self):
50 self.results = self.nativeRunResult.results
51
52
53 T = TypeVar("T", bound="DbtProject")
54
55
56 @dataclass
57 class DbtProject:
58 name: str
59 model_config_paths: List[str]
60 models: List[DbtModel]
61 manifest: DbtManifest
62 keyword: str
63 scripts: List[Path]
64 run_result: DbtRunResult
65
66 def state_has_changed(self, other: DbtManifest) -> bool:
67 return self.manifest != other
68
69 def find_model_location(self, model: DbtModel) -> List[str]:
70 model_node = self.manifest.nodes[model.unique_id]
71 return model_node.relation_name.replace("`", "")
72
73 def changed_model_names(self) -> List[str]:
74 return list(
75 map(
76 lambda result: result["unique_id"].split(".")[-1],
77 self.run_result.results,
78 )
79 )
80
81 def get_models_with_keyword(self, keyword) -> List[DbtModel]:
82 return list(filter(lambda model: keyword in model.meta, self.models))
83
84 def get_filtered_models(self, all):
85 filtered_models: List[DbtModel] = []
86 for node in self.get_models_with_keyword(self.keyword):
87 if all:
88 filtered_models.append(node)
89 elif node.name in self.changed_model_names():
90 filtered_models.append(node)
91 else:
92 continue
93 return filtered_models
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/faldbt/parse.py b/src/faldbt/parse.py
--- a/src/faldbt/parse.py
+++ b/src/faldbt/parse.py
@@ -25,7 +25,7 @@
return to_return
-def _load_yaml(path):
+def _read_yaml(path):
contents = _load_file_contents(path)
return load_yaml_text(contents)
@@ -45,7 +45,8 @@
## look at all of them find the ones that has model in them
## and keep remembering it
lambda model_path: glob.glob(
- os.path.join(project_root, model_path, "**.yml"), recursive=True
+ os.path.join(project_root, model_path, "**.yml"),
+ recursive=True,
),
project_dict["source-paths"],
)
@@ -57,7 +58,12 @@
scripts = glob.glob(os.path.join(project_dir, "**.py"), recursive=True)
model_config_paths = _get_all_model_config(project_dir, project_dict)
target_path = os.path.join(project_dir, project_dict["target-path"])
- run_result_path = os.path.join(target_path, "run_results.json")
+
+ run_results_path = os.path.join(target_path, "run_results.json")
+ try:
+ run_results = _read_json(run_results_path)
+ except IOError as e:
+ raise FalParseError("Did you forget to run dbt run?") from e
config = lib.get_dbt_config(project_dir)
lib.register_adapters(config)
@@ -66,7 +72,7 @@
dbt.tracking.initialize_tracking(profiles_dir)
manifest = lib.parse_to_manifest(config)
- run_result_artifact = RunResultsArtifact(**_read_json(run_result_path))
+ run_result_artifact = RunResultsArtifact(**run_results)
dbtmanifest = DbtManifest(nativeManifest=manifest)
models = dbtmanifest.get_models()
@@ -74,7 +80,7 @@
map(lambda result: [result["unique_id"], result["status"]], run_result_artifact)
)
for model in models:
- model.status = status_map[model.unique_id]
+ model.status = status_map.get(model.unique_id)
return DbtProject(
name=project_dict["name"],
@@ -90,14 +96,14 @@
def _get_project_dict(project_dir):
project_yaml_filepath = os.path.join(project_dir, "dbt_project.yml")
- if not os.path.lexists(project_yaml_filepath):
+ if not os.path.exists(project_yaml_filepath):
raise FalParseError(
"no dbt_project.yml found at expected path {}".format(project_yaml_filepath)
)
- project_dict = _load_yaml(project_yaml_filepath)
+ project_dict = _read_yaml(project_yaml_filepath)
if not isinstance(project_dict, dict):
- raise FalParseError("dbt_project.yml does not parse to a dictionary")
+ raise FalParseError("dbt_project.yml formatting is wrong")
return project_dict
diff --git a/src/faldbt/project.py b/src/faldbt/project.py
--- a/src/faldbt/project.py
+++ b/src/faldbt/project.py
@@ -70,24 +70,20 @@
model_node = self.manifest.nodes[model.unique_id]
return model_node.relation_name.replace("`", "")
- def changed_model_names(self) -> List[str]:
- return list(
- map(
- lambda result: result["unique_id"].split(".")[-1],
- self.run_result.results,
- )
- )
+ def changed_models_ids(self) -> List[str]:
+ return list(map(lambda res: res["unique_id"], self.run_result.results))
def get_models_with_keyword(self, keyword) -> List[DbtModel]:
return list(filter(lambda model: keyword in model.meta, self.models))
def get_filtered_models(self, all):
+ changed_models_ids = self.changed_models_ids()
filtered_models: List[DbtModel] = []
+
for node in self.get_models_with_keyword(self.keyword):
if all:
filtered_models.append(node)
- elif node.name in self.changed_model_names():
+ elif node.unique_id in changed_models_ids:
filtered_models.append(node)
- else:
- continue
+
return filtered_models
| {"golden_diff": "diff --git a/src/faldbt/parse.py b/src/faldbt/parse.py\n--- a/src/faldbt/parse.py\n+++ b/src/faldbt/parse.py\n@@ -25,7 +25,7 @@\n return to_return\n \n \n-def _load_yaml(path):\n+def _read_yaml(path):\n contents = _load_file_contents(path)\n return load_yaml_text(contents)\n \n@@ -45,7 +45,8 @@\n ## look at all of them find the ones that has model in them\n ## and keep remembering it\n lambda model_path: glob.glob(\n- os.path.join(project_root, model_path, \"**.yml\"), recursive=True\n+ os.path.join(project_root, model_path, \"**.yml\"),\n+ recursive=True,\n ),\n project_dict[\"source-paths\"],\n )\n@@ -57,7 +58,12 @@\n scripts = glob.glob(os.path.join(project_dir, \"**.py\"), recursive=True)\n model_config_paths = _get_all_model_config(project_dir, project_dict)\n target_path = os.path.join(project_dir, project_dict[\"target-path\"])\n- run_result_path = os.path.join(target_path, \"run_results.json\")\n+\n+ run_results_path = os.path.join(target_path, \"run_results.json\")\n+ try:\n+ run_results = _read_json(run_results_path)\n+ except IOError as e:\n+ raise FalParseError(\"Did you forget to run dbt run?\") from e\n \n config = lib.get_dbt_config(project_dir)\n lib.register_adapters(config)\n@@ -66,7 +72,7 @@\n dbt.tracking.initialize_tracking(profiles_dir)\n \n manifest = lib.parse_to_manifest(config)\n- run_result_artifact = RunResultsArtifact(**_read_json(run_result_path))\n+ run_result_artifact = RunResultsArtifact(**run_results)\n dbtmanifest = DbtManifest(nativeManifest=manifest)\n \n models = dbtmanifest.get_models()\n@@ -74,7 +80,7 @@\n map(lambda result: [result[\"unique_id\"], result[\"status\"]], run_result_artifact)\n )\n for model in models:\n- model.status = status_map[model.unique_id]\n+ model.status = status_map.get(model.unique_id)\n \n return DbtProject(\n name=project_dict[\"name\"],\n@@ -90,14 +96,14 @@\n def _get_project_dict(project_dir):\n project_yaml_filepath = os.path.join(project_dir, \"dbt_project.yml\")\n \n- if not os.path.lexists(project_yaml_filepath):\n+ if not os.path.exists(project_yaml_filepath):\n raise FalParseError(\n \"no dbt_project.yml found at expected path {}\".format(project_yaml_filepath)\n )\n \n- project_dict = _load_yaml(project_yaml_filepath)\n+ project_dict = _read_yaml(project_yaml_filepath)\n \n if not isinstance(project_dict, dict):\n- raise FalParseError(\"dbt_project.yml does not parse to a dictionary\")\n+ raise FalParseError(\"dbt_project.yml formatting is wrong\")\n \n return project_dict\ndiff --git a/src/faldbt/project.py b/src/faldbt/project.py\n--- a/src/faldbt/project.py\n+++ b/src/faldbt/project.py\n@@ -70,24 +70,20 @@\n model_node = self.manifest.nodes[model.unique_id]\n return model_node.relation_name.replace(\"`\", \"\")\n \n- def changed_model_names(self) -> List[str]:\n- return list(\n- map(\n- lambda result: result[\"unique_id\"].split(\".\")[-1],\n- self.run_result.results,\n- )\n- )\n+ def changed_models_ids(self) -> List[str]:\n+ return list(map(lambda res: res[\"unique_id\"], self.run_result.results))\n \n def get_models_with_keyword(self, keyword) -> List[DbtModel]:\n return list(filter(lambda model: keyword in model.meta, self.models))\n \n def get_filtered_models(self, all):\n+ changed_models_ids = self.changed_models_ids()\n filtered_models: List[DbtModel] = []\n+\n for node in self.get_models_with_keyword(self.keyword):\n if all:\n filtered_models.append(node)\n- elif node.name in self.changed_model_names():\n+ elif node.unique_id in changed_models_ids:\n filtered_models.append(node)\n- 
else:\n- continue\n+\n return filtered_models\n", "issue": "[DevEx] Better error messaging when a dbt run is missing \nWhen the developer doesn't run dbt run but have models that have fal scripts those models dont have any run results.. and we error but not very descriptive, I think if we dont find run results 99% of the time its a wrong `dbt run`. \n", "before_files": [{"content": "import os\nimport json\nimport glob\nfrom typing import Dict, Any\n\nimport dbt.tracking\nfrom dbt.contracts.results import RunResultsArtifact\n\nimport faldbt.lib as lib\nfrom faldbt.utils.yaml_helper import load_yaml_text\nfrom faldbt.project import DbtProject, DbtManifest, DbtRunResult\n\n\nclass FalParseError(Exception):\n pass\n\n\ndef _load_file_contents(path: str, strip: bool = True) -> str:\n with open(path, \"rb\") as handle:\n to_return = handle.read().decode(\"utf-8\")\n\n if strip:\n to_return = to_return.strip()\n\n return to_return\n\n\ndef _load_yaml(path):\n contents = _load_file_contents(path)\n return load_yaml_text(contents)\n\n\ndef _read_json(path: str) -> Dict[str, Any]:\n return json.loads(_load_file_contents(path))\n\n\ndef _flatten(t):\n return [item for sublist in t for item in sublist]\n\n\ndef _get_all_model_config(project_root, project_dict):\n return _flatten(\n map(\n ## find one with any kind yml this doesnt need to schema\n ## look at all of them find the ones that has model in them\n ## and keep remembering it\n lambda model_path: glob.glob(\n os.path.join(project_root, model_path, \"**.yml\"), recursive=True\n ),\n project_dict[\"source-paths\"],\n )\n )\n\n\ndef parse_project(project_dir: str, profiles_dir: str, keyword: str):\n project_dict = _get_project_dict(project_dir)\n scripts = glob.glob(os.path.join(project_dir, \"**.py\"), recursive=True)\n model_config_paths = _get_all_model_config(project_dir, project_dict)\n target_path = os.path.join(project_dir, project_dict[\"target-path\"])\n run_result_path = os.path.join(target_path, \"run_results.json\")\n\n config = lib.get_dbt_config(project_dir)\n lib.register_adapters(config)\n\n # Necessary for parse_to_manifest to not fail\n dbt.tracking.initialize_tracking(profiles_dir)\n\n manifest = lib.parse_to_manifest(config)\n run_result_artifact = RunResultsArtifact(**_read_json(run_result_path))\n dbtmanifest = DbtManifest(nativeManifest=manifest)\n\n models = dbtmanifest.get_models()\n status_map = dict(\n map(lambda result: [result[\"unique_id\"], result[\"status\"]], run_result_artifact)\n )\n for model in models:\n model.status = status_map[model.unique_id]\n\n return DbtProject(\n name=project_dict[\"name\"],\n model_config_paths=list(model_config_paths),\n models=models,\n manifest=DbtManifest(nativeManifest=manifest),\n keyword=keyword,\n scripts=scripts,\n run_result=DbtRunResult(run_result_artifact),\n )\n\n\ndef _get_project_dict(project_dir):\n project_yaml_filepath = os.path.join(project_dir, \"dbt_project.yml\")\n\n if not os.path.lexists(project_yaml_filepath):\n raise FalParseError(\n \"no dbt_project.yml found at expected path {}\".format(project_yaml_filepath)\n )\n\n project_dict = _load_yaml(project_yaml_filepath)\n\n if not isinstance(project_dict, dict):\n raise FalParseError(\"dbt_project.yml does not parse to a dictionary\")\n\n return project_dict\n", "path": "src/faldbt/parse.py"}, {"content": "from dataclasses import dataclass, field\nfrom typing import Dict, List, List, Any, TypeVar, Sequence\nfrom dbt.contracts.graph.parsed import ParsedModelNode\nfrom dbt.node_types import NodeType\nfrom pathlib 
import Path\nfrom dbt.contracts.graph.manifest import Manifest\nfrom dbt.contracts.results import RunResultsArtifact, RunResultOutput\n\n\nclass FalGeneralException(Exception):\n pass\n\n\n@dataclass\nclass DbtModel:\n node: ParsedModelNode\n name: str = field(init=False)\n meta: Dict[str, Any] = field(init=False)\n status: str = field(init=False)\n columns: Dict[str, Any] = field(init=False)\n\n def __post_init__(self):\n self.name = self.node.name\n self.meta = self.node.config.meta\n self.columns = self.node.columns\n self.unique_id = self.node.unique_id\n\n\n@dataclass\nclass DbtManifest:\n nativeManifest: Manifest\n\n def get_models(self) -> List[DbtModel]:\n return list(\n filter(\n lambda model: model.node.resource_type == NodeType.Model,\n map(\n lambda node: DbtModel(node=node), self.nativeManifest.nodes.values()\n ),\n )\n )\n\n\n@dataclass\nclass DbtRunResult:\n nativeRunResult: RunResultsArtifact\n results: Sequence[RunResultOutput] = field(init=False)\n\n def __post_init__(self):\n self.results = self.nativeRunResult.results\n\n\nT = TypeVar(\"T\", bound=\"DbtProject\")\n\n\n@dataclass\nclass DbtProject:\n name: str\n model_config_paths: List[str]\n models: List[DbtModel]\n manifest: DbtManifest\n keyword: str\n scripts: List[Path]\n run_result: DbtRunResult\n\n def state_has_changed(self, other: DbtManifest) -> bool:\n return self.manifest != other\n\n def find_model_location(self, model: DbtModel) -> List[str]:\n model_node = self.manifest.nodes[model.unique_id]\n return model_node.relation_name.replace(\"`\", \"\")\n\n def changed_model_names(self) -> List[str]:\n return list(\n map(\n lambda result: result[\"unique_id\"].split(\".\")[-1],\n self.run_result.results,\n )\n )\n\n def get_models_with_keyword(self, keyword) -> List[DbtModel]:\n return list(filter(lambda model: keyword in model.meta, self.models))\n\n def get_filtered_models(self, all):\n filtered_models: List[DbtModel] = []\n for node in self.get_models_with_keyword(self.keyword):\n if all:\n filtered_models.append(node)\n elif node.name in self.changed_model_names():\n filtered_models.append(node)\n else:\n continue\n return filtered_models\n", "path": "src/faldbt/project.py"}], "after_files": [{"content": "import os\nimport json\nimport glob\nfrom typing import Dict, Any\n\nimport dbt.tracking\nfrom dbt.contracts.results import RunResultsArtifact\n\nimport faldbt.lib as lib\nfrom faldbt.utils.yaml_helper import load_yaml_text\nfrom faldbt.project import DbtProject, DbtManifest, DbtRunResult\n\n\nclass FalParseError(Exception):\n pass\n\n\ndef _load_file_contents(path: str, strip: bool = True) -> str:\n with open(path, \"rb\") as handle:\n to_return = handle.read().decode(\"utf-8\")\n\n if strip:\n to_return = to_return.strip()\n\n return to_return\n\n\ndef _read_yaml(path):\n contents = _load_file_contents(path)\n return load_yaml_text(contents)\n\n\ndef _read_json(path: str) -> Dict[str, Any]:\n return json.loads(_load_file_contents(path))\n\n\ndef _flatten(t):\n return [item for sublist in t for item in sublist]\n\n\ndef _get_all_model_config(project_root, project_dict):\n return _flatten(\n map(\n ## find one with any kind yml this doesnt need to schema\n ## look at all of them find the ones that has model in them\n ## and keep remembering it\n lambda model_path: glob.glob(\n os.path.join(project_root, model_path, \"**.yml\"),\n recursive=True,\n ),\n project_dict[\"source-paths\"],\n )\n )\n\n\ndef parse_project(project_dir: str, profiles_dir: str, keyword: str):\n project_dict = 
_get_project_dict(project_dir)\n scripts = glob.glob(os.path.join(project_dir, \"**.py\"), recursive=True)\n model_config_paths = _get_all_model_config(project_dir, project_dict)\n target_path = os.path.join(project_dir, project_dict[\"target-path\"])\n\n run_results_path = os.path.join(target_path, \"run_results.json\")\n try:\n run_results = _read_json(run_results_path)\n except IOError as e:\n raise FalParseError(\"Did you forget to run dbt run?\") from e\n\n config = lib.get_dbt_config(project_dir)\n lib.register_adapters(config)\n\n # Necessary for parse_to_manifest to not fail\n dbt.tracking.initialize_tracking(profiles_dir)\n\n manifest = lib.parse_to_manifest(config)\n run_result_artifact = RunResultsArtifact(**run_results)\n dbtmanifest = DbtManifest(nativeManifest=manifest)\n\n models = dbtmanifest.get_models()\n status_map = dict(\n map(lambda result: [result[\"unique_id\"], result[\"status\"]], run_result_artifact)\n )\n for model in models:\n model.status = status_map.get(model.unique_id)\n\n return DbtProject(\n name=project_dict[\"name\"],\n model_config_paths=list(model_config_paths),\n models=models,\n manifest=DbtManifest(nativeManifest=manifest),\n keyword=keyword,\n scripts=scripts,\n run_result=DbtRunResult(run_result_artifact),\n )\n\n\ndef _get_project_dict(project_dir):\n project_yaml_filepath = os.path.join(project_dir, \"dbt_project.yml\")\n\n if not os.path.exists(project_yaml_filepath):\n raise FalParseError(\n \"no dbt_project.yml found at expected path {}\".format(project_yaml_filepath)\n )\n\n project_dict = _read_yaml(project_yaml_filepath)\n\n if not isinstance(project_dict, dict):\n raise FalParseError(\"dbt_project.yml formatting is wrong\")\n\n return project_dict\n", "path": "src/faldbt/parse.py"}, {"content": "from dataclasses import dataclass, field\nfrom typing import Dict, List, List, Any, TypeVar, Sequence\nfrom dbt.contracts.graph.parsed import ParsedModelNode\nfrom dbt.node_types import NodeType\nfrom pathlib import Path\nfrom dbt.contracts.graph.manifest import Manifest\nfrom dbt.contracts.results import RunResultsArtifact, RunResultOutput\n\n\nclass FalGeneralException(Exception):\n pass\n\n\n@dataclass\nclass DbtModel:\n node: ParsedModelNode\n name: str = field(init=False)\n meta: Dict[str, Any] = field(init=False)\n status: str = field(init=False)\n columns: Dict[str, Any] = field(init=False)\n\n def __post_init__(self):\n self.name = self.node.name\n self.meta = self.node.config.meta\n self.columns = self.node.columns\n self.unique_id = self.node.unique_id\n\n\n@dataclass\nclass DbtManifest:\n nativeManifest: Manifest\n\n def get_models(self) -> List[DbtModel]:\n return list(\n filter(\n lambda model: model.node.resource_type == NodeType.Model,\n map(\n lambda node: DbtModel(node=node), self.nativeManifest.nodes.values()\n ),\n )\n )\n\n\n@dataclass\nclass DbtRunResult:\n nativeRunResult: RunResultsArtifact\n results: Sequence[RunResultOutput] = field(init=False)\n\n def __post_init__(self):\n self.results = self.nativeRunResult.results\n\n\nT = TypeVar(\"T\", bound=\"DbtProject\")\n\n\n@dataclass\nclass DbtProject:\n name: str\n model_config_paths: List[str]\n models: List[DbtModel]\n manifest: DbtManifest\n keyword: str\n scripts: List[Path]\n run_result: DbtRunResult\n\n def state_has_changed(self, other: DbtManifest) -> bool:\n return self.manifest != other\n\n def find_model_location(self, model: DbtModel) -> List[str]:\n model_node = self.manifest.nodes[model.unique_id]\n return model_node.relation_name.replace(\"`\", \"\")\n\n def 
changed_models_ids(self) -> List[str]:\n return list(map(lambda res: res[\"unique_id\"], self.run_result.results))\n\n def get_models_with_keyword(self, keyword) -> List[DbtModel]:\n return list(filter(lambda model: keyword in model.meta, self.models))\n\n def get_filtered_models(self, all):\n changed_models_ids = self.changed_models_ids()\n filtered_models: List[DbtModel] = []\n\n for node in self.get_models_with_keyword(self.keyword):\n if all:\n filtered_models.append(node)\n elif node.unique_id in changed_models_ids:\n filtered_models.append(node)\n\n return filtered_models\n", "path": "src/faldbt/project.py"}]} | 2,074 | 964 |
gh_patches_debug_21230 | rasdani/github-patches | git_diff | ckan__ckan-6008 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Flask-Babel does not translate zh_TW and zh_CN
### CKAN Version if known (or site URL)
≧ 2.8.0
### Please describe the expected behaviour
When switching to the ``zh_TW`` or ``zh_CN`` languages on the pages written in Flask (e.g. ``/`` and ``/user``), the pages should be shown in the corresponding languages.
### Please describe the actual behaviour
It shows English instead of ``zh_TW`` or ``zh_CN``.
### What steps can be taken to reproduce the issue?
Switch language to Chinese on the above-mentioned pages.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ckan/views/home.py`
Content:
```
1 # encoding: utf-8
2
3 from flask import Blueprint, abort
4
5 import ckan.model as model
6 import ckan.logic as logic
7 import ckan.lib.base as base
8 import ckan.lib.search as search
9 import ckan.lib.helpers as h
10
11 from ckan.common import g, config, _
12
13 CACHE_PARAMETERS = [u'__cache', u'__no_cache__']
14
15
16 home = Blueprint(u'home', __name__)
17
18
19 @home.before_request
20 def before_request():
21 u'''set context and check authorization'''
22 try:
23 context = {
24 u'model': model,
25 u'user': g.user,
26 u'auth_user_obj': g.userobj}
27 logic.check_access(u'site_read', context)
28 except logic.NotAuthorized:
29 abort(403)
30
31
32 def index():
33 u'''display home page'''
34 try:
35 context = {u'model': model, u'session': model.Session,
36 u'user': g.user, u'auth_user_obj': g.userobj}
37 data_dict = {u'q': u'*:*',
38 u'facet.field': h.facets(),
39 u'rows': 4,
40 u'start': 0,
41 u'sort': u'view_recent desc',
42 u'fq': u'capacity:"public"'}
43 query = logic.get_action(u'package_search')(context, data_dict)
44 g.search_facets = query['search_facets']
45 g.package_count = query['count']
46 g.datasets = query['results']
47
48 org_label = h.humanize_entity_type(
49 u'organization',
50 h.default_group_type(u'organization'),
51 u'facet label') or _(u'Organizations')
52
53 group_label = h.humanize_entity_type(
54 u'group',
55 h.default_group_type(u'group'),
56 u'facet label') or _(u'Groups')
57
58 g.facet_titles = {
59 u'organization': org_label,
60 u'groups': group_label,
61 u'tags': _(u'Tags'),
62 u'res_format': _(u'Formats'),
63 u'license': _(u'Licenses'),
64 }
65
66 except search.SearchError:
67 g.package_count = 0
68
69 if g.userobj and not g.userobj.email:
70 url = h.url_for(controller=u'user', action=u'edit')
71 msg = _(u'Please <a href="%s">update your profile</a>'
72 u' and add your email address. ') % url + \
73 _(u'%s uses your email address'
74 u' if you need to reset your password.') \
75 % config.get(u'ckan.site_title')
76 h.flash_notice(msg, allow_html=True)
77 return base.render(u'home/index.html', extra_vars={})
78
79
80 def about():
81 u''' display about page'''
82 return base.render(u'home/about.html', extra_vars={})
83
84
85 util_rules = [
86 (u'/', index),
87 (u'/about', about)
88 ]
89 for rule, view_func in util_rules:
90 home.add_url_rule(rule, view_func=view_func)
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ckan/views/home.py b/ckan/views/home.py
--- a/ckan/views/home.py
+++ b/ckan/views/home.py
@@ -1,6 +1,6 @@
# encoding: utf-8
-from flask import Blueprint, abort
+from flask import Blueprint, abort, redirect
import ckan.model as model
import ckan.logic as logic
@@ -82,9 +82,36 @@
return base.render(u'home/about.html', extra_vars={})
+def redirect_locale(target_locale, path=None):
+ target = f'/{target_locale}/{path}' if path else f'/{target_locale}'
+ return redirect(target, code=308)
+
+
util_rules = [
(u'/', index),
(u'/about', about)
]
for rule, view_func in util_rules:
home.add_url_rule(rule, view_func=view_func)
+
+locales_mapping = [
+ ('zh_TW', 'zh_Hant_TW'),
+ ('zh_CN', 'zh_Hans_CN'),
+]
+
+for locale in locales_mapping:
+
+ legacy_locale = locale[0]
+ new_locale = locale[1]
+
+ home.add_url_rule(
+ f'/{legacy_locale}/',
+ view_func=redirect_locale,
+ defaults={'target_locale': new_locale}
+ )
+
+ home.add_url_rule(
+ f'/{legacy_locale}/<path:path>',
+ view_func=redirect_locale,
+ defaults={'target_locale': new_locale}
+ )
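A note on the patch above: the legacy ``zh_TW``/``zh_CN`` URL prefixes are permanently redirected (HTTP 308) to the ``zh_Hant_TW``/``zh_Hans_CN`` locales that the Flask-served pages can actually translate. The sketch below exercises the same mapping in a bare Flask app rather than through CKAN's full routing and i18n stack, so the app wiring here is illustrative only:

```python
# Minimal sketch of the redirect rules added by the patch (bare Flask app, not CKAN wiring).
from flask import Flask, redirect

app = Flask(__name__)

def redirect_locale(target_locale, path=None):
    target = f"/{target_locale}/{path}" if path else f"/{target_locale}"
    return redirect(target, code=308)

app.add_url_rule("/zh_TW/", "legacy_zh_tw", redirect_locale,
                 defaults={"target_locale": "zh_Hant_TW"})
app.add_url_rule("/zh_TW/<path:path>", "legacy_zh_tw_path", redirect_locale,
                 defaults={"target_locale": "zh_Hant_TW"})

with app.test_client() as client:
    resp = client.get("/zh_TW/dataset", follow_redirects=False)
    assert resp.status_code == 308
    assert resp.headers["Location"].endswith("/zh_Hant_TW/dataset")
```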
| {"golden_diff": "diff --git a/ckan/views/home.py b/ckan/views/home.py\n--- a/ckan/views/home.py\n+++ b/ckan/views/home.py\n@@ -1,6 +1,6 @@\n # encoding: utf-8\n \n-from flask import Blueprint, abort\n+from flask import Blueprint, abort, redirect\n \n import ckan.model as model\n import ckan.logic as logic\n@@ -82,9 +82,36 @@\n return base.render(u'home/about.html', extra_vars={})\n \n \n+def redirect_locale(target_locale, path=None):\n+ target = f'/{target_locale}/{path}' if path else f'/{target_locale}'\n+ return redirect(target, code=308)\n+\n+\n util_rules = [\n (u'/', index),\n (u'/about', about)\n ]\n for rule, view_func in util_rules:\n home.add_url_rule(rule, view_func=view_func)\n+\n+locales_mapping = [\n+ ('zh_TW', 'zh_Hant_TW'),\n+ ('zh_CN', 'zh_Hans_CN'),\n+]\n+\n+for locale in locales_mapping:\n+\n+ legacy_locale = locale[0]\n+ new_locale = locale[1]\n+\n+ home.add_url_rule(\n+ f'/{legacy_locale}/',\n+ view_func=redirect_locale,\n+ defaults={'target_locale': new_locale}\n+ )\n+\n+ home.add_url_rule(\n+ f'/{legacy_locale}/<path:path>',\n+ view_func=redirect_locale,\n+ defaults={'target_locale': new_locale}\n+ )\n", "issue": "Flask-Babel does not translate zh_TW and zh_CN\n### CKAN Version if known (or site URL)\r\n\r\n\u2267 2.8.0\r\n\r\n### Please describe the expected behaviour\r\n\r\nWhen switching to ``zh_TW`` or ``zh_CN`` languages on the pages written in Flask (ex. ``/`` and ``/user``), the pages should be shown in corresponding languages.\r\n\r\n### Please describe the actual behaviour\r\n\r\nIt shows English instead of ``zh_TW`` or ``zh_CN``.\r\n\r\n### What steps can be taken to reproduce the issue? \r\n\r\nSwitch language to Chinese on the above-mentioned pages.\n", "before_files": [{"content": "# encoding: utf-8\n\nfrom flask import Blueprint, abort\n\nimport ckan.model as model\nimport ckan.logic as logic\nimport ckan.lib.base as base\nimport ckan.lib.search as search\nimport ckan.lib.helpers as h\n\nfrom ckan.common import g, config, _\n\nCACHE_PARAMETERS = [u'__cache', u'__no_cache__']\n\n\nhome = Blueprint(u'home', __name__)\n\n\[email protected]_request\ndef before_request():\n u'''set context and check authorization'''\n try:\n context = {\n u'model': model,\n u'user': g.user,\n u'auth_user_obj': g.userobj}\n logic.check_access(u'site_read', context)\n except logic.NotAuthorized:\n abort(403)\n\n\ndef index():\n u'''display home page'''\n try:\n context = {u'model': model, u'session': model.Session,\n u'user': g.user, u'auth_user_obj': g.userobj}\n data_dict = {u'q': u'*:*',\n u'facet.field': h.facets(),\n u'rows': 4,\n u'start': 0,\n u'sort': u'view_recent desc',\n u'fq': u'capacity:\"public\"'}\n query = logic.get_action(u'package_search')(context, data_dict)\n g.search_facets = query['search_facets']\n g.package_count = query['count']\n g.datasets = query['results']\n\n org_label = h.humanize_entity_type(\n u'organization',\n h.default_group_type(u'organization'),\n u'facet label') or _(u'Organizations')\n\n group_label = h.humanize_entity_type(\n u'group',\n h.default_group_type(u'group'),\n u'facet label') or _(u'Groups')\n\n g.facet_titles = {\n u'organization': org_label,\n u'groups': group_label,\n u'tags': _(u'Tags'),\n u'res_format': _(u'Formats'),\n u'license': _(u'Licenses'),\n }\n\n except search.SearchError:\n g.package_count = 0\n\n if g.userobj and not g.userobj.email:\n url = h.url_for(controller=u'user', action=u'edit')\n msg = _(u'Please <a href=\"%s\">update your profile</a>'\n u' and add your email address. 
') % url + \\\n _(u'%s uses your email address'\n u' if you need to reset your password.') \\\n % config.get(u'ckan.site_title')\n h.flash_notice(msg, allow_html=True)\n return base.render(u'home/index.html', extra_vars={})\n\n\ndef about():\n u''' display about page'''\n return base.render(u'home/about.html', extra_vars={})\n\n\nutil_rules = [\n (u'/', index),\n (u'/about', about)\n]\nfor rule, view_func in util_rules:\n home.add_url_rule(rule, view_func=view_func)\n", "path": "ckan/views/home.py"}], "after_files": [{"content": "# encoding: utf-8\n\nfrom flask import Blueprint, abort, redirect\n\nimport ckan.model as model\nimport ckan.logic as logic\nimport ckan.lib.base as base\nimport ckan.lib.search as search\nimport ckan.lib.helpers as h\n\nfrom ckan.common import g, config, _\n\nCACHE_PARAMETERS = [u'__cache', u'__no_cache__']\n\n\nhome = Blueprint(u'home', __name__)\n\n\[email protected]_request\ndef before_request():\n u'''set context and check authorization'''\n try:\n context = {\n u'model': model,\n u'user': g.user,\n u'auth_user_obj': g.userobj}\n logic.check_access(u'site_read', context)\n except logic.NotAuthorized:\n abort(403)\n\n\ndef index():\n u'''display home page'''\n try:\n context = {u'model': model, u'session': model.Session,\n u'user': g.user, u'auth_user_obj': g.userobj}\n data_dict = {u'q': u'*:*',\n u'facet.field': h.facets(),\n u'rows': 4,\n u'start': 0,\n u'sort': u'view_recent desc',\n u'fq': u'capacity:\"public\"'}\n query = logic.get_action(u'package_search')(context, data_dict)\n g.search_facets = query['search_facets']\n g.package_count = query['count']\n g.datasets = query['results']\n\n org_label = h.humanize_entity_type(\n u'organization',\n h.default_group_type(u'organization'),\n u'facet label') or _(u'Organizations')\n\n group_label = h.humanize_entity_type(\n u'group',\n h.default_group_type(u'group'),\n u'facet label') or _(u'Groups')\n\n g.facet_titles = {\n u'organization': org_label,\n u'groups': group_label,\n u'tags': _(u'Tags'),\n u'res_format': _(u'Formats'),\n u'license': _(u'Licenses'),\n }\n\n except search.SearchError:\n g.package_count = 0\n\n if g.userobj and not g.userobj.email:\n url = h.url_for(controller=u'user', action=u'edit')\n msg = _(u'Please <a href=\"%s\">update your profile</a>'\n u' and add your email address. ') % url + \\\n _(u'%s uses your email address'\n u' if you need to reset your password.') \\\n % config.get(u'ckan.site_title')\n h.flash_notice(msg, allow_html=True)\n return base.render(u'home/index.html', extra_vars={})\n\n\ndef about():\n u''' display about page'''\n return base.render(u'home/about.html', extra_vars={})\n\n\ndef redirect_locale(target_locale, path=None):\n target = f'/{target_locale}/{path}' if path else f'/{target_locale}'\n return redirect(target, code=308)\n\n\nutil_rules = [\n (u'/', index),\n (u'/about', about)\n]\nfor rule, view_func in util_rules:\n home.add_url_rule(rule, view_func=view_func)\n\nlocales_mapping = [\n ('zh_TW', 'zh_Hant_TW'),\n ('zh_CN', 'zh_Hans_CN'),\n]\n\nfor locale in locales_mapping:\n\n legacy_locale = locale[0]\n new_locale = locale[1]\n\n home.add_url_rule(\n f'/{legacy_locale}/',\n view_func=redirect_locale,\n defaults={'target_locale': new_locale}\n )\n\n home.add_url_rule(\n f'/{legacy_locale}/<path:path>',\n view_func=redirect_locale,\n defaults={'target_locale': new_locale}\n )\n", "path": "ckan/views/home.py"}]} | 1,220 | 336 |
gh_patches_debug_6079 | rasdani/github-patches | git_diff | carpentries__amy-2324 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: Internal Server Error: /fiscal/organizations/
Production issue.
```
FieldError at /fiscal/organizations/
Cannot resolve keyword 'membership' into field. Choices are: administered_events, affiliated_organizations, country, domain, fullname, hosted_events, id, latitude, longitude, member, memberships, selforganisedsubmission, sponsored_events, workshopinquiryrequest, workshoprequest
```
https://amy.carpentries.org/fiscal/organizations/?country=&membership__variant=silver&order_by=
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `amy/fiscal/filters.py`
Content:
```
1 from datetime import date
2
3 from django.forms import widgets
4 import django_filters
5
6 from workshops.fields import Select2MultipleWidget, Select2Widget
7 from workshops.filters import AllCountriesFilter, AMYFilterSet
8 from workshops.models import Membership, Organization
9
10
11 class OrganizationFilter(AMYFilterSet):
12 country = AllCountriesFilter(widget=Select2Widget)
13
14 membership__variant = django_filters.MultipleChoiceFilter(
15 label="Memberships (current or past)",
16 choices=Membership.MEMBERSHIP_CHOICES,
17 widget=Select2MultipleWidget,
18 )
19
20 order_by = django_filters.OrderingFilter(
21 fields=(
22 "fullname",
23 "domain",
24 ),
25 )
26
27 class Meta:
28 model = Organization
29 fields = [
30 "country",
31 ]
32
33
34 def filter_active_memberships_only(queryset, name, active):
35 """Limit Memberships to only active entries."""
36 if active:
37 today = date.today()
38 return queryset.filter(agreement_start__lte=today, agreement_end__gte=today)
39 else:
40 return queryset
41
42
43 def filter_training_seats_only(queryset, name, seats):
44 """Limit Memberships to only entries with some training seats allowed."""
45 if seats:
46 return queryset.filter(instructor_training_seats_total__gt=0)
47 else:
48 return queryset
49
50
51 def filter_nonpositive_remaining_seats(queryset, name, seats):
52 """Limit Memberships to only entries with negative remaining seats."""
53 if seats:
54 return queryset.filter(instructor_training_seats_remaining__lt=0)
55 else:
56 return queryset
57
58
59 class MembershipFilter(AMYFilterSet):
60 organization_name = django_filters.CharFilter(
61 label="Organisation name",
62 field_name="organizations__fullname",
63 lookup_expr="icontains",
64 )
65
66 MEMBERSHIP_CHOICES = (("", "Any"),) + Membership.MEMBERSHIP_CHOICES
67 variant = django_filters.ChoiceFilter(choices=MEMBERSHIP_CHOICES)
68
69 CONTRIBUTION_CHOICES = (("", "Any"),) + Membership.CONTRIBUTION_CHOICES
70 contribution_type = django_filters.ChoiceFilter(choices=CONTRIBUTION_CHOICES)
71
72 active_only = django_filters.BooleanFilter(
73 label="Only show active memberships",
74 method=filter_active_memberships_only,
75 widget=widgets.CheckboxInput,
76 )
77
78 training_seats_only = django_filters.BooleanFilter(
79 label="Only show memberships with non-zero allowed training seats",
80 method=filter_training_seats_only,
81 widget=widgets.CheckboxInput,
82 )
83
84 nonpositive_remaining_seats_only = django_filters.BooleanFilter(
85 label="Only show memberships with zero or less remaining seats",
86 method=filter_nonpositive_remaining_seats,
87 widget=widgets.CheckboxInput,
88 )
89
90 order_by = django_filters.OrderingFilter(
91 fields=(
92 "agreement_start",
93 "agreement_end",
94 "instructor_training_seats_remaining",
95 ),
96 )
97
98 class Meta:
99 model = Membership
100 fields = [
101 "organization_name",
102 "consortium",
103 "public_status",
104 "variant",
105 "contribution_type",
106 ]
107
108
109 class MembershipTrainingsFilter(AMYFilterSet):
110 organization_name = django_filters.CharFilter(
111 label="Organization name",
112 field_name="organization__fullname",
113 lookup_expr="icontains",
114 )
115
116 active_only = django_filters.BooleanFilter(
117 label="Only show active memberships",
118 method=filter_active_memberships_only,
119 widget=widgets.CheckboxInput,
120 )
121
122 training_seats_only = django_filters.BooleanFilter(
123 label="Only show memberships with non-zero allowed training seats",
124 method=filter_training_seats_only,
125 widget=widgets.CheckboxInput,
126 )
127
128 nonpositive_remaining_seats_only = django_filters.BooleanFilter(
129 label="Only show memberships with zero or less remaining seats",
130 method=filter_nonpositive_remaining_seats,
131 widget=widgets.CheckboxInput,
132 )
133
134 order_by = django_filters.OrderingFilter(
135 fields=(
136 "organization__fullname",
137 "organization__domain",
138 "agreement_start",
139 "agreement_end",
140 "instructor_training_seats_total",
141 "instructor_training_seats_utilized",
142 "instructor_training_seats_remaining",
143 ),
144 )
145
146 class Meta:
147 model = Membership
148 fields = [
149 "organization_name",
150 ]
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/amy/fiscal/filters.py b/amy/fiscal/filters.py
--- a/amy/fiscal/filters.py
+++ b/amy/fiscal/filters.py
@@ -11,7 +11,7 @@
class OrganizationFilter(AMYFilterSet):
country = AllCountriesFilter(widget=Select2Widget)
- membership__variant = django_filters.MultipleChoiceFilter(
+ memberships__variant = django_filters.MultipleChoiceFilter(
label="Memberships (current or past)",
choices=Membership.MEMBERSHIP_CHOICES,
widget=Select2MultipleWidget,
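Worth noting about the one-line rename above: the traceback in the issue lists ``memberships`` (not ``membership``) among the valid lookup names, because that is the reverse accessor Django exposes on ``Organization``. A quick illustration, assuming a Django shell for the AMY project:

```python
# Illustration only: the reverse accessor named in the traceback is "memberships".
from workshops.models import Organization

Organization.objects.filter(memberships__variant="silver")   # resolves correctly
# Organization.objects.filter(membership__variant="silver")  # -> FieldError, as reported
```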
| {"golden_diff": "diff --git a/amy/fiscal/filters.py b/amy/fiscal/filters.py\n--- a/amy/fiscal/filters.py\n+++ b/amy/fiscal/filters.py\n@@ -11,7 +11,7 @@\n class OrganizationFilter(AMYFilterSet):\n country = AllCountriesFilter(widget=Select2Widget)\n \n- membership__variant = django_filters.MultipleChoiceFilter(\n+ memberships__variant = django_filters.MultipleChoiceFilter(\n label=\"Memberships (current or past)\",\n choices=Membership.MEMBERSHIP_CHOICES,\n widget=Select2MultipleWidget,\n", "issue": "Bug: Internal Server Error: /fiscal/organizations/\nProduction issue.\r\n\r\n```\r\nFieldError at /fiscal/organizations/\r\nCannot resolve keyword 'membership' into field. Choices are: administered_events, affiliated_organizations, country, domain, fullname, hosted_events, id, latitude, longitude, member, memberships, selforganisedsubmission, sponsored_events, workshopinquiryrequest, workshoprequest\r\n```\r\n\r\nhttps://amy.carpentries.org/fiscal/organizations/?country=&membership__variant=silver&order_by=\n", "before_files": [{"content": "from datetime import date\n\nfrom django.forms import widgets\nimport django_filters\n\nfrom workshops.fields import Select2MultipleWidget, Select2Widget\nfrom workshops.filters import AllCountriesFilter, AMYFilterSet\nfrom workshops.models import Membership, Organization\n\n\nclass OrganizationFilter(AMYFilterSet):\n country = AllCountriesFilter(widget=Select2Widget)\n\n membership__variant = django_filters.MultipleChoiceFilter(\n label=\"Memberships (current or past)\",\n choices=Membership.MEMBERSHIP_CHOICES,\n widget=Select2MultipleWidget,\n )\n\n order_by = django_filters.OrderingFilter(\n fields=(\n \"fullname\",\n \"domain\",\n ),\n )\n\n class Meta:\n model = Organization\n fields = [\n \"country\",\n ]\n\n\ndef filter_active_memberships_only(queryset, name, active):\n \"\"\"Limit Memberships to only active entries.\"\"\"\n if active:\n today = date.today()\n return queryset.filter(agreement_start__lte=today, agreement_end__gte=today)\n else:\n return queryset\n\n\ndef filter_training_seats_only(queryset, name, seats):\n \"\"\"Limit Memberships to only entries with some training seats allowed.\"\"\"\n if seats:\n return queryset.filter(instructor_training_seats_total__gt=0)\n else:\n return queryset\n\n\ndef filter_nonpositive_remaining_seats(queryset, name, seats):\n \"\"\"Limit Memberships to only entries with negative remaining seats.\"\"\"\n if seats:\n return queryset.filter(instructor_training_seats_remaining__lt=0)\n else:\n return queryset\n\n\nclass MembershipFilter(AMYFilterSet):\n organization_name = django_filters.CharFilter(\n label=\"Organisation name\",\n field_name=\"organizations__fullname\",\n lookup_expr=\"icontains\",\n )\n\n MEMBERSHIP_CHOICES = ((\"\", \"Any\"),) + Membership.MEMBERSHIP_CHOICES\n variant = django_filters.ChoiceFilter(choices=MEMBERSHIP_CHOICES)\n\n CONTRIBUTION_CHOICES = ((\"\", \"Any\"),) + Membership.CONTRIBUTION_CHOICES\n contribution_type = django_filters.ChoiceFilter(choices=CONTRIBUTION_CHOICES)\n\n active_only = django_filters.BooleanFilter(\n label=\"Only show active memberships\",\n method=filter_active_memberships_only,\n widget=widgets.CheckboxInput,\n )\n\n training_seats_only = django_filters.BooleanFilter(\n label=\"Only show memberships with non-zero allowed training seats\",\n method=filter_training_seats_only,\n widget=widgets.CheckboxInput,\n )\n\n nonpositive_remaining_seats_only = django_filters.BooleanFilter(\n label=\"Only show memberships with zero or less remaining seats\",\n 
method=filter_nonpositive_remaining_seats,\n widget=widgets.CheckboxInput,\n )\n\n order_by = django_filters.OrderingFilter(\n fields=(\n \"agreement_start\",\n \"agreement_end\",\n \"instructor_training_seats_remaining\",\n ),\n )\n\n class Meta:\n model = Membership\n fields = [\n \"organization_name\",\n \"consortium\",\n \"public_status\",\n \"variant\",\n \"contribution_type\",\n ]\n\n\nclass MembershipTrainingsFilter(AMYFilterSet):\n organization_name = django_filters.CharFilter(\n label=\"Organization name\",\n field_name=\"organization__fullname\",\n lookup_expr=\"icontains\",\n )\n\n active_only = django_filters.BooleanFilter(\n label=\"Only show active memberships\",\n method=filter_active_memberships_only,\n widget=widgets.CheckboxInput,\n )\n\n training_seats_only = django_filters.BooleanFilter(\n label=\"Only show memberships with non-zero allowed training seats\",\n method=filter_training_seats_only,\n widget=widgets.CheckboxInput,\n )\n\n nonpositive_remaining_seats_only = django_filters.BooleanFilter(\n label=\"Only show memberships with zero or less remaining seats\",\n method=filter_nonpositive_remaining_seats,\n widget=widgets.CheckboxInput,\n )\n\n order_by = django_filters.OrderingFilter(\n fields=(\n \"organization__fullname\",\n \"organization__domain\",\n \"agreement_start\",\n \"agreement_end\",\n \"instructor_training_seats_total\",\n \"instructor_training_seats_utilized\",\n \"instructor_training_seats_remaining\",\n ),\n )\n\n class Meta:\n model = Membership\n fields = [\n \"organization_name\",\n ]\n", "path": "amy/fiscal/filters.py"}], "after_files": [{"content": "from datetime import date\n\nfrom django.forms import widgets\nimport django_filters\n\nfrom workshops.fields import Select2MultipleWidget, Select2Widget\nfrom workshops.filters import AllCountriesFilter, AMYFilterSet\nfrom workshops.models import Membership, Organization\n\n\nclass OrganizationFilter(AMYFilterSet):\n country = AllCountriesFilter(widget=Select2Widget)\n\n memberships__variant = django_filters.MultipleChoiceFilter(\n label=\"Memberships (current or past)\",\n choices=Membership.MEMBERSHIP_CHOICES,\n widget=Select2MultipleWidget,\n )\n\n order_by = django_filters.OrderingFilter(\n fields=(\n \"fullname\",\n \"domain\",\n ),\n )\n\n class Meta:\n model = Organization\n fields = [\n \"country\",\n ]\n\n\ndef filter_active_memberships_only(queryset, name, active):\n \"\"\"Limit Memberships to only active entries.\"\"\"\n if active:\n today = date.today()\n return queryset.filter(agreement_start__lte=today, agreement_end__gte=today)\n else:\n return queryset\n\n\ndef filter_training_seats_only(queryset, name, seats):\n \"\"\"Limit Memberships to only entries with some training seats allowed.\"\"\"\n if seats:\n return queryset.filter(instructor_training_seats_total__gt=0)\n else:\n return queryset\n\n\ndef filter_nonpositive_remaining_seats(queryset, name, seats):\n \"\"\"Limit Memberships to only entries with negative remaining seats.\"\"\"\n if seats:\n return queryset.filter(instructor_training_seats_remaining__lt=0)\n else:\n return queryset\n\n\nclass MembershipFilter(AMYFilterSet):\n organization_name = django_filters.CharFilter(\n label=\"Organisation name\",\n field_name=\"organizations__fullname\",\n lookup_expr=\"icontains\",\n )\n\n MEMBERSHIP_CHOICES = ((\"\", \"Any\"),) + Membership.MEMBERSHIP_CHOICES\n variant = django_filters.ChoiceFilter(choices=MEMBERSHIP_CHOICES)\n\n CONTRIBUTION_CHOICES = ((\"\", \"Any\"),) + Membership.CONTRIBUTION_CHOICES\n contribution_type = 
django_filters.ChoiceFilter(choices=CONTRIBUTION_CHOICES)\n\n active_only = django_filters.BooleanFilter(\n label=\"Only show active memberships\",\n method=filter_active_memberships_only,\n widget=widgets.CheckboxInput,\n )\n\n training_seats_only = django_filters.BooleanFilter(\n label=\"Only show memberships with non-zero allowed training seats\",\n method=filter_training_seats_only,\n widget=widgets.CheckboxInput,\n )\n\n nonpositive_remaining_seats_only = django_filters.BooleanFilter(\n label=\"Only show memberships with zero or less remaining seats\",\n method=filter_nonpositive_remaining_seats,\n widget=widgets.CheckboxInput,\n )\n\n order_by = django_filters.OrderingFilter(\n fields=(\n \"agreement_start\",\n \"agreement_end\",\n \"instructor_training_seats_remaining\",\n ),\n )\n\n class Meta:\n model = Membership\n fields = [\n \"organization_name\",\n \"consortium\",\n \"public_status\",\n \"variant\",\n \"contribution_type\",\n ]\n\n\nclass MembershipTrainingsFilter(AMYFilterSet):\n organization_name = django_filters.CharFilter(\n label=\"Organization name\",\n field_name=\"organization__fullname\",\n lookup_expr=\"icontains\",\n )\n\n active_only = django_filters.BooleanFilter(\n label=\"Only show active memberships\",\n method=filter_active_memberships_only,\n widget=widgets.CheckboxInput,\n )\n\n training_seats_only = django_filters.BooleanFilter(\n label=\"Only show memberships with non-zero allowed training seats\",\n method=filter_training_seats_only,\n widget=widgets.CheckboxInput,\n )\n\n nonpositive_remaining_seats_only = django_filters.BooleanFilter(\n label=\"Only show memberships with zero or less remaining seats\",\n method=filter_nonpositive_remaining_seats,\n widget=widgets.CheckboxInput,\n )\n\n order_by = django_filters.OrderingFilter(\n fields=(\n \"organization__fullname\",\n \"organization__domain\",\n \"agreement_start\",\n \"agreement_end\",\n \"instructor_training_seats_total\",\n \"instructor_training_seats_utilized\",\n \"instructor_training_seats_remaining\",\n ),\n )\n\n class Meta:\n model = Membership\n fields = [\n \"organization_name\",\n ]\n", "path": "amy/fiscal/filters.py"}]} | 1,629 | 127 |
gh_patches_debug_10474 | rasdani/github-patches | git_diff | aws__aws-cli-4147 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
aws cloudformation prints S3 upload progress to STDOUT
When running `aws cloudformation package`, the output must not include the S3 upload progress.
It currently emits the combined output of the S3 file upload and the packaged CloudFormation template itself, making the result impossible to consume via STDOUT.
See the full repro at https://github.com/dnagir/cf-package-stdout-messed-up
Workaround: use the `--output-template-file` option.
--- END ISSUE ---
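For anyone hitting this before the fix ships, the workaround above means writing the packaged template to a file rather than reading it from STDOUT, e.g. `aws cloudformation package --template-file template.yaml --s3-bucket <bucket> --output-template-file packaged.yaml` (the template and bucket names here are placeholders).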
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `awscli/customizations/s3uploader.py`
Content:
```
1 # Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13
14 import hashlib
15 import logging
16 import threading
17 import os
18 import sys
19
20 import botocore
21 import botocore.exceptions
22 from s3transfer.manager import TransferManager
23 from s3transfer.subscribers import BaseSubscriber
24
25 from awscli.compat import collections_abc
26
27 LOG = logging.getLogger(__name__)
28
29
30 class NoSuchBucketError(Exception):
31 def __init__(self, **kwargs):
32 msg = self.fmt.format(**kwargs)
33 Exception.__init__(self, msg)
34 self.kwargs = kwargs
35
36
37 fmt = ("S3 Bucket does not exist. "
38 "Execute the command to create a new bucket"
39 "\n"
40 "aws s3 mb s3://{bucket_name}")
41
42
43 class S3Uploader(object):
44 """
45 Class to upload objects to S3 bucket that use versioning. If bucket
46 does not already use versioning, this class will turn on versioning.
47 """
48
49 @property
50 def artifact_metadata(self):
51 """
52 Metadata to attach to the object(s) uploaded by the uploader.
53 """
54 return self._artifact_metadata
55
56 @artifact_metadata.setter
57 def artifact_metadata(self, val):
58 if val is not None and not isinstance(val, collections_abc.Mapping):
59 raise TypeError("Artifact metadata should be in dict type")
60 self._artifact_metadata = val
61
62 def __init__(self, s3_client,
63 bucket_name,
64 prefix=None,
65 kms_key_id=None,
66 force_upload=False,
67 transfer_manager=None):
68 self.bucket_name = bucket_name
69 self.prefix = prefix
70 self.kms_key_id = kms_key_id or None
71 self.force_upload = force_upload
72 self.s3 = s3_client
73
74 self.transfer_manager = transfer_manager
75 if not transfer_manager:
76 self.transfer_manager = TransferManager(self.s3)
77
78 self._artifact_metadata = None
79
80 def upload(self, file_name, remote_path):
81 """
82 Uploads given file to S3
83 :param file_name: Path to the file that will be uploaded
84 :param remote_path: be uploaded
85 :return: VersionId of the latest upload
86 """
87
88 if self.prefix and len(self.prefix) > 0:
89 remote_path = "{0}/{1}".format(self.prefix, remote_path)
90
91 # Check if a file with same data exists
92 if not self.force_upload and self.file_exists(remote_path):
93 LOG.debug("File with same data already exists at {0}. "
94 "Skipping upload".format(remote_path))
95 return self.make_url(remote_path)
96
97 try:
98
99 # Default to regular server-side encryption unless customer has
100 # specified their own KMS keys
101 additional_args = {
102 "ServerSideEncryption": "AES256"
103 }
104
105 if self.kms_key_id:
106 additional_args["ServerSideEncryption"] = "aws:kms"
107 additional_args["SSEKMSKeyId"] = self.kms_key_id
108
109 if self.artifact_metadata:
110 additional_args["Metadata"] = self.artifact_metadata
111
112 print_progress_callback = \
113 ProgressPercentage(file_name, remote_path)
114 future = self.transfer_manager.upload(file_name,
115 self.bucket_name,
116 remote_path,
117 additional_args,
118 [print_progress_callback])
119 future.result()
120
121 return self.make_url(remote_path)
122
123 except botocore.exceptions.ClientError as ex:
124 error_code = ex.response["Error"]["Code"]
125 if error_code == "NoSuchBucket":
126 raise NoSuchBucketError(bucket_name=self.bucket_name)
127 raise ex
128
129 def upload_with_dedup(self, file_name, extension=None):
130 """
131 Makes and returns name of the S3 object based on the file's MD5 sum
132
133 :param file_name: file to upload
134 :param extension: String of file extension to append to the object
135 :return: S3 URL of the uploaded object
136 """
137
138 # This construction of remote_path is critical to preventing duplicate
139 # uploads of same object. Uploader will check if the file exists in S3
140 # and re-upload only if necessary. So the template points to same file
141 # in multiple places, this will upload only once
142
143 filemd5 = self.file_checksum(file_name)
144 remote_path = filemd5
145 if extension:
146 remote_path = remote_path + "." + extension
147
148 return self.upload(file_name, remote_path)
149
150 def file_exists(self, remote_path):
151 """
152 Check if the file we are trying to upload already exists in S3
153
154 :param remote_path:
155 :return: True, if file exists. False, otherwise
156 """
157
158 try:
159 # Find the object that matches this ETag
160 self.s3.head_object(
161 Bucket=self.bucket_name, Key=remote_path)
162 return True
163 except botocore.exceptions.ClientError:
164 # Either File does not exist or we are unable to get
165 # this information.
166 return False
167
168 def make_url(self, obj_path):
169 return "s3://{0}/{1}".format(
170 self.bucket_name, obj_path)
171
172 def file_checksum(self, file_name):
173
174 with open(file_name, "rb") as file_handle:
175 md5 = hashlib.md5()
176 # Read file in chunks of 4096 bytes
177 block_size = 4096
178
179 # Save current cursor position and reset cursor to start of file
180 curpos = file_handle.tell()
181 file_handle.seek(0)
182
183 buf = file_handle.read(block_size)
184 while len(buf) > 0:
185 md5.update(buf)
186 buf = file_handle.read(block_size)
187
188 # Restore file cursor's position
189 file_handle.seek(curpos)
190
191 return md5.hexdigest()
192
193 def to_path_style_s3_url(self, key, version=None):
194 """
195 This link describes the format of Path Style URLs
196 http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro
197 """
198 base = self.s3.meta.endpoint_url
199 result = "{0}/{1}/{2}".format(base, self.bucket_name, key)
200 if version:
201 result = "{0}?versionId={1}".format(result, version)
202
203 return result
204
205
206 class ProgressPercentage(BaseSubscriber):
207 # This class was copied directly from S3Transfer docs
208
209 def __init__(self, filename, remote_path):
210 self._filename = filename
211 self._remote_path = remote_path
212 self._size = float(os.path.getsize(filename))
213 self._seen_so_far = 0
214 self._lock = threading.Lock()
215
216 def on_progress(self, future, bytes_transferred, **kwargs):
217
218 # To simplify we'll assume this is hooked up
219 # to a single filename.
220 with self._lock:
221 self._seen_so_far += bytes_transferred
222 percentage = (self._seen_so_far / self._size) * 100
223 sys.stdout.write(
224 "\rUploading to %s %s / %s (%.2f%%)" %
225 (self._remote_path, self._seen_so_far,
226 self._size, percentage))
227 sys.stdout.flush()
228
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/awscli/customizations/s3uploader.py b/awscli/customizations/s3uploader.py
--- a/awscli/customizations/s3uploader.py
+++ b/awscli/customizations/s3uploader.py
@@ -220,8 +220,8 @@
with self._lock:
self._seen_so_far += bytes_transferred
percentage = (self._seen_so_far / self._size) * 100
- sys.stdout.write(
+ sys.stderr.write(
"\rUploading to %s %s / %s (%.2f%%)" %
(self._remote_path, self._seen_so_far,
self._size, percentage))
- sys.stdout.flush()
+ sys.stderr.flush()
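With both the write and the flush moved to stderr, stdout is left carrying only the packaged template, so piping or redirecting the command's output stays clean. A generic sketch of the convention the fix follows (not awscli code):

```python
import sys

def report_progress(message: str) -> None:
    # Progress and diagnostics go to stderr so they never mix with piped output.
    sys.stderr.write(f"\r{message}")
    sys.stderr.flush()

def emit_result(document: str) -> None:
    # Only the machine-consumable artifact is written to stdout.
    sys.stdout.write(document)
```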
| {"golden_diff": "diff --git a/awscli/customizations/s3uploader.py b/awscli/customizations/s3uploader.py\n--- a/awscli/customizations/s3uploader.py\n+++ b/awscli/customizations/s3uploader.py\n@@ -220,8 +220,8 @@\n with self._lock:\n self._seen_so_far += bytes_transferred\n percentage = (self._seen_so_far / self._size) * 100\n- sys.stdout.write(\n+ sys.stderr.write(\n \"\\rUploading to %s %s / %s (%.2f%%)\" %\n (self._remote_path, self._seen_so_far,\n self._size, percentage))\n- sys.stdout.flush()\n+ sys.stderr.flush()\n", "issue": "aws cloudformation prints S3 upload progress to STDOUT\nWhen running `aws cloudformation package`, the output must not include the S3 upload progress.\r\n\r\nIt currently includes the combined output of the S3 file upload as well as packaged CloudFormation itself making it impossible to use via STDOUT.\r\n\r\nSee the full repro at https://github.com/dnagir/cf-package-stdout-messed-up\r\n\r\nWorkaround - use `--output-template-file` option.\n", "before_files": [{"content": "# Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\nimport hashlib\nimport logging\nimport threading\nimport os\nimport sys\n\nimport botocore\nimport botocore.exceptions\nfrom s3transfer.manager import TransferManager\nfrom s3transfer.subscribers import BaseSubscriber\n\nfrom awscli.compat import collections_abc\n\nLOG = logging.getLogger(__name__)\n\n\nclass NoSuchBucketError(Exception):\n def __init__(self, **kwargs):\n msg = self.fmt.format(**kwargs)\n Exception.__init__(self, msg)\n self.kwargs = kwargs\n\n\n fmt = (\"S3 Bucket does not exist. \"\n \"Execute the command to create a new bucket\"\n \"\\n\"\n \"aws s3 mb s3://{bucket_name}\")\n\n\nclass S3Uploader(object):\n \"\"\"\n Class to upload objects to S3 bucket that use versioning. 
If bucket\n does not already use versioning, this class will turn on versioning.\n \"\"\"\n\n @property\n def artifact_metadata(self):\n \"\"\"\n Metadata to attach to the object(s) uploaded by the uploader.\n \"\"\"\n return self._artifact_metadata\n\n @artifact_metadata.setter\n def artifact_metadata(self, val):\n if val is not None and not isinstance(val, collections_abc.Mapping):\n raise TypeError(\"Artifact metadata should be in dict type\")\n self._artifact_metadata = val\n\n def __init__(self, s3_client,\n bucket_name,\n prefix=None,\n kms_key_id=None,\n force_upload=False,\n transfer_manager=None):\n self.bucket_name = bucket_name\n self.prefix = prefix\n self.kms_key_id = kms_key_id or None\n self.force_upload = force_upload\n self.s3 = s3_client\n\n self.transfer_manager = transfer_manager\n if not transfer_manager:\n self.transfer_manager = TransferManager(self.s3)\n\n self._artifact_metadata = None\n\n def upload(self, file_name, remote_path):\n \"\"\"\n Uploads given file to S3\n :param file_name: Path to the file that will be uploaded\n :param remote_path: be uploaded\n :return: VersionId of the latest upload\n \"\"\"\n\n if self.prefix and len(self.prefix) > 0:\n remote_path = \"{0}/{1}\".format(self.prefix, remote_path)\n\n # Check if a file with same data exists\n if not self.force_upload and self.file_exists(remote_path):\n LOG.debug(\"File with same data already exists at {0}. \"\n \"Skipping upload\".format(remote_path))\n return self.make_url(remote_path)\n\n try:\n\n # Default to regular server-side encryption unless customer has\n # specified their own KMS keys\n additional_args = {\n \"ServerSideEncryption\": \"AES256\"\n }\n\n if self.kms_key_id:\n additional_args[\"ServerSideEncryption\"] = \"aws:kms\"\n additional_args[\"SSEKMSKeyId\"] = self.kms_key_id\n\n if self.artifact_metadata:\n additional_args[\"Metadata\"] = self.artifact_metadata\n\n print_progress_callback = \\\n ProgressPercentage(file_name, remote_path)\n future = self.transfer_manager.upload(file_name,\n self.bucket_name,\n remote_path,\n additional_args,\n [print_progress_callback])\n future.result()\n\n return self.make_url(remote_path)\n\n except botocore.exceptions.ClientError as ex:\n error_code = ex.response[\"Error\"][\"Code\"]\n if error_code == \"NoSuchBucket\":\n raise NoSuchBucketError(bucket_name=self.bucket_name)\n raise ex\n\n def upload_with_dedup(self, file_name, extension=None):\n \"\"\"\n Makes and returns name of the S3 object based on the file's MD5 sum\n\n :param file_name: file to upload\n :param extension: String of file extension to append to the object\n :return: S3 URL of the uploaded object\n \"\"\"\n\n # This construction of remote_path is critical to preventing duplicate\n # uploads of same object. Uploader will check if the file exists in S3\n # and re-upload only if necessary. So the template points to same file\n # in multiple places, this will upload only once\n\n filemd5 = self.file_checksum(file_name)\n remote_path = filemd5\n if extension:\n remote_path = remote_path + \".\" + extension\n\n return self.upload(file_name, remote_path)\n\n def file_exists(self, remote_path):\n \"\"\"\n Check if the file we are trying to upload already exists in S3\n\n :param remote_path:\n :return: True, if file exists. 
False, otherwise\n \"\"\"\n\n try:\n # Find the object that matches this ETag\n self.s3.head_object(\n Bucket=self.bucket_name, Key=remote_path)\n return True\n except botocore.exceptions.ClientError:\n # Either File does not exist or we are unable to get\n # this information.\n return False\n\n def make_url(self, obj_path):\n return \"s3://{0}/{1}\".format(\n self.bucket_name, obj_path)\n\n def file_checksum(self, file_name):\n\n with open(file_name, \"rb\") as file_handle:\n md5 = hashlib.md5()\n # Read file in chunks of 4096 bytes\n block_size = 4096\n\n # Save current cursor position and reset cursor to start of file\n curpos = file_handle.tell()\n file_handle.seek(0)\n\n buf = file_handle.read(block_size)\n while len(buf) > 0:\n md5.update(buf)\n buf = file_handle.read(block_size)\n\n # Restore file cursor's position\n file_handle.seek(curpos)\n\n return md5.hexdigest()\n\n def to_path_style_s3_url(self, key, version=None):\n \"\"\"\n This link describes the format of Path Style URLs\n http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro\n \"\"\"\n base = self.s3.meta.endpoint_url\n result = \"{0}/{1}/{2}\".format(base, self.bucket_name, key)\n if version:\n result = \"{0}?versionId={1}\".format(result, version)\n\n return result\n\n\nclass ProgressPercentage(BaseSubscriber):\n # This class was copied directly from S3Transfer docs\n\n def __init__(self, filename, remote_path):\n self._filename = filename\n self._remote_path = remote_path\n self._size = float(os.path.getsize(filename))\n self._seen_so_far = 0\n self._lock = threading.Lock()\n\n def on_progress(self, future, bytes_transferred, **kwargs):\n\n # To simplify we'll assume this is hooked up\n # to a single filename.\n with self._lock:\n self._seen_so_far += bytes_transferred\n percentage = (self._seen_so_far / self._size) * 100\n sys.stdout.write(\n \"\\rUploading to %s %s / %s (%.2f%%)\" %\n (self._remote_path, self._seen_so_far,\n self._size, percentage))\n sys.stdout.flush()\n", "path": "awscli/customizations/s3uploader.py"}], "after_files": [{"content": "# Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\nimport hashlib\nimport logging\nimport threading\nimport os\nimport sys\n\nimport botocore\nimport botocore.exceptions\nfrom s3transfer.manager import TransferManager\nfrom s3transfer.subscribers import BaseSubscriber\n\nfrom awscli.compat import collections_abc\n\nLOG = logging.getLogger(__name__)\n\n\nclass NoSuchBucketError(Exception):\n def __init__(self, **kwargs):\n msg = self.fmt.format(**kwargs)\n Exception.__init__(self, msg)\n self.kwargs = kwargs\n\n\n fmt = (\"S3 Bucket does not exist. \"\n \"Execute the command to create a new bucket\"\n \"\\n\"\n \"aws s3 mb s3://{bucket_name}\")\n\n\nclass S3Uploader(object):\n \"\"\"\n Class to upload objects to S3 bucket that use versioning. 
If bucket\n does not already use versioning, this class will turn on versioning.\n \"\"\"\n\n @property\n def artifact_metadata(self):\n \"\"\"\n Metadata to attach to the object(s) uploaded by the uploader.\n \"\"\"\n return self._artifact_metadata\n\n @artifact_metadata.setter\n def artifact_metadata(self, val):\n if val is not None and not isinstance(val, collections_abc.Mapping):\n raise TypeError(\"Artifact metadata should be in dict type\")\n self._artifact_metadata = val\n\n def __init__(self, s3_client,\n bucket_name,\n prefix=None,\n kms_key_id=None,\n force_upload=False,\n transfer_manager=None):\n self.bucket_name = bucket_name\n self.prefix = prefix\n self.kms_key_id = kms_key_id or None\n self.force_upload = force_upload\n self.s3 = s3_client\n\n self.transfer_manager = transfer_manager\n if not transfer_manager:\n self.transfer_manager = TransferManager(self.s3)\n\n self._artifact_metadata = None\n\n def upload(self, file_name, remote_path):\n \"\"\"\n Uploads given file to S3\n :param file_name: Path to the file that will be uploaded\n :param remote_path: be uploaded\n :return: VersionId of the latest upload\n \"\"\"\n\n if self.prefix and len(self.prefix) > 0:\n remote_path = \"{0}/{1}\".format(self.prefix, remote_path)\n\n # Check if a file with same data exists\n if not self.force_upload and self.file_exists(remote_path):\n LOG.debug(\"File with same data already exists at {0}. \"\n \"Skipping upload\".format(remote_path))\n return self.make_url(remote_path)\n\n try:\n\n # Default to regular server-side encryption unless customer has\n # specified their own KMS keys\n additional_args = {\n \"ServerSideEncryption\": \"AES256\"\n }\n\n if self.kms_key_id:\n additional_args[\"ServerSideEncryption\"] = \"aws:kms\"\n additional_args[\"SSEKMSKeyId\"] = self.kms_key_id\n\n if self.artifact_metadata:\n additional_args[\"Metadata\"] = self.artifact_metadata\n\n print_progress_callback = \\\n ProgressPercentage(file_name, remote_path)\n future = self.transfer_manager.upload(file_name,\n self.bucket_name,\n remote_path,\n additional_args,\n [print_progress_callback])\n future.result()\n\n return self.make_url(remote_path)\n\n except botocore.exceptions.ClientError as ex:\n error_code = ex.response[\"Error\"][\"Code\"]\n if error_code == \"NoSuchBucket\":\n raise NoSuchBucketError(bucket_name=self.bucket_name)\n raise ex\n\n def upload_with_dedup(self, file_name, extension=None):\n \"\"\"\n Makes and returns name of the S3 object based on the file's MD5 sum\n\n :param file_name: file to upload\n :param extension: String of file extension to append to the object\n :return: S3 URL of the uploaded object\n \"\"\"\n\n # This construction of remote_path is critical to preventing duplicate\n # uploads of same object. Uploader will check if the file exists in S3\n # and re-upload only if necessary. So the template points to same file\n # in multiple places, this will upload only once\n\n filemd5 = self.file_checksum(file_name)\n remote_path = filemd5\n if extension:\n remote_path = remote_path + \".\" + extension\n\n return self.upload(file_name, remote_path)\n\n def file_exists(self, remote_path):\n \"\"\"\n Check if the file we are trying to upload already exists in S3\n\n :param remote_path:\n :return: True, if file exists. 
False, otherwise\n \"\"\"\n\n try:\n # Find the object that matches this ETag\n self.s3.head_object(\n Bucket=self.bucket_name, Key=remote_path)\n return True\n except botocore.exceptions.ClientError:\n # Either File does not exist or we are unable to get\n # this information.\n return False\n\n def make_url(self, obj_path):\n return \"s3://{0}/{1}\".format(\n self.bucket_name, obj_path)\n\n def file_checksum(self, file_name):\n\n with open(file_name, \"rb\") as file_handle:\n md5 = hashlib.md5()\n # Read file in chunks of 4096 bytes\n block_size = 4096\n\n # Save current cursor position and reset cursor to start of file\n curpos = file_handle.tell()\n file_handle.seek(0)\n\n buf = file_handle.read(block_size)\n while len(buf) > 0:\n md5.update(buf)\n buf = file_handle.read(block_size)\n\n # Restore file cursor's position\n file_handle.seek(curpos)\n\n return md5.hexdigest()\n\n def to_path_style_s3_url(self, key, version=None):\n \"\"\"\n This link describes the format of Path Style URLs\n http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro\n \"\"\"\n base = self.s3.meta.endpoint_url\n result = \"{0}/{1}/{2}\".format(base, self.bucket_name, key)\n if version:\n result = \"{0}?versionId={1}\".format(result, version)\n\n return result\n\n\nclass ProgressPercentage(BaseSubscriber):\n # This class was copied directly from S3Transfer docs\n\n def __init__(self, filename, remote_path):\n self._filename = filename\n self._remote_path = remote_path\n self._size = float(os.path.getsize(filename))\n self._seen_so_far = 0\n self._lock = threading.Lock()\n\n def on_progress(self, future, bytes_transferred, **kwargs):\n\n # To simplify we'll assume this is hooked up\n # to a single filename.\n with self._lock:\n self._seen_so_far += bytes_transferred\n percentage = (self._seen_so_far / self._size) * 100\n sys.stderr.write(\n \"\\rUploading to %s %s / %s (%.2f%%)\" %\n (self._remote_path, self._seen_so_far,\n self._size, percentage))\n sys.stderr.flush()\n", "path": "awscli/customizations/s3uploader.py"}]} | 2,616 | 161 |
gh_patches_debug_40473 | rasdani/github-patches | git_diff | sunpy__sunpy-5025 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add a dynamic z axis option to the animators
So that, if your frames have a wide dynamic range, the z scale can change from frame to frame instead of staying fixed.
--- END ISSUE ---
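Before the code below, a rough illustration of what a "dynamic z axis" could mean in practice: rescaling the image colour limits on every frame of the animation. This is a plain-matplotlib sketch, not the sunpy animator API, and the option name ``dynamic_z`` is made up for the example:

```python
# Plain matplotlib sketch (not sunpy API): rescale colour limits per frame.
import matplotlib.pyplot as plt
import numpy as np

frames = np.random.default_rng(0).random((10, 64, 64)) * np.logspace(0, 3, 10)[:, None, None]

fig, ax = plt.subplots()
im = ax.imshow(frames[0])

def update(i, dynamic_z=True):
    im.set_data(frames[i])
    if dynamic_z:
        im.set_clim(frames[i].min(), frames[i].max())  # z scale follows each frame's range
    return (im,)
```

Hooked into `matplotlib.animation.FuncAnimation` (or the animator's own update callback), this makes faint frames visible at the cost of a colour scale that is no longer comparable across frames.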
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sunpy/visualization/animator/wcs.py`
Content:
```
1 from functools import partial
2
3 import numpy as np
4
5 import astropy.units as u
6 from astropy.wcs.wcsapi import BaseLowLevelWCS
7
8 from sunpy.extern import modest_image
9 from sunpy.visualization.animator.base import ArrayAnimator
10
11 __all__ = ['ArrayAnimatorWCS']
12
13
14 class ArrayAnimatorWCS(ArrayAnimator):
15 """
16 Animate an array with associated `~astropy.wcs.wcsapi.BaseLowLevelWCS` object.
17
18 The following keyboard shortcuts are defined in the viewer:
19
20 * 'left': previous step on active slider.
21 * 'right': next step on active slider.
22 * 'top': change the active slider up one.
23 * 'bottom': change the active slider down one.
24 * 'p': play/pause active slider.
25
26 Parameters
27 ----------
28 data: `numpy.ndarray`
29 The data to be visualized.
30 wcs: `astropy.wcs.wcsapi.BaseLowLevelWCS`
31 The world coordinate object associated with the array.
32 slices: `tuple` or `list`
33 A list specifying which axes of the array should be plotted on which
34 axes. The list should be the same length as the number of pixel
35 dimensions with ``'x'`` and (optionally) ``'y'`` in the elements
36 corresponding to the axes to be plotted. If only ``'x'`` is present a
37 line plot will be drawn. All other elements should be ``0``.
38 coord_params: `dict`, optional
39 This dict allows you to override
40 `~astropy.visualization.wcsaxes.WCSAxes` parameters for each world
41 coordinate. The keys of this dictionary should be a value which can be
42 looked up in ``WCSAxes.coords`` (i.e. ``em.wl`` or ``hpln``) and the
43 values should be a dict which supports the following keys, and passes
44 their values to the associated `~astropy.visualization.wcsaxes.WCSAxes`
45 methods.
46
47 * ``format_unit``: `~astropy.visualization.wcsaxes.CoordinateHelper.set_format_unit`
48 * ``major_formatter``: `~astropy.visualization.wcsaxes.CoordinateHelper.set_major_formatter`
49 * ``axislabel``: `~astropy.visualization.wcsaxes.CoordinateHelper.set_axislabel`
50 * ``grid``: `~astropy.visualization.wcsaxes.CoordinateHelper.grid` (The value should be a dict of keyword arguments to ``grid()`` or `True`).
51 * ``ticks``: `dict` or `bool` the keyword arguments to the
52 `~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks` method,
53 or `False` to display no ticks for this coord.
54
55 ylim: `tuple` or `str`, optional
56 The yaxis limits to use when drawing a line plot, if 'fixed' then use
57 the global data limits, if 'dynamic' then set the y limit for each frame
58 individually (meaning the y limits change as you animate).
59 ylabel: `string`, optional
60 The yaxis label to use when drawing a line plot. Setting the label on
61 the y-axis on an image plot should be done via ``coord_params``.
62
63 """
64
65 def __init__(self, data, wcs, slices, coord_params=None, ylim='dynamic', ylabel=None, **kwargs):
66 if not isinstance(wcs, BaseLowLevelWCS):
67 raise ValueError("A WCS object should be provided that implements the astropy WCS API.")
68 if wcs.pixel_n_dim != data.ndim:
69 raise ValueError("Dimensionality of the data and WCS object do not match.")
70 if len(slices) != wcs.pixel_n_dim:
71 raise ValueError("slices should be the same length as the number of pixel dimensions.")
72 if "x" not in slices:
73 raise ValueError(
74 "slices should contain at least 'x' to indicate the axis to plot on the x axis.")
75
76 self.plot_dimensionality = 1
77
78 image_axes = [slices[::-1].index("x")]
79 if "y" in slices:
80 image_axes.append(slices[::-1].index("y"))
81 self.plot_dimensionality = 2
82
83 self.naxis = data.ndim
84 self.num_sliders = self.naxis - self.plot_dimensionality
85 self.slices_wcsaxes = list(slices)
86 self.wcs = wcs
87 self.coord_params = coord_params
88 self.ylim = ylim
89 self.ylabel = ylabel
90
91 extra_slider_labels = []
92 if "slider_functions" in kwargs and "slider_labels" not in kwargs:
93 extra_slider_labels = [a.__name__ for a in kwargs['slider_functions']]
94
95 slider_labels = self._compute_slider_labels_from_wcs(slices) + extra_slider_labels
96
97 super().__init__(data, image_axes=image_axes, axis_ranges=None,
98 slider_labels=slider_labels,
99 **kwargs)
100
101 def _get_wcs_labels(self):
102 """
103 Read first the axes names property of the wcs and fall back to physical types.
104 """
105 # Return the name if it is set, or the physical type if it is not.
106 return [l or t for l, t in zip(self.wcs.world_axis_names, self.wcs.world_axis_physical_types)]
107
108 def _compute_slider_labels_from_wcs(self, slices):
109 """
110 For each pixel dimension, not used in the plot, calculate the world
111 names which are correlated with that pixel dimension. This can return
112 more than one world name per pixel dimension (i.e. lat & lon) so join
113 them if there are.
114 """
115 labels = []
116 wal = np.array(self._get_wcs_labels())
117 pixel_indicies = np.array([a not in ['x', 'y'] for a in slices])
118 for sliced_axis in self.wcs.axis_correlation_matrix[:, pixel_indicies].T:
119 labels.append(" / ".join(list(map(str, wal[sliced_axis]))))
120
121 return labels[::-1]
122
123 def _partial_pixel_to_world(self, pixel_dimension, pixel_coord):
124 """
125 Return the world coordinate along one axis, if it is only
126 correlated to that axis.
127 """
128 wcs_dimension = self.wcs.pixel_n_dim - pixel_dimension - 1
129 corr = self.wcs.axis_correlation_matrix[:, wcs_dimension]
130
131 # If more than one world axis is linked to this dimension we can't
132 # display the world coordinate because we have no way of picking,
133 # so we just display pixel index.
134 if len(np.nonzero(corr)[0]) != 1:
135 return pixel_coord * u.pix
136
137 # We know that the coordinate we care about is independent of the
138 # other axes, so we can set the pixel coordinates to 0.
139 coords = [0] * self.wcs.pixel_n_dim
140 coords[wcs_dimension] = pixel_coord
141 wc = self.wcs.pixel_to_world_values(*coords)[wcs_dimension]
142 return u.Quantity(wc, unit=self.wcs.world_axis_units[wcs_dimension])
143
144 def _sanitize_axis_ranges(self, *args):
145 """
146 This overrides the behaviour of ArrayAnimator to generate axis_ranges
147 based on the WCS.
148 """
149
150 axis_ranges = [None] * self.wcs.pixel_n_dim
151 for i in self.slider_axes:
152 axis_ranges[i] = partial(self._partial_pixel_to_world, i)
153
154 return axis_ranges, None
155
156 def _apply_coord_params(self, axes):
157 if self.coord_params is None:
158 return
159
160 for coord_name in self.coord_params:
161 coord = axes.coords[coord_name]
162 params = self.coord_params[coord_name]
163
164 format_unit = params.get("format_unit", None)
165 if format_unit:
166 coord.set_format_unit(format_unit)
167
168 major_formatter = params.get("major_formatter", None)
169 if major_formatter:
170 coord.set_major_formatter(major_formatter)
171
172 axislabel = params.get("axislabel", None)
173 if axislabel:
174 coord.set_axislabel(axislabel)
175
176 grid = params.get("grid", None)
177 if grid is not None:
178 if not isinstance(grid, dict):
179 grid = {}
180 coord.grid(**grid)
181
182 ticks = params.get("ticks", None)
183 if ticks is not None:
184 if isinstance(ticks, bool):
185 coord.set_ticks_visible(ticks)
186 coord.set_ticklabel_visible(ticks)
187 elif isinstance(ticks, dict):
188 coord.set_ticks(**ticks)
189 else:
190 raise TypeError(
191 "The 'ticks' value in the coord_params dictionary must be a dict or a boolean."
192 )
193
194 def _setup_main_axes(self):
195 self.axes = self.fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=self.wcs,
196 slices=self.slices_wcsaxes)
197 self._apply_coord_params(self.axes)
198
199 def plot_start_image(self, ax):
200 if self.plot_dimensionality == 1:
201 artist = self.plot_start_image_1d(ax)
202
203 elif self.plot_dimensionality == 2:
204 artist = self.plot_start_image_2d(ax)
205
206 return artist
207
208 def update_plot(self, val, artist, slider):
209 """
210 Update the plot when a slider changes.
211
212 This method both updates the state of the Animator and also re-draws
213 the matplotlib artist.
214 """
215 ind = int(val)
216 if ind == int(slider.cval):
217 return
218 ax_ind = self.slider_axes[slider.slider_ind]
219 self.frame_slice[ax_ind] = ind
220 self.slices_wcsaxes[self.wcs.pixel_n_dim - ax_ind - 1] = ind
221
222 if self.plot_dimensionality == 1:
223 self.update_plot_1d(val, artist, slider)
224 elif self.plot_dimensionality == 2:
225 self.update_plot_2d(val, artist, slider)
226
227 self._apply_coord_params(self.axes)
228 return super().update_plot(val, artist, slider)
229
230 def plot_start_image_1d(self, ax):
231 """
232 Set up a line plot.
233
234 When plotting with WCSAxes, we always plot against pixel coordinate.
235 """
236 if self.ylim != 'dynamic':
237 ylim = self.ylim
238 if ylim == 'fixed':
239 ylim = (self.data.min(), self.data.max())
240 ax.set_ylim(ylim)
241
242 if self.ylabel:
243 ax.set_ylabel(self.ylabel)
244
245 ydata = self.data[self.frame_index]
246 line, = ax.plot(ydata, **self.imshow_kwargs)
247
248 if isinstance(self.data, np.ma.MaskedArray):
249 ax.set_xlim((0, ydata.shape[0]))
250
251 return line
252
253 @property
254 def data_transposed(self):
255 """
256 Return data for 2D plotting, transposed if needed.
257 """
258 if self.slices_wcsaxes.index('y') < self.slices_wcsaxes.index("x"):
259 return self.data[self.frame_index].transpose()
260 else:
261 return self.data[self.frame_index]
262
263 def update_plot_1d(self, val, line, slider):
264 """
265 Update the line plot.
266 """
267 self.axes.reset_wcs(wcs=self.wcs, slices=self.slices_wcsaxes)
268 line.set_ydata(self.data[self.frame_index])
269
270 # If we are not setting ylim globally then we set it per frame.
271 if self.ylim == 'dynamic':
272 self.axes.set_ylim(self.data[self.frame_index].min(),
273 self.data[self.frame_index].max())
274 slider.cval = val
275
276 def plot_start_image_2d(self, ax):
277 """
278 Setup an image plot.
279 """
280 imshow_args = {'interpolation': 'nearest',
281 'origin': 'lower'}
282 imshow_args.update(self.imshow_kwargs)
283
284 im = modest_image.imshow(ax, self.data_transposed, **imshow_args)
285
286 if 'extent' in imshow_args:
287 ax.set_xlim(imshow_args['extent'][:2])
288 ax.set_ylim(imshow_args['extent'][2:])
289 else:
290 ny, nx = self.data_transposed.shape
291 ax.set_xlim(-0.5, nx - 0.5)
292 ax.set_ylim(-0.5, ny - 0.5)
293
294 ax.dataLim.intervalx = ax.get_xlim()
295 ax.dataLim.intervaly = ax.get_ylim()
296
297 if self.if_colorbar:
298 self._add_colorbar(im)
299
300 return im
301
302 def update_plot_2d(self, val, im, slider):
303 """
304 Update the image plot.
305 """
306 self.axes.reset_wcs(wcs=self.wcs, slices=self.slices_wcsaxes)
307 im.set_array(self.data_transposed)
308 slider.cval = val
309
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sunpy/visualization/animator/wcs.py b/sunpy/visualization/animator/wcs.py
--- a/sunpy/visualization/animator/wcs.py
+++ b/sunpy/visualization/animator/wcs.py
@@ -3,6 +3,7 @@
import numpy as np
import astropy.units as u
+from astropy.visualization import AsymmetricPercentileInterval
from astropy.wcs.wcsapi import BaseLowLevelWCS
from sunpy.extern import modest_image
@@ -59,10 +60,12 @@
ylabel: `string`, optional
The yaxis label to use when drawing a line plot. Setting the label on
the y-axis on an image plot should be done via ``coord_params``.
-
+ clip_interval : two-element `~astropy.units.Quantity`, optional
+ If provided, the data for each step will be clipped to the percentile interval bounded by the two numbers.
"""
- def __init__(self, data, wcs, slices, coord_params=None, ylim='dynamic', ylabel=None, **kwargs):
+ def __init__(self, data, wcs, slices, coord_params=None, ylim='dynamic', ylabel=None,
+ clip_interval: u.percent = None, **kwargs):
if not isinstance(wcs, BaseLowLevelWCS):
raise ValueError("A WCS object should be provided that implements the astropy WCS API.")
if wcs.pixel_n_dim != data.ndim:
@@ -88,6 +91,11 @@
self.ylim = ylim
self.ylabel = ylabel
+ if clip_interval is not None and len(clip_interval) != 2:
+ raise ValueError('A range of 2 values must be specified for clip_interval.')
+
+ self.clip_interval = clip_interval
+
extra_slider_labels = []
if "slider_functions" in kwargs and "slider_labels" not in kwargs:
extra_slider_labels = [a.__name__ for a in kwargs['slider_functions']]
@@ -281,6 +289,9 @@
'origin': 'lower'}
imshow_args.update(self.imshow_kwargs)
+ if self.clip_interval is not None:
+ imshow_args['vmin'], imshow_args['vmax'] = self._get_2d_plot_limits()
+
im = modest_image.imshow(ax, self.data_transposed, **imshow_args)
if 'extent' in imshow_args:
@@ -299,10 +310,23 @@
return im
+ def _get_2d_plot_limits(self):
+ """
+ Get vmin, vmax of a data slice when clip_interval is specified.
+ """
+ percent_limits = self.clip_interval.to('%').value
+ vmin, vmax = AsymmetricPercentileInterval(*percent_limits).get_limits(self.data_transposed)
+ return vmin, vmax
+
def update_plot_2d(self, val, im, slider):
"""
Update the image plot.
"""
self.axes.reset_wcs(wcs=self.wcs, slices=self.slices_wcsaxes)
im.set_array(self.data_transposed)
+
+ if self.clip_interval is not None:
+ vmin, vmax = self._get_2d_plot_limits()
+ im.set_clim(vmin, vmax)
+
slider.cval = val
| {"golden_diff": "diff --git a/sunpy/visualization/animator/wcs.py b/sunpy/visualization/animator/wcs.py\n--- a/sunpy/visualization/animator/wcs.py\n+++ b/sunpy/visualization/animator/wcs.py\n@@ -3,6 +3,7 @@\n import numpy as np\n \n import astropy.units as u\n+from astropy.visualization import AsymmetricPercentileInterval\n from astropy.wcs.wcsapi import BaseLowLevelWCS\n \n from sunpy.extern import modest_image\n@@ -59,10 +60,12 @@\n ylabel: `string`, optional\n The yaxis label to use when drawing a line plot. Setting the label on\n the y-axis on an image plot should be done via ``coord_params``.\n-\n+ clip_interval : two-element `~astropy.units.Quantity`, optional\n+ If provided, the data for each step will be clipped to the percentile interval bounded by the two numbers.\n \"\"\"\n \n- def __init__(self, data, wcs, slices, coord_params=None, ylim='dynamic', ylabel=None, **kwargs):\n+ def __init__(self, data, wcs, slices, coord_params=None, ylim='dynamic', ylabel=None,\n+ clip_interval: u.percent = None, **kwargs):\n if not isinstance(wcs, BaseLowLevelWCS):\n raise ValueError(\"A WCS object should be provided that implements the astropy WCS API.\")\n if wcs.pixel_n_dim != data.ndim:\n@@ -88,6 +91,11 @@\n self.ylim = ylim\n self.ylabel = ylabel\n \n+ if clip_interval is not None and len(clip_interval) != 2:\n+ raise ValueError('A range of 2 values must be specified for clip_interval.')\n+\n+ self.clip_interval = clip_interval\n+\n extra_slider_labels = []\n if \"slider_functions\" in kwargs and \"slider_labels\" not in kwargs:\n extra_slider_labels = [a.__name__ for a in kwargs['slider_functions']]\n@@ -281,6 +289,9 @@\n 'origin': 'lower'}\n imshow_args.update(self.imshow_kwargs)\n \n+ if self.clip_interval is not None:\n+ imshow_args['vmin'], imshow_args['vmax'] = self._get_2d_plot_limits()\n+\n im = modest_image.imshow(ax, self.data_transposed, **imshow_args)\n \n if 'extent' in imshow_args:\n@@ -299,10 +310,23 @@\n \n return im\n \n+ def _get_2d_plot_limits(self):\n+ \"\"\"\n+ Get vmin, vmax of a data slice when clip_interval is specified.\n+ \"\"\"\n+ percent_limits = self.clip_interval.to('%').value\n+ vmin, vmax = AsymmetricPercentileInterval(*percent_limits).get_limits(self.data_transposed)\n+ return vmin, vmax\n+\n def update_plot_2d(self, val, im, slider):\n \"\"\"\n Update the image plot.\n \"\"\"\n self.axes.reset_wcs(wcs=self.wcs, slices=self.slices_wcsaxes)\n im.set_array(self.data_transposed)\n+\n+ if self.clip_interval is not None:\n+ vmin, vmax = self._get_2d_plot_limits()\n+ im.set_clim(vmin, vmax)\n+\n slider.cval = val\n", "issue": "Add a dynamic z axis option to the animators\nSo if your frames have a wide dynamic range the z scale changes\n", "before_files": [{"content": "from functools import partial\n\nimport numpy as np\n\nimport astropy.units as u\nfrom astropy.wcs.wcsapi import BaseLowLevelWCS\n\nfrom sunpy.extern import modest_image\nfrom sunpy.visualization.animator.base import ArrayAnimator\n\n__all__ = ['ArrayAnimatorWCS']\n\n\nclass ArrayAnimatorWCS(ArrayAnimator):\n \"\"\"\n Animate an array with associated `~astropy.wcs.wcsapi.BaseLowLevelWCS` object.\n\n The following keyboard shortcuts are defined in the viewer:\n\n * 'left': previous step on active slider.\n * 'right': next step on active slider.\n * 'top': change the active slider up one.\n * 'bottom': change the active slider down one.\n * 'p': play/pause active slider.\n\n Parameters\n ----------\n data: `numpy.ndarray`\n The data to be visualized.\n wcs: `astropy.wcs.wcsapi.BaseLowLevelWCS`\n The 
world coordinate object associated with the array.\n slices: `tuple` or `list`\n A list specifying which axes of the array should be plotted on which\n axes. The list should be the same length as the number of pixel\n dimensions with ``'x'`` and (optionally) ``'y'`` in the elements\n corresponding to the axes to be plotted. If only ``'x'`` is present a\n line plot will be drawn. All other elements should be ``0``.\n coord_params: `dict`, optional\n This dict allows you to override\n `~astropy.visualization.wcsaxes.WCSAxes` parameters for each world\n coordinate. The keys of this dictionary should be a value which can be\n looked up in ``WCSAxes.coords`` (i.e. ``em.wl`` or ``hpln``) and the\n values should be a dict which supports the following keys, and passes\n their values to the associated `~astropy.visualization.wcsaxes.WCSAxes`\n methods.\n\n * ``format_unit``: `~astropy.visualization.wcsaxes.CoordinateHelper.set_format_unit`\n * ``major_formatter``: `~astropy.visualization.wcsaxes.CoordinateHelper.set_major_formatter`\n * ``axislabel``: `~astropy.visualization.wcsaxes.CoordinateHelper.set_axislabel`\n * ``grid``: `~astropy.visualization.wcsaxes.CoordinateHelper.grid` (The value should be a dict of keyword arguments to ``grid()`` or `True`).\n * ``ticks``: `dict` or `bool` the keyword arguments to the\n `~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks` method,\n or `False` to display no ticks for this coord.\n\n ylim: `tuple` or `str`, optional\n The yaxis limits to use when drawing a line plot, if 'fixed' then use\n the global data limits, if 'dynamic' then set the y limit for each frame\n individually (meaning the y limits change as you animate).\n ylabel: `string`, optional\n The yaxis label to use when drawing a line plot. Setting the label on\n the y-axis on an image plot should be done via ``coord_params``.\n\n \"\"\"\n\n def __init__(self, data, wcs, slices, coord_params=None, ylim='dynamic', ylabel=None, **kwargs):\n if not isinstance(wcs, BaseLowLevelWCS):\n raise ValueError(\"A WCS object should be provided that implements the astropy WCS API.\")\n if wcs.pixel_n_dim != data.ndim:\n raise ValueError(\"Dimensionality of the data and WCS object do not match.\")\n if len(slices) != wcs.pixel_n_dim:\n raise ValueError(\"slices should be the same length as the number of pixel dimensions.\")\n if \"x\" not in slices:\n raise ValueError(\n \"slices should contain at least 'x' to indicate the axis to plot on the x axis.\")\n\n self.plot_dimensionality = 1\n\n image_axes = [slices[::-1].index(\"x\")]\n if \"y\" in slices:\n image_axes.append(slices[::-1].index(\"y\"))\n self.plot_dimensionality = 2\n\n self.naxis = data.ndim\n self.num_sliders = self.naxis - self.plot_dimensionality\n self.slices_wcsaxes = list(slices)\n self.wcs = wcs\n self.coord_params = coord_params\n self.ylim = ylim\n self.ylabel = ylabel\n\n extra_slider_labels = []\n if \"slider_functions\" in kwargs and \"slider_labels\" not in kwargs:\n extra_slider_labels = [a.__name__ for a in kwargs['slider_functions']]\n\n slider_labels = self._compute_slider_labels_from_wcs(slices) + extra_slider_labels\n\n super().__init__(data, image_axes=image_axes, axis_ranges=None,\n slider_labels=slider_labels,\n **kwargs)\n\n def _get_wcs_labels(self):\n \"\"\"\n Read first the axes names property of the wcs and fall back to physical types.\n \"\"\"\n # Return the name if it is set, or the physical type if it is not.\n return [l or t for l, t in zip(self.wcs.world_axis_names, self.wcs.world_axis_physical_types)]\n\n 
def _compute_slider_labels_from_wcs(self, slices):\n \"\"\"\n For each pixel dimension, not used in the plot, calculate the world\n names which are correlated with that pixel dimension. This can return\n more than one world name per pixel dimension (i.e. lat & lon) so join\n them if there are.\n \"\"\"\n labels = []\n wal = np.array(self._get_wcs_labels())\n pixel_indicies = np.array([a not in ['x', 'y'] for a in slices])\n for sliced_axis in self.wcs.axis_correlation_matrix[:, pixel_indicies].T:\n labels.append(\" / \".join(list(map(str, wal[sliced_axis]))))\n\n return labels[::-1]\n\n def _partial_pixel_to_world(self, pixel_dimension, pixel_coord):\n \"\"\"\n Return the world coordinate along one axis, if it is only\n correlated to that axis.\n \"\"\"\n wcs_dimension = self.wcs.pixel_n_dim - pixel_dimension - 1\n corr = self.wcs.axis_correlation_matrix[:, wcs_dimension]\n\n # If more than one world axis is linked to this dimension we can't\n # display the world coordinate because we have no way of picking,\n # so we just display pixel index.\n if len(np.nonzero(corr)[0]) != 1:\n return pixel_coord * u.pix\n\n # We know that the coordinate we care about is independent of the\n # other axes, so we can set the pixel coordinates to 0.\n coords = [0] * self.wcs.pixel_n_dim\n coords[wcs_dimension] = pixel_coord\n wc = self.wcs.pixel_to_world_values(*coords)[wcs_dimension]\n return u.Quantity(wc, unit=self.wcs.world_axis_units[wcs_dimension])\n\n def _sanitize_axis_ranges(self, *args):\n \"\"\"\n This overrides the behaviour of ArrayAnimator to generate axis_ranges\n based on the WCS.\n \"\"\"\n\n axis_ranges = [None] * self.wcs.pixel_n_dim\n for i in self.slider_axes:\n axis_ranges[i] = partial(self._partial_pixel_to_world, i)\n\n return axis_ranges, None\n\n def _apply_coord_params(self, axes):\n if self.coord_params is None:\n return\n\n for coord_name in self.coord_params:\n coord = axes.coords[coord_name]\n params = self.coord_params[coord_name]\n\n format_unit = params.get(\"format_unit\", None)\n if format_unit:\n coord.set_format_unit(format_unit)\n\n major_formatter = params.get(\"major_formatter\", None)\n if major_formatter:\n coord.set_major_formatter(major_formatter)\n\n axislabel = params.get(\"axislabel\", None)\n if axislabel:\n coord.set_axislabel(axislabel)\n\n grid = params.get(\"grid\", None)\n if grid is not None:\n if not isinstance(grid, dict):\n grid = {}\n coord.grid(**grid)\n\n ticks = params.get(\"ticks\", None)\n if ticks is not None:\n if isinstance(ticks, bool):\n coord.set_ticks_visible(ticks)\n coord.set_ticklabel_visible(ticks)\n elif isinstance(ticks, dict):\n coord.set_ticks(**ticks)\n else:\n raise TypeError(\n \"The 'ticks' value in the coord_params dictionary must be a dict or a boolean.\"\n )\n\n def _setup_main_axes(self):\n self.axes = self.fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=self.wcs,\n slices=self.slices_wcsaxes)\n self._apply_coord_params(self.axes)\n\n def plot_start_image(self, ax):\n if self.plot_dimensionality == 1:\n artist = self.plot_start_image_1d(ax)\n\n elif self.plot_dimensionality == 2:\n artist = self.plot_start_image_2d(ax)\n\n return artist\n\n def update_plot(self, val, artist, slider):\n \"\"\"\n Update the plot when a slider changes.\n\n This method both updates the state of the Animator and also re-draws\n the matplotlib artist.\n \"\"\"\n ind = int(val)\n if ind == int(slider.cval):\n return\n ax_ind = self.slider_axes[slider.slider_ind]\n self.frame_slice[ax_ind] = ind\n self.slices_wcsaxes[self.wcs.pixel_n_dim - ax_ind 
- 1] = ind\n\n if self.plot_dimensionality == 1:\n self.update_plot_1d(val, artist, slider)\n elif self.plot_dimensionality == 2:\n self.update_plot_2d(val, artist, slider)\n\n self._apply_coord_params(self.axes)\n return super().update_plot(val, artist, slider)\n\n def plot_start_image_1d(self, ax):\n \"\"\"\n Set up a line plot.\n\n When plotting with WCSAxes, we always plot against pixel coordinate.\n \"\"\"\n if self.ylim != 'dynamic':\n ylim = self.ylim\n if ylim == 'fixed':\n ylim = (self.data.min(), self.data.max())\n ax.set_ylim(ylim)\n\n if self.ylabel:\n ax.set_ylabel(self.ylabel)\n\n ydata = self.data[self.frame_index]\n line, = ax.plot(ydata, **self.imshow_kwargs)\n\n if isinstance(self.data, np.ma.MaskedArray):\n ax.set_xlim((0, ydata.shape[0]))\n\n return line\n\n @property\n def data_transposed(self):\n \"\"\"\n Return data for 2D plotting, transposed if needed.\n \"\"\"\n if self.slices_wcsaxes.index('y') < self.slices_wcsaxes.index(\"x\"):\n return self.data[self.frame_index].transpose()\n else:\n return self.data[self.frame_index]\n\n def update_plot_1d(self, val, line, slider):\n \"\"\"\n Update the line plot.\n \"\"\"\n self.axes.reset_wcs(wcs=self.wcs, slices=self.slices_wcsaxes)\n line.set_ydata(self.data[self.frame_index])\n\n # If we are not setting ylim globally then we set it per frame.\n if self.ylim == 'dynamic':\n self.axes.set_ylim(self.data[self.frame_index].min(),\n self.data[self.frame_index].max())\n slider.cval = val\n\n def plot_start_image_2d(self, ax):\n \"\"\"\n Setup an image plot.\n \"\"\"\n imshow_args = {'interpolation': 'nearest',\n 'origin': 'lower'}\n imshow_args.update(self.imshow_kwargs)\n\n im = modest_image.imshow(ax, self.data_transposed, **imshow_args)\n\n if 'extent' in imshow_args:\n ax.set_xlim(imshow_args['extent'][:2])\n ax.set_ylim(imshow_args['extent'][2:])\n else:\n ny, nx = self.data_transposed.shape\n ax.set_xlim(-0.5, nx - 0.5)\n ax.set_ylim(-0.5, ny - 0.5)\n\n ax.dataLim.intervalx = ax.get_xlim()\n ax.dataLim.intervaly = ax.get_ylim()\n\n if self.if_colorbar:\n self._add_colorbar(im)\n\n return im\n\n def update_plot_2d(self, val, im, slider):\n \"\"\"\n Update the image plot.\n \"\"\"\n self.axes.reset_wcs(wcs=self.wcs, slices=self.slices_wcsaxes)\n im.set_array(self.data_transposed)\n slider.cval = val\n", "path": "sunpy/visualization/animator/wcs.py"}], "after_files": [{"content": "from functools import partial\n\nimport numpy as np\n\nimport astropy.units as u\nfrom astropy.visualization import AsymmetricPercentileInterval\nfrom astropy.wcs.wcsapi import BaseLowLevelWCS\n\nfrom sunpy.extern import modest_image\nfrom sunpy.visualization.animator.base import ArrayAnimator\n\n__all__ = ['ArrayAnimatorWCS']\n\n\nclass ArrayAnimatorWCS(ArrayAnimator):\n \"\"\"\n Animate an array with associated `~astropy.wcs.wcsapi.BaseLowLevelWCS` object.\n\n The following keyboard shortcuts are defined in the viewer:\n\n * 'left': previous step on active slider.\n * 'right': next step on active slider.\n * 'top': change the active slider up one.\n * 'bottom': change the active slider down one.\n * 'p': play/pause active slider.\n\n Parameters\n ----------\n data: `numpy.ndarray`\n The data to be visualized.\n wcs: `astropy.wcs.wcsapi.BaseLowLevelWCS`\n The world coordinate object associated with the array.\n slices: `tuple` or `list`\n A list specifying which axes of the array should be plotted on which\n axes. 
The list should be the same length as the number of pixel\n dimensions with ``'x'`` and (optionally) ``'y'`` in the elements\n corresponding to the axes to be plotted. If only ``'x'`` is present a\n line plot will be drawn. All other elements should be ``0``.\n coord_params: `dict`, optional\n This dict allows you to override\n `~astropy.visualization.wcsaxes.WCSAxes` parameters for each world\n coordinate. The keys of this dictionary should be a value which can be\n looked up in ``WCSAxes.coords`` (i.e. ``em.wl`` or ``hpln``) and the\n values should be a dict which supports the following keys, and passes\n their values to the associated `~astropy.visualization.wcsaxes.WCSAxes`\n methods.\n\n * ``format_unit``: `~astropy.visualization.wcsaxes.CoordinateHelper.set_format_unit`\n * ``major_formatter``: `~astropy.visualization.wcsaxes.CoordinateHelper.set_major_formatter`\n * ``axislabel``: `~astropy.visualization.wcsaxes.CoordinateHelper.set_axislabel`\n * ``grid``: `~astropy.visualization.wcsaxes.CoordinateHelper.grid` (The value should be a dict of keyword arguments to ``grid()`` or `True`).\n * ``ticks``: `dict` or `bool` the keyword arguments to the\n `~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks` method,\n or `False` to display no ticks for this coord.\n\n ylim: `tuple` or `str`, optional\n The yaxis limits to use when drawing a line plot, if 'fixed' then use\n the global data limits, if 'dynamic' then set the y limit for each frame\n individually (meaning the y limits change as you animate).\n ylabel: `string`, optional\n The yaxis label to use when drawing a line plot. Setting the label on\n the y-axis on an image plot should be done via ``coord_params``.\n clip_interval : two-element `~astropy.units.Quantity`, optional\n If provided, the data for each step will be clipped to the percentile interval bounded by the two numbers.\n \"\"\"\n\n def __init__(self, data, wcs, slices, coord_params=None, ylim='dynamic', ylabel=None,\n clip_interval: u.percent = None, **kwargs):\n if not isinstance(wcs, BaseLowLevelWCS):\n raise ValueError(\"A WCS object should be provided that implements the astropy WCS API.\")\n if wcs.pixel_n_dim != data.ndim:\n raise ValueError(\"Dimensionality of the data and WCS object do not match.\")\n if len(slices) != wcs.pixel_n_dim:\n raise ValueError(\"slices should be the same length as the number of pixel dimensions.\")\n if \"x\" not in slices:\n raise ValueError(\n \"slices should contain at least 'x' to indicate the axis to plot on the x axis.\")\n\n self.plot_dimensionality = 1\n\n image_axes = [slices[::-1].index(\"x\")]\n if \"y\" in slices:\n image_axes.append(slices[::-1].index(\"y\"))\n self.plot_dimensionality = 2\n\n self.naxis = data.ndim\n self.num_sliders = self.naxis - self.plot_dimensionality\n self.slices_wcsaxes = list(slices)\n self.wcs = wcs\n self.coord_params = coord_params\n self.ylim = ylim\n self.ylabel = ylabel\n\n if clip_interval is not None and len(clip_interval) != 2:\n raise ValueError('A range of 2 values must be specified for clip_interval.')\n\n self.clip_interval = clip_interval\n\n extra_slider_labels = []\n if \"slider_functions\" in kwargs and \"slider_labels\" not in kwargs:\n extra_slider_labels = [a.__name__ for a in kwargs['slider_functions']]\n\n slider_labels = self._compute_slider_labels_from_wcs(slices) + extra_slider_labels\n\n super().__init__(data, image_axes=image_axes, axis_ranges=None,\n slider_labels=slider_labels,\n **kwargs)\n\n def _get_wcs_labels(self):\n \"\"\"\n Read first the axes names 
property of the wcs and fall back to physical types.\n \"\"\"\n # Return the name if it is set, or the physical type if it is not.\n return [l or t for l, t in zip(self.wcs.world_axis_names, self.wcs.world_axis_physical_types)]\n\n def _compute_slider_labels_from_wcs(self, slices):\n \"\"\"\n For each pixel dimension, not used in the plot, calculate the world\n names which are correlated with that pixel dimension. This can return\n more than one world name per pixel dimension (i.e. lat & lon) so join\n them if there are.\n \"\"\"\n labels = []\n wal = np.array(self._get_wcs_labels())\n pixel_indicies = np.array([a not in ['x', 'y'] for a in slices])\n for sliced_axis in self.wcs.axis_correlation_matrix[:, pixel_indicies].T:\n labels.append(\" / \".join(list(map(str, wal[sliced_axis]))))\n\n return labels[::-1]\n\n def _partial_pixel_to_world(self, pixel_dimension, pixel_coord):\n \"\"\"\n Return the world coordinate along one axis, if it is only\n correlated to that axis.\n \"\"\"\n wcs_dimension = self.wcs.pixel_n_dim - pixel_dimension - 1\n corr = self.wcs.axis_correlation_matrix[:, wcs_dimension]\n\n # If more than one world axis is linked to this dimension we can't\n # display the world coordinate because we have no way of picking,\n # so we just display pixel index.\n if len(np.nonzero(corr)[0]) != 1:\n return pixel_coord * u.pix\n\n # We know that the coordinate we care about is independent of the\n # other axes, so we can set the pixel coordinates to 0.\n coords = [0] * self.wcs.pixel_n_dim\n coords[wcs_dimension] = pixel_coord\n wc = self.wcs.pixel_to_world_values(*coords)[wcs_dimension]\n return u.Quantity(wc, unit=self.wcs.world_axis_units[wcs_dimension])\n\n def _sanitize_axis_ranges(self, *args):\n \"\"\"\n This overrides the behaviour of ArrayAnimator to generate axis_ranges\n based on the WCS.\n \"\"\"\n\n axis_ranges = [None] * self.wcs.pixel_n_dim\n for i in self.slider_axes:\n axis_ranges[i] = partial(self._partial_pixel_to_world, i)\n\n return axis_ranges, None\n\n def _apply_coord_params(self, axes):\n if self.coord_params is None:\n return\n\n for coord_name in self.coord_params:\n coord = axes.coords[coord_name]\n params = self.coord_params[coord_name]\n\n format_unit = params.get(\"format_unit\", None)\n if format_unit:\n coord.set_format_unit(format_unit)\n\n major_formatter = params.get(\"major_formatter\", None)\n if major_formatter:\n coord.set_major_formatter(major_formatter)\n\n axislabel = params.get(\"axislabel\", None)\n if axislabel:\n coord.set_axislabel(axislabel)\n\n grid = params.get(\"grid\", None)\n if grid is not None:\n if not isinstance(grid, dict):\n grid = {}\n coord.grid(**grid)\n\n ticks = params.get(\"ticks\", None)\n if ticks is not None:\n if isinstance(ticks, bool):\n coord.set_ticks_visible(ticks)\n coord.set_ticklabel_visible(ticks)\n elif isinstance(ticks, dict):\n coord.set_ticks(**ticks)\n else:\n raise TypeError(\n \"The 'ticks' value in the coord_params dictionary must be a dict or a boolean.\"\n )\n\n def _setup_main_axes(self):\n self.axes = self.fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=self.wcs,\n slices=self.slices_wcsaxes)\n self._apply_coord_params(self.axes)\n\n def plot_start_image(self, ax):\n if self.plot_dimensionality == 1:\n artist = self.plot_start_image_1d(ax)\n\n elif self.plot_dimensionality == 2:\n artist = self.plot_start_image_2d(ax)\n\n return artist\n\n def update_plot(self, val, artist, slider):\n \"\"\"\n Update the plot when a slider changes.\n\n This method both updates the state of the Animator and also 
re-draws\n the matplotlib artist.\n \"\"\"\n ind = int(val)\n if ind == int(slider.cval):\n return\n ax_ind = self.slider_axes[slider.slider_ind]\n self.frame_slice[ax_ind] = ind\n self.slices_wcsaxes[self.wcs.pixel_n_dim - ax_ind - 1] = ind\n\n if self.plot_dimensionality == 1:\n self.update_plot_1d(val, artist, slider)\n elif self.plot_dimensionality == 2:\n self.update_plot_2d(val, artist, slider)\n\n self._apply_coord_params(self.axes)\n return super().update_plot(val, artist, slider)\n\n def plot_start_image_1d(self, ax):\n \"\"\"\n Set up a line plot.\n\n When plotting with WCSAxes, we always plot against pixel coordinate.\n \"\"\"\n if self.ylim != 'dynamic':\n ylim = self.ylim\n if ylim == 'fixed':\n ylim = (self.data.min(), self.data.max())\n ax.set_ylim(ylim)\n\n if self.ylabel:\n ax.set_ylabel(self.ylabel)\n\n ydata = self.data[self.frame_index]\n line, = ax.plot(ydata, **self.imshow_kwargs)\n\n if isinstance(self.data, np.ma.MaskedArray):\n ax.set_xlim((0, ydata.shape[0]))\n\n return line\n\n @property\n def data_transposed(self):\n \"\"\"\n Return data for 2D plotting, transposed if needed.\n \"\"\"\n if self.slices_wcsaxes.index('y') < self.slices_wcsaxes.index(\"x\"):\n return self.data[self.frame_index].transpose()\n else:\n return self.data[self.frame_index]\n\n def update_plot_1d(self, val, line, slider):\n \"\"\"\n Update the line plot.\n \"\"\"\n self.axes.reset_wcs(wcs=self.wcs, slices=self.slices_wcsaxes)\n line.set_ydata(self.data[self.frame_index])\n\n # If we are not setting ylim globally then we set it per frame.\n if self.ylim == 'dynamic':\n self.axes.set_ylim(self.data[self.frame_index].min(),\n self.data[self.frame_index].max())\n slider.cval = val\n\n def plot_start_image_2d(self, ax):\n \"\"\"\n Setup an image plot.\n \"\"\"\n imshow_args = {'interpolation': 'nearest',\n 'origin': 'lower'}\n imshow_args.update(self.imshow_kwargs)\n\n if self.clip_interval is not None:\n imshow_args['vmin'], imshow_args['vmax'] = self._get_2d_plot_limits()\n\n im = modest_image.imshow(ax, self.data_transposed, **imshow_args)\n\n if 'extent' in imshow_args:\n ax.set_xlim(imshow_args['extent'][:2])\n ax.set_ylim(imshow_args['extent'][2:])\n else:\n ny, nx = self.data_transposed.shape\n ax.set_xlim(-0.5, nx - 0.5)\n ax.set_ylim(-0.5, ny - 0.5)\n\n ax.dataLim.intervalx = ax.get_xlim()\n ax.dataLim.intervaly = ax.get_ylim()\n\n if self.if_colorbar:\n self._add_colorbar(im)\n\n return im\n\n def _get_2d_plot_limits(self):\n \"\"\"\n Get vmin, vmax of a data slice when clip_interval is specified.\n \"\"\"\n percent_limits = self.clip_interval.to('%').value\n vmin, vmax = AsymmetricPercentileInterval(*percent_limits).get_limits(self.data_transposed)\n return vmin, vmax\n\n def update_plot_2d(self, val, im, slider):\n \"\"\"\n Update the image plot.\n \"\"\"\n self.axes.reset_wcs(wcs=self.wcs, slices=self.slices_wcsaxes)\n im.set_array(self.data_transposed)\n\n if self.clip_interval is not None:\n vmin, vmax = self._get_2d_plot_limits()\n im.set_clim(vmin, vmax)\n\n slider.cval = val\n", "path": "sunpy/visualization/animator/wcs.py"}]} | 3,849 | 718 |
gh_patches_debug_5709 | rasdani/github-patches | git_diff | pulp__pulpcore-4152 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BaseDownloader.fetch passes coroutine to asyncio.wait which is forbidden in python 3.11
Python 3.8 deprecated passing coroutines to `asyncio.wait`, and Python 3.11 now [raises an error](https://github.com/python/cpython/blob/a6313d78f21f79ca64dedd38e637509dc530a1b6/Lib/asyncio/tasks.py#L414C13-L414C13). This causes the `BaseDownloader.fetch` call to fail on Python 3.11: https://github.com/pulp/pulpcore/blob/9dbcc8810f97f53297a933df2e1b74cdc324a8ea/pulpcore/download/base.py#L185 .
Python provides the solution in the error message: "Passing coroutines is forbidden, use tasks explicitly."
I believe this can be fixed by explicitly converting the coroutine to a task using asyncio's `create_task`.
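A minimal sketch of that suggestion, assuming the rest of `BaseDownloader` stays unchanged (this is the reporter's proposed direction, not necessarily the patch that was merged):

```python
# Hypothetical adaptation of BaseDownloader.fetch for Python 3.11.
import asyncio

class _Python311FetchMixin:
    def fetch(self):
        loop = asyncio.get_event_loop()
        # Wrap the coroutine in a Task explicitly; asyncio.wait() no longer
        # accepts bare coroutines on Python 3.11.
        task = loop.create_task(self.run())
        done, _ = loop.run_until_complete(asyncio.wait([task]))
        return done.pop().result()
```

An even simpler variant would be to drop `asyncio.wait` entirely and call `loop.run_until_complete(self.run())` directly.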
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pulpcore/download/base.py`
Content:
```
1 from gettext import gettext as _
2
3 import asyncio
4 from collections import namedtuple
5 import logging
6 import os
7 import tempfile
8 from urllib.parse import urlsplit
9
10 from pulpcore.app import pulp_hashlib
11 from pulpcore.app.models import Artifact
12 from pulpcore.exceptions import (
13 DigestValidationError,
14 SizeValidationError,
15 TimeoutException,
16 UnsupportedDigestValidationError,
17 )
18
19
20 log = logging.getLogger(__name__)
21
22
23 DownloadResult = namedtuple("DownloadResult", ["url", "artifact_attributes", "path", "headers"])
24 """
25 Args:
26 url (str): The url corresponding with the download.
27 path (str): The absolute path to the saved file
28 artifact_attributes (dict): Contains keys corresponding with
29 :class:`~pulpcore.plugin.models.Artifact` fields. This includes the computed digest values
30 along with size information.
31 headers (aiohttp.multidict.MultiDict): HTTP response headers. The keys are header names. The
32 values are header content. None when not using the HttpDownloader or sublclass.
33 """
34
35
36 class BaseDownloader:
37 """
38 The base class of all downloaders, providing digest calculation, validation, and file handling.
39
40 This is an abstract class and is meant to be subclassed. Subclasses are required to implement
41 the :meth:`~pulpcore.plugin.download.BaseDownloader.run` method and do two things:
42
43 1. Pass all downloaded data to
44 :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data` and schedule it.
45
46 2. Schedule :meth:`~pulpcore.plugin.download.BaseDownloader.finalize` after all data has
47 been delivered to :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.
48
49 Passing all downloaded data the into
50 :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data` allows the file digests to
51 be computed while data is written to disk. The digests computed are required if the download is
52 to be saved as an :class:`~pulpcore.plugin.models.Artifact` which avoids having to re-read the
53 data later.
54
55 The :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data` method by default
56 writes to a random file in the current working directory.
57
58 The call to :meth:`~pulpcore.plugin.download.BaseDownloader.finalize` ensures that all
59 data written to the file-like object is quiesced to disk before the file-like object has
60 `close()` called on it.
61
62 Attributes:
63 url (str): The url to download.
64 expected_digests (dict): Keyed on the algorithm name provided by hashlib and stores the
65 value of the expected digest. e.g. {'md5': '912ec803b2ce49e4a541068d495ab570'}
66 expected_size (int): The number of bytes the download is expected to have.
67 path (str): The full path to the file containing the downloaded data.
68 """
69
70 def __init__(
71 self,
72 url,
73 expected_digests=None,
74 expected_size=None,
75 semaphore=None,
76 *args,
77 **kwargs,
78 ):
79 """
80 Create a BaseDownloader object. This is expected to be called by all subclasses.
81
82 Args:
83 url (str): The url to download.
84 expected_digests (dict): Keyed on the algorithm name provided by hashlib and stores the
85 value of the expected digest. e.g. {'md5': '912ec803b2ce49e4a541068d495ab570'}
86 expected_size (int): The number of bytes the download is expected to have.
87 semaphore (asyncio.Semaphore): A semaphore the downloader must acquire before running.
88 Useful for limiting the number of outstanding downloaders in various ways.
89 """
90
91 self.url = url
92 self._writer = None
93 self.path = None
94 self.expected_digests = expected_digests
95 self.expected_size = expected_size
96 if semaphore:
97 self.semaphore = semaphore
98 else:
99 self.semaphore = asyncio.Semaphore() # This will always be acquired
100 self._digests = {}
101 self._size = 0
102 if self.expected_digests:
103 if not set(self.expected_digests).intersection(set(Artifact.DIGEST_FIELDS)):
104 raise UnsupportedDigestValidationError(
105 _(
106 "Content at the URL '{}' does not contain at least one trusted hasher which"
107 " is specified in the 'ALLOWED_CONTENT_CHECKSUMS' setting ({}). The"
108 " downloader expected one of the following hashers: {}"
109 ).format(self.url, Artifact.DIGEST_FIELDS, set(self.expected_digests))
110 )
111
112 def _ensure_writer_has_open_file(self):
113 """
114 Create a temporary file on demand.
115
116 Create a temporary file when it's actually used,
117 allowing plugin writers to instantiate many downloaders in memory.
118 """
119 if not self._writer:
120 filename = urlsplit(self.url).path.split("/")[-1]
121 # linux allows any character except NUL or / in a filename and has a length limit of
122 # 255. Making it urlencoding-aware would be nice, but not critical, because urlencoded
123 # paths should be OK
124 is_legal_filename = filename and (len(filename) <= 243) # 255 - prefix length
125 # if the filename isn't legal then we just fall back to no suffix (random name)
126 suffix = "-" + filename if is_legal_filename else None
127 # write the file to the current working directory with a random prefix and the
128 # desired suffix. we always want the random prefix as it is possible to download
129 # the same filename from two different URLs, and the files may not be the same.
130 self._writer = tempfile.NamedTemporaryFile(dir=".", suffix=suffix, delete=False)
131 self.path = self._writer.name
132 self._digests = {n: pulp_hashlib.new(n) for n in Artifact.DIGEST_FIELDS}
133 self._size = 0
134
135 async def handle_data(self, data):
136 """
137 A coroutine that writes data to the file object and compute its digests.
138
139 All subclassed downloaders are expected to pass all data downloaded to this method. Similar
140 to the hashlib docstring, repeated calls are equivalent to a single call with
141 the concatenation of all the arguments: m.handle_data(a); m.handle_data(b) is equivalent to
142 m.handle_data(a+b).
143
144 Args:
145 data (bytes): The data to be handled by the downloader.
146 """
147 self._ensure_writer_has_open_file()
148 self._writer.write(data)
149 self._record_size_and_digests_for_data(data)
150
151 async def finalize(self):
152 """
153 A coroutine to flush downloaded data, close the file writer, and validate the data.
154
155 All subclasses are required to call this method after all data has been passed to
156 :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.
157
158 Raises:
159 :class:`~pulpcore.exceptions.DigestValidationError`: When any of the ``expected_digest``
160 values don't match the digest of the data passed to
161 :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.
162 :class:`~pulpcore.exceptions.SizeValidationError`: When the ``expected_size`` value
163 doesn't match the size of the data passed to
164 :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.
165 """
166 self._ensure_writer_has_open_file()
167 self._writer.flush()
168 os.fsync(self._writer.fileno())
169 self._writer.close()
170 self._writer = None
171 self.validate_digests()
172 self.validate_size()
173 log.debug(f"Downloaded file from {self.url}")
174
175 def fetch(self):
176 """
177 Run the download synchronously and return the `DownloadResult`.
178
179 Returns:
180 :class:`~pulpcore.plugin.download.DownloadResult`
181
182 Raises:
183 Exception: Any fatal exception emitted during downloading
184 """
185 done, _ = asyncio.get_event_loop().run_until_complete(asyncio.wait([self.run()]))
186 return done.pop().result()
187
188 def _record_size_and_digests_for_data(self, data):
189 """
190 Record the size and digest for an available chunk of data.
191
192 Args:
193 data (bytes): The data to have its size and digest values recorded.
194 """
195 for algorithm in self._digests.values():
196 algorithm.update(data)
197 self._size += len(data)
198
199 @property
200 def artifact_attributes(self):
201 """
202 A property that returns a dictionary with size and digest information. The keys of this
203 dictionary correspond with :class:`~pulpcore.plugin.models.Artifact` fields.
204 """
205 attributes = {"size": self._size}
206 for algorithm in self._digests:
207 attributes[algorithm] = self._digests[algorithm].hexdigest()
208 return attributes
209
210 def validate_digests(self):
211 """
212 Validate all digests validate if ``expected_digests`` is set
213
214 Raises:
215 :class:`~pulpcore.exceptions.DigestValidationError`: When any of the ``expected_digest``
216 values don't match the digest of the data passed to
217 :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.
218 """
219 if self.expected_digests:
220 for algorithm, expected_digest in self.expected_digests.items():
221 actual_digest = self._digests[algorithm].hexdigest()
222 if actual_digest != expected_digest:
223 raise DigestValidationError(actual_digest, expected_digest, url=self.url)
224
225 def validate_size(self):
226 """
227 Validate the size if ``expected_size`` is set
228
229 Raises:
230 :class:`~pulpcore.exceptions.SizeValidationError`: When the ``expected_size`` value
231 doesn't match the size of the data passed to
232 :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.
233 """
234 if self.expected_size:
235 actual_size = self._size
236 expected_size = self.expected_size
237 if actual_size != expected_size:
238 raise SizeValidationError(actual_size, expected_size, url=self.url)
239
240 async def run(self, extra_data=None):
241 """
242 Run the downloader with concurrency restriction.
243
244 This method acquires `self.semaphore` before calling the actual download implementation
245 contained in `_run()`. This ensures that the semaphore stays acquired even as the `backoff`
246 decorator on `_run()`, handles backoff-and-retry logic.
247
248 Args:
249 extra_data (dict): Extra data passed to the downloader.
250
251 Returns:
252 :class:`~pulpcore.plugin.download.DownloadResult` from `_run()`.
253
254 """
255 async with self.semaphore:
256 try:
257 return await self._run(extra_data=extra_data)
258 except asyncio.TimeoutError:
259 raise TimeoutException(self.url)
260
261 async def _run(self, extra_data=None):
262 """
263 Run the downloader.
264
265 This is a coroutine that asyncio can schedule to complete downloading. Subclasses are
266 required to implement this method and do two things:
267
268 1. Pass all downloaded data to
269 :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.
270
271 2. Call :meth:`~pulpcore.plugin.download.BaseDownloader.finalize` after all data has
272 been delivered to :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.
273
274 It is also expected that the subclass implementation return a
275 :class:`~pulpcore.plugin.download.DownloadResult` object. The
276 ``artifact_attributes`` value of the
277 :class:`~pulpcore.plugin.download.DownloadResult` is usually set to the
278 :attr:`~pulpcore.plugin.download.BaseDownloader.artifact_attributes` property value.
279
280 This method is called from :meth:`~pulpcore.plugin.download.BaseDownloader.run` which
281 handles concurrency restriction. Thus, by the time this method is called, the download can
282 occur without violating the concurrency restriction.
283
284 Args:
285 extra_data (dict): Extra data passed to the downloader.
286
287 Returns:
288 :class:`~pulpcore.plugin.download.DownloadResult`
289
290 Raises:
291 Validation errors could be emitted when subclassed implementations call
292 :meth:`~pulpcore.plugin.download.BaseDownloader.finalize`.
293 """
294 raise NotImplementedError("Subclasses must define a _run() method that returns a coroutine")
295
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pulpcore/download/base.py b/pulpcore/download/base.py
--- a/pulpcore/download/base.py
+++ b/pulpcore/download/base.py
@@ -182,8 +182,8 @@
Raises:
Exception: Any fatal exception emitted during downloading
"""
- done, _ = asyncio.get_event_loop().run_until_complete(asyncio.wait([self.run()]))
- return done.pop().result()
+ result = asyncio.get_event_loop().run_until_complete(self.run())
+ return result
def _record_size_and_digests_for_data(self, data):
"""
| {"golden_diff": "diff --git a/pulpcore/download/base.py b/pulpcore/download/base.py\n--- a/pulpcore/download/base.py\n+++ b/pulpcore/download/base.py\n@@ -182,8 +182,8 @@\n Raises:\n Exception: Any fatal exception emitted during downloading\n \"\"\"\n- done, _ = asyncio.get_event_loop().run_until_complete(asyncio.wait([self.run()]))\n- return done.pop().result()\n+ result = asyncio.get_event_loop().run_until_complete(self.run())\n+ return result\n \n def _record_size_and_digests_for_data(self, data):\n \"\"\"\n", "issue": "BaseDownloader.fetch passes coroutine to asyncio.wait which is forbidden in python 3.11\nPython 3.8 deprecated passing coroutines to `asyncio.wait` and Python 3.11 will now [raise an error](https://github.com/python/cpython/blob/a6313d78f21f79ca64dedd38e637509dc530a1b6/Lib/asyncio/tasks.py#L414C13-L414C13). This causes the BaseDownloader.fetch call to fail on Python 3.11 https://github.com/pulp/pulpcore/blob/9dbcc8810f97f53297a933df2e1b74cdc324a8ea/pulpcore/download/base.py#L185 .\r\n\r\nPython provides the solution in the error message: \"Passing coroutines is forbidden, use tasks explicitly.\"\r\n\r\nI believe this can be fixed by explicitly converting the coroutine to a task using asyncio's `create_task`\n", "before_files": [{"content": "from gettext import gettext as _\n\nimport asyncio\nfrom collections import namedtuple\nimport logging\nimport os\nimport tempfile\nfrom urllib.parse import urlsplit\n\nfrom pulpcore.app import pulp_hashlib\nfrom pulpcore.app.models import Artifact\nfrom pulpcore.exceptions import (\n DigestValidationError,\n SizeValidationError,\n TimeoutException,\n UnsupportedDigestValidationError,\n)\n\n\nlog = logging.getLogger(__name__)\n\n\nDownloadResult = namedtuple(\"DownloadResult\", [\"url\", \"artifact_attributes\", \"path\", \"headers\"])\n\"\"\"\nArgs:\n url (str): The url corresponding with the download.\n path (str): The absolute path to the saved file\n artifact_attributes (dict): Contains keys corresponding with\n :class:`~pulpcore.plugin.models.Artifact` fields. This includes the computed digest values\n along with size information.\n headers (aiohttp.multidict.MultiDict): HTTP response headers. The keys are header names. The\n values are header content. None when not using the HttpDownloader or sublclass.\n\"\"\"\n\n\nclass BaseDownloader:\n \"\"\"\n The base class of all downloaders, providing digest calculation, validation, and file handling.\n\n This is an abstract class and is meant to be subclassed. Subclasses are required to implement\n the :meth:`~pulpcore.plugin.download.BaseDownloader.run` method and do two things:\n\n 1. Pass all downloaded data to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data` and schedule it.\n\n 2. Schedule :meth:`~pulpcore.plugin.download.BaseDownloader.finalize` after all data has\n been delivered to :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n\n Passing all downloaded data the into\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data` allows the file digests to\n be computed while data is written to disk. 
The digests computed are required if the download is\n to be saved as an :class:`~pulpcore.plugin.models.Artifact` which avoids having to re-read the\n data later.\n\n The :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data` method by default\n writes to a random file in the current working directory.\n\n The call to :meth:`~pulpcore.plugin.download.BaseDownloader.finalize` ensures that all\n data written to the file-like object is quiesced to disk before the file-like object has\n `close()` called on it.\n\n Attributes:\n url (str): The url to download.\n expected_digests (dict): Keyed on the algorithm name provided by hashlib and stores the\n value of the expected digest. e.g. {'md5': '912ec803b2ce49e4a541068d495ab570'}\n expected_size (int): The number of bytes the download is expected to have.\n path (str): The full path to the file containing the downloaded data.\n \"\"\"\n\n def __init__(\n self,\n url,\n expected_digests=None,\n expected_size=None,\n semaphore=None,\n *args,\n **kwargs,\n ):\n \"\"\"\n Create a BaseDownloader object. This is expected to be called by all subclasses.\n\n Args:\n url (str): The url to download.\n expected_digests (dict): Keyed on the algorithm name provided by hashlib and stores the\n value of the expected digest. e.g. {'md5': '912ec803b2ce49e4a541068d495ab570'}\n expected_size (int): The number of bytes the download is expected to have.\n semaphore (asyncio.Semaphore): A semaphore the downloader must acquire before running.\n Useful for limiting the number of outstanding downloaders in various ways.\n \"\"\"\n\n self.url = url\n self._writer = None\n self.path = None\n self.expected_digests = expected_digests\n self.expected_size = expected_size\n if semaphore:\n self.semaphore = semaphore\n else:\n self.semaphore = asyncio.Semaphore() # This will always be acquired\n self._digests = {}\n self._size = 0\n if self.expected_digests:\n if not set(self.expected_digests).intersection(set(Artifact.DIGEST_FIELDS)):\n raise UnsupportedDigestValidationError(\n _(\n \"Content at the URL '{}' does not contain at least one trusted hasher which\"\n \" is specified in the 'ALLOWED_CONTENT_CHECKSUMS' setting ({}). The\"\n \" downloader expected one of the following hashers: {}\"\n ).format(self.url, Artifact.DIGEST_FIELDS, set(self.expected_digests))\n )\n\n def _ensure_writer_has_open_file(self):\n \"\"\"\n Create a temporary file on demand.\n\n Create a temporary file when it's actually used,\n allowing plugin writers to instantiate many downloaders in memory.\n \"\"\"\n if not self._writer:\n filename = urlsplit(self.url).path.split(\"/\")[-1]\n # linux allows any character except NUL or / in a filename and has a length limit of\n # 255. Making it urlencoding-aware would be nice, but not critical, because urlencoded\n # paths should be OK\n is_legal_filename = filename and (len(filename) <= 243) # 255 - prefix length\n # if the filename isn't legal then we just fall back to no suffix (random name)\n suffix = \"-\" + filename if is_legal_filename else None\n # write the file to the current working directory with a random prefix and the\n # desired suffix. 
we always want the random prefix as it is possible to download\n # the same filename from two different URLs, and the files may not be the same.\n self._writer = tempfile.NamedTemporaryFile(dir=\".\", suffix=suffix, delete=False)\n self.path = self._writer.name\n self._digests = {n: pulp_hashlib.new(n) for n in Artifact.DIGEST_FIELDS}\n self._size = 0\n\n async def handle_data(self, data):\n \"\"\"\n A coroutine that writes data to the file object and compute its digests.\n\n All subclassed downloaders are expected to pass all data downloaded to this method. Similar\n to the hashlib docstring, repeated calls are equivalent to a single call with\n the concatenation of all the arguments: m.handle_data(a); m.handle_data(b) is equivalent to\n m.handle_data(a+b).\n\n Args:\n data (bytes): The data to be handled by the downloader.\n \"\"\"\n self._ensure_writer_has_open_file()\n self._writer.write(data)\n self._record_size_and_digests_for_data(data)\n\n async def finalize(self):\n \"\"\"\n A coroutine to flush downloaded data, close the file writer, and validate the data.\n\n All subclasses are required to call this method after all data has been passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n\n Raises:\n :class:`~pulpcore.exceptions.DigestValidationError`: When any of the ``expected_digest``\n values don't match the digest of the data passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n :class:`~pulpcore.exceptions.SizeValidationError`: When the ``expected_size`` value\n doesn't match the size of the data passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n \"\"\"\n self._ensure_writer_has_open_file()\n self._writer.flush()\n os.fsync(self._writer.fileno())\n self._writer.close()\n self._writer = None\n self.validate_digests()\n self.validate_size()\n log.debug(f\"Downloaded file from {self.url}\")\n\n def fetch(self):\n \"\"\"\n Run the download synchronously and return the `DownloadResult`.\n\n Returns:\n :class:`~pulpcore.plugin.download.DownloadResult`\n\n Raises:\n Exception: Any fatal exception emitted during downloading\n \"\"\"\n done, _ = asyncio.get_event_loop().run_until_complete(asyncio.wait([self.run()]))\n return done.pop().result()\n\n def _record_size_and_digests_for_data(self, data):\n \"\"\"\n Record the size and digest for an available chunk of data.\n\n Args:\n data (bytes): The data to have its size and digest values recorded.\n \"\"\"\n for algorithm in self._digests.values():\n algorithm.update(data)\n self._size += len(data)\n\n @property\n def artifact_attributes(self):\n \"\"\"\n A property that returns a dictionary with size and digest information. 
The keys of this\n dictionary correspond with :class:`~pulpcore.plugin.models.Artifact` fields.\n \"\"\"\n attributes = {\"size\": self._size}\n for algorithm in self._digests:\n attributes[algorithm] = self._digests[algorithm].hexdigest()\n return attributes\n\n def validate_digests(self):\n \"\"\"\n Validate all digests validate if ``expected_digests`` is set\n\n Raises:\n :class:`~pulpcore.exceptions.DigestValidationError`: When any of the ``expected_digest``\n values don't match the digest of the data passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n \"\"\"\n if self.expected_digests:\n for algorithm, expected_digest in self.expected_digests.items():\n actual_digest = self._digests[algorithm].hexdigest()\n if actual_digest != expected_digest:\n raise DigestValidationError(actual_digest, expected_digest, url=self.url)\n\n def validate_size(self):\n \"\"\"\n Validate the size if ``expected_size`` is set\n\n Raises:\n :class:`~pulpcore.exceptions.SizeValidationError`: When the ``expected_size`` value\n doesn't match the size of the data passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n \"\"\"\n if self.expected_size:\n actual_size = self._size\n expected_size = self.expected_size\n if actual_size != expected_size:\n raise SizeValidationError(actual_size, expected_size, url=self.url)\n\n async def run(self, extra_data=None):\n \"\"\"\n Run the downloader with concurrency restriction.\n\n This method acquires `self.semaphore` before calling the actual download implementation\n contained in `_run()`. This ensures that the semaphore stays acquired even as the `backoff`\n decorator on `_run()`, handles backoff-and-retry logic.\n\n Args:\n extra_data (dict): Extra data passed to the downloader.\n\n Returns:\n :class:`~pulpcore.plugin.download.DownloadResult` from `_run()`.\n\n \"\"\"\n async with self.semaphore:\n try:\n return await self._run(extra_data=extra_data)\n except asyncio.TimeoutError:\n raise TimeoutException(self.url)\n\n async def _run(self, extra_data=None):\n \"\"\"\n Run the downloader.\n\n This is a coroutine that asyncio can schedule to complete downloading. Subclasses are\n required to implement this method and do two things:\n\n 1. Pass all downloaded data to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n\n 2. Call :meth:`~pulpcore.plugin.download.BaseDownloader.finalize` after all data has\n been delivered to :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n\n It is also expected that the subclass implementation return a\n :class:`~pulpcore.plugin.download.DownloadResult` object. The\n ``artifact_attributes`` value of the\n :class:`~pulpcore.plugin.download.DownloadResult` is usually set to the\n :attr:`~pulpcore.plugin.download.BaseDownloader.artifact_attributes` property value.\n\n This method is called from :meth:`~pulpcore.plugin.download.BaseDownloader.run` which\n handles concurrency restriction. 
Thus, by the time this method is called, the download can\n occur without violating the concurrency restriction.\n\n Args:\n extra_data (dict): Extra data passed to the downloader.\n\n Returns:\n :class:`~pulpcore.plugin.download.DownloadResult`\n\n Raises:\n Validation errors could be emitted when subclassed implementations call\n :meth:`~pulpcore.plugin.download.BaseDownloader.finalize`.\n \"\"\"\n raise NotImplementedError(\"Subclasses must define a _run() method that returns a coroutine\")\n", "path": "pulpcore/download/base.py"}], "after_files": [{"content": "from gettext import gettext as _\n\nimport asyncio\nfrom collections import namedtuple\nimport logging\nimport os\nimport tempfile\nfrom urllib.parse import urlsplit\n\nfrom pulpcore.app import pulp_hashlib\nfrom pulpcore.app.models import Artifact\nfrom pulpcore.exceptions import (\n DigestValidationError,\n SizeValidationError,\n TimeoutException,\n UnsupportedDigestValidationError,\n)\n\n\nlog = logging.getLogger(__name__)\n\n\nDownloadResult = namedtuple(\"DownloadResult\", [\"url\", \"artifact_attributes\", \"path\", \"headers\"])\n\"\"\"\nArgs:\n url (str): The url corresponding with the download.\n path (str): The absolute path to the saved file\n artifact_attributes (dict): Contains keys corresponding with\n :class:`~pulpcore.plugin.models.Artifact` fields. This includes the computed digest values\n along with size information.\n headers (aiohttp.multidict.MultiDict): HTTP response headers. The keys are header names. The\n values are header content. None when not using the HttpDownloader or sublclass.\n\"\"\"\n\n\nclass BaseDownloader:\n \"\"\"\n The base class of all downloaders, providing digest calculation, validation, and file handling.\n\n This is an abstract class and is meant to be subclassed. Subclasses are required to implement\n the :meth:`~pulpcore.plugin.download.BaseDownloader.run` method and do two things:\n\n 1. Pass all downloaded data to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data` and schedule it.\n\n 2. Schedule :meth:`~pulpcore.plugin.download.BaseDownloader.finalize` after all data has\n been delivered to :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n\n Passing all downloaded data the into\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data` allows the file digests to\n be computed while data is written to disk. The digests computed are required if the download is\n to be saved as an :class:`~pulpcore.plugin.models.Artifact` which avoids having to re-read the\n data later.\n\n The :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data` method by default\n writes to a random file in the current working directory.\n\n The call to :meth:`~pulpcore.plugin.download.BaseDownloader.finalize` ensures that all\n data written to the file-like object is quiesced to disk before the file-like object has\n `close()` called on it.\n\n Attributes:\n url (str): The url to download.\n expected_digests (dict): Keyed on the algorithm name provided by hashlib and stores the\n value of the expected digest. e.g. {'md5': '912ec803b2ce49e4a541068d495ab570'}\n expected_size (int): The number of bytes the download is expected to have.\n path (str): The full path to the file containing the downloaded data.\n \"\"\"\n\n def __init__(\n self,\n url,\n expected_digests=None,\n expected_size=None,\n semaphore=None,\n *args,\n **kwargs,\n ):\n \"\"\"\n Create a BaseDownloader object. 
This is expected to be called by all subclasses.\n\n Args:\n url (str): The url to download.\n expected_digests (dict): Keyed on the algorithm name provided by hashlib and stores the\n value of the expected digest. e.g. {'md5': '912ec803b2ce49e4a541068d495ab570'}\n expected_size (int): The number of bytes the download is expected to have.\n semaphore (asyncio.Semaphore): A semaphore the downloader must acquire before running.\n Useful for limiting the number of outstanding downloaders in various ways.\n \"\"\"\n\n self.url = url\n self._writer = None\n self.path = None\n self.expected_digests = expected_digests\n self.expected_size = expected_size\n if semaphore:\n self.semaphore = semaphore\n else:\n self.semaphore = asyncio.Semaphore() # This will always be acquired\n self._digests = {}\n self._size = 0\n if self.expected_digests:\n if not set(self.expected_digests).intersection(set(Artifact.DIGEST_FIELDS)):\n raise UnsupportedDigestValidationError(\n _(\n \"Content at the URL '{}' does not contain at least one trusted hasher which\"\n \" is specified in the 'ALLOWED_CONTENT_CHECKSUMS' setting ({}). The\"\n \" downloader expected one of the following hashers: {}\"\n ).format(self.url, Artifact.DIGEST_FIELDS, set(self.expected_digests))\n )\n\n def _ensure_writer_has_open_file(self):\n \"\"\"\n Create a temporary file on demand.\n\n Create a temporary file when it's actually used,\n allowing plugin writers to instantiate many downloaders in memory.\n \"\"\"\n if not self._writer:\n filename = urlsplit(self.url).path.split(\"/\")[-1]\n # linux allows any character except NUL or / in a filename and has a length limit of\n # 255. Making it urlencoding-aware would be nice, but not critical, because urlencoded\n # paths should be OK\n is_legal_filename = filename and (len(filename) <= 243) # 255 - prefix length\n # if the filename isn't legal then we just fall back to no suffix (random name)\n suffix = \"-\" + filename if is_legal_filename else None\n # write the file to the current working directory with a random prefix and the\n # desired suffix. we always want the random prefix as it is possible to download\n # the same filename from two different URLs, and the files may not be the same.\n self._writer = tempfile.NamedTemporaryFile(dir=\".\", suffix=suffix, delete=False)\n self.path = self._writer.name\n self._digests = {n: pulp_hashlib.new(n) for n in Artifact.DIGEST_FIELDS}\n self._size = 0\n\n async def handle_data(self, data):\n \"\"\"\n A coroutine that writes data to the file object and compute its digests.\n\n All subclassed downloaders are expected to pass all data downloaded to this method. 
Similar\n to the hashlib docstring, repeated calls are equivalent to a single call with\n the concatenation of all the arguments: m.handle_data(a); m.handle_data(b) is equivalent to\n m.handle_data(a+b).\n\n Args:\n data (bytes): The data to be handled by the downloader.\n \"\"\"\n self._ensure_writer_has_open_file()\n self._writer.write(data)\n self._record_size_and_digests_for_data(data)\n\n async def finalize(self):\n \"\"\"\n A coroutine to flush downloaded data, close the file writer, and validate the data.\n\n All subclasses are required to call this method after all data has been passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n\n Raises:\n :class:`~pulpcore.exceptions.DigestValidationError`: When any of the ``expected_digest``\n values don't match the digest of the data passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n :class:`~pulpcore.exceptions.SizeValidationError`: When the ``expected_size`` value\n doesn't match the size of the data passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n \"\"\"\n self._ensure_writer_has_open_file()\n self._writer.flush()\n os.fsync(self._writer.fileno())\n self._writer.close()\n self._writer = None\n self.validate_digests()\n self.validate_size()\n log.debug(f\"Downloaded file from {self.url}\")\n\n def fetch(self):\n \"\"\"\n Run the download synchronously and return the `DownloadResult`.\n\n Returns:\n :class:`~pulpcore.plugin.download.DownloadResult`\n\n Raises:\n Exception: Any fatal exception emitted during downloading\n \"\"\"\n result = asyncio.get_event_loop().run_until_complete(self.run())\n return result\n\n def _record_size_and_digests_for_data(self, data):\n \"\"\"\n Record the size and digest for an available chunk of data.\n\n Args:\n data (bytes): The data to have its size and digest values recorded.\n \"\"\"\n for algorithm in self._digests.values():\n algorithm.update(data)\n self._size += len(data)\n\n @property\n def artifact_attributes(self):\n \"\"\"\n A property that returns a dictionary with size and digest information. 
The keys of this\n dictionary correspond with :class:`~pulpcore.plugin.models.Artifact` fields.\n \"\"\"\n attributes = {\"size\": self._size}\n for algorithm in self._digests:\n attributes[algorithm] = self._digests[algorithm].hexdigest()\n return attributes\n\n def validate_digests(self):\n \"\"\"\n Validate all digests validate if ``expected_digests`` is set\n\n Raises:\n :class:`~pulpcore.exceptions.DigestValidationError`: When any of the ``expected_digest``\n values don't match the digest of the data passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n \"\"\"\n if self.expected_digests:\n for algorithm, expected_digest in self.expected_digests.items():\n actual_digest = self._digests[algorithm].hexdigest()\n if actual_digest != expected_digest:\n raise DigestValidationError(actual_digest, expected_digest, url=self.url)\n\n def validate_size(self):\n \"\"\"\n Validate the size if ``expected_size`` is set\n\n Raises:\n :class:`~pulpcore.exceptions.SizeValidationError`: When the ``expected_size`` value\n doesn't match the size of the data passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n \"\"\"\n if self.expected_size:\n actual_size = self._size\n expected_size = self.expected_size\n if actual_size != expected_size:\n raise SizeValidationError(actual_size, expected_size, url=self.url)\n\n async def run(self, extra_data=None):\n \"\"\"\n Run the downloader with concurrency restriction.\n\n This method acquires `self.semaphore` before calling the actual download implementation\n contained in `_run()`. This ensures that the semaphore stays acquired even as the `backoff`\n decorator on `_run()`, handles backoff-and-retry logic.\n\n Args:\n extra_data (dict): Extra data passed to the downloader.\n\n Returns:\n :class:`~pulpcore.plugin.download.DownloadResult` from `_run()`.\n\n \"\"\"\n async with self.semaphore:\n try:\n return await self._run(extra_data=extra_data)\n except asyncio.TimeoutError:\n raise TimeoutException(self.url)\n\n async def _run(self, extra_data=None):\n \"\"\"\n Run the downloader.\n\n This is a coroutine that asyncio can schedule to complete downloading. Subclasses are\n required to implement this method and do two things:\n\n 1. Pass all downloaded data to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n\n 2. Call :meth:`~pulpcore.plugin.download.BaseDownloader.finalize` after all data has\n been delivered to :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n\n It is also expected that the subclass implementation return a\n :class:`~pulpcore.plugin.download.DownloadResult` object. The\n ``artifact_attributes`` value of the\n :class:`~pulpcore.plugin.download.DownloadResult` is usually set to the\n :attr:`~pulpcore.plugin.download.BaseDownloader.artifact_attributes` property value.\n\n This method is called from :meth:`~pulpcore.plugin.download.BaseDownloader.run` which\n handles concurrency restriction. Thus, by the time this method is called, the download can\n occur without violating the concurrency restriction.\n\n Args:\n extra_data (dict): Extra data passed to the downloader.\n\n Returns:\n :class:`~pulpcore.plugin.download.DownloadResult`\n\n Raises:\n Validation errors could be emitted when subclassed implementations call\n :meth:`~pulpcore.plugin.download.BaseDownloader.finalize`.\n \"\"\"\n raise NotImplementedError(\"Subclasses must define a _run() method that returns a coroutine\")\n", "path": "pulpcore/download/base.py"}]} | 3,894 | 131 |
gh_patches_debug_14037 | rasdani/github-patches | git_diff | voicepaw__so-vits-svc-fork-557 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PermissionError: [Errno 13] Permission denied
I'm on Windows 10 WSL2 Ubuntu and this happens when I try to run `svc pre-resample`, what's going on?
Traceback (most recent call last):
File "/home/fab/miniconda3/envs/sovits/bin/svc", line 5, in <module>
from so_vits_svc_fork.__main__ import cli
File "/home/fab/miniconda3/envs/sovits/lib/python3.10/site-packages/so_vits_svc_fork/__init__.py", line 5, in <module>
init_logger()
File "/home/fab/miniconda3/envs/sovits/lib/python3.10/site-packages/so_vits_svc_fork/logger.py", line 31, in init_logger
FileHandler(f"{__name__.split('.')[0]}.log"),
File "/home/fab/miniconda3/envs/sovits/lib/python3.10/logging/__init__.py", line 1169, in __init__
StreamHandler.__init__(self, self._open())
File "/home/fab/miniconda3/envs/sovits/lib/python3.10/logging/__init__.py", line 1201, in _open
return open_func(self.baseFilename, self.mode,
PermissionError: [Errno 13] Permission denied: '/home/fab/sovits/so_vits_svc_fork.log'
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/so_vits_svc_fork/logger.py`
Content:
```
1 import os
2 import sys
3 from logging import (
4 DEBUG,
5 INFO,
6 FileHandler,
7 StreamHandler,
8 basicConfig,
9 captureWarnings,
10 getLogger,
11 )
12 from pathlib import Path
13
14 from rich.logging import RichHandler
15
16 LOGGER_INIT = False
17
18
19 def init_logger() -> None:
20 global LOGGER_INIT
21 if LOGGER_INIT:
22 return
23
24 IS_TEST = "test" in Path.cwd().stem
25 package_name = sys.modules[__name__].__package__
26 basicConfig(
27 level=INFO,
28 format="%(asctime)s %(message)s",
29 datefmt="[%X]",
30 handlers=[
31 StreamHandler() if is_notebook() else RichHandler(),
32 FileHandler(f"{package_name}.log"),
33 ],
34 )
35 if IS_TEST:
36 getLogger(package_name).setLevel(DEBUG)
37 captureWarnings(True)
38 LOGGER_INIT = True
39
40
41 def is_notebook():
42 try:
43 from IPython import get_ipython
44
45 if "IPKernelApp" not in get_ipython().config: # pragma: no cover
46 raise ImportError("console")
47 return False
48 if "VSCODE_PID" in os.environ: # pragma: no cover
49 raise ImportError("vscode")
50 return False
51 except Exception:
52 return False
53 else: # pragma: no cover
54 return True
55
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/so_vits_svc_fork/logger.py b/src/so_vits_svc_fork/logger.py
--- a/src/so_vits_svc_fork/logger.py
+++ b/src/so_vits_svc_fork/logger.py
@@ -1,14 +1,6 @@
import os
import sys
-from logging import (
- DEBUG,
- INFO,
- FileHandler,
- StreamHandler,
- basicConfig,
- captureWarnings,
- getLogger,
-)
+from logging import DEBUG, INFO, StreamHandler, basicConfig, captureWarnings, getLogger
 from pathlib import Path
 
 from rich.logging import RichHandler
@@ -29,7 +21,7 @@
datefmt="[%X]",
handlers=[
StreamHandler() if is_notebook() else RichHandler(),
- FileHandler(f"{package_name}.log"),
+ # FileHandler(f"{package_name}.log"),
],
)
if IS_TEST:
| {"golden_diff": "diff --git a/src/so_vits_svc_fork/logger.py b/src/so_vits_svc_fork/logger.py\n--- a/src/so_vits_svc_fork/logger.py\n+++ b/src/so_vits_svc_fork/logger.py\n@@ -1,14 +1,6 @@\n import os\n import sys\n-from logging import (\n- DEBUG,\n- INFO,\n- FileHandler,\n- StreamHandler,\n- basicConfig,\n- captureWarnings,\n- getLogger,\n-)\n+from logging import DEBUG, INFO, StreamHandler, basicConfig, captureWarnings, getLogger\n from pathlib import Path\n \n from rich.logging import RichHandler\n@@ -29,7 +21,7 @@\n datefmt=\"[%X]\",\n handlers=[\n StreamHandler() if is_notebook() else RichHandler(),\n- FileHandler(f\"{package_name}.log\"),\n+ # FileHandler(f\"{package_name}.log\"),\n ],\n )\n if IS_TEST:\n", "issue": "PermissionError: [Errno 13] Permission denied\nI'm on Windows 10 WSL2 Ubuntu and this happens when I try to run `svc pre-resample`, what's going on?\r\n\r\nTraceback (most recent call last):\r\n File \"/home/fab/miniconda3/envs/sovits/bin/svc\", line 5, in <module>\r\n from so_vits_svc_fork.__main__ import cli\r\n File \"/home/fab/miniconda3/envs/sovits/lib/python3.10/site-packages/so_vits_svc_fork/__init__.py\", line 5, in <module>\r\n init_logger()\r\n File \"/home/fab/miniconda3/envs/sovits/lib/python3.10/site-packages/so_vits_svc_fork/logger.py\", line 31, in init_logger\r\n FileHandler(f\"{__name__.split('.')[0]}.log\"),\r\n File \"/home/fab/miniconda3/envs/sovits/lib/python3.10/logging/__init__.py\", line 1169, in __init__\r\n StreamHandler.__init__(self, self._open())\r\n File \"/home/fab/miniconda3/envs/sovits/lib/python3.10/logging/__init__.py\", line 1201, in _open\r\n return open_func(self.baseFilename, self.mode,\r\nPermissionError: [Errno 13] Permission denied: '/home/fab/sovits/so_vits_svc_fork.log'\n", "before_files": [{"content": "import os\nimport sys\nfrom logging import (\n DEBUG,\n INFO,\n FileHandler,\n StreamHandler,\n basicConfig,\n captureWarnings,\n getLogger,\n)\nfrom pathlib import Path\n\nfrom rich.logging import RichHandler\n\nLOGGER_INIT = False\n\n\ndef init_logger() -> None:\n global LOGGER_INIT\n if LOGGER_INIT:\n return\n\n IS_TEST = \"test\" in Path.cwd().stem\n package_name = sys.modules[__name__].__package__\n basicConfig(\n level=INFO,\n format=\"%(asctime)s %(message)s\",\n datefmt=\"[%X]\",\n handlers=[\n StreamHandler() if is_notebook() else RichHandler(),\n FileHandler(f\"{package_name}.log\"),\n ],\n )\n if IS_TEST:\n getLogger(package_name).setLevel(DEBUG)\n captureWarnings(True)\n LOGGER_INIT = True\n\n\ndef is_notebook():\n try:\n from IPython import get_ipython\n\n if \"IPKernelApp\" not in get_ipython().config: # pragma: no cover\n raise ImportError(\"console\")\n return False\n if \"VSCODE_PID\" in os.environ: # pragma: no cover\n raise ImportError(\"vscode\")\n return False\n except Exception:\n return False\n else: # pragma: no cover\n return True\n", "path": "src/so_vits_svc_fork/logger.py"}], "after_files": [{"content": "import os\nimport sys\nfrom logging import DEBUG, INFO, StreamHandler, basicConfig, captureWarnings, getLogger\nfrom pathlib import Path\n\nfrom rich.logging import RichHandler\n\nLOGGER_INIT = False\n\n\ndef init_logger() -> None:\n global LOGGER_INIT\n if LOGGER_INIT:\n return\n\n IS_TEST = \"test\" in Path.cwd().stem\n package_name = sys.modules[__name__].__package__\n basicConfig(\n level=INFO,\n format=\"%(asctime)s %(message)s\",\n datefmt=\"[%X]\",\n handlers=[\n StreamHandler() if is_notebook() else RichHandler(),\n # FileHandler(f\"{package_name}.log\"),\n ],\n )\n if IS_TEST:\n 
getLogger(package_name).setLevel(DEBUG)\n captureWarnings(True)\n LOGGER_INIT = True\n\n\ndef is_notebook():\n try:\n from IPython import get_ipython\n\n if \"IPKernelApp\" not in get_ipython().config: # pragma: no cover\n raise ImportError(\"console\")\n return False\n if \"VSCODE_PID\" in os.environ: # pragma: no cover\n raise ImportError(\"vscode\")\n return False\n except Exception:\n return False\n else: # pragma: no cover\n return True\n", "path": "src/so_vits_svc_fork/logger.py"}]} | 979 | 205 |
gh_patches_debug_469 | rasdani/github-patches | git_diff | googleapis__google-api-python-client-293 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Switch from "uritemplate" to "uritemplate.py"
There are at least 2 URI template packages on the PyPI: [uritemplate](https://pypi.python.org/pypi/uritemplate/0.6) and [uritemplate.py](https://pypi.python.org/pypi/uritemplate.py/0.3.0). Both of these packages use `uritemplate` as the package name to be imported, which causes a conflict when both are installed (see https://github.com/sigmavirus24/uritemplate/issues/14).
I propose that this project depend on `uritemplate.py` because:
- `uritemplate.py` is API-compatible with `uritemplate`, but the reverse is not true. I have confirmed that this library works with uritemplate.py.
- Other projects depend on `uritemplate.py`. For example, we are currently working on a project that depends on both `github3.py` (GitHub API client) and `google-api-python-client`. Installing both results in an immediate `ImportError` due to the `uritemplate` conflict.
This is a simple, low-risk change that would aid compatibility with other projects.
Thoughts?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2014 Google Inc. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Setup script for Google API Python client.
16
17 Also installs included versions of third party libraries, if those libraries
18 are not already installed.
19 """
20 from __future__ import print_function
21
22 import sys
23
24 if sys.version_info < (2, 6):
25 print('google-api-python-client requires python version >= 2.6.',
26 file=sys.stderr)
27 sys.exit(1)
28 if (3, 1) <= sys.version_info < (3, 3):
29 print('google-api-python-client requires python3 version >= 3.3.',
30 file=sys.stderr)
31 sys.exit(1)
32
33 from setuptools import setup
34 import pkg_resources
35
36 def _DetectBadness():
37 import os
38 if 'SKIP_GOOGLEAPICLIENT_COMPAT_CHECK' in os.environ:
39 return
40 o2c_pkg = None
41 try:
42 o2c_pkg = pkg_resources.get_distribution('oauth2client')
43 except pkg_resources.DistributionNotFound:
44 pass
45 oauth2client = None
46 try:
47 import oauth2client
48 except ImportError:
49 pass
50 if o2c_pkg is None and oauth2client is not None:
51 raise RuntimeError(
52 'Previous version of google-api-python-client detected; due to a '
53 'packaging issue, we cannot perform an in-place upgrade. Please remove '
54 'the old version and re-install this package.'
55 )
56
57 _DetectBadness()
58
59 packages = [
60 'apiclient',
61 'googleapiclient',
62 'googleapiclient/discovery_cache',
63 ]
64
65 install_requires = [
66 'httplib2>=0.8,<1',
67 'oauth2client>=1.5.0,<4.0.0',
68 'six>=1.6.1,<2',
69 'uritemplate>=0.6,<1',
70 ]
71
72 if sys.version_info < (2, 7):
73 install_requires.append('argparse')
74
75 long_desc = """The Google API Client for Python is a client library for
76 accessing the Plus, Moderator, and many other Google APIs."""
77
78 import googleapiclient
79 version = googleapiclient.__version__
80
81 setup(
82 name="google-api-python-client",
83 version=version,
84 description="Google API Client Library for Python",
85 long_description=long_desc,
86 author="Google Inc.",
87 url="http://github.com/google/google-api-python-client/",
88 install_requires=install_requires,
89 packages=packages,
90 package_data={},
91 license="Apache 2.0",
92 keywords="google api client",
93 classifiers=[
94 'Programming Language :: Python :: 2',
95 'Programming Language :: Python :: 2.6',
96 'Programming Language :: Python :: 2.7',
97 'Programming Language :: Python :: 3',
98 'Programming Language :: Python :: 3.3',
99 'Programming Language :: Python :: 3.4',
100 'Development Status :: 5 - Production/Stable',
101 'Intended Audience :: Developers',
102 'License :: OSI Approved :: Apache Software License',
103 'Operating System :: OS Independent',
104 'Topic :: Internet :: WWW/HTTP',
105 ],
106 )
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -66,7 +66,7 @@
'httplib2>=0.8,<1',
'oauth2client>=1.5.0,<4.0.0',
'six>=1.6.1,<2',
- 'uritemplate>=0.6,<1',
+ 'uritemplate>=3.0.0,<4',
 ]
 
 if sys.version_info < (2, 7):
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -66,7 +66,7 @@\n 'httplib2>=0.8,<1',\n 'oauth2client>=1.5.0,<4.0.0',\n 'six>=1.6.1,<2',\n- 'uritemplate>=0.6,<1',\n+ 'uritemplate>=3.0.0,<4',\n ]\n \n if sys.version_info < (2, 7):\n", "issue": "Switch from \"uritemplate\" to \"uritemplate.py\"\nThere are at least 2 URI template packages on the PyPI: [uritemplate](https://pypi.python.org/pypi/uritemplate/0.6) and [uritemplate.py](https://pypi.python.org/pypi/uritemplate.py/0.3.0). Both of these packages use `uritemplate` as the package name to be imported, which causes a conflict when both are installed (see https://github.com/sigmavirus24/uritemplate/issues/14).\n\nI propose that this project depend on `uritemplate.py` because:\n- `uritemplate.py` is API-compatible with `uritemplate`, but the reverse is not true. I have confirmed that this library works with uritemplate.py.\n- Other projects depend on `uritemplate.py`. For example, we are currently working on a project that depends on both `github3.py` (GitHub API client) and `google-api-python-client`. Installing both results in an immediate `ImportError` due to the `uritemplate` conflict.\n\nThis is a simple, low-risk change that would aid compatibility with other projects.\n\nThoughts?\n\n", "before_files": [{"content": "# Copyright 2014 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Setup script for Google API Python client.\n\nAlso installs included versions of third party libraries, if those libraries\nare not already installed.\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\n\nif sys.version_info < (2, 6):\n print('google-api-python-client requires python version >= 2.6.',\n file=sys.stderr)\n sys.exit(1)\nif (3, 1) <= sys.version_info < (3, 3):\n print('google-api-python-client requires python3 version >= 3.3.',\n file=sys.stderr)\n sys.exit(1)\n\nfrom setuptools import setup\nimport pkg_resources\n\ndef _DetectBadness():\n import os\n if 'SKIP_GOOGLEAPICLIENT_COMPAT_CHECK' in os.environ:\n return\n o2c_pkg = None\n try:\n o2c_pkg = pkg_resources.get_distribution('oauth2client')\n except pkg_resources.DistributionNotFound:\n pass\n oauth2client = None\n try:\n import oauth2client\n except ImportError:\n pass\n if o2c_pkg is None and oauth2client is not None:\n raise RuntimeError(\n 'Previous version of google-api-python-client detected; due to a '\n 'packaging issue, we cannot perform an in-place upgrade. 
Please remove '\n 'the old version and re-install this package.'\n )\n\n_DetectBadness()\n\npackages = [\n 'apiclient',\n 'googleapiclient',\n 'googleapiclient/discovery_cache',\n]\n\ninstall_requires = [\n 'httplib2>=0.8,<1',\n 'oauth2client>=1.5.0,<4.0.0',\n 'six>=1.6.1,<2',\n 'uritemplate>=0.6,<1',\n]\n\nif sys.version_info < (2, 7):\n install_requires.append('argparse')\n\nlong_desc = \"\"\"The Google API Client for Python is a client library for\naccessing the Plus, Moderator, and many other Google APIs.\"\"\"\n\nimport googleapiclient\nversion = googleapiclient.__version__\n\nsetup(\n name=\"google-api-python-client\",\n version=version,\n description=\"Google API Client Library for Python\",\n long_description=long_desc,\n author=\"Google Inc.\",\n url=\"http://github.com/google/google-api-python-client/\",\n install_requires=install_requires,\n packages=packages,\n package_data={},\n license=\"Apache 2.0\",\n keywords=\"google api client\",\n classifiers=[\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Topic :: Internet :: WWW/HTTP',\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2014 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Setup script for Google API Python client.\n\nAlso installs included versions of third party libraries, if those libraries\nare not already installed.\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\n\nif sys.version_info < (2, 6):\n print('google-api-python-client requires python version >= 2.6.',\n file=sys.stderr)\n sys.exit(1)\nif (3, 1) <= sys.version_info < (3, 3):\n print('google-api-python-client requires python3 version >= 3.3.',\n file=sys.stderr)\n sys.exit(1)\n\nfrom setuptools import setup\nimport pkg_resources\n\ndef _DetectBadness():\n import os\n if 'SKIP_GOOGLEAPICLIENT_COMPAT_CHECK' in os.environ:\n return\n o2c_pkg = None\n try:\n o2c_pkg = pkg_resources.get_distribution('oauth2client')\n except pkg_resources.DistributionNotFound:\n pass\n oauth2client = None\n try:\n import oauth2client\n except ImportError:\n pass\n if o2c_pkg is None and oauth2client is not None:\n raise RuntimeError(\n 'Previous version of google-api-python-client detected; due to a '\n 'packaging issue, we cannot perform an in-place upgrade. 
Please remove '\n 'the old version and re-install this package.'\n )\n\n_DetectBadness()\n\npackages = [\n 'apiclient',\n 'googleapiclient',\n 'googleapiclient/discovery_cache',\n]\n\ninstall_requires = [\n 'httplib2>=0.8,<1',\n 'oauth2client>=1.5.0,<4.0.0',\n 'six>=1.6.1,<2',\n 'uritemplate>=3.0.0,<4',\n]\n\nif sys.version_info < (2, 7):\n install_requires.append('argparse')\n\nlong_desc = \"\"\"The Google API Client for Python is a client library for\naccessing the Plus, Moderator, and many other Google APIs.\"\"\"\n\nimport googleapiclient\nversion = googleapiclient.__version__\n\nsetup(\n name=\"google-api-python-client\",\n version=version,\n description=\"Google API Client Library for Python\",\n long_description=long_desc,\n author=\"Google Inc.\",\n url=\"http://github.com/google/google-api-python-client/\",\n install_requires=install_requires,\n packages=packages,\n package_data={},\n license=\"Apache 2.0\",\n keywords=\"google api client\",\n classifiers=[\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Topic :: Internet :: WWW/HTTP',\n ],\n)\n", "path": "setup.py"}]} | 1,520 | 115 |
gh_patches_debug_21178 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-1983 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Distros are coupled with instrumentations
As mentioned [here](https://github.com/open-telemetry/opentelemetry-python/discussions/2005#discussion-3489738).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import sys
16 from logging import getLogger
17 from os import environ, path
18 from os.path import abspath, dirname, pathsep
19 from re import sub
20
21 from pkg_resources import iter_entry_points
22
23 from opentelemetry.environment_variables import (
24 OTEL_PYTHON_DISABLED_INSTRUMENTATIONS,
25 )
26 from opentelemetry.instrumentation.dependencies import (
27 get_dist_dependency_conflicts,
28 )
29 from opentelemetry.instrumentation.distro import BaseDistro, DefaultDistro
30
31 logger = getLogger(__file__)
32
33
34 def _load_distros() -> BaseDistro:
35 for entry_point in iter_entry_points("opentelemetry_distro"):
36 try:
37 distro = entry_point.load()()
38 if not isinstance(distro, BaseDistro):
39 logger.debug(
40 "%s is not an OpenTelemetry Distro. Skipping",
41 entry_point.name,
42 )
43 continue
44 logger.debug(
45 "Distribution %s will be configured", entry_point.name
46 )
47 return distro
48 except Exception as exc: # pylint: disable=broad-except
49 logger.exception(
50 "Distribution %s configuration failed", entry_point.name
51 )
52 raise exc
53 return DefaultDistro()
54
55
56 def _load_instrumentors(distro):
57 package_to_exclude = environ.get(OTEL_PYTHON_DISABLED_INSTRUMENTATIONS, [])
58 if isinstance(package_to_exclude, str):
59 package_to_exclude = package_to_exclude.split(",")
60 # to handle users entering "requests , flask" or "requests, flask" with spaces
61 package_to_exclude = [x.strip() for x in package_to_exclude]
62
63 for entry_point in iter_entry_points("opentelemetry_instrumentor"):
64 if entry_point.name in package_to_exclude:
65 logger.debug(
66 "Instrumentation skipped for library %s", entry_point.name
67 )
68 continue
69
70 try:
71 conflict = get_dist_dependency_conflicts(entry_point.dist)
72 if conflict:
73 logger.debug(
74 "Skipping instrumentation %s: %s",
75 entry_point.name,
76 conflict,
77 )
78 continue
79
80 # tell instrumentation to not run dep checks again as we already did it above
81 distro.load_instrumentor(entry_point, skip_dep_check=True)
82 logger.debug("Instrumented %s", entry_point.name)
83 except Exception as exc: # pylint: disable=broad-except
84 logger.exception("Instrumenting of %s failed", entry_point.name)
85 raise exc
86
87
88 def _load_configurators():
89 configured = None
90 for entry_point in iter_entry_points("opentelemetry_configurator"):
91 if configured is not None:
92 logger.warning(
93 "Configuration of %s not loaded, %s already loaded",
94 entry_point.name,
95 configured,
96 )
97 continue
98 try:
99 entry_point.load()().configure() # type: ignore
100 configured = entry_point.name
101 except Exception as exc: # pylint: disable=broad-except
102 logger.exception("Configuration of %s failed", entry_point.name)
103 raise exc
104
105
106 def initialize():
107 try:
108 distro = _load_distros()
109 distro.configure()
110 _load_configurators()
111 _load_instrumentors(distro)
112 except Exception: # pylint: disable=broad-except
113 logger.exception("Failed to auto initialize opentelemetry")
114 finally:
115 environ["PYTHONPATH"] = sub(
116 r"{}{}?".format(dirname(abspath(__file__)), pathsep),
117 "",
118 environ["PYTHONPATH"],
119 )
120
121
122 if (
123 hasattr(sys, "argv")
124 and sys.argv[0].split(path.sep)[-1] == "celery"
125 and "worker" in sys.argv[1:]
126 ):
127 from celery.signals import worker_process_init # pylint:disable=E0401
128
129 @worker_process_init.connect(weak=False)
130 def init_celery(*args, **kwargs):
131 initialize()
132
133
134 else:
135 initialize()
136
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py
--- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py
+++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py
@@ -60,6 +60,9 @@
# to handle users entering "requests , flask" or "requests, flask" with spaces
     package_to_exclude = [x.strip() for x in package_to_exclude]
 
+    for entry_point in iter_entry_points("opentelemetry_pre_instrument"):
+ entry_point.load()()
+
for entry_point in iter_entry_points("opentelemetry_instrumentor"):
if entry_point.name in package_to_exclude:
logger.debug(
@@ -84,6 +87,9 @@
logger.exception("Instrumenting of %s failed", entry_point.name)
     raise exc
 
+    for entry_point in iter_entry_points("opentelemetry_post_instrument"):
+        entry_point.load()()
+
 
 def _load_configurators():
configured = None
| {"golden_diff": "diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py\n--- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py\n+++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py\n@@ -60,6 +60,9 @@\n # to handle users entering \"requests , flask\" or \"requests, flask\" with spaces\n package_to_exclude = [x.strip() for x in package_to_exclude]\n \n+ for entry_point in iter_entry_points(\"opentelemetry_pre_instrument\"):\n+ entry_point.load()()\n+\n for entry_point in iter_entry_points(\"opentelemetry_instrumentor\"):\n if entry_point.name in package_to_exclude:\n logger.debug(\n@@ -84,6 +87,9 @@\n logger.exception(\"Instrumenting of %s failed\", entry_point.name)\n raise exc\n \n+ for entry_point in iter_entry_points(\"opentelemetry_post_instrument\"):\n+ entry_point.load()()\n+\n \n def _load_configurators():\n configured = None\n", "issue": "Distros are coupled with instrumentations\nAs mentioned [here](https://github.com/open-telemetry/opentelemetry-python/discussions/2005#discussion-3489738).\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nfrom logging import getLogger\nfrom os import environ, path\nfrom os.path import abspath, dirname, pathsep\nfrom re import sub\n\nfrom pkg_resources import iter_entry_points\n\nfrom opentelemetry.environment_variables import (\n OTEL_PYTHON_DISABLED_INSTRUMENTATIONS,\n)\nfrom opentelemetry.instrumentation.dependencies import (\n get_dist_dependency_conflicts,\n)\nfrom opentelemetry.instrumentation.distro import BaseDistro, DefaultDistro\n\nlogger = getLogger(__file__)\n\n\ndef _load_distros() -> BaseDistro:\n for entry_point in iter_entry_points(\"opentelemetry_distro\"):\n try:\n distro = entry_point.load()()\n if not isinstance(distro, BaseDistro):\n logger.debug(\n \"%s is not an OpenTelemetry Distro. 
Skipping\",\n entry_point.name,\n )\n continue\n logger.debug(\n \"Distribution %s will be configured\", entry_point.name\n )\n return distro\n except Exception as exc: # pylint: disable=broad-except\n logger.exception(\n \"Distribution %s configuration failed\", entry_point.name\n )\n raise exc\n return DefaultDistro()\n\n\ndef _load_instrumentors(distro):\n package_to_exclude = environ.get(OTEL_PYTHON_DISABLED_INSTRUMENTATIONS, [])\n if isinstance(package_to_exclude, str):\n package_to_exclude = package_to_exclude.split(\",\")\n # to handle users entering \"requests , flask\" or \"requests, flask\" with spaces\n package_to_exclude = [x.strip() for x in package_to_exclude]\n\n for entry_point in iter_entry_points(\"opentelemetry_instrumentor\"):\n if entry_point.name in package_to_exclude:\n logger.debug(\n \"Instrumentation skipped for library %s\", entry_point.name\n )\n continue\n\n try:\n conflict = get_dist_dependency_conflicts(entry_point.dist)\n if conflict:\n logger.debug(\n \"Skipping instrumentation %s: %s\",\n entry_point.name,\n conflict,\n )\n continue\n\n # tell instrumentation to not run dep checks again as we already did it above\n distro.load_instrumentor(entry_point, skip_dep_check=True)\n logger.debug(\"Instrumented %s\", entry_point.name)\n except Exception as exc: # pylint: disable=broad-except\n logger.exception(\"Instrumenting of %s failed\", entry_point.name)\n raise exc\n\n\ndef _load_configurators():\n configured = None\n for entry_point in iter_entry_points(\"opentelemetry_configurator\"):\n if configured is not None:\n logger.warning(\n \"Configuration of %s not loaded, %s already loaded\",\n entry_point.name,\n configured,\n )\n continue\n try:\n entry_point.load()().configure() # type: ignore\n configured = entry_point.name\n except Exception as exc: # pylint: disable=broad-except\n logger.exception(\"Configuration of %s failed\", entry_point.name)\n raise exc\n\n\ndef initialize():\n try:\n distro = _load_distros()\n distro.configure()\n _load_configurators()\n _load_instrumentors(distro)\n except Exception: # pylint: disable=broad-except\n logger.exception(\"Failed to auto initialize opentelemetry\")\n finally:\n environ[\"PYTHONPATH\"] = sub(\n r\"{}{}?\".format(dirname(abspath(__file__)), pathsep),\n \"\",\n environ[\"PYTHONPATH\"],\n )\n\n\nif (\n hasattr(sys, \"argv\")\n and sys.argv[0].split(path.sep)[-1] == \"celery\"\n and \"worker\" in sys.argv[1:]\n):\n from celery.signals import worker_process_init # pylint:disable=E0401\n\n @worker_process_init.connect(weak=False)\n def init_celery(*args, **kwargs):\n initialize()\n\n\nelse:\n initialize()\n", "path": "opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nfrom logging import getLogger\nfrom os import environ, path\nfrom os.path import abspath, dirname, pathsep\nfrom re import sub\n\nfrom pkg_resources import 
iter_entry_points\n\nfrom opentelemetry.environment_variables import (\n OTEL_PYTHON_DISABLED_INSTRUMENTATIONS,\n)\nfrom opentelemetry.instrumentation.dependencies import (\n get_dist_dependency_conflicts,\n)\nfrom opentelemetry.instrumentation.distro import BaseDistro, DefaultDistro\n\nlogger = getLogger(__file__)\n\n\ndef _load_distros() -> BaseDistro:\n for entry_point in iter_entry_points(\"opentelemetry_distro\"):\n try:\n distro = entry_point.load()()\n if not isinstance(distro, BaseDistro):\n logger.debug(\n \"%s is not an OpenTelemetry Distro. Skipping\",\n entry_point.name,\n )\n continue\n logger.debug(\n \"Distribution %s will be configured\", entry_point.name\n )\n return distro\n except Exception as exc: # pylint: disable=broad-except\n logger.exception(\n \"Distribution %s configuration failed\", entry_point.name\n )\n raise exc\n return DefaultDistro()\n\n\ndef _load_instrumentors(distro):\n package_to_exclude = environ.get(OTEL_PYTHON_DISABLED_INSTRUMENTATIONS, [])\n if isinstance(package_to_exclude, str):\n package_to_exclude = package_to_exclude.split(\",\")\n # to handle users entering \"requests , flask\" or \"requests, flask\" with spaces\n package_to_exclude = [x.strip() for x in package_to_exclude]\n\n for entry_point in iter_entry_points(\"opentelemetry_pre_instrument\"):\n entry_point.load()()\n\n for entry_point in iter_entry_points(\"opentelemetry_instrumentor\"):\n if entry_point.name in package_to_exclude:\n logger.debug(\n \"Instrumentation skipped for library %s\", entry_point.name\n )\n continue\n\n try:\n conflict = get_dist_dependency_conflicts(entry_point.dist)\n if conflict:\n logger.debug(\n \"Skipping instrumentation %s: %s\",\n entry_point.name,\n conflict,\n )\n continue\n\n # tell instrumentation to not run dep checks again as we already did it above\n distro.load_instrumentor(entry_point, skip_dep_check=True)\n logger.debug(\"Instrumented %s\", entry_point.name)\n except Exception as exc: # pylint: disable=broad-except\n logger.exception(\"Instrumenting of %s failed\", entry_point.name)\n raise exc\n\n for entry_point in iter_entry_points(\"opentelemetry_post_instrument\"):\n entry_point.load()()\n\n\ndef _load_configurators():\n configured = None\n for entry_point in iter_entry_points(\"opentelemetry_configurator\"):\n if configured is not None:\n logger.warning(\n \"Configuration of %s not loaded, %s already loaded\",\n entry_point.name,\n configured,\n )\n continue\n try:\n entry_point.load()().configure() # type: ignore\n configured = entry_point.name\n except Exception as exc: # pylint: disable=broad-except\n logger.exception(\"Configuration of %s failed\", entry_point.name)\n raise exc\n\n\ndef initialize():\n try:\n distro = _load_distros()\n distro.configure()\n _load_configurators()\n _load_instrumentors(distro)\n except Exception: # pylint: disable=broad-except\n logger.exception(\"Failed to auto initialize opentelemetry\")\n finally:\n environ[\"PYTHONPATH\"] = sub(\n r\"{}{}?\".format(dirname(abspath(__file__)), pathsep),\n \"\",\n environ[\"PYTHONPATH\"],\n )\n\n\nif (\n hasattr(sys, \"argv\")\n and sys.argv[0].split(path.sep)[-1] == \"celery\"\n and \"worker\" in sys.argv[1:]\n):\n from celery.signals import worker_process_init # pylint:disable=E0401\n\n @worker_process_init.connect(weak=False)\n def init_celery(*args, **kwargs):\n initialize()\n\n\nelse:\n initialize()\n", "path": "opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py"}]} | 1,579 | 265 |
gh_patches_debug_18175 | rasdani/github-patches | git_diff | mozilla__bugbug-4031 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use Ruff for linting and formatting
Proposal to use [Ruff](https://docs.astral.sh/ruff/) for linting and code quality, for the following reasons:
- It's faster
- It can fix code automatically
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/past_bugs_by_unit.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # This Source Code Form is subject to the terms of the Mozilla Public
3 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
4 # You can obtain one at http://mozilla.org/MPL/2.0/.
5
6 import argparse
7 import json
8 import logging
9 from collections import defaultdict
10
11 from tqdm import tqdm
12
13 from bugbug import bugzilla, db, repository
14 from bugbug.models.regressor import BUG_FIXING_COMMITS_DB
15 from bugbug.utils import zstd_compress
16
17 logging.basicConfig(level=logging.INFO)
18 logger = logging.getLogger(__name__)
19
20
21 class PastBugsCollector(object):
22 def __init__(self) -> None:
23 logger.info("Downloading commits database...")
24 assert db.download(repository.COMMITS_DB)
25
26 logger.info("Downloading bugs database...")
27 assert db.download(bugzilla.BUGS_DB)
28
29 logger.info("Download commit classifications...")
30 assert db.download(BUG_FIXING_COMMITS_DB)
31
32 def go(self) -> None:
33 logger.info(
34 "Generate map of bug ID -> bug data for all bugs which were defects"
35 )
36 bug_fixing_commits = list(db.read(BUG_FIXING_COMMITS_DB))
37
38 bug_fixing_commits_nodes = set(
39 bug_fixing_commit["rev"]
40 for bug_fixing_commit in bug_fixing_commits
41 if bug_fixing_commit["type"] in ("d", "r")
42 )
43
44 logger.info("%d bug-fixing commits to analyze", len(bug_fixing_commits_nodes))
45
46 all_bug_ids = set(commit["bug_id"] for commit in repository.get_commits())
47
48 bug_map = {
49 bug["id"]: bug for bug in bugzilla.get_bugs() if bug["id"] in all_bug_ids
50 }
51
52 logger.info(
53 "Generate a map from files/functions to the bugs which were fixed/introduced by touching them"
54 )
55
56 # TODO: Support "moving" past bugs between files when they are renamed and between functions when they are
57 # moved across files.
58
59 by_dimensions = ["file", "directory", "component"]
60
61 def dimension_to_field(dimension: str) -> str:
62 return f"{dimension}s" if dimension != "directory" else "directories"
63
64 past_regressions_by: dict[str, dict[str, list[int]]] = defaultdict(
65 lambda: defaultdict(list)
66 )
67 past_fixed_bugs_by: dict[str, dict[str, list[int]]] = defaultdict(
68 lambda: defaultdict(list)
69 )
70 past_regression_blocked_bugs_by: dict[str, dict[str, list[int]]] = defaultdict(
71 lambda: defaultdict(list)
72 )
73 past_fixed_bug_blocked_bugs_by: dict[str, dict[str, list[int]]] = defaultdict(
74 lambda: defaultdict(list)
75 )
76 past_regressions_by_function: dict[str, dict[str, list[int]]] = defaultdict(
77 lambda: defaultdict(list)
78 )
79 past_fixed_bugs_by_function: dict[str, dict[str, list[int]]] = defaultdict(
80 lambda: defaultdict(list)
81 )
82 past_regression_blocked_bugs_by_function: dict[
83 str, dict[str, list[int]]
84 ] = defaultdict(lambda: defaultdict(list))
85 past_fixed_bug_blocked_bugs_by_function: dict[
86 str, dict[str, list[int]]
87 ] = defaultdict(lambda: defaultdict(list))
88
89 for commit in tqdm(repository.get_commits()):
90 if commit["bug_id"] not in bug_map:
91 continue
92
93 if commit["backedoutby"]:
94 continue
95
96 bug = bug_map[commit["bug_id"]]
97
98 if len(bug["regressions"]) > 0:
99 for dimension in by_dimensions:
100 for path in commit[dimension_to_field(dimension)]:
101 past_regressions_by[dimension][path].extend(
102 bug_id for bug_id in bug["regressions"] if bug_id in bug_map
103 )
104
105 past_regression_blocked_bugs_by[dimension][path].extend(
106 bugzilla.find_blocked_by(bug_map, bug)
107 )
108
109 for path, f_group in commit["functions"].items():
110 for f in f_group:
111 past_regressions_by_function[path][f["name"]].extend(
112 bug_id for bug_id in bug["regressions"] if bug_id in bug_map
113 )
114
115 past_regression_blocked_bugs_by_function[path][
116 f["name"]
117 ].extend(bugzilla.find_blocked_by(bug_map, bug))
118
119 if commit["node"] in bug_fixing_commits_nodes:
120 for dimension in by_dimensions:
121 for path in commit[dimension_to_field(dimension)]:
122 past_fixed_bugs_by[dimension][path].append(bug["id"])
123
124 past_fixed_bug_blocked_bugs_by[dimension][path].extend(
125 bugzilla.find_blocked_by(bug_map, bug)
126 )
127
128 for path, f_group in commit["functions"].items():
129 for f in f_group:
130 past_fixed_bugs_by_function[path][f["name"]].append(bug["id"])
131
132 past_fixed_bug_blocked_bugs_by_function[path][f["name"]].extend(
133 bugzilla.find_blocked_by(bug_map, bug)
134 )
135
136 def _transform(bug_ids: list[int]) -> list[dict]:
137 seen = set()
138 results = []
139 for bug_id in bug_ids:
140 if bug_id in seen:
141 continue
142 seen.add(bug_id)
143
144 bug = bug_map[bug_id]
145 results.append(
146 {
147 "id": bug_id,
148 "summary": bug["summary"],
149 "component": "{}::{}".format(bug["product"], bug["component"]),
150 }
151 )
152
153 return results
154
155 def past_bug_ids_to_summaries(
156 past_bugs_by: dict[str, list[int]]
157 ) -> dict[str, list[dict]]:
158 return {path: _transform(bug_ids) for path, bug_ids in past_bugs_by.items()}
159
160 for dimension in by_dimensions:
161 with open(f"data/past_regressions_by_{dimension}.json", "w") as f:
162 json.dump(past_bug_ids_to_summaries(past_regressions_by[dimension]), f)
163 zstd_compress(f"data/past_regressions_by_{dimension}.json")
164
165 with open(f"data/past_fixed_bugs_by_{dimension}.json", "w") as f:
166 json.dump(past_bug_ids_to_summaries(past_fixed_bugs_by[dimension]), f)
167 zstd_compress(f"data/past_fixed_bugs_by_{dimension}.json")
168
169 with open(
170 f"data/past_regression_blocked_bugs_by_{dimension}.json", "w"
171 ) as f:
172 json.dump(
173 past_bug_ids_to_summaries(
174 past_regression_blocked_bugs_by[dimension]
175 ),
176 f,
177 )
178 zstd_compress(f"data/past_regression_blocked_bugs_by_{dimension}.json")
179
180 with open(
181 f"data/past_fixed_bug_blocked_bugs_by_{dimension}.json", "w"
182 ) as f:
183 json.dump(
184 past_bug_ids_to_summaries(
185 past_fixed_bug_blocked_bugs_by[dimension]
186 ),
187 f,
188 )
189 zstd_compress(f"data/past_fixed_bug_blocked_bugs_by_{dimension}.json")
190
191 def past_function_bug_ids_to_summaries(
192 past_bugs: dict[str, dict[str, list[int]]]
193 ) -> dict[str, dict[str, list[dict]]]:
194 return {
195 path: {
196 func: _transform(bug_ids) for func, bug_ids in funcs_bugs.items()
197 }
198 for path, funcs_bugs in past_bugs.items()
199 }
200
201 with open("data/past_regressions_by_function.json", "w") as f:
202 json.dump(
203 past_function_bug_ids_to_summaries(past_regressions_by_function), f
204 )
205 zstd_compress("data/past_regressions_by_function.json")
206
207 with open("data/past_fixed_bugs_by_function.json", "w") as f:
208 json.dump(
209 past_function_bug_ids_to_summaries(past_fixed_bugs_by_function), f
210 )
211 zstd_compress("data/past_fixed_bugs_by_function.json")
212
213 with open("data/past_regression_blocked_bugs_by_function.json", "w") as f:
214 json.dump(
215 past_function_bug_ids_to_summaries(
216 past_regression_blocked_bugs_by_function
217 ),
218 f,
219 )
220 zstd_compress("data/past_regression_blocked_bugs_by_function.json")
221
222 with open("data/past_fixed_bug_blocked_bugs_by_function.json", "w") as f:
223 json.dump(
224 past_function_bug_ids_to_summaries(
225 past_fixed_bug_blocked_bugs_by_function
226 ),
227 f,
228 )
229 zstd_compress("data/past_fixed_bug_blocked_bugs_by_function.json")
230
231
232 def main() -> None:
233 description = "Find past bugs linked to given units of source code"
234 parser = argparse.ArgumentParser(description=description)
235 parser.parse_args()
236
237 past_bugs_collector = PastBugsCollector()
238 past_bugs_collector.go()
239
240
241 if __name__ == "__main__":
242 main()
243
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scripts/past_bugs_by_unit.py b/scripts/past_bugs_by_unit.py
--- a/scripts/past_bugs_by_unit.py
+++ b/scripts/past_bugs_by_unit.py
@@ -153,7 +153,7 @@
return results
def past_bug_ids_to_summaries(
- past_bugs_by: dict[str, list[int]]
+ past_bugs_by: dict[str, list[int]],
) -> dict[str, list[dict]]:
return {path: _transform(bug_ids) for path, bug_ids in past_bugs_by.items()}
@@ -189,7 +189,7 @@
zstd_compress(f"data/past_fixed_bug_blocked_bugs_by_{dimension}.json")
def past_function_bug_ids_to_summaries(
- past_bugs: dict[str, dict[str, list[int]]]
+ past_bugs: dict[str, dict[str, list[int]]],
) -> dict[str, dict[str, list[dict]]]:
return {
path: {
| {"golden_diff": "diff --git a/scripts/past_bugs_by_unit.py b/scripts/past_bugs_by_unit.py\n--- a/scripts/past_bugs_by_unit.py\n+++ b/scripts/past_bugs_by_unit.py\n@@ -153,7 +153,7 @@\n return results\n \n def past_bug_ids_to_summaries(\n- past_bugs_by: dict[str, list[int]]\n+ past_bugs_by: dict[str, list[int]],\n ) -> dict[str, list[dict]]:\n return {path: _transform(bug_ids) for path, bug_ids in past_bugs_by.items()}\n \n@@ -189,7 +189,7 @@\n zstd_compress(f\"data/past_fixed_bug_blocked_bugs_by_{dimension}.json\")\n \n def past_function_bug_ids_to_summaries(\n- past_bugs: dict[str, dict[str, list[int]]]\n+ past_bugs: dict[str, dict[str, list[int]]],\n ) -> dict[str, dict[str, list[dict]]]:\n return {\n path: {\n", "issue": "Use Ruff for linting and formatting\nProposal to use[ Ruff ](https://docs.astral.sh/ruff/) for linting and code quality. Due to following reasons:\r\n\r\n- It's faster \r\n- Can fix code automatically \r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport argparse\nimport json\nimport logging\nfrom collections import defaultdict\n\nfrom tqdm import tqdm\n\nfrom bugbug import bugzilla, db, repository\nfrom bugbug.models.regressor import BUG_FIXING_COMMITS_DB\nfrom bugbug.utils import zstd_compress\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\nclass PastBugsCollector(object):\n def __init__(self) -> None:\n logger.info(\"Downloading commits database...\")\n assert db.download(repository.COMMITS_DB)\n\n logger.info(\"Downloading bugs database...\")\n assert db.download(bugzilla.BUGS_DB)\n\n logger.info(\"Download commit classifications...\")\n assert db.download(BUG_FIXING_COMMITS_DB)\n\n def go(self) -> None:\n logger.info(\n \"Generate map of bug ID -> bug data for all bugs which were defects\"\n )\n bug_fixing_commits = list(db.read(BUG_FIXING_COMMITS_DB))\n\n bug_fixing_commits_nodes = set(\n bug_fixing_commit[\"rev\"]\n for bug_fixing_commit in bug_fixing_commits\n if bug_fixing_commit[\"type\"] in (\"d\", \"r\")\n )\n\n logger.info(\"%d bug-fixing commits to analyze\", len(bug_fixing_commits_nodes))\n\n all_bug_ids = set(commit[\"bug_id\"] for commit in repository.get_commits())\n\n bug_map = {\n bug[\"id\"]: bug for bug in bugzilla.get_bugs() if bug[\"id\"] in all_bug_ids\n }\n\n logger.info(\n \"Generate a map from files/functions to the bugs which were fixed/introduced by touching them\"\n )\n\n # TODO: Support \"moving\" past bugs between files when they are renamed and between functions when they are\n # moved across files.\n\n by_dimensions = [\"file\", \"directory\", \"component\"]\n\n def dimension_to_field(dimension: str) -> str:\n return f\"{dimension}s\" if dimension != \"directory\" else \"directories\"\n\n past_regressions_by: dict[str, dict[str, list[int]]] = defaultdict(\n lambda: defaultdict(list)\n )\n past_fixed_bugs_by: dict[str, dict[str, list[int]]] = defaultdict(\n lambda: defaultdict(list)\n )\n past_regression_blocked_bugs_by: dict[str, dict[str, list[int]]] = defaultdict(\n lambda: defaultdict(list)\n )\n past_fixed_bug_blocked_bugs_by: dict[str, dict[str, list[int]]] = defaultdict(\n lambda: defaultdict(list)\n )\n past_regressions_by_function: dict[str, dict[str, list[int]]] = defaultdict(\n lambda: defaultdict(list)\n )\n past_fixed_bugs_by_function: dict[str, dict[str, list[int]]] 
= defaultdict(\n lambda: defaultdict(list)\n )\n past_regression_blocked_bugs_by_function: dict[\n str, dict[str, list[int]]\n ] = defaultdict(lambda: defaultdict(list))\n past_fixed_bug_blocked_bugs_by_function: dict[\n str, dict[str, list[int]]\n ] = defaultdict(lambda: defaultdict(list))\n\n for commit in tqdm(repository.get_commits()):\n if commit[\"bug_id\"] not in bug_map:\n continue\n\n if commit[\"backedoutby\"]:\n continue\n\n bug = bug_map[commit[\"bug_id\"]]\n\n if len(bug[\"regressions\"]) > 0:\n for dimension in by_dimensions:\n for path in commit[dimension_to_field(dimension)]:\n past_regressions_by[dimension][path].extend(\n bug_id for bug_id in bug[\"regressions\"] if bug_id in bug_map\n )\n\n past_regression_blocked_bugs_by[dimension][path].extend(\n bugzilla.find_blocked_by(bug_map, bug)\n )\n\n for path, f_group in commit[\"functions\"].items():\n for f in f_group:\n past_regressions_by_function[path][f[\"name\"]].extend(\n bug_id for bug_id in bug[\"regressions\"] if bug_id in bug_map\n )\n\n past_regression_blocked_bugs_by_function[path][\n f[\"name\"]\n ].extend(bugzilla.find_blocked_by(bug_map, bug))\n\n if commit[\"node\"] in bug_fixing_commits_nodes:\n for dimension in by_dimensions:\n for path in commit[dimension_to_field(dimension)]:\n past_fixed_bugs_by[dimension][path].append(bug[\"id\"])\n\n past_fixed_bug_blocked_bugs_by[dimension][path].extend(\n bugzilla.find_blocked_by(bug_map, bug)\n )\n\n for path, f_group in commit[\"functions\"].items():\n for f in f_group:\n past_fixed_bugs_by_function[path][f[\"name\"]].append(bug[\"id\"])\n\n past_fixed_bug_blocked_bugs_by_function[path][f[\"name\"]].extend(\n bugzilla.find_blocked_by(bug_map, bug)\n )\n\n def _transform(bug_ids: list[int]) -> list[dict]:\n seen = set()\n results = []\n for bug_id in bug_ids:\n if bug_id in seen:\n continue\n seen.add(bug_id)\n\n bug = bug_map[bug_id]\n results.append(\n {\n \"id\": bug_id,\n \"summary\": bug[\"summary\"],\n \"component\": \"{}::{}\".format(bug[\"product\"], bug[\"component\"]),\n }\n )\n\n return results\n\n def past_bug_ids_to_summaries(\n past_bugs_by: dict[str, list[int]]\n ) -> dict[str, list[dict]]:\n return {path: _transform(bug_ids) for path, bug_ids in past_bugs_by.items()}\n\n for dimension in by_dimensions:\n with open(f\"data/past_regressions_by_{dimension}.json\", \"w\") as f:\n json.dump(past_bug_ids_to_summaries(past_regressions_by[dimension]), f)\n zstd_compress(f\"data/past_regressions_by_{dimension}.json\")\n\n with open(f\"data/past_fixed_bugs_by_{dimension}.json\", \"w\") as f:\n json.dump(past_bug_ids_to_summaries(past_fixed_bugs_by[dimension]), f)\n zstd_compress(f\"data/past_fixed_bugs_by_{dimension}.json\")\n\n with open(\n f\"data/past_regression_blocked_bugs_by_{dimension}.json\", \"w\"\n ) as f:\n json.dump(\n past_bug_ids_to_summaries(\n past_regression_blocked_bugs_by[dimension]\n ),\n f,\n )\n zstd_compress(f\"data/past_regression_blocked_bugs_by_{dimension}.json\")\n\n with open(\n f\"data/past_fixed_bug_blocked_bugs_by_{dimension}.json\", \"w\"\n ) as f:\n json.dump(\n past_bug_ids_to_summaries(\n past_fixed_bug_blocked_bugs_by[dimension]\n ),\n f,\n )\n zstd_compress(f\"data/past_fixed_bug_blocked_bugs_by_{dimension}.json\")\n\n def past_function_bug_ids_to_summaries(\n past_bugs: dict[str, dict[str, list[int]]]\n ) -> dict[str, dict[str, list[dict]]]:\n return {\n path: {\n func: _transform(bug_ids) for func, bug_ids in funcs_bugs.items()\n }\n for path, funcs_bugs in past_bugs.items()\n }\n\n with 
open(\"data/past_regressions_by_function.json\", \"w\") as f:\n json.dump(\n past_function_bug_ids_to_summaries(past_regressions_by_function), f\n )\n zstd_compress(\"data/past_regressions_by_function.json\")\n\n with open(\"data/past_fixed_bugs_by_function.json\", \"w\") as f:\n json.dump(\n past_function_bug_ids_to_summaries(past_fixed_bugs_by_function), f\n )\n zstd_compress(\"data/past_fixed_bugs_by_function.json\")\n\n with open(\"data/past_regression_blocked_bugs_by_function.json\", \"w\") as f:\n json.dump(\n past_function_bug_ids_to_summaries(\n past_regression_blocked_bugs_by_function\n ),\n f,\n )\n zstd_compress(\"data/past_regression_blocked_bugs_by_function.json\")\n\n with open(\"data/past_fixed_bug_blocked_bugs_by_function.json\", \"w\") as f:\n json.dump(\n past_function_bug_ids_to_summaries(\n past_fixed_bug_blocked_bugs_by_function\n ),\n f,\n )\n zstd_compress(\"data/past_fixed_bug_blocked_bugs_by_function.json\")\n\n\ndef main() -> None:\n description = \"Find past bugs linked to given units of source code\"\n parser = argparse.ArgumentParser(description=description)\n parser.parse_args()\n\n past_bugs_collector = PastBugsCollector()\n past_bugs_collector.go()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "scripts/past_bugs_by_unit.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport argparse\nimport json\nimport logging\nfrom collections import defaultdict\n\nfrom tqdm import tqdm\n\nfrom bugbug import bugzilla, db, repository\nfrom bugbug.models.regressor import BUG_FIXING_COMMITS_DB\nfrom bugbug.utils import zstd_compress\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\nclass PastBugsCollector(object):\n def __init__(self) -> None:\n logger.info(\"Downloading commits database...\")\n assert db.download(repository.COMMITS_DB)\n\n logger.info(\"Downloading bugs database...\")\n assert db.download(bugzilla.BUGS_DB)\n\n logger.info(\"Download commit classifications...\")\n assert db.download(BUG_FIXING_COMMITS_DB)\n\n def go(self) -> None:\n logger.info(\n \"Generate map of bug ID -> bug data for all bugs which were defects\"\n )\n bug_fixing_commits = list(db.read(BUG_FIXING_COMMITS_DB))\n\n bug_fixing_commits_nodes = set(\n bug_fixing_commit[\"rev\"]\n for bug_fixing_commit in bug_fixing_commits\n if bug_fixing_commit[\"type\"] in (\"d\", \"r\")\n )\n\n logger.info(\"%d bug-fixing commits to analyze\", len(bug_fixing_commits_nodes))\n\n all_bug_ids = set(commit[\"bug_id\"] for commit in repository.get_commits())\n\n bug_map = {\n bug[\"id\"]: bug for bug in bugzilla.get_bugs() if bug[\"id\"] in all_bug_ids\n }\n\n logger.info(\n \"Generate a map from files/functions to the bugs which were fixed/introduced by touching them\"\n )\n\n # TODO: Support \"moving\" past bugs between files when they are renamed and between functions when they are\n # moved across files.\n\n by_dimensions = [\"file\", \"directory\", \"component\"]\n\n def dimension_to_field(dimension: str) -> str:\n return f\"{dimension}s\" if dimension != \"directory\" else \"directories\"\n\n past_regressions_by: dict[str, dict[str, list[int]]] = defaultdict(\n lambda: defaultdict(list)\n )\n past_fixed_bugs_by: dict[str, dict[str, list[int]]] = defaultdict(\n lambda: defaultdict(list)\n )\n past_regression_blocked_bugs_by: dict[str, dict[str, 
list[int]]] = defaultdict(\n lambda: defaultdict(list)\n )\n past_fixed_bug_blocked_bugs_by: dict[str, dict[str, list[int]]] = defaultdict(\n lambda: defaultdict(list)\n )\n past_regressions_by_function: dict[str, dict[str, list[int]]] = defaultdict(\n lambda: defaultdict(list)\n )\n past_fixed_bugs_by_function: dict[str, dict[str, list[int]]] = defaultdict(\n lambda: defaultdict(list)\n )\n past_regression_blocked_bugs_by_function: dict[\n str, dict[str, list[int]]\n ] = defaultdict(lambda: defaultdict(list))\n past_fixed_bug_blocked_bugs_by_function: dict[\n str, dict[str, list[int]]\n ] = defaultdict(lambda: defaultdict(list))\n\n for commit in tqdm(repository.get_commits()):\n if commit[\"bug_id\"] not in bug_map:\n continue\n\n if commit[\"backedoutby\"]:\n continue\n\n bug = bug_map[commit[\"bug_id\"]]\n\n if len(bug[\"regressions\"]) > 0:\n for dimension in by_dimensions:\n for path in commit[dimension_to_field(dimension)]:\n past_regressions_by[dimension][path].extend(\n bug_id for bug_id in bug[\"regressions\"] if bug_id in bug_map\n )\n\n past_regression_blocked_bugs_by[dimension][path].extend(\n bugzilla.find_blocked_by(bug_map, bug)\n )\n\n for path, f_group in commit[\"functions\"].items():\n for f in f_group:\n past_regressions_by_function[path][f[\"name\"]].extend(\n bug_id for bug_id in bug[\"regressions\"] if bug_id in bug_map\n )\n\n past_regression_blocked_bugs_by_function[path][\n f[\"name\"]\n ].extend(bugzilla.find_blocked_by(bug_map, bug))\n\n if commit[\"node\"] in bug_fixing_commits_nodes:\n for dimension in by_dimensions:\n for path in commit[dimension_to_field(dimension)]:\n past_fixed_bugs_by[dimension][path].append(bug[\"id\"])\n\n past_fixed_bug_blocked_bugs_by[dimension][path].extend(\n bugzilla.find_blocked_by(bug_map, bug)\n )\n\n for path, f_group in commit[\"functions\"].items():\n for f in f_group:\n past_fixed_bugs_by_function[path][f[\"name\"]].append(bug[\"id\"])\n\n past_fixed_bug_blocked_bugs_by_function[path][f[\"name\"]].extend(\n bugzilla.find_blocked_by(bug_map, bug)\n )\n\n def _transform(bug_ids: list[int]) -> list[dict]:\n seen = set()\n results = []\n for bug_id in bug_ids:\n if bug_id in seen:\n continue\n seen.add(bug_id)\n\n bug = bug_map[bug_id]\n results.append(\n {\n \"id\": bug_id,\n \"summary\": bug[\"summary\"],\n \"component\": \"{}::{}\".format(bug[\"product\"], bug[\"component\"]),\n }\n )\n\n return results\n\n def past_bug_ids_to_summaries(\n past_bugs_by: dict[str, list[int]],\n ) -> dict[str, list[dict]]:\n return {path: _transform(bug_ids) for path, bug_ids in past_bugs_by.items()}\n\n for dimension in by_dimensions:\n with open(f\"data/past_regressions_by_{dimension}.json\", \"w\") as f:\n json.dump(past_bug_ids_to_summaries(past_regressions_by[dimension]), f)\n zstd_compress(f\"data/past_regressions_by_{dimension}.json\")\n\n with open(f\"data/past_fixed_bugs_by_{dimension}.json\", \"w\") as f:\n json.dump(past_bug_ids_to_summaries(past_fixed_bugs_by[dimension]), f)\n zstd_compress(f\"data/past_fixed_bugs_by_{dimension}.json\")\n\n with open(\n f\"data/past_regression_blocked_bugs_by_{dimension}.json\", \"w\"\n ) as f:\n json.dump(\n past_bug_ids_to_summaries(\n past_regression_blocked_bugs_by[dimension]\n ),\n f,\n )\n zstd_compress(f\"data/past_regression_blocked_bugs_by_{dimension}.json\")\n\n with open(\n f\"data/past_fixed_bug_blocked_bugs_by_{dimension}.json\", \"w\"\n ) as f:\n json.dump(\n past_bug_ids_to_summaries(\n past_fixed_bug_blocked_bugs_by[dimension]\n ),\n f,\n )\n 
zstd_compress(f\"data/past_fixed_bug_blocked_bugs_by_{dimension}.json\")\n\n def past_function_bug_ids_to_summaries(\n past_bugs: dict[str, dict[str, list[int]]],\n ) -> dict[str, dict[str, list[dict]]]:\n return {\n path: {\n func: _transform(bug_ids) for func, bug_ids in funcs_bugs.items()\n }\n for path, funcs_bugs in past_bugs.items()\n }\n\n with open(\"data/past_regressions_by_function.json\", \"w\") as f:\n json.dump(\n past_function_bug_ids_to_summaries(past_regressions_by_function), f\n )\n zstd_compress(\"data/past_regressions_by_function.json\")\n\n with open(\"data/past_fixed_bugs_by_function.json\", \"w\") as f:\n json.dump(\n past_function_bug_ids_to_summaries(past_fixed_bugs_by_function), f\n )\n zstd_compress(\"data/past_fixed_bugs_by_function.json\")\n\n with open(\"data/past_regression_blocked_bugs_by_function.json\", \"w\") as f:\n json.dump(\n past_function_bug_ids_to_summaries(\n past_regression_blocked_bugs_by_function\n ),\n f,\n )\n zstd_compress(\"data/past_regression_blocked_bugs_by_function.json\")\n\n with open(\"data/past_fixed_bug_blocked_bugs_by_function.json\", \"w\") as f:\n json.dump(\n past_function_bug_ids_to_summaries(\n past_fixed_bug_blocked_bugs_by_function\n ),\n f,\n )\n zstd_compress(\"data/past_fixed_bug_blocked_bugs_by_function.json\")\n\n\ndef main() -> None:\n description = \"Find past bugs linked to given units of source code\"\n parser = argparse.ArgumentParser(description=description)\n parser.parse_args()\n\n past_bugs_collector = PastBugsCollector()\n past_bugs_collector.go()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "scripts/past_bugs_by_unit.py"}]} | 2,911 | 228 |
gh_patches_debug_15750 | rasdani/github-patches | git_diff | google__fuzzbench-767 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Interpretation of the FuzzBench Reports
The available fuzzer configurations and the results might be confusing for someone who looks at a random FuzzBench report for the first time. To avoid misinterpretation of the results, maybe there could be
1. a description of the intent of the evaluation and/or an explanation of the evaluated fuzzer configurations, and
2. a distinction between "master/official/representative" and "developer" evaluations and fuzzer variants.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `analysis/experiment_results.py`
Content:
```
1 # Copyright 2020 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """ExperimentResults class."""
15
16 import functools
17 import os
18
19 from analysis import benchmark_results
20 from analysis import coverage_data_utils
21 from analysis import data_utils
22 from analysis import stat_tests
23
24
25 class ExperimentResults: # pylint: disable=too-many-instance-attributes
26 """Provides the main interface for getting various analysis results and
27 plots about an experiment, represented by |experiment_df|.
28
29 Can be used as the context of template based report generation. Each
30 result is a property, which is lazily computed and memorized when
31 needed multiple times. Therefore, when used as a context of a report
32 template, only the properties needed for the given report will be computed.
33 """
34
35 def __init__( # pylint: disable=too-many-arguments
36 self,
37 experiment_df,
38 coverage_dict,
39 output_directory,
40 plotter,
41 experiment_name=None):
42 if experiment_name:
43 self.name = experiment_name
44 else:
45 # Take name from first row.
46 self.name = experiment_df.experiment.iloc[0]
47
48 # FuzzBench repo commit hash.
49 self.git_hash = None
50 if 'git_hash' in experiment_df.columns:
51 # Not possible to represent hashes for multiple experiments.
52 if len(experiment_df.experiment.unique()) == 1:
53 self.git_hash = experiment_df.git_hash.iloc[0]
54
55 # Earliest trial start time.
56 self.started = experiment_df.time_started.dropna().min()
57 # Latest trial end time.
58 self.ended = experiment_df.time_ended.dropna().max()
59
60 # Keep data frame without non-interesting columns.
61 self._experiment_df = data_utils.drop_uninteresting_columns(
62 experiment_df)
63
64 # Directory where the rendered plots are written to.
65 self._output_directory = output_directory
66
67 self._plotter = plotter
68
69 # Dictionary to store the full coverage data.
70 self._coverage_dict = coverage_dict
71
72 def _get_full_path(self, filename):
73 return os.path.join(self._output_directory, filename)
74
75 @property
76 @functools.lru_cache()
77 # TODO(lszekeres): With python3.8+, replace above two decorators with:
78 # @functools.cached_property
79 def _experiment_snapshots_df(self):
80 """Data frame containing only the time snapshots, for each benchmark,
81 based on which we do further analysis, i.e., statistical tests and
82 ranking."""
83 return data_utils.get_experiment_snapshots(self._experiment_df)
84
85 @property
86 @functools.lru_cache()
87 def benchmarks(self):
88 """Returns the list of BenchmarkResults.
89
90 This is cheap as no computation is done on the benchmark data,
91 until a property is evaluated.
92 """
93 benchmark_names = self._experiment_df.benchmark.unique()
94 return [
95 benchmark_results.BenchmarkResults(name, self._experiment_df,
96 self._coverage_dict,
97 self._output_directory,
98 self._plotter)
99 for name in sorted(benchmark_names)
100 ]
101
102 @property
103 @functools.lru_cache()
104 def summary_table(self):
105 """A pivot table of medians for each fuzzer on each benchmark."""
106 return data_utils.experiment_pivot_table(
107 self._experiment_snapshots_df, data_utils.benchmark_rank_by_median)
108
109 @property
110 def rank_by_unique_coverage_average_normalized_score(self):
111 """Rank fuzzers using average normalized score on unique coverage across
112 benchmarks."""
113 benchmarks_unique_coverage_list = [
114 benchmark.unique_region_cov_df for benchmark in self.benchmarks
115 ]
116 return coverage_data_utils.rank_by_average_normalized_score(
117 benchmarks_unique_coverage_list)
118
119 @property
120 def rank_by_average_rank_and_average_rank(self):
121 """Rank fuzzers using average rank per benchmark and average rank
122 across benchmarks."""
123 return data_utils.experiment_level_ranking(
124 self._experiment_snapshots_df,
125 data_utils.benchmark_rank_by_average_rank,
126 data_utils.experiment_rank_by_average_rank)
127
128 @property
129 def rank_by_mean_and_average_rank(self):
130 """Rank fuzzers using mean coverage per benchmark and average rank
131 across benchmarks."""
132 return data_utils.experiment_level_ranking(
133 self._experiment_snapshots_df, data_utils.benchmark_rank_by_mean,
134 data_utils.experiment_rank_by_average_rank)
135
136 @property
137 def rank_by_median_and_average_rank(self):
138 """Rank fuzzers using median coverage per benchmark and average rank
139 across benchmarks."""
140 return data_utils.experiment_level_ranking(
141 self._experiment_snapshots_df, data_utils.benchmark_rank_by_median,
142 data_utils.experiment_rank_by_average_rank)
143
144 @property
145 def rank_by_median_and_average_normalized_score(self):
146 """Rank fuzzers using median coverage per benchmark and average
147 normalized score across benchmarks."""
148 return data_utils.experiment_level_ranking(
149 self._experiment_snapshots_df, data_utils.benchmark_rank_by_median,
150 data_utils.experiment_rank_by_average_normalized_score)
151
152 @property
153 def rank_by_median_and_number_of_firsts(self):
154 """Rank fuzzers using median coverage per benchmark and number of first
155 places across benchmarks."""
156 return data_utils.experiment_level_ranking(
157 self._experiment_snapshots_df, data_utils.benchmark_rank_by_median,
158 data_utils.experiment_rank_by_num_firsts)
159
160 @property
161 def rank_by_stat_test_wins_and_average_rank(self):
162 """Rank fuzzers using statistical test wins per benchmark and average
163 rank across benchmarks."""
164 return data_utils.experiment_level_ranking(
165 self._experiment_snapshots_df,
166 data_utils.benchmark_rank_by_stat_test_wins,
167 data_utils.experiment_rank_by_average_rank)
168
169 @property
170 def friedman_p_value(self):
171 """Friedman test result."""
172 return stat_tests.friedman_test(self.summary_table)
173
174 @property
175 @functools.lru_cache()
176 def friedman_posthoc_p_values(self):
177 """Friedman posthoc test results."""
178 return stat_tests.friedman_posthoc_tests(self.summary_table)
179
180 @property
181 def friedman_conover_plot(self):
182 """Friedman/Conover posthoc test result plot."""
183 plot_filename = 'experiment_friedman_conover_plot.svg'
184 self._plotter.write_heatmap_plot(
185 self.friedman_posthoc_p_values['conover'],
186 self._get_full_path(plot_filename),
187 symmetric=True)
188 return plot_filename
189
190 @property
191 def friedman_nemenyi_plot(self):
192 """Friedman/Nemenyi posthoc test result plot."""
193 plot_filename = 'experiment_friedman_nemenyi_plot.svg'
194 self._plotter.write_heatmap_plot(
195 self.friedman_posthoc_p_values['nemenyi'],
196 self._get_full_path(plot_filename),
197 symmetric=True)
198 return plot_filename
199
200 @property
201 def critical_difference_plot(self):
202 """Critical difference diagram.
203
204 Represents average ranks of fuzzers across all benchmarks,
205 considering medians on final coverage.
206 """
207 average_ranks = self.rank_by_median_and_average_rank
208 num_of_benchmarks = self.summary_table.shape[0]
209
210 plot_filename = 'experiment_critical_difference_plot.svg'
211 self._plotter.write_critical_difference_plot(
212 average_ranks, num_of_benchmarks,
213 self._get_full_path(plot_filename))
214 return plot_filename
215
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/analysis/experiment_results.py b/analysis/experiment_results.py
--- a/analysis/experiment_results.py
+++ b/analysis/experiment_results.py
@@ -72,6 +72,19 @@
def _get_full_path(self, filename):
return os.path.join(self._output_directory, filename)
+ def linkify_names(self, df):
+ """For any DataFrame which is indexed by fuzzer names, turns the fuzzer
+ names into links to their directory with a description on GitHub."""
+ assert df.index.name == 'fuzzer'
+
+ def description_link(commit, fuzzer):
+ return (f'<a href="https://github.com/google/fuzzbench/blob/'
+ f'{commit}/fuzzers/{fuzzer}">{fuzzer}</a>')
+
+ commit = self.git_hash if self.git_hash else 'master'
+ df.index = df.index.map(lambda fuzzer: description_link(commit, fuzzer))
+ return df
+
@property
@functools.lru_cache()
# TODO(lszekeres): With python3.8+, replace above two decorators with:
| {"golden_diff": "diff --git a/analysis/experiment_results.py b/analysis/experiment_results.py\n--- a/analysis/experiment_results.py\n+++ b/analysis/experiment_results.py\n@@ -72,6 +72,19 @@\n def _get_full_path(self, filename):\n return os.path.join(self._output_directory, filename)\n \n+ def linkify_names(self, df):\n+ \"\"\"For any DataFrame which is indexed by fuzzer names, turns the fuzzer\n+ names into links to their directory with a description on GitHub.\"\"\"\n+ assert df.index.name == 'fuzzer'\n+\n+ def description_link(commit, fuzzer):\n+ return (f'<a href=\"https://github.com/google/fuzzbench/blob/'\n+ f'{commit}/fuzzers/{fuzzer}\">{fuzzer}</a>')\n+\n+ commit = self.git_hash if self.git_hash else 'master'\n+ df.index = df.index.map(lambda fuzzer: description_link(commit, fuzzer))\n+ return df\n+\n @property\n @functools.lru_cache()\n # TODO(lszekeres): With python3.8+, replace above two decorators with:\n", "issue": "Interpretation of the FuzzBench Reports\nThe available fuzzer configurations and the results might be confusing for someone who looks at a random FuzzBench report for the first time. To avoid misinterpretation of the results, maybe there could be\r\n1. a description of the intent of the evaluation and/or an explanation of the evaluated fuzzer configurations, and\r\n2. a distinction between \"master/official/representative\" and \"developer\" evaluations and fuzzer variants. \n", "before_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"ExperimentResults class.\"\"\"\n\nimport functools\nimport os\n\nfrom analysis import benchmark_results\nfrom analysis import coverage_data_utils\nfrom analysis import data_utils\nfrom analysis import stat_tests\n\n\nclass ExperimentResults: # pylint: disable=too-many-instance-attributes\n \"\"\"Provides the main interface for getting various analysis results and\n plots about an experiment, represented by |experiment_df|.\n\n Can be used as the context of template based report generation. Each\n result is a property, which is lazily computed and memorized when\n needed multiple times. 
Therefore, when used as a context of a report\n template, only the properties needed for the given report will be computed.\n \"\"\"\n\n def __init__( # pylint: disable=too-many-arguments\n self,\n experiment_df,\n coverage_dict,\n output_directory,\n plotter,\n experiment_name=None):\n if experiment_name:\n self.name = experiment_name\n else:\n # Take name from first row.\n self.name = experiment_df.experiment.iloc[0]\n\n # FuzzBench repo commit hash.\n self.git_hash = None\n if 'git_hash' in experiment_df.columns:\n # Not possible to represent hashes for multiple experiments.\n if len(experiment_df.experiment.unique()) == 1:\n self.git_hash = experiment_df.git_hash.iloc[0]\n\n # Earliest trial start time.\n self.started = experiment_df.time_started.dropna().min()\n # Latest trial end time.\n self.ended = experiment_df.time_ended.dropna().max()\n\n # Keep data frame without non-interesting columns.\n self._experiment_df = data_utils.drop_uninteresting_columns(\n experiment_df)\n\n # Directory where the rendered plots are written to.\n self._output_directory = output_directory\n\n self._plotter = plotter\n\n # Dictionary to store the full coverage data.\n self._coverage_dict = coverage_dict\n\n def _get_full_path(self, filename):\n return os.path.join(self._output_directory, filename)\n\n @property\n @functools.lru_cache()\n # TODO(lszekeres): With python3.8+, replace above two decorators with:\n # @functools.cached_property\n def _experiment_snapshots_df(self):\n \"\"\"Data frame containing only the time snapshots, for each benchmark,\n based on which we do further analysis, i.e., statistical tests and\n ranking.\"\"\"\n return data_utils.get_experiment_snapshots(self._experiment_df)\n\n @property\n @functools.lru_cache()\n def benchmarks(self):\n \"\"\"Returns the list of BenchmarkResults.\n\n This is cheap as no computation is done on the benchmark data,\n until a property is evaluated.\n \"\"\"\n benchmark_names = self._experiment_df.benchmark.unique()\n return [\n benchmark_results.BenchmarkResults(name, self._experiment_df,\n self._coverage_dict,\n self._output_directory,\n self._plotter)\n for name in sorted(benchmark_names)\n ]\n\n @property\n @functools.lru_cache()\n def summary_table(self):\n \"\"\"A pivot table of medians for each fuzzer on each benchmark.\"\"\"\n return data_utils.experiment_pivot_table(\n self._experiment_snapshots_df, data_utils.benchmark_rank_by_median)\n\n @property\n def rank_by_unique_coverage_average_normalized_score(self):\n \"\"\"Rank fuzzers using average normalized score on unique coverage across\n benchmarks.\"\"\"\n benchmarks_unique_coverage_list = [\n benchmark.unique_region_cov_df for benchmark in self.benchmarks\n ]\n return coverage_data_utils.rank_by_average_normalized_score(\n benchmarks_unique_coverage_list)\n\n @property\n def rank_by_average_rank_and_average_rank(self):\n \"\"\"Rank fuzzers using average rank per benchmark and average rank\n across benchmarks.\"\"\"\n return data_utils.experiment_level_ranking(\n self._experiment_snapshots_df,\n data_utils.benchmark_rank_by_average_rank,\n data_utils.experiment_rank_by_average_rank)\n\n @property\n def rank_by_mean_and_average_rank(self):\n \"\"\"Rank fuzzers using mean coverage per benchmark and average rank\n across benchmarks.\"\"\"\n return data_utils.experiment_level_ranking(\n self._experiment_snapshots_df, data_utils.benchmark_rank_by_mean,\n data_utils.experiment_rank_by_average_rank)\n\n @property\n def rank_by_median_and_average_rank(self):\n \"\"\"Rank fuzzers using median 
coverage per benchmark and average rank\n across benchmarks.\"\"\"\n return data_utils.experiment_level_ranking(\n self._experiment_snapshots_df, data_utils.benchmark_rank_by_median,\n data_utils.experiment_rank_by_average_rank)\n\n @property\n def rank_by_median_and_average_normalized_score(self):\n \"\"\"Rank fuzzers using median coverage per benchmark and average\n normalized score across benchmarks.\"\"\"\n return data_utils.experiment_level_ranking(\n self._experiment_snapshots_df, data_utils.benchmark_rank_by_median,\n data_utils.experiment_rank_by_average_normalized_score)\n\n @property\n def rank_by_median_and_number_of_firsts(self):\n \"\"\"Rank fuzzers using median coverage per benchmark and number of first\n places across benchmarks.\"\"\"\n return data_utils.experiment_level_ranking(\n self._experiment_snapshots_df, data_utils.benchmark_rank_by_median,\n data_utils.experiment_rank_by_num_firsts)\n\n @property\n def rank_by_stat_test_wins_and_average_rank(self):\n \"\"\"Rank fuzzers using statistical test wins per benchmark and average\n rank across benchmarks.\"\"\"\n return data_utils.experiment_level_ranking(\n self._experiment_snapshots_df,\n data_utils.benchmark_rank_by_stat_test_wins,\n data_utils.experiment_rank_by_average_rank)\n\n @property\n def friedman_p_value(self):\n \"\"\"Friedman test result.\"\"\"\n return stat_tests.friedman_test(self.summary_table)\n\n @property\n @functools.lru_cache()\n def friedman_posthoc_p_values(self):\n \"\"\"Friedman posthoc test results.\"\"\"\n return stat_tests.friedman_posthoc_tests(self.summary_table)\n\n @property\n def friedman_conover_plot(self):\n \"\"\"Friedman/Conover posthoc test result plot.\"\"\"\n plot_filename = 'experiment_friedman_conover_plot.svg'\n self._plotter.write_heatmap_plot(\n self.friedman_posthoc_p_values['conover'],\n self._get_full_path(plot_filename),\n symmetric=True)\n return plot_filename\n\n @property\n def friedman_nemenyi_plot(self):\n \"\"\"Friedman/Nemenyi posthoc test result plot.\"\"\"\n plot_filename = 'experiment_friedman_nemenyi_plot.svg'\n self._plotter.write_heatmap_plot(\n self.friedman_posthoc_p_values['nemenyi'],\n self._get_full_path(plot_filename),\n symmetric=True)\n return plot_filename\n\n @property\n def critical_difference_plot(self):\n \"\"\"Critical difference diagram.\n\n Represents average ranks of fuzzers across all benchmarks,\n considering medians on final coverage.\n \"\"\"\n average_ranks = self.rank_by_median_and_average_rank\n num_of_benchmarks = self.summary_table.shape[0]\n\n plot_filename = 'experiment_critical_difference_plot.svg'\n self._plotter.write_critical_difference_plot(\n average_ranks, num_of_benchmarks,\n self._get_full_path(plot_filename))\n return plot_filename\n", "path": "analysis/experiment_results.py"}], "after_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"ExperimentResults class.\"\"\"\n\nimport functools\nimport os\n\nfrom analysis import benchmark_results\nfrom analysis import 
coverage_data_utils\nfrom analysis import data_utils\nfrom analysis import stat_tests\n\n\nclass ExperimentResults: # pylint: disable=too-many-instance-attributes\n \"\"\"Provides the main interface for getting various analysis results and\n plots about an experiment, represented by |experiment_df|.\n\n Can be used as the context of template based report generation. Each\n result is a property, which is lazily computed and memorized when\n needed multiple times. Therefore, when used as a context of a report\n template, only the properties needed for the given report will be computed.\n \"\"\"\n\n def __init__( # pylint: disable=too-many-arguments\n self,\n experiment_df,\n coverage_dict,\n output_directory,\n plotter,\n experiment_name=None):\n if experiment_name:\n self.name = experiment_name\n else:\n # Take name from first row.\n self.name = experiment_df.experiment.iloc[0]\n\n # FuzzBench repo commit hash.\n self.git_hash = None\n if 'git_hash' in experiment_df.columns:\n # Not possible to represent hashes for multiple experiments.\n if len(experiment_df.experiment.unique()) == 1:\n self.git_hash = experiment_df.git_hash.iloc[0]\n\n # Earliest trial start time.\n self.started = experiment_df.time_started.dropna().min()\n # Latest trial end time.\n self.ended = experiment_df.time_ended.dropna().max()\n\n # Keep data frame without non-interesting columns.\n self._experiment_df = data_utils.drop_uninteresting_columns(\n experiment_df)\n\n # Directory where the rendered plots are written to.\n self._output_directory = output_directory\n\n self._plotter = plotter\n\n # Dictionary to store the full coverage data.\n self._coverage_dict = coverage_dict\n\n def _get_full_path(self, filename):\n return os.path.join(self._output_directory, filename)\n\n def linkify_names(self, df):\n \"\"\"For any DataFrame which is indexed by fuzzer names, turns the fuzzer\n names into links to their directory with a description on GitHub.\"\"\"\n assert df.index.name == 'fuzzer'\n\n def description_link(commit, fuzzer):\n return (f'<a href=\"https://github.com/google/fuzzbench/blob/'\n f'{commit}/fuzzers/{fuzzer}\">{fuzzer}</a>')\n\n commit = self.git_hash if self.git_hash else 'master'\n df.index = df.index.map(lambda fuzzer: description_link(commit, fuzzer))\n return df\n\n @property\n @functools.lru_cache()\n # TODO(lszekeres): With python3.8+, replace above two decorators with:\n # @functools.cached_property\n def _experiment_snapshots_df(self):\n \"\"\"Data frame containing only the time snapshots, for each benchmark,\n based on which we do further analysis, i.e., statistical tests and\n ranking.\"\"\"\n return data_utils.get_experiment_snapshots(self._experiment_df)\n\n @property\n @functools.lru_cache()\n def benchmarks(self):\n \"\"\"Returns the list of BenchmarkResults.\n\n This is cheap as no computation is done on the benchmark data,\n until a property is evaluated.\n \"\"\"\n benchmark_names = self._experiment_df.benchmark.unique()\n return [\n benchmark_results.BenchmarkResults(name, self._experiment_df,\n self._coverage_dict,\n self._output_directory,\n self._plotter)\n for name in sorted(benchmark_names)\n ]\n\n @property\n @functools.lru_cache()\n def summary_table(self):\n \"\"\"A pivot table of medians for each fuzzer on each benchmark.\"\"\"\n return data_utils.experiment_pivot_table(\n self._experiment_snapshots_df, data_utils.benchmark_rank_by_median)\n\n @property\n def rank_by_unique_coverage_average_normalized_score(self):\n \"\"\"Rank fuzzers using average normalized score on unique 
coverage across\n benchmarks.\"\"\"\n benchmarks_unique_coverage_list = [\n benchmark.unique_region_cov_df for benchmark in self.benchmarks\n ]\n return coverage_data_utils.rank_by_average_normalized_score(\n benchmarks_unique_coverage_list)\n\n @property\n def rank_by_average_rank_and_average_rank(self):\n \"\"\"Rank fuzzers using average rank per benchmark and average rank\n across benchmarks.\"\"\"\n return data_utils.experiment_level_ranking(\n self._experiment_snapshots_df,\n data_utils.benchmark_rank_by_average_rank,\n data_utils.experiment_rank_by_average_rank)\n\n @property\n def rank_by_mean_and_average_rank(self):\n \"\"\"Rank fuzzers using mean coverage per benchmark and average rank\n across benchmarks.\"\"\"\n return data_utils.experiment_level_ranking(\n self._experiment_snapshots_df, data_utils.benchmark_rank_by_mean,\n data_utils.experiment_rank_by_average_rank)\n\n @property\n def rank_by_median_and_average_rank(self):\n \"\"\"Rank fuzzers using median coverage per benchmark and average rank\n across benchmarks.\"\"\"\n return data_utils.experiment_level_ranking(\n self._experiment_snapshots_df, data_utils.benchmark_rank_by_median,\n data_utils.experiment_rank_by_average_rank)\n\n @property\n def rank_by_median_and_average_normalized_score(self):\n \"\"\"Rank fuzzers using median coverage per benchmark and average\n normalized score across benchmarks.\"\"\"\n return data_utils.experiment_level_ranking(\n self._experiment_snapshots_df, data_utils.benchmark_rank_by_median,\n data_utils.experiment_rank_by_average_normalized_score)\n\n @property\n def rank_by_median_and_number_of_firsts(self):\n \"\"\"Rank fuzzers using median coverage per benchmark and number of first\n places across benchmarks.\"\"\"\n return data_utils.experiment_level_ranking(\n self._experiment_snapshots_df, data_utils.benchmark_rank_by_median,\n data_utils.experiment_rank_by_num_firsts)\n\n @property\n def rank_by_stat_test_wins_and_average_rank(self):\n \"\"\"Rank fuzzers using statistical test wins per benchmark and average\n rank across benchmarks.\"\"\"\n return data_utils.experiment_level_ranking(\n self._experiment_snapshots_df,\n data_utils.benchmark_rank_by_stat_test_wins,\n data_utils.experiment_rank_by_average_rank)\n\n @property\n def friedman_p_value(self):\n \"\"\"Friedman test result.\"\"\"\n return stat_tests.friedman_test(self.summary_table)\n\n @property\n @functools.lru_cache()\n def friedman_posthoc_p_values(self):\n \"\"\"Friedman posthoc test results.\"\"\"\n return stat_tests.friedman_posthoc_tests(self.summary_table)\n\n @property\n def friedman_conover_plot(self):\n \"\"\"Friedman/Conover posthoc test result plot.\"\"\"\n plot_filename = 'experiment_friedman_conover_plot.svg'\n self._plotter.write_heatmap_plot(\n self.friedman_posthoc_p_values['conover'],\n self._get_full_path(plot_filename),\n symmetric=True)\n return plot_filename\n\n @property\n def friedman_nemenyi_plot(self):\n \"\"\"Friedman/Nemenyi posthoc test result plot.\"\"\"\n plot_filename = 'experiment_friedman_nemenyi_plot.svg'\n self._plotter.write_heatmap_plot(\n self.friedman_posthoc_p_values['nemenyi'],\n self._get_full_path(plot_filename),\n symmetric=True)\n return plot_filename\n\n @property\n def critical_difference_plot(self):\n \"\"\"Critical difference diagram.\n\n Represents average ranks of fuzzers across all benchmarks,\n considering medians on final coverage.\n \"\"\"\n average_ranks = self.rank_by_median_and_average_rank\n num_of_benchmarks = self.summary_table.shape[0]\n\n plot_filename = 
'experiment_critical_difference_plot.svg'\n self._plotter.write_critical_difference_plot(\n average_ranks, num_of_benchmarks,\n self._get_full_path(plot_filename))\n return plot_filename\n", "path": "analysis/experiment_results.py"}]} | 2,602 | 249 |
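The `linkify_names` helper added by this patch rewrites a fuzzer-indexed DataFrame's index entries into GitHub links pointing at each fuzzer's directory, which is how the report gains the per-fuzzer descriptions requested in the issue. A rough standalone sketch of the same transformation (the toy ranking table and the hard-coded `commit = "master"` are assumptions for illustration; only the link format comes from the patch):

```python
import pandas as pd

# Toy ranking table; linkify_names asserts that the index is named 'fuzzer'.
ranks = pd.DataFrame(
    {"avg rank": [1.4, 2.1]},
    index=pd.Index(["aflplusplus", "honggfuzz"], name="fuzzer"),
)

# Inlined equivalent of linkify_names so the sketch runs on its own;
# 'master' plays the role of self.git_hash when no commit hash is recorded.
commit = "master"
ranks.index = ranks.index.map(
    lambda fuzzer: (
        f'<a href="https://github.com/google/fuzzbench/blob/'
        f'{commit}/fuzzers/{fuzzer}">{fuzzer}</a>'
    )
)
print(ranks)
```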
gh_patches_debug_26676 | rasdani/github-patches | git_diff | airctic__icevision-143 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
load_state_dict bug
## 🐛 Bug
Introducing `load_state_dict` into the models in order to avoid doing `model.model.load_state_dict` caused a bug when trying to load from a previous checkpoint.
When saving the model, the state dict keys look like `model.backbone...`, but the new `load_state_dict` expects only `backbone`, without the `model.` prefix.
More specifically, the following fails:
```python
model.load_state_dict(model.state_dict())
```
Related to #137
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mantisshrimp/models/mantis_rcnn/mantis_mask_rcnn.py`
Content:
```
1 __all__ = ["MantisMaskRCNN"]
2
3 from mantisshrimp.imports import *
4 from mantisshrimp.core import *
5 from mantisshrimp.models.mantis_rcnn.rcnn_param_groups import *
6 from mantisshrimp.models.mantis_rcnn.mantis_rcnn import *
7 from mantisshrimp.models.mantis_rcnn.mantis_faster_rcnn import *
8 from mantisshrimp.backbones import *
9
10
11 class MantisMaskRCNN(MantisRCNN):
12 @delegates(MaskRCNN.__init__)
13 def __init__(
14 self,
15 num_classes: int,
16 backbone: nn.Module = None,
17 param_groups: List[nn.Module] = None,
18 **kwargs,
19 ):
20 super().__init__()
21 self.num_classes = num_classes
22
23 if backbone is None:
24 # Creates the default fasterrcnn as given in pytorch. Trained on COCO dataset
25 self.model = maskrcnn_resnet50_fpn(pretrained=True, **kwargs)
26 in_features = self.model.roi_heads.box_predictor.cls_score.in_features
27 self.model.roi_heads.box_predictor = FastRCNNPredictor(
28 in_features, num_classes
29 )
30 in_features_mask = (
31 self.model.roi_heads.mask_predictor.conv5_mask.in_channels
32 )
33 self.model.roi_heads.mask_predictor = MaskRCNNPredictor(
34 in_channels=in_features_mask, dim_reduced=256, num_classes=num_classes
35 )
36 param_groups = resnet_fpn_backbone_param_groups(self.model.backbone)
37 else:
38 self.model = MaskRCNN(backbone, num_classes=num_classes, **kwargs)
39 param_groups = param_groups or [backbone]
40
41 self._param_groups = param_groups + [self.model.rpn, self.model.roi_heads]
42 check_all_model_params_in_groups(self.model, self.param_groups)
43
44 def forward(self, images, targets=None):
45 return self.model(images, targets)
46
47 def predict(
48 self,
49 images: List[np.ndarray],
50 detection_threshold: float = 0.5,
51 mask_threshold: float = 0.5,
52 ):
53 convert_raw_prediction = partial(
54 self.convert_raw_prediction,
55 detection_threshold=detection_threshold,
56 mask_threshold=mask_threshold,
57 )
58
59 return self._predict(
60 images=images, convert_raw_prediction=convert_raw_prediction
61 )
62
63 def load_state_dict(
64 self, state_dict: Dict[str, Tensor], strict: bool = True,
65 ):
66 return self.model.load_state_dict(state_dict=state_dict, strict=strict)
67
68 @property
69 def param_groups(self):
70 return self._param_groups
71
72 @staticmethod
73 def convert_raw_prediction(
74 raw_pred: dict, detection_threshold: float, mask_threshold: float
75 ):
76 preds = MantisFasterRCNN.convert_raw_prediction(
77 raw_pred=raw_pred, detection_threshold=detection_threshold
78 )
79
80 above_threshold = preds["above_threshold"]
81 masks_probs = raw_pred["masks"][above_threshold]
82 masks_probs = masks_probs.detach().cpu().numpy()
83 # convert probabilities to 0 or 1 based on mask_threshold
84 masks = masks_probs > mask_threshold
85 masks = MaskArray(masks.squeeze(1))
86
87 return {**preds, "masks": masks}
88
89 @staticmethod
90 def build_training_sample(
91 imageid: int,
92 img: np.ndarray,
93 label: List[int],
94 bbox: List[BBox],
95 mask: MaskArray,
96 **kwargs,
97 ):
98 x, y = MantisFasterRCNN.build_training_sample(
99 imageid=imageid, img=img, label=label, bbox=bbox,
100 )
101 y["masks"] = tensor(mask.data, dtype=torch.uint8)
102 return x, y
103
```
Path: `mantisshrimp/models/mantis_rcnn/mantis_faster_rcnn.py`
Content:
```
1 __all__ = ["MantisFasterRCNN"]
2
3 from mantisshrimp.imports import *
4 from mantisshrimp.core import *
5 from mantisshrimp.models.mantis_rcnn.rcnn_param_groups import *
6 from mantisshrimp.models.mantis_rcnn.mantis_rcnn import *
7 from mantisshrimp.backbones import *
8
9
10 class MantisFasterRCNN(MantisRCNN):
11 """
12 Creates a flexible Faster RCNN implementation based on torchvision library.
13 Args:
14 n_class (int) : number of classes. Do not have class_id "0" it is reserved as background.
15 n_class = number of classes to label + 1 for background.
16 """
17
18 @delegates(FasterRCNN.__init__)
19 def __init__(
20 self,
21 num_classes: int,
22 backbone: nn.Module = None,
23 param_groups: List[nn.Module] = None,
24 metrics=None,
25 **kwargs,
26 ):
27 super().__init__(metrics=metrics)
28 self.num_classes = num_classes
29 self.backbone = backbone
30 if backbone is None:
31 # Creates the default fasterrcnn as given in pytorch. Trained on COCO dataset
32 self.model = fasterrcnn_resnet50_fpn(pretrained=True, **kwargs)
33 in_features = self.model.roi_heads.box_predictor.cls_score.in_features
34 self.model.roi_heads.box_predictor = FastRCNNPredictor(
35 in_features, num_classes
36 )
37 param_groups = resnet_fpn_backbone_param_groups(self.model.backbone)
38 else:
39 self.model = FasterRCNN(backbone, num_classes=num_classes, **kwargs)
40 param_groups = param_groups or [backbone]
41
42 self._param_groups = param_groups + [self.model.rpn, self.model.roi_heads]
43 check_all_model_params_in_groups(self.model, self._param_groups)
44
45 def forward(self, images, targets=None):
46 return self.model(images, targets)
47
48 def predict(self, images: List[np.ndarray], detection_threshold: float = 0.5):
49 convert_raw_prediction = partial(
50 self.convert_raw_prediction, detection_threshold=detection_threshold,
51 )
52
53 return self._predict(
54 images=images, convert_raw_prediction=convert_raw_prediction
55 )
56
57 def load_state_dict(
58 self, state_dict: Dict[str, Tensor], strict: bool = True,
59 ):
60 return self.model.load_state_dict(state_dict=state_dict, strict=strict)
61
62 @property
63 def param_groups(self):
64 return self._param_groups
65
66 @staticmethod
67 def convert_raw_prediction(raw_pred: dict, detection_threshold: float):
68 above_threshold = raw_pred["scores"] >= detection_threshold
69
70 labels = raw_pred["labels"][above_threshold]
71 labels = labels.detach().cpu().numpy()
72
73 scores = raw_pred["scores"][above_threshold]
74 scores = scores.detach().cpu().numpy()
75
76 boxes = raw_pred["boxes"][above_threshold]
77 bboxes = []
78 for box_tensor in boxes:
79 xyxy = box_tensor.cpu().tolist()
80 bbox = BBox.from_xyxy(*xyxy)
81 bboxes.append(bbox)
82
83 return {
84 "labels": labels,
85 "scores": scores,
86 "bboxes": bboxes,
87 "above_threshold": above_threshold,
88 }
89
90 @staticmethod
91 def build_training_sample(
92 imageid: int, img: np.ndarray, label: List[int], bbox: List[BBox], **kwargs,
93 ):
94 x = im2tensor(img)
95 # injected values when annotations are empty are disconsidered
96 # because we mark label as 0 (background)
97 _fake_box = [0, 1, 2, 3]
98 y = {
99 "image_id": tensor(imageid, dtype=torch.int64),
100 "labels": tensor(label or [0], dtype=torch.int64),
101 "boxes": tensor([o.xyxy for o in bbox] or [_fake_box], dtype=torch.float),
102 }
103 return x, y
104
```
Path: `mantisshrimp/models/mantis_module.py`
Content:
```
1 __all__ = ["MantisModule"]
2
3 from mantisshrimp.imports import *
4 from mantisshrimp.models.device_module_mixin import *
5 from mantisshrimp.models.parameters_splits_module_mixin import *
6
7
8 class MantisModule(DeviceModuleMixin, ParametersSplitsModuleMixin, nn.Module, ABC):
9 @abstractmethod
10 def load_state_dict(
11 self, state_dict: Dict[str, Tensor], strict: bool = True,
12 ):
13 """ Load weights
14 """
15
16 @classmethod
17 @abstractmethod
18 def dataloader(cls, **kwargs) -> DataLoader:
19 """ Returns the specific dataloader for this class
20 """
21
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mantisshrimp/models/mantis_module.py b/mantisshrimp/models/mantis_module.py
--- a/mantisshrimp/models/mantis_module.py
+++ b/mantisshrimp/models/mantis_module.py
@@ -6,13 +6,6 @@
class MantisModule(DeviceModuleMixin, ParametersSplitsModuleMixin, nn.Module, ABC):
- @abstractmethod
- def load_state_dict(
- self, state_dict: Dict[str, Tensor], strict: bool = True,
- ):
- """ Load weights
- """
-
@classmethod
@abstractmethod
def dataloader(cls, **kwargs) -> DataLoader:
diff --git a/mantisshrimp/models/mantis_rcnn/mantis_faster_rcnn.py b/mantisshrimp/models/mantis_rcnn/mantis_faster_rcnn.py
--- a/mantisshrimp/models/mantis_rcnn/mantis_faster_rcnn.py
+++ b/mantisshrimp/models/mantis_rcnn/mantis_faster_rcnn.py
@@ -54,11 +54,6 @@
images=images, convert_raw_prediction=convert_raw_prediction
)
- def load_state_dict(
- self, state_dict: Dict[str, Tensor], strict: bool = True,
- ):
- return self.model.load_state_dict(state_dict=state_dict, strict=strict)
-
@property
def param_groups(self):
return self._param_groups
diff --git a/mantisshrimp/models/mantis_rcnn/mantis_mask_rcnn.py b/mantisshrimp/models/mantis_rcnn/mantis_mask_rcnn.py
--- a/mantisshrimp/models/mantis_rcnn/mantis_mask_rcnn.py
+++ b/mantisshrimp/models/mantis_rcnn/mantis_mask_rcnn.py
@@ -60,11 +60,6 @@
images=images, convert_raw_prediction=convert_raw_prediction
)
- def load_state_dict(
- self, state_dict: Dict[str, Tensor], strict: bool = True,
- ):
- return self.model.load_state_dict(state_dict=state_dict, strict=strict)
-
@property
def param_groups(self):
return self._param_groups
| {"golden_diff": "diff --git a/mantisshrimp/models/mantis_module.py b/mantisshrimp/models/mantis_module.py\n--- a/mantisshrimp/models/mantis_module.py\n+++ b/mantisshrimp/models/mantis_module.py\n@@ -6,13 +6,6 @@\n \n \n class MantisModule(DeviceModuleMixin, ParametersSplitsModuleMixin, nn.Module, ABC):\n- @abstractmethod\n- def load_state_dict(\n- self, state_dict: Dict[str, Tensor], strict: bool = True,\n- ):\n- \"\"\" Load weights\n- \"\"\"\n-\n @classmethod\n @abstractmethod\n def dataloader(cls, **kwargs) -> DataLoader:\ndiff --git a/mantisshrimp/models/mantis_rcnn/mantis_faster_rcnn.py b/mantisshrimp/models/mantis_rcnn/mantis_faster_rcnn.py\n--- a/mantisshrimp/models/mantis_rcnn/mantis_faster_rcnn.py\n+++ b/mantisshrimp/models/mantis_rcnn/mantis_faster_rcnn.py\n@@ -54,11 +54,6 @@\n images=images, convert_raw_prediction=convert_raw_prediction\n )\n \n- def load_state_dict(\n- self, state_dict: Dict[str, Tensor], strict: bool = True,\n- ):\n- return self.model.load_state_dict(state_dict=state_dict, strict=strict)\n-\n @property\n def param_groups(self):\n return self._param_groups\ndiff --git a/mantisshrimp/models/mantis_rcnn/mantis_mask_rcnn.py b/mantisshrimp/models/mantis_rcnn/mantis_mask_rcnn.py\n--- a/mantisshrimp/models/mantis_rcnn/mantis_mask_rcnn.py\n+++ b/mantisshrimp/models/mantis_rcnn/mantis_mask_rcnn.py\n@@ -60,11 +60,6 @@\n images=images, convert_raw_prediction=convert_raw_prediction\n )\n \n- def load_state_dict(\n- self, state_dict: Dict[str, Tensor], strict: bool = True,\n- ):\n- return self.model.load_state_dict(state_dict=state_dict, strict=strict)\n-\n @property\n def param_groups(self):\n return self._param_groups\n", "issue": "load_state_dict bug\n## \ud83d\udc1b Bug\r\nIntroducing `load_state_dict` into the models in order to avoid doing `model.model.load_state_dict` caused a bug when trying to load from a previous checkpoint.\r\n\r\nWhen saving the model the state dict looks like: `model.backbone...` but the new `load_state_dict` expects only `backbone`, without the model part.\r\n\r\nMore specifically, the following fails:\r\n```python\r\nmodel.load_state_dict(model.state_dict())\r\n```\r\n\r\nRelated to #137 \r\n\n", "before_files": [{"content": "__all__ = [\"MantisMaskRCNN\"]\n\nfrom mantisshrimp.imports import *\nfrom mantisshrimp.core import *\nfrom mantisshrimp.models.mantis_rcnn.rcnn_param_groups import *\nfrom mantisshrimp.models.mantis_rcnn.mantis_rcnn import *\nfrom mantisshrimp.models.mantis_rcnn.mantis_faster_rcnn import *\nfrom mantisshrimp.backbones import *\n\n\nclass MantisMaskRCNN(MantisRCNN):\n @delegates(MaskRCNN.__init__)\n def __init__(\n self,\n num_classes: int,\n backbone: nn.Module = None,\n param_groups: List[nn.Module] = None,\n **kwargs,\n ):\n super().__init__()\n self.num_classes = num_classes\n\n if backbone is None:\n # Creates the default fasterrcnn as given in pytorch. 
Trained on COCO dataset\n self.model = maskrcnn_resnet50_fpn(pretrained=True, **kwargs)\n in_features = self.model.roi_heads.box_predictor.cls_score.in_features\n self.model.roi_heads.box_predictor = FastRCNNPredictor(\n in_features, num_classes\n )\n in_features_mask = (\n self.model.roi_heads.mask_predictor.conv5_mask.in_channels\n )\n self.model.roi_heads.mask_predictor = MaskRCNNPredictor(\n in_channels=in_features_mask, dim_reduced=256, num_classes=num_classes\n )\n param_groups = resnet_fpn_backbone_param_groups(self.model.backbone)\n else:\n self.model = MaskRCNN(backbone, num_classes=num_classes, **kwargs)\n param_groups = param_groups or [backbone]\n\n self._param_groups = param_groups + [self.model.rpn, self.model.roi_heads]\n check_all_model_params_in_groups(self.model, self.param_groups)\n\n def forward(self, images, targets=None):\n return self.model(images, targets)\n\n def predict(\n self,\n images: List[np.ndarray],\n detection_threshold: float = 0.5,\n mask_threshold: float = 0.5,\n ):\n convert_raw_prediction = partial(\n self.convert_raw_prediction,\n detection_threshold=detection_threshold,\n mask_threshold=mask_threshold,\n )\n\n return self._predict(\n images=images, convert_raw_prediction=convert_raw_prediction\n )\n\n def load_state_dict(\n self, state_dict: Dict[str, Tensor], strict: bool = True,\n ):\n return self.model.load_state_dict(state_dict=state_dict, strict=strict)\n\n @property\n def param_groups(self):\n return self._param_groups\n\n @staticmethod\n def convert_raw_prediction(\n raw_pred: dict, detection_threshold: float, mask_threshold: float\n ):\n preds = MantisFasterRCNN.convert_raw_prediction(\n raw_pred=raw_pred, detection_threshold=detection_threshold\n )\n\n above_threshold = preds[\"above_threshold\"]\n masks_probs = raw_pred[\"masks\"][above_threshold]\n masks_probs = masks_probs.detach().cpu().numpy()\n # convert probabilities to 0 or 1 based on mask_threshold\n masks = masks_probs > mask_threshold\n masks = MaskArray(masks.squeeze(1))\n\n return {**preds, \"masks\": masks}\n\n @staticmethod\n def build_training_sample(\n imageid: int,\n img: np.ndarray,\n label: List[int],\n bbox: List[BBox],\n mask: MaskArray,\n **kwargs,\n ):\n x, y = MantisFasterRCNN.build_training_sample(\n imageid=imageid, img=img, label=label, bbox=bbox,\n )\n y[\"masks\"] = tensor(mask.data, dtype=torch.uint8)\n return x, y\n", "path": "mantisshrimp/models/mantis_rcnn/mantis_mask_rcnn.py"}, {"content": "__all__ = [\"MantisFasterRCNN\"]\n\nfrom mantisshrimp.imports import *\nfrom mantisshrimp.core import *\nfrom mantisshrimp.models.mantis_rcnn.rcnn_param_groups import *\nfrom mantisshrimp.models.mantis_rcnn.mantis_rcnn import *\nfrom mantisshrimp.backbones import *\n\n\nclass MantisFasterRCNN(MantisRCNN):\n \"\"\"\n Creates a flexible Faster RCNN implementation based on torchvision library.\n Args: \n n_class (int) : number of classes. Do not have class_id \"0\" it is reserved as background.\n n_class = number of classes to label + 1 for background.\n \"\"\"\n\n @delegates(FasterRCNN.__init__)\n def __init__(\n self,\n num_classes: int,\n backbone: nn.Module = None,\n param_groups: List[nn.Module] = None,\n metrics=None,\n **kwargs,\n ):\n super().__init__(metrics=metrics)\n self.num_classes = num_classes\n self.backbone = backbone\n if backbone is None:\n # Creates the default fasterrcnn as given in pytorch. 
Trained on COCO dataset\n self.model = fasterrcnn_resnet50_fpn(pretrained=True, **kwargs)\n in_features = self.model.roi_heads.box_predictor.cls_score.in_features\n self.model.roi_heads.box_predictor = FastRCNNPredictor(\n in_features, num_classes\n )\n param_groups = resnet_fpn_backbone_param_groups(self.model.backbone)\n else:\n self.model = FasterRCNN(backbone, num_classes=num_classes, **kwargs)\n param_groups = param_groups or [backbone]\n\n self._param_groups = param_groups + [self.model.rpn, self.model.roi_heads]\n check_all_model_params_in_groups(self.model, self._param_groups)\n\n def forward(self, images, targets=None):\n return self.model(images, targets)\n\n def predict(self, images: List[np.ndarray], detection_threshold: float = 0.5):\n convert_raw_prediction = partial(\n self.convert_raw_prediction, detection_threshold=detection_threshold,\n )\n\n return self._predict(\n images=images, convert_raw_prediction=convert_raw_prediction\n )\n\n def load_state_dict(\n self, state_dict: Dict[str, Tensor], strict: bool = True,\n ):\n return self.model.load_state_dict(state_dict=state_dict, strict=strict)\n\n @property\n def param_groups(self):\n return self._param_groups\n\n @staticmethod\n def convert_raw_prediction(raw_pred: dict, detection_threshold: float):\n above_threshold = raw_pred[\"scores\"] >= detection_threshold\n\n labels = raw_pred[\"labels\"][above_threshold]\n labels = labels.detach().cpu().numpy()\n\n scores = raw_pred[\"scores\"][above_threshold]\n scores = scores.detach().cpu().numpy()\n\n boxes = raw_pred[\"boxes\"][above_threshold]\n bboxes = []\n for box_tensor in boxes:\n xyxy = box_tensor.cpu().tolist()\n bbox = BBox.from_xyxy(*xyxy)\n bboxes.append(bbox)\n\n return {\n \"labels\": labels,\n \"scores\": scores,\n \"bboxes\": bboxes,\n \"above_threshold\": above_threshold,\n }\n\n @staticmethod\n def build_training_sample(\n imageid: int, img: np.ndarray, label: List[int], bbox: List[BBox], **kwargs,\n ):\n x = im2tensor(img)\n # injected values when annotations are empty are disconsidered\n # because we mark label as 0 (background)\n _fake_box = [0, 1, 2, 3]\n y = {\n \"image_id\": tensor(imageid, dtype=torch.int64),\n \"labels\": tensor(label or [0], dtype=torch.int64),\n \"boxes\": tensor([o.xyxy for o in bbox] or [_fake_box], dtype=torch.float),\n }\n return x, y\n", "path": "mantisshrimp/models/mantis_rcnn/mantis_faster_rcnn.py"}, {"content": "__all__ = [\"MantisModule\"]\n\nfrom mantisshrimp.imports import *\nfrom mantisshrimp.models.device_module_mixin import *\nfrom mantisshrimp.models.parameters_splits_module_mixin import *\n\n\nclass MantisModule(DeviceModuleMixin, ParametersSplitsModuleMixin, nn.Module, ABC):\n @abstractmethod\n def load_state_dict(\n self, state_dict: Dict[str, Tensor], strict: bool = True,\n ):\n \"\"\" Load weights\n \"\"\"\n\n @classmethod\n @abstractmethod\n def dataloader(cls, **kwargs) -> DataLoader:\n \"\"\" Returns the specific dataloader for this class\n \"\"\"\n", "path": "mantisshrimp/models/mantis_module.py"}], "after_files": [{"content": "__all__ = [\"MantisMaskRCNN\"]\n\nfrom mantisshrimp.imports import *\nfrom mantisshrimp.core import *\nfrom mantisshrimp.models.mantis_rcnn.rcnn_param_groups import *\nfrom mantisshrimp.models.mantis_rcnn.mantis_rcnn import *\nfrom mantisshrimp.models.mantis_rcnn.mantis_faster_rcnn import *\nfrom mantisshrimp.backbones import *\n\n\nclass MantisMaskRCNN(MantisRCNN):\n @delegates(MaskRCNN.__init__)\n def __init__(\n self,\n num_classes: int,\n backbone: nn.Module = None,\n param_groups: 
List[nn.Module] = None,\n **kwargs,\n ):\n super().__init__()\n self.num_classes = num_classes\n\n if backbone is None:\n # Creates the default fasterrcnn as given in pytorch. Trained on COCO dataset\n self.model = maskrcnn_resnet50_fpn(pretrained=True, **kwargs)\n in_features = self.model.roi_heads.box_predictor.cls_score.in_features\n self.model.roi_heads.box_predictor = FastRCNNPredictor(\n in_features, num_classes\n )\n in_features_mask = (\n self.model.roi_heads.mask_predictor.conv5_mask.in_channels\n )\n self.model.roi_heads.mask_predictor = MaskRCNNPredictor(\n in_channels=in_features_mask, dim_reduced=256, num_classes=num_classes\n )\n param_groups = resnet_fpn_backbone_param_groups(self.model.backbone)\n else:\n self.model = MaskRCNN(backbone, num_classes=num_classes, **kwargs)\n param_groups = param_groups or [backbone]\n\n self._param_groups = param_groups + [self.model.rpn, self.model.roi_heads]\n check_all_model_params_in_groups(self.model, self.param_groups)\n\n def forward(self, images, targets=None):\n return self.model(images, targets)\n\n def predict(\n self,\n images: List[np.ndarray],\n detection_threshold: float = 0.5,\n mask_threshold: float = 0.5,\n ):\n convert_raw_prediction = partial(\n self.convert_raw_prediction,\n detection_threshold=detection_threshold,\n mask_threshold=mask_threshold,\n )\n\n return self._predict(\n images=images, convert_raw_prediction=convert_raw_prediction\n )\n\n @property\n def param_groups(self):\n return self._param_groups\n\n @staticmethod\n def convert_raw_prediction(\n raw_pred: dict, detection_threshold: float, mask_threshold: float\n ):\n preds = MantisFasterRCNN.convert_raw_prediction(\n raw_pred=raw_pred, detection_threshold=detection_threshold\n )\n\n above_threshold = preds[\"above_threshold\"]\n masks_probs = raw_pred[\"masks\"][above_threshold]\n masks_probs = masks_probs.detach().cpu().numpy()\n # convert probabilities to 0 or 1 based on mask_threshold\n masks = masks_probs > mask_threshold\n masks = MaskArray(masks.squeeze(1))\n\n return {**preds, \"masks\": masks}\n\n @staticmethod\n def build_training_sample(\n imageid: int,\n img: np.ndarray,\n label: List[int],\n bbox: List[BBox],\n mask: MaskArray,\n **kwargs,\n ):\n x, y = MantisFasterRCNN.build_training_sample(\n imageid=imageid, img=img, label=label, bbox=bbox,\n )\n y[\"masks\"] = tensor(mask.data, dtype=torch.uint8)\n return x, y\n", "path": "mantisshrimp/models/mantis_rcnn/mantis_mask_rcnn.py"}, {"content": "__all__ = [\"MantisFasterRCNN\"]\n\nfrom mantisshrimp.imports import *\nfrom mantisshrimp.core import *\nfrom mantisshrimp.models.mantis_rcnn.rcnn_param_groups import *\nfrom mantisshrimp.models.mantis_rcnn.mantis_rcnn import *\nfrom mantisshrimp.backbones import *\n\n\nclass MantisFasterRCNN(MantisRCNN):\n \"\"\"\n Creates a flexible Faster RCNN implementation based on torchvision library.\n Args: \n n_class (int) : number of classes. Do not have class_id \"0\" it is reserved as background.\n n_class = number of classes to label + 1 for background.\n \"\"\"\n\n @delegates(FasterRCNN.__init__)\n def __init__(\n self,\n num_classes: int,\n backbone: nn.Module = None,\n param_groups: List[nn.Module] = None,\n metrics=None,\n **kwargs,\n ):\n super().__init__(metrics=metrics)\n self.num_classes = num_classes\n self.backbone = backbone\n if backbone is None:\n # Creates the default fasterrcnn as given in pytorch. 
Trained on COCO dataset\n self.model = fasterrcnn_resnet50_fpn(pretrained=True, **kwargs)\n in_features = self.model.roi_heads.box_predictor.cls_score.in_features\n self.model.roi_heads.box_predictor = FastRCNNPredictor(\n in_features, num_classes\n )\n param_groups = resnet_fpn_backbone_param_groups(self.model.backbone)\n else:\n self.model = FasterRCNN(backbone, num_classes=num_classes, **kwargs)\n param_groups = param_groups or [backbone]\n\n self._param_groups = param_groups + [self.model.rpn, self.model.roi_heads]\n check_all_model_params_in_groups(self.model, self._param_groups)\n\n def forward(self, images, targets=None):\n return self.model(images, targets)\n\n def predict(self, images: List[np.ndarray], detection_threshold: float = 0.5):\n convert_raw_prediction = partial(\n self.convert_raw_prediction, detection_threshold=detection_threshold,\n )\n\n return self._predict(\n images=images, convert_raw_prediction=convert_raw_prediction\n )\n\n @property\n def param_groups(self):\n return self._param_groups\n\n @staticmethod\n def convert_raw_prediction(raw_pred: dict, detection_threshold: float):\n above_threshold = raw_pred[\"scores\"] >= detection_threshold\n\n labels = raw_pred[\"labels\"][above_threshold]\n labels = labels.detach().cpu().numpy()\n\n scores = raw_pred[\"scores\"][above_threshold]\n scores = scores.detach().cpu().numpy()\n\n boxes = raw_pred[\"boxes\"][above_threshold]\n bboxes = []\n for box_tensor in boxes:\n xyxy = box_tensor.cpu().tolist()\n bbox = BBox.from_xyxy(*xyxy)\n bboxes.append(bbox)\n\n return {\n \"labels\": labels,\n \"scores\": scores,\n \"bboxes\": bboxes,\n \"above_threshold\": above_threshold,\n }\n\n @staticmethod\n def build_training_sample(\n imageid: int, img: np.ndarray, label: List[int], bbox: List[BBox], **kwargs,\n ):\n x = im2tensor(img)\n # injected values when annotations are empty are disconsidered\n # because we mark label as 0 (background)\n _fake_box = [0, 1, 2, 3]\n y = {\n \"image_id\": tensor(imageid, dtype=torch.int64),\n \"labels\": tensor(label or [0], dtype=torch.int64),\n \"boxes\": tensor([o.xyxy for o in bbox] or [_fake_box], dtype=torch.float),\n }\n return x, y\n", "path": "mantisshrimp/models/mantis_rcnn/mantis_faster_rcnn.py"}, {"content": "__all__ = [\"MantisModule\"]\n\nfrom mantisshrimp.imports import *\nfrom mantisshrimp.models.device_module_mixin import *\nfrom mantisshrimp.models.parameters_splits_module_mixin import *\n\n\nclass MantisModule(DeviceModuleMixin, ParametersSplitsModuleMixin, nn.Module, ABC):\n @classmethod\n @abstractmethod\n def dataloader(cls, **kwargs) -> DataLoader:\n \"\"\" Returns the specific dataloader for this class\n \"\"\"\n", "path": "mantisshrimp/models/mantis_module.py"}]} | 2,681 | 477 |
gh_patches_debug_42146 | rasdani/github-patches | git_diff | scoutapp__scout_apm_python-466 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Elasticsearch 7.5.1 changed positional arguments order
Similar to #454, Elasticsearch 7.5.1 broke the test suite in another way.
https://github.com/elastic/elasticsearch-py/issues/1099
--- END ISSUE ---
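The root cause is that elasticsearch-py 7.5.1 regenerated its client methods with a different positional parameter order (for example, `body` moved ahead of `index` on some methods), so any instrumentation that assumed `index` was the first argument after `self` now reads the wrong value. A signature-aware lookup along these lines avoids that assumption; the helper name and the wrapt-style calling convention here are assumptions for illustration:

```python
import inspect


def index_from_call(unbound_method, args, kwargs):
    """Recover the `index` argument regardless of its positional slot.

    Sketch only: `unbound_method` is the undecorated client method (its
    signature still lists `self`), while `args`/`kwargs` are the call
    arguments without `self`, as a wrapt-style wrapper sees them.
    """
    if "index" in kwargs:
        return kwargs["index"]
    params = list(inspect.signature(unbound_method).parameters)
    try:
        pos = params.index("index")  # position counting `self`
    except ValueError:
        return None
    adjusted = pos - 1  # drop `self` so the offset lines up with `args`
    return args[adjusted] if 0 <= adjusted < len(args) else None
```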
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/scout_apm/compat.py`
Content:
```
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import datetime as dt
5 import inspect
6 import sys
7 from functools import wraps
8
9 string_type = str if sys.version_info[0] >= 3 else basestring # noqa: F821
10 text_type = str if sys.version_info[0] >= 3 else unicode # noqa: F821
11 string_types = tuple({string_type, text_type})
12
13 if sys.version_info >= (3, 2):
14 from contextlib import ContextDecorator
15 else:
16 import functools
17
18 class ContextDecorator(object):
19 def __call__(self, f):
20 @functools.wraps(f)
21 def decorated(*args, **kwds):
22 with self:
23 return f(*args, **kwds)
24
25 return decorated
26
27
28 if sys.version_info >= (3, 0):
29 import queue
30 else:
31 import Queue as queue
32
33 # datetime_to_timestamp converts a naive UTC datetime to a unix timestamp
34 if sys.version_info >= (3, 3):
35
36 def datetime_to_timestamp(datetime_obj):
37 return datetime_obj.replace(tzinfo=dt.timezone.utc).timestamp()
38
39
40 else:
41 _EPOCH = dt.datetime(1970, 1, 1)
42
43 def datetime_to_timestamp(datetime_obj):
44 return (datetime_obj - _EPOCH).total_seconds()
45
46
47 def text(value, encoding="utf-8", errors="strict"):
48 """
49 Convert a value to str on Python 3 and unicode on Python 2.
50 """
51 if isinstance(value, text_type):
52 return value
53 elif isinstance(value, bytes):
54 return text_type(value, encoding, errors)
55 else:
56 return text_type(value)
57
58
59 if sys.version_info >= (3, 0):
60 from urllib.parse import parse_qsl, urlencode
61 else:
62 from urllib import urlencode
63 from urlparse import parse_qsl
64
65
66 def kwargs_only(func):
67 """
68 Make a function only accept keyword arguments.
69 This can be dropped in Python 3 in lieu of:
70 def foo(*, bar=default):
71 Source: https://pypi.org/project/kwargs-only/
72 """
73 if hasattr(inspect, "signature"): # pragma: no cover
74 # Python 3
75 signature = inspect.signature(func)
76 arg_names = list(signature.parameters.keys())
77 else: # pragma: no cover
78 # Python 2
79 signature = inspect.getargspec(func)
80 arg_names = signature.args
81
82 if len(arg_names) > 0 and arg_names[0] in ("self", "cls"):
83 allowable_args = 1
84 else:
85 allowable_args = 0
86
87 @wraps(func)
88 def wrapper(*args, **kwargs):
89 if len(args) > allowable_args:
90 raise TypeError(
91 "{} should only be called with keyword args".format(func.__name__)
92 )
93 return func(*args, **kwargs)
94
95 return wrapper
96
97
98 __all__ = [
99 "ContextDecorator",
100 "datetime_to_timestamp",
101 "kwargs_only",
102 "parse_qsl",
103 "queue",
104 "string_type",
105 "text",
106 "text_type",
107 "urlencode",
108 ]
109
```
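As a quick illustration of how the `kwargs_only` helper above behaves when applied (usage example only, not code from the repository):

```python
from scout_apm.compat import kwargs_only


@kwargs_only
def connect(host="localhost", port=9200):
    return host, port


print(connect(host="db", port=5432))  # fine: keyword arguments only

try:
    connect("db", 5432)  # positional call is rejected
except TypeError as exc:
    print(exc)  # "connect should only be called with keyword args"
```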
Path: `src/scout_apm/instruments/elasticsearch.py`
Content:
```
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import logging
5 from collections import namedtuple
6
7 import wrapt
8
9 from scout_apm.core.tracked_request import TrackedRequest
10
11 try:
12 from elasticsearch import Elasticsearch, Transport
13 except ImportError: # pragma: no cover
14 Elasticsearch = None
15 Transport = None
16
17 logger = logging.getLogger(__name__)
18
19
20 def ensure_installed():
21 logger.info("Ensuring elasticsearch instrumentation is installed.")
22
23 if Elasticsearch is None:
24 logger.info("Unable to import elasticsearch.Elasticsearch")
25 else:
26 ensure_client_instrumented()
27 ensure_transport_instrumented()
28
29
30 ClientMethod = namedtuple("ClientMethod", ["name", "takes_index_argument"])
31
32 CLIENT_METHODS = [
33 ClientMethod("bulk", True),
34 ClientMethod("clear_scroll", False),
35 ClientMethod("count", True),
36 ClientMethod("create", True),
37 ClientMethod("delete", True),
38 ClientMethod("delete_by_query", True),
39 ClientMethod("delete_by_query_rethrottle", False),
40 ClientMethod("delete_script", False),
41 ClientMethod("exists", True),
42 ClientMethod("exists_source", True),
43 ClientMethod("explain", True),
44 ClientMethod("field_caps", True),
45 ClientMethod("get", True),
46 ClientMethod("get_script", False),
47 ClientMethod("get_source", True),
48 ClientMethod("index", True),
49 ClientMethod("info", False),
50 ClientMethod("mget", True),
51 ClientMethod("msearch", True),
52 ClientMethod("msearch_template", True),
53 ClientMethod("mtermvectors", True),
54 ClientMethod("ping", False),
55 ClientMethod("put_script", False),
56 ClientMethod("rank_eval", True),
57 ClientMethod("reindex", False),
58 ClientMethod("reindex_rethrottle", False),
59 ClientMethod("render_search_template", False),
60 ClientMethod("scripts_painless_context", False),
61 ClientMethod("scripts_painless_execute", False),
62 ClientMethod("scroll", False),
63 ClientMethod("search", True),
64 ClientMethod("search_shards", True),
65 ClientMethod("search_template", True),
66 ClientMethod("termvectors", True),
67 ClientMethod("update", True),
68 ClientMethod("update_by_query", True),
69 ClientMethod("update_by_query_rethrottle", False),
70 ]
71
72
73 have_patched_client = False
74
75
76 def ensure_client_instrumented():
77 global have_patched_client
78
79 if not have_patched_client:
80 for name, takes_index_argument in CLIENT_METHODS:
81 try:
82 method = getattr(Elasticsearch, name)
83 if takes_index_argument:
84 wrapped = wrap_client_index_method(method)
85 else:
86 wrapped = wrap_client_method(method)
87 setattr(Elasticsearch, name, wrapped)
88 except Exception as exc:
89 # Workaround for version 7.5.0 removing scripts_painless_context:
90 # https://github.com/elastic/elasticsearch-py/issues/1098
91 if name != "scripts_painless_context":
92 logger.warning(
93 "Unable to instrument elasticsearch.Elasticsearch.%s: %r",
94 name,
95 exc,
96 exc_info=exc,
97 )
98
99 have_patched_client = True
100
101
102 @wrapt.decorator
103 def wrap_client_index_method(wrapped, instance, args, kwargs):
104 def _get_index(index, *args, **kwargs):
105 return index
106
107 try:
108 index = _get_index(*args, **kwargs)
109 except TypeError:
110 index = "Unknown"
111 else:
112 if not index:
113 index = "Unknown"
114 if isinstance(index, (list, tuple)):
115 index = ",".join(index)
116 index = index.title()
117 camel_name = "".join(c.title() for c in wrapped.__name__.split("_"))
118 operation = "Elasticsearch/{}/{}".format(index, camel_name)
119 tracked_request = TrackedRequest.instance()
120 tracked_request.start_span(operation=operation, ignore_children=True)
121
122 try:
123 return wrapped(*args, **kwargs)
124 finally:
125 tracked_request.stop_span()
126
127
128 @wrapt.decorator
129 def wrap_client_method(wrapped, instance, args, kwargs):
130 camel_name = "".join(c.title() for c in wrapped.__name__.split("_"))
131 operation = "Elasticsearch/{}".format(camel_name)
132 tracked_request = TrackedRequest.instance()
133 tracked_request.start_span(operation=operation, ignore_children=True)
134
135 try:
136 return wrapped(*args, **kwargs)
137 finally:
138 tracked_request.stop_span()
139
140
141 have_patched_transport = False
142
143
144 def ensure_transport_instrumented():
145 global have_patched_transport
146
147 if not have_patched_transport:
148 try:
149 Transport.perform_request = wrapped_perform_request(
150 Transport.perform_request
151 )
152 except Exception as exc:
153 logger.warning(
154 "Unable to instrument elasticsearch.Transport.perform_request: %r",
155 exc,
156 exc_info=exc,
157 )
158
159 have_patched_transport = True
160
161
162 def _sanitize_name(name):
163 try:
164 op = name.split("/")[-1]
165 op = op[1:] # chop leading '_' from op
166 known_names = (
167 "bench",
168 "bulk",
169 "count",
170 "exists",
171 "explain",
172 "field_stats",
173 "health",
174 "mget",
175 "mlt",
176 "mpercolate",
177 "msearch",
178 "mtermvectors",
179 "percolate",
180 "query",
181 "scroll",
182 "search_shards",
183 "source",
184 "suggest",
185 "template",
186 "termvectors",
187 "update",
188 "search",
189 )
190 if op in known_names:
191 return op.title()
192 return "Unknown"
193 except Exception:
194 return "Unknown"
195
196
197 @wrapt.decorator
198 def wrapped_perform_request(wrapped, instance, args, kwargs):
199 try:
200 op = _sanitize_name(args[1])
201 except IndexError:
202 op = "Unknown"
203
204 tracked_request = TrackedRequest.instance()
205 tracked_request.start_span(
206 operation="Elasticsearch/{}".format(op), ignore_children=True
207 )
208
209 try:
210 return wrapped(*args, **kwargs)
211 finally:
212 tracked_request.stop_span()
213
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/scout_apm/compat.py b/src/scout_apm/compat.py
--- a/src/scout_apm/compat.py
+++ b/src/scout_apm/compat.py
@@ -63,8 +63,21 @@
from urlparse import parse_qsl
+if sys.version_info >= (3, 0):
+
+ def get_pos_args(func):
+ return inspect.getfullargspec(func).args
+
+
+else:
+
+ def get_pos_args(func):
+ return inspect.getargspec(func).args
+
+
def kwargs_only(func):
"""
+ Source: https://pypi.org/project/kwargs-only/
Make a function only accept keyword arguments.
This can be dropped in Python 3 in lieu of:
def foo(*, bar=default):
diff --git a/src/scout_apm/instruments/elasticsearch.py b/src/scout_apm/instruments/elasticsearch.py
--- a/src/scout_apm/instruments/elasticsearch.py
+++ b/src/scout_apm/instruments/elasticsearch.py
@@ -6,6 +6,7 @@
import wrapt
+from scout_apm.compat import get_pos_args
from scout_apm.core.tracked_request import TrackedRequest
try:
@@ -57,7 +58,6 @@
ClientMethod("reindex", False),
ClientMethod("reindex_rethrottle", False),
ClientMethod("render_search_template", False),
- ClientMethod("scripts_painless_context", False),
ClientMethod("scripts_painless_execute", False),
ClientMethod("scroll", False),
ClientMethod("search", True),
@@ -86,34 +86,50 @@
wrapped = wrap_client_method(method)
setattr(Elasticsearch, name, wrapped)
except Exception as exc:
- # Workaround for version 7.5.0 removing scripts_painless_context:
- # https://github.com/elastic/elasticsearch-py/issues/1098
- if name != "scripts_painless_context":
- logger.warning(
- "Unable to instrument elasticsearch.Elasticsearch.%s: %r",
- name,
- exc,
- exc_info=exc,
- )
+ logger.warning(
+ "Unable to instrument elasticsearch.Elasticsearch.%s: %r",
+ name,
+ exc,
+ exc_info=exc,
+ )
have_patched_client = True
@wrapt.decorator
def wrap_client_index_method(wrapped, instance, args, kwargs):
- def _get_index(index, *args, **kwargs):
- return index
+ # elasticsearch-py 7.5.1 changed the order of arguments for client methods,
+ # so to be safe we need to inspect the wrapped method's positional
+ # arguments to see if we should pull it from there
+ if "index" in kwargs:
+ index = kwargs["index"]
+ else:
+ unwrapped = wrapped
+ while True:
+ try:
+ unwrapped = unwrapped.__wrapped__
+ except AttributeError:
+ break
- try:
- index = _get_index(*args, **kwargs)
- except TypeError:
+ pos_args = get_pos_args(unwrapped)
+ try:
+ index_index = pos_args.index("index")
+ except ValueError: # pragma: no cover
+ # This guards against the method not accepting an 'index' argument
+ # but they all do - for now
+ index = ""
+ else:
+ try:
+ index = args[index_index - 1] # subtract 'self'
+ except IndexError:
+ index = ""
+
+ if isinstance(index, (list, tuple)):
+ index = ",".join(index)
+ if index == "":
index = "Unknown"
- else:
- if not index:
- index = "Unknown"
- if isinstance(index, (list, tuple)):
- index = ",".join(index)
index = index.title()
+
camel_name = "".join(c.title() for c in wrapped.__name__.split("_"))
operation = "Elasticsearch/{}/{}".format(index, camel_name)
tracked_request = TrackedRequest.instance()
| {"golden_diff": "diff --git a/src/scout_apm/compat.py b/src/scout_apm/compat.py\n--- a/src/scout_apm/compat.py\n+++ b/src/scout_apm/compat.py\n@@ -63,8 +63,21 @@\n from urlparse import parse_qsl\n \n \n+if sys.version_info >= (3, 0):\n+\n+ def get_pos_args(func):\n+ return inspect.getfullargspec(func).args\n+\n+\n+else:\n+\n+ def get_pos_args(func):\n+ return inspect.getargspec(func).args\n+\n+\n def kwargs_only(func):\n \"\"\"\n+ Source: https://pypi.org/project/kwargs-only/\n Make a function only accept keyword arguments.\n This can be dropped in Python 3 in lieu of:\n def foo(*, bar=default):\ndiff --git a/src/scout_apm/instruments/elasticsearch.py b/src/scout_apm/instruments/elasticsearch.py\n--- a/src/scout_apm/instruments/elasticsearch.py\n+++ b/src/scout_apm/instruments/elasticsearch.py\n@@ -6,6 +6,7 @@\n \n import wrapt\n \n+from scout_apm.compat import get_pos_args\n from scout_apm.core.tracked_request import TrackedRequest\n \n try:\n@@ -57,7 +58,6 @@\n ClientMethod(\"reindex\", False),\n ClientMethod(\"reindex_rethrottle\", False),\n ClientMethod(\"render_search_template\", False),\n- ClientMethod(\"scripts_painless_context\", False),\n ClientMethod(\"scripts_painless_execute\", False),\n ClientMethod(\"scroll\", False),\n ClientMethod(\"search\", True),\n@@ -86,34 +86,50 @@\n wrapped = wrap_client_method(method)\n setattr(Elasticsearch, name, wrapped)\n except Exception as exc:\n- # Workaround for version 7.5.0 removing scripts_painless_context:\n- # https://github.com/elastic/elasticsearch-py/issues/1098\n- if name != \"scripts_painless_context\":\n- logger.warning(\n- \"Unable to instrument elasticsearch.Elasticsearch.%s: %r\",\n- name,\n- exc,\n- exc_info=exc,\n- )\n+ logger.warning(\n+ \"Unable to instrument elasticsearch.Elasticsearch.%s: %r\",\n+ name,\n+ exc,\n+ exc_info=exc,\n+ )\n \n have_patched_client = True\n \n \n @wrapt.decorator\n def wrap_client_index_method(wrapped, instance, args, kwargs):\n- def _get_index(index, *args, **kwargs):\n- return index\n+ # elasticsearch-py 7.5.1 changed the order of arguments for client methods,\n+ # so to be safe we need to inspect the wrapped method's positional\n+ # arguments to see if we should pull it from there\n+ if \"index\" in kwargs:\n+ index = kwargs[\"index\"]\n+ else:\n+ unwrapped = wrapped\n+ while True:\n+ try:\n+ unwrapped = unwrapped.__wrapped__\n+ except AttributeError:\n+ break\n \n- try:\n- index = _get_index(*args, **kwargs)\n- except TypeError:\n+ pos_args = get_pos_args(unwrapped)\n+ try:\n+ index_index = pos_args.index(\"index\")\n+ except ValueError: # pragma: no cover\n+ # This guards against the method not accepting an 'index' argument\n+ # but they all do - for now\n+ index = \"\"\n+ else:\n+ try:\n+ index = args[index_index - 1] # subtract 'self'\n+ except IndexError:\n+ index = \"\"\n+\n+ if isinstance(index, (list, tuple)):\n+ index = \",\".join(index)\n+ if index == \"\":\n index = \"Unknown\"\n- else:\n- if not index:\n- index = \"Unknown\"\n- if isinstance(index, (list, tuple)):\n- index = \",\".join(index)\n index = index.title()\n+\n camel_name = \"\".join(c.title() for c in wrapped.__name__.split(\"_\"))\n operation = \"Elasticsearch/{}/{}\".format(index, camel_name)\n tracked_request = TrackedRequest.instance()\n", "issue": "Elasticsearch 7.5.1 changed positional arguments order\nSimilar to #454, Elasticsearch 7.5.1 broke the test suite in another way.\r\n\r\nhttps://github.com/elastic/elasticsearch-py/issues/1099\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import 
absolute_import, division, print_function, unicode_literals\n\nimport datetime as dt\nimport inspect\nimport sys\nfrom functools import wraps\n\nstring_type = str if sys.version_info[0] >= 3 else basestring # noqa: F821\ntext_type = str if sys.version_info[0] >= 3 else unicode # noqa: F821\nstring_types = tuple({string_type, text_type})\n\nif sys.version_info >= (3, 2):\n from contextlib import ContextDecorator\nelse:\n import functools\n\n class ContextDecorator(object):\n def __call__(self, f):\n @functools.wraps(f)\n def decorated(*args, **kwds):\n with self:\n return f(*args, **kwds)\n\n return decorated\n\n\nif sys.version_info >= (3, 0):\n import queue\nelse:\n import Queue as queue\n\n# datetime_to_timestamp converts a naive UTC datetime to a unix timestamp\nif sys.version_info >= (3, 3):\n\n def datetime_to_timestamp(datetime_obj):\n return datetime_obj.replace(tzinfo=dt.timezone.utc).timestamp()\n\n\nelse:\n _EPOCH = dt.datetime(1970, 1, 1)\n\n def datetime_to_timestamp(datetime_obj):\n return (datetime_obj - _EPOCH).total_seconds()\n\n\ndef text(value, encoding=\"utf-8\", errors=\"strict\"):\n \"\"\"\n Convert a value to str on Python 3 and unicode on Python 2.\n \"\"\"\n if isinstance(value, text_type):\n return value\n elif isinstance(value, bytes):\n return text_type(value, encoding, errors)\n else:\n return text_type(value)\n\n\nif sys.version_info >= (3, 0):\n from urllib.parse import parse_qsl, urlencode\nelse:\n from urllib import urlencode\n from urlparse import parse_qsl\n\n\ndef kwargs_only(func):\n \"\"\"\n Make a function only accept keyword arguments.\n This can be dropped in Python 3 in lieu of:\n def foo(*, bar=default):\n Source: https://pypi.org/project/kwargs-only/\n \"\"\"\n if hasattr(inspect, \"signature\"): # pragma: no cover\n # Python 3\n signature = inspect.signature(func)\n arg_names = list(signature.parameters.keys())\n else: # pragma: no cover\n # Python 2\n signature = inspect.getargspec(func)\n arg_names = signature.args\n\n if len(arg_names) > 0 and arg_names[0] in (\"self\", \"cls\"):\n allowable_args = 1\n else:\n allowable_args = 0\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n if len(args) > allowable_args:\n raise TypeError(\n \"{} should only be called with keyword args\".format(func.__name__)\n )\n return func(*args, **kwargs)\n\n return wrapper\n\n\n__all__ = [\n \"ContextDecorator\",\n \"datetime_to_timestamp\",\n \"kwargs_only\",\n \"parse_qsl\",\n \"queue\",\n \"string_type\",\n \"text\",\n \"text_type\",\n \"urlencode\",\n]\n", "path": "src/scout_apm/compat.py"}, {"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nfrom collections import namedtuple\n\nimport wrapt\n\nfrom scout_apm.core.tracked_request import TrackedRequest\n\ntry:\n from elasticsearch import Elasticsearch, Transport\nexcept ImportError: # pragma: no cover\n Elasticsearch = None\n Transport = None\n\nlogger = logging.getLogger(__name__)\n\n\ndef ensure_installed():\n logger.info(\"Ensuring elasticsearch instrumentation is installed.\")\n\n if Elasticsearch is None:\n logger.info(\"Unable to import elasticsearch.Elasticsearch\")\n else:\n ensure_client_instrumented()\n ensure_transport_instrumented()\n\n\nClientMethod = namedtuple(\"ClientMethod\", [\"name\", \"takes_index_argument\"])\n\nCLIENT_METHODS = [\n ClientMethod(\"bulk\", True),\n ClientMethod(\"clear_scroll\", False),\n ClientMethod(\"count\", True),\n ClientMethod(\"create\", True),\n ClientMethod(\"delete\", True),\n 
ClientMethod(\"delete_by_query\", True),\n ClientMethod(\"delete_by_query_rethrottle\", False),\n ClientMethod(\"delete_script\", False),\n ClientMethod(\"exists\", True),\n ClientMethod(\"exists_source\", True),\n ClientMethod(\"explain\", True),\n ClientMethod(\"field_caps\", True),\n ClientMethod(\"get\", True),\n ClientMethod(\"get_script\", False),\n ClientMethod(\"get_source\", True),\n ClientMethod(\"index\", True),\n ClientMethod(\"info\", False),\n ClientMethod(\"mget\", True),\n ClientMethod(\"msearch\", True),\n ClientMethod(\"msearch_template\", True),\n ClientMethod(\"mtermvectors\", True),\n ClientMethod(\"ping\", False),\n ClientMethod(\"put_script\", False),\n ClientMethod(\"rank_eval\", True),\n ClientMethod(\"reindex\", False),\n ClientMethod(\"reindex_rethrottle\", False),\n ClientMethod(\"render_search_template\", False),\n ClientMethod(\"scripts_painless_context\", False),\n ClientMethod(\"scripts_painless_execute\", False),\n ClientMethod(\"scroll\", False),\n ClientMethod(\"search\", True),\n ClientMethod(\"search_shards\", True),\n ClientMethod(\"search_template\", True),\n ClientMethod(\"termvectors\", True),\n ClientMethod(\"update\", True),\n ClientMethod(\"update_by_query\", True),\n ClientMethod(\"update_by_query_rethrottle\", False),\n]\n\n\nhave_patched_client = False\n\n\ndef ensure_client_instrumented():\n global have_patched_client\n\n if not have_patched_client:\n for name, takes_index_argument in CLIENT_METHODS:\n try:\n method = getattr(Elasticsearch, name)\n if takes_index_argument:\n wrapped = wrap_client_index_method(method)\n else:\n wrapped = wrap_client_method(method)\n setattr(Elasticsearch, name, wrapped)\n except Exception as exc:\n # Workaround for version 7.5.0 removing scripts_painless_context:\n # https://github.com/elastic/elasticsearch-py/issues/1098\n if name != \"scripts_painless_context\":\n logger.warning(\n \"Unable to instrument elasticsearch.Elasticsearch.%s: %r\",\n name,\n exc,\n exc_info=exc,\n )\n\n have_patched_client = True\n\n\[email protected]\ndef wrap_client_index_method(wrapped, instance, args, kwargs):\n def _get_index(index, *args, **kwargs):\n return index\n\n try:\n index = _get_index(*args, **kwargs)\n except TypeError:\n index = \"Unknown\"\n else:\n if not index:\n index = \"Unknown\"\n if isinstance(index, (list, tuple)):\n index = \",\".join(index)\n index = index.title()\n camel_name = \"\".join(c.title() for c in wrapped.__name__.split(\"_\"))\n operation = \"Elasticsearch/{}/{}\".format(index, camel_name)\n tracked_request = TrackedRequest.instance()\n tracked_request.start_span(operation=operation, ignore_children=True)\n\n try:\n return wrapped(*args, **kwargs)\n finally:\n tracked_request.stop_span()\n\n\[email protected]\ndef wrap_client_method(wrapped, instance, args, kwargs):\n camel_name = \"\".join(c.title() for c in wrapped.__name__.split(\"_\"))\n operation = \"Elasticsearch/{}\".format(camel_name)\n tracked_request = TrackedRequest.instance()\n tracked_request.start_span(operation=operation, ignore_children=True)\n\n try:\n return wrapped(*args, **kwargs)\n finally:\n tracked_request.stop_span()\n\n\nhave_patched_transport = False\n\n\ndef ensure_transport_instrumented():\n global have_patched_transport\n\n if not have_patched_transport:\n try:\n Transport.perform_request = wrapped_perform_request(\n Transport.perform_request\n )\n except Exception as exc:\n logger.warning(\n \"Unable to instrument elasticsearch.Transport.perform_request: %r\",\n exc,\n exc_info=exc,\n )\n\n have_patched_transport 
= True\n\n\ndef _sanitize_name(name):\n try:\n op = name.split(\"/\")[-1]\n op = op[1:] # chop leading '_' from op\n known_names = (\n \"bench\",\n \"bulk\",\n \"count\",\n \"exists\",\n \"explain\",\n \"field_stats\",\n \"health\",\n \"mget\",\n \"mlt\",\n \"mpercolate\",\n \"msearch\",\n \"mtermvectors\",\n \"percolate\",\n \"query\",\n \"scroll\",\n \"search_shards\",\n \"source\",\n \"suggest\",\n \"template\",\n \"termvectors\",\n \"update\",\n \"search\",\n )\n if op in known_names:\n return op.title()\n return \"Unknown\"\n except Exception:\n return \"Unknown\"\n\n\[email protected]\ndef wrapped_perform_request(wrapped, instance, args, kwargs):\n try:\n op = _sanitize_name(args[1])\n except IndexError:\n op = \"Unknown\"\n\n tracked_request = TrackedRequest.instance()\n tracked_request.start_span(\n operation=\"Elasticsearch/{}\".format(op), ignore_children=True\n )\n\n try:\n return wrapped(*args, **kwargs)\n finally:\n tracked_request.stop_span()\n", "path": "src/scout_apm/instruments/elasticsearch.py"}], "after_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport datetime as dt\nimport inspect\nimport sys\nfrom functools import wraps\n\nstring_type = str if sys.version_info[0] >= 3 else basestring # noqa: F821\ntext_type = str if sys.version_info[0] >= 3 else unicode # noqa: F821\nstring_types = tuple({string_type, text_type})\n\nif sys.version_info >= (3, 2):\n from contextlib import ContextDecorator\nelse:\n import functools\n\n class ContextDecorator(object):\n def __call__(self, f):\n @functools.wraps(f)\n def decorated(*args, **kwds):\n with self:\n return f(*args, **kwds)\n\n return decorated\n\n\nif sys.version_info >= (3, 0):\n import queue\nelse:\n import Queue as queue\n\n# datetime_to_timestamp converts a naive UTC datetime to a unix timestamp\nif sys.version_info >= (3, 3):\n\n def datetime_to_timestamp(datetime_obj):\n return datetime_obj.replace(tzinfo=dt.timezone.utc).timestamp()\n\n\nelse:\n _EPOCH = dt.datetime(1970, 1, 1)\n\n def datetime_to_timestamp(datetime_obj):\n return (datetime_obj - _EPOCH).total_seconds()\n\n\ndef text(value, encoding=\"utf-8\", errors=\"strict\"):\n \"\"\"\n Convert a value to str on Python 3 and unicode on Python 2.\n \"\"\"\n if isinstance(value, text_type):\n return value\n elif isinstance(value, bytes):\n return text_type(value, encoding, errors)\n else:\n return text_type(value)\n\n\nif sys.version_info >= (3, 0):\n from urllib.parse import parse_qsl, urlencode\nelse:\n from urllib import urlencode\n from urlparse import parse_qsl\n\n\nif sys.version_info >= (3, 0):\n\n def get_pos_args(func):\n return inspect.getfullargspec(func).args\n\n\nelse:\n\n def get_pos_args(func):\n return inspect.getargspec(func).args\n\n\ndef kwargs_only(func):\n \"\"\"\n Source: https://pypi.org/project/kwargs-only/\n Make a function only accept keyword arguments.\n This can be dropped in Python 3 in lieu of:\n def foo(*, bar=default):\n Source: https://pypi.org/project/kwargs-only/\n \"\"\"\n if hasattr(inspect, \"signature\"): # pragma: no cover\n # Python 3\n signature = inspect.signature(func)\n arg_names = list(signature.parameters.keys())\n else: # pragma: no cover\n # Python 2\n signature = inspect.getargspec(func)\n arg_names = signature.args\n\n if len(arg_names) > 0 and arg_names[0] in (\"self\", \"cls\"):\n allowable_args = 1\n else:\n allowable_args = 0\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n if len(args) > allowable_args:\n raise TypeError(\n \"{} 
should only be called with keyword args\".format(func.__name__)\n )\n return func(*args, **kwargs)\n\n return wrapper\n\n\n__all__ = [\n \"ContextDecorator\",\n \"datetime_to_timestamp\",\n \"kwargs_only\",\n \"parse_qsl\",\n \"queue\",\n \"string_type\",\n \"text\",\n \"text_type\",\n \"urlencode\",\n]\n", "path": "src/scout_apm/compat.py"}, {"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nfrom collections import namedtuple\n\nimport wrapt\n\nfrom scout_apm.compat import get_pos_args\nfrom scout_apm.core.tracked_request import TrackedRequest\n\ntry:\n from elasticsearch import Elasticsearch, Transport\nexcept ImportError: # pragma: no cover\n Elasticsearch = None\n Transport = None\n\nlogger = logging.getLogger(__name__)\n\n\ndef ensure_installed():\n logger.info(\"Ensuring elasticsearch instrumentation is installed.\")\n\n if Elasticsearch is None:\n logger.info(\"Unable to import elasticsearch.Elasticsearch\")\n else:\n ensure_client_instrumented()\n ensure_transport_instrumented()\n\n\nClientMethod = namedtuple(\"ClientMethod\", [\"name\", \"takes_index_argument\"])\n\nCLIENT_METHODS = [\n ClientMethod(\"bulk\", True),\n ClientMethod(\"clear_scroll\", False),\n ClientMethod(\"count\", True),\n ClientMethod(\"create\", True),\n ClientMethod(\"delete\", True),\n ClientMethod(\"delete_by_query\", True),\n ClientMethod(\"delete_by_query_rethrottle\", False),\n ClientMethod(\"delete_script\", False),\n ClientMethod(\"exists\", True),\n ClientMethod(\"exists_source\", True),\n ClientMethod(\"explain\", True),\n ClientMethod(\"field_caps\", True),\n ClientMethod(\"get\", True),\n ClientMethod(\"get_script\", False),\n ClientMethod(\"get_source\", True),\n ClientMethod(\"index\", True),\n ClientMethod(\"info\", False),\n ClientMethod(\"mget\", True),\n ClientMethod(\"msearch\", True),\n ClientMethod(\"msearch_template\", True),\n ClientMethod(\"mtermvectors\", True),\n ClientMethod(\"ping\", False),\n ClientMethod(\"put_script\", False),\n ClientMethod(\"rank_eval\", True),\n ClientMethod(\"reindex\", False),\n ClientMethod(\"reindex_rethrottle\", False),\n ClientMethod(\"render_search_template\", False),\n ClientMethod(\"scripts_painless_execute\", False),\n ClientMethod(\"scroll\", False),\n ClientMethod(\"search\", True),\n ClientMethod(\"search_shards\", True),\n ClientMethod(\"search_template\", True),\n ClientMethod(\"termvectors\", True),\n ClientMethod(\"update\", True),\n ClientMethod(\"update_by_query\", True),\n ClientMethod(\"update_by_query_rethrottle\", False),\n]\n\n\nhave_patched_client = False\n\n\ndef ensure_client_instrumented():\n global have_patched_client\n\n if not have_patched_client:\n for name, takes_index_argument in CLIENT_METHODS:\n try:\n method = getattr(Elasticsearch, name)\n if takes_index_argument:\n wrapped = wrap_client_index_method(method)\n else:\n wrapped = wrap_client_method(method)\n setattr(Elasticsearch, name, wrapped)\n except Exception as exc:\n logger.warning(\n \"Unable to instrument elasticsearch.Elasticsearch.%s: %r\",\n name,\n exc,\n exc_info=exc,\n )\n\n have_patched_client = True\n\n\[email protected]\ndef wrap_client_index_method(wrapped, instance, args, kwargs):\n # elasticsearch-py 7.5.1 changed the order of arguments for client methods,\n # so to be safe we need to inspect the wrapped method's positional\n # arguments to see if we should pull it from there\n if \"index\" in kwargs:\n index = kwargs[\"index\"]\n else:\n unwrapped = wrapped\n while True:\n 
try:\n unwrapped = unwrapped.__wrapped__\n except AttributeError:\n break\n\n pos_args = get_pos_args(unwrapped)\n try:\n index_index = pos_args.index(\"index\")\n except ValueError: # pragma: no cover\n # This guards against the method not accepting an 'index' argument\n # but they all do - for now\n index = \"\"\n else:\n try:\n index = args[index_index - 1] # subtract 'self'\n except IndexError:\n index = \"\"\n\n if isinstance(index, (list, tuple)):\n index = \",\".join(index)\n if index == \"\":\n index = \"Unknown\"\n index = index.title()\n\n camel_name = \"\".join(c.title() for c in wrapped.__name__.split(\"_\"))\n operation = \"Elasticsearch/{}/{}\".format(index, camel_name)\n tracked_request = TrackedRequest.instance()\n tracked_request.start_span(operation=operation, ignore_children=True)\n\n try:\n return wrapped(*args, **kwargs)\n finally:\n tracked_request.stop_span()\n\n\[email protected]\ndef wrap_client_method(wrapped, instance, args, kwargs):\n camel_name = \"\".join(c.title() for c in wrapped.__name__.split(\"_\"))\n operation = \"Elasticsearch/{}\".format(camel_name)\n tracked_request = TrackedRequest.instance()\n tracked_request.start_span(operation=operation, ignore_children=True)\n\n try:\n return wrapped(*args, **kwargs)\n finally:\n tracked_request.stop_span()\n\n\nhave_patched_transport = False\n\n\ndef ensure_transport_instrumented():\n global have_patched_transport\n\n if not have_patched_transport:\n try:\n Transport.perform_request = wrapped_perform_request(\n Transport.perform_request\n )\n except Exception as exc:\n logger.warning(\n \"Unable to instrument elasticsearch.Transport.perform_request: %r\",\n exc,\n exc_info=exc,\n )\n\n have_patched_transport = True\n\n\ndef _sanitize_name(name):\n try:\n op = name.split(\"/\")[-1]\n op = op[1:] # chop leading '_' from op\n known_names = (\n \"bench\",\n \"bulk\",\n \"count\",\n \"exists\",\n \"explain\",\n \"field_stats\",\n \"health\",\n \"mget\",\n \"mlt\",\n \"mpercolate\",\n \"msearch\",\n \"mtermvectors\",\n \"percolate\",\n \"query\",\n \"scroll\",\n \"search_shards\",\n \"source\",\n \"suggest\",\n \"template\",\n \"termvectors\",\n \"update\",\n \"search\",\n )\n if op in known_names:\n return op.title()\n return \"Unknown\"\n except Exception:\n return \"Unknown\"\n\n\[email protected]\ndef wrapped_perform_request(wrapped, instance, args, kwargs):\n try:\n op = _sanitize_name(args[1])\n except IndexError:\n op = \"Unknown\"\n\n tracked_request = TrackedRequest.instance()\n tracked_request.start_span(\n operation=\"Elasticsearch/{}\".format(op), ignore_children=True\n )\n\n try:\n return wrapped(*args, **kwargs)\n finally:\n tracked_request.stop_span()\n", "path": "src/scout_apm/instruments/elasticsearch.py"}]} | 3,132 | 925 |
gh_patches_debug_36968 | rasdani/github-patches | git_diff | ESMCI__cime-1436 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add --component option to preview_namelists
It would be useful if users could specify a single component when running `preview_namelists` from the command line in a case directory, rather than building namelists for every component.
--- END ISSUE ---
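One way to expose this is to accept a component name on the command line and thread it through to the namelist-building routine; the option name and wiring below are assumptions sketched for illustration, not taken from the CIME scripts:

```python
import argparse


def parse_command_line(argv=None):
    parser = argparse.ArgumentParser(description="Preview component namelists")
    parser.add_argument(
        "--component",
        default=None,
        help="build namelists only for this component (e.g. 'atm'); "
        "by default all components are processed",
    )
    return parser.parse_args(argv)


# With create_namelists(case, component=None), the per-model loop can then
# skip everything except the requested component, e.g.:
#
#     for model in models:
#         model_str = model.lower()
#         if component is not None and component != model_str:
#             continue
#         ...
```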
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/lib/CIME/preview_namelists.py`
Content:
```
1 """
2 API for preview namelist
3 """
4
5 from CIME.XML.standard_module_setup import *
6
7 import glob, shutil, imp
8 logger = logging.getLogger(__name__)
9
10 def create_dirs(case):
11 """
12 Make necessary directories for case
13 """
14 # Get data from XML
15 exeroot = case.get_value("EXEROOT")
16 libroot = case.get_value("LIBROOT")
17 incroot = case.get_value("INCROOT")
18 rundir = case.get_value("RUNDIR")
19 caseroot = case.get_value("CASEROOT")
20
21 docdir = os.path.join(caseroot, "CaseDocs")
22 dirs_to_make = []
23 models = case.get_values("COMP_CLASSES")
24 for model in models:
25 dirname = model.lower()
26 dirs_to_make.append(os.path.join(exeroot, dirname, "obj"))
27
28 dirs_to_make.extend([exeroot, libroot, incroot, rundir, docdir])
29
30 for dir_to_make in dirs_to_make:
31 if (not os.path.isdir(dir_to_make)):
32 try:
33 logger.debug("Making dir '%s'" % dir_to_make)
34 os.makedirs(dir_to_make)
35 except OSError as e:
36 expect(False, "Could not make directory '%s', error: %s" % (dir_to_make, e))
37
38 # As a convenience write the location of the case directory in the bld and run directories
39 for dir_ in (exeroot, rundir):
40 with open(os.path.join(dir_,"CASEROOT"),"w+") as fd:
41 fd.write(caseroot+"\n")
42
43 def create_namelists(case):
44 """
45 Create component namelists
46 """
47 case.flush()
48
49 create_dirs(case)
50
51 casebuild = case.get_value("CASEBUILD")
52 caseroot = case.get_value("CASEROOT")
53 rundir = case.get_value("RUNDIR")
54
55 docdir = os.path.join(caseroot, "CaseDocs")
56
57 # Load modules
58 case.load_env()
59
60 logger.info("Creating component namelists")
61
62 # Create namelists - must have cpl last in the list below
63 # Note - cpl must be last in the loop below so that in generating its namelist,
64 # it can use xml vars potentially set by other component's buildnml scripts
65 models = case.get_values("COMP_CLASSES")
66 models += [models.pop(0)]
67 for model in models:
68 model_str = model.lower()
69 config_file = case.get_value("CONFIG_%s_FILE" % model_str.upper())
70 config_dir = os.path.dirname(config_file)
71 if model_str == "cpl":
72 compname = "drv"
73 else:
74 compname = case.get_value("COMP_%s" % model_str.upper())
75
76 cmd = os.path.join(config_dir, "buildnml")
77 do_run_cmd = False
78 # This code will try to import and run each buildnml as a subroutine
79 # if that fails it will run it as a program in a seperate shell
80 try:
81 with open(cmd, 'r') as f:
82 first_line = f.readline()
83 if "python" in first_line:
84 mod = imp.load_source("buildnml", cmd)
85 logger.info(" Calling %s buildnml"%compname)
86 mod.buildnml(case, caseroot, compname)
87 else:
88 raise SyntaxError
89 except SyntaxError as detail:
90 if 'python' in first_line:
91 expect(False, detail)
92 else:
93 do_run_cmd = True
94 except AttributeError:
95 do_run_cmd = True
96 except:
97 raise
98
99 if do_run_cmd:
100 logger.info(" Running %s buildnml"%compname)
101 case.flush()
102 output = run_cmd_no_fail("%s %s" % (cmd, caseroot), verbose=False)
103 logger.info(output)
104 # refresh case xml object from file
105 case.read_xml()
106
107 logger.info("Finished creating component namelists")
108
109 # Save namelists to docdir
110 if (not os.path.isdir(docdir)):
111 os.makedirs(docdir)
112 try:
113 with open(os.path.join(docdir, "README"), "w") as fd:
114 fd.write(" CESM Resolved Namelist Files\n For documentation only DO NOT MODIFY\n")
115 except (OSError, IOError) as e:
116 expect(False, "Failed to write %s/README: %s" % (docdir, e))
117
118 for cpglob in ["*_in_[0-9]*", "*modelio*", "*_in",
119 "*streams*txt*", "*stxt", "*maps.rc", "*cism.config*"]:
120 for file_to_copy in glob.glob(os.path.join(rundir, cpglob)):
121 logger.debug("Copy file from '%s' to '%s'" % (file_to_copy, docdir))
122 shutil.copy2(file_to_copy, docdir)
123
124 # Copy over chemistry mechanism docs if they exist
125 if (os.path.isdir(os.path.join(casebuild, "camconf"))):
126 for file_to_copy in glob.glob(os.path.join(casebuild, "camconf", "*chem_mech*")):
127 shutil.copy2(file_to_copy, docdir)
128
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scripts/lib/CIME/preview_namelists.py b/scripts/lib/CIME/preview_namelists.py
--- a/scripts/lib/CIME/preview_namelists.py
+++ b/scripts/lib/CIME/preview_namelists.py
@@ -40,7 +40,7 @@
with open(os.path.join(dir_,"CASEROOT"),"w+") as fd:
fd.write(caseroot+"\n")
-def create_namelists(case):
+def create_namelists(case, component=None):
"""
Create component namelists
"""
@@ -73,36 +73,37 @@
else:
compname = case.get_value("COMP_%s" % model_str.upper())
- cmd = os.path.join(config_dir, "buildnml")
- do_run_cmd = False
- # This code will try to import and run each buildnml as a subroutine
- # if that fails it will run it as a program in a seperate shell
- try:
- with open(cmd, 'r') as f:
- first_line = f.readline()
- if "python" in first_line:
- mod = imp.load_source("buildnml", cmd)
- logger.info(" Calling %s buildnml"%compname)
- mod.buildnml(case, caseroot, compname)
- else:
- raise SyntaxError
- except SyntaxError as detail:
- if 'python' in first_line:
- expect(False, detail)
- else:
+ if component is None or component == model_str:
+ cmd = os.path.join(config_dir, "buildnml")
+ do_run_cmd = False
+ # This code will try to import and run each buildnml as a subroutine
+ # if that fails it will run it as a program in a seperate shell
+ try:
+ with open(cmd, 'r') as f:
+ first_line = f.readline()
+ if "python" in first_line:
+ mod = imp.load_source("buildnml", cmd)
+ logger.info(" Calling %s buildnml"%compname)
+ mod.buildnml(case, caseroot, compname)
+ else:
+ raise SyntaxError
+ except SyntaxError as detail:
+ if 'python' in first_line:
+ expect(False, detail)
+ else:
+ do_run_cmd = True
+ except AttributeError:
do_run_cmd = True
- except AttributeError:
- do_run_cmd = True
- except:
- raise
-
- if do_run_cmd:
- logger.info(" Running %s buildnml"%compname)
- case.flush()
- output = run_cmd_no_fail("%s %s" % (cmd, caseroot), verbose=False)
- logger.info(output)
- # refresh case xml object from file
- case.read_xml()
+ except:
+ raise
+
+ if do_run_cmd:
+ logger.info(" Running %s buildnml"%compname)
+ case.flush()
+ output = run_cmd_no_fail("%s %s" % (cmd, caseroot), verbose=False)
+ logger.info(output)
+ # refresh case xml object from file
+ case.read_xml()
logger.info("Finished creating component namelists")
| {"golden_diff": "diff --git a/scripts/lib/CIME/preview_namelists.py b/scripts/lib/CIME/preview_namelists.py\n--- a/scripts/lib/CIME/preview_namelists.py\n+++ b/scripts/lib/CIME/preview_namelists.py\n@@ -40,7 +40,7 @@\n with open(os.path.join(dir_,\"CASEROOT\"),\"w+\") as fd:\n fd.write(caseroot+\"\\n\")\n \n-def create_namelists(case):\n+def create_namelists(case, component=None):\n \"\"\"\n Create component namelists\n \"\"\"\n@@ -73,36 +73,37 @@\n else:\n compname = case.get_value(\"COMP_%s\" % model_str.upper())\n \n- cmd = os.path.join(config_dir, \"buildnml\")\n- do_run_cmd = False\n- # This code will try to import and run each buildnml as a subroutine\n- # if that fails it will run it as a program in a seperate shell\n- try:\n- with open(cmd, 'r') as f:\n- first_line = f.readline()\n- if \"python\" in first_line:\n- mod = imp.load_source(\"buildnml\", cmd)\n- logger.info(\" Calling %s buildnml\"%compname)\n- mod.buildnml(case, caseroot, compname)\n- else:\n- raise SyntaxError\n- except SyntaxError as detail:\n- if 'python' in first_line:\n- expect(False, detail)\n- else:\n+ if component is None or component == model_str:\n+ cmd = os.path.join(config_dir, \"buildnml\")\n+ do_run_cmd = False\n+ # This code will try to import and run each buildnml as a subroutine\n+ # if that fails it will run it as a program in a seperate shell\n+ try:\n+ with open(cmd, 'r') as f:\n+ first_line = f.readline()\n+ if \"python\" in first_line:\n+ mod = imp.load_source(\"buildnml\", cmd)\n+ logger.info(\" Calling %s buildnml\"%compname)\n+ mod.buildnml(case, caseroot, compname)\n+ else:\n+ raise SyntaxError\n+ except SyntaxError as detail:\n+ if 'python' in first_line:\n+ expect(False, detail)\n+ else:\n+ do_run_cmd = True\n+ except AttributeError:\n do_run_cmd = True\n- except AttributeError:\n- do_run_cmd = True\n- except:\n- raise\n-\n- if do_run_cmd:\n- logger.info(\" Running %s buildnml\"%compname)\n- case.flush()\n- output = run_cmd_no_fail(\"%s %s\" % (cmd, caseroot), verbose=False)\n- logger.info(output)\n- # refresh case xml object from file\n- case.read_xml()\n+ except:\n+ raise\n+\n+ if do_run_cmd:\n+ logger.info(\" Running %s buildnml\"%compname)\n+ case.flush()\n+ output = run_cmd_no_fail(\"%s %s\" % (cmd, caseroot), verbose=False)\n+ logger.info(output)\n+ # refresh case xml object from file\n+ case.read_xml()\n \n logger.info(\"Finished creating component namelists\")\n", "issue": "Add --component option to preview_namelists\nIt would be useful if users could specify a single component when running `preview_namelists` rather than building namelists for all components when the script is run from the command line in a case directory.\n", "before_files": [{"content": "\"\"\"\nAPI for preview namelist\n\"\"\"\n\nfrom CIME.XML.standard_module_setup import *\n\nimport glob, shutil, imp\nlogger = logging.getLogger(__name__)\n\ndef create_dirs(case):\n \"\"\"\n Make necessary directories for case\n \"\"\"\n # Get data from XML\n exeroot = case.get_value(\"EXEROOT\")\n libroot = case.get_value(\"LIBROOT\")\n incroot = case.get_value(\"INCROOT\")\n rundir = case.get_value(\"RUNDIR\")\n caseroot = case.get_value(\"CASEROOT\")\n\n docdir = os.path.join(caseroot, \"CaseDocs\")\n dirs_to_make = []\n models = case.get_values(\"COMP_CLASSES\")\n for model in models:\n dirname = model.lower()\n dirs_to_make.append(os.path.join(exeroot, dirname, \"obj\"))\n\n dirs_to_make.extend([exeroot, libroot, incroot, rundir, docdir])\n\n for dir_to_make in dirs_to_make:\n if (not os.path.isdir(dir_to_make)):\n try:\n 
logger.debug(\"Making dir '%s'\" % dir_to_make)\n os.makedirs(dir_to_make)\n except OSError as e:\n expect(False, \"Could not make directory '%s', error: %s\" % (dir_to_make, e))\n\n # As a convenience write the location of the case directory in the bld and run directories\n for dir_ in (exeroot, rundir):\n with open(os.path.join(dir_,\"CASEROOT\"),\"w+\") as fd:\n fd.write(caseroot+\"\\n\")\n\ndef create_namelists(case):\n \"\"\"\n Create component namelists\n \"\"\"\n case.flush()\n\n create_dirs(case)\n\n casebuild = case.get_value(\"CASEBUILD\")\n caseroot = case.get_value(\"CASEROOT\")\n rundir = case.get_value(\"RUNDIR\")\n\n docdir = os.path.join(caseroot, \"CaseDocs\")\n\n # Load modules\n case.load_env()\n\n logger.info(\"Creating component namelists\")\n\n # Create namelists - must have cpl last in the list below\n # Note - cpl must be last in the loop below so that in generating its namelist,\n # it can use xml vars potentially set by other component's buildnml scripts\n models = case.get_values(\"COMP_CLASSES\")\n models += [models.pop(0)]\n for model in models:\n model_str = model.lower()\n config_file = case.get_value(\"CONFIG_%s_FILE\" % model_str.upper())\n config_dir = os.path.dirname(config_file)\n if model_str == \"cpl\":\n compname = \"drv\"\n else:\n compname = case.get_value(\"COMP_%s\" % model_str.upper())\n\n cmd = os.path.join(config_dir, \"buildnml\")\n do_run_cmd = False\n # This code will try to import and run each buildnml as a subroutine\n # if that fails it will run it as a program in a seperate shell\n try:\n with open(cmd, 'r') as f:\n first_line = f.readline()\n if \"python\" in first_line:\n mod = imp.load_source(\"buildnml\", cmd)\n logger.info(\" Calling %s buildnml\"%compname)\n mod.buildnml(case, caseroot, compname)\n else:\n raise SyntaxError\n except SyntaxError as detail:\n if 'python' in first_line:\n expect(False, detail)\n else:\n do_run_cmd = True\n except AttributeError:\n do_run_cmd = True\n except:\n raise\n\n if do_run_cmd:\n logger.info(\" Running %s buildnml\"%compname)\n case.flush()\n output = run_cmd_no_fail(\"%s %s\" % (cmd, caseroot), verbose=False)\n logger.info(output)\n # refresh case xml object from file\n case.read_xml()\n\n logger.info(\"Finished creating component namelists\")\n\n # Save namelists to docdir\n if (not os.path.isdir(docdir)):\n os.makedirs(docdir)\n try:\n with open(os.path.join(docdir, \"README\"), \"w\") as fd:\n fd.write(\" CESM Resolved Namelist Files\\n For documentation only DO NOT MODIFY\\n\")\n except (OSError, IOError) as e:\n expect(False, \"Failed to write %s/README: %s\" % (docdir, e))\n\n for cpglob in [\"*_in_[0-9]*\", \"*modelio*\", \"*_in\",\n \"*streams*txt*\", \"*stxt\", \"*maps.rc\", \"*cism.config*\"]:\n for file_to_copy in glob.glob(os.path.join(rundir, cpglob)):\n logger.debug(\"Copy file from '%s' to '%s'\" % (file_to_copy, docdir))\n shutil.copy2(file_to_copy, docdir)\n\n # Copy over chemistry mechanism docs if they exist\n if (os.path.isdir(os.path.join(casebuild, \"camconf\"))):\n for file_to_copy in glob.glob(os.path.join(casebuild, \"camconf\", \"*chem_mech*\")):\n shutil.copy2(file_to_copy, docdir)\n", "path": "scripts/lib/CIME/preview_namelists.py"}], "after_files": [{"content": "\"\"\"\nAPI for preview namelist\n\"\"\"\n\nfrom CIME.XML.standard_module_setup import *\n\nimport glob, shutil, imp\nlogger = logging.getLogger(__name__)\n\ndef create_dirs(case):\n \"\"\"\n Make necessary directories for case\n \"\"\"\n # Get data from XML\n exeroot = case.get_value(\"EXEROOT\")\n libroot 
= case.get_value(\"LIBROOT\")\n incroot = case.get_value(\"INCROOT\")\n rundir = case.get_value(\"RUNDIR\")\n caseroot = case.get_value(\"CASEROOT\")\n\n docdir = os.path.join(caseroot, \"CaseDocs\")\n dirs_to_make = []\n models = case.get_values(\"COMP_CLASSES\")\n for model in models:\n dirname = model.lower()\n dirs_to_make.append(os.path.join(exeroot, dirname, \"obj\"))\n\n dirs_to_make.extend([exeroot, libroot, incroot, rundir, docdir])\n\n for dir_to_make in dirs_to_make:\n if (not os.path.isdir(dir_to_make)):\n try:\n logger.debug(\"Making dir '%s'\" % dir_to_make)\n os.makedirs(dir_to_make)\n except OSError as e:\n expect(False, \"Could not make directory '%s', error: %s\" % (dir_to_make, e))\n\n # As a convenience write the location of the case directory in the bld and run directories\n for dir_ in (exeroot, rundir):\n with open(os.path.join(dir_,\"CASEROOT\"),\"w+\") as fd:\n fd.write(caseroot+\"\\n\")\n\ndef create_namelists(case, component=None):\n \"\"\"\n Create component namelists\n \"\"\"\n case.flush()\n\n create_dirs(case)\n\n casebuild = case.get_value(\"CASEBUILD\")\n caseroot = case.get_value(\"CASEROOT\")\n rundir = case.get_value(\"RUNDIR\")\n\n docdir = os.path.join(caseroot, \"CaseDocs\")\n\n # Load modules\n case.load_env()\n\n logger.info(\"Creating component namelists\")\n\n # Create namelists - must have cpl last in the list below\n # Note - cpl must be last in the loop below so that in generating its namelist,\n # it can use xml vars potentially set by other component's buildnml scripts\n models = case.get_values(\"COMP_CLASSES\")\n models += [models.pop(0)]\n for model in models:\n model_str = model.lower()\n config_file = case.get_value(\"CONFIG_%s_FILE\" % model_str.upper())\n config_dir = os.path.dirname(config_file)\n if model_str == \"cpl\":\n compname = \"drv\"\n else:\n compname = case.get_value(\"COMP_%s\" % model_str.upper())\n\n if component is None or component == model_str:\n cmd = os.path.join(config_dir, \"buildnml\")\n do_run_cmd = False\n # This code will try to import and run each buildnml as a subroutine\n # if that fails it will run it as a program in a seperate shell\n try:\n with open(cmd, 'r') as f:\n first_line = f.readline()\n if \"python\" in first_line:\n mod = imp.load_source(\"buildnml\", cmd)\n logger.info(\" Calling %s buildnml\"%compname)\n mod.buildnml(case, caseroot, compname)\n else:\n raise SyntaxError\n except SyntaxError as detail:\n if 'python' in first_line:\n expect(False, detail)\n else:\n do_run_cmd = True\n except AttributeError:\n do_run_cmd = True\n except:\n raise\n\n if do_run_cmd:\n logger.info(\" Running %s buildnml\"%compname)\n case.flush()\n output = run_cmd_no_fail(\"%s %s\" % (cmd, caseroot), verbose=False)\n logger.info(output)\n # refresh case xml object from file\n case.read_xml()\n\n logger.info(\"Finished creating component namelists\")\n\n # Save namelists to docdir\n if (not os.path.isdir(docdir)):\n os.makedirs(docdir)\n try:\n with open(os.path.join(docdir, \"README\"), \"w\") as fd:\n fd.write(\" CESM Resolved Namelist Files\\n For documentation only DO NOT MODIFY\\n\")\n except (OSError, IOError) as e:\n expect(False, \"Failed to write %s/README: %s\" % (docdir, e))\n\n for cpglob in [\"*_in_[0-9]*\", \"*modelio*\", \"*_in\",\n \"*streams*txt*\", \"*stxt\", \"*maps.rc\", \"*cism.config*\"]:\n for file_to_copy in glob.glob(os.path.join(rundir, cpglob)):\n logger.debug(\"Copy file from '%s' to '%s'\" % (file_to_copy, docdir))\n shutil.copy2(file_to_copy, docdir)\n\n # Copy over chemistry mechanism 
docs if they exist\n if (os.path.isdir(os.path.join(casebuild, \"camconf\"))):\n for file_to_copy in glob.glob(os.path.join(casebuild, \"camconf\", \"*chem_mech*\")):\n shutil.copy2(file_to_copy, docdir)\n", "path": "scripts/lib/CIME/preview_namelists.py"}]} | 1,721 | 735 |
gh_patches_debug_15834 | rasdani/github-patches | git_diff | ansible__ansible-modules-extras-2674 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
rabbitmq_binding can't set empty routing_key
<!--- Verify first that your issue/request is not already reported in GitHub -->
##### ISSUE TYPE
<!--- Pick one below and delete the rest: -->
- Bug Report
##### COMPONENT NAME
<!--- Name of the plugin/module/task -->
messaging/rabbitmq_binding
##### ANSIBLE VERSION
```
ansible 2.0.1.0
config file = /etc/ansible/ansible.cfg
configured module search path = Default w/o overrides
```
##### CONFIGURATION
<!---
Mention any settings you have changed/added/removed in ansible.cfg
(or using the ANSIBLE_* environment variables).
-->
##### OS / ENVIRONMENT
<!---
Mention the OS you are running Ansible from, and the OS you are
managing, or say “N/A” for anything that is not platform-specific.
-->
N/A
##### SUMMARY
<!--- Explain the problem briefly -->
The module sets routing_key="#" by default, so RabbitMQ can't route messages that are published without a routing key. I tried to set routing_key='' in the task, but Ansible just marks it as OK and doesn't add the binding.
##### STEPS TO REPRODUCE
```
- rabbitmq_binding: name=mailer destination=mailer type=queue
```
<!--- You can also paste gist.github.com links for larger files -->
##### EXPECTED RESULTS
<!--- What did you expect to happen when running the steps above? -->
```
$ rabbitmqctl list_bindings
Listing bindings ...
exchange mailer queue mailer []
mailer exchange mailer queue []
$
```
##### ACTUAL RESULTS
<!--- What actually happened? If possible run with high verbosity (-vvvv) -->
```
$ rabbitmqctl list_bindings
Listing bindings ...
exchange mailer queue mailer []
mailer exchange mailer queue # []
$
```
<!--- Paste verbatim command output between quotes -->
```
TASK [RabbitMQ : add rabbitmq bindings] ****************************************
ok: [%host_ip%] => (item={u'source': u'mailer', u'destination': u'mailer', u'destination_type': u'queue'})
```
--- END ISSUE ---
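Before looking at the module, a reproduction sketch against RabbitMQ's management REST API helps frame the report (assumptions: a local broker with the management plugin on port 15672 and the default guest/guest credentials; `%2F` is the URL-encoded default vhost):

```python
# Create the binding the reporter wants: exchange "mailer" -> queue "mailer"
# with an explicitly empty routing key.
import json
import requests

url = "http://localhost:15672/api/bindings/%2F/e/mailer/q/mailer"
resp = requests.post(
    url,
    auth=("guest", "guest"),
    headers={"content-type": "application/json"},
    data=json.dumps({"routing_key": "", "arguments": {}}),
)
print(resp.status_code)  # 201 when RabbitMQ accepts the empty-key binding
```

The API itself accepts an empty routing key, so the limitation described above lies in the module shown below rather than in RabbitMQ.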
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `messaging/rabbitmq_binding.py`
Content:
```
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # (c) 2015, Manuel Sousa <[email protected]>
5 #
6 # This file is part of Ansible
7 #
8 # Ansible is free software: you can redistribute it and/or modify
9 # it under the terms of the GNU General Public License as published by
10 # the Free Software Foundation, either version 3 of the License, or
11 # (at your option) any later version.
12 #
13 # Ansible is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU General Public License for more details.
17 #
18 # You should have received a copy of the GNU General Public License
19 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
20 #
21
22 DOCUMENTATION = '''
23 ---
24 module: rabbitmq_binding
25 author: "Manuel Sousa (@manuel-sousa)"
26 version_added: "2.0"
27
28 short_description: This module manages rabbitMQ bindings
29 description:
30 - This module uses rabbitMQ Rest API to create/delete bindings
31 requirements: [ python requests ]
32 options:
33 state:
34 description:
35 - Whether the exchange should be present or absent
36 - Only present implemented atm
37 choices: [ "present", "absent" ]
38 required: false
39 default: present
40 name:
41 description:
42 - source exchange to create binding on
43 required: true
44 aliases: [ "src", "source" ]
45 login_user:
46 description:
47 - rabbitMQ user for connection
48 required: false
49 default: guest
50 login_password:
51 description:
52 - rabbitMQ password for connection
53 required: false
54 default: false
55 login_host:
56 description:
57 - rabbitMQ host for connection
58 required: false
59 default: localhost
60 login_port:
61 description:
62 - rabbitMQ management api port
63 required: false
64 default: 15672
65 vhost:
66 description:
67 - rabbitMQ virtual host
68 - default vhost is /
69 required: false
70 default: "/"
71 destination:
72 description:
73 - destination exchange or queue for the binding
74 required: true
75 aliases: [ "dst", "dest" ]
76 destination_type:
77 description:
78 - Either queue or exchange
79 required: true
80 choices: [ "queue", "exchange" ]
81 aliases: [ "type", "dest_type" ]
82 routing_key:
83 description:
84 - routing key for the binding
85 - default is #
86 required: false
87 default: "#"
88 arguments:
89 description:
90 - extra arguments for exchange. If defined this argument is a key/value dictionary
91 required: false
92 default: {}
93 '''
94
95 EXAMPLES = '''
96 # Bind myQueue to directExchange with routing key info
97 - rabbitmq_binding: name=directExchange destination=myQueue type=queue routing_key=info
98
99 # Bind directExchange to topicExchange with routing key *.info
100 - rabbitmq_binding: name=topicExchange destination=topicExchange type=exchange routing_key="*.info"
101 '''
102
103 import requests
104 import urllib
105 import json
106
107 def main():
108 module = AnsibleModule(
109 argument_spec = dict(
110 state = dict(default='present', choices=['present', 'absent'], type='str'),
111 name = dict(required=True, aliases=[ "src", "source" ], type='str'),
112 login_user = dict(default='guest', type='str'),
113 login_password = dict(default='guest', type='str', no_log=True),
114 login_host = dict(default='localhost', type='str'),
115 login_port = dict(default='15672', type='str'),
116 vhost = dict(default='/', type='str'),
117 destination = dict(required=True, aliases=[ "dst", "dest"], type='str'),
118 destination_type = dict(required=True, aliases=[ "type", "dest_type"], choices=[ "queue", "exchange" ],type='str'),
119 routing_key = dict(default='#', type='str'),
120 arguments = dict(default=dict(), type='dict')
121 ),
122 supports_check_mode = True
123 )
124
125 if module.params['destination_type'] == "queue":
126 dest_type="q"
127 else:
128 dest_type="e"
129
130 url = "http://%s:%s/api/bindings/%s/e/%s/%s/%s/%s" % (
131 module.params['login_host'],
132 module.params['login_port'],
133 urllib.quote(module.params['vhost'],''),
134 urllib.quote(module.params['name'],''),
135 dest_type,
136 urllib.quote(module.params['destination'],''),
137 urllib.quote(module.params['routing_key'],'')
138 )
139
140 # Check if exchange already exists
141 r = requests.get( url, auth=(module.params['login_user'],module.params['login_password']))
142
143 if r.status_code==200:
144 binding_exists = True
145 response = r.json()
146 elif r.status_code==404:
147 binding_exists = False
148 response = r.text
149 else:
150 module.fail_json(
151 msg = "Invalid response from RESTAPI when trying to check if exchange exists",
152 details = r.text
153 )
154
155 if module.params['state']=='present':
156 change_required = not binding_exists
157 else:
158 change_required = binding_exists
159
160 # Exit if check_mode
161 if module.check_mode:
162 module.exit_json(
163 changed= change_required,
164 name = module.params['name'],
165 details = response,
166 arguments = module.params['arguments']
167 )
168
169 # Do changes
170 if change_required:
171 if module.params['state'] == 'present':
172 url = "http://%s:%s/api/bindings/%s/e/%s/%s/%s" % (
173 module.params['login_host'],
174 module.params['login_port'],
175 urllib.quote(module.params['vhost'],''),
176 urllib.quote(module.params['name'],''),
177 dest_type,
178 urllib.quote(module.params['destination'],'')
179 )
180
181 r = requests.post(
182 url,
183 auth = (module.params['login_user'],module.params['login_password']),
184 headers = { "content-type": "application/json"},
185 data = json.dumps({
186 "routing_key": module.params['routing_key'],
187 "arguments": module.params['arguments']
188 })
189 )
190 elif module.params['state'] == 'absent':
191 r = requests.delete( url, auth = (module.params['login_user'],module.params['login_password']))
192
193 if r.status_code == 204 or r.status_code == 201:
194 module.exit_json(
195 changed = True,
196 name = module.params['name'],
197 destination = module.params['destination']
198 )
199 else:
200 module.fail_json(
201 msg = "Error creating exchange",
202 status = r.status_code,
203 details = r.text
204 )
205
206 else:
207 module.exit_json(
208 changed = False,
209 name = module.params['name']
210 )
211
212 # import module snippets
213 from ansible.module_utils.basic import *
214 main()
215
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/messaging/rabbitmq_binding.py b/messaging/rabbitmq_binding.py
--- a/messaging/rabbitmq_binding.py
+++ b/messaging/rabbitmq_binding.py
@@ -127,6 +127,11 @@
else:
dest_type="e"
+ if module.params['routing_key'] == "":
+ props = "~"
+ else:
+ props = urllib.quote(module.params['routing_key'],'')
+
url = "http://%s:%s/api/bindings/%s/e/%s/%s/%s/%s" % (
module.params['login_host'],
module.params['login_port'],
@@ -134,7 +139,7 @@
urllib.quote(module.params['name'],''),
dest_type,
urllib.quote(module.params['destination'],''),
- urllib.quote(module.params['routing_key'],'')
+ props
)
# Check if exchange already exists
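The `~` substitution reflects how the management API names a binding: each binding has a "properties key", and for an empty routing key with no arguments that key is the literal `~`. A sketch of the existence check the patched module performs (it mirrors the module's Python 2-era `urllib.quote`; host and credentials are assumed):

```python
import urllib

import requests

routing_key = ""
props = "~" if routing_key == "" else urllib.quote(routing_key, "")
url = "http://localhost:15672/api/bindings/%2F/e/mailer/q/mailer/" + props
resp = requests.get(url, auth=("guest", "guest"))
print(resp.status_code)  # 200 once the empty-key binding exists, 404 otherwise
```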
| {"golden_diff": "diff --git a/messaging/rabbitmq_binding.py b/messaging/rabbitmq_binding.py\n--- a/messaging/rabbitmq_binding.py\n+++ b/messaging/rabbitmq_binding.py\n@@ -127,6 +127,11 @@\n else:\n dest_type=\"e\"\n \n+ if module.params['routing_key'] == \"\":\n+ props = \"~\"\n+ else:\n+ props = urllib.quote(module.params['routing_key'],'')\n+\n url = \"http://%s:%s/api/bindings/%s/e/%s/%s/%s/%s\" % (\n module.params['login_host'],\n module.params['login_port'],\n@@ -134,7 +139,7 @@\n urllib.quote(module.params['name'],''),\n dest_type,\n urllib.quote(module.params['destination'],''),\n- urllib.quote(module.params['routing_key'],'')\n+ props\n )\n \n # Check if exchange already exists\n", "issue": "rabbitmq_binding can't set empty routing_key\n<!--- Verify first that your issue/request is not already reported in GitHub -->\n##### ISSUE TYPE\n\n<!--- Pick one below and delete the rest: -->\n- Bug Report\n##### COMPONENT NAME\n\n<!--- Name of the plugin/module/task -->\n\nmessaging/rabbitmq_binding\n##### ANSIBLE VERSION\n\n```\nansible 2.0.1.0\n config file = /etc/ansible/ansible.cfg\n configured module search path = Default w/o overrides\n```\n##### CONFIGURATION\n\n<!---\nMention any settings you have changed/added/removed in ansible.cfg\n(or using the ANSIBLE_* environment variables).\n-->\n##### OS / ENVIRONMENT\n\n<!---\nMention the OS you are running Ansible from, and the OS you are\nmanaging, or say \u201cN/A\u201d for anything that is not platform-specific.\n-->\n\nN/A\n##### SUMMARY\n\n<!--- Explain the problem briefly -->\n\nModule set routing_key=\"#\" by default, so rabbitmq can't work when routing_key isn't set in messages. I'm tried to set routing_key='' in task, but ansible just mark it as OK and doesn't add binding.\n##### STEPS TO REPRODUCE\n\n```\n- rabbitmq_binding: name=mailer destination=mailer type=queue\n```\n\n<!--- You can also paste gist.github.com links for larger files -->\n##### EXPECTED RESULTS\n\n<!--- What did you expect to happen when running the steps above? -->\n\n```\n$ rabbitmqctl list_bindings\nListing bindings ...\n exchange mailer queue mailer []\nmailer exchange mailer queue []\n$\n```\n##### ACTUAL RESULTS\n\n<!--- What actually happened? If possible run with high verbosity (-vvvv) -->\n\n```\n$ rabbitmqctl list_bindings\nListing bindings ...\n exchange mailer queue mailer []\nmailer exchange mailer queue # []\n$\n```\n\n<!--- Paste verbatim command output between quotes -->\n\n```\nTASK [RabbitMQ : add rabbitmq bindings] ****************************************\nok: [%host_ip%] => (item={u'source': u'mailer', u'destination': u'mailer', u'destination_type': u'queue'})\n```\n\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2015, Manuel Sousa <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see <http://www.gnu.org/licenses/>.\n#\n\nDOCUMENTATION = '''\n---\nmodule: rabbitmq_binding\nauthor: \"Manuel Sousa (@manuel-sousa)\"\nversion_added: \"2.0\"\n\nshort_description: This module manages rabbitMQ bindings\ndescription:\n - This module uses rabbitMQ Rest API to create/delete bindings\nrequirements: [ python requests ]\noptions:\n state:\n description:\n - Whether the exchange should be present or absent\n - Only present implemented atm\n choices: [ \"present\", \"absent\" ]\n required: false\n default: present\n name:\n description:\n - source exchange to create binding on\n required: true\n aliases: [ \"src\", \"source\" ]\n login_user:\n description:\n - rabbitMQ user for connection\n required: false\n default: guest\n login_password:\n description:\n - rabbitMQ password for connection\n required: false\n default: false\n login_host:\n description:\n - rabbitMQ host for connection\n required: false\n default: localhost\n login_port:\n description:\n - rabbitMQ management api port\n required: false\n default: 15672\n vhost:\n description:\n - rabbitMQ virtual host\n - default vhost is /\n required: false\n default: \"/\"\n destination:\n description:\n - destination exchange or queue for the binding\n required: true\n aliases: [ \"dst\", \"dest\" ]\n destination_type:\n description:\n - Either queue or exchange\n required: true\n choices: [ \"queue\", \"exchange\" ]\n aliases: [ \"type\", \"dest_type\" ]\n routing_key:\n description:\n - routing key for the binding\n - default is #\n required: false\n default: \"#\"\n arguments:\n description:\n - extra arguments for exchange. If defined this argument is a key/value dictionary\n required: false\n default: {}\n'''\n\nEXAMPLES = '''\n# Bind myQueue to directExchange with routing key info\n- rabbitmq_binding: name=directExchange destination=myQueue type=queue routing_key=info\n\n# Bind directExchange to topicExchange with routing key *.info\n- rabbitmq_binding: name=topicExchange destination=topicExchange type=exchange routing_key=\"*.info\"\n'''\n\nimport requests\nimport urllib\nimport json\n\ndef main():\n module = AnsibleModule(\n argument_spec = dict(\n state = dict(default='present', choices=['present', 'absent'], type='str'),\n name = dict(required=True, aliases=[ \"src\", \"source\" ], type='str'),\n login_user = dict(default='guest', type='str'),\n login_password = dict(default='guest', type='str', no_log=True),\n login_host = dict(default='localhost', type='str'),\n login_port = dict(default='15672', type='str'),\n vhost = dict(default='/', type='str'),\n destination = dict(required=True, aliases=[ \"dst\", \"dest\"], type='str'),\n destination_type = dict(required=True, aliases=[ \"type\", \"dest_type\"], choices=[ \"queue\", \"exchange\" ],type='str'),\n routing_key = dict(default='#', type='str'),\n arguments = dict(default=dict(), type='dict')\n ),\n supports_check_mode = True\n )\n\n if module.params['destination_type'] == \"queue\":\n dest_type=\"q\"\n else:\n dest_type=\"e\"\n\n url = \"http://%s:%s/api/bindings/%s/e/%s/%s/%s/%s\" % (\n module.params['login_host'],\n module.params['login_port'],\n urllib.quote(module.params['vhost'],''),\n urllib.quote(module.params['name'],''),\n dest_type,\n urllib.quote(module.params['destination'],''),\n urllib.quote(module.params['routing_key'],'')\n )\n\n # Check if exchange already exists\n r = requests.get( url, auth=(module.params['login_user'],module.params['login_password']))\n\n if r.status_code==200:\n binding_exists = True\n response = r.json()\n elif 
r.status_code==404:\n binding_exists = False\n response = r.text\n else:\n module.fail_json(\n msg = \"Invalid response from RESTAPI when trying to check if exchange exists\",\n details = r.text\n )\n\n if module.params['state']=='present':\n change_required = not binding_exists\n else:\n change_required = binding_exists\n\n # Exit if check_mode\n if module.check_mode:\n module.exit_json(\n changed= change_required,\n name = module.params['name'],\n details = response,\n arguments = module.params['arguments']\n )\n\n # Do changes\n if change_required:\n if module.params['state'] == 'present':\n url = \"http://%s:%s/api/bindings/%s/e/%s/%s/%s\" % (\n module.params['login_host'],\n module.params['login_port'],\n urllib.quote(module.params['vhost'],''),\n urllib.quote(module.params['name'],''),\n dest_type,\n urllib.quote(module.params['destination'],'')\n )\n\n r = requests.post(\n url,\n auth = (module.params['login_user'],module.params['login_password']),\n headers = { \"content-type\": \"application/json\"},\n data = json.dumps({\n \"routing_key\": module.params['routing_key'],\n \"arguments\": module.params['arguments']\n })\n )\n elif module.params['state'] == 'absent':\n r = requests.delete( url, auth = (module.params['login_user'],module.params['login_password']))\n\n if r.status_code == 204 or r.status_code == 201:\n module.exit_json(\n changed = True,\n name = module.params['name'],\n destination = module.params['destination']\n )\n else:\n module.fail_json(\n msg = \"Error creating exchange\",\n status = r.status_code,\n details = r.text\n )\n\n else:\n module.exit_json(\n changed = False,\n name = module.params['name']\n )\n\n# import module snippets\nfrom ansible.module_utils.basic import *\nmain()\n", "path": "messaging/rabbitmq_binding.py"}], "after_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2015, Manuel Sousa <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see <http://www.gnu.org/licenses/>.\n#\n\nDOCUMENTATION = '''\n---\nmodule: rabbitmq_binding\nauthor: \"Manuel Sousa (@manuel-sousa)\"\nversion_added: \"2.0\"\n\nshort_description: This module manages rabbitMQ bindings\ndescription:\n - This module uses rabbitMQ Rest API to create/delete bindings\nrequirements: [ python requests ]\noptions:\n state:\n description:\n - Whether the exchange should be present or absent\n - Only present implemented atm\n choices: [ \"present\", \"absent\" ]\n required: false\n default: present\n name:\n description:\n - source exchange to create binding on\n required: true\n aliases: [ \"src\", \"source\" ]\n login_user:\n description:\n - rabbitMQ user for connection\n required: false\n default: guest\n login_password:\n description:\n - rabbitMQ password for connection\n required: false\n default: false\n login_host:\n description:\n - rabbitMQ host for connection\n required: false\n default: localhost\n login_port:\n description:\n - rabbitMQ management api port\n required: false\n default: 15672\n vhost:\n description:\n - rabbitMQ virtual host\n - default vhost is /\n required: false\n default: \"/\"\n destination:\n description:\n - destination exchange or queue for the binding\n required: true\n aliases: [ \"dst\", \"dest\" ]\n destination_type:\n description:\n - Either queue or exchange\n required: true\n choices: [ \"queue\", \"exchange\" ]\n aliases: [ \"type\", \"dest_type\" ]\n routing_key:\n description:\n - routing key for the binding\n - default is #\n required: false\n default: \"#\"\n arguments:\n description:\n - extra arguments for exchange. If defined this argument is a key/value dictionary\n required: false\n default: {}\n'''\n\nEXAMPLES = '''\n# Bind myQueue to directExchange with routing key info\n- rabbitmq_binding: name=directExchange destination=myQueue type=queue routing_key=info\n\n# Bind directExchange to topicExchange with routing key *.info\n- rabbitmq_binding: name=topicExchange destination=topicExchange type=exchange routing_key=\"*.info\"\n'''\n\nimport requests\nimport urllib\nimport json\n\ndef main():\n module = AnsibleModule(\n argument_spec = dict(\n state = dict(default='present', choices=['present', 'absent'], type='str'),\n name = dict(required=True, aliases=[ \"src\", \"source\" ], type='str'),\n login_user = dict(default='guest', type='str'),\n login_password = dict(default='guest', type='str', no_log=True),\n login_host = dict(default='localhost', type='str'),\n login_port = dict(default='15672', type='str'),\n vhost = dict(default='/', type='str'),\n destination = dict(required=True, aliases=[ \"dst\", \"dest\"], type='str'),\n destination_type = dict(required=True, aliases=[ \"type\", \"dest_type\"], choices=[ \"queue\", \"exchange\" ],type='str'),\n routing_key = dict(default='#', type='str'),\n arguments = dict(default=dict(), type='dict')\n ),\n supports_check_mode = True\n )\n\n if module.params['destination_type'] == \"queue\":\n dest_type=\"q\"\n else:\n dest_type=\"e\"\n\n if module.params['routing_key'] == \"\":\n props = \"~\"\n else:\n props = urllib.quote(module.params['routing_key'],'')\n\n url = \"http://%s:%s/api/bindings/%s/e/%s/%s/%s/%s\" % (\n module.params['login_host'],\n module.params['login_port'],\n urllib.quote(module.params['vhost'],''),\n urllib.quote(module.params['name'],''),\n dest_type,\n urllib.quote(module.params['destination'],''),\n props\n )\n\n # Check if exchange already exists\n r = requests.get( url, 
auth=(module.params['login_user'],module.params['login_password']))\n\n if r.status_code==200:\n binding_exists = True\n response = r.json()\n elif r.status_code==404:\n binding_exists = False\n response = r.text\n else:\n module.fail_json(\n msg = \"Invalid response from RESTAPI when trying to check if exchange exists\",\n details = r.text\n )\n\n if module.params['state']=='present':\n change_required = not binding_exists\n else:\n change_required = binding_exists\n\n # Exit if check_mode\n if module.check_mode:\n module.exit_json(\n changed= change_required,\n name = module.params['name'],\n details = response,\n arguments = module.params['arguments']\n )\n\n # Do changes\n if change_required:\n if module.params['state'] == 'present':\n url = \"http://%s:%s/api/bindings/%s/e/%s/%s/%s\" % (\n module.params['login_host'],\n module.params['login_port'],\n urllib.quote(module.params['vhost'],''),\n urllib.quote(module.params['name'],''),\n dest_type,\n urllib.quote(module.params['destination'],'')\n )\n\n r = requests.post(\n url,\n auth = (module.params['login_user'],module.params['login_password']),\n headers = { \"content-type\": \"application/json\"},\n data = json.dumps({\n \"routing_key\": module.params['routing_key'],\n \"arguments\": module.params['arguments']\n })\n )\n elif module.params['state'] == 'absent':\n r = requests.delete( url, auth = (module.params['login_user'],module.params['login_password']))\n\n if r.status_code == 204 or r.status_code == 201:\n module.exit_json(\n changed = True,\n name = module.params['name'],\n destination = module.params['destination']\n )\n else:\n module.fail_json(\n msg = \"Error creating exchange\",\n status = r.status_code,\n details = r.text\n )\n\n else:\n module.exit_json(\n changed = False,\n name = module.params['name']\n )\n\n# import module snippets\nfrom ansible.module_utils.basic import *\nmain()\n", "path": "messaging/rabbitmq_binding.py"}]} | 2,779 | 202 |
gh_patches_debug_1633 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-2050 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
botbuilder support for regex==2022 and above
Description:
I'm currently working on building a chatbot using Azure Bot Builder SDK in conjunction with OpenAI. In my project, I'm relying on the OpenAIEmbedding class from the langchain package, which utilizes Tiktoken. However, I've run into an issue due to dependency conflicts with Tiktoken. Specifically, Tiktoken requires regex version 2022 or higher, while the Bot Builder package supports only up to regex version 2019.
Feature Request:
I kindly request adding support for Tiktoken's regex version 2022 or higher in the OpenAIEmbedding class within the langchain package. This update would resolve the dependency conflicts and enable smoother integration of OpenAI into projects using Azure Bot Builder SDK.
Additional Information:
Current Behavior: Currently, the OpenAIEmbedding class in langchain relies on Tiktoken, which necessitates a regex version that is not compatible with the Bot Builder SDK's regex version support.
Desired Behavior: The botbuilder packages should be updated to support Tiktoken's dependency on regex version 2022 or higher.
Impact of the Feature:
This feature would benefit developers working on chatbot projects that use Azure Bot Builder SDK and OpenAI. It would eliminate dependency conflicts, allowing for a seamless integration experience.
--- END ISSUE ---
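A small environment check makes the conflict concrete (a sketch, not taken from the issue; `importlib.metadata` needs Python 3.8+):

```python
# botbuilder-dialogs pins regex<=2019.08.19 while tiktoken needs a 2022+ regex,
# so pip cannot satisfy both in one environment until the pin is relaxed.
from importlib import metadata

for dist in ("regex", "botbuilder-dialogs", "tiktoken"):
    try:
        print(dist, metadata.version(dist))
    except metadata.PackageNotFoundError:
        print(dist, "not installed")
```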
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libraries/botbuilder-dialogs/setup.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 import os
5 from setuptools import setup
6
7 REQUIRES = [
8 "regex<=2019.08.19",
9 "emoji==1.7.0",
10 "recognizers-text-date-time>=1.0.2a1",
11 "recognizers-text-number-with-unit>=1.0.2a1",
12 "recognizers-text-number>=1.0.2a1",
13 "recognizers-text>=1.0.2a1",
14 "recognizers-text-choice>=1.0.2a1",
15 "babel==2.9.1",
16 "botbuilder-schema==4.15.0",
17 "botframework-connector==4.15.0",
18 "botbuilder-core==4.15.0",
19 ]
20
21 TEST_REQUIRES = ["aiounittest==1.3.0"]
22
23 root = os.path.abspath(os.path.dirname(__file__))
24
25 with open(os.path.join(root, "botbuilder", "dialogs", "about.py")) as f:
26 package_info = {}
27 info = f.read()
28 exec(info, package_info)
29
30 with open(os.path.join(root, "README.rst"), encoding="utf-8") as f:
31 long_description = f.read()
32
33 setup(
34 name=package_info["__title__"],
35 version=package_info["__version__"],
36 url=package_info["__uri__"],
37 author=package_info["__author__"],
38 description=package_info["__description__"],
39 keywords=["BotBuilderDialogs", "bots", "ai", "botframework", "botbuilder"],
40 long_description=long_description,
41 long_description_content_type="text/x-rst",
42 license=package_info["__license__"],
43 packages=[
44 "botbuilder.dialogs",
45 "botbuilder.dialogs.prompts",
46 "botbuilder.dialogs.choices",
47 "botbuilder.dialogs.skills",
48 "botbuilder.dialogs.memory",
49 "botbuilder.dialogs.memory.path_resolvers",
50 "botbuilder.dialogs.memory.scopes",
51 ],
52 install_requires=REQUIRES + TEST_REQUIRES,
53 tests_require=TEST_REQUIRES,
54 include_package_data=True,
55 classifiers=[
56 "Programming Language :: Python :: 3.7",
57 "Intended Audience :: Developers",
58 "License :: OSI Approved :: MIT License",
59 "Operating System :: OS Independent",
60 "Development Status :: 5 - Production/Stable",
61 "Topic :: Scientific/Engineering :: Artificial Intelligence",
62 ],
63 )
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/libraries/botbuilder-dialogs/setup.py b/libraries/botbuilder-dialogs/setup.py
--- a/libraries/botbuilder-dialogs/setup.py
+++ b/libraries/botbuilder-dialogs/setup.py
@@ -5,7 +5,7 @@
from setuptools import setup
REQUIRES = [
- "regex<=2019.08.19",
+ "regex>=2022.1.18",
"emoji==1.7.0",
"recognizers-text-date-time>=1.0.2a1",
"recognizers-text-number-with-unit>=1.0.2a1",
| {"golden_diff": "diff --git a/libraries/botbuilder-dialogs/setup.py b/libraries/botbuilder-dialogs/setup.py\n--- a/libraries/botbuilder-dialogs/setup.py\n+++ b/libraries/botbuilder-dialogs/setup.py\n@@ -5,7 +5,7 @@\n from setuptools import setup\n \n REQUIRES = [\n- \"regex<=2019.08.19\",\n+ \"regex>=2022.1.18\",\n \"emoji==1.7.0\",\n \"recognizers-text-date-time>=1.0.2a1\",\n \"recognizers-text-number-with-unit>=1.0.2a1\",\n", "issue": "botbuidler support for regex== 2022 and above\nDescription:\r\n\r\nI'm currently working on building a chatbot using Azure Bot Builder SDK in conjunction with OpenAI. In my project, I'm relying on the OpenAIEmbedding class from the langchain package, which utilizes Tiktoken. However, I've run into an issue due to dependency conflicts with Tiktoken. Specifically, Tiktoken requires regex version 2022 or higher, while the Bot Builder package supports only up to regex version 2019.\r\n\r\nFeature Request:\r\n\r\nI kindly request adding support for Tiktoken's regex version 2022 or higher in the OpenAIEmbedding class within the langchain package. This update would resolve the dependency conflicts and enable smoother integration of OpenAI into projects using Azure Bot Builder SDK.\r\n\r\nAdditional Information:\r\n\r\nCurrent Behavior: Currently, the OpenAIEmbedding class in langchain relies on Tiktoken, which necessitates a regex version that is not compatible with the Bot Builder SDK's regex version support.\r\n\r\nDesired Behavior: The botbuilder classes should be updated to support Tiktoken's dependency on regex version 2022 or higher t\r\n\r\nImpact of the Feature:\r\n\r\nThis feature would benefit developers working on chatbot projects that use Azure Bot Builder SDK and OpenAI. It would eliminate dependency conflicts, allowing for a seamless integration experience.\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nREQUIRES = [\n \"regex<=2019.08.19\",\n \"emoji==1.7.0\",\n \"recognizers-text-date-time>=1.0.2a1\",\n \"recognizers-text-number-with-unit>=1.0.2a1\",\n \"recognizers-text-number>=1.0.2a1\",\n \"recognizers-text>=1.0.2a1\",\n \"recognizers-text-choice>=1.0.2a1\",\n \"babel==2.9.1\",\n \"botbuilder-schema==4.15.0\",\n \"botframework-connector==4.15.0\",\n \"botbuilder-core==4.15.0\",\n]\n\nTEST_REQUIRES = [\"aiounittest==1.3.0\"]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"botbuilder\", \"dialogs\", \"about.py\")) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=[\"BotBuilderDialogs\", \"bots\", \"ai\", \"botframework\", \"botbuilder\"],\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\n \"botbuilder.dialogs\",\n \"botbuilder.dialogs.prompts\",\n \"botbuilder.dialogs.choices\",\n \"botbuilder.dialogs.skills\",\n \"botbuilder.dialogs.memory\",\n \"botbuilder.dialogs.memory.path_resolvers\",\n \"botbuilder.dialogs.memory.scopes\",\n ],\n install_requires=REQUIRES + TEST_REQUIRES,\n tests_require=TEST_REQUIRES,\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botbuilder-dialogs/setup.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nREQUIRES = [\n \"regex>=2022.1.18\",\n \"emoji==1.7.0\",\n \"recognizers-text-date-time>=1.0.2a1\",\n \"recognizers-text-number-with-unit>=1.0.2a1\",\n \"recognizers-text-number>=1.0.2a1\",\n \"recognizers-text>=1.0.2a1\",\n \"recognizers-text-choice>=1.0.2a1\",\n \"babel==2.9.1\",\n \"botbuilder-schema==4.15.0\",\n \"botframework-connector==4.15.0\",\n \"botbuilder-core==4.15.0\",\n]\n\nTEST_REQUIRES = [\"aiounittest==1.3.0\"]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"botbuilder\", \"dialogs\", \"about.py\")) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=[\"BotBuilderDialogs\", \"bots\", \"ai\", \"botframework\", \"botbuilder\"],\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\n \"botbuilder.dialogs\",\n \"botbuilder.dialogs.prompts\",\n \"botbuilder.dialogs.choices\",\n \"botbuilder.dialogs.skills\",\n \"botbuilder.dialogs.memory\",\n \"botbuilder.dialogs.memory.path_resolvers\",\n \"botbuilder.dialogs.memory.scopes\",\n ],\n install_requires=REQUIRES + TEST_REQUIRES,\n tests_require=TEST_REQUIRES,\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botbuilder-dialogs/setup.py"}]} | 1,203 | 141 |
gh_patches_debug_60913 | rasdani/github-patches | git_diff | holoviz__hvplot-494 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Entrypoint broken
The setup.py specifies `hvplot.__main__` as a console_script but that doesn't actually exist.
--- END ISSUE ---
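A one-liner reproduces the breakage the issue describes (a sketch; it only confirms that the module named by the entry point is not shipped):

```python
import importlib

try:
    importlib.import_module("hvplot.__main__")
except ModuleNotFoundError as err:
    # The generated `hvplot` console script fails at startup for the same reason.
    print(err)
```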
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import os
2 import sys
3 import shutil
4 from collections import defaultdict
5 from setuptools import setup, find_packages
6
7
8 ########## autover ##########
9
10 def embed_version(basepath, ref='v0.2.2'):
11 """
12 Autover is purely a build time dependency in all cases (conda and
13 pip) except for when you use pip's remote git support [git+url] as
14 1) you need a dynamically changing version and 2) the environment
15 starts off clean with zero dependencies installed.
16 This function acts as a fallback to make Version available until
17 PEP518 is commonly supported by pip to express build dependencies.
18 """
19 import io, zipfile, importlib
20 try: from urllib.request import urlopen
21 except: from urllib import urlopen
22 try:
23 url = 'https://github.com/ioam/autover/archive/{ref}.zip'
24 response = urlopen(url.format(ref=ref))
25 zf = zipfile.ZipFile(io.BytesIO(response.read()))
26 ref = ref[1:] if ref.startswith('v') else ref
27 embed_version = zf.read('autover-{ref}/autover/version.py'.format(ref=ref))
28 with open(os.path.join(basepath, 'version.py'), 'wb') as f:
29 f.write(embed_version)
30 return importlib.import_module("version")
31 except:
32 return None
33
34 def get_setup_version(reponame):
35 """
36 Helper to get the current version from either git describe or the
37 .version file (if available).
38 """
39 import json
40 basepath = os.path.split(__file__)[0]
41 version_file_path = os.path.join(basepath, reponame, '.version')
42 try:
43 from param import version
44 except:
45 version = embed_version(basepath)
46 if version is not None:
47 return version.Version.setup_version(basepath, reponame, archive_commit="$Format:%h$")
48 else:
49 print("WARNING: param>=1.6.0 unavailable. If you are installing a package, this warning can safely be ignored. If you are creating a package or otherwise operating in a git repository, you should install param>=1.6.0.")
50 return json.load(open(version_file_path, 'r'))['version_string']
51
52 ########## examples ##########
53
54 def check_pseudo_package(path):
55 """
56 Verifies that a fake subpackage path for assets (notebooks, svgs,
57 pngs etc) both exists and is populated with files.
58 """
59 if not os.path.isdir(path):
60 raise Exception("Please make sure pseudo-package %s exists." % path)
61 else:
62 assets = os.listdir(path)
63 if len(assets) == 0:
64 raise Exception("Please make sure pseudo-package %s is populated." % path)
65
66
67 excludes = ['DS_Store', '.log', 'ipynb_checkpoints']
68 packages = []
69 extensions = defaultdict(list)
70
71 def walker(top, names):
72 """
73 Walks a directory and records all packages and file extensions.
74 """
75 global packages, extensions
76 if any(exc in top for exc in excludes):
77 return
78 package = top[top.rfind('hvplot'):].replace(os.path.sep, '.')
79 packages.append(package)
80 for name in names:
81 ext = '.'.join(name.split('.')[1:])
82 ext_str = '*.%s' % ext
83 if ext and ext not in excludes and ext_str not in extensions[package]:
84 extensions[package].append(ext_str)
85
86
87 def examples(path='hvplot-examples', verbose=False, force=False, root=__file__):
88 """
89 Copies the notebooks to the supplied path.
90 """
91 filepath = os.path.abspath(os.path.dirname(root))
92 example_dir = os.path.join(filepath, './examples')
93 if not os.path.exists(example_dir):
94 example_dir = os.path.join(filepath, '../examples')
95 if os.path.exists(path):
96 if not force:
97 print('%s directory already exists, either delete it or set the force flag' % path)
98 return
99 shutil.rmtree(path)
100 ignore = shutil.ignore_patterns('.ipynb_checkpoints', '*.pyc', '*~')
101 tree_root = os.path.abspath(example_dir)
102 if os.path.isdir(tree_root):
103 shutil.copytree(tree_root, path, ignore=ignore, symlinks=True)
104 else:
105 print('Cannot find %s' % tree_root)
106
107
108
109 def package_assets(example_path):
110 """
111 Generates pseudo-packages for the examples directory.
112 """
113 examples(example_path, force=True, root=__file__)
114 for root, dirs, files in os.walk(example_path):
115 walker(root, dirs+files)
116 setup_args['packages'] += packages
117 for p, exts in extensions.items():
118 if exts:
119 setup_args['package_data'][p] = exts
120
121
122 ########## dependencies ##########
123
124 install_requires = [
125 'bokeh >=1.0.0',
126 'colorcet >=2',
127 'holoviews >=1.11.0',
128 'pandas',
129 'numpy>=1.15'
130 ]
131
132 _examples = [
133 'geoviews >=1.6.0',
134 'panel',
135 'geopandas',
136 'xarray',
137 'networkx',
138 'streamz >=0.3.0',
139 'intake',
140 'intake-parquet',
141 'intake-xarray',
142 'dask',
143 'datashader >=0.6.5',
144 'notebook >=5.4',
145 'rasterio',
146 's3fs',
147 'scipy',
148 'pillow',
149 'selenium',
150 'spatialpandas',
151 'scikit-image'
152 ]
153
154 _examples_extra = _examples + [
155 'pygraphviz',
156 ]
157
158 extras_require = {
159 'tests': [
160 'coveralls',
161 'nose',
162 'flake8',
163 'parameterized',
164 'pytest',
165 'nbsmoke >=0.2.0',
166 ],
167 'examples': _examples,
168 'examples_extra': _examples_extra,
169 'doc': _examples_extra + [
170 'nbsite >=0.5.1',
171 'sphinx_holoviz_theme',
172 'tornado <6.0'
173 ]
174 }
175
176 # until pyproject.toml/equivalent is widely supported (setup_requires
177 # doesn't work well with pip)
178 extras_require['build'] = [
179 'param >=1.6.1',
180 'setuptools' # should make this pip now
181 ]
182
183 extras_require['all'] = sorted(set(sum(extras_require.values(), [])))
184
185 ########## metadata for setuptools ##########
186
187 setup_args = dict(
188 name='hvplot',
189 version=get_setup_version("hvplot"),
190 description='A high-level plotting API for the PyData ecosystem built on HoloViews.',
191 long_description=open("README.md").read(),
192 long_description_content_type="text/markdown",
193 author= "Philipp Rudiger",
194 author_email= "[email protected]",
195 maintainer="HoloViz developers",
196 maintainer_email="[email protected]",
197 packages=find_packages()+packages,
198 package_data={'hvplot': ['.version']},
199 platforms=['Windows', 'Mac OS X', 'Linux'],
200 license='BSD',
201 url='https://hvplot.pyviz.org',
202 classifiers = [
203 "License :: OSI Approved :: BSD License",
204 "Development Status :: 5 - Production/Stable",
205 "Programming Language :: Python :: 2",
206 "Programming Language :: Python :: 2.7",
207 "Programming Language :: Python :: 3.5",
208 "Programming Language :: Python :: 3.6",
209 "Operating System :: OS Independent",
210 "Intended Audience :: Science/Research",
211 "Intended Audience :: Developers",
212 "Natural Language :: English",
213 "Topic :: Scientific/Engineering",
214 "Topic :: Software Development :: Libraries"],
215 python_requires=">=2.7",
216 install_requires=install_requires,
217 extras_require=extras_require,
218 tests_require=extras_require['tests'],
219 entry_points={
220 'console_scripts': [
221 'hvplot = hvplot.__main__:main'
222 ],
223 'pandas_plotting_backends': [
224 'holoviews = hvplot:plotting',
225 ],
226 },
227 )
228
229
230 if __name__ == '__main__':
231 example_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
232 'hvplot','examples')
233 if 'develop' not in sys.argv:
234 package_assets(example_path)
235
236 setup(**setup_args)
237
238 if os.path.isdir(example_path):
239 shutil.rmtree(example_path)
240
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -217,9 +217,7 @@
extras_require=extras_require,
tests_require=extras_require['tests'],
entry_points={
- 'console_scripts': [
- 'hvplot = hvplot.__main__:main'
- ],
+ 'console_scripts': [],
'pandas_plotting_backends': [
'holoviews = hvplot:plotting',
],
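Dropping the console script leaves the pandas plotting backend registration intact; a short usage sketch of that remaining entry point (assumes pandas >= 0.25, which introduced third-party plotting backends, and an installed hvplot):

```python
import pandas as pd

pd.options.plotting.backend = "holoviews"  # the name registered by the entry point
df = pd.DataFrame({"x": range(10), "y": [v * v for v in range(10)]})
plot = df.plot(x="x", y="y")  # dispatched to hvplot's plotting module
```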
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -217,9 +217,7 @@\n extras_require=extras_require,\n tests_require=extras_require['tests'],\n entry_points={\n- 'console_scripts': [\n- 'hvplot = hvplot.__main__:main'\n- ],\n+ 'console_scripts': [],\n 'pandas_plotting_backends': [\n 'holoviews = hvplot:plotting',\n ],\n", "issue": "Entrypoint broken\nThe setup.py specifies `hvplot.__main__` as a console_script but that doesn't actually exist.\n", "before_files": [{"content": "import os\nimport sys\nimport shutil\nfrom collections import defaultdict\nfrom setuptools import setup, find_packages\n\n\n########## autover ##########\n\ndef embed_version(basepath, ref='v0.2.2'):\n \"\"\"\n Autover is purely a build time dependency in all cases (conda and\n pip) except for when you use pip's remote git support [git+url] as\n 1) you need a dynamically changing version and 2) the environment\n starts off clean with zero dependencies installed.\n This function acts as a fallback to make Version available until\n PEP518 is commonly supported by pip to express build dependencies.\n \"\"\"\n import io, zipfile, importlib\n try: from urllib.request import urlopen\n except: from urllib import urlopen\n try:\n url = 'https://github.com/ioam/autover/archive/{ref}.zip'\n response = urlopen(url.format(ref=ref))\n zf = zipfile.ZipFile(io.BytesIO(response.read()))\n ref = ref[1:] if ref.startswith('v') else ref\n embed_version = zf.read('autover-{ref}/autover/version.py'.format(ref=ref))\n with open(os.path.join(basepath, 'version.py'), 'wb') as f:\n f.write(embed_version)\n return importlib.import_module(\"version\")\n except:\n return None\n\ndef get_setup_version(reponame):\n \"\"\"\n Helper to get the current version from either git describe or the\n .version file (if available).\n \"\"\"\n import json\n basepath = os.path.split(__file__)[0]\n version_file_path = os.path.join(basepath, reponame, '.version')\n try:\n from param import version\n except:\n version = embed_version(basepath)\n if version is not None:\n return version.Version.setup_version(basepath, reponame, archive_commit=\"$Format:%h$\")\n else:\n print(\"WARNING: param>=1.6.0 unavailable. If you are installing a package, this warning can safely be ignored. 
If you are creating a package or otherwise operating in a git repository, you should install param>=1.6.0.\")\n return json.load(open(version_file_path, 'r'))['version_string']\n\n########## examples ##########\n\ndef check_pseudo_package(path):\n \"\"\"\n Verifies that a fake subpackage path for assets (notebooks, svgs,\n pngs etc) both exists and is populated with files.\n \"\"\"\n if not os.path.isdir(path):\n raise Exception(\"Please make sure pseudo-package %s exists.\" % path)\n else:\n assets = os.listdir(path)\n if len(assets) == 0:\n raise Exception(\"Please make sure pseudo-package %s is populated.\" % path)\n\n\nexcludes = ['DS_Store', '.log', 'ipynb_checkpoints']\npackages = []\nextensions = defaultdict(list)\n\ndef walker(top, names):\n \"\"\"\n Walks a directory and records all packages and file extensions.\n \"\"\"\n global packages, extensions\n if any(exc in top for exc in excludes):\n return\n package = top[top.rfind('hvplot'):].replace(os.path.sep, '.')\n packages.append(package)\n for name in names:\n ext = '.'.join(name.split('.')[1:])\n ext_str = '*.%s' % ext\n if ext and ext not in excludes and ext_str not in extensions[package]:\n extensions[package].append(ext_str)\n\n\ndef examples(path='hvplot-examples', verbose=False, force=False, root=__file__):\n \"\"\"\n Copies the notebooks to the supplied path.\n \"\"\"\n filepath = os.path.abspath(os.path.dirname(root))\n example_dir = os.path.join(filepath, './examples')\n if not os.path.exists(example_dir):\n example_dir = os.path.join(filepath, '../examples')\n if os.path.exists(path):\n if not force:\n print('%s directory already exists, either delete it or set the force flag' % path)\n return\n shutil.rmtree(path)\n ignore = shutil.ignore_patterns('.ipynb_checkpoints', '*.pyc', '*~')\n tree_root = os.path.abspath(example_dir)\n if os.path.isdir(tree_root):\n shutil.copytree(tree_root, path, ignore=ignore, symlinks=True)\n else:\n print('Cannot find %s' % tree_root)\n\n\n\ndef package_assets(example_path):\n \"\"\"\n Generates pseudo-packages for the examples directory.\n \"\"\"\n examples(example_path, force=True, root=__file__)\n for root, dirs, files in os.walk(example_path):\n walker(root, dirs+files)\n setup_args['packages'] += packages\n for p, exts in extensions.items():\n if exts:\n setup_args['package_data'][p] = exts\n\n\n########## dependencies ##########\n\ninstall_requires = [\n 'bokeh >=1.0.0',\n 'colorcet >=2',\n 'holoviews >=1.11.0',\n 'pandas',\n 'numpy>=1.15'\n]\n\n_examples = [\n 'geoviews >=1.6.0',\n 'panel',\n 'geopandas',\n 'xarray',\n 'networkx',\n 'streamz >=0.3.0',\n 'intake',\n 'intake-parquet',\n 'intake-xarray',\n 'dask',\n 'datashader >=0.6.5',\n 'notebook >=5.4',\n 'rasterio',\n 's3fs',\n 'scipy',\n 'pillow',\n 'selenium',\n 'spatialpandas',\n 'scikit-image'\n]\n\n_examples_extra = _examples + [\n 'pygraphviz',\n]\n\nextras_require = {\n 'tests': [\n 'coveralls',\n 'nose',\n 'flake8',\n 'parameterized',\n 'pytest',\n 'nbsmoke >=0.2.0',\n ],\n 'examples': _examples,\n 'examples_extra': _examples_extra,\n 'doc': _examples_extra + [\n 'nbsite >=0.5.1',\n 'sphinx_holoviz_theme',\n 'tornado <6.0'\n ]\n}\n\n# until pyproject.toml/equivalent is widely supported (setup_requires\n# doesn't work well with pip)\nextras_require['build'] = [\n 'param >=1.6.1',\n 'setuptools' # should make this pip now\n]\n\nextras_require['all'] = sorted(set(sum(extras_require.values(), [])))\n\n########## metadata for setuptools ##########\n\nsetup_args = dict(\n name='hvplot',\n 
version=get_setup_version(\"hvplot\"),\n description='A high-level plotting API for the PyData ecosystem built on HoloViews.',\n long_description=open(\"README.md\").read(),\n long_description_content_type=\"text/markdown\",\n author= \"Philipp Rudiger\",\n author_email= \"[email protected]\",\n maintainer=\"HoloViz developers\",\n maintainer_email=\"[email protected]\",\n packages=find_packages()+packages,\n package_data={'hvplot': ['.version']},\n platforms=['Windows', 'Mac OS X', 'Linux'],\n license='BSD',\n url='https://hvplot.pyviz.org',\n classifiers = [\n \"License :: OSI Approved :: BSD License\",\n \"Development Status :: 5 - Production/Stable\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Operating System :: OS Independent\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: Developers\",\n \"Natural Language :: English\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Software Development :: Libraries\"],\n python_requires=\">=2.7\",\n install_requires=install_requires,\n extras_require=extras_require,\n tests_require=extras_require['tests'],\n entry_points={\n 'console_scripts': [\n 'hvplot = hvplot.__main__:main'\n ],\n 'pandas_plotting_backends': [\n 'holoviews = hvplot:plotting',\n ],\n },\n)\n\n\nif __name__ == '__main__':\n example_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n 'hvplot','examples')\n if 'develop' not in sys.argv:\n package_assets(example_path)\n\n setup(**setup_args)\n\n if os.path.isdir(example_path):\n shutil.rmtree(example_path)\n", "path": "setup.py"}], "after_files": [{"content": "import os\nimport sys\nimport shutil\nfrom collections import defaultdict\nfrom setuptools import setup, find_packages\n\n\n########## autover ##########\n\ndef embed_version(basepath, ref='v0.2.2'):\n \"\"\"\n Autover is purely a build time dependency in all cases (conda and\n pip) except for when you use pip's remote git support [git+url] as\n 1) you need a dynamically changing version and 2) the environment\n starts off clean with zero dependencies installed.\n This function acts as a fallback to make Version available until\n PEP518 is commonly supported by pip to express build dependencies.\n \"\"\"\n import io, zipfile, importlib\n try: from urllib.request import urlopen\n except: from urllib import urlopen\n try:\n url = 'https://github.com/ioam/autover/archive/{ref}.zip'\n response = urlopen(url.format(ref=ref))\n zf = zipfile.ZipFile(io.BytesIO(response.read()))\n ref = ref[1:] if ref.startswith('v') else ref\n embed_version = zf.read('autover-{ref}/autover/version.py'.format(ref=ref))\n with open(os.path.join(basepath, 'version.py'), 'wb') as f:\n f.write(embed_version)\n return importlib.import_module(\"version\")\n except:\n return None\n\ndef get_setup_version(reponame):\n \"\"\"\n Helper to get the current version from either git describe or the\n .version file (if available).\n \"\"\"\n import json\n basepath = os.path.split(__file__)[0]\n version_file_path = os.path.join(basepath, reponame, '.version')\n try:\n from param import version\n except:\n version = embed_version(basepath)\n if version is not None:\n return version.Version.setup_version(basepath, reponame, archive_commit=\"$Format:%h$\")\n else:\n print(\"WARNING: param>=1.6.0 unavailable. If you are installing a package, this warning can safely be ignored. 
If you are creating a package or otherwise operating in a git repository, you should install param>=1.6.0.\")\n return json.load(open(version_file_path, 'r'))['version_string']\n\n########## examples ##########\n\ndef check_pseudo_package(path):\n \"\"\"\n Verifies that a fake subpackage path for assets (notebooks, svgs,\n pngs etc) both exists and is populated with files.\n \"\"\"\n if not os.path.isdir(path):\n raise Exception(\"Please make sure pseudo-package %s exists.\" % path)\n else:\n assets = os.listdir(path)\n if len(assets) == 0:\n raise Exception(\"Please make sure pseudo-package %s is populated.\" % path)\n\n\nexcludes = ['DS_Store', '.log', 'ipynb_checkpoints']\npackages = []\nextensions = defaultdict(list)\n\ndef walker(top, names):\n \"\"\"\n Walks a directory and records all packages and file extensions.\n \"\"\"\n global packages, extensions\n if any(exc in top for exc in excludes):\n return\n package = top[top.rfind('hvplot'):].replace(os.path.sep, '.')\n packages.append(package)\n for name in names:\n ext = '.'.join(name.split('.')[1:])\n ext_str = '*.%s' % ext\n if ext and ext not in excludes and ext_str not in extensions[package]:\n extensions[package].append(ext_str)\n\n\ndef examples(path='hvplot-examples', verbose=False, force=False, root=__file__):\n \"\"\"\n Copies the notebooks to the supplied path.\n \"\"\"\n filepath = os.path.abspath(os.path.dirname(root))\n example_dir = os.path.join(filepath, './examples')\n if not os.path.exists(example_dir):\n example_dir = os.path.join(filepath, '../examples')\n if os.path.exists(path):\n if not force:\n print('%s directory already exists, either delete it or set the force flag' % path)\n return\n shutil.rmtree(path)\n ignore = shutil.ignore_patterns('.ipynb_checkpoints', '*.pyc', '*~')\n tree_root = os.path.abspath(example_dir)\n if os.path.isdir(tree_root):\n shutil.copytree(tree_root, path, ignore=ignore, symlinks=True)\n else:\n print('Cannot find %s' % tree_root)\n\n\n\ndef package_assets(example_path):\n \"\"\"\n Generates pseudo-packages for the examples directory.\n \"\"\"\n examples(example_path, force=True, root=__file__)\n for root, dirs, files in os.walk(example_path):\n walker(root, dirs+files)\n setup_args['packages'] += packages\n for p, exts in extensions.items():\n if exts:\n setup_args['package_data'][p] = exts\n\n\n########## dependencies ##########\n\ninstall_requires = [\n 'bokeh >=1.0.0',\n 'colorcet >=2',\n 'holoviews >=1.11.0',\n 'pandas',\n 'numpy>=1.15'\n]\n\n_examples = [\n 'geoviews >=1.6.0',\n 'panel',\n 'geopandas',\n 'xarray',\n 'networkx',\n 'streamz >=0.3.0',\n 'intake',\n 'intake-parquet',\n 'intake-xarray',\n 'dask',\n 'datashader >=0.6.5',\n 'notebook >=5.4',\n 'rasterio',\n 's3fs',\n 'scipy',\n 'pillow',\n 'selenium',\n 'spatialpandas',\n 'scikit-image'\n]\n\n_examples_extra = _examples + [\n 'pygraphviz',\n]\n\nextras_require = {\n 'tests': [\n 'coveralls',\n 'nose',\n 'flake8',\n 'parameterized',\n 'pytest',\n 'nbsmoke >=0.2.0',\n ],\n 'examples': _examples,\n 'examples_extra': _examples_extra,\n 'doc': _examples_extra + [\n 'nbsite >=0.5.1',\n 'sphinx_holoviz_theme',\n 'tornado <6.0'\n ]\n}\n\n# until pyproject.toml/equivalent is widely supported (setup_requires\n# doesn't work well with pip)\nextras_require['build'] = [\n 'param >=1.6.1',\n 'setuptools' # should make this pip now\n]\n\nextras_require['all'] = sorted(set(sum(extras_require.values(), [])))\n\n########## metadata for setuptools ##########\n\nsetup_args = dict(\n name='hvplot',\n 
version=get_setup_version(\"hvplot\"),\n description='A high-level plotting API for the PyData ecosystem built on HoloViews.',\n long_description=open(\"README.md\").read(),\n long_description_content_type=\"text/markdown\",\n author= \"Philipp Rudiger\",\n author_email= \"[email protected]\",\n maintainer=\"HoloViz developers\",\n maintainer_email=\"[email protected]\",\n packages=find_packages()+packages,\n package_data={'hvplot': ['.version']},\n platforms=['Windows', 'Mac OS X', 'Linux'],\n license='BSD',\n url='https://hvplot.pyviz.org',\n classifiers = [\n \"License :: OSI Approved :: BSD License\",\n \"Development Status :: 5 - Production/Stable\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Operating System :: OS Independent\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: Developers\",\n \"Natural Language :: English\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Software Development :: Libraries\"],\n python_requires=\">=2.7\",\n install_requires=install_requires,\n extras_require=extras_require,\n tests_require=extras_require['tests'],\n entry_points={\n 'console_scripts': [],\n 'pandas_plotting_backends': [\n 'holoviews = hvplot:plotting',\n ],\n },\n)\n\n\nif __name__ == '__main__':\n example_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n 'hvplot','examples')\n if 'develop' not in sys.argv:\n package_assets(example_path)\n\n setup(**setup_args)\n\n if os.path.isdir(example_path):\n shutil.rmtree(example_path)\n", "path": "setup.py"}]} | 2,740 | 109 |
gh_patches_debug_21491 | rasdani/github-patches | git_diff | plone__Products.CMFPlone-2793 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
verifydb fails with debug in some cases
Using `./bin/instance verifydb -D` to check the `Data.fs` it fails in some cases:
```
Traceback (most recent call last):
File "/Users/pbauer/.cache/buildout/eggs/Products.CMFPlone-5.2rc1-py3.7.egg/Products/CMFPlone/_scripts/verifydb.py", line 68, in verify_record
class_info = unpickler.load()
File "/Users/pbauer/.cache/buildout/eggs/ZODB-5.5.1-py3.7.egg/ZODB/_compat.py", line 62, in find_class
return super(Unpickler, self).find_class(modulename, name)
ModuleNotFoundError: No module named 'Products.Archetypes'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "./bin/instance", line 262, in <module>
+ sys.argv[1:]))
File "/Users/pbauer/.cache/buildout/eggs/plone.recipe.zope2instance-6.1.3-py3.7.egg/plone/recipe/zope2instance/ctl.py", line 937, in main
c.onecmd(' '.join(options.args))
File "/usr/local/Cellar/python/3.7.2_2/Frameworks/Python.framework/Versions/3.7/lib/python3.7/cmd.py", line 217, in onecmd
return func(arg)
File "/Users/pbauer/.cache/buildout/eggs/Products.CMFPlone-5.2rc1-py3.7.egg/Products/CMFPlone/_scripts/verifydb.py", line 31, in zopectl_entry
verify_zodb(app, debug=options.debug)
File "/Users/pbauer/.cache/buildout/eggs/Products.CMFPlone-5.2rc1-py3.7.egg/Products/CMFPlone/_scripts/verifydb.py", line 50, in verify_zodb
success = verify_record(oid, data, debug)
File "/Users/pbauer/.cache/buildout/eggs/Products.CMFPlone-5.2rc1-py3.7.egg/Products/CMFPlone/_scripts/verifydb.py", line 82, in verify_record
pickletools.dis(pickle[pos:])
UnboundLocalError: local variable 'pos' referenced before assignment
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Products/CMFPlone/_scripts/verifydb.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from Zope2.Startup.run import make_wsgi_app
3 from ZODB.interfaces import IStorageCurrentRecordIteration
4 from ZODB.serialize import PersistentUnpickler
5
6 import argparse
7 import io
8 import logging
9 import pdb
10 import pickletools
11 import sys
12 import traceback
13 import Zope2
14
15 logger = logging.getLogger('zodbverify')
16
17
18 def zopectl_entry(self, arg):
19 parser = argparse.ArgumentParser(
20 prog=sys.argv[0] + ' verifydb',
21 description='Verifies that all records in the database can be loaded.',
22 )
23 parser.add_argument(
24 '-D', '--debug', action='store_true', dest='debug',
25 help='pause to debug broken pickles')
26 options = parser.parse_args(arg.split(' ') if arg else [])
27
28 logging.basicConfig(level=logging.INFO)
29 make_wsgi_app({}, self.options.configfile)
30 app = Zope2.app()
31 verify_zodb(app, debug=options.debug)
32
33
34 def verify_zodb(obj, debug=False):
35 storage = obj._p_jar._db._storage
36 if not IStorageCurrentRecordIteration.providedBy(storage):
37 raise TypeError(
38 'ZODB storage {} does not implement record_iternext'.format(
39 storage))
40
41 logger.info('Scanning ZODB...')
42
43 next_ = None
44 count = 0
45 errors = 0
46 while True:
47 count += 1
48 oid, tid, data, next_ = storage.record_iternext(next_)
49 logger.debug('Verifying {}'.format(oid))
50 success = verify_record(oid, data, debug)
51 if not success:
52 errors += 1
53 if next_ is None:
54 break
55
56 logger.info(
57 'Done! Scanned {} records. '
58 'Found {} records that could not be loaded.'.format(
59 count, errors)
60 )
61
62
63 def verify_record(oid, data, debug=False):
64 input_file = io.BytesIO(data)
65 unpickler = PersistentUnpickler(None, persistent_load, input_file)
66 class_info = 'unknown'
67 try:
68 class_info = unpickler.load()
69 pos = input_file.tell()
70 unpickler.load()
71 except Exception:
72 input_file.seek(0)
73 pickle = input_file.read()
74 logger.info('\nCould not process {} record {}:'.format(
75 class_info,
76 repr(oid),
77 ))
78 logger.info(repr(pickle))
79 logger.info(traceback.format_exc())
80 if debug:
81 try:
82 pickletools.dis(pickle[pos:])
83 finally:
84 pdb.set_trace()
85 return False
86 return True
87
88
89 def persistent_load(ref):
90 pass
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/Products/CMFPlone/_scripts/verifydb.py b/Products/CMFPlone/_scripts/verifydb.py
--- a/Products/CMFPlone/_scripts/verifydb.py
+++ b/Products/CMFPlone/_scripts/verifydb.py
@@ -64,6 +64,7 @@
input_file = io.BytesIO(data)
unpickler = PersistentUnpickler(None, persistent_load, input_file)
class_info = 'unknown'
+ pos = None
try:
class_info = unpickler.load()
pos = input_file.tell()
@@ -77,11 +78,17 @@
))
logger.info(repr(pickle))
logger.info(traceback.format_exc())
- if debug:
+ if debug and pos is not None:
try:
pickletools.dis(pickle[pos:])
+ except Exception:
+ # ignore exceptions while disassembling the pickle since the
+ # real issue is that it references a unavailable module
+ pass
finally:
pdb.set_trace()
+ elif debug and pos is None:
+ pdb.set_trace()
return False
return True
| {"golden_diff": "diff --git a/Products/CMFPlone/_scripts/verifydb.py b/Products/CMFPlone/_scripts/verifydb.py\n--- a/Products/CMFPlone/_scripts/verifydb.py\n+++ b/Products/CMFPlone/_scripts/verifydb.py\n@@ -64,6 +64,7 @@\n input_file = io.BytesIO(data)\n unpickler = PersistentUnpickler(None, persistent_load, input_file)\n class_info = 'unknown'\n+ pos = None\n try:\n class_info = unpickler.load()\n pos = input_file.tell()\n@@ -77,11 +78,17 @@\n ))\n logger.info(repr(pickle))\n logger.info(traceback.format_exc())\n- if debug:\n+ if debug and pos is not None:\n try:\n pickletools.dis(pickle[pos:])\n+ except Exception:\n+ # ignore exceptions while disassembling the pickle since the\n+ # real issue is that it references a unavailable module\n+ pass\n finally:\n pdb.set_trace()\n+ elif debug and pos is None:\n+ pdb.set_trace()\n return False\n return True\n", "issue": "verifydb fails with debug in some cases\nUsing `./bin/instance verifydb -D` to check the `Data.fs` it fails in some cases:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/Users/pbauer/.cache/buildout/eggs/Products.CMFPlone-5.2rc1-py3.7.egg/Products/CMFPlone/_scripts/verifydb.py\", line 68, in verify_record\r\n class_info = unpickler.load()\r\n File \"/Users/pbauer/.cache/buildout/eggs/ZODB-5.5.1-py3.7.egg/ZODB/_compat.py\", line 62, in find_class\r\n return super(Unpickler, self).find_class(modulename, name)\r\nModuleNotFoundError: No module named 'Products.Archetypes'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"./bin/instance\", line 262, in <module>\r\n + sys.argv[1:]))\r\n File \"/Users/pbauer/.cache/buildout/eggs/plone.recipe.zope2instance-6.1.3-py3.7.egg/plone/recipe/zope2instance/ctl.py\", line 937, in main\r\n c.onecmd(' '.join(options.args))\r\n File \"/usr/local/Cellar/python/3.7.2_2/Frameworks/Python.framework/Versions/3.7/lib/python3.7/cmd.py\", line 217, in onecmd\r\n return func(arg)\r\n File \"/Users/pbauer/.cache/buildout/eggs/Products.CMFPlone-5.2rc1-py3.7.egg/Products/CMFPlone/_scripts/verifydb.py\", line 31, in zopectl_entry\r\n verify_zodb(app, debug=options.debug)\r\n File \"/Users/pbauer/.cache/buildout/eggs/Products.CMFPlone-5.2rc1-py3.7.egg/Products/CMFPlone/_scripts/verifydb.py\", line 50, in verify_zodb\r\n success = verify_record(oid, data, debug)\r\n File \"/Users/pbauer/.cache/buildout/eggs/Products.CMFPlone-5.2rc1-py3.7.egg/Products/CMFPlone/_scripts/verifydb.py\", line 82, in verify_record\r\n pickletools.dis(pickle[pos:])\r\nUnboundLocalError: local variable 'pos' referenced before assignment\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom Zope2.Startup.run import make_wsgi_app\nfrom ZODB.interfaces import IStorageCurrentRecordIteration\nfrom ZODB.serialize import PersistentUnpickler\n\nimport argparse\nimport io\nimport logging\nimport pdb\nimport pickletools\nimport sys\nimport traceback\nimport Zope2\n\nlogger = logging.getLogger('zodbverify')\n\n\ndef zopectl_entry(self, arg):\n parser = argparse.ArgumentParser(\n prog=sys.argv[0] + ' verifydb',\n description='Verifies that all records in the database can be loaded.',\n )\n parser.add_argument(\n '-D', '--debug', action='store_true', dest='debug',\n help='pause to debug broken pickles')\n options = parser.parse_args(arg.split(' ') if arg else [])\n\n logging.basicConfig(level=logging.INFO)\n make_wsgi_app({}, self.options.configfile)\n app = Zope2.app()\n verify_zodb(app, debug=options.debug)\n\n\ndef verify_zodb(obj, 
debug=False):\n storage = obj._p_jar._db._storage\n if not IStorageCurrentRecordIteration.providedBy(storage):\n raise TypeError(\n 'ZODB storage {} does not implement record_iternext'.format(\n storage))\n\n logger.info('Scanning ZODB...')\n\n next_ = None\n count = 0\n errors = 0\n while True:\n count += 1\n oid, tid, data, next_ = storage.record_iternext(next_)\n logger.debug('Verifying {}'.format(oid))\n success = verify_record(oid, data, debug)\n if not success:\n errors += 1\n if next_ is None:\n break\n\n logger.info(\n 'Done! Scanned {} records. '\n 'Found {} records that could not be loaded.'.format(\n count, errors)\n )\n\n\ndef verify_record(oid, data, debug=False):\n input_file = io.BytesIO(data)\n unpickler = PersistentUnpickler(None, persistent_load, input_file)\n class_info = 'unknown'\n try:\n class_info = unpickler.load()\n pos = input_file.tell()\n unpickler.load()\n except Exception:\n input_file.seek(0)\n pickle = input_file.read()\n logger.info('\\nCould not process {} record {}:'.format(\n class_info,\n repr(oid),\n ))\n logger.info(repr(pickle))\n logger.info(traceback.format_exc())\n if debug:\n try:\n pickletools.dis(pickle[pos:])\n finally:\n pdb.set_trace()\n return False\n return True\n\n\ndef persistent_load(ref):\n pass\n", "path": "Products/CMFPlone/_scripts/verifydb.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom Zope2.Startup.run import make_wsgi_app\nfrom ZODB.interfaces import IStorageCurrentRecordIteration\nfrom ZODB.serialize import PersistentUnpickler\n\nimport argparse\nimport io\nimport logging\nimport pdb\nimport pickletools\nimport sys\nimport traceback\nimport Zope2\n\nlogger = logging.getLogger('zodbverify')\n\n\ndef zopectl_entry(self, arg):\n parser = argparse.ArgumentParser(\n prog=sys.argv[0] + ' verifydb',\n description='Verifies that all records in the database can be loaded.',\n )\n parser.add_argument(\n '-D', '--debug', action='store_true', dest='debug',\n help='pause to debug broken pickles')\n options = parser.parse_args(arg.split(' ') if arg else [])\n\n logging.basicConfig(level=logging.INFO)\n make_wsgi_app({}, self.options.configfile)\n app = Zope2.app()\n verify_zodb(app, debug=options.debug)\n\n\ndef verify_zodb(obj, debug=False):\n storage = obj._p_jar._db._storage\n if not IStorageCurrentRecordIteration.providedBy(storage):\n raise TypeError(\n 'ZODB storage {} does not implement record_iternext'.format(\n storage))\n\n logger.info('Scanning ZODB...')\n\n next_ = None\n count = 0\n errors = 0\n while True:\n count += 1\n oid, tid, data, next_ = storage.record_iternext(next_)\n logger.debug('Verifying {}'.format(oid))\n success = verify_record(oid, data, debug)\n if not success:\n errors += 1\n if next_ is None:\n break\n\n logger.info(\n 'Done! Scanned {} records. 
'\n 'Found {} records that could not be loaded.'.format(\n count, errors)\n )\n\n\ndef verify_record(oid, data, debug=False):\n input_file = io.BytesIO(data)\n unpickler = PersistentUnpickler(None, persistent_load, input_file)\n class_info = 'unknown'\n pos = None\n try:\n class_info = unpickler.load()\n pos = input_file.tell()\n unpickler.load()\n except Exception:\n input_file.seek(0)\n pickle = input_file.read()\n logger.info('\\nCould not process {} record {}:'.format(\n class_info,\n repr(oid),\n ))\n logger.info(repr(pickle))\n logger.info(traceback.format_exc())\n if debug and pos is not None:\n try:\n pickletools.dis(pickle[pos:])\n except Exception:\n # ignore exceptions while disassembling the pickle since the\n # real issue is that it references a unavailable module\n pass\n finally:\n pdb.set_trace()\n elif debug and pos is None:\n pdb.set_trace()\n return False\n return True\n\n\ndef persistent_load(ref):\n pass\n", "path": "Products/CMFPlone/_scripts/verifydb.py"}]} | 1,572 | 259 |
gh_patches_debug_14450 | rasdani/github-patches | git_diff | ivy-llc__ivy-13814 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
lp_pool2d
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/torch/nn/functional/pooling_functions.py`
Content:
```
1 # global
2
3 # local
4 import ivy
5 from ivy import with_unsupported_dtypes
6 from ivy.functional.frontends.tensorflow.func_wrapper import (
7 to_ivy_arrays_and_back,
8 )
9
10
11 def _broadcast_pooling_helper(x, pool_dims: str = "2d", name: str = "padding"):
12 dims = {"1d": 1, "2d": 2, "3d": 3}
13
14 if isinstance(x, int):
15 return tuple([x for _ in range(dims[pool_dims])])
16
17 if len(x) == 1:
18 return tuple([x[0] for _ in range(dims[pool_dims])])
19 elif len(x) == dims[pool_dims]:
20 return tuple(x)
21 elif len(x) != dims[pool_dims]:
22 raise ValueError(
23 f"`{name}` must either be a single int, "
24 f"or a tuple of {dims[pool_dims]} ints. "
25 )
26
27
28 @to_ivy_arrays_and_back
29 def avg_pool1d(
30 input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True
31 ):
32 kernel_size = _broadcast_pooling_helper(kernel_size, "1d", name="kernel_size")
33 stride = _broadcast_pooling_helper(stride, "1d", name="stride")
34 padding = _broadcast_pooling_helper(padding, "1d", name="padding")
35 kernel_pads = list(zip(kernel_size, padding))
36
37 data_format = "NCW"
38
39 if not all([pad <= kernel / 2 for kernel, pad in kernel_pads]):
40 raise ValueError(
41 "pad should be smaller than or equal to half of kernel size, "
42 f"but got padding={padding}, kernel_size={kernel_size}. "
43 )
44 # figure out whether to apply padding
45 if all([pad == ivy.ceil((kernel - 1) / 2) for kernel, pad in kernel_pads]):
46 padding_str = "SAME"
47 else:
48 padding_str = "VALID"
49
50 return ivy.avg_pool1d(
51 input, kernel_size, stride, padding_str, data_format=data_format
52 )
53
54
55 @to_ivy_arrays_and_back
56 def avg_pool2d(
57 input,
58 kernel_size,
59 stride=None,
60 padding=0,
61 ceil_mode=False,
62 count_include_pad=True,
63 divisor_override=None,
64 ):
65 # Figure out input dims N
66 input_rank = input.ndim
67
68 if input_rank == 3:
69 # CHW
70 data_format = "CHW"
71 elif input_rank == 4:
72 # NCHW
73 data_format = "NCHW"
74
75 kernel_size = _broadcast_pooling_helper(kernel_size, "2d", name="kernel_size")
76 stride = _broadcast_pooling_helper(stride, "2d", name="stride")
77 padding = _broadcast_pooling_helper(padding, "2d", name="padding")
78 kernel_pads = list(zip(kernel_size, padding))
79
80 # Padding should be less than or equal to half of kernel size
81 if not all([pad <= kernel / 2 for kernel, pad in kernel_pads]):
82 raise ValueError(
83 "pad should be smaller than or equal to half of kernel size, "
84 f"but got padding={padding}, kernel_size={kernel_size}. "
85 )
86
87 # Figure out padding string
88 if all([pad == ivy.ceil((kernel - 1) / 2) for kernel, pad in kernel_pads]):
89 padding_str = "SAME"
90 else:
91 padding_str = "VALID"
92
93 return ivy.avg_pool2d(
94 input,
95 kernel_size,
96 stride,
97 padding_str,
98 data_format=data_format,
99 )
100
101
102 @with_unsupported_dtypes({"1.11.0 and below": ("float16",)}, "torch")
103 @to_ivy_arrays_and_back
104 def max_pool2d(
105 input,
106 kernel_size,
107 stride=None,
108 padding=0,
109 dilation=1,
110 ceil_mode=False,
111 return_indices=False,
112 ):
113 # ToDo: Add return_indices once superset in implemented
114 dim_check = False
115 if input.ndim == 3:
116 input = input.expand_dims()
117 dim_check = True
118 if not stride:
119 stride = kernel_size
120 ret = ivy.max_pool2d(
121 input,
122 kernel_size,
123 stride,
124 padding,
125 data_format="NCHW",
126 dilation=dilation,
127 ceil_mode=ceil_mode,
128 )
129 if dim_check:
130 return ret.squeeze(0)
131 return ret
132
133
134 @with_unsupported_dtypes(
135 {
136 "1.11.0 and below": (
137 "bfloat16",
138 "float16",
139 )
140 },
141 "torch",
142 )
143 @to_ivy_arrays_and_back
144 def adaptive_avg_pool1d(input, output_size):
145 return ivy.adaptive_avg_pool1d(input, output_size)
146
147
148 @with_unsupported_dtypes(
149 {
150 "1.11.0 and below": (
151 "float16",
152 "bfloat16",
153 )
154 },
155 "torch",
156 )
157 @to_ivy_arrays_and_back
158 def adaptive_avg_pool2d(input, output_size):
159 return ivy.adaptive_avg_pool2d(input, output_size)
160
161
162 @with_unsupported_dtypes(
163 {
164 "1.11.0 and below": (
165 "float16",
166 "bfloat16",
167 )
168 },
169 "torch",
170 )
171 @to_ivy_arrays_and_back
172 def lp_pool1d(input, norm_type, kernel_size, stride=None, ceil_mode=False):
173
174 data_format = "NCW"
175 padding = "VALID"
176 if stride is not None:
177 out = ivy.avg_pool1d(
178 ivy.pow(input, norm_type),
179 kernel_size,
180 stride,
181 padding,
182 data_format=data_format,
183 )
184 else:
185 out = ivy.avg_pool1d(
186 ivy.pow(input, norm_type),
187 kernel_size,
188 kernel_size,
189 padding,
190 data_format=data_format,
191 )
192
193 return ivy.pow(ivy.multiply(out, kernel_size), ivy.divide(1.0, norm_type))
194
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/frontends/torch/nn/functional/pooling_functions.py b/ivy/functional/frontends/torch/nn/functional/pooling_functions.py
--- a/ivy/functional/frontends/torch/nn/functional/pooling_functions.py
+++ b/ivy/functional/frontends/torch/nn/functional/pooling_functions.py
@@ -191,3 +191,29 @@
)
return ivy.pow(ivy.multiply(out, kernel_size), ivy.divide(1.0, norm_type))
+
+
+@to_ivy_arrays_and_back
+def lp_pool2d(input, norm_type, kernel_size, stride=None, ceil_mode=False):
+
+ data_format = "NCHW"
+ padding = "VALID"
+ if stride is not None:
+ out = ivy.avg_pool2d(
+ ivy.pow(input, norm_type),
+ kernel_size,
+ stride,
+ padding,
+ data_format=data_format,
+ )
+ else:
+ out = ivy.avg_pool2d(
+ ivy.pow(input, norm_type),
+ kernel_size,
+ kernel_size,
+ padding,
+ data_format=data_format,
+ )
+ if not isinstance(kernel_size, int):
+ kernel_size = kernel_size[0] * kernel_size[1]
+ return ivy.pow(ivy.multiply(out, kernel_size), ivy.divide(1.0, norm_type))
| {"golden_diff": "diff --git a/ivy/functional/frontends/torch/nn/functional/pooling_functions.py b/ivy/functional/frontends/torch/nn/functional/pooling_functions.py\n--- a/ivy/functional/frontends/torch/nn/functional/pooling_functions.py\n+++ b/ivy/functional/frontends/torch/nn/functional/pooling_functions.py\n@@ -191,3 +191,29 @@\n )\n \n return ivy.pow(ivy.multiply(out, kernel_size), ivy.divide(1.0, norm_type))\n+\n+\n+@to_ivy_arrays_and_back\n+def lp_pool2d(input, norm_type, kernel_size, stride=None, ceil_mode=False):\n+\n+ data_format = \"NCHW\"\n+ padding = \"VALID\"\n+ if stride is not None:\n+ out = ivy.avg_pool2d(\n+ ivy.pow(input, norm_type),\n+ kernel_size,\n+ stride,\n+ padding,\n+ data_format=data_format,\n+ )\n+ else:\n+ out = ivy.avg_pool2d(\n+ ivy.pow(input, norm_type),\n+ kernel_size,\n+ kernel_size,\n+ padding,\n+ data_format=data_format,\n+ )\n+ if not isinstance(kernel_size, int):\n+ kernel_size = kernel_size[0] * kernel_size[1]\n+ return ivy.pow(ivy.multiply(out, kernel_size), ivy.divide(1.0, norm_type))\n", "issue": "lp_pool2d\n\n", "before_files": [{"content": "# global\n\n# local\nimport ivy\nfrom ivy import with_unsupported_dtypes\nfrom ivy.functional.frontends.tensorflow.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\ndef _broadcast_pooling_helper(x, pool_dims: str = \"2d\", name: str = \"padding\"):\n dims = {\"1d\": 1, \"2d\": 2, \"3d\": 3}\n\n if isinstance(x, int):\n return tuple([x for _ in range(dims[pool_dims])])\n\n if len(x) == 1:\n return tuple([x[0] for _ in range(dims[pool_dims])])\n elif len(x) == dims[pool_dims]:\n return tuple(x)\n elif len(x) != dims[pool_dims]:\n raise ValueError(\n f\"`{name}` must either be a single int, \"\n f\"or a tuple of {dims[pool_dims]} ints. \"\n )\n\n\n@to_ivy_arrays_and_back\ndef avg_pool1d(\n input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True\n):\n kernel_size = _broadcast_pooling_helper(kernel_size, \"1d\", name=\"kernel_size\")\n stride = _broadcast_pooling_helper(stride, \"1d\", name=\"stride\")\n padding = _broadcast_pooling_helper(padding, \"1d\", name=\"padding\")\n kernel_pads = list(zip(kernel_size, padding))\n\n data_format = \"NCW\"\n\n if not all([pad <= kernel / 2 for kernel, pad in kernel_pads]):\n raise ValueError(\n \"pad should be smaller than or equal to half of kernel size, \"\n f\"but got padding={padding}, kernel_size={kernel_size}. 
\"\n )\n # figure out whether to apply padding\n if all([pad == ivy.ceil((kernel - 1) / 2) for kernel, pad in kernel_pads]):\n padding_str = \"SAME\"\n else:\n padding_str = \"VALID\"\n\n return ivy.avg_pool1d(\n input, kernel_size, stride, padding_str, data_format=data_format\n )\n\n\n@to_ivy_arrays_and_back\ndef avg_pool2d(\n input,\n kernel_size,\n stride=None,\n padding=0,\n ceil_mode=False,\n count_include_pad=True,\n divisor_override=None,\n):\n # Figure out input dims N\n input_rank = input.ndim\n\n if input_rank == 3:\n # CHW\n data_format = \"CHW\"\n elif input_rank == 4:\n # NCHW\n data_format = \"NCHW\"\n\n kernel_size = _broadcast_pooling_helper(kernel_size, \"2d\", name=\"kernel_size\")\n stride = _broadcast_pooling_helper(stride, \"2d\", name=\"stride\")\n padding = _broadcast_pooling_helper(padding, \"2d\", name=\"padding\")\n kernel_pads = list(zip(kernel_size, padding))\n\n # Padding should be less than or equal to half of kernel size\n if not all([pad <= kernel / 2 for kernel, pad in kernel_pads]):\n raise ValueError(\n \"pad should be smaller than or equal to half of kernel size, \"\n f\"but got padding={padding}, kernel_size={kernel_size}. \"\n )\n\n # Figure out padding string\n if all([pad == ivy.ceil((kernel - 1) / 2) for kernel, pad in kernel_pads]):\n padding_str = \"SAME\"\n else:\n padding_str = \"VALID\"\n\n return ivy.avg_pool2d(\n input,\n kernel_size,\n stride,\n padding_str,\n data_format=data_format,\n )\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef max_pool2d(\n input,\n kernel_size,\n stride=None,\n padding=0,\n dilation=1,\n ceil_mode=False,\n return_indices=False,\n):\n # ToDo: Add return_indices once superset in implemented\n dim_check = False\n if input.ndim == 3:\n input = input.expand_dims()\n dim_check = True\n if not stride:\n stride = kernel_size\n ret = ivy.max_pool2d(\n input,\n kernel_size,\n stride,\n padding,\n data_format=\"NCHW\",\n dilation=dilation,\n ceil_mode=ceil_mode,\n )\n if dim_check:\n return ret.squeeze(0)\n return ret\n\n\n@with_unsupported_dtypes(\n {\n \"1.11.0 and below\": (\n \"bfloat16\",\n \"float16\",\n )\n },\n \"torch\",\n)\n@to_ivy_arrays_and_back\ndef adaptive_avg_pool1d(input, output_size):\n return ivy.adaptive_avg_pool1d(input, output_size)\n\n\n@with_unsupported_dtypes(\n {\n \"1.11.0 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"torch\",\n)\n@to_ivy_arrays_and_back\ndef adaptive_avg_pool2d(input, output_size):\n return ivy.adaptive_avg_pool2d(input, output_size)\n\n\n@with_unsupported_dtypes(\n {\n \"1.11.0 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"torch\",\n)\n@to_ivy_arrays_and_back\ndef lp_pool1d(input, norm_type, kernel_size, stride=None, ceil_mode=False):\n\n data_format = \"NCW\"\n padding = \"VALID\"\n if stride is not None:\n out = ivy.avg_pool1d(\n ivy.pow(input, norm_type),\n kernel_size,\n stride,\n padding,\n data_format=data_format,\n )\n else:\n out = ivy.avg_pool1d(\n ivy.pow(input, norm_type),\n kernel_size,\n kernel_size,\n padding,\n data_format=data_format,\n )\n\n return ivy.pow(ivy.multiply(out, kernel_size), ivy.divide(1.0, norm_type))\n", "path": "ivy/functional/frontends/torch/nn/functional/pooling_functions.py"}], "after_files": [{"content": "# global\n\n# local\nimport ivy\nfrom ivy import with_unsupported_dtypes\nfrom ivy.functional.frontends.tensorflow.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\ndef _broadcast_pooling_helper(x, pool_dims: str = \"2d\", name: str = \"padding\"):\n 
dims = {\"1d\": 1, \"2d\": 2, \"3d\": 3}\n\n if isinstance(x, int):\n return tuple([x for _ in range(dims[pool_dims])])\n\n if len(x) == 1:\n return tuple([x[0] for _ in range(dims[pool_dims])])\n elif len(x) == dims[pool_dims]:\n return tuple(x)\n elif len(x) != dims[pool_dims]:\n raise ValueError(\n f\"`{name}` must either be a single int, \"\n f\"or a tuple of {dims[pool_dims]} ints. \"\n )\n\n\n@to_ivy_arrays_and_back\ndef avg_pool1d(\n input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True\n):\n kernel_size = _broadcast_pooling_helper(kernel_size, \"1d\", name=\"kernel_size\")\n stride = _broadcast_pooling_helper(stride, \"1d\", name=\"stride\")\n padding = _broadcast_pooling_helper(padding, \"1d\", name=\"padding\")\n kernel_pads = list(zip(kernel_size, padding))\n\n data_format = \"NCW\"\n\n if not all([pad <= kernel / 2 for kernel, pad in kernel_pads]):\n raise ValueError(\n \"pad should be smaller than or equal to half of kernel size, \"\n f\"but got padding={padding}, kernel_size={kernel_size}. \"\n )\n # figure out whether to apply padding\n if all([pad == ivy.ceil((kernel - 1) / 2) for kernel, pad in kernel_pads]):\n padding_str = \"SAME\"\n else:\n padding_str = \"VALID\"\n\n return ivy.avg_pool1d(\n input, kernel_size, stride, padding_str, data_format=data_format\n )\n\n\n@to_ivy_arrays_and_back\ndef avg_pool2d(\n input,\n kernel_size,\n stride=None,\n padding=0,\n ceil_mode=False,\n count_include_pad=True,\n divisor_override=None,\n):\n # Figure out input dims N\n input_rank = input.ndim\n\n if input_rank == 3:\n # CHW\n data_format = \"CHW\"\n elif input_rank == 4:\n # NCHW\n data_format = \"NCHW\"\n\n kernel_size = _broadcast_pooling_helper(kernel_size, \"2d\", name=\"kernel_size\")\n stride = _broadcast_pooling_helper(stride, \"2d\", name=\"stride\")\n padding = _broadcast_pooling_helper(padding, \"2d\", name=\"padding\")\n kernel_pads = list(zip(kernel_size, padding))\n\n # Padding should be less than or equal to half of kernel size\n if not all([pad <= kernel / 2 for kernel, pad in kernel_pads]):\n raise ValueError(\n \"pad should be smaller than or equal to half of kernel size, \"\n f\"but got padding={padding}, kernel_size={kernel_size}. 
\"\n )\n\n # Figure out padding string\n if all([pad == ivy.ceil((kernel - 1) / 2) for kernel, pad in kernel_pads]):\n padding_str = \"SAME\"\n else:\n padding_str = \"VALID\"\n\n return ivy.avg_pool2d(\n input,\n kernel_size,\n stride,\n padding_str,\n data_format=data_format,\n )\n\n\n@with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef max_pool2d(\n input,\n kernel_size,\n stride=None,\n padding=0,\n dilation=1,\n ceil_mode=False,\n return_indices=False,\n):\n # ToDo: Add return_indices once superset in implemented\n dim_check = False\n if input.ndim == 3:\n input = input.expand_dims()\n dim_check = True\n if not stride:\n stride = kernel_size\n ret = ivy.max_pool2d(\n input,\n kernel_size,\n stride,\n padding,\n data_format=\"NCHW\",\n dilation=dilation,\n ceil_mode=ceil_mode,\n )\n if dim_check:\n return ret.squeeze(0)\n return ret\n\n\n@with_unsupported_dtypes(\n {\n \"1.11.0 and below\": (\n \"bfloat16\",\n \"float16\",\n )\n },\n \"torch\",\n)\n@to_ivy_arrays_and_back\ndef adaptive_avg_pool1d(input, output_size):\n return ivy.adaptive_avg_pool1d(input, output_size)\n\n\n@with_unsupported_dtypes(\n {\n \"1.11.0 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"torch\",\n)\n@to_ivy_arrays_and_back\ndef adaptive_avg_pool2d(input, output_size):\n return ivy.adaptive_avg_pool2d(input, output_size)\n\n\n@with_unsupported_dtypes(\n {\n \"1.11.0 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"torch\",\n)\n@to_ivy_arrays_and_back\ndef lp_pool1d(input, norm_type, kernel_size, stride=None, ceil_mode=False):\n\n data_format = \"NCW\"\n padding = \"VALID\"\n if stride is not None:\n out = ivy.avg_pool1d(\n ivy.pow(input, norm_type),\n kernel_size,\n stride,\n padding,\n data_format=data_format,\n )\n else:\n out = ivy.avg_pool1d(\n ivy.pow(input, norm_type),\n kernel_size,\n kernel_size,\n padding,\n data_format=data_format,\n )\n\n return ivy.pow(ivy.multiply(out, kernel_size), ivy.divide(1.0, norm_type))\n\n\n@to_ivy_arrays_and_back\ndef lp_pool2d(input, norm_type, kernel_size, stride=None, ceil_mode=False):\n\n data_format = \"NCHW\"\n padding = \"VALID\"\n if stride is not None:\n out = ivy.avg_pool2d(\n ivy.pow(input, norm_type),\n kernel_size,\n stride,\n padding,\n data_format=data_format,\n )\n else:\n out = ivy.avg_pool2d(\n ivy.pow(input, norm_type),\n kernel_size,\n kernel_size,\n padding,\n data_format=data_format,\n )\n if not isinstance(kernel_size, int):\n kernel_size = kernel_size[0] * kernel_size[1]\n return ivy.pow(ivy.multiply(out, kernel_size), ivy.divide(1.0, norm_type))\n", "path": "ivy/functional/frontends/torch/nn/functional/pooling_functions.py"}]} | 2,096 | 324 |
gh_patches_debug_20175 | rasdani/github-patches | git_diff | bridgecrewio__checkov-3750 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CKV_AZURE_9 & CKV_AZURE_10 - Scan fails if protocol value is a wildcard
**Describe the issue**
CKV_AZURE_9 & CKV_AZURE_10
When scanning Bicep files the checks are looking for a protocol value of `tcp` and fail to catch when `*` is used.
**Examples**
The following bicep code fails to produce a finding for CKV_AZURE_9 & CKV_AZURE_10
```
resource nsg 'Microsoft.Network/networkSecurityGroups@2021-05-01' = {
name: nsgName
location: nsgLocation
properties: {
securityRules: [
{
name: 'badrule'
properties: {
access: 'Allow'
destinationAddressPrefix: '*'
destinationPortRange: '*'
direction: 'Inbound'
priority: 100
protocol: '*'
sourceAddressPrefix: '*'
sourcePortRange: '*'
}
}
]
}
}
```
While this works as expected:
```
resource nsg 'Microsoft.Network/networkSecurityGroups@2021-05-01' = {
name: nsgName
location: nsgLocation
properties: {
securityRules: [
{
name: 'badrule'
properties: {
access: 'Allow'
destinationAddressPrefix: '*'
destinationPortRange: '*'
direction: 'Inbound'
priority: 100
protocol: 'tcp'
sourceAddressPrefix: '*'
sourcePortRange: '*'
}
}
]
}
}
```
**Version (please complete the following information):**
- docker container 2.2.0
**Additional context**
A similar problem existed for Terraform that was previously fixed (see https://github.com/bridgecrewio/checkov/issues/601)
I believe the relevant lines is:
https://github.com/bridgecrewio/checkov/blob/master/checkov/arm/checks/resource/NSGRulePortAccessRestricted.py#LL48C4-L48C117
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/arm/checks/resource/NSGRulePortAccessRestricted.py`
Content:
```
1 import re
2 from typing import Union, Dict, Any
3
4 from checkov.common.models.enums import CheckResult, CheckCategories
5 from checkov.arm.base_resource_check import BaseResourceCheck
6
7 # https://docs.microsoft.com/en-us/azure/templates/microsoft.network/networksecuritygroups
8 # https://docs.microsoft.com/en-us/azure/templates/microsoft.network/networksecuritygroups/securityrules
9
10 INTERNET_ADDRESSES = ["*", "0.0.0.0", "<nw>/0", "/0", "internet", "any"] # nosec
11 PORT_RANGE = re.compile(r"\d+-\d+")
12
13
14 class NSGRulePortAccessRestricted(BaseResourceCheck):
15 def __init__(self, name: str, check_id: str, port: int) -> None:
16 supported_resources = (
17 "Microsoft.Network/networkSecurityGroups",
18 "Microsoft.Network/networkSecurityGroups/securityRules",
19 )
20 categories = (CheckCategories.NETWORKING,)
21 super().__init__(name=name, id=check_id, categories=categories, supported_resources=supported_resources)
22 self.port = port
23
24 def is_port_in_range(self, port_range: Union[int, str]) -> bool:
25 if re.match(PORT_RANGE, str(port_range)):
26 start, end = int(port_range.split("-")[0]), int(port_range.split("-")[1])
27 if start <= self.port <= end:
28 return True
29 if port_range in (str(self.port), "*"):
30 return True
31 return False
32
33 def scan_resource_conf(self, conf: Dict[str, Any]) -> CheckResult:
34 if "properties" in conf:
35 securityRules = []
36 if self.entity_type == "Microsoft.Network/networkSecurityGroups":
37 if "securityRules" in conf["properties"]:
38 securityRules.extend(conf["properties"]["securityRules"])
39 if self.entity_type == "Microsoft.Network/networkSecurityGroups/securityRules":
40 securityRules.append(conf)
41
42 for rule in securityRules:
43 portRanges = []
44 sourcePrefixes = []
45 if "properties" in rule:
46 if "access" in rule["properties"] and rule["properties"]["access"].lower() == "allow":
47 if "direction" in rule["properties"] and rule["properties"]["direction"].lower() == "inbound":
48 if "protocol" in rule["properties"] and rule["properties"]["protocol"].lower() == "tcp":
49 if "destinationPortRanges" in rule["properties"]:
50 portRanges.extend(rule["properties"]["destinationPortRanges"])
51 if "destinationPortRange" in rule["properties"]:
52 portRanges.append(rule["properties"]["destinationPortRange"])
53
54 if "sourceAddressPrefixes" in rule["properties"]:
55 sourcePrefixes.extend(rule["properties"]["sourceAddressPrefixes"])
56 if "sourceAddressPrefix" in rule["properties"]:
57 sourcePrefixes.append(rule["properties"]["sourceAddressPrefix"])
58
59 for portRange in portRanges:
60 if self.is_port_in_range(portRange):
61 for prefix in sourcePrefixes:
62 if prefix in INTERNET_ADDRESSES:
63 return CheckResult.FAILED
64
65 return CheckResult.PASSED
66
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/arm/checks/resource/NSGRulePortAccessRestricted.py b/checkov/arm/checks/resource/NSGRulePortAccessRestricted.py
--- a/checkov/arm/checks/resource/NSGRulePortAccessRestricted.py
+++ b/checkov/arm/checks/resource/NSGRulePortAccessRestricted.py
@@ -45,7 +45,7 @@
if "properties" in rule:
if "access" in rule["properties"] and rule["properties"]["access"].lower() == "allow":
if "direction" in rule["properties"] and rule["properties"]["direction"].lower() == "inbound":
- if "protocol" in rule["properties"] and rule["properties"]["protocol"].lower() == "tcp":
+ if "protocol" in rule["properties"] and rule["properties"]["protocol"].lower() in ("tcp", "*"):
if "destinationPortRanges" in rule["properties"]:
portRanges.extend(rule["properties"]["destinationPortRanges"])
if "destinationPortRange" in rule["properties"]:
| {"golden_diff": "diff --git a/checkov/arm/checks/resource/NSGRulePortAccessRestricted.py b/checkov/arm/checks/resource/NSGRulePortAccessRestricted.py\n--- a/checkov/arm/checks/resource/NSGRulePortAccessRestricted.py\n+++ b/checkov/arm/checks/resource/NSGRulePortAccessRestricted.py\n@@ -45,7 +45,7 @@\n if \"properties\" in rule:\n if \"access\" in rule[\"properties\"] and rule[\"properties\"][\"access\"].lower() == \"allow\":\n if \"direction\" in rule[\"properties\"] and rule[\"properties\"][\"direction\"].lower() == \"inbound\":\n- if \"protocol\" in rule[\"properties\"] and rule[\"properties\"][\"protocol\"].lower() == \"tcp\":\n+ if \"protocol\" in rule[\"properties\"] and rule[\"properties\"][\"protocol\"].lower() in (\"tcp\", \"*\"):\n if \"destinationPortRanges\" in rule[\"properties\"]:\n portRanges.extend(rule[\"properties\"][\"destinationPortRanges\"])\n if \"destinationPortRange\" in rule[\"properties\"]:\n", "issue": "CKV_AZURE_9 & CKV_AZURE_10 - Scan fails if protocol value is a wildcard\n**Describe the issue**\r\nCKV_AZURE_9 & CKV_AZURE_10\r\n\r\nWhen scanning Bicep files the checks are looking for a protocol value of `tcp` and fail to catch when `*` is used.\r\n\r\n**Examples**\r\n\r\nThe following bicep code fails to produce a finding for CKV_AZURE_9 & CKV_AZURE_10\r\n```\r\nresource nsg 'Microsoft.Network/networkSecurityGroups@2021-05-01' = {\r\n name: nsgName\r\n location: nsgLocation\r\n properties: {\r\n securityRules: [\r\n {\r\n name: 'badrule'\r\n properties: {\r\n access: 'Allow'\r\n destinationAddressPrefix: '*'\r\n destinationPortRange: '*'\r\n direction: 'Inbound'\r\n priority: 100\r\n protocol: '*'\r\n sourceAddressPrefix: '*'\r\n sourcePortRange: '*'\r\n }\r\n }\r\n ]\r\n }\r\n}\r\n```\r\n\r\nWhile this works as expected:\r\n```\r\nresource nsg 'Microsoft.Network/networkSecurityGroups@2021-05-01' = {\r\n name: nsgName\r\n location: nsgLocation\r\n properties: {\r\n securityRules: [\r\n {\r\n name: 'badrule'\r\n properties: {\r\n access: 'Allow'\r\n destinationAddressPrefix: '*'\r\n destinationPortRange: '*'\r\n direction: 'Inbound'\r\n priority: 100\r\n protocol: 'tcp'\r\n sourceAddressPrefix: '*'\r\n sourcePortRange: '*'\r\n }\r\n }\r\n ]\r\n }\r\n}\r\n```\r\n\r\n**Version (please complete the following information):**\r\n - docker container 2.2.0\r\n\r\n**Additional context**\r\nA similar problem existed for Terraform that was previously fixed (see https://github.com/bridgecrewio/checkov/issues/601) \r\n\r\nI believe the relevant lines is: \r\nhttps://github.com/bridgecrewio/checkov/blob/master/checkov/arm/checks/resource/NSGRulePortAccessRestricted.py#LL48C4-L48C117\r\n\r\n\n", "before_files": [{"content": "import re\nfrom typing import Union, Dict, Any\n\nfrom checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.arm.base_resource_check import BaseResourceCheck\n\n# https://docs.microsoft.com/en-us/azure/templates/microsoft.network/networksecuritygroups\n# https://docs.microsoft.com/en-us/azure/templates/microsoft.network/networksecuritygroups/securityrules\n\nINTERNET_ADDRESSES = [\"*\", \"0.0.0.0\", \"<nw>/0\", \"/0\", \"internet\", \"any\"] # nosec\nPORT_RANGE = re.compile(r\"\\d+-\\d+\")\n\n\nclass NSGRulePortAccessRestricted(BaseResourceCheck):\n def __init__(self, name: str, check_id: str, port: int) -> None:\n supported_resources = (\n \"Microsoft.Network/networkSecurityGroups\",\n \"Microsoft.Network/networkSecurityGroups/securityRules\",\n )\n categories = (CheckCategories.NETWORKING,)\n super().__init__(name=name, 
id=check_id, categories=categories, supported_resources=supported_resources)\n self.port = port\n\n def is_port_in_range(self, port_range: Union[int, str]) -> bool:\n if re.match(PORT_RANGE, str(port_range)):\n start, end = int(port_range.split(\"-\")[0]), int(port_range.split(\"-\")[1])\n if start <= self.port <= end:\n return True\n if port_range in (str(self.port), \"*\"):\n return True\n return False\n\n def scan_resource_conf(self, conf: Dict[str, Any]) -> CheckResult:\n if \"properties\" in conf:\n securityRules = []\n if self.entity_type == \"Microsoft.Network/networkSecurityGroups\":\n if \"securityRules\" in conf[\"properties\"]:\n securityRules.extend(conf[\"properties\"][\"securityRules\"])\n if self.entity_type == \"Microsoft.Network/networkSecurityGroups/securityRules\":\n securityRules.append(conf)\n\n for rule in securityRules:\n portRanges = []\n sourcePrefixes = []\n if \"properties\" in rule:\n if \"access\" in rule[\"properties\"] and rule[\"properties\"][\"access\"].lower() == \"allow\":\n if \"direction\" in rule[\"properties\"] and rule[\"properties\"][\"direction\"].lower() == \"inbound\":\n if \"protocol\" in rule[\"properties\"] and rule[\"properties\"][\"protocol\"].lower() == \"tcp\":\n if \"destinationPortRanges\" in rule[\"properties\"]:\n portRanges.extend(rule[\"properties\"][\"destinationPortRanges\"])\n if \"destinationPortRange\" in rule[\"properties\"]:\n portRanges.append(rule[\"properties\"][\"destinationPortRange\"])\n\n if \"sourceAddressPrefixes\" in rule[\"properties\"]:\n sourcePrefixes.extend(rule[\"properties\"][\"sourceAddressPrefixes\"])\n if \"sourceAddressPrefix\" in rule[\"properties\"]:\n sourcePrefixes.append(rule[\"properties\"][\"sourceAddressPrefix\"])\n\n for portRange in portRanges:\n if self.is_port_in_range(portRange):\n for prefix in sourcePrefixes:\n if prefix in INTERNET_ADDRESSES:\n return CheckResult.FAILED\n\n return CheckResult.PASSED\n", "path": "checkov/arm/checks/resource/NSGRulePortAccessRestricted.py"}], "after_files": [{"content": "import re\nfrom typing import Union, Dict, Any\n\nfrom checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.arm.base_resource_check import BaseResourceCheck\n\n# https://docs.microsoft.com/en-us/azure/templates/microsoft.network/networksecuritygroups\n# https://docs.microsoft.com/en-us/azure/templates/microsoft.network/networksecuritygroups/securityrules\n\nINTERNET_ADDRESSES = [\"*\", \"0.0.0.0\", \"<nw>/0\", \"/0\", \"internet\", \"any\"] # nosec\nPORT_RANGE = re.compile(r\"\\d+-\\d+\")\n\n\nclass NSGRulePortAccessRestricted(BaseResourceCheck):\n def __init__(self, name: str, check_id: str, port: int) -> None:\n supported_resources = (\n \"Microsoft.Network/networkSecurityGroups\",\n \"Microsoft.Network/networkSecurityGroups/securityRules\",\n )\n categories = (CheckCategories.NETWORKING,)\n super().__init__(name=name, id=check_id, categories=categories, supported_resources=supported_resources)\n self.port = port\n\n def is_port_in_range(self, port_range: Union[int, str]) -> bool:\n if re.match(PORT_RANGE, str(port_range)):\n start, end = int(port_range.split(\"-\")[0]), int(port_range.split(\"-\")[1])\n if start <= self.port <= end:\n return True\n if port_range in (str(self.port), \"*\"):\n return True\n return False\n\n def scan_resource_conf(self, conf: Dict[str, Any]) -> CheckResult:\n if \"properties\" in conf:\n securityRules = []\n if self.entity_type == \"Microsoft.Network/networkSecurityGroups\":\n if \"securityRules\" in conf[\"properties\"]:\n 
securityRules.extend(conf[\"properties\"][\"securityRules\"])\n if self.entity_type == \"Microsoft.Network/networkSecurityGroups/securityRules\":\n securityRules.append(conf)\n\n for rule in securityRules:\n portRanges = []\n sourcePrefixes = []\n if \"properties\" in rule:\n if \"access\" in rule[\"properties\"] and rule[\"properties\"][\"access\"].lower() == \"allow\":\n if \"direction\" in rule[\"properties\"] and rule[\"properties\"][\"direction\"].lower() == \"inbound\":\n if \"protocol\" in rule[\"properties\"] and rule[\"properties\"][\"protocol\"].lower() in (\"tcp\", \"*\"):\n if \"destinationPortRanges\" in rule[\"properties\"]:\n portRanges.extend(rule[\"properties\"][\"destinationPortRanges\"])\n if \"destinationPortRange\" in rule[\"properties\"]:\n portRanges.append(rule[\"properties\"][\"destinationPortRange\"])\n\n if \"sourceAddressPrefixes\" in rule[\"properties\"]:\n sourcePrefixes.extend(rule[\"properties\"][\"sourceAddressPrefixes\"])\n if \"sourceAddressPrefix\" in rule[\"properties\"]:\n sourcePrefixes.append(rule[\"properties\"][\"sourceAddressPrefix\"])\n\n for portRange in portRanges:\n if self.is_port_in_range(portRange):\n for prefix in sourcePrefixes:\n if prefix in INTERNET_ADDRESSES:\n return CheckResult.FAILED\n\n return CheckResult.PASSED\n", "path": "checkov/arm/checks/resource/NSGRulePortAccessRestricted.py"}]} | 1,516 | 221 |
gh_patches_debug_9237 | rasdani/github-patches | git_diff | mars-project__mars-1623 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Setitem for DataFrame leads to wrong dtypes
<!--
Thank you for your contribution!
Please review https://github.com/mars-project/mars/blob/master/CONTRIBUTING.rst before opening an issue.
-->
**Describe the bug**
Adding a column to a DataFrame will lead to wrong dtypes on the input DataFrame.
**To Reproduce**
```python
In [1]: import mars.dataframe as md
In [2]: a = md.DataFrame({'a':[1,2,3]})
In [3]: a['new'] = 1
In [4]: a.op.inputs
Out[4]: [DataFrame <op=DataFrameDataSource, key=c212164d24d96ed634711c3b97f334cb>]
In [5]: a.op.inputs[0].dtypes
Out[5]:
a int64
new int64
dtype: object
```
**Expected behavior**
The input DataFrame's dtypes should still list only one column.
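For illustration, a minimal pandas-only sketch of the kind of aliasing that could produce this symptom (the names below are illustrative, not Mars internals):
```python
import numpy as np
import pandas as pd

# A dtypes Series held by reference: an in-place mutation is visible to every
# object that holds the same Series, which matches the symptom above.
input_dtypes = pd.Series({"a": np.dtype("int64")})
output_dtypes = input_dtypes              # reference, not a copy
output_dtypes.loc["new"] = np.dtype("int64")
print(input_dtypes)                       # now lists both 'a' and 'new'
```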
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mars/dataframe/indexing/setitem.py`
Content:
```
1 # Copyright 1999-2020 Alibaba Group Holding Ltd.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import numpy as np
16 import pandas as pd
17 from pandas.api.types import is_list_like
18
19 from ... import opcodes
20 from ...core import OutputType
21 from ...serialize import KeyField, AnyField
22 from ...tensor.core import TENSOR_TYPE
23 from ...tiles import TilesError
24 from ..core import SERIES_TYPE, DataFrame
25 from ..initializer import Series as asseries
26 from ..operands import DataFrameOperand, DataFrameOperandMixin
27 from ..utils import parse_index
28
29
30 class DataFrameSetitem(DataFrameOperand, DataFrameOperandMixin):
31 _op_type_ = opcodes.INDEXSETVALUE
32
33 _target = KeyField('target')
34 _indexes = AnyField('indexes')
35 _value = AnyField('value')
36
37 def __init__(self, target=None, indexes=None, value=None, output_types=None, **kw):
38 super().__init__(_target=target, _indexes=indexes,
39 _value=value, _output_types=output_types, **kw)
40 if self.output_types is None:
41 self.output_types = [OutputType.dataframe]
42
43 @property
44 def target(self):
45 return self._target
46
47 @property
48 def indexes(self):
49 return self._indexes
50
51 @property
52 def value(self):
53 return self._value
54
55 def _set_inputs(self, inputs):
56 super()._set_inputs(inputs)
57 self._target = self._inputs[0]
58 if len(inputs) > 1:
59 self._value = self._inputs[-1]
60
61 def __call__(self, target: DataFrame, value):
62 inputs = [target]
63 if np.isscalar(value):
64 value_dtype = np.array(value).dtype
65 else:
66 if isinstance(value, (pd.Series, SERIES_TYPE)):
67 value = asseries(value)
68 inputs.append(value)
69 value_dtype = value.dtype
70 elif is_list_like(value) or isinstance(value, TENSOR_TYPE):
71 value = asseries(value, index=target.index)
72 inputs.append(value)
73 value_dtype = value.dtype
74 else: # pragma: no cover
75 raise TypeError('Wrong value type, could be one of scalar, Series or tensor')
76
77 if value.index_value.key != target.index_value.key: # pragma: no cover
78 raise NotImplementedError('Does not support setting value '
79 'with different index for now')
80
81 index_value = target.index_value
82 dtypes = target.dtypes
83 dtypes.loc[self._indexes] = value_dtype
84 columns_value = parse_index(dtypes.index, store_data=True)
85 ret = self.new_dataframe(inputs, shape=(target.shape[0], len(dtypes)),
86 dtypes=dtypes, index_value=index_value,
87 columns_value=columns_value)
88 target.data = ret.data
89
90 @classmethod
91 def tile(cls, op):
92 out = op.outputs[0]
93 target = op.target
94 value = op.value
95 col = op.indexes
96 columns = target.columns_value.to_pandas()
97
98 if not np.isscalar(value):
99 # check if all chunk's index_value are identical
100 target_chunk_index_values = [c.index_value for c in target.chunks
101 if c.index[1] == 0]
102 value_chunk_index_values = [v.index_value for v in value.chunks]
103 is_identical = len(target_chunk_index_values) == len(target_chunk_index_values) and \
104 all(c.key == v.key for c, v in zip(target_chunk_index_values, value_chunk_index_values))
105 if not is_identical:
106 # do rechunk
107 if any(np.isnan(s) for s in target.nsplits[0]) or \
108 any(np.isnan(s) for s in value.nsplits[0]): # pragma: no cover
109 raise TilesError('target or value has unknown chunk shape')
110
111 value = value.rechunk({0: target.nsplits[0]})._inplace_tile()
112
113 out_chunks = []
114 nsplits = [list(ns) for ns in target.nsplits]
115 if col not in columns:
116 nsplits[1][-1] += 1
117 column_chunk_shape = target.chunk_shape[1]
118 # append to the last chunk on columns axis direction
119 for c in target.chunks:
120 if c.index[-1] != column_chunk_shape - 1:
121 # not effected, just output
122 out_chunks.append(c)
123 else:
124 chunk_op = op.copy().reset_key()
125 if np.isscalar(value):
126 chunk_inputs = [c]
127 else:
128 value_chunk = value.cix[c.index[0], ]
129 chunk_inputs = [c, value_chunk]
130
131 dtypes = c.dtypes
132 dtypes.loc[out.dtypes.index[-1]] = out.dtypes.iloc[-1]
133 chunk = chunk_op.new_chunk(chunk_inputs,
134 shape=(c.shape[0], c.shape[1] + 1),
135 dtypes=dtypes,
136 index_value=c.index_value,
137 columns_value=parse_index(dtypes.index, store_data=True),
138 index=c.index)
139 out_chunks.append(chunk)
140 else:
141 # replace exist column
142 for c in target.chunks:
143 if col in c.dtypes:
144 chunk_inputs = [c]
145 if not np.isscalar(value):
146 chunk_inputs.append(value.cix[c.index[0], ])
147 chunk_op = op.copy().reset_key()
148 chunk = chunk_op.new_chunk(chunk_inputs,
149 shape=c.shape,
150 dtypes=c.dtypes,
151 index_value=c.index_value,
152 columns_value=c.columns_value,
153 index=c.index)
154 out_chunks.append(chunk)
155 else:
156 out_chunks.append(c)
157
158 params = out.params
159 params['nsplits'] = tuple(tuple(ns) for ns in nsplits)
160 params['chunks'] = out_chunks
161 new_op = op.copy()
162 return new_op.new_tileables(op.inputs, kws=[params])
163
164 @classmethod
165 def execute(cls, ctx, op):
166 target = ctx[op.target.key].copy()
167 value = ctx[op.value.key] if not np.isscalar(op.value) else op.value
168 target[op.indexes] = value
169 ctx[op.outputs[0].key] = target
170
171
172 def dataframe_setitem(df, col, value):
173 op = DataFrameSetitem(target=df, indexes=col, value=value)
174 return op(df, value)
175
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mars/dataframe/indexing/setitem.py b/mars/dataframe/indexing/setitem.py
--- a/mars/dataframe/indexing/setitem.py
+++ b/mars/dataframe/indexing/setitem.py
@@ -79,7 +79,7 @@
'with different index for now')
index_value = target.index_value
- dtypes = target.dtypes
+ dtypes = target.dtypes.copy(deep=True)
dtypes.loc[self._indexes] = value_dtype
columns_value = parse_index(dtypes.index, store_data=True)
ret = self.new_dataframe(inputs, shape=(target.shape[0], len(dtypes)),
| {"golden_diff": "diff --git a/mars/dataframe/indexing/setitem.py b/mars/dataframe/indexing/setitem.py\n--- a/mars/dataframe/indexing/setitem.py\n+++ b/mars/dataframe/indexing/setitem.py\n@@ -79,7 +79,7 @@\n 'with different index for now')\n \n index_value = target.index_value\n- dtypes = target.dtypes\n+ dtypes = target.dtypes.copy(deep=True)\n dtypes.loc[self._indexes] = value_dtype\n columns_value = parse_index(dtypes.index, store_data=True)\n ret = self.new_dataframe(inputs, shape=(target.shape[0], len(dtypes)),\n", "issue": "[BUG] Setitem for DataFrame leads to a wrong dtypes\n<!--\r\nThank you for your contribution!\r\n\r\nPlease review https://github.com/mars-project/mars/blob/master/CONTRIBUTING.rst before opening an issue.\r\n-->\r\n\r\n**Describe the bug**\r\nAdd columns for a DataFrame will lead to a wrong dtypes of input DataFrame.\r\n\r\n**To Reproduce**\r\n```python\r\nIn [1]: import mars.dataframe as md \r\n\r\nIn [2]: a = md.DataFrame({'a':[1,2,3]}) \r\n\r\nIn [3]: a['new'] = 1 \r\n\r\nIn [4]: a.op.inputs \r\nOut[4]: [DataFrame <op=DataFrameDataSource, key=c212164d24d96ed634711c3b97f334cb>]\r\n\r\nIn [5]: a.op.inputs[0].dtypes \r\nOut[5]: \r\na int64\r\nnew int64\r\ndtype: object\r\n```\r\n**Expected behavior**\r\nInput DataFrame's dtypes should have only one column.\r\n\n", "before_files": [{"content": "# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import is_list_like\n\nfrom ... 
import opcodes\nfrom ...core import OutputType\nfrom ...serialize import KeyField, AnyField\nfrom ...tensor.core import TENSOR_TYPE\nfrom ...tiles import TilesError\nfrom ..core import SERIES_TYPE, DataFrame\nfrom ..initializer import Series as asseries\nfrom ..operands import DataFrameOperand, DataFrameOperandMixin\nfrom ..utils import parse_index\n\n\nclass DataFrameSetitem(DataFrameOperand, DataFrameOperandMixin):\n _op_type_ = opcodes.INDEXSETVALUE\n\n _target = KeyField('target')\n _indexes = AnyField('indexes')\n _value = AnyField('value')\n\n def __init__(self, target=None, indexes=None, value=None, output_types=None, **kw):\n super().__init__(_target=target, _indexes=indexes,\n _value=value, _output_types=output_types, **kw)\n if self.output_types is None:\n self.output_types = [OutputType.dataframe]\n\n @property\n def target(self):\n return self._target\n\n @property\n def indexes(self):\n return self._indexes\n\n @property\n def value(self):\n return self._value\n\n def _set_inputs(self, inputs):\n super()._set_inputs(inputs)\n self._target = self._inputs[0]\n if len(inputs) > 1:\n self._value = self._inputs[-1]\n\n def __call__(self, target: DataFrame, value):\n inputs = [target]\n if np.isscalar(value):\n value_dtype = np.array(value).dtype\n else:\n if isinstance(value, (pd.Series, SERIES_TYPE)):\n value = asseries(value)\n inputs.append(value)\n value_dtype = value.dtype\n elif is_list_like(value) or isinstance(value, TENSOR_TYPE):\n value = asseries(value, index=target.index)\n inputs.append(value)\n value_dtype = value.dtype\n else: # pragma: no cover\n raise TypeError('Wrong value type, could be one of scalar, Series or tensor')\n\n if value.index_value.key != target.index_value.key: # pragma: no cover\n raise NotImplementedError('Does not support setting value '\n 'with different index for now')\n\n index_value = target.index_value\n dtypes = target.dtypes\n dtypes.loc[self._indexes] = value_dtype\n columns_value = parse_index(dtypes.index, store_data=True)\n ret = self.new_dataframe(inputs, shape=(target.shape[0], len(dtypes)),\n dtypes=dtypes, index_value=index_value,\n columns_value=columns_value)\n target.data = ret.data\n\n @classmethod\n def tile(cls, op):\n out = op.outputs[0]\n target = op.target\n value = op.value\n col = op.indexes\n columns = target.columns_value.to_pandas()\n\n if not np.isscalar(value):\n # check if all chunk's index_value are identical\n target_chunk_index_values = [c.index_value for c in target.chunks\n if c.index[1] == 0]\n value_chunk_index_values = [v.index_value for v in value.chunks]\n is_identical = len(target_chunk_index_values) == len(target_chunk_index_values) and \\\n all(c.key == v.key for c, v in zip(target_chunk_index_values, value_chunk_index_values))\n if not is_identical:\n # do rechunk\n if any(np.isnan(s) for s in target.nsplits[0]) or \\\n any(np.isnan(s) for s in value.nsplits[0]): # pragma: no cover\n raise TilesError('target or value has unknown chunk shape')\n\n value = value.rechunk({0: target.nsplits[0]})._inplace_tile()\n\n out_chunks = []\n nsplits = [list(ns) for ns in target.nsplits]\n if col not in columns:\n nsplits[1][-1] += 1\n column_chunk_shape = target.chunk_shape[1]\n # append to the last chunk on columns axis direction\n for c in target.chunks:\n if c.index[-1] != column_chunk_shape - 1:\n # not effected, just output\n out_chunks.append(c)\n else:\n chunk_op = op.copy().reset_key()\n if np.isscalar(value):\n chunk_inputs = [c]\n else:\n value_chunk = value.cix[c.index[0], ]\n chunk_inputs = [c, 
value_chunk]\n\n dtypes = c.dtypes\n dtypes.loc[out.dtypes.index[-1]] = out.dtypes.iloc[-1]\n chunk = chunk_op.new_chunk(chunk_inputs,\n shape=(c.shape[0], c.shape[1] + 1),\n dtypes=dtypes,\n index_value=c.index_value,\n columns_value=parse_index(dtypes.index, store_data=True),\n index=c.index)\n out_chunks.append(chunk)\n else:\n # replace exist column\n for c in target.chunks:\n if col in c.dtypes:\n chunk_inputs = [c]\n if not np.isscalar(value):\n chunk_inputs.append(value.cix[c.index[0], ])\n chunk_op = op.copy().reset_key()\n chunk = chunk_op.new_chunk(chunk_inputs,\n shape=c.shape,\n dtypes=c.dtypes,\n index_value=c.index_value,\n columns_value=c.columns_value,\n index=c.index)\n out_chunks.append(chunk)\n else:\n out_chunks.append(c)\n\n params = out.params\n params['nsplits'] = tuple(tuple(ns) for ns in nsplits)\n params['chunks'] = out_chunks\n new_op = op.copy()\n return new_op.new_tileables(op.inputs, kws=[params])\n\n @classmethod\n def execute(cls, ctx, op):\n target = ctx[op.target.key].copy()\n value = ctx[op.value.key] if not np.isscalar(op.value) else op.value\n target[op.indexes] = value\n ctx[op.outputs[0].key] = target\n\n\ndef dataframe_setitem(df, col, value):\n op = DataFrameSetitem(target=df, indexes=col, value=value)\n return op(df, value)\n", "path": "mars/dataframe/indexing/setitem.py"}], "after_files": [{"content": "# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import is_list_like\n\nfrom ... 
import opcodes\nfrom ...core import OutputType\nfrom ...serialize import KeyField, AnyField\nfrom ...tensor.core import TENSOR_TYPE\nfrom ...tiles import TilesError\nfrom ..core import SERIES_TYPE, DataFrame\nfrom ..initializer import Series as asseries\nfrom ..operands import DataFrameOperand, DataFrameOperandMixin\nfrom ..utils import parse_index\n\n\nclass DataFrameSetitem(DataFrameOperand, DataFrameOperandMixin):\n _op_type_ = opcodes.INDEXSETVALUE\n\n _target = KeyField('target')\n _indexes = AnyField('indexes')\n _value = AnyField('value')\n\n def __init__(self, target=None, indexes=None, value=None, output_types=None, **kw):\n super().__init__(_target=target, _indexes=indexes,\n _value=value, _output_types=output_types, **kw)\n if self.output_types is None:\n self.output_types = [OutputType.dataframe]\n\n @property\n def target(self):\n return self._target\n\n @property\n def indexes(self):\n return self._indexes\n\n @property\n def value(self):\n return self._value\n\n def _set_inputs(self, inputs):\n super()._set_inputs(inputs)\n self._target = self._inputs[0]\n if len(inputs) > 1:\n self._value = self._inputs[-1]\n\n def __call__(self, target: DataFrame, value):\n inputs = [target]\n if np.isscalar(value):\n value_dtype = np.array(value).dtype\n else:\n if isinstance(value, (pd.Series, SERIES_TYPE)):\n value = asseries(value)\n inputs.append(value)\n value_dtype = value.dtype\n elif is_list_like(value) or isinstance(value, TENSOR_TYPE):\n value = asseries(value, index=target.index)\n inputs.append(value)\n value_dtype = value.dtype\n else: # pragma: no cover\n raise TypeError('Wrong value type, could be one of scalar, Series or tensor')\n\n if value.index_value.key != target.index_value.key: # pragma: no cover\n raise NotImplementedError('Does not support setting value '\n 'with different index for now')\n\n index_value = target.index_value\n dtypes = target.dtypes.copy(deep=True)\n dtypes.loc[self._indexes] = value_dtype\n columns_value = parse_index(dtypes.index, store_data=True)\n ret = self.new_dataframe(inputs, shape=(target.shape[0], len(dtypes)),\n dtypes=dtypes, index_value=index_value,\n columns_value=columns_value)\n target.data = ret.data\n\n @classmethod\n def tile(cls, op):\n out = op.outputs[0]\n target = op.target\n value = op.value\n col = op.indexes\n columns = target.columns_value.to_pandas()\n\n if not np.isscalar(value):\n # check if all chunk's index_value are identical\n target_chunk_index_values = [c.index_value for c in target.chunks\n if c.index[1] == 0]\n value_chunk_index_values = [v.index_value for v in value.chunks]\n is_identical = len(target_chunk_index_values) == len(target_chunk_index_values) and \\\n all(c.key == v.key for c, v in zip(target_chunk_index_values, value_chunk_index_values))\n if not is_identical:\n # do rechunk\n if any(np.isnan(s) for s in target.nsplits[0]) or \\\n any(np.isnan(s) for s in value.nsplits[0]): # pragma: no cover\n raise TilesError('target or value has unknown chunk shape')\n\n value = value.rechunk({0: target.nsplits[0]})._inplace_tile()\n\n out_chunks = []\n nsplits = [list(ns) for ns in target.nsplits]\n if col not in columns:\n nsplits[1][-1] += 1\n column_chunk_shape = target.chunk_shape[1]\n # append to the last chunk on columns axis direction\n for c in target.chunks:\n if c.index[-1] != column_chunk_shape - 1:\n # not effected, just output\n out_chunks.append(c)\n else:\n chunk_op = op.copy().reset_key()\n if np.isscalar(value):\n chunk_inputs = [c]\n else:\n value_chunk = value.cix[c.index[0], ]\n 
chunk_inputs = [c, value_chunk]\n\n dtypes = c.dtypes\n dtypes.loc[out.dtypes.index[-1]] = out.dtypes.iloc[-1]\n chunk = chunk_op.new_chunk(chunk_inputs,\n shape=(c.shape[0], c.shape[1] + 1),\n dtypes=dtypes,\n index_value=c.index_value,\n columns_value=parse_index(dtypes.index, store_data=True),\n index=c.index)\n out_chunks.append(chunk)\n else:\n # replace exist column\n for c in target.chunks:\n if col in c.dtypes:\n chunk_inputs = [c]\n if not np.isscalar(value):\n chunk_inputs.append(value.cix[c.index[0], ])\n chunk_op = op.copy().reset_key()\n chunk = chunk_op.new_chunk(chunk_inputs,\n shape=c.shape,\n dtypes=c.dtypes,\n index_value=c.index_value,\n columns_value=c.columns_value,\n index=c.index)\n out_chunks.append(chunk)\n else:\n out_chunks.append(c)\n\n params = out.params\n params['nsplits'] = tuple(tuple(ns) for ns in nsplits)\n params['chunks'] = out_chunks\n new_op = op.copy()\n return new_op.new_tileables(op.inputs, kws=[params])\n\n @classmethod\n def execute(cls, ctx, op):\n target = ctx[op.target.key].copy()\n value = ctx[op.value.key] if not np.isscalar(op.value) else op.value\n target[op.indexes] = value\n ctx[op.outputs[0].key] = target\n\n\ndef dataframe_setitem(df, col, value):\n op = DataFrameSetitem(target=df, indexes=col, value=value)\n return op(df, value)\n", "path": "mars/dataframe/indexing/setitem.py"}]} | 2,400 | 144 |
gh_patches_debug_29978 | rasdani/github-patches | git_diff | NVIDIA-Merlin__NVTabular-693 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Horovod example scripts fail when user supplies batch size parameter
**Describe the bug**
Using the batch size parameter on the TF Horovod example causes a type error with a mismatch between str and int.
**Steps/Code to reproduce bug**
Run the TF Horovod example with the argument `--b_size 1024`.
**Expected behavior**
The script should accept a user-provided batch size.
**Environment details (please complete the following information):**
- Environment location: Bare-metal
- Method of NVTabular install: conda
**Additional context**
I believe [this line](https://github.com/NVIDIA/NVTabular/blob/main/examples/horovod/tf_hvd_simple.py#L30) and the same line in the Torch example just need type coercions from str to int.
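A minimal sketch of that coercion, using argparse's own type handling (illustrative; only the flag name is taken from the example script):
```python
import argparse

parser = argparse.ArgumentParser()
# Let argparse convert the value so downstream code always sees an int.
parser.add_argument("--b_size", type=int, default=16384, help="batch size")

args = parser.parse_args(["--b_size", "1024"])
assert isinstance(args.b_size, int)  # 1024, not "1024"
```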
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/horovod/tf_hvd_simple.py`
Content:
```
1 # External dependencies
2 import argparse
3 import glob
4 import os
5
6 import cupy
7
8 # we can control how much memory to give tensorflow with this environment variable
9 # IMPORTANT: make sure you do this before you initialize TF's runtime, otherwise
10 # TF will have claimed all free GPU memory
11 os.environ["TF_MEMORY_ALLOCATION"] = "0.3" # fraction of free memory
12 import horovod.tensorflow as hvd # noqa: E402
13 import tensorflow as tf # noqa: E402
14
15 import nvtabular as nvt # noqa: E402
16 from nvtabular.framework_utils.tensorflow import layers # noqa: E402
17 from nvtabular.loader.tensorflow import KerasSequenceLoader # noqa: E402
18
19 parser = argparse.ArgumentParser(description="Process some integers.")
20 parser.add_argument("--dir_in", default=None, help="Input directory")
21 parser.add_argument("--b_size", default=None, help="batch size")
22 parser.add_argument("--cats", default=None, help="categorical columns")
23 parser.add_argument("--cats_mh", default=None, help="categorical multihot columns")
24 parser.add_argument("--conts", default=None, help="continuous columns")
25 parser.add_argument("--labels", default=None, help="continuous columns")
26 args = parser.parse_args()
27
28
29 BASE_DIR = args.dir_in or "./data/"
30 BATCH_SIZE = args.b_size or 16384 # Batch Size
31 CATEGORICAL_COLUMNS = args.cats or ["movieId", "userId"] # Single-hot
32 CATEGORICAL_MH_COLUMNS = args.cats_mh or ["genres"] # Multi-hot
33 NUMERIC_COLUMNS = args.conts or []
34 TRAIN_PATHS = sorted(
35 glob.glob(os.path.join(BASE_DIR, "train/*.parquet"))
36 ) # Output from ETL-with-NVTabular
37 hvd.init()
38
39 # Seed with system randomness (or a static seed)
40 cupy.random.seed(None)
41
42
43 def seed_fn():
44 """
45 Generate consistent dataloader shuffle seeds across workers
46
47 Reseeds each worker's dataloader each epoch to get fresh a shuffle
48 that's consistent across workers.
49 """
50 min_int, max_int = tf.int32.limits
51 max_rand = max_int // hvd.size()
52
53 # Generate a seed fragment
54 seed_fragment = cupy.random.randint(0, max_rand).get()
55
56 # Aggregate seed fragments from all Horovod workers
57 seed_tensor = tf.constant(seed_fragment)
58 reduced_seed = hvd.allreduce(seed_tensor, name="shuffle_seed", op=hvd.mpi_ops.Sum) % max_rand
59
60 return reduced_seed
61
62
63 proc = nvt.Workflow.load(os.path.join(BASE_DIR, "workflow/"))
64 EMBEDDING_TABLE_SHAPES = nvt.ops.get_embedding_sizes(proc)
65
66 train_dataset_tf = KerasSequenceLoader(
67 TRAIN_PATHS, # you could also use a glob pattern
68 batch_size=BATCH_SIZE,
69 label_names=["rating"],
70 cat_names=CATEGORICAL_COLUMNS + CATEGORICAL_MH_COLUMNS,
71 cont_names=NUMERIC_COLUMNS,
72 engine="parquet",
73 shuffle=True,
74 seed_fn=seed_fn,
75 buffer_size=0.06, # how many batches to load at once
76 parts_per_chunk=1,
77 global_size=hvd.size(),
78 global_rank=hvd.rank(),
79 )
80 inputs = {} # tf.keras.Input placeholders for each feature to be used
81 emb_layers = [] # output of all embedding layers, which will be concatenated
82 for col in CATEGORICAL_COLUMNS:
83 inputs[col] = tf.keras.Input(name=col, dtype=tf.int32, shape=(1,))
84 # Note that we need two input tensors for multi-hot categorical features
85 for col in CATEGORICAL_MH_COLUMNS:
86 inputs[col + "__values"] = tf.keras.Input(name=f"{col}__values", dtype=tf.int64, shape=(1,))
87 inputs[col + "__nnzs"] = tf.keras.Input(name=f"{col}__nnzs", dtype=tf.int64, shape=(1,))
88 for col in CATEGORICAL_COLUMNS + CATEGORICAL_MH_COLUMNS:
89 emb_layers.append(
90 tf.feature_column.embedding_column(
91 tf.feature_column.categorical_column_with_identity(
92 col, EMBEDDING_TABLE_SHAPES[col][0] # Input dimension (vocab size)
93 ),
94 EMBEDDING_TABLE_SHAPES[col][1], # Embedding output dimension
95 )
96 )
97 emb_layer = layers.DenseFeatures(emb_layers)
98 x_emb_output = emb_layer(inputs)
99 x = tf.keras.layers.Dense(128, activation="relu")(x_emb_output)
100 x = tf.keras.layers.Dense(128, activation="relu")(x)
101 x = tf.keras.layers.Dense(128, activation="relu")(x)
102 x = tf.keras.layers.Dense(1, activation="sigmoid")(x)
103 model = tf.keras.Model(inputs=inputs, outputs=x)
104 loss = tf.losses.BinaryCrossentropy()
105 opt = tf.keras.optimizers.SGD(0.01 * hvd.size())
106 opt = hvd.DistributedOptimizer(opt)
107 checkpoint_dir = "./checkpoints"
108 checkpoint = tf.train.Checkpoint(model=model, optimizer=opt)
109
110
111 @tf.function(experimental_relax_shapes=True)
112 def training_step(examples, labels, first_batch):
113 with tf.GradientTape() as tape:
114 probs = model(examples, training=True)
115 loss_value = loss(labels, probs)
116 # Horovod: add Horovod Distributed GradientTape.
117 tape = hvd.DistributedGradientTape(tape, sparse_as_dense=True)
118 grads = tape.gradient(loss_value, model.trainable_variables)
119 opt.apply_gradients(zip(grads, model.trainable_variables))
120 # Horovod: broadcast initial variable states from rank 0 to all other processes.
121 # This is necessary to ensure consistent initialization of all workers when
122 # training is started with random weights or restored from a checkpoint.
123 #
124 # Note: broadcast should be done after the first gradient step to ensure optimizer
125 # initialization.
126 if first_batch:
127 hvd.broadcast_variables(model.variables, root_rank=0)
128 hvd.broadcast_variables(opt.variables(), root_rank=0)
129 return loss_value
130
131
132 # Horovod: adjust number of steps based on number of GPUs.
133 for batch, (examples, labels) in enumerate(train_dataset_tf):
134 loss_value = training_step(examples, labels, batch == 0)
135 if batch % 10 == 0 and hvd.local_rank() == 0:
136 print("Step #%d\tLoss: %.6f" % (batch, loss_value))
137 hvd.join()
138 # Horovod: save checkpoints only on worker 0 to prevent other workers from
139 # corrupting it.
140 if hvd.rank() == 0:
141 checkpoint.save(checkpoint_dir)
142
```
Path: `examples/horovod/torch-nvt-horovod.py`
Content:
```
1 import argparse
2 import glob
3 import os
4 from time import time
5
6 import cupy
7 import torch
8
9 import nvtabular as nvt
10 from nvtabular.framework_utils.torch.models import Model
11 from nvtabular.framework_utils.torch.utils import process_epoch
12 from nvtabular.loader.torch import DLDataLoader, TorchAsyncItr
13
14 # Horovod must be the last import to avoid conflicts
15 import horovod.torch as hvd # noqa: E402, isort:skip
16
17
18 parser = argparse.ArgumentParser(description="Train a multi-gpu model with Torch and Horovod")
19 parser.add_argument("--dir_in", default=None, help="Input directory")
20 parser.add_argument("--batch_size", default=None, help="Batch size")
21 parser.add_argument("--cats", default=None, help="Categorical columns")
22 parser.add_argument("--cats_mh", default=None, help="Categorical multihot columns")
23 parser.add_argument("--conts", default=None, help="Continuous columns")
24 parser.add_argument("--labels", default=None, help="Label columns")
25 parser.add_argument("--epochs", default=1, help="Training epochs")
26 args = parser.parse_args()
27
28 hvd.init()
29
30 gpu_to_use = hvd.local_rank()
31
32 if torch.cuda.is_available():
33 torch.cuda.set_device(gpu_to_use)
34
35
36 BASE_DIR = os.path.expanduser(args.dir_in or "./data/")
37 BATCH_SIZE = args.batch_size or 16384 # Batch Size
38 CATEGORICAL_COLUMNS = args.cats or ["movieId", "userId"] # Single-hot
39 CATEGORICAL_MH_COLUMNS = args.cats_mh or ["genres"] # Multi-hot
40 NUMERIC_COLUMNS = args.conts or []
41
42 # Output from ETL-with-NVTabular
43 TRAIN_PATHS = sorted(glob.glob(os.path.join(BASE_DIR, "train", "*.parquet")))
44
45 proc = nvt.Workflow.load(os.path.join(BASE_DIR, "workflow/"))
46
47 EMBEDDING_TABLE_SHAPES = nvt.ops.get_embedding_sizes(proc)
48
49
50 # TensorItrDataset returns a single batch of x_cat, x_cont, y.
51 def collate_fn(x):
52 return x
53
54
55 # Seed with system randomness (or a static seed)
56 cupy.random.seed(None)
57
58
59 def seed_fn():
60 """
61 Generate consistent dataloader shuffle seeds across workers
62
63 Reseeds each worker's dataloader each epoch to get fresh a shuffle
64 that's consistent across workers.
65 """
66
67 max_rand = torch.iinfo(torch.int).max // hvd.size()
68
69 # Generate a seed fragment
70 seed_fragment = cupy.random.randint(0, max_rand)
71
72 # Aggregate seed fragments from all Horovod workers
73 seed_tensor = torch.tensor(seed_fragment)
74 reduced_seed = hvd.allreduce(seed_tensor, name="shuffle_seed", op=hvd.mpi_ops.Sum) % max_rand
75
76 return reduced_seed
77
78
79 train_dataset = TorchAsyncItr(
80 nvt.Dataset(TRAIN_PATHS),
81 batch_size=BATCH_SIZE,
82 cats=CATEGORICAL_COLUMNS + CATEGORICAL_MH_COLUMNS,
83 conts=NUMERIC_COLUMNS,
84 labels=["rating"],
85 devices=[gpu_to_use],
86 global_size=hvd.size(),
87 global_rank=hvd.rank(),
88 shuffle=True,
89 seed_fn=seed_fn,
90 )
91 train_loader = DLDataLoader(
92 train_dataset, batch_size=None, collate_fn=collate_fn, pin_memory=False, num_workers=0
93 )
94
95
96 EMBEDDING_TABLE_SHAPES_TUPLE = (
97 {
98 CATEGORICAL_COLUMNS[0]: EMBEDDING_TABLE_SHAPES[CATEGORICAL_COLUMNS[0]],
99 CATEGORICAL_COLUMNS[1]: EMBEDDING_TABLE_SHAPES[CATEGORICAL_COLUMNS[1]],
100 },
101 {CATEGORICAL_MH_COLUMNS[0]: EMBEDDING_TABLE_SHAPES[CATEGORICAL_MH_COLUMNS[0]]},
102 )
103
104 model = Model(
105 embedding_table_shapes=EMBEDDING_TABLE_SHAPES_TUPLE,
106 num_continuous=0,
107 emb_dropout=0.0,
108 layer_hidden_dims=[128, 128, 128],
109 layer_dropout_rates=[0.0, 0.0, 0.0],
110 ).cuda()
111
112 lr_scaler = hvd.size()
113
114 optimizer = torch.optim.Adam(model.parameters(), lr=0.01 * lr_scaler)
115
116 hvd.broadcast_parameters(model.state_dict(), root_rank=0)
117 hvd.broadcast_optimizer_state(optimizer, root_rank=0)
118
119 optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters())
120
121 for epoch in range(args.epochs):
122 start = time()
123 print(f"Training epoch {epoch}")
124 train_loss, y_pred, y = process_epoch(train_loader, model, train=True, optimizer=optimizer)
125 hvd.join(gpu_to_use)
126 hvd.broadcast_parameters(model.state_dict(), root_rank=0)
127 print(f"Epoch {epoch:02d}. Train loss: {train_loss:.4f}.")
128 hvd.join(gpu_to_use)
129 t_final = time() - start
130 total_rows = train_dataset.num_rows_processed
131 print(
132 f"run_time: {t_final} - rows: {total_rows} - "
133 f"epochs: {epoch} - dl_thru: {total_rows / t_final}"
134 )
135
136
137 hvd.join(gpu_to_use)
138 if hvd.local_rank() == 0:
139 print("Training complete")
140
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/horovod/tf_hvd_simple.py b/examples/horovod/tf_hvd_simple.py
--- a/examples/horovod/tf_hvd_simple.py
+++ b/examples/horovod/tf_hvd_simple.py
@@ -18,7 +18,7 @@
parser = argparse.ArgumentParser(description="Process some integers.")
parser.add_argument("--dir_in", default=None, help="Input directory")
-parser.add_argument("--b_size", default=None, help="batch size")
+parser.add_argument("--batch_size", default=None, help="batch size")
parser.add_argument("--cats", default=None, help="categorical columns")
parser.add_argument("--cats_mh", default=None, help="categorical multihot columns")
parser.add_argument("--conts", default=None, help="continuous columns")
@@ -27,7 +27,7 @@
BASE_DIR = args.dir_in or "./data/"
-BATCH_SIZE = args.b_size or 16384 # Batch Size
+BATCH_SIZE = int(args.batch_size) or 16384 # Batch Size
CATEGORICAL_COLUMNS = args.cats or ["movieId", "userId"] # Single-hot
CATEGORICAL_MH_COLUMNS = args.cats_mh or ["genres"] # Multi-hot
NUMERIC_COLUMNS = args.conts or []
diff --git a/examples/horovod/torch-nvt-horovod.py b/examples/horovod/torch-nvt-horovod.py
--- a/examples/horovod/torch-nvt-horovod.py
+++ b/examples/horovod/torch-nvt-horovod.py
@@ -34,7 +34,7 @@
BASE_DIR = os.path.expanduser(args.dir_in or "./data/")
-BATCH_SIZE = args.batch_size or 16384 # Batch Size
+BATCH_SIZE = int(args.batch_size) or 16384 # Batch Size
CATEGORICAL_COLUMNS = args.cats or ["movieId", "userId"] # Single-hot
CATEGORICAL_MH_COLUMNS = args.cats_mh or ["genres"] # Multi-hot
NUMERIC_COLUMNS = args.conts or []
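One caveat with the patched lines above: `int(args.batch_size)` raises a `TypeError` when the flag is omitted, because the argparse default is `None`. A guarded variant (illustrative only, not part of the golden diff):
```python
# Fall back to the default before converting, so an omitted flag still works.
BATCH_SIZE = int(args.batch_size) if args.batch_size else 16384
```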
| {"golden_diff": "diff --git a/examples/horovod/tf_hvd_simple.py b/examples/horovod/tf_hvd_simple.py\n--- a/examples/horovod/tf_hvd_simple.py\n+++ b/examples/horovod/tf_hvd_simple.py\n@@ -18,7 +18,7 @@\n \n parser = argparse.ArgumentParser(description=\"Process some integers.\")\n parser.add_argument(\"--dir_in\", default=None, help=\"Input directory\")\n-parser.add_argument(\"--b_size\", default=None, help=\"batch size\")\n+parser.add_argument(\"--batch_size\", default=None, help=\"batch size\")\n parser.add_argument(\"--cats\", default=None, help=\"categorical columns\")\n parser.add_argument(\"--cats_mh\", default=None, help=\"categorical multihot columns\")\n parser.add_argument(\"--conts\", default=None, help=\"continuous columns\")\n@@ -27,7 +27,7 @@\n \n \n BASE_DIR = args.dir_in or \"./data/\"\n-BATCH_SIZE = args.b_size or 16384 # Batch Size\n+BATCH_SIZE = int(args.batch_size) or 16384 # Batch Size\n CATEGORICAL_COLUMNS = args.cats or [\"movieId\", \"userId\"] # Single-hot\n CATEGORICAL_MH_COLUMNS = args.cats_mh or [\"genres\"] # Multi-hot\n NUMERIC_COLUMNS = args.conts or []\ndiff --git a/examples/horovod/torch-nvt-horovod.py b/examples/horovod/torch-nvt-horovod.py\n--- a/examples/horovod/torch-nvt-horovod.py\n+++ b/examples/horovod/torch-nvt-horovod.py\n@@ -34,7 +34,7 @@\n \n \n BASE_DIR = os.path.expanduser(args.dir_in or \"./data/\")\n-BATCH_SIZE = args.batch_size or 16384 # Batch Size\n+BATCH_SIZE = int(args.batch_size) or 16384 # Batch Size\n CATEGORICAL_COLUMNS = args.cats or [\"movieId\", \"userId\"] # Single-hot\n CATEGORICAL_MH_COLUMNS = args.cats_mh or [\"genres\"] # Multi-hot\n NUMERIC_COLUMNS = args.conts or []\n", "issue": "[BUG] Horovod example scripts fail when user supplies batch size parameter\n**Describe the bug**\r\nUsing the batch size parameter on the TF Horovod example causes a type error with a mismatch between str and int.\r\n\r\n**Steps/Code to reproduce bug**\r\nRun the TF Horovod example with the arguments `--b_size 1024`.\r\n\r\n**Expected behavior**\r\nThe script should accept a user-provided batch size.\r\n\r\n**Environment details (please complete the following information):**\r\n - Environment location: Bare-metal\r\n - Method of NVTabular install: conda\r\n \r\n**Additional context**\r\nI believe [this line](https://github.com/NVIDIA/NVTabular/blob/main/examples/horovod/tf_hvd_simple.py#L30) and the same line in the Torch example just need type coercions from str to int.\r\n\n", "before_files": [{"content": "# External dependencies\nimport argparse\nimport glob\nimport os\n\nimport cupy\n\n# we can control how much memory to give tensorflow with this environment variable\n# IMPORTANT: make sure you do this before you initialize TF's runtime, otherwise\n# TF will have claimed all free GPU memory\nos.environ[\"TF_MEMORY_ALLOCATION\"] = \"0.3\" # fraction of free memory\nimport horovod.tensorflow as hvd # noqa: E402\nimport tensorflow as tf # noqa: E402\n\nimport nvtabular as nvt # noqa: E402\nfrom nvtabular.framework_utils.tensorflow import layers # noqa: E402\nfrom nvtabular.loader.tensorflow import KerasSequenceLoader # noqa: E402\n\nparser = argparse.ArgumentParser(description=\"Process some integers.\")\nparser.add_argument(\"--dir_in\", default=None, help=\"Input directory\")\nparser.add_argument(\"--b_size\", default=None, help=\"batch size\")\nparser.add_argument(\"--cats\", default=None, help=\"categorical columns\")\nparser.add_argument(\"--cats_mh\", default=None, help=\"categorical multihot columns\")\nparser.add_argument(\"--conts\", 
default=None, help=\"continuous columns\")\nparser.add_argument(\"--labels\", default=None, help=\"continuous columns\")\nargs = parser.parse_args()\n\n\nBASE_DIR = args.dir_in or \"./data/\"\nBATCH_SIZE = args.b_size or 16384 # Batch Size\nCATEGORICAL_COLUMNS = args.cats or [\"movieId\", \"userId\"] # Single-hot\nCATEGORICAL_MH_COLUMNS = args.cats_mh or [\"genres\"] # Multi-hot\nNUMERIC_COLUMNS = args.conts or []\nTRAIN_PATHS = sorted(\n glob.glob(os.path.join(BASE_DIR, \"train/*.parquet\"))\n) # Output from ETL-with-NVTabular\nhvd.init()\n\n# Seed with system randomness (or a static seed)\ncupy.random.seed(None)\n\n\ndef seed_fn():\n \"\"\"\n Generate consistent dataloader shuffle seeds across workers\n\n Reseeds each worker's dataloader each epoch to get fresh a shuffle\n that's consistent across workers.\n \"\"\"\n min_int, max_int = tf.int32.limits\n max_rand = max_int // hvd.size()\n\n # Generate a seed fragment\n seed_fragment = cupy.random.randint(0, max_rand).get()\n\n # Aggregate seed fragments from all Horovod workers\n seed_tensor = tf.constant(seed_fragment)\n reduced_seed = hvd.allreduce(seed_tensor, name=\"shuffle_seed\", op=hvd.mpi_ops.Sum) % max_rand\n\n return reduced_seed\n\n\nproc = nvt.Workflow.load(os.path.join(BASE_DIR, \"workflow/\"))\nEMBEDDING_TABLE_SHAPES = nvt.ops.get_embedding_sizes(proc)\n\ntrain_dataset_tf = KerasSequenceLoader(\n TRAIN_PATHS, # you could also use a glob pattern\n batch_size=BATCH_SIZE,\n label_names=[\"rating\"],\n cat_names=CATEGORICAL_COLUMNS + CATEGORICAL_MH_COLUMNS,\n cont_names=NUMERIC_COLUMNS,\n engine=\"parquet\",\n shuffle=True,\n seed_fn=seed_fn,\n buffer_size=0.06, # how many batches to load at once\n parts_per_chunk=1,\n global_size=hvd.size(),\n global_rank=hvd.rank(),\n)\ninputs = {} # tf.keras.Input placeholders for each feature to be used\nemb_layers = [] # output of all embedding layers, which will be concatenated\nfor col in CATEGORICAL_COLUMNS:\n inputs[col] = tf.keras.Input(name=col, dtype=tf.int32, shape=(1,))\n# Note that we need two input tensors for multi-hot categorical features\nfor col in CATEGORICAL_MH_COLUMNS:\n inputs[col + \"__values\"] = tf.keras.Input(name=f\"{col}__values\", dtype=tf.int64, shape=(1,))\n inputs[col + \"__nnzs\"] = tf.keras.Input(name=f\"{col}__nnzs\", dtype=tf.int64, shape=(1,))\nfor col in CATEGORICAL_COLUMNS + CATEGORICAL_MH_COLUMNS:\n emb_layers.append(\n tf.feature_column.embedding_column(\n tf.feature_column.categorical_column_with_identity(\n col, EMBEDDING_TABLE_SHAPES[col][0] # Input dimension (vocab size)\n ),\n EMBEDDING_TABLE_SHAPES[col][1], # Embedding output dimension\n )\n )\nemb_layer = layers.DenseFeatures(emb_layers)\nx_emb_output = emb_layer(inputs)\nx = tf.keras.layers.Dense(128, activation=\"relu\")(x_emb_output)\nx = tf.keras.layers.Dense(128, activation=\"relu\")(x)\nx = tf.keras.layers.Dense(128, activation=\"relu\")(x)\nx = tf.keras.layers.Dense(1, activation=\"sigmoid\")(x)\nmodel = tf.keras.Model(inputs=inputs, outputs=x)\nloss = tf.losses.BinaryCrossentropy()\nopt = tf.keras.optimizers.SGD(0.01 * hvd.size())\nopt = hvd.DistributedOptimizer(opt)\ncheckpoint_dir = \"./checkpoints\"\ncheckpoint = tf.train.Checkpoint(model=model, optimizer=opt)\n\n\[email protected](experimental_relax_shapes=True)\ndef training_step(examples, labels, first_batch):\n with tf.GradientTape() as tape:\n probs = model(examples, training=True)\n loss_value = loss(labels, probs)\n # Horovod: add Horovod Distributed GradientTape.\n tape = hvd.DistributedGradientTape(tape, 
sparse_as_dense=True)\n grads = tape.gradient(loss_value, model.trainable_variables)\n opt.apply_gradients(zip(grads, model.trainable_variables))\n # Horovod: broadcast initial variable states from rank 0 to all other processes.\n # This is necessary to ensure consistent initialization of all workers when\n # training is started with random weights or restored from a checkpoint.\n #\n # Note: broadcast should be done after the first gradient step to ensure optimizer\n # initialization.\n if first_batch:\n hvd.broadcast_variables(model.variables, root_rank=0)\n hvd.broadcast_variables(opt.variables(), root_rank=0)\n return loss_value\n\n\n# Horovod: adjust number of steps based on number of GPUs.\nfor batch, (examples, labels) in enumerate(train_dataset_tf):\n loss_value = training_step(examples, labels, batch == 0)\n if batch % 10 == 0 and hvd.local_rank() == 0:\n print(\"Step #%d\\tLoss: %.6f\" % (batch, loss_value))\nhvd.join()\n# Horovod: save checkpoints only on worker 0 to prevent other workers from\n# corrupting it.\nif hvd.rank() == 0:\n checkpoint.save(checkpoint_dir)\n", "path": "examples/horovod/tf_hvd_simple.py"}, {"content": "import argparse\nimport glob\nimport os\nfrom time import time\n\nimport cupy\nimport torch\n\nimport nvtabular as nvt\nfrom nvtabular.framework_utils.torch.models import Model\nfrom nvtabular.framework_utils.torch.utils import process_epoch\nfrom nvtabular.loader.torch import DLDataLoader, TorchAsyncItr\n\n# Horovod must be the last import to avoid conflicts\nimport horovod.torch as hvd # noqa: E402, isort:skip\n\n\nparser = argparse.ArgumentParser(description=\"Train a multi-gpu model with Torch and Horovod\")\nparser.add_argument(\"--dir_in\", default=None, help=\"Input directory\")\nparser.add_argument(\"--batch_size\", default=None, help=\"Batch size\")\nparser.add_argument(\"--cats\", default=None, help=\"Categorical columns\")\nparser.add_argument(\"--cats_mh\", default=None, help=\"Categorical multihot columns\")\nparser.add_argument(\"--conts\", default=None, help=\"Continuous columns\")\nparser.add_argument(\"--labels\", default=None, help=\"Label columns\")\nparser.add_argument(\"--epochs\", default=1, help=\"Training epochs\")\nargs = parser.parse_args()\n\nhvd.init()\n\ngpu_to_use = hvd.local_rank()\n\nif torch.cuda.is_available():\n torch.cuda.set_device(gpu_to_use)\n\n\nBASE_DIR = os.path.expanduser(args.dir_in or \"./data/\")\nBATCH_SIZE = args.batch_size or 16384 # Batch Size\nCATEGORICAL_COLUMNS = args.cats or [\"movieId\", \"userId\"] # Single-hot\nCATEGORICAL_MH_COLUMNS = args.cats_mh or [\"genres\"] # Multi-hot\nNUMERIC_COLUMNS = args.conts or []\n\n# Output from ETL-with-NVTabular\nTRAIN_PATHS = sorted(glob.glob(os.path.join(BASE_DIR, \"train\", \"*.parquet\")))\n\nproc = nvt.Workflow.load(os.path.join(BASE_DIR, \"workflow/\"))\n\nEMBEDDING_TABLE_SHAPES = nvt.ops.get_embedding_sizes(proc)\n\n\n# TensorItrDataset returns a single batch of x_cat, x_cont, y.\ndef collate_fn(x):\n return x\n\n\n# Seed with system randomness (or a static seed)\ncupy.random.seed(None)\n\n\ndef seed_fn():\n \"\"\"\n Generate consistent dataloader shuffle seeds across workers\n\n Reseeds each worker's dataloader each epoch to get fresh a shuffle\n that's consistent across workers.\n \"\"\"\n\n max_rand = torch.iinfo(torch.int).max // hvd.size()\n\n # Generate a seed fragment\n seed_fragment = cupy.random.randint(0, max_rand)\n\n # Aggregate seed fragments from all Horovod workers\n seed_tensor = torch.tensor(seed_fragment)\n reduced_seed = 
hvd.allreduce(seed_tensor, name=\"shuffle_seed\", op=hvd.mpi_ops.Sum) % max_rand\n\n return reduced_seed\n\n\ntrain_dataset = TorchAsyncItr(\n nvt.Dataset(TRAIN_PATHS),\n batch_size=BATCH_SIZE,\n cats=CATEGORICAL_COLUMNS + CATEGORICAL_MH_COLUMNS,\n conts=NUMERIC_COLUMNS,\n labels=[\"rating\"],\n devices=[gpu_to_use],\n global_size=hvd.size(),\n global_rank=hvd.rank(),\n shuffle=True,\n seed_fn=seed_fn,\n)\ntrain_loader = DLDataLoader(\n train_dataset, batch_size=None, collate_fn=collate_fn, pin_memory=False, num_workers=0\n)\n\n\nEMBEDDING_TABLE_SHAPES_TUPLE = (\n {\n CATEGORICAL_COLUMNS[0]: EMBEDDING_TABLE_SHAPES[CATEGORICAL_COLUMNS[0]],\n CATEGORICAL_COLUMNS[1]: EMBEDDING_TABLE_SHAPES[CATEGORICAL_COLUMNS[1]],\n },\n {CATEGORICAL_MH_COLUMNS[0]: EMBEDDING_TABLE_SHAPES[CATEGORICAL_MH_COLUMNS[0]]},\n)\n\nmodel = Model(\n embedding_table_shapes=EMBEDDING_TABLE_SHAPES_TUPLE,\n num_continuous=0,\n emb_dropout=0.0,\n layer_hidden_dims=[128, 128, 128],\n layer_dropout_rates=[0.0, 0.0, 0.0],\n).cuda()\n\nlr_scaler = hvd.size()\n\noptimizer = torch.optim.Adam(model.parameters(), lr=0.01 * lr_scaler)\n\nhvd.broadcast_parameters(model.state_dict(), root_rank=0)\nhvd.broadcast_optimizer_state(optimizer, root_rank=0)\n\noptimizer = hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters())\n\nfor epoch in range(args.epochs):\n start = time()\n print(f\"Training epoch {epoch}\")\n train_loss, y_pred, y = process_epoch(train_loader, model, train=True, optimizer=optimizer)\n hvd.join(gpu_to_use)\n hvd.broadcast_parameters(model.state_dict(), root_rank=0)\n print(f\"Epoch {epoch:02d}. Train loss: {train_loss:.4f}.\")\n hvd.join(gpu_to_use)\n t_final = time() - start\n total_rows = train_dataset.num_rows_processed\n print(\n f\"run_time: {t_final} - rows: {total_rows} - \"\n f\"epochs: {epoch} - dl_thru: {total_rows / t_final}\"\n )\n\n\nhvd.join(gpu_to_use)\nif hvd.local_rank() == 0:\n print(\"Training complete\")\n", "path": "examples/horovod/torch-nvt-horovod.py"}], "after_files": [{"content": "# External dependencies\nimport argparse\nimport glob\nimport os\n\nimport cupy\n\n# we can control how much memory to give tensorflow with this environment variable\n# IMPORTANT: make sure you do this before you initialize TF's runtime, otherwise\n# TF will have claimed all free GPU memory\nos.environ[\"TF_MEMORY_ALLOCATION\"] = \"0.3\" # fraction of free memory\nimport horovod.tensorflow as hvd # noqa: E402\nimport tensorflow as tf # noqa: E402\n\nimport nvtabular as nvt # noqa: E402\nfrom nvtabular.framework_utils.tensorflow import layers # noqa: E402\nfrom nvtabular.loader.tensorflow import KerasSequenceLoader # noqa: E402\n\nparser = argparse.ArgumentParser(description=\"Process some integers.\")\nparser.add_argument(\"--dir_in\", default=None, help=\"Input directory\")\nparser.add_argument(\"--batch_size\", default=None, help=\"batch size\")\nparser.add_argument(\"--cats\", default=None, help=\"categorical columns\")\nparser.add_argument(\"--cats_mh\", default=None, help=\"categorical multihot columns\")\nparser.add_argument(\"--conts\", default=None, help=\"continuous columns\")\nparser.add_argument(\"--labels\", default=None, help=\"continuous columns\")\nargs = parser.parse_args()\n\n\nBASE_DIR = args.dir_in or \"./data/\"\nBATCH_SIZE = int(args.batch_size) or 16384 # Batch Size\nCATEGORICAL_COLUMNS = args.cats or [\"movieId\", \"userId\"] # Single-hot\nCATEGORICAL_MH_COLUMNS = args.cats_mh or [\"genres\"] # Multi-hot\nNUMERIC_COLUMNS = args.conts or []\nTRAIN_PATHS = sorted(\n 
glob.glob(os.path.join(BASE_DIR, \"train/*.parquet\"))\n) # Output from ETL-with-NVTabular\nhvd.init()\n\n# Seed with system randomness (or a static seed)\ncupy.random.seed(None)\n\n\ndef seed_fn():\n \"\"\"\n Generate consistent dataloader shuffle seeds across workers\n\n Reseeds each worker's dataloader each epoch to get fresh a shuffle\n that's consistent across workers.\n \"\"\"\n min_int, max_int = tf.int32.limits\n max_rand = max_int // hvd.size()\n\n # Generate a seed fragment\n seed_fragment = cupy.random.randint(0, max_rand).get()\n\n # Aggregate seed fragments from all Horovod workers\n seed_tensor = tf.constant(seed_fragment)\n reduced_seed = hvd.allreduce(seed_tensor, name=\"shuffle_seed\", op=hvd.mpi_ops.Sum) % max_rand\n\n return reduced_seed\n\n\nproc = nvt.Workflow.load(os.path.join(BASE_DIR, \"workflow/\"))\nEMBEDDING_TABLE_SHAPES = nvt.ops.get_embedding_sizes(proc)\n\ntrain_dataset_tf = KerasSequenceLoader(\n TRAIN_PATHS, # you could also use a glob pattern\n batch_size=BATCH_SIZE,\n label_names=[\"rating\"],\n cat_names=CATEGORICAL_COLUMNS + CATEGORICAL_MH_COLUMNS,\n cont_names=NUMERIC_COLUMNS,\n engine=\"parquet\",\n shuffle=True,\n seed_fn=seed_fn,\n buffer_size=0.06, # how many batches to load at once\n parts_per_chunk=1,\n global_size=hvd.size(),\n global_rank=hvd.rank(),\n)\ninputs = {} # tf.keras.Input placeholders for each feature to be used\nemb_layers = [] # output of all embedding layers, which will be concatenated\nfor col in CATEGORICAL_COLUMNS:\n inputs[col] = tf.keras.Input(name=col, dtype=tf.int32, shape=(1,))\n# Note that we need two input tensors for multi-hot categorical features\nfor col in CATEGORICAL_MH_COLUMNS:\n inputs[col + \"__values\"] = tf.keras.Input(name=f\"{col}__values\", dtype=tf.int64, shape=(1,))\n inputs[col + \"__nnzs\"] = tf.keras.Input(name=f\"{col}__nnzs\", dtype=tf.int64, shape=(1,))\nfor col in CATEGORICAL_COLUMNS + CATEGORICAL_MH_COLUMNS:\n emb_layers.append(\n tf.feature_column.embedding_column(\n tf.feature_column.categorical_column_with_identity(\n col, EMBEDDING_TABLE_SHAPES[col][0] # Input dimension (vocab size)\n ),\n EMBEDDING_TABLE_SHAPES[col][1], # Embedding output dimension\n )\n )\nemb_layer = layers.DenseFeatures(emb_layers)\nx_emb_output = emb_layer(inputs)\nx = tf.keras.layers.Dense(128, activation=\"relu\")(x_emb_output)\nx = tf.keras.layers.Dense(128, activation=\"relu\")(x)\nx = tf.keras.layers.Dense(128, activation=\"relu\")(x)\nx = tf.keras.layers.Dense(1, activation=\"sigmoid\")(x)\nmodel = tf.keras.Model(inputs=inputs, outputs=x)\nloss = tf.losses.BinaryCrossentropy()\nopt = tf.keras.optimizers.SGD(0.01 * hvd.size())\nopt = hvd.DistributedOptimizer(opt)\ncheckpoint_dir = \"./checkpoints\"\ncheckpoint = tf.train.Checkpoint(model=model, optimizer=opt)\n\n\[email protected](experimental_relax_shapes=True)\ndef training_step(examples, labels, first_batch):\n with tf.GradientTape() as tape:\n probs = model(examples, training=True)\n loss_value = loss(labels, probs)\n # Horovod: add Horovod Distributed GradientTape.\n tape = hvd.DistributedGradientTape(tape, sparse_as_dense=True)\n grads = tape.gradient(loss_value, model.trainable_variables)\n opt.apply_gradients(zip(grads, model.trainable_variables))\n # Horovod: broadcast initial variable states from rank 0 to all other processes.\n # This is necessary to ensure consistent initialization of all workers when\n # training is started with random weights or restored from a checkpoint.\n #\n # Note: broadcast should be done after the first gradient step to ensure 
optimizer\n # initialization.\n if first_batch:\n hvd.broadcast_variables(model.variables, root_rank=0)\n hvd.broadcast_variables(opt.variables(), root_rank=0)\n return loss_value\n\n\n# Horovod: adjust number of steps based on number of GPUs.\nfor batch, (examples, labels) in enumerate(train_dataset_tf):\n loss_value = training_step(examples, labels, batch == 0)\n if batch % 10 == 0 and hvd.local_rank() == 0:\n print(\"Step #%d\\tLoss: %.6f\" % (batch, loss_value))\nhvd.join()\n# Horovod: save checkpoints only on worker 0 to prevent other workers from\n# corrupting it.\nif hvd.rank() == 0:\n checkpoint.save(checkpoint_dir)\n", "path": "examples/horovod/tf_hvd_simple.py"}, {"content": "import argparse\nimport glob\nimport os\nfrom time import time\n\nimport cupy\nimport torch\n\nimport nvtabular as nvt\nfrom nvtabular.framework_utils.torch.models import Model\nfrom nvtabular.framework_utils.torch.utils import process_epoch\nfrom nvtabular.loader.torch import DLDataLoader, TorchAsyncItr\n\n# Horovod must be the last import to avoid conflicts\nimport horovod.torch as hvd # noqa: E402, isort:skip\n\n\nparser = argparse.ArgumentParser(description=\"Train a multi-gpu model with Torch and Horovod\")\nparser.add_argument(\"--dir_in\", default=None, help=\"Input directory\")\nparser.add_argument(\"--batch_size\", default=None, help=\"Batch size\")\nparser.add_argument(\"--cats\", default=None, help=\"Categorical columns\")\nparser.add_argument(\"--cats_mh\", default=None, help=\"Categorical multihot columns\")\nparser.add_argument(\"--conts\", default=None, help=\"Continuous columns\")\nparser.add_argument(\"--labels\", default=None, help=\"Label columns\")\nparser.add_argument(\"--epochs\", default=1, help=\"Training epochs\")\nargs = parser.parse_args()\n\nhvd.init()\n\ngpu_to_use = hvd.local_rank()\n\nif torch.cuda.is_available():\n torch.cuda.set_device(gpu_to_use)\n\n\nBASE_DIR = os.path.expanduser(args.dir_in or \"./data/\")\nBATCH_SIZE = int(args.batch_size) or 16384 # Batch Size\nCATEGORICAL_COLUMNS = args.cats or [\"movieId\", \"userId\"] # Single-hot\nCATEGORICAL_MH_COLUMNS = args.cats_mh or [\"genres\"] # Multi-hot\nNUMERIC_COLUMNS = args.conts or []\n\n# Output from ETL-with-NVTabular\nTRAIN_PATHS = sorted(glob.glob(os.path.join(BASE_DIR, \"train\", \"*.parquet\")))\n\nproc = nvt.Workflow.load(os.path.join(BASE_DIR, \"workflow/\"))\n\nEMBEDDING_TABLE_SHAPES = nvt.ops.get_embedding_sizes(proc)\n\n\n# TensorItrDataset returns a single batch of x_cat, x_cont, y.\ndef collate_fn(x):\n return x\n\n\n# Seed with system randomness (or a static seed)\ncupy.random.seed(None)\n\n\ndef seed_fn():\n \"\"\"\n Generate consistent dataloader shuffle seeds across workers\n\n Reseeds each worker's dataloader each epoch to get fresh a shuffle\n that's consistent across workers.\n \"\"\"\n\n max_rand = torch.iinfo(torch.int).max // hvd.size()\n\n # Generate a seed fragment\n seed_fragment = cupy.random.randint(0, max_rand)\n\n # Aggregate seed fragments from all Horovod workers\n seed_tensor = torch.tensor(seed_fragment)\n reduced_seed = hvd.allreduce(seed_tensor, name=\"shuffle_seed\", op=hvd.mpi_ops.Sum) % max_rand\n\n return reduced_seed\n\n\ntrain_dataset = TorchAsyncItr(\n nvt.Dataset(TRAIN_PATHS),\n batch_size=BATCH_SIZE,\n cats=CATEGORICAL_COLUMNS + CATEGORICAL_MH_COLUMNS,\n conts=NUMERIC_COLUMNS,\n labels=[\"rating\"],\n devices=[gpu_to_use],\n global_size=hvd.size(),\n global_rank=hvd.rank(),\n shuffle=True,\n seed_fn=seed_fn,\n)\ntrain_loader = DLDataLoader(\n train_dataset, batch_size=None, 
collate_fn=collate_fn, pin_memory=False, num_workers=0\n)\n\n\nEMBEDDING_TABLE_SHAPES_TUPLE = (\n {\n CATEGORICAL_COLUMNS[0]: EMBEDDING_TABLE_SHAPES[CATEGORICAL_COLUMNS[0]],\n CATEGORICAL_COLUMNS[1]: EMBEDDING_TABLE_SHAPES[CATEGORICAL_COLUMNS[1]],\n },\n {CATEGORICAL_MH_COLUMNS[0]: EMBEDDING_TABLE_SHAPES[CATEGORICAL_MH_COLUMNS[0]]},\n)\n\nmodel = Model(\n embedding_table_shapes=EMBEDDING_TABLE_SHAPES_TUPLE,\n num_continuous=0,\n emb_dropout=0.0,\n layer_hidden_dims=[128, 128, 128],\n layer_dropout_rates=[0.0, 0.0, 0.0],\n).cuda()\n\nlr_scaler = hvd.size()\n\noptimizer = torch.optim.Adam(model.parameters(), lr=0.01 * lr_scaler)\n\nhvd.broadcast_parameters(model.state_dict(), root_rank=0)\nhvd.broadcast_optimizer_state(optimizer, root_rank=0)\n\noptimizer = hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters())\n\nfor epoch in range(args.epochs):\n start = time()\n print(f\"Training epoch {epoch}\")\n train_loss, y_pred, y = process_epoch(train_loader, model, train=True, optimizer=optimizer)\n hvd.join(gpu_to_use)\n hvd.broadcast_parameters(model.state_dict(), root_rank=0)\n print(f\"Epoch {epoch:02d}. Train loss: {train_loss:.4f}.\")\n hvd.join(gpu_to_use)\n t_final = time() - start\n total_rows = train_dataset.num_rows_processed\n print(\n f\"run_time: {t_final} - rows: {total_rows} - \"\n f\"epochs: {epoch} - dl_thru: {total_rows / t_final}\"\n )\n\n\nhvd.join(gpu_to_use)\nif hvd.local_rank() == 0:\n print(\"Training complete\")\n", "path": "examples/horovod/torch-nvt-horovod.py"}]} | 3,747 | 479 |
gh_patches_debug_8436 | rasdani/github-patches | git_diff | microsoft__playwright-python-959 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug]: `record_har_omit_content` does not work properly
### Playwright version
1.15.3
### Operating system
Linux
### What browsers are you seeing the problem on?
Chromium, Firefox, WebKit
### Other information
Repo demonstrating the bug:
https://github.com/qwark97/har_omit_content_bug
The bug also occurs outside the Docker image.
### What happened? / Describe the bug
Using `record_har_omit_content` as a `new_page` parameter controls the presence of the `text` field inside the `entry.response.content` object in the `.har` file.
Not passing this parameter (it defaults to `False`, see https://playwright.dev/python/docs/api/class-browser#browser-new-page) lets you see `text` inside the `.har` file. Using `record_har_omit_content=True` also works as expected - `text` is absent. Unfortunately, passing `record_har_omit_content=False` explicitly **does not** work as expected -> the `.har` file **will not** contain the `text` field.
It also looks like passing anything except an explicit `None` as the `record_har_omit_content` value (the type doesn't matter) will result in the `text` field being missing.
### Code snippet to reproduce your bug
_No response_
### Relevant log output
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `playwright/_impl/_browser.py`
Content:
```
1 # Copyright (c) Microsoft Corporation.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import base64
16 import json
17 from pathlib import Path
18 from types import SimpleNamespace
19 from typing import TYPE_CHECKING, Dict, List, Union
20
21 from playwright._impl._api_structures import (
22 Geolocation,
23 HttpCredentials,
24 ProxySettings,
25 StorageState,
26 ViewportSize,
27 )
28 from playwright._impl._browser_context import BrowserContext
29 from playwright._impl._cdp_session import CDPSession
30 from playwright._impl._connection import ChannelOwner, from_channel
31 from playwright._impl._helper import (
32 ColorScheme,
33 ForcedColors,
34 ReducedMotion,
35 async_readfile,
36 is_safe_close_error,
37 locals_to_params,
38 )
39 from playwright._impl._network import serialize_headers
40 from playwright._impl._page import Page
41
42 if TYPE_CHECKING: # pragma: no cover
43 from playwright._impl._browser_type import BrowserType
44
45
46 class Browser(ChannelOwner):
47
48 Events = SimpleNamespace(
49 Disconnected="disconnected",
50 )
51
52 def __init__(
53 self, parent: "BrowserType", type: str, guid: str, initializer: Dict
54 ) -> None:
55 super().__init__(parent, type, guid, initializer)
56 self._browser_type = parent
57 self._is_connected = True
58 self._is_closed_or_closing = False
59 self._is_remote = False
60 self._is_connected_over_websocket = False
61
62 self._contexts: List[BrowserContext] = []
63 self._channel.on("close", lambda _: self._on_close())
64
65 def __repr__(self) -> str:
66 return f"<Browser type={self._browser_type} version={self.version}>"
67
68 def _on_close(self) -> None:
69 self._is_connected = False
70 self.emit(Browser.Events.Disconnected, self)
71 self._is_closed_or_closing = True
72
73 @property
74 def contexts(self) -> List[BrowserContext]:
75 return self._contexts.copy()
76
77 def is_connected(self) -> bool:
78 return self._is_connected
79
80 async def new_context(
81 self,
82 viewport: ViewportSize = None,
83 screen: ViewportSize = None,
84 noViewport: bool = None,
85 ignoreHTTPSErrors: bool = None,
86 javaScriptEnabled: bool = None,
87 bypassCSP: bool = None,
88 userAgent: str = None,
89 locale: str = None,
90 timezoneId: str = None,
91 geolocation: Geolocation = None,
92 permissions: List[str] = None,
93 extraHTTPHeaders: Dict[str, str] = None,
94 offline: bool = None,
95 httpCredentials: HttpCredentials = None,
96 deviceScaleFactor: float = None,
97 isMobile: bool = None,
98 hasTouch: bool = None,
99 colorScheme: ColorScheme = None,
100 reducedMotion: ReducedMotion = None,
101 forcedColors: ForcedColors = None,
102 acceptDownloads: bool = None,
103 defaultBrowserType: str = None,
104 proxy: ProxySettings = None,
105 recordHarPath: Union[Path, str] = None,
106 recordHarOmitContent: bool = None,
107 recordVideoDir: Union[Path, str] = None,
108 recordVideoSize: ViewportSize = None,
109 storageState: Union[StorageState, str, Path] = None,
110 baseURL: str = None,
111 strictSelectors: bool = None,
112 ) -> BrowserContext:
113 params = locals_to_params(locals())
114 await normalize_context_params(self._connection._is_sync, params)
115
116 channel = await self._channel.send("newContext", params)
117 context = from_channel(channel)
118 self._contexts.append(context)
119 context._browser = self
120 context._options = params
121 return context
122
123 async def new_page(
124 self,
125 viewport: ViewportSize = None,
126 screen: ViewportSize = None,
127 noViewport: bool = None,
128 ignoreHTTPSErrors: bool = None,
129 javaScriptEnabled: bool = None,
130 bypassCSP: bool = None,
131 userAgent: str = None,
132 locale: str = None,
133 timezoneId: str = None,
134 geolocation: Geolocation = None,
135 permissions: List[str] = None,
136 extraHTTPHeaders: Dict[str, str] = None,
137 offline: bool = None,
138 httpCredentials: HttpCredentials = None,
139 deviceScaleFactor: float = None,
140 isMobile: bool = None,
141 hasTouch: bool = None,
142 colorScheme: ColorScheme = None,
143 forcedColors: ForcedColors = None,
144 reducedMotion: ReducedMotion = None,
145 acceptDownloads: bool = None,
146 defaultBrowserType: str = None,
147 proxy: ProxySettings = None,
148 recordHarPath: Union[Path, str] = None,
149 recordHarOmitContent: bool = None,
150 recordVideoDir: Union[Path, str] = None,
151 recordVideoSize: ViewportSize = None,
152 storageState: Union[StorageState, str, Path] = None,
153 baseURL: str = None,
154 strictSelectors: bool = None,
155 ) -> Page:
156 params = locals_to_params(locals())
157 context = await self.new_context(**params)
158 page = await context.new_page()
159 page._owned_context = context
160 context._owner_page = page
161 return page
162
163 async def close(self) -> None:
164 if self._is_closed_or_closing:
165 return
166 self._is_closed_or_closing = True
167 try:
168 await self._channel.send("close")
169 except Exception as e:
170 if not is_safe_close_error(e):
171 raise e
172 if self._is_connected_over_websocket:
173 await self._connection.stop_async()
174
175 @property
176 def version(self) -> str:
177 return self._initializer["version"]
178
179 async def new_browser_cdp_session(self) -> CDPSession:
180 return from_channel(await self._channel.send("newBrowserCDPSession"))
181
182 async def start_tracing(
183 self,
184 page: Page = None,
185 path: Union[str, Path] = None,
186 screenshots: bool = None,
187 categories: List[str] = None,
188 ) -> None:
189 params = locals_to_params(locals())
190 if page:
191 params["page"] = page._channel
192 if path:
193 params["path"] = str(path)
194 await self._channel.send("startTracing", params)
195
196 async def stop_tracing(self) -> bytes:
197 encoded_binary = await self._channel.send("stopTracing")
198 return base64.b64decode(encoded_binary)
199
200
201 async def normalize_context_params(is_sync: bool, params: Dict) -> None:
202 params["sdkLanguage"] = "python" if is_sync else "python-async"
203 if params.get("noViewport"):
204 del params["noViewport"]
205 params["noDefaultViewport"] = True
206 if "defaultBrowserType" in params:
207 del params["defaultBrowserType"]
208 if "extraHTTPHeaders" in params:
209 params["extraHTTPHeaders"] = serialize_headers(params["extraHTTPHeaders"])
210 if "recordHarPath" in params:
211 params["recordHar"] = {"path": str(params["recordHarPath"])}
212 if "recordHarOmitContent" in params:
213 params["recordHar"]["omitContent"] = True
214 del params["recordHarOmitContent"]
215 del params["recordHarPath"]
216 if "recordVideoDir" in params:
217 params["recordVideo"] = {"dir": str(params["recordVideoDir"])}
218 if "recordVideoSize" in params:
219 params["recordVideo"]["size"] = params["recordVideoSize"]
220 del params["recordVideoSize"]
221 del params["recordVideoDir"]
222 if "storageState" in params:
223 storageState = params["storageState"]
224 if not isinstance(storageState, dict):
225 params["storageState"] = json.loads(
226 (await async_readfile(storageState)).decode()
227 )
228
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/playwright/_impl/_browser.py b/playwright/_impl/_browser.py
--- a/playwright/_impl/_browser.py
+++ b/playwright/_impl/_browser.py
@@ -210,7 +210,7 @@
if "recordHarPath" in params:
params["recordHar"] = {"path": str(params["recordHarPath"])}
if "recordHarOmitContent" in params:
- params["recordHar"]["omitContent"] = True
+ params["recordHar"]["omitContent"] = params["recordHarOmitContent"]
del params["recordHarOmitContent"]
del params["recordHarPath"]
if "recordVideoDir" in params:
| {"golden_diff": "diff --git a/playwright/_impl/_browser.py b/playwright/_impl/_browser.py\n--- a/playwright/_impl/_browser.py\n+++ b/playwright/_impl/_browser.py\n@@ -210,7 +210,7 @@\n if \"recordHarPath\" in params:\n params[\"recordHar\"] = {\"path\": str(params[\"recordHarPath\"])}\n if \"recordHarOmitContent\" in params:\n- params[\"recordHar\"][\"omitContent\"] = True\n+ params[\"recordHar\"][\"omitContent\"] = params[\"recordHarOmitContent\"]\n del params[\"recordHarOmitContent\"]\n del params[\"recordHarPath\"]\n if \"recordVideoDir\" in params:\n", "issue": "[Bug]: `record_har_omit_content` does not work properly\n### Playwright version\r\n\r\n1.15.3\r\n\r\n### Operating system\r\n\r\nLinux\r\n\r\n### What browsers are you seeing the problem on?\r\n\r\nChromium, Firefox, WebKit\r\n\r\n### Other information\r\n\r\nRepo to present bug:\r\nhttps://github.com/qwark97/har_omit_content_bug\r\n\r\nBug occurs also outside the docker image\r\n\r\n### What happened? / Describe the bug\r\n\r\nUsing `record_har_omit_content` as a `new_page` parameter can manipulate presence of the `text` filed inside `entry.response.content` object in `.har` file.\r\n\r\nNot using this parameter (defaults to `False` https://playwright.dev/python/docs/api/class-browser#browser-new-page) allows to see `text` inside `.har` file. Using `record_har_omit_content=True` also works as expected - `text` is absent. Unfortunatelly, passing `record_har_omit_content=False` explicitely **does not** work as expected -> `.har` file **will not** contain `text` filed.\r\n\r\nIt also looks like passing anything except explicit `None` as a `record_har_omit_content` value (type doesn't matter) will cause with missing `text` filed\r\n\r\n### Code snippet to reproduce your bug\r\n\r\n_No response_\r\n\r\n### Relevant log output\r\n\r\n_No response_\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport base64\nimport json\nfrom pathlib import Path\nfrom types import SimpleNamespace\nfrom typing import TYPE_CHECKING, Dict, List, Union\n\nfrom playwright._impl._api_structures import (\n Geolocation,\n HttpCredentials,\n ProxySettings,\n StorageState,\n ViewportSize,\n)\nfrom playwright._impl._browser_context import BrowserContext\nfrom playwright._impl._cdp_session import CDPSession\nfrom playwright._impl._connection import ChannelOwner, from_channel\nfrom playwright._impl._helper import (\n ColorScheme,\n ForcedColors,\n ReducedMotion,\n async_readfile,\n is_safe_close_error,\n locals_to_params,\n)\nfrom playwright._impl._network import serialize_headers\nfrom playwright._impl._page import Page\n\nif TYPE_CHECKING: # pragma: no cover\n from playwright._impl._browser_type import BrowserType\n\n\nclass Browser(ChannelOwner):\n\n Events = SimpleNamespace(\n Disconnected=\"disconnected\",\n )\n\n def __init__(\n self, parent: \"BrowserType\", type: str, guid: str, initializer: Dict\n ) -> None:\n super().__init__(parent, type, guid, initializer)\n 
self._browser_type = parent\n self._is_connected = True\n self._is_closed_or_closing = False\n self._is_remote = False\n self._is_connected_over_websocket = False\n\n self._contexts: List[BrowserContext] = []\n self._channel.on(\"close\", lambda _: self._on_close())\n\n def __repr__(self) -> str:\n return f\"<Browser type={self._browser_type} version={self.version}>\"\n\n def _on_close(self) -> None:\n self._is_connected = False\n self.emit(Browser.Events.Disconnected, self)\n self._is_closed_or_closing = True\n\n @property\n def contexts(self) -> List[BrowserContext]:\n return self._contexts.copy()\n\n def is_connected(self) -> bool:\n return self._is_connected\n\n async def new_context(\n self,\n viewport: ViewportSize = None,\n screen: ViewportSize = None,\n noViewport: bool = None,\n ignoreHTTPSErrors: bool = None,\n javaScriptEnabled: bool = None,\n bypassCSP: bool = None,\n userAgent: str = None,\n locale: str = None,\n timezoneId: str = None,\n geolocation: Geolocation = None,\n permissions: List[str] = None,\n extraHTTPHeaders: Dict[str, str] = None,\n offline: bool = None,\n httpCredentials: HttpCredentials = None,\n deviceScaleFactor: float = None,\n isMobile: bool = None,\n hasTouch: bool = None,\n colorScheme: ColorScheme = None,\n reducedMotion: ReducedMotion = None,\n forcedColors: ForcedColors = None,\n acceptDownloads: bool = None,\n defaultBrowserType: str = None,\n proxy: ProxySettings = None,\n recordHarPath: Union[Path, str] = None,\n recordHarOmitContent: bool = None,\n recordVideoDir: Union[Path, str] = None,\n recordVideoSize: ViewportSize = None,\n storageState: Union[StorageState, str, Path] = None,\n baseURL: str = None,\n strictSelectors: bool = None,\n ) -> BrowserContext:\n params = locals_to_params(locals())\n await normalize_context_params(self._connection._is_sync, params)\n\n channel = await self._channel.send(\"newContext\", params)\n context = from_channel(channel)\n self._contexts.append(context)\n context._browser = self\n context._options = params\n return context\n\n async def new_page(\n self,\n viewport: ViewportSize = None,\n screen: ViewportSize = None,\n noViewport: bool = None,\n ignoreHTTPSErrors: bool = None,\n javaScriptEnabled: bool = None,\n bypassCSP: bool = None,\n userAgent: str = None,\n locale: str = None,\n timezoneId: str = None,\n geolocation: Geolocation = None,\n permissions: List[str] = None,\n extraHTTPHeaders: Dict[str, str] = None,\n offline: bool = None,\n httpCredentials: HttpCredentials = None,\n deviceScaleFactor: float = None,\n isMobile: bool = None,\n hasTouch: bool = None,\n colorScheme: ColorScheme = None,\n forcedColors: ForcedColors = None,\n reducedMotion: ReducedMotion = None,\n acceptDownloads: bool = None,\n defaultBrowserType: str = None,\n proxy: ProxySettings = None,\n recordHarPath: Union[Path, str] = None,\n recordHarOmitContent: bool = None,\n recordVideoDir: Union[Path, str] = None,\n recordVideoSize: ViewportSize = None,\n storageState: Union[StorageState, str, Path] = None,\n baseURL: str = None,\n strictSelectors: bool = None,\n ) -> Page:\n params = locals_to_params(locals())\n context = await self.new_context(**params)\n page = await context.new_page()\n page._owned_context = context\n context._owner_page = page\n return page\n\n async def close(self) -> None:\n if self._is_closed_or_closing:\n return\n self._is_closed_or_closing = True\n try:\n await self._channel.send(\"close\")\n except Exception as e:\n if not is_safe_close_error(e):\n raise e\n if self._is_connected_over_websocket:\n await 
self._connection.stop_async()\n\n @property\n def version(self) -> str:\n return self._initializer[\"version\"]\n\n async def new_browser_cdp_session(self) -> CDPSession:\n return from_channel(await self._channel.send(\"newBrowserCDPSession\"))\n\n async def start_tracing(\n self,\n page: Page = None,\n path: Union[str, Path] = None,\n screenshots: bool = None,\n categories: List[str] = None,\n ) -> None:\n params = locals_to_params(locals())\n if page:\n params[\"page\"] = page._channel\n if path:\n params[\"path\"] = str(path)\n await self._channel.send(\"startTracing\", params)\n\n async def stop_tracing(self) -> bytes:\n encoded_binary = await self._channel.send(\"stopTracing\")\n return base64.b64decode(encoded_binary)\n\n\nasync def normalize_context_params(is_sync: bool, params: Dict) -> None:\n params[\"sdkLanguage\"] = \"python\" if is_sync else \"python-async\"\n if params.get(\"noViewport\"):\n del params[\"noViewport\"]\n params[\"noDefaultViewport\"] = True\n if \"defaultBrowserType\" in params:\n del params[\"defaultBrowserType\"]\n if \"extraHTTPHeaders\" in params:\n params[\"extraHTTPHeaders\"] = serialize_headers(params[\"extraHTTPHeaders\"])\n if \"recordHarPath\" in params:\n params[\"recordHar\"] = {\"path\": str(params[\"recordHarPath\"])}\n if \"recordHarOmitContent\" in params:\n params[\"recordHar\"][\"omitContent\"] = True\n del params[\"recordHarOmitContent\"]\n del params[\"recordHarPath\"]\n if \"recordVideoDir\" in params:\n params[\"recordVideo\"] = {\"dir\": str(params[\"recordVideoDir\"])}\n if \"recordVideoSize\" in params:\n params[\"recordVideo\"][\"size\"] = params[\"recordVideoSize\"]\n del params[\"recordVideoSize\"]\n del params[\"recordVideoDir\"]\n if \"storageState\" in params:\n storageState = params[\"storageState\"]\n if not isinstance(storageState, dict):\n params[\"storageState\"] = json.loads(\n (await async_readfile(storageState)).decode()\n )\n", "path": "playwright/_impl/_browser.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport base64\nimport json\nfrom pathlib import Path\nfrom types import SimpleNamespace\nfrom typing import TYPE_CHECKING, Dict, List, Union\n\nfrom playwright._impl._api_structures import (\n Geolocation,\n HttpCredentials,\n ProxySettings,\n StorageState,\n ViewportSize,\n)\nfrom playwright._impl._browser_context import BrowserContext\nfrom playwright._impl._cdp_session import CDPSession\nfrom playwright._impl._connection import ChannelOwner, from_channel\nfrom playwright._impl._helper import (\n ColorScheme,\n ForcedColors,\n ReducedMotion,\n async_readfile,\n is_safe_close_error,\n locals_to_params,\n)\nfrom playwright._impl._network import serialize_headers\nfrom playwright._impl._page import Page\n\nif TYPE_CHECKING: # pragma: no cover\n from playwright._impl._browser_type import BrowserType\n\n\nclass Browser(ChannelOwner):\n\n Events = SimpleNamespace(\n Disconnected=\"disconnected\",\n )\n\n def __init__(\n self, parent: 
\"BrowserType\", type: str, guid: str, initializer: Dict\n ) -> None:\n super().__init__(parent, type, guid, initializer)\n self._browser_type = parent\n self._is_connected = True\n self._is_closed_or_closing = False\n self._is_remote = False\n self._is_connected_over_websocket = False\n\n self._contexts: List[BrowserContext] = []\n self._channel.on(\"close\", lambda _: self._on_close())\n\n def __repr__(self) -> str:\n return f\"<Browser type={self._browser_type} version={self.version}>\"\n\n def _on_close(self) -> None:\n self._is_connected = False\n self.emit(Browser.Events.Disconnected, self)\n self._is_closed_or_closing = True\n\n @property\n def contexts(self) -> List[BrowserContext]:\n return self._contexts.copy()\n\n def is_connected(self) -> bool:\n return self._is_connected\n\n async def new_context(\n self,\n viewport: ViewportSize = None,\n screen: ViewportSize = None,\n noViewport: bool = None,\n ignoreHTTPSErrors: bool = None,\n javaScriptEnabled: bool = None,\n bypassCSP: bool = None,\n userAgent: str = None,\n locale: str = None,\n timezoneId: str = None,\n geolocation: Geolocation = None,\n permissions: List[str] = None,\n extraHTTPHeaders: Dict[str, str] = None,\n offline: bool = None,\n httpCredentials: HttpCredentials = None,\n deviceScaleFactor: float = None,\n isMobile: bool = None,\n hasTouch: bool = None,\n colorScheme: ColorScheme = None,\n reducedMotion: ReducedMotion = None,\n forcedColors: ForcedColors = None,\n acceptDownloads: bool = None,\n defaultBrowserType: str = None,\n proxy: ProxySettings = None,\n recordHarPath: Union[Path, str] = None,\n recordHarOmitContent: bool = None,\n recordVideoDir: Union[Path, str] = None,\n recordVideoSize: ViewportSize = None,\n storageState: Union[StorageState, str, Path] = None,\n baseURL: str = None,\n strictSelectors: bool = None,\n ) -> BrowserContext:\n params = locals_to_params(locals())\n await normalize_context_params(self._connection._is_sync, params)\n\n channel = await self._channel.send(\"newContext\", params)\n context = from_channel(channel)\n self._contexts.append(context)\n context._browser = self\n context._options = params\n return context\n\n async def new_page(\n self,\n viewport: ViewportSize = None,\n screen: ViewportSize = None,\n noViewport: bool = None,\n ignoreHTTPSErrors: bool = None,\n javaScriptEnabled: bool = None,\n bypassCSP: bool = None,\n userAgent: str = None,\n locale: str = None,\n timezoneId: str = None,\n geolocation: Geolocation = None,\n permissions: List[str] = None,\n extraHTTPHeaders: Dict[str, str] = None,\n offline: bool = None,\n httpCredentials: HttpCredentials = None,\n deviceScaleFactor: float = None,\n isMobile: bool = None,\n hasTouch: bool = None,\n colorScheme: ColorScheme = None,\n forcedColors: ForcedColors = None,\n reducedMotion: ReducedMotion = None,\n acceptDownloads: bool = None,\n defaultBrowserType: str = None,\n proxy: ProxySettings = None,\n recordHarPath: Union[Path, str] = None,\n recordHarOmitContent: bool = None,\n recordVideoDir: Union[Path, str] = None,\n recordVideoSize: ViewportSize = None,\n storageState: Union[StorageState, str, Path] = None,\n baseURL: str = None,\n strictSelectors: bool = None,\n ) -> Page:\n params = locals_to_params(locals())\n context = await self.new_context(**params)\n page = await context.new_page()\n page._owned_context = context\n context._owner_page = page\n return page\n\n async def close(self) -> None:\n if self._is_closed_or_closing:\n return\n self._is_closed_or_closing = True\n try:\n await 
self._channel.send(\"close\")\n except Exception as e:\n if not is_safe_close_error(e):\n raise e\n if self._is_connected_over_websocket:\n await self._connection.stop_async()\n\n @property\n def version(self) -> str:\n return self._initializer[\"version\"]\n\n async def new_browser_cdp_session(self) -> CDPSession:\n return from_channel(await self._channel.send(\"newBrowserCDPSession\"))\n\n async def start_tracing(\n self,\n page: Page = None,\n path: Union[str, Path] = None,\n screenshots: bool = None,\n categories: List[str] = None,\n ) -> None:\n params = locals_to_params(locals())\n if page:\n params[\"page\"] = page._channel\n if path:\n params[\"path\"] = str(path)\n await self._channel.send(\"startTracing\", params)\n\n async def stop_tracing(self) -> bytes:\n encoded_binary = await self._channel.send(\"stopTracing\")\n return base64.b64decode(encoded_binary)\n\n\nasync def normalize_context_params(is_sync: bool, params: Dict) -> None:\n params[\"sdkLanguage\"] = \"python\" if is_sync else \"python-async\"\n if params.get(\"noViewport\"):\n del params[\"noViewport\"]\n params[\"noDefaultViewport\"] = True\n if \"defaultBrowserType\" in params:\n del params[\"defaultBrowserType\"]\n if \"extraHTTPHeaders\" in params:\n params[\"extraHTTPHeaders\"] = serialize_headers(params[\"extraHTTPHeaders\"])\n if \"recordHarPath\" in params:\n params[\"recordHar\"] = {\"path\": str(params[\"recordHarPath\"])}\n if \"recordHarOmitContent\" in params:\n params[\"recordHar\"][\"omitContent\"] = params[\"recordHarOmitContent\"]\n del params[\"recordHarOmitContent\"]\n del params[\"recordHarPath\"]\n if \"recordVideoDir\" in params:\n params[\"recordVideo\"] = {\"dir\": str(params[\"recordVideoDir\"])}\n if \"recordVideoSize\" in params:\n params[\"recordVideo\"][\"size\"] = params[\"recordVideoSize\"]\n del params[\"recordVideoSize\"]\n del params[\"recordVideoDir\"]\n if \"storageState\" in params:\n storageState = params[\"storageState\"]\n if not isinstance(storageState, dict):\n params[\"storageState\"] = json.loads(\n (await async_readfile(storageState)).decode()\n )\n", "path": "playwright/_impl/_browser.py"}]} | 2,968 | 154 |
gh_patches_debug_5785 | rasdani/github-patches | git_diff | ivy-llc__ivy-17476 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
matrix_power
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/paddle/tensor/linalg.py`
Content:
```
1 # global
2 import ivy
3 from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
4 from ivy.functional.frontends.paddle import promote_types_of_paddle_inputs
5 from ivy.functional.frontends.paddle.func_wrapper import (
6 to_ivy_arrays_and_back,
7 )
8
9
10 @with_supported_dtypes(
11 {"2.4.2 and below": ("float32", "float64", "int32", "int64")}, "paddle"
12 )
13 @to_ivy_arrays_and_back
14 def cross(x, y, /, *, axis=9, name=None):
15 x, y = promote_types_of_paddle_inputs(x, y)
16 return ivy.cross(x, y, axis=axis)
17
18
19 # matmul
20 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
21 @to_ivy_arrays_and_back
22 def matmul(x, y, transpose_x=False, transpose_y=False, name=None):
23 x, y = promote_types_of_paddle_inputs(x, y)
24 return ivy.matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y)
25
26
27 # norm
28 @with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle")
29 @to_ivy_arrays_and_back
30 def norm(x, p="fro", axis=None, keepdim=False, name=None):
31 if axis is None and p is not None:
32 if p == "fro":
33 p = 2
34 ret = ivy.vector_norm(x.flatten(), ord=p, axis=-1)
35 if keepdim:
36 ret = ret.reshape([1] * len(x.shape))
37 if len(ret.shape) == 0:
38 return ivy.array([ret])
39 return ret
40
41 if isinstance(axis, tuple):
42 axis = list(axis)
43 if isinstance(axis, list) and len(axis) == 1:
44 axis = axis[0]
45
46 if isinstance(axis, int):
47 if p == "fro":
48 p = 2
49 if p in [0, 1, 2, ivy.inf, -ivy.inf]:
50 ret = ivy.vector_norm(x, ord=p, axis=axis, keepdims=keepdim)
51 elif isinstance(p, (int, float)):
52 ret = ivy.pow(
53 ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim),
54 float(1.0 / p),
55 )
56
57 elif isinstance(axis, list) and len(axis) == 2:
58 if p == 0:
59 raise ValueError
60 elif p == 1:
61 ret = ivy.sum(ivy.abs(x), axis=axis, keepdims=keepdim)
62 elif p == 2 or p == "fro":
63 ret = ivy.matrix_norm(x, ord="fro", axis=axis, keepdims=keepdim)
64 elif p == ivy.inf:
65 ret = ivy.max(ivy.abs(x), axis=axis, keepdims=keepdim)
66 elif p == -ivy.inf:
67 ret = ivy.min(ivy.abs(x), axis=axis, keepdims=keepdim)
68 elif isinstance(p, (int, float)) and p > 0:
69 ret = ivy.pow(
70 ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim),
71 float(1.0 / p),
72 )
73 else:
74 raise ValueError
75
76 else:
77 raise ValueError
78
79 if len(ret.shape) == 0:
80 ret = ivy.array(
81 [ret]
82 ) # this is done so as to match shape of output from paddle
83 return ret
84
85
86 # eig
87 @to_ivy_arrays_and_back
88 def eig(x, name=None):
89 return ivy.eig(x)
90
91
92 # eigvals
93 @to_ivy_arrays_and_back
94 def eigvals(x, name=None):
95 return ivy.eigvals(x)
96
97
98 # eigvalsh
99 @to_ivy_arrays_and_back
100 def eigvalsh(x, UPLO="L", name=None):
101 return ivy.eigvalsh(x, UPLO=UPLO)
102
103
104 # eigh
105 @to_ivy_arrays_and_back
106 def eigh(x, UPLO="L", name=None):
107 return ivy.eigh(x, UPLO=UPLO)
108
109
110 # pinv
111 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
112 @to_ivy_arrays_and_back
113 def pinv(x, rcond=1e-15, hermitian=False, name=None):
114 # TODO: Add hermitian functionality
115 return ivy.pinv(x, rtol=rcond)
116
117
118 # solve
119 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
120 @to_ivy_arrays_and_back
121 def solve(x1, x2, name=None):
122 return ivy.solve(x1, x2)
123
124
125 # cholesky
126 @with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle")
127 @to_ivy_arrays_and_back
128 def cholesky(x, /, *, upper=False, name=None):
129 return ivy.cholesky(x, upper=upper)
130
131
132 # bmm
133 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
134 @to_ivy_arrays_and_back
135 def bmm(x, y, transpose_x=False, transpose_y=False, name=None):
136 if len(ivy.shape(x)) != 3 or len(ivy.shape(y)) != 3:
137 raise RuntimeError("input must be 3D matrices")
138 x, y = promote_types_of_paddle_inputs(x, y)
139 return ivy.matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y)
140
141
142 # matrix_power
143 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
144 @to_ivy_arrays_and_back
145 def matrix_power(x, n, name=None):
146 return ivy.matrix_power(x, n)
147
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/frontends/paddle/tensor/linalg.py b/ivy/functional/frontends/paddle/tensor/linalg.py
--- a/ivy/functional/frontends/paddle/tensor/linalg.py
+++ b/ivy/functional/frontends/paddle/tensor/linalg.py
@@ -130,3 +130,10 @@
raise RuntimeError("input must be 3D matrices")
x, y = promote_types_of_paddle_inputs(x, y)
return ivy.matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y)
+
+
+# matrix_power
+@with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
+@to_ivy_arrays_and_back
+def matrix_power(x, n, name=None):
+ return ivy.matrix_power(x, n)
| {"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/linalg.py b/ivy/functional/frontends/paddle/tensor/linalg.py\n--- a/ivy/functional/frontends/paddle/tensor/linalg.py\n+++ b/ivy/functional/frontends/paddle/tensor/linalg.py\n@@ -130,3 +130,10 @@\n raise RuntimeError(\"input must be 3D matrices\")\n x, y = promote_types_of_paddle_inputs(x, y)\n return ivy.matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y)\n+\n+\n+# matrix_power\n+@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n+@to_ivy_arrays_and_back\n+def matrix_power(x, n, name=None):\n+ return ivy.matrix_power(x, n)\n", "issue": " matrix_power\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.functional.frontends.paddle import promote_types_of_paddle_inputs\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_supported_dtypes(\n {\"2.4.2 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef cross(x, y, /, *, axis=9, name=None):\n x, y = promote_types_of_paddle_inputs(x, y)\n return ivy.cross(x, y, axis=axis)\n\n\n# matmul\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef matmul(x, y, transpose_x=False, transpose_y=False, name=None):\n x, y = promote_types_of_paddle_inputs(x, y)\n return ivy.matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y)\n\n\n# norm\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef norm(x, p=\"fro\", axis=None, keepdim=False, name=None):\n if axis is None and p is not None:\n if p == \"fro\":\n p = 2\n ret = ivy.vector_norm(x.flatten(), ord=p, axis=-1)\n if keepdim:\n ret = ret.reshape([1] * len(x.shape))\n if len(ret.shape) == 0:\n return ivy.array([ret])\n return ret\n\n if isinstance(axis, tuple):\n axis = list(axis)\n if isinstance(axis, list) and len(axis) == 1:\n axis = axis[0]\n\n if isinstance(axis, int):\n if p == \"fro\":\n p = 2\n if p in [0, 1, 2, ivy.inf, -ivy.inf]:\n ret = ivy.vector_norm(x, ord=p, axis=axis, keepdims=keepdim)\n elif isinstance(p, (int, float)):\n ret = ivy.pow(\n ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim),\n float(1.0 / p),\n )\n\n elif isinstance(axis, list) and len(axis) == 2:\n if p == 0:\n raise ValueError\n elif p == 1:\n ret = ivy.sum(ivy.abs(x), axis=axis, keepdims=keepdim)\n elif p == 2 or p == \"fro\":\n ret = ivy.matrix_norm(x, ord=\"fro\", axis=axis, keepdims=keepdim)\n elif p == ivy.inf:\n ret = ivy.max(ivy.abs(x), axis=axis, keepdims=keepdim)\n elif p == -ivy.inf:\n ret = ivy.min(ivy.abs(x), axis=axis, keepdims=keepdim)\n elif isinstance(p, (int, float)) and p > 0:\n ret = ivy.pow(\n ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim),\n float(1.0 / p),\n )\n else:\n raise ValueError\n\n else:\n raise ValueError\n\n if len(ret.shape) == 0:\n ret = ivy.array(\n [ret]\n ) # this is done so as to match shape of output from paddle\n return ret\n\n\n# eig\n@to_ivy_arrays_and_back\ndef eig(x, name=None):\n return ivy.eig(x)\n\n\n# eigvals\n@to_ivy_arrays_and_back\ndef eigvals(x, name=None):\n return ivy.eigvals(x)\n\n\n# eigvalsh\n@to_ivy_arrays_and_back\ndef eigvalsh(x, UPLO=\"L\", name=None):\n return ivy.eigvalsh(x, UPLO=UPLO)\n\n\n# eigh\n@to_ivy_arrays_and_back\ndef eigh(x, UPLO=\"L\", name=None):\n return ivy.eigh(x, UPLO=UPLO)\n\n\n# 
pinv\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef pinv(x, rcond=1e-15, hermitian=False, name=None):\n # TODO: Add hermitian functionality\n return ivy.pinv(x, rtol=rcond)\n\n\n# solve\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef solve(x1, x2, name=None):\n return ivy.solve(x1, x2)\n\n\n# cholesky\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cholesky(x, /, *, upper=False, name=None):\n return ivy.cholesky(x, upper=upper)\n\n\n# bmm\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef bmm(x, y, transpose_x=False, transpose_y=False, name=None):\n if len(ivy.shape(x)) != 3 or len(ivy.shape(y)) != 3:\n raise RuntimeError(\"input must be 3D matrices\")\n x, y = promote_types_of_paddle_inputs(x, y)\n return ivy.matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y)\n\n\n# matrix_power\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef matrix_power(x, n, name=None):\n return ivy.matrix_power(x, n)\n", "path": "ivy/functional/frontends/paddle/tensor/linalg.py"}], "after_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.functional.frontends.paddle import promote_types_of_paddle_inputs\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_supported_dtypes(\n {\"2.4.2 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef cross(x, y, /, *, axis=9, name=None):\n x, y = promote_types_of_paddle_inputs(x, y)\n return ivy.cross(x, y, axis=axis)\n\n\n# matmul\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef matmul(x, y, transpose_x=False, transpose_y=False, name=None):\n x, y = promote_types_of_paddle_inputs(x, y)\n return ivy.matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y)\n\n\n# norm\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef norm(x, p=\"fro\", axis=None, keepdim=False, name=None):\n if axis is None and p is not None:\n if p == \"fro\":\n p = 2\n ret = ivy.vector_norm(x.flatten(), ord=p, axis=-1)\n if keepdim:\n ret = ret.reshape([1] * len(x.shape))\n if len(ret.shape) == 0:\n return ivy.array([ret])\n return ret\n\n if isinstance(axis, tuple):\n axis = list(axis)\n if isinstance(axis, list) and len(axis) == 1:\n axis = axis[0]\n\n if isinstance(axis, int):\n if p == \"fro\":\n p = 2\n if p in [0, 1, 2, ivy.inf, -ivy.inf]:\n ret = ivy.vector_norm(x, ord=p, axis=axis, keepdims=keepdim)\n elif isinstance(p, (int, float)):\n ret = ivy.pow(\n ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim),\n float(1.0 / p),\n )\n\n elif isinstance(axis, list) and len(axis) == 2:\n if p == 0:\n raise ValueError\n elif p == 1:\n ret = ivy.sum(ivy.abs(x), axis=axis, keepdims=keepdim)\n elif p == 2 or p == \"fro\":\n ret = ivy.matrix_norm(x, ord=\"fro\", axis=axis, keepdims=keepdim)\n elif p == ivy.inf:\n ret = ivy.max(ivy.abs(x), axis=axis, keepdims=keepdim)\n elif p == -ivy.inf:\n ret = ivy.min(ivy.abs(x), axis=axis, keepdims=keepdim)\n elif isinstance(p, (int, float)) and p > 0:\n ret = ivy.pow(\n 
ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim),\n float(1.0 / p),\n )\n else:\n raise ValueError\n\n else:\n raise ValueError\n\n if len(ret.shape) == 0:\n ret = ivy.array(\n [ret]\n ) # this is done so as to match shape of output from paddle\n return ret\n\n\n# eig\n@to_ivy_arrays_and_back\ndef eig(x, name=None):\n return ivy.eig(x)\n\n\n# eigvals\n@to_ivy_arrays_and_back\ndef eigvals(x, name=None):\n return ivy.eigvals(x)\n\n\n# eigvalsh\n@to_ivy_arrays_and_back\ndef eigvalsh(x, UPLO=\"L\", name=None):\n return ivy.eigvalsh(x, UPLO=UPLO)\n\n\n# eigh\n@to_ivy_arrays_and_back\ndef eigh(x, UPLO=\"L\", name=None):\n return ivy.eigh(x, UPLO=UPLO)\n\n\n# pinv\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef pinv(x, rcond=1e-15, hermitian=False, name=None):\n # TODO: Add hermitian functionality\n return ivy.pinv(x, rtol=rcond)\n\n\n# cholesky\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cholesky(x, /, *, upper=False, name=None):\n return ivy.cholesky(x, upper=upper)\n\n\n# bmm\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef bmm(x, y, transpose_x=False, transpose_y=False, name=None):\n if len(ivy.shape(x)) != 3 or len(ivy.shape(y)) != 3:\n raise RuntimeError(\"input must be 3D matrices\")\n x, y = promote_types_of_paddle_inputs(x, y)\n return ivy.matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y)\n\n\n# matrix_power\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef matrix_power(x, n, name=None):\n return ivy.matrix_power(x, n)\n", "path": "ivy/functional/frontends/paddle/tensor/linalg.py"}]} | 1,992 | 196 |
gh_patches_debug_31652 | rasdani/github-patches | git_diff | mosaicml__composer-595 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pin the pyright version
The environment variable `PYRIGHT_PYTHON_FORCE_VERSION` needs to be set when pyright is installed from pip. Ideally we can set this variable in the setup.py (e.g. `os.environ['PYRIGHT_PYTHON_FORCE_VERSION'] = '...'`); but if not, then it should be in the `.ci/Jenkinsfile`.
--- END ISSUE ---
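As a hedged illustration of the suggestion in the issue (not the change ultimately made in the diff below, which pins dependency versions instead), forcing the pyright version from setup.py would look roughly like this; the exact version string is an assumption:
```python
import os

# Pin the pyright binary fetched by the pyright-python wrapper.
# The version string below is a placeholder, not a verified value.
os.environ.setdefault("PYRIGHT_PYTHON_FORCE_VERSION", "1.1.224")
```
Whether a variable set while setup.py runs persists for later pyright invocations is a separate question, which is one reason a CI-level setting (e.g. in `.ci/Jenkinsfile`) may be preferable.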
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2021 MosaicML. All Rights Reserved.
2
3 import os
4 import site
5 import sys
6 import textwrap
7
8 import setuptools
9 from setuptools import setup
10 from setuptools.command.develop import develop as develop_orig
11
12 _IS_ROOT = os.getuid() == 0
13 _IS_USER = "--user" in sys.argv[1:]
14 _IS_VIRTUALENV = "VIRTUAL_ENV" in os.environ
15
16
17 # From https://stackoverflow.com/questions/51292333/how-to-tell-from-setup-py-if-the-module-is-being-installed-in-editable-mode
18 class develop(develop_orig):
19
20 def run(self):
21 if _IS_ROOT and (not _IS_VIRTUALENV) and (not _IS_USER):
22 raise RuntimeError(
23 textwrap.dedent("""\
24 When installing in editable mode as root outside of a virtual environment,
25 please specify `--user`. Editable installs as the root user outside of a virtual environment
26 do not work without the `--user` flag. Please instead run something like: `pip install --user -e .`"""
27 ))
28 super().run()
29
30
31 # From https://github.com/pypa/pip/issues/7953#issuecomment-645133255
32 site.ENABLE_USER_SITE = _IS_USER
33
34
35 def package_files(directory: str):
36 # from https://stackoverflow.com/a/36693250
37 paths = []
38 for (path, _, filenames) in os.walk(directory):
39 for filename in filenames:
40 paths.append(os.path.join('..', path, filename))
41 return paths
42
43
44 with open("README.md", "r", encoding="utf-8") as fh:
45 long_description = fh.read()
46
47 install_requires = [
48 "pyyaml>=5.4.1",
49 "tqdm>=4.62.3",
50 "torchmetrics>=0.6.0",
51 "torch_optimizer==0.1.0",
52 "torchvision>=0.9.0",
53 "torch>=1.9",
54 "yahp>=0.0.14",
55 "requests>=2.26.0",
56 "numpy==1.21.5",
57 "apache-libcloud>=3.3.1",
58 "psutil>=5.8.0",
59 ]
60 extra_deps = {}
61
62 extra_deps['base'] = []
63
64 extra_deps['dev'] = [
65 # Imports for docs builds and running tests
66 "custom_inherit==2.3.2",
67 'junitparser>=2.1.1',
68 'coverage[toml]>=6.1.1',
69 'fasteners>=0.16.3', # run_directory_uploader tests require fasteners
70 'pytest>=7.0.0',
71 'toml>=0.10.2',
72 'yapf>=0.32.0',
73 'isort>=5.9.3',
74 'ipython>=7.29.0',
75 'ipykernel>=6.5.0',
76 'jupyter>=1.0.0',
77 'yamllint>=1.26.2',
78 'pytest-timeout>=1.4.2',
79 'pyright==1.1.224.post1',
80 'recommonmark>=0.7.1',
81 'sphinx>=4.2.0',
82 'sphinx_copybutton>=0.4.0',
83 'sphinx_markdown_tables>=0.0.15',
84 'sphinx-argparse>=0.3.1',
85 'sphinxcontrib.katex>=0.8.6',
86 'sphinxext.opengraph>=0.4.2',
87 'sphinxemoji>=0.2.0',
88 'furo>=2022.1.2',
89 'sphinx-copybutton>=0.4.0',
90 'testbook>=0.4.2',
91 'myst-parser==0.16.1',
92 'pylint>=2.12.2',
93 'docformatter>=1.4',
94 'sphinx_panels>=0.6.0',
95 ]
96
97 extra_deps["deepspeed"] = [
98 'deepspeed>=0.5.5',
99 ]
100
101 extra_deps["wandb"] = [
102 'wandb>=0.12.2',
103 ]
104
105 extra_deps["unet"] = [
106 'monai>=0.7.0',
107 'scikit-learn>=1.0.1',
108 ]
109
110 extra_deps["timm"] = [
111 'timm>=0.5.4',
112 ]
113
114 extra_deps["nlp"] = [
115 'transformers>=4.11',
116 'datasets>=1.14',
117 ]
118
119 extra_deps['all'] = set(dep for deps in extra_deps.values() for dep in deps)
120
121 setup(name="mosaicml",
122 version="0.3.1",
123 author="MosaicML",
124 author_email="[email protected]",
125 description="composing methods for ML training efficiency",
126 long_description=long_description,
127 long_description_content_type="text/markdown",
128 url="https://github.com/mosaicml/composer",
129 include_package_data=True,
130 package_data={
131 "composer": ['py.typed'],
132 "": package_files('composer/yamls'),
133 "": package_files('composer/algorithms')
134 },
135 packages=setuptools.find_packages(exclude=["tests*"]),
136 classifiers=[
137 "Programming Language :: Python :: 3",
138 "Programming Language :: Python :: 3.7",
139 "Programming Language :: Python :: 3.8",
140 "Programming Language :: Python :: 3.9",
141 ],
142 install_requires=install_requires,
143 entry_points={
144 'console_scripts': ['composer = composer.cli.launcher:main',],
145 },
146 extras_require=extra_deps,
147 dependency_links=['https://developer.download.nvidia.com/compute/redist'],
148 python_requires='>=3.7',
149 ext_package="composer",
150 cmdclass={'develop': develop})
151
152 # only visible if user installs with verbose -v flag
153 # Printing to stdout as not to interfere with setup.py CLI flags (e.g. --version)
154 print("*" * 20, file=sys.stderr)
155 print(textwrap.dedent("""\
156 NOTE: For best performance, we recommend installing Pillow-SIMD
157 for accelerated image processing operations. To install:
158 \t pip uninstall pillow && pip install pillow-simd"""),
159 file=sys.stderr)
160 print("*" * 20, file=sys.stderr)
161
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -66,10 +66,10 @@
"custom_inherit==2.3.2",
'junitparser>=2.1.1',
'coverage[toml]>=6.1.1',
- 'fasteners>=0.16.3', # run_directory_uploader tests require fasteners
+ 'fasteners==0.17.3', # run_directory_uploader tests require fasteners
'pytest>=7.0.0',
- 'toml>=0.10.2',
- 'yapf>=0.32.0',
+ 'toml==0.10.2',
+ 'yapf==0.32.0',
'isort>=5.9.3',
'ipython>=7.29.0',
'ipykernel>=6.5.0',
@@ -77,38 +77,38 @@
'yamllint>=1.26.2',
'pytest-timeout>=1.4.2',
'pyright==1.1.224.post1',
- 'recommonmark>=0.7.1',
+ 'recommonmark==0.7.1',
'sphinx>=4.2.0',
- 'sphinx_copybutton>=0.4.0',
- 'sphinx_markdown_tables>=0.0.15',
- 'sphinx-argparse>=0.3.1',
- 'sphinxcontrib.katex>=0.8.6',
- 'sphinxext.opengraph>=0.4.2',
- 'sphinxemoji>=0.2.0',
+ 'sphinx_copybutton==0.5.0',
+ 'sphinx_markdown_tables==0.0.15',
+ 'sphinx-argparse==0.3.1',
+ 'sphinxcontrib.katex==0.8.6',
+ 'sphinxext.opengraph==0.6.1',
+ 'sphinxemoji==0.2.0',
'furo>=2022.1.2',
- 'sphinx-copybutton>=0.4.0',
- 'testbook>=0.4.2',
+ 'sphinx-copybutton==0.5.0',
+ 'testbook==0.4.2',
'myst-parser==0.16.1',
'pylint>=2.12.2',
'docformatter>=1.4',
- 'sphinx_panels>=0.6.0',
+ 'sphinx_panels==0.6.0',
]
extra_deps["deepspeed"] = [
- 'deepspeed>=0.5.5',
+ 'deepspeed==0.5.10',
]
extra_deps["wandb"] = [
- 'wandb>=0.12.2',
+ 'wandb==0.12.10',
]
extra_deps["unet"] = [
- 'monai>=0.7.0',
+ 'monai==0.8.1',
'scikit-learn>=1.0.1',
]
extra_deps["timm"] = [
- 'timm>=0.5.4',
+ 'timm==0.5.4',
]
extra_deps["nlp"] = [
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -66,10 +66,10 @@\n \"custom_inherit==2.3.2\",\n 'junitparser>=2.1.1',\n 'coverage[toml]>=6.1.1',\n- 'fasteners>=0.16.3', # run_directory_uploader tests require fasteners\n+ 'fasteners==0.17.3', # run_directory_uploader tests require fasteners\n 'pytest>=7.0.0',\n- 'toml>=0.10.2',\n- 'yapf>=0.32.0',\n+ 'toml==0.10.2',\n+ 'yapf==0.32.0',\n 'isort>=5.9.3',\n 'ipython>=7.29.0',\n 'ipykernel>=6.5.0',\n@@ -77,38 +77,38 @@\n 'yamllint>=1.26.2',\n 'pytest-timeout>=1.4.2',\n 'pyright==1.1.224.post1',\n- 'recommonmark>=0.7.1',\n+ 'recommonmark==0.7.1',\n 'sphinx>=4.2.0',\n- 'sphinx_copybutton>=0.4.0',\n- 'sphinx_markdown_tables>=0.0.15',\n- 'sphinx-argparse>=0.3.1',\n- 'sphinxcontrib.katex>=0.8.6',\n- 'sphinxext.opengraph>=0.4.2',\n- 'sphinxemoji>=0.2.0',\n+ 'sphinx_copybutton==0.5.0',\n+ 'sphinx_markdown_tables==0.0.15',\n+ 'sphinx-argparse==0.3.1',\n+ 'sphinxcontrib.katex==0.8.6',\n+ 'sphinxext.opengraph==0.6.1',\n+ 'sphinxemoji==0.2.0',\n 'furo>=2022.1.2',\n- 'sphinx-copybutton>=0.4.0',\n- 'testbook>=0.4.2',\n+ 'sphinx-copybutton==0.5.0',\n+ 'testbook==0.4.2',\n 'myst-parser==0.16.1',\n 'pylint>=2.12.2',\n 'docformatter>=1.4',\n- 'sphinx_panels>=0.6.0',\n+ 'sphinx_panels==0.6.0',\n ]\n \n extra_deps[\"deepspeed\"] = [\n- 'deepspeed>=0.5.5',\n+ 'deepspeed==0.5.10',\n ]\n \n extra_deps[\"wandb\"] = [\n- 'wandb>=0.12.2',\n+ 'wandb==0.12.10',\n ]\n \n extra_deps[\"unet\"] = [\n- 'monai>=0.7.0',\n+ 'monai==0.8.1',\n 'scikit-learn>=1.0.1',\n ]\n \n extra_deps[\"timm\"] = [\n- 'timm>=0.5.4',\n+ 'timm==0.5.4',\n ]\n \n extra_deps[\"nlp\"] = [\n", "issue": "Pin the pyright version\nThe environment variable `PYRIGHT_PYTHON_FORCE_VERSION` needs to be set when pyright is installed from pip. Ideally we can set this variable in the setup.py (e.g. `os.environ['PYRIGHT_PYTHON_FORCE_VERSION'] = '...'`); but if not, then it should be in the `.ci/Jenkinsfile`.\n", "before_files": [{"content": "# Copyright 2021 MosaicML. All Rights Reserved.\n\nimport os\nimport site\nimport sys\nimport textwrap\n\nimport setuptools\nfrom setuptools import setup\nfrom setuptools.command.develop import develop as develop_orig\n\n_IS_ROOT = os.getuid() == 0\n_IS_USER = \"--user\" in sys.argv[1:]\n_IS_VIRTUALENV = \"VIRTUAL_ENV\" in os.environ\n\n\n# From https://stackoverflow.com/questions/51292333/how-to-tell-from-setup-py-if-the-module-is-being-installed-in-editable-mode\nclass develop(develop_orig):\n\n def run(self):\n if _IS_ROOT and (not _IS_VIRTUALENV) and (not _IS_USER):\n raise RuntimeError(\n textwrap.dedent(\"\"\"\\\n When installing in editable mode as root outside of a virtual environment,\n please specify `--user`. Editable installs as the root user outside of a virtual environment\n do not work without the `--user` flag. 
Please instead run something like: `pip install --user -e .`\"\"\"\n ))\n super().run()\n\n\n# From https://github.com/pypa/pip/issues/7953#issuecomment-645133255\nsite.ENABLE_USER_SITE = _IS_USER\n\n\ndef package_files(directory: str):\n # from https://stackoverflow.com/a/36693250\n paths = []\n for (path, _, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join('..', path, filename))\n return paths\n\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\ninstall_requires = [\n \"pyyaml>=5.4.1\",\n \"tqdm>=4.62.3\",\n \"torchmetrics>=0.6.0\",\n \"torch_optimizer==0.1.0\",\n \"torchvision>=0.9.0\",\n \"torch>=1.9\",\n \"yahp>=0.0.14\",\n \"requests>=2.26.0\",\n \"numpy==1.21.5\",\n \"apache-libcloud>=3.3.1\",\n \"psutil>=5.8.0\",\n]\nextra_deps = {}\n\nextra_deps['base'] = []\n\nextra_deps['dev'] = [\n # Imports for docs builds and running tests\n \"custom_inherit==2.3.2\",\n 'junitparser>=2.1.1',\n 'coverage[toml]>=6.1.1',\n 'fasteners>=0.16.3', # run_directory_uploader tests require fasteners\n 'pytest>=7.0.0',\n 'toml>=0.10.2',\n 'yapf>=0.32.0',\n 'isort>=5.9.3',\n 'ipython>=7.29.0',\n 'ipykernel>=6.5.0',\n 'jupyter>=1.0.0',\n 'yamllint>=1.26.2',\n 'pytest-timeout>=1.4.2',\n 'pyright==1.1.224.post1',\n 'recommonmark>=0.7.1',\n 'sphinx>=4.2.0',\n 'sphinx_copybutton>=0.4.0',\n 'sphinx_markdown_tables>=0.0.15',\n 'sphinx-argparse>=0.3.1',\n 'sphinxcontrib.katex>=0.8.6',\n 'sphinxext.opengraph>=0.4.2',\n 'sphinxemoji>=0.2.0',\n 'furo>=2022.1.2',\n 'sphinx-copybutton>=0.4.0',\n 'testbook>=0.4.2',\n 'myst-parser==0.16.1',\n 'pylint>=2.12.2',\n 'docformatter>=1.4',\n 'sphinx_panels>=0.6.0',\n]\n\nextra_deps[\"deepspeed\"] = [\n 'deepspeed>=0.5.5',\n]\n\nextra_deps[\"wandb\"] = [\n 'wandb>=0.12.2',\n]\n\nextra_deps[\"unet\"] = [\n 'monai>=0.7.0',\n 'scikit-learn>=1.0.1',\n]\n\nextra_deps[\"timm\"] = [\n 'timm>=0.5.4',\n]\n\nextra_deps[\"nlp\"] = [\n 'transformers>=4.11',\n 'datasets>=1.14',\n]\n\nextra_deps['all'] = set(dep for deps in extra_deps.values() for dep in deps)\n\nsetup(name=\"mosaicml\",\n version=\"0.3.1\",\n author=\"MosaicML\",\n author_email=\"[email protected]\",\n description=\"composing methods for ML training efficiency\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/mosaicml/composer\",\n include_package_data=True,\n package_data={\n \"composer\": ['py.typed'],\n \"\": package_files('composer/yamls'),\n \"\": package_files('composer/algorithms')\n },\n packages=setuptools.find_packages(exclude=[\"tests*\"]),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n ],\n install_requires=install_requires,\n entry_points={\n 'console_scripts': ['composer = composer.cli.launcher:main',],\n },\n extras_require=extra_deps,\n dependency_links=['https://developer.download.nvidia.com/compute/redist'],\n python_requires='>=3.7',\n ext_package=\"composer\",\n cmdclass={'develop': develop})\n\n# only visible if user installs with verbose -v flag\n# Printing to stdout as not to interfere with setup.py CLI flags (e.g. --version)\nprint(\"*\" * 20, file=sys.stderr)\nprint(textwrap.dedent(\"\"\"\\\n NOTE: For best performance, we recommend installing Pillow-SIMD\n for accelerated image processing operations. 
To install:\n \\t pip uninstall pillow && pip install pillow-simd\"\"\"),\n file=sys.stderr)\nprint(\"*\" * 20, file=sys.stderr)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2021 MosaicML. All Rights Reserved.\n\nimport os\nimport site\nimport sys\nimport textwrap\n\nimport setuptools\nfrom setuptools import setup\nfrom setuptools.command.develop import develop as develop_orig\n\n_IS_ROOT = os.getuid() == 0\n_IS_USER = \"--user\" in sys.argv[1:]\n_IS_VIRTUALENV = \"VIRTUAL_ENV\" in os.environ\n\n\n# From https://stackoverflow.com/questions/51292333/how-to-tell-from-setup-py-if-the-module-is-being-installed-in-editable-mode\nclass develop(develop_orig):\n\n def run(self):\n if _IS_ROOT and (not _IS_VIRTUALENV) and (not _IS_USER):\n raise RuntimeError(\n textwrap.dedent(\"\"\"\\\n When installing in editable mode as root outside of a virtual environment,\n please specify `--user`. Editable installs as the root user outside of a virtual environment\n do not work without the `--user` flag. Please instead run something like: `pip install --user -e .`\"\"\"\n ))\n super().run()\n\n\n# From https://github.com/pypa/pip/issues/7953#issuecomment-645133255\nsite.ENABLE_USER_SITE = _IS_USER\n\n\ndef package_files(directory: str):\n # from https://stackoverflow.com/a/36693250\n paths = []\n for (path, _, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join('..', path, filename))\n return paths\n\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\ninstall_requires = [\n \"pyyaml>=5.4.1\",\n \"tqdm>=4.62.3\",\n \"torchmetrics>=0.6.0\",\n \"torch_optimizer==0.1.0\",\n \"torchvision>=0.9.0\",\n \"torch>=1.9\",\n \"yahp>=0.0.14\",\n \"requests>=2.26.0\",\n \"numpy==1.21.5\",\n \"apache-libcloud>=3.3.1\",\n \"psutil>=5.8.0\",\n]\nextra_deps = {}\n\nextra_deps['base'] = []\n\nextra_deps['dev'] = [\n # Imports for docs builds and running tests\n \"custom_inherit==2.3.2\",\n 'junitparser>=2.1.1',\n 'coverage[toml]>=6.1.1',\n 'fasteners==0.17.3', # run_directory_uploader tests require fasteners\n 'pytest>=7.0.0',\n 'toml==0.10.2',\n 'yapf==0.32.0',\n 'isort>=5.9.3',\n 'ipython>=7.29.0',\n 'ipykernel>=6.5.0',\n 'jupyter>=1.0.0',\n 'yamllint>=1.26.2',\n 'pytest-timeout>=1.4.2',\n 'pyright==1.1.224.post1',\n 'recommonmark==0.7.1',\n 'sphinx>=4.2.0',\n 'sphinx_copybutton==0.5.0',\n 'sphinx_markdown_tables==0.0.15',\n 'sphinx-argparse==0.3.1',\n 'sphinxcontrib.katex==0.8.6',\n 'sphinxext.opengraph==0.6.1',\n 'sphinxemoji==0.2.0',\n 'furo>=2022.1.2',\n 'sphinx-copybutton==0.5.0',\n 'testbook==0.4.2',\n 'myst-parser==0.16.1',\n 'pylint>=2.12.2',\n 'docformatter>=1.4',\n 'sphinx_panels==0.6.0',\n]\n\nextra_deps[\"deepspeed\"] = [\n 'deepspeed==0.5.10',\n]\n\nextra_deps[\"wandb\"] = [\n 'wandb==0.12.10',\n]\n\nextra_deps[\"unet\"] = [\n 'monai==0.8.1',\n 'scikit-learn>=1.0.1',\n]\n\nextra_deps[\"timm\"] = [\n 'timm==0.5.4',\n]\n\nextra_deps[\"nlp\"] = [\n 'transformers>=4.11',\n 'datasets>=1.14',\n]\n\nextra_deps['all'] = set(dep for deps in extra_deps.values() for dep in deps)\n\nsetup(name=\"mosaicml\",\n version=\"0.3.1\",\n author=\"MosaicML\",\n author_email=\"[email protected]\",\n description=\"composing methods for ML training efficiency\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/mosaicml/composer\",\n include_package_data=True,\n package_data={\n \"composer\": ['py.typed'],\n \"\": package_files('composer/yamls'),\n \"\": 
package_files('composer/algorithms')\n },\n packages=setuptools.find_packages(exclude=[\"tests*\"]),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n ],\n install_requires=install_requires,\n entry_points={\n 'console_scripts': ['composer = composer.cli.launcher:main',],\n },\n extras_require=extra_deps,\n dependency_links=['https://developer.download.nvidia.com/compute/redist'],\n python_requires='>=3.7',\n ext_package=\"composer\",\n cmdclass={'develop': develop})\n\n# only visible if user installs with verbose -v flag\n# Printing to stdout as not to interfere with setup.py CLI flags (e.g. --version)\nprint(\"*\" * 20, file=sys.stderr)\nprint(textwrap.dedent(\"\"\"\\\n NOTE: For best performance, we recommend installing Pillow-SIMD\n for accelerated image processing operations. To install:\n \\t pip uninstall pillow && pip install pillow-simd\"\"\"),\n file=sys.stderr)\nprint(\"*\" * 20, file=sys.stderr)\n", "path": "setup.py"}]} | 2,133 | 781 |
gh_patches_debug_43875 | rasdani/github-patches | git_diff | certbot__certbot-1875 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Webroot breaks for non-root due to chown
Hi,
Today I'm trying out letsencrypt and stumbled upon the fact that using webroot breaks when not running as root, due to a chown on line 72 of letsencrypt/plugins/webroot.py:
```
49 def prepare(self): # pylint: disable=missing-docstring
50 path_map = self.conf("map")
51
52 if not path_map:
53 raise errors.PluginError("--{0} must be set".format(
54 self.option_name("path")))
55 for name, path in path_map.items():
56 if not os.path.isdir(path):
57 raise errors.PluginError(path + " does not exist or is not a directory")
58 self.full_roots[name] = os.path.join(path, challenges.HTTP01.URI_ROOT_PATH)
59
60 logger.debug("Creating root challenges validation dir at %s",
61 self.full_roots[name])
62 try:
63 os.makedirs(self.full_roots[name])
64 # Set permissions as parent directory (GH #1389)
65 # We don't use the parameters in makedirs because it
66 # may not always work
67 # https://stackoverflow.com/questions/5231901/permission-problems-when-creating-a-dir-with-os-makedirs-python
68 stat_path = os.stat(path)
69 filemode = stat.S_IMODE(stat_path.st_mode)
70 os.chmod(self.full_roots[name], filemode)
71 # Set owner and group, too
72 os.chown(self.full_roots[name], stat_path.st_uid,
73 stat_path.st_gid)
74
75 except OSError as exception:
76 if exception.errno != errno.EEXIST:
77 raise errors.PluginError(
78 "Couldn't create root for {0} http-01 "
79 "challenge responses: {1}", name, exception)
```
It tries to set the ownership of the directory to the owner of the parent. So currently, to bypass this issue, either the webroot must be chown'd to the letsencrypt user, or the script needs to be run as root. Neither of these is a satisfactory solution. Is chown at all necessary when letsencrypt is not run as root?
--- END ISSUE ---
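As a side note, a minimal sketch of the non-root-friendly pattern discussed in this issue is shown below: create the directory under a fixed umask instead of copying the parent's mode, and treat a permission error from `os.chown` as non-fatal. This is a simplified illustration only (function and variable names are made up here); the project's actual change appears in the diff further down.
```python
import errno
import os


def make_challenge_dir(webroot, challenge_dir):
    """Illustrative only: create challenge_dir without requiring root."""
    old_umask = os.umask(0o022)  # yields 0755 directories regardless of the caller's umask
    try:
        os.makedirs(challenge_dir, 0o0755)
        try:
            parent = os.stat(webroot)
            os.chown(challenge_dir, parent.st_uid, parent.st_gid)
        except OSError as exc:
            # A non-root caller cannot chown; keep current ownership in that case.
            if exc.errno != errno.EACCES:
                raise
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise
    finally:
        os.umask(old_umask)
```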
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `letsencrypt/plugins/webroot.py`
Content:
```
1 """Webroot plugin."""
2 import errno
3 import logging
4 import os
5 import stat
6
7 import zope.interface
8
9 from acme import challenges
10
11 from letsencrypt import errors
12 from letsencrypt import interfaces
13 from letsencrypt.plugins import common
14
15
16 logger = logging.getLogger(__name__)
17
18
19 class Authenticator(common.Plugin):
20 """Webroot Authenticator."""
21 zope.interface.implements(interfaces.IAuthenticator)
22 zope.interface.classProvides(interfaces.IPluginFactory)
23
24 description = "Webroot Authenticator"
25
26 MORE_INFO = """\
27 Authenticator plugin that performs http-01 challenge by saving
28 necessary validation resources to appropriate paths on the file
29 system. It expects that there is some other HTTP server configured
30 to serve all files under specified web root ({0})."""
31
32 def more_info(self): # pylint: disable=missing-docstring,no-self-use
33 return self.MORE_INFO.format(self.conf("path"))
34
35 @classmethod
36 def add_parser_arguments(cls, add):
37 # --webroot-path and --webroot-map are added in cli.py because they
38 # are parsed in conjunction with --domains
39 pass
40
41 def get_chall_pref(self, domain): # pragma: no cover
42 # pylint: disable=missing-docstring,no-self-use,unused-argument
43 return [challenges.HTTP01]
44
45 def __init__(self, *args, **kwargs):
46 super(Authenticator, self).__init__(*args, **kwargs)
47 self.full_roots = {}
48
49 def prepare(self): # pylint: disable=missing-docstring
50 path_map = self.conf("map")
51
52 if not path_map:
53 raise errors.PluginError("--{0} must be set".format(
54 self.option_name("path")))
55 for name, path in path_map.items():
56 if not os.path.isdir(path):
57 raise errors.PluginError(path + " does not exist or is not a directory")
58 self.full_roots[name] = os.path.join(path, challenges.HTTP01.URI_ROOT_PATH)
59
60 logger.debug("Creating root challenges validation dir at %s",
61 self.full_roots[name])
62 try:
63 os.makedirs(self.full_roots[name])
64 # Set permissions as parent directory (GH #1389)
65 # We don't use the parameters in makedirs because it
66 # may not always work
67 # https://stackoverflow.com/questions/5231901/permission-problems-when-creating-a-dir-with-os-makedirs-python
68 stat_path = os.stat(path)
69 filemode = stat.S_IMODE(stat_path.st_mode)
70 os.chmod(self.full_roots[name], filemode)
71 # Set owner and group, too
72 os.chown(self.full_roots[name], stat_path.st_uid,
73 stat_path.st_gid)
74
75 except OSError as exception:
76 if exception.errno != errno.EEXIST:
77 raise errors.PluginError(
78 "Couldn't create root for {0} http-01 "
79 "challenge responses: {1}", name, exception)
80
81 def perform(self, achalls): # pylint: disable=missing-docstring
82 assert self.full_roots, "Webroot plugin appears to be missing webroot map"
83 return [self._perform_single(achall) for achall in achalls]
84
85 def _path_for_achall(self, achall):
86 try:
87 path = self.full_roots[achall.domain]
88 except IndexError:
89 raise errors.PluginError("Missing --webroot-path for domain: {1}"
90 .format(achall.domain))
91 if not os.path.exists(path):
92 raise errors.PluginError("Mysteriously missing path {0} for domain: {1}"
93 .format(path, achall.domain))
94 return os.path.join(path, achall.chall.encode("token"))
95
96 def _perform_single(self, achall):
97 response, validation = achall.response_and_validation()
98 path = self._path_for_achall(achall)
99 logger.debug("Attempting to save validation to %s", path)
100 with open(path, "w") as validation_file:
101 validation_file.write(validation.encode())
102
103 # Set permissions as parent directory (GH #1389)
104 parent_path = self.full_roots[achall.domain]
105 stat_parent_path = os.stat(parent_path)
106 filemode = stat.S_IMODE(stat_parent_path.st_mode)
107 # Remove execution bit (not needed for this file)
108 os.chmod(path, filemode & ~stat.S_IEXEC)
109 os.chown(path, stat_parent_path.st_uid, stat_parent_path.st_gid)
110
111 return response
112
113 def cleanup(self, achalls): # pylint: disable=missing-docstring
114 for achall in achalls:
115 path = self._path_for_achall(achall)
116 logger.debug("Removing %s", path)
117 os.remove(path)
118
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/letsencrypt/plugins/webroot.py b/letsencrypt/plugins/webroot.py
--- a/letsencrypt/plugins/webroot.py
+++ b/letsencrypt/plugins/webroot.py
@@ -2,7 +2,6 @@
import errno
import logging
import os
-import stat
import zope.interface
@@ -59,24 +58,38 @@
logger.debug("Creating root challenges validation dir at %s",
self.full_roots[name])
+
+ # Change the permissions to be writable (GH #1389)
+ # Umask is used instead of chmod to ensure the client can also
+ # run as non-root (GH #1795)
+ old_umask = os.umask(0o022)
+
try:
- os.makedirs(self.full_roots[name])
- # Set permissions as parent directory (GH #1389)
- # We don't use the parameters in makedirs because it
- # may not always work
+ # This is coupled with the "umask" call above because
+ # os.makedirs's "mode" parameter may not always work:
# https://stackoverflow.com/questions/5231901/permission-problems-when-creating-a-dir-with-os-makedirs-python
- stat_path = os.stat(path)
- filemode = stat.S_IMODE(stat_path.st_mode)
- os.chmod(self.full_roots[name], filemode)
- # Set owner and group, too
- os.chown(self.full_roots[name], stat_path.st_uid,
- stat_path.st_gid)
+ os.makedirs(self.full_roots[name], 0o0755)
+
+ # Set owner as parent directory if possible
+ try:
+ stat_path = os.stat(path)
+ os.chown(self.full_roots[name], stat_path.st_uid,
+ stat_path.st_gid)
+ except OSError as exception:
+ if exception.errno == errno.EACCES:
+ logger.debug("Insufficient permissions to change owner and uid - ignoring")
+ else:
+ raise errors.PluginError(
+ "Couldn't create root for {0} http-01 "
+ "challenge responses: {1}", name, exception)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise errors.PluginError(
"Couldn't create root for {0} http-01 "
"challenge responses: {1}", name, exception)
+ finally:
+ os.umask(old_umask)
def perform(self, achalls): # pylint: disable=missing-docstring
assert self.full_roots, "Webroot plugin appears to be missing webroot map"
@@ -87,26 +100,26 @@
path = self.full_roots[achall.domain]
except IndexError:
raise errors.PluginError("Missing --webroot-path for domain: {1}"
- .format(achall.domain))
+ .format(achall.domain))
if not os.path.exists(path):
raise errors.PluginError("Mysteriously missing path {0} for domain: {1}"
- .format(path, achall.domain))
+ .format(path, achall.domain))
return os.path.join(path, achall.chall.encode("token"))
def _perform_single(self, achall):
response, validation = achall.response_and_validation()
+
path = self._path_for_achall(achall)
logger.debug("Attempting to save validation to %s", path)
- with open(path, "w") as validation_file:
- validation_file.write(validation.encode())
-
- # Set permissions as parent directory (GH #1389)
- parent_path = self.full_roots[achall.domain]
- stat_parent_path = os.stat(parent_path)
- filemode = stat.S_IMODE(stat_parent_path.st_mode)
- # Remove execution bit (not needed for this file)
- os.chmod(path, filemode & ~stat.S_IEXEC)
- os.chown(path, stat_parent_path.st_uid, stat_parent_path.st_gid)
+
+ # Change permissions to be world-readable, owner-writable (GH #1795)
+ old_umask = os.umask(0o022)
+
+ try:
+ with open(path, "w") as validation_file:
+ validation_file.write(validation.encode())
+ finally:
+ os.umask(old_umask)
return response
| {"golden_diff": "diff --git a/letsencrypt/plugins/webroot.py b/letsencrypt/plugins/webroot.py\n--- a/letsencrypt/plugins/webroot.py\n+++ b/letsencrypt/plugins/webroot.py\n@@ -2,7 +2,6 @@\n import errno\n import logging\n import os\n-import stat\n \n import zope.interface\n \n@@ -59,24 +58,38 @@\n \n logger.debug(\"Creating root challenges validation dir at %s\",\n self.full_roots[name])\n+\n+ # Change the permissions to be writable (GH #1389)\n+ # Umask is used instead of chmod to ensure the client can also\n+ # run as non-root (GH #1795)\n+ old_umask = os.umask(0o022)\n+\n try:\n- os.makedirs(self.full_roots[name])\n- # Set permissions as parent directory (GH #1389)\n- # We don't use the parameters in makedirs because it\n- # may not always work\n+ # This is coupled with the \"umask\" call above because\n+ # os.makedirs's \"mode\" parameter may not always work:\n # https://stackoverflow.com/questions/5231901/permission-problems-when-creating-a-dir-with-os-makedirs-python\n- stat_path = os.stat(path)\n- filemode = stat.S_IMODE(stat_path.st_mode)\n- os.chmod(self.full_roots[name], filemode)\n- # Set owner and group, too\n- os.chown(self.full_roots[name], stat_path.st_uid,\n- stat_path.st_gid)\n+ os.makedirs(self.full_roots[name], 0o0755)\n+\n+ # Set owner as parent directory if possible\n+ try:\n+ stat_path = os.stat(path)\n+ os.chown(self.full_roots[name], stat_path.st_uid,\n+ stat_path.st_gid)\n+ except OSError as exception:\n+ if exception.errno == errno.EACCES:\n+ logger.debug(\"Insufficient permissions to change owner and uid - ignoring\")\n+ else:\n+ raise errors.PluginError(\n+ \"Couldn't create root for {0} http-01 \"\n+ \"challenge responses: {1}\", name, exception)\n \n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise errors.PluginError(\n \"Couldn't create root for {0} http-01 \"\n \"challenge responses: {1}\", name, exception)\n+ finally:\n+ os.umask(old_umask)\n \n def perform(self, achalls): # pylint: disable=missing-docstring\n assert self.full_roots, \"Webroot plugin appears to be missing webroot map\"\n@@ -87,26 +100,26 @@\n path = self.full_roots[achall.domain]\n except IndexError:\n raise errors.PluginError(\"Missing --webroot-path for domain: {1}\"\n- .format(achall.domain))\n+ .format(achall.domain))\n if not os.path.exists(path):\n raise errors.PluginError(\"Mysteriously missing path {0} for domain: {1}\"\n- .format(path, achall.domain))\n+ .format(path, achall.domain))\n return os.path.join(path, achall.chall.encode(\"token\"))\n \n def _perform_single(self, achall):\n response, validation = achall.response_and_validation()\n+\n path = self._path_for_achall(achall)\n logger.debug(\"Attempting to save validation to %s\", path)\n- with open(path, \"w\") as validation_file:\n- validation_file.write(validation.encode())\n-\n- # Set permissions as parent directory (GH #1389)\n- parent_path = self.full_roots[achall.domain]\n- stat_parent_path = os.stat(parent_path)\n- filemode = stat.S_IMODE(stat_parent_path.st_mode)\n- # Remove execution bit (not needed for this file)\n- os.chmod(path, filemode & ~stat.S_IEXEC)\n- os.chown(path, stat_parent_path.st_uid, stat_parent_path.st_gid)\n+\n+ # Change permissions to be world-readable, owner-writable (GH #1795)\n+ old_umask = os.umask(0o022)\n+\n+ try:\n+ with open(path, \"w\") as validation_file:\n+ validation_file.write(validation.encode())\n+ finally:\n+ os.umask(old_umask)\n \n return response\n", "issue": "Webroot breaks for non-root due to chown\nHi,\n\nToday I'm trying out letsencrypt and stumbled 
upon that using webroot breaks when not using root due to a chown on line 72 of letsencrypt/plugins/webroot.py:\n\n```\n 49 def prepare(self): # pylint: disable=missing-docstring\n 50 path_map = self.conf(\"map\")\n 51\n 52 if not path_map:\n 53 raise errors.PluginError(\"--{0} must be set\".format(\n 54 self.option_name(\"path\")))\n 55 for name, path in path_map.items():\n 56 if not os.path.isdir(path):\n 57 raise errors.PluginError(path + \" does not exist or is not a directory\")\n 58 self.full_roots[name] = os.path.join(path, challenges.HTTP01.URI_ROOT_PATH)\n 59\n 60 logger.debug(\"Creating root challenges validation dir at %s\",\n 61 self.full_roots[name])\n 62 try:\n 63 os.makedirs(self.full_roots[name])\n 64 # Set permissions as parent directory (GH #1389)\n 65 # We don't use the parameters in makedirs because it\n 66 # may not always work\n 67 # https://stackoverflow.com/questions/5231901/permission-problems-when-creating-a-dir-with-os-makedirs-python\n 68 stat_path = os.stat(path)\n 69 filemode = stat.S_IMODE(stat_path.st_mode)\n 70 os.chmod(self.full_roots[name], filemode)\n 71 # Set owner and group, too\n 72 os.chown(self.full_roots[name], stat_path.st_uid,\n 73 stat_path.st_gid)\n 74\n 75 except OSError as exception:\n 76 if exception.errno != errno.EEXIST:\n 77 raise errors.PluginError(\n 78 \"Couldn't create root for {0} http-01 \"\n 79 \"challenge responses: {1}\", name, exception)\n```\n\nIt tries to set the ownership of the directory to the owner of the parent. So currently to bypass this issue either the webroot must be chown'd to the letsencrypt user, or the script needs to be run as root. Neither of which are satisfactory solutions. Is chown at all neccesary when letsencrypt is not run as root?\n\n", "before_files": [{"content": "\"\"\"Webroot plugin.\"\"\"\nimport errno\nimport logging\nimport os\nimport stat\n\nimport zope.interface\n\nfrom acme import challenges\n\nfrom letsencrypt import errors\nfrom letsencrypt import interfaces\nfrom letsencrypt.plugins import common\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Authenticator(common.Plugin):\n \"\"\"Webroot Authenticator.\"\"\"\n zope.interface.implements(interfaces.IAuthenticator)\n zope.interface.classProvides(interfaces.IPluginFactory)\n\n description = \"Webroot Authenticator\"\n\n MORE_INFO = \"\"\"\\\nAuthenticator plugin that performs http-01 challenge by saving\nnecessary validation resources to appropriate paths on the file\nsystem. 
It expects that there is some other HTTP server configured\nto serve all files under specified web root ({0}).\"\"\"\n\n def more_info(self): # pylint: disable=missing-docstring,no-self-use\n return self.MORE_INFO.format(self.conf(\"path\"))\n\n @classmethod\n def add_parser_arguments(cls, add):\n # --webroot-path and --webroot-map are added in cli.py because they\n # are parsed in conjunction with --domains\n pass\n\n def get_chall_pref(self, domain): # pragma: no cover\n # pylint: disable=missing-docstring,no-self-use,unused-argument\n return [challenges.HTTP01]\n\n def __init__(self, *args, **kwargs):\n super(Authenticator, self).__init__(*args, **kwargs)\n self.full_roots = {}\n\n def prepare(self): # pylint: disable=missing-docstring\n path_map = self.conf(\"map\")\n\n if not path_map:\n raise errors.PluginError(\"--{0} must be set\".format(\n self.option_name(\"path\")))\n for name, path in path_map.items():\n if not os.path.isdir(path):\n raise errors.PluginError(path + \" does not exist or is not a directory\")\n self.full_roots[name] = os.path.join(path, challenges.HTTP01.URI_ROOT_PATH)\n\n logger.debug(\"Creating root challenges validation dir at %s\",\n self.full_roots[name])\n try:\n os.makedirs(self.full_roots[name])\n # Set permissions as parent directory (GH #1389)\n # We don't use the parameters in makedirs because it\n # may not always work\n # https://stackoverflow.com/questions/5231901/permission-problems-when-creating-a-dir-with-os-makedirs-python\n stat_path = os.stat(path)\n filemode = stat.S_IMODE(stat_path.st_mode)\n os.chmod(self.full_roots[name], filemode)\n # Set owner and group, too\n os.chown(self.full_roots[name], stat_path.st_uid,\n stat_path.st_gid)\n\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise errors.PluginError(\n \"Couldn't create root for {0} http-01 \"\n \"challenge responses: {1}\", name, exception)\n\n def perform(self, achalls): # pylint: disable=missing-docstring\n assert self.full_roots, \"Webroot plugin appears to be missing webroot map\"\n return [self._perform_single(achall) for achall in achalls]\n\n def _path_for_achall(self, achall):\n try:\n path = self.full_roots[achall.domain]\n except IndexError:\n raise errors.PluginError(\"Missing --webroot-path for domain: {1}\"\n .format(achall.domain))\n if not os.path.exists(path):\n raise errors.PluginError(\"Mysteriously missing path {0} for domain: {1}\"\n .format(path, achall.domain))\n return os.path.join(path, achall.chall.encode(\"token\"))\n\n def _perform_single(self, achall):\n response, validation = achall.response_and_validation()\n path = self._path_for_achall(achall)\n logger.debug(\"Attempting to save validation to %s\", path)\n with open(path, \"w\") as validation_file:\n validation_file.write(validation.encode())\n\n # Set permissions as parent directory (GH #1389)\n parent_path = self.full_roots[achall.domain]\n stat_parent_path = os.stat(parent_path)\n filemode = stat.S_IMODE(stat_parent_path.st_mode)\n # Remove execution bit (not needed for this file)\n os.chmod(path, filemode & ~stat.S_IEXEC)\n os.chown(path, stat_parent_path.st_uid, stat_parent_path.st_gid)\n\n return response\n\n def cleanup(self, achalls): # pylint: disable=missing-docstring\n for achall in achalls:\n path = self._path_for_achall(achall)\n logger.debug(\"Removing %s\", path)\n os.remove(path)\n", "path": "letsencrypt/plugins/webroot.py"}], "after_files": [{"content": "\"\"\"Webroot plugin.\"\"\"\nimport errno\nimport logging\nimport os\n\nimport zope.interface\n\nfrom acme 
import challenges\n\nfrom letsencrypt import errors\nfrom letsencrypt import interfaces\nfrom letsencrypt.plugins import common\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Authenticator(common.Plugin):\n \"\"\"Webroot Authenticator.\"\"\"\n zope.interface.implements(interfaces.IAuthenticator)\n zope.interface.classProvides(interfaces.IPluginFactory)\n\n description = \"Webroot Authenticator\"\n\n MORE_INFO = \"\"\"\\\nAuthenticator plugin that performs http-01 challenge by saving\nnecessary validation resources to appropriate paths on the file\nsystem. It expects that there is some other HTTP server configured\nto serve all files under specified web root ({0}).\"\"\"\n\n def more_info(self): # pylint: disable=missing-docstring,no-self-use\n return self.MORE_INFO.format(self.conf(\"path\"))\n\n @classmethod\n def add_parser_arguments(cls, add):\n # --webroot-path and --webroot-map are added in cli.py because they\n # are parsed in conjunction with --domains\n pass\n\n def get_chall_pref(self, domain): # pragma: no cover\n # pylint: disable=missing-docstring,no-self-use,unused-argument\n return [challenges.HTTP01]\n\n def __init__(self, *args, **kwargs):\n super(Authenticator, self).__init__(*args, **kwargs)\n self.full_roots = {}\n\n def prepare(self): # pylint: disable=missing-docstring\n path_map = self.conf(\"map\")\n\n if not path_map:\n raise errors.PluginError(\"--{0} must be set\".format(\n self.option_name(\"path\")))\n for name, path in path_map.items():\n if not os.path.isdir(path):\n raise errors.PluginError(path + \" does not exist or is not a directory\")\n self.full_roots[name] = os.path.join(path, challenges.HTTP01.URI_ROOT_PATH)\n\n logger.debug(\"Creating root challenges validation dir at %s\",\n self.full_roots[name])\n\n # Change the permissions to be writable (GH #1389)\n # Umask is used instead of chmod to ensure the client can also\n # run as non-root (GH #1795)\n old_umask = os.umask(0o022)\n\n try:\n # This is coupled with the \"umask\" call above because\n # os.makedirs's \"mode\" parameter may not always work:\n # https://stackoverflow.com/questions/5231901/permission-problems-when-creating-a-dir-with-os-makedirs-python\n os.makedirs(self.full_roots[name], 0o0755)\n\n # Set owner as parent directory if possible\n try:\n stat_path = os.stat(path)\n os.chown(self.full_roots[name], stat_path.st_uid,\n stat_path.st_gid)\n except OSError as exception:\n if exception.errno == errno.EACCES:\n logger.debug(\"Insufficient permissions to change owner and uid - ignoring\")\n else:\n raise errors.PluginError(\n \"Couldn't create root for {0} http-01 \"\n \"challenge responses: {1}\", name, exception)\n\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise errors.PluginError(\n \"Couldn't create root for {0} http-01 \"\n \"challenge responses: {1}\", name, exception)\n finally:\n os.umask(old_umask)\n\n def perform(self, achalls): # pylint: disable=missing-docstring\n assert self.full_roots, \"Webroot plugin appears to be missing webroot map\"\n return [self._perform_single(achall) for achall in achalls]\n\n def _path_for_achall(self, achall):\n try:\n path = self.full_roots[achall.domain]\n except IndexError:\n raise errors.PluginError(\"Missing --webroot-path for domain: {1}\"\n .format(achall.domain))\n if not os.path.exists(path):\n raise errors.PluginError(\"Mysteriously missing path {0} for domain: {1}\"\n .format(path, achall.domain))\n return os.path.join(path, achall.chall.encode(\"token\"))\n\n def _perform_single(self, achall):\n 
response, validation = achall.response_and_validation()\n\n path = self._path_for_achall(achall)\n logger.debug(\"Attempting to save validation to %s\", path)\n\n # Change permissions to be world-readable, owner-writable (GH #1795)\n old_umask = os.umask(0o022)\n\n try:\n with open(path, \"w\") as validation_file:\n validation_file.write(validation.encode())\n finally:\n os.umask(old_umask)\n\n return response\n\n def cleanup(self, achalls): # pylint: disable=missing-docstring\n for achall in achalls:\n path = self._path_for_achall(achall)\n logger.debug(\"Removing %s\", path)\n os.remove(path)\n", "path": "letsencrypt/plugins/webroot.py"}]} | 2,086 | 983 |
gh_patches_debug_51715 | rasdani/github-patches | git_diff | paperless-ngx__paperless-ngx-1358 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Error starting gunicorn when IPv6 is disabled on host
### Description
Paperless fails to launch in at least certain scenarios when IPv6 is disabled on the host. This was working before a change from listening on `0.0.0.0` to listening on `::`.
### Steps to reproduce
Add the following to `/etc/sysctl.conf`:
```
net.ipv6.conf.all.disable_ipv6=1
net.ipv6.conf.default.disable_ipv6=1
net.ipv6.conf.lo.disable_ipv6=1
```
Set `GRUB_CMDLINE_LINUX_DEFAULT="ipv6.disable=1"` in `/etc/default/grub`, and `update-grub`.
Reboot and run paperless-ngx
### Webserver logs
```bash
Paperless-ngx docker container starting...
Mapping UID and GID for paperless:paperless to 1000:65538
usermod: no changes
Creating directory /tmp/paperless
Adjusting permissions of paperless files. This may take a while.
Waiting for PostgreSQL to start...
Waiting for Redis: redis://paperless-redis.paperless.svc:6379
Connected to Redis broker: redis://paperless-redis.paperless.svc:6379
Apply database migrations...
Operations to perform:
Apply all migrations: admin, auth, authtoken, contenttypes, django_q, documents, paperless_mail, sessions
Running migrations:
No migrations to apply.
Executing /usr/local/bin/paperless_cmd.sh
2022-08-04 14:20:24,984 INFO Set uid to user 0 succeeded
2022-08-04 14:20:24,985 INFO supervisord started with pid 49
2022-08-04 14:20:25,988 INFO spawned: 'consumer' with pid 50
2022-08-04 14:20:25,990 INFO spawned: 'gunicorn' with pid 51
2022-08-04 14:20:25,992 INFO spawned: 'scheduler' with pid 52
[2022-08-04 10:20:26 -0400] [51] [INFO] Starting gunicorn 20.1.0
[2022-08-04 10:20:35 -0400] [72] [ERROR] Retrying in 1 second.
[2022-08-04 10:20:36 -0400] [72] [ERROR] Retrying in 1 second.
[2022-08-04 10:20:37 -0400] [72] [ERROR] Can't connect to ('::', 8000)
2022-08-04 14:20:37,727 INFO exited: gunicorn (exit status 1; not expected)
2022-08-04 14:20:38,730 INFO spawned: 'gunicorn' with pid 74
[2022-08-04 10:20:39 -0400] [74] [INFO] Starting gunicorn 20.1.0
[2022-08-04 10:20:39 -0400] [74] [ERROR] Retrying in 1 second.
2022-08-04 14:20:40,017 INFO success: gunicorn entered RUNNING state, process has stayed up for > than 1 seconds (startsecs)
[2022-08-04 10:20:40 -0400] [74] [ERROR] Retrying in 1 second.
[2022-08-04 10:20:41 -0400] [74] [ERROR] Retrying in 1 second.
[2022-08-04 10:20:42 -0400] [74] [ERROR] Retrying in 1 second.
[2022-08-04 10:20:43 -0400] [74] [ERROR] Retrying in 1 second.
[2022-08-04 10:20:44 -0400] [74] [ERROR] Can't connect to ('::', 8000)
2022-08-04 14:20:44,069 INFO exited: gunicorn (exit status 1; not expected)
```
### Paperless-ngx version
1.8.0
### Host OS
Ubuntu 22.04 x64, Microk8s 1.22
### Installation method
Docker - official image
### Browser
_No response_
### Configuration changes
_No response_
### Other
This seems to be a regression from https://github.com/paperless-ngx/paperless-ngx/pull/924. Changing the listen IP back to 0.0.0.0 in gunicorn.conf.py causes it to bind correctly again.
--- END ISSUE ---
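For reference, the approach taken by the patch shown further down is to make the bind address configurable instead of hard-coding `[::]`. A minimal sketch of that pattern for `gunicorn.conf.py` (the environment variable name matches the patch below):
```python
import os

# Default to dual-stack "[::]", but let IPv4-only hosts override it,
# e.g. by setting PAPERLESS_BIND_ADDR=0.0.0.0
bind = f'{os.getenv("PAPERLESS_BIND_ADDR", "[::]")}:{os.getenv("PAPERLESS_PORT", 8000)}'
```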
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gunicorn.conf.py`
Content:
```
1 import os
2
3 bind = f'[::]:{os.getenv("PAPERLESS_PORT", 8000)}'
4 workers = int(os.getenv("PAPERLESS_WEBSERVER_WORKERS", 1))
5 worker_class = "paperless.workers.ConfigurableWorker"
6 timeout = 120
7
8
9 def pre_fork(server, worker):
10 pass
11
12
13 def pre_exec(server):
14 server.log.info("Forked child, re-executing.")
15
16
17 def when_ready(server):
18 server.log.info("Server is ready. Spawning workers")
19
20
21 def worker_int(worker):
22 worker.log.info("worker received INT or QUIT signal")
23
24 ## get traceback info
25 import threading, sys, traceback
26
27 id2name = {th.ident: th.name for th in threading.enumerate()}
28 code = []
29 for threadId, stack in sys._current_frames().items():
30 code.append("\n# Thread: %s(%d)" % (id2name.get(threadId, ""), threadId))
31 for filename, lineno, name, line in traceback.extract_stack(stack):
32 code.append('File: "%s", line %d, in %s' % (filename, lineno, name))
33 if line:
34 code.append(" %s" % (line.strip()))
35 worker.log.debug("\n".join(code))
36
37
38 def worker_abort(worker):
39 worker.log.info("worker received SIGABRT signal")
40
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gunicorn.conf.py b/gunicorn.conf.py
--- a/gunicorn.conf.py
+++ b/gunicorn.conf.py
@@ -1,6 +1,6 @@
import os
-bind = f'[::]:{os.getenv("PAPERLESS_PORT", 8000)}'
+bind = f'{os.getenv("PAPERLESS_BIND_ADDR", "[::]")}:{os.getenv("PAPERLESS_PORT", 8000)}'
workers = int(os.getenv("PAPERLESS_WEBSERVER_WORKERS", 1))
worker_class = "paperless.workers.ConfigurableWorker"
timeout = 120
| {"golden_diff": "diff --git a/gunicorn.conf.py b/gunicorn.conf.py\n--- a/gunicorn.conf.py\n+++ b/gunicorn.conf.py\n@@ -1,6 +1,6 @@\n import os\n \n-bind = f'[::]:{os.getenv(\"PAPERLESS_PORT\", 8000)}'\n+bind = f'{os.getenv(\"PAPERLESS_BIND_ADDR\", \"[::]\")}:{os.getenv(\"PAPERLESS_PORT\", 8000)}'\n workers = int(os.getenv(\"PAPERLESS_WEBSERVER_WORKERS\", 1))\n worker_class = \"paperless.workers.ConfigurableWorker\"\n timeout = 120\n", "issue": "[BUG] Error starting gunicorn when IPv6 is disabled on host\n### Description\n\nPaperless fails to launch in at least certain scenarios when IPv6 is disabled on the host. This was working before a change from listening on `0.0.0.0` to listening on `::`.\n\n### Steps to reproduce\n\nAdd the following to `/etc/sysctl.conf`:\r\n\r\n```\r\nnet.ipv6.conf.all.disable_ipv6=1\r\nnet.ipv6.conf.default.disable_ipv6=1\r\nnet.ipv6.conf.lo.disable_ipv6=1\r\n```\r\n\r\nSet `GRUB_CMDLINE_LINUX_DEFAULT=\"ipv6.disable=1\"` in `/etc/default/grub`, and `update-grub`.\r\n\r\nReboot and run paperless-ngx\n\n### Webserver logs\n\n```bash\nPaperless-ngx docker container starting...\r\nMapping UID and GID for paperless:paperless to 1000:65538\r\nusermod: no changes\r\nCreating directory /tmp/paperless\r\nAdjusting permissions of paperless files. This may take a while.\r\nWaiting for PostgreSQL to start...\r\nWaiting for Redis: redis://paperless-redis.paperless.svc:6379\r\nConnected to Redis broker: redis://paperless-redis.paperless.svc:6379\r\nApply database migrations...\r\nOperations to perform:\r\n Apply all migrations: admin, auth, authtoken, contenttypes, django_q, documents, paperless_mail, sessions\r\nRunning migrations:\r\n No migrations to apply.\r\nExecuting /usr/local/bin/paperless_cmd.sh\r\n2022-08-04 14:20:24,984 INFO Set uid to user 0 succeeded\r\n2022-08-04 14:20:24,985 INFO supervisord started with pid 49\r\n2022-08-04 14:20:25,988 INFO spawned: 'consumer' with pid 50\r\n2022-08-04 14:20:25,990 INFO spawned: 'gunicorn' with pid 51\r\n2022-08-04 14:20:25,992 INFO spawned: 'scheduler' with pid 52\r\n[2022-08-04 10:20:26 -0400] [51] [INFO] Starting gunicorn 20.1.0\r\n[2022-08-04 10:20:35 -0400] [72] [ERROR] Retrying in 1 second.\r\n[2022-08-04 10:20:36 -0400] [72] [ERROR] Retrying in 1 second.\r\n[2022-08-04 10:20:37 -0400] [72] [ERROR] Can't connect to ('::', 8000)\r\n2022-08-04 14:20:37,727 INFO exited: gunicorn (exit status 1; not expected)\r\n2022-08-04 14:20:38,730 INFO spawned: 'gunicorn' with pid 74\r\n[2022-08-04 10:20:39 -0400] [74] [INFO] Starting gunicorn 20.1.0\r\n[2022-08-04 10:20:39 -0400] [74] [ERROR] Retrying in 1 second.\r\n2022-08-04 14:20:40,017 INFO success: gunicorn entered RUNNING state, process has stayed up for > than 1 seconds (startsecs)\r\n[2022-08-04 10:20:40 -0400] [74] [ERROR] Retrying in 1 second.\r\n[2022-08-04 10:20:41 -0400] [74] [ERROR] Retrying in 1 second.\r\n[2022-08-04 10:20:42 -0400] [74] [ERROR] Retrying in 1 second.\r\n[2022-08-04 10:20:43 -0400] [74] [ERROR] Retrying in 1 second.\r\n[2022-08-04 10:20:44 -0400] [74] [ERROR] Can't connect to ('::', 8000)\r\n2022-08-04 14:20:44,069 INFO exited: gunicorn (exit status 1; not expected)\n```\n\n\n### Paperless-ngx version\n\n1.8.0\n\n### Host OS\n\nUbuntu 22.04 x64, Microk8s 1.22\n\n### Installation method\n\nDocker - official image\n\n### Browser\n\n_No response_\n\n### Configuration changes\n\n_No response_\n\n### Other\n\nThis seems to be a regression from https://github.com/paperless-ngx/paperless-ngx/pull/924. 
Changing the listen IP back to 0.0.0.0 in gunicorn.conf.py causing it to bind correctly again\n", "before_files": [{"content": "import os\n\nbind = f'[::]:{os.getenv(\"PAPERLESS_PORT\", 8000)}'\nworkers = int(os.getenv(\"PAPERLESS_WEBSERVER_WORKERS\", 1))\nworker_class = \"paperless.workers.ConfigurableWorker\"\ntimeout = 120\n\n\ndef pre_fork(server, worker):\n pass\n\n\ndef pre_exec(server):\n server.log.info(\"Forked child, re-executing.\")\n\n\ndef when_ready(server):\n server.log.info(\"Server is ready. Spawning workers\")\n\n\ndef worker_int(worker):\n worker.log.info(\"worker received INT or QUIT signal\")\n\n ## get traceback info\n import threading, sys, traceback\n\n id2name = {th.ident: th.name for th in threading.enumerate()}\n code = []\n for threadId, stack in sys._current_frames().items():\n code.append(\"\\n# Thread: %s(%d)\" % (id2name.get(threadId, \"\"), threadId))\n for filename, lineno, name, line in traceback.extract_stack(stack):\n code.append('File: \"%s\", line %d, in %s' % (filename, lineno, name))\n if line:\n code.append(\" %s\" % (line.strip()))\n worker.log.debug(\"\\n\".join(code))\n\n\ndef worker_abort(worker):\n worker.log.info(\"worker received SIGABRT signal\")\n", "path": "gunicorn.conf.py"}], "after_files": [{"content": "import os\n\nbind = f'{os.getenv(\"PAPERLESS_BIND_ADDR\", \"[::]\")}:{os.getenv(\"PAPERLESS_PORT\", 8000)}'\nworkers = int(os.getenv(\"PAPERLESS_WEBSERVER_WORKERS\", 1))\nworker_class = \"paperless.workers.ConfigurableWorker\"\ntimeout = 120\n\n\ndef pre_fork(server, worker):\n pass\n\n\ndef pre_exec(server):\n server.log.info(\"Forked child, re-executing.\")\n\n\ndef when_ready(server):\n server.log.info(\"Server is ready. Spawning workers\")\n\n\ndef worker_int(worker):\n worker.log.info(\"worker received INT or QUIT signal\")\n\n ## get traceback info\n import threading, sys, traceback\n\n id2name = {th.ident: th.name for th in threading.enumerate()}\n code = []\n for threadId, stack in sys._current_frames().items():\n code.append(\"\\n# Thread: %s(%d)\" % (id2name.get(threadId, \"\"), threadId))\n for filename, lineno, name, line in traceback.extract_stack(stack):\n code.append('File: \"%s\", line %d, in %s' % (filename, lineno, name))\n if line:\n code.append(\" %s\" % (line.strip()))\n worker.log.debug(\"\\n\".join(code))\n\n\ndef worker_abort(worker):\n worker.log.info(\"worker received SIGABRT signal\")\n", "path": "gunicorn.conf.py"}]} | 1,860 | 133 |
gh_patches_debug_15632 | rasdani/github-patches | git_diff | getredash__redash-3362 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Clickhouse: password is optional but we try to access it anyway
For ClickHouse-type data sources, we don't require a password, but the code does require it by directly accessing the value in the options dictionary instead of using `get`:
https://github.com/getredash/redash/blob/823e4ccdd6fcfee5d0df0d919d87af3100876549/redash/query_runner/clickhouse.py#L77
--- END ISSUE ---
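The distinction the issue hinges on is `dict[key]` versus `dict.get(key, default)` for optional settings. A minimal sketch (configuration values are illustrative):
```python
configuration = {"url": "http://127.0.0.1:8123", "dbname": "default"}  # no password configured

# configuration["password"] would raise KeyError here, which is the reported failure mode;
# the tolerant form falls back to an empty password instead:
password = configuration.get("password", "")
```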
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redash/query_runner/clickhouse.py`
Content:
```
1 import logging
2 import re
3
4 import requests
5
6 from redash.query_runner import *
7 from redash.utils import json_dumps, json_loads
8
9 logger = logging.getLogger(__name__)
10
11
12 class ClickHouse(BaseSQLQueryRunner):
13 noop_query = "SELECT 1"
14
15 @classmethod
16 def configuration_schema(cls):
17 return {
18 "type": "object",
19 "properties": {
20 "url": {
21 "type": "string",
22 "default": "http://127.0.0.1:8123"
23 },
24 "user": {
25 "type": "string",
26 "default": "default"
27 },
28 "password": {
29 "type": "string"
30 },
31 "dbname": {
32 "type": "string",
33 "title": "Database Name"
34 },
35 "timeout": {
36 "type": "number",
37 "title": "Request Timeout",
38 "default": 30
39 }
40 },
41 "required": ["dbname"],
42 "secret": ["password"]
43 }
44
45 @classmethod
46 def type(cls):
47 return "clickhouse"
48
49 def _get_tables(self, schema):
50 query = "SELECT database, table, name FROM system.columns WHERE database NOT IN ('system')"
51
52 results, error = self.run_query(query, None)
53
54 if error is not None:
55 raise Exception("Failed getting schema.")
56
57 results = json_loads(results)
58
59 for row in results['rows']:
60 table_name = '{}.{}'.format(row['database'], row['table'])
61
62 if table_name not in schema:
63 schema[table_name] = {'name': table_name, 'columns': []}
64
65 schema[table_name]['columns'].append(row['name'])
66
67 return schema.values()
68
69 def _send_query(self, data, stream=False):
70 r = requests.post(
71 self.configuration['url'],
72 data=data.encode("utf-8"),
73 stream=stream,
74 timeout=self.configuration.get('timeout', 30),
75 params={
76 'user': self.configuration['user'],
77 'password': self.configuration['password'],
78 'database': self.configuration['dbname']
79 }
80 )
81 if r.status_code != 200:
82 raise Exception(r.text)
83 # logging.warning(r.json())
84 return r.json()
85
86 @staticmethod
87 def _define_column_type(column):
88 c = column.lower()
89 f = re.search(r'^nullable\((.*)\)$', c)
90 if f is not None:
91 c = f.group(1)
92 if c.startswith('int') or c.startswith('uint'):
93 return TYPE_INTEGER
94 elif c.startswith('float'):
95 return TYPE_FLOAT
96 elif c == 'datetime':
97 return TYPE_DATETIME
98 elif c == 'date':
99 return TYPE_DATE
100 else:
101 return TYPE_STRING
102
103 def _clickhouse_query(self, query):
104 query += '\nFORMAT JSON'
105 result = self._send_query(query)
106 columns = []
107 columns_int64 = [] # db converts value to string if its type equals UInt64
108 columns_totals = {}
109
110 for r in result['meta']:
111 column_name = r['name']
112 column_type = self._define_column_type(r['type'])
113
114 if r['type'] in ('Int64', 'UInt64', 'Nullable(Int64)', 'Nullable(UInt64)'):
115 columns_int64.append(column_name)
116 else:
117 columns_totals[column_name] = 'Total' if column_type == TYPE_STRING else None
118
119 columns.append({'name': column_name, 'friendly_name': column_name, 'type': column_type})
120
121 rows = result['data']
122 for row in rows:
123 for column in columns_int64:
124 try:
125 row[column] = int(row[column])
126 except TypeError:
127 row[column] = None
128
129 if 'totals' in result:
130 totals = result['totals']
131 for column, value in columns_totals.iteritems():
132 totals[column] = value
133 rows.append(totals)
134
135 return {'columns': columns, 'rows': rows}
136
137 def run_query(self, query, user):
138 logger.debug("Clickhouse is about to execute query: %s", query)
139 if query == "":
140 json_data = None
141 error = "Query is empty"
142 return json_data, error
143 try:
144 q = self._clickhouse_query(query)
145 data = json_dumps(q)
146 error = None
147 except Exception as e:
148 data = None
149 logging.exception(e)
150 error = unicode(e)
151 return data, error
152
153 register(ClickHouse)
154
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/redash/query_runner/clickhouse.py b/redash/query_runner/clickhouse.py
--- a/redash/query_runner/clickhouse.py
+++ b/redash/query_runner/clickhouse.py
@@ -68,13 +68,13 @@
def _send_query(self, data, stream=False):
r = requests.post(
- self.configuration['url'],
+ self.configuration.get('url', "http://127.0.0.1:8123"),
data=data.encode("utf-8"),
stream=stream,
timeout=self.configuration.get('timeout', 30),
params={
- 'user': self.configuration['user'],
- 'password': self.configuration['password'],
+ 'user': self.configuration.get('user', "default"),
+ 'password': self.configuration.get('password', ""),
'database': self.configuration['dbname']
}
)
| {"golden_diff": "diff --git a/redash/query_runner/clickhouse.py b/redash/query_runner/clickhouse.py\n--- a/redash/query_runner/clickhouse.py\n+++ b/redash/query_runner/clickhouse.py\n@@ -68,13 +68,13 @@\n \n def _send_query(self, data, stream=False):\n r = requests.post(\n- self.configuration['url'],\n+ self.configuration.get('url', \"http://127.0.0.1:8123\"),\n data=data.encode(\"utf-8\"),\n stream=stream,\n timeout=self.configuration.get('timeout', 30),\n params={\n- 'user': self.configuration['user'],\n- 'password': self.configuration['password'],\n+ 'user': self.configuration.get('user', \"default\"),\n+ 'password': self.configuration.get('password', \"\"),\n 'database': self.configuration['dbname']\n }\n )\n", "issue": "Clickhouse: password is optional but we try to access it anyway\nFor Clickhouse type data sources, we don't require a password. But the code does require it by trying to directly access the value in the options dictionary, instead of using `get`:\r\n\r\nhttps://github.com/getredash/redash/blob/823e4ccdd6fcfee5d0df0d919d87af3100876549/redash/query_runner/clickhouse.py#L77\n", "before_files": [{"content": "import logging\nimport re\n\nimport requests\n\nfrom redash.query_runner import *\nfrom redash.utils import json_dumps, json_loads\n\nlogger = logging.getLogger(__name__)\n\n\nclass ClickHouse(BaseSQLQueryRunner):\n noop_query = \"SELECT 1\"\n\n @classmethod\n def configuration_schema(cls):\n return {\n \"type\": \"object\",\n \"properties\": {\n \"url\": {\n \"type\": \"string\",\n \"default\": \"http://127.0.0.1:8123\"\n },\n \"user\": {\n \"type\": \"string\",\n \"default\": \"default\"\n },\n \"password\": {\n \"type\": \"string\"\n },\n \"dbname\": {\n \"type\": \"string\",\n \"title\": \"Database Name\"\n },\n \"timeout\": {\n \"type\": \"number\",\n \"title\": \"Request Timeout\",\n \"default\": 30\n }\n },\n \"required\": [\"dbname\"],\n \"secret\": [\"password\"]\n }\n\n @classmethod\n def type(cls):\n return \"clickhouse\"\n\n def _get_tables(self, schema):\n query = \"SELECT database, table, name FROM system.columns WHERE database NOT IN ('system')\"\n\n results, error = self.run_query(query, None)\n\n if error is not None:\n raise Exception(\"Failed getting schema.\")\n\n results = json_loads(results)\n\n for row in results['rows']:\n table_name = '{}.{}'.format(row['database'], row['table'])\n\n if table_name not in schema:\n schema[table_name] = {'name': table_name, 'columns': []}\n\n schema[table_name]['columns'].append(row['name'])\n\n return schema.values()\n\n def _send_query(self, data, stream=False):\n r = requests.post(\n self.configuration['url'],\n data=data.encode(\"utf-8\"),\n stream=stream,\n timeout=self.configuration.get('timeout', 30),\n params={\n 'user': self.configuration['user'],\n 'password': self.configuration['password'],\n 'database': self.configuration['dbname']\n }\n )\n if r.status_code != 200:\n raise Exception(r.text)\n # logging.warning(r.json())\n return r.json()\n\n @staticmethod\n def _define_column_type(column):\n c = column.lower()\n f = re.search(r'^nullable\\((.*)\\)$', c)\n if f is not None:\n c = f.group(1)\n if c.startswith('int') or c.startswith('uint'):\n return TYPE_INTEGER\n elif c.startswith('float'):\n return TYPE_FLOAT\n elif c == 'datetime':\n return TYPE_DATETIME\n elif c == 'date':\n return TYPE_DATE\n else:\n return TYPE_STRING\n\n def _clickhouse_query(self, query):\n query += '\\nFORMAT JSON'\n result = self._send_query(query)\n columns = []\n columns_int64 = [] # db converts value to string if its 
type equals UInt64\n columns_totals = {}\n\n for r in result['meta']:\n column_name = r['name']\n column_type = self._define_column_type(r['type'])\n\n if r['type'] in ('Int64', 'UInt64', 'Nullable(Int64)', 'Nullable(UInt64)'):\n columns_int64.append(column_name)\n else:\n columns_totals[column_name] = 'Total' if column_type == TYPE_STRING else None\n\n columns.append({'name': column_name, 'friendly_name': column_name, 'type': column_type})\n\n rows = result['data']\n for row in rows:\n for column in columns_int64:\n try:\n row[column] = int(row[column])\n except TypeError:\n row[column] = None\n\n if 'totals' in result:\n totals = result['totals']\n for column, value in columns_totals.iteritems():\n totals[column] = value\n rows.append(totals)\n\n return {'columns': columns, 'rows': rows}\n\n def run_query(self, query, user):\n logger.debug(\"Clickhouse is about to execute query: %s\", query)\n if query == \"\":\n json_data = None\n error = \"Query is empty\"\n return json_data, error\n try:\n q = self._clickhouse_query(query)\n data = json_dumps(q)\n error = None\n except Exception as e:\n data = None\n logging.exception(e)\n error = unicode(e)\n return data, error\n\nregister(ClickHouse)\n", "path": "redash/query_runner/clickhouse.py"}], "after_files": [{"content": "import logging\nimport re\n\nimport requests\n\nfrom redash.query_runner import *\nfrom redash.utils import json_dumps, json_loads\n\nlogger = logging.getLogger(__name__)\n\n\nclass ClickHouse(BaseSQLQueryRunner):\n noop_query = \"SELECT 1\"\n\n @classmethod\n def configuration_schema(cls):\n return {\n \"type\": \"object\",\n \"properties\": {\n \"url\": {\n \"type\": \"string\",\n \"default\": \"http://127.0.0.1:8123\"\n },\n \"user\": {\n \"type\": \"string\",\n \"default\": \"default\"\n },\n \"password\": {\n \"type\": \"string\"\n },\n \"dbname\": {\n \"type\": \"string\",\n \"title\": \"Database Name\"\n },\n \"timeout\": {\n \"type\": \"number\",\n \"title\": \"Request Timeout\",\n \"default\": 30\n }\n },\n \"required\": [\"dbname\"],\n \"secret\": [\"password\"]\n }\n\n @classmethod\n def type(cls):\n return \"clickhouse\"\n\n def _get_tables(self, schema):\n query = \"SELECT database, table, name FROM system.columns WHERE database NOT IN ('system')\"\n\n results, error = self.run_query(query, None)\n\n if error is not None:\n raise Exception(\"Failed getting schema.\")\n\n results = json_loads(results)\n\n for row in results['rows']:\n table_name = '{}.{}'.format(row['database'], row['table'])\n\n if table_name not in schema:\n schema[table_name] = {'name': table_name, 'columns': []}\n\n schema[table_name]['columns'].append(row['name'])\n\n return schema.values()\n\n def _send_query(self, data, stream=False):\n r = requests.post(\n self.configuration.get('url', \"http://127.0.0.1:8123\"),\n data=data.encode(\"utf-8\"),\n stream=stream,\n timeout=self.configuration.get('timeout', 30),\n params={\n 'user': self.configuration.get('user', \"default\"),\n 'password': self.configuration.get('password', \"\"),\n 'database': self.configuration['dbname']\n }\n )\n if r.status_code != 200:\n raise Exception(r.text)\n # logging.warning(r.json())\n return r.json()\n\n @staticmethod\n def _define_column_type(column):\n c = column.lower()\n f = re.search(r'^nullable\\((.*)\\)$', c)\n if f is not None:\n c = f.group(1)\n if c.startswith('int') or c.startswith('uint'):\n return TYPE_INTEGER\n elif c.startswith('float'):\n return TYPE_FLOAT\n elif c == 'datetime':\n return TYPE_DATETIME\n elif c == 'date':\n return TYPE_DATE\n 
else:\n return TYPE_STRING\n\n def _clickhouse_query(self, query):\n query += '\\nFORMAT JSON'\n result = self._send_query(query)\n columns = []\n columns_int64 = [] # db converts value to string if its type equals UInt64\n columns_totals = {}\n\n for r in result['meta']:\n column_name = r['name']\n column_type = self._define_column_type(r['type'])\n\n if r['type'] in ('Int64', 'UInt64', 'Nullable(Int64)', 'Nullable(UInt64)'):\n columns_int64.append(column_name)\n else:\n columns_totals[column_name] = 'Total' if column_type == TYPE_STRING else None\n\n columns.append({'name': column_name, 'friendly_name': column_name, 'type': column_type})\n\n rows = result['data']\n for row in rows:\n for column in columns_int64:\n try:\n row[column] = int(row[column])\n except TypeError:\n row[column] = None\n\n if 'totals' in result:\n totals = result['totals']\n for column, value in columns_totals.iteritems():\n totals[column] = value\n rows.append(totals)\n\n return {'columns': columns, 'rows': rows}\n\n def run_query(self, query, user):\n logger.debug(\"Clickhouse is about to execute query: %s\", query)\n if query == \"\":\n json_data = None\n error = \"Query is empty\"\n return json_data, error\n try:\n q = self._clickhouse_query(query)\n data = json_dumps(q)\n error = None\n except Exception as e:\n data = None\n logging.exception(e)\n error = unicode(e)\n return data, error\n\nregister(ClickHouse)\n", "path": "redash/query_runner/clickhouse.py"}]} | 1,720 | 200 |
gh_patches_debug_6611 | rasdani/github-patches | git_diff | obspy__obspy-3183 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Baer picker failing with ffi_prep_cif_var error
**Description of the Problem**
I recently installed obspy on a new python install. Everything appeared to be working until I ran the tests for a package that made a call to obspy.signal.trigger.pk_baer, which gives the following error:
```
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
Input In [5], in <module>
----> 1 p_pick, phase_info = pk_baer(trace.data, df, 20, 60, 7.0, 12.0, 100, 100)
File ~/Gits/obspy/obspy/signal/trigger.py:406, in pk_baer(reltrc, samp_int, tdownmax, tupevent, thr1, thr2, preset_len, p_dur, return_cf)
402 # index in pk_mbaer.c starts with 1, 0 index is lost, length must be
403 # one shorter
404 args = (len(reltrc) - 1, C.byref(pptime), pfm, samp_int,
405 tdownmax, tupevent, thr1, thr2, preset_len, p_dur, cf_p)
--> 406 errcode = clibsignal.ppick(reltrc, *args)
407 if errcode != 0:
408 raise MemoryError("Error in function ppick of mk_mbaer.c")
RuntimeError: ffi_prep_cif_var failed
```
It seems fairly likely to me that one of the libraries I have installed introduced a problem in this environment compared to my old environment. I'm just not sure how to begin figuring out the issue. I tried it in my old environment just to be sure and it works fine.
**Steps to Reproduce**
Running the tests yields the following failures:
FAILED obspy/signal/tests/test_trigger.py::TriggerTestCase::test_pk_baer
FAILED obspy/signal/tests/test_trigger.py::TriggerTestCase::test_pk_baer_cf
in addition to a third, unrelated error in TauPy
Additionally, going through the trigger/picker tutorial will also reproduce the problem (this is probably what the test code is doing, anyways):
```
from obspy.core import read
from obspy.signal.trigger import pk_baer
trace = read("https://examples.obspy.org/ev0_6.a01.gse2")[0]
df = trace.stats.sampling_rate
p_pick, phase_info = pk_baer(trace.data, df,20, 60, 7.0, 12.0, 100, 100)
```
**Versions**
ObsPy: 1.2.2.post0+547.g221c7ef4a9.obspy.master (I tried both this and 1.2.2 as downloaded from conda-forge)
Python: 3.8.12
OS: Ubuntu 16
**Obspy Installation**
I tried installing obspy in two different ways and got the same result both ways: `conda install -c conda-forge obspy` (technically I used mamba, but that shouldn't matter) and from the latest master (`pip install -e {path_to_source}`).
**Old Environment**
Obspy: 1.2.2 (installed from conda)
Python: 3.8.8
OS: Ubuntu 16
Thank you,
Shawn
--- END ISSUE ---
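One piece of background that helps when reading the ctypes declarations below: if a foreign function is called with more positional arguments than its `argtypes` lists, ctypes treats the extra arguments as variadic and goes through libffi's `ffi_prep_cif_var` path, which is the function named in the traceback above. A minimal, self-contained illustration using libc's genuinely variadic `printf` (not ObsPy code, and platform-dependent in that it assumes a loadable C library):
```python
import ctypes
import ctypes.util

libc = ctypes.CDLL(ctypes.util.find_library("c"))

# Only the fixed argument is declared; anything extra passed at call time is
# handled as a variadic argument inside ctypes/libffi (the ffi_prep_cif_var path).
libc.printf.argtypes = [ctypes.c_char_p]
libc.printf.restype = ctypes.c_int

libc.printf(b"value: %d\n", 42)  # 42 goes beyond argtypes -> variadic handling
```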
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `obspy/signal/headers.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 Defines the libsignal and evalresp structures and blockettes.
4 """
5 import ctypes as C # NOQA
6
7 import numpy as np
8
9 from obspy.core.util.libnames import _load_cdll
10
11
12 # Import shared libsignal
13 clibsignal = _load_cdll("signal")
14 # Import shared libevresp
15 clibevresp = _load_cdll("evresp")
16
17 clibsignal.calcSteer.argtypes = [
18 C.c_int, C.c_int, C.c_int, C.c_int, C.c_int, C.c_float,
19 np.ctypeslib.ndpointer(dtype=np.float32, ndim=3,
20 flags='C_CONTIGUOUS'),
21 np.ctypeslib.ndpointer(dtype=np.complex128, ndim=4,
22 flags='C_CONTIGUOUS'),
23 ]
24 clibsignal.calcSteer.restype = C.c_void_p
25
26 clibsignal.generalizedBeamformer.argtypes = [
27 np.ctypeslib.ndpointer(dtype=np.float64, ndim=2,
28 flags='C_CONTIGUOUS'),
29 np.ctypeslib.ndpointer(dtype=np.float64, ndim=2,
30 flags='C_CONTIGUOUS'),
31 np.ctypeslib.ndpointer(dtype=np.complex128, ndim=4,
32 flags='C_CONTIGUOUS'),
33 np.ctypeslib.ndpointer(dtype=np.complex128, ndim=3,
34 flags='C_CONTIGUOUS'),
35 C.c_int, C.c_int, C.c_int, C.c_int, C.c_int,
36 C.c_double,
37 C.c_int,
38 ]
39 clibsignal.generalizedBeamformer.restype = C.c_int
40
41 clibsignal.X_corr.argtypes = [
42 np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,
43 flags='C_CONTIGUOUS'),
44 np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,
45 flags='C_CONTIGUOUS'),
46 np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,
47 flags='C_CONTIGUOUS'),
48 C.c_int, C.c_int, C.c_int,
49 C.POINTER(C.c_int), C.POINTER(C.c_double)]
50 clibsignal.X_corr.restype = C.c_int
51
52 clibsignal.recstalta.argtypes = [
53 np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,
54 flags='C_CONTIGUOUS'),
55 np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,
56 flags='C_CONTIGUOUS'),
57 C.c_int, C.c_int, C.c_int]
58 clibsignal.recstalta.restype = C.c_void_p
59
60 clibsignal.ppick.argtypes = [
61 np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,
62 flags='C_CONTIGUOUS'),
63 C.c_int, C.POINTER(C.c_int), C.c_char_p, C.c_float, C.c_int, C.c_int,
64 C.c_float, C.c_float, C.c_int, C.c_int]
65 clibsignal.ppick.restype = C.c_int
66
67 clibsignal.ar_picker.argtypes = [
68 np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,
69 flags='C_CONTIGUOUS'),
70 np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,
71 flags='C_CONTIGUOUS'),
72 np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,
73 flags='C_CONTIGUOUS'),
74 C.c_int, C.c_float, C.c_float, C.c_float, C.c_float, C.c_float,
75 C.c_float, C.c_float, C.c_int, C.c_int, C.POINTER(C.c_float),
76 C.POINTER(C.c_float), C.c_double, C.c_double, C.c_int]
77 clibsignal.ar_picker.restypes = C.c_int
78
79 clibsignal.utl_geo_km.argtypes = [C.c_double, C.c_double, C.c_double,
80 C.POINTER(C.c_double),
81 C.POINTER(C.c_double)]
82 clibsignal.utl_geo_km.restype = C.c_void_p
83
84 head_stalta_t = np.dtype([
85 ('N', np.uint32),
86 ('nsta', np.uint32),
87 ('nlta', np.uint32),
88 ], align=True)
89
90 clibsignal.stalta.argtypes = [
91 np.ctypeslib.ndpointer(dtype=head_stalta_t, ndim=1,
92 flags='C_CONTIGUOUS'),
93 np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,
94 flags='C_CONTIGUOUS'),
95 np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,
96 flags='C_CONTIGUOUS'),
97 ]
98 clibsignal.stalta.restype = C.c_int
99
100 clibsignal.hermite_interpolation.argtypes = [
101 np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,
102 flags='C_CONTIGUOUS'),
103 np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,
104 flags='C_CONTIGUOUS'),
105 np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,
106 flags='C_CONTIGUOUS'),
107 np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,
108 flags='C_CONTIGUOUS'),
109 C.c_int, C.c_int, C.c_double, C.c_double]
110 clibsignal.hermite_interpolation.restype = C.c_void_p
111
112 clibsignal.lanczos_resample.argtypes = [
113 # y_in
114 np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,
115 flags='C_CONTIGUOUS'),
116 # y_out
117 np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,
118 flags='C_CONTIGUOUS'),
119 # dt
120 C.c_double,
121 # offset
122 C.c_double,
123 # len_in
124 C.c_int,
125 # len_out,
126 C.c_int,
127 # a,
128 C.c_int,
129 # window
130 C.c_int]
131 clibsignal.lanczos_resample.restype = None
132
133 clibsignal.calculate_kernel.argtypes = [
134 # double *x
135 np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,
136 flags='C_CONTIGUOUS'),
137 # double *y
138 np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,
139 flags='C_CONTIGUOUS'),
140 # int len
141 C.c_int,
142 # int a,
143 C.c_int,
144 # int return_type,
145 C.c_int,
146 # enum lanczos_window_type window
147 C.c_int]
148 clibsignal.calculate_kernel.restype = None
149
150 clibsignal.aic_simple.argtypes = [
151 # double *aic, output
152 np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,
153 flags='C_CONTIGUOUS'),
154 # double *arr, input
155 np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,
156 flags='C_CONTIGUOUS'),
157 # arr size
158 C.c_uint32]
159 clibsignal.recstalta.restype = C.c_void_p
160
161 STALEN = 64
162 NETLEN = 64
163 CHALEN = 64
164 LOCIDLEN = 64
165
166
167 class C_COMPLEX(C.Structure): # noqa
168 _fields_ = [("real", C.c_double),
169 ("imag", C.c_double)]
170
171
172 class RESPONSE(C.Structure):
173 pass
174
175
176 RESPONSE._fields_ = [("station", C.c_char * STALEN),
177 ("network", C.c_char * NETLEN),
178 ("locid", C.c_char * LOCIDLEN),
179 ("channel", C.c_char * CHALEN),
180 ("rvec", C.POINTER(C_COMPLEX)),
181 ("nfreqs", C.c_int),
182 ("freqs", C.POINTER(C.c_double)),
183 ("next", C.POINTER(RESPONSE))]
184
185 clibevresp.evresp.argtypes = [
186 C.c_char_p,
187 C.c_char_p,
188 C.c_char_p,
189 C.c_char_p,
190 C.c_char_p,
191 C.c_char_p,
192 C.c_char_p,
193 np.ctypeslib.ndpointer(dtype=np.float64,
194 ndim=1,
195 flags='C_CONTIGUOUS'),
196 C.c_int,
197 C.c_char_p,
198 C.c_char_p,
199 C.c_int,
200 C.c_int,
201 C.c_int,
202 C.c_int]
203 clibevresp.evresp.restype = C.POINTER(RESPONSE)
204
205 clibevresp.free_response.argtypes = [C.POINTER(RESPONSE)]
206 clibevresp.free_response.restype = C.c_void_p
207
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/obspy/signal/headers.py b/obspy/signal/headers.py
--- a/obspy/signal/headers.py
+++ b/obspy/signal/headers.py
@@ -61,7 +61,7 @@
np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,
flags='C_CONTIGUOUS'),
C.c_int, C.POINTER(C.c_int), C.c_char_p, C.c_float, C.c_int, C.c_int,
- C.c_float, C.c_float, C.c_int, C.c_int]
+ C.c_float, C.c_float, C.c_int, C.c_int, C.POINTER(C.c_float)]
clibsignal.ppick.restype = C.c_int
clibsignal.ar_picker.argtypes = [
| {"golden_diff": "diff --git a/obspy/signal/headers.py b/obspy/signal/headers.py\n--- a/obspy/signal/headers.py\n+++ b/obspy/signal/headers.py\n@@ -61,7 +61,7 @@\n np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,\n flags='C_CONTIGUOUS'),\n C.c_int, C.POINTER(C.c_int), C.c_char_p, C.c_float, C.c_int, C.c_int,\n- C.c_float, C.c_float, C.c_int, C.c_int]\n+ C.c_float, C.c_float, C.c_int, C.c_int, C.POINTER(C.c_float)]\n clibsignal.ppick.restype = C.c_int\n \n clibsignal.ar_picker.argtypes = [\n", "issue": "Baer picker failing with fi_prep_cif_var error\n**Description of the Problem**\r\n\r\nI recently installed obspy on a new python install. Everything appeared to be working until I ran the tests for a package that made a call to the obspy.signal.trigger.pk_baer, which gives the following error:\r\n\r\n```\r\n---------------------------------------------------------------------------\r\nRuntimeError Traceback (most recent call last)\r\nInput In [5], in <module>\r\n----> 1 p_pick, phase_info = pk_baer(trace.data, df, 20, 60, 7.0, 12.0, 100, 100)\r\n\r\nFile ~/Gits/obspy/obspy/signal/trigger.py:406, in pk_baer(reltrc, samp_int, tdownmax, tupevent, thr1, thr2, preset_len, p_dur, return_cf)\r\n 402 # index in pk_mbaer.c starts with 1, 0 index is lost, length must be\r\n 403 # one shorter\r\n 404 args = (len(reltrc) - 1, C.byref(pptime), pfm, samp_int,\r\n 405 tdownmax, tupevent, thr1, thr2, preset_len, p_dur, cf_p)\r\n--> 406 errcode = clibsignal.ppick(reltrc, *args)\r\n 407 if errcode != 0:\r\n 408 raise MemoryError(\"Error in function ppick of mk_mbaer.c\")\r\n\r\nRuntimeError: ffi_prep_cif_var failed\r\n```\r\n\r\nIt seems fairly likely to me that one of the libraries I have installed introduced a problem in this environment compared to my old environment. I'm just not sure how to begin figuring out the issue. 
I tried it in my old environment just to be sure and it works fine.\r\n\r\n**Steps to Reproduce**\r\nRunning the tests yields the following failures:\r\nFAILED obspy/signal/tests/test_trigger.py::TriggerTestCase::test_pk_baer\r\nFAILED obspy/signal/tests/test_trigger.py::TriggerTestCase::test_pk_baer_cf\r\nin addition to a third, unrelated error in TauPy\r\n\r\nAdditionally, going through the trigger/picker tutorial will also reproduce the problem (this is probably what the test code is doing, anyways):\r\n```\r\nfrom obspy.core import read\r\nfrom obspy.signal.trigger import pk_baer\r\ntrace = read(\"https://examples.obspy.org/ev0_6.a01.gse2\")[0]\r\ndf = trace.stats.sampling_rate\r\np_pick, phase_info = pk_baer(trace.data, df,20, 60, 7.0, 12.0, 100, 100)\r\n```\r\n\r\n**Versions**\r\nObsPy: 1.2.2.post0+547.g221c7ef4a9.obspy.master (I tried both this and 1.2.2 as downloaded from conda-forge)\r\nPython: 3.8.12\r\nOS: Ubuntu 16\r\n\r\n**Obspy Installation**\r\nI tried installing obspy in two different ways and got the same result both ways: `conda install -c conda-forge obspy` (technically I used mamba, but that shouldn't matter) and from the latest master (`pip install -e {path_to_source}').\r\n\r\n**Old Environment**\r\nObspy: 1.2.2 (installed from conda)\r\nPython: 3.8.8\r\nOS: Ubuntu 16\r\n\r\nThank you,\r\nShawn\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDefines the libsignal and evalresp structures and blockettes.\n\"\"\"\nimport ctypes as C # NOQA\n\nimport numpy as np\n\nfrom obspy.core.util.libnames import _load_cdll\n\n\n# Import shared libsignal\nclibsignal = _load_cdll(\"signal\")\n# Import shared libevresp\nclibevresp = _load_cdll(\"evresp\")\n\nclibsignal.calcSteer.argtypes = [\n C.c_int, C.c_int, C.c_int, C.c_int, C.c_int, C.c_float,\n np.ctypeslib.ndpointer(dtype=np.float32, ndim=3,\n flags='C_CONTIGUOUS'),\n np.ctypeslib.ndpointer(dtype=np.complex128, ndim=4,\n flags='C_CONTIGUOUS'),\n]\nclibsignal.calcSteer.restype = C.c_void_p\n\nclibsignal.generalizedBeamformer.argtypes = [\n np.ctypeslib.ndpointer(dtype=np.float64, ndim=2,\n flags='C_CONTIGUOUS'),\n np.ctypeslib.ndpointer(dtype=np.float64, ndim=2,\n flags='C_CONTIGUOUS'),\n np.ctypeslib.ndpointer(dtype=np.complex128, ndim=4,\n flags='C_CONTIGUOUS'),\n np.ctypeslib.ndpointer(dtype=np.complex128, ndim=3,\n flags='C_CONTIGUOUS'),\n C.c_int, C.c_int, C.c_int, C.c_int, C.c_int,\n C.c_double,\n C.c_int,\n]\nclibsignal.generalizedBeamformer.restype = C.c_int\n\nclibsignal.X_corr.argtypes = [\n np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,\n flags='C_CONTIGUOUS'),\n np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,\n flags='C_CONTIGUOUS'),\n np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,\n flags='C_CONTIGUOUS'),\n C.c_int, C.c_int, C.c_int,\n C.POINTER(C.c_int), C.POINTER(C.c_double)]\nclibsignal.X_corr.restype = C.c_int\n\nclibsignal.recstalta.argtypes = [\n np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,\n flags='C_CONTIGUOUS'),\n np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,\n flags='C_CONTIGUOUS'),\n C.c_int, C.c_int, C.c_int]\nclibsignal.recstalta.restype = C.c_void_p\n\nclibsignal.ppick.argtypes = [\n np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,\n flags='C_CONTIGUOUS'),\n C.c_int, C.POINTER(C.c_int), C.c_char_p, C.c_float, C.c_int, C.c_int,\n C.c_float, C.c_float, C.c_int, C.c_int]\nclibsignal.ppick.restype = C.c_int\n\nclibsignal.ar_picker.argtypes = [\n np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,\n flags='C_CONTIGUOUS'),\n np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,\n 
flags='C_CONTIGUOUS'),\n np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,\n flags='C_CONTIGUOUS'),\n C.c_int, C.c_float, C.c_float, C.c_float, C.c_float, C.c_float,\n C.c_float, C.c_float, C.c_int, C.c_int, C.POINTER(C.c_float),\n C.POINTER(C.c_float), C.c_double, C.c_double, C.c_int]\nclibsignal.ar_picker.restypes = C.c_int\n\nclibsignal.utl_geo_km.argtypes = [C.c_double, C.c_double, C.c_double,\n C.POINTER(C.c_double),\n C.POINTER(C.c_double)]\nclibsignal.utl_geo_km.restype = C.c_void_p\n\nhead_stalta_t = np.dtype([\n ('N', np.uint32),\n ('nsta', np.uint32),\n ('nlta', np.uint32),\n], align=True)\n\nclibsignal.stalta.argtypes = [\n np.ctypeslib.ndpointer(dtype=head_stalta_t, ndim=1,\n flags='C_CONTIGUOUS'),\n np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,\n flags='C_CONTIGUOUS'),\n np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,\n flags='C_CONTIGUOUS'),\n]\nclibsignal.stalta.restype = C.c_int\n\nclibsignal.hermite_interpolation.argtypes = [\n np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,\n flags='C_CONTIGUOUS'),\n np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,\n flags='C_CONTIGUOUS'),\n np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,\n flags='C_CONTIGUOUS'),\n np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,\n flags='C_CONTIGUOUS'),\n C.c_int, C.c_int, C.c_double, C.c_double]\nclibsignal.hermite_interpolation.restype = C.c_void_p\n\nclibsignal.lanczos_resample.argtypes = [\n # y_in\n np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,\n flags='C_CONTIGUOUS'),\n # y_out\n np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,\n flags='C_CONTIGUOUS'),\n # dt\n C.c_double,\n # offset\n C.c_double,\n # len_in\n C.c_int,\n # len_out,\n C.c_int,\n # a,\n C.c_int,\n # window\n C.c_int]\nclibsignal.lanczos_resample.restype = None\n\nclibsignal.calculate_kernel.argtypes = [\n # double *x\n np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,\n flags='C_CONTIGUOUS'),\n # double *y\n np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,\n flags='C_CONTIGUOUS'),\n # int len\n C.c_int,\n # int a,\n C.c_int,\n # int return_type,\n C.c_int,\n # enum lanczos_window_type window\n C.c_int]\nclibsignal.calculate_kernel.restype = None\n\nclibsignal.aic_simple.argtypes = [\n # double *aic, output\n np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,\n flags='C_CONTIGUOUS'),\n # double *arr, input\n np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,\n flags='C_CONTIGUOUS'),\n # arr size\n C.c_uint32]\nclibsignal.recstalta.restype = C.c_void_p\n\nSTALEN = 64\nNETLEN = 64\nCHALEN = 64\nLOCIDLEN = 64\n\n\nclass C_COMPLEX(C.Structure): # noqa\n _fields_ = [(\"real\", C.c_double),\n (\"imag\", C.c_double)]\n\n\nclass RESPONSE(C.Structure):\n pass\n\n\nRESPONSE._fields_ = [(\"station\", C.c_char * STALEN),\n (\"network\", C.c_char * NETLEN),\n (\"locid\", C.c_char * LOCIDLEN),\n (\"channel\", C.c_char * CHALEN),\n (\"rvec\", C.POINTER(C_COMPLEX)),\n (\"nfreqs\", C.c_int),\n (\"freqs\", C.POINTER(C.c_double)),\n (\"next\", C.POINTER(RESPONSE))]\n\nclibevresp.evresp.argtypes = [\n C.c_char_p,\n C.c_char_p,\n C.c_char_p,\n C.c_char_p,\n C.c_char_p,\n C.c_char_p,\n C.c_char_p,\n np.ctypeslib.ndpointer(dtype=np.float64,\n ndim=1,\n flags='C_CONTIGUOUS'),\n C.c_int,\n C.c_char_p,\n C.c_char_p,\n C.c_int,\n C.c_int,\n C.c_int,\n C.c_int]\nclibevresp.evresp.restype = C.POINTER(RESPONSE)\n\nclibevresp.free_response.argtypes = [C.POINTER(RESPONSE)]\nclibevresp.free_response.restype = C.c_void_p\n", "path": "obspy/signal/headers.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDefines the libsignal and evalresp 
structures and blockettes.\n\"\"\"\nimport ctypes as C # NOQA\n\nimport numpy as np\n\nfrom obspy.core.util.libnames import _load_cdll\n\n\n# Import shared libsignal\nclibsignal = _load_cdll(\"signal\")\n# Import shared libevresp\nclibevresp = _load_cdll(\"evresp\")\n\nclibsignal.calcSteer.argtypes = [\n C.c_int, C.c_int, C.c_int, C.c_int, C.c_int, C.c_float,\n np.ctypeslib.ndpointer(dtype=np.float32, ndim=3,\n flags='C_CONTIGUOUS'),\n np.ctypeslib.ndpointer(dtype=np.complex128, ndim=4,\n flags='C_CONTIGUOUS'),\n]\nclibsignal.calcSteer.restype = C.c_void_p\n\nclibsignal.generalizedBeamformer.argtypes = [\n np.ctypeslib.ndpointer(dtype=np.float64, ndim=2,\n flags='C_CONTIGUOUS'),\n np.ctypeslib.ndpointer(dtype=np.float64, ndim=2,\n flags='C_CONTIGUOUS'),\n np.ctypeslib.ndpointer(dtype=np.complex128, ndim=4,\n flags='C_CONTIGUOUS'),\n np.ctypeslib.ndpointer(dtype=np.complex128, ndim=3,\n flags='C_CONTIGUOUS'),\n C.c_int, C.c_int, C.c_int, C.c_int, C.c_int,\n C.c_double,\n C.c_int,\n]\nclibsignal.generalizedBeamformer.restype = C.c_int\n\nclibsignal.X_corr.argtypes = [\n np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,\n flags='C_CONTIGUOUS'),\n np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,\n flags='C_CONTIGUOUS'),\n np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,\n flags='C_CONTIGUOUS'),\n C.c_int, C.c_int, C.c_int,\n C.POINTER(C.c_int), C.POINTER(C.c_double)]\nclibsignal.X_corr.restype = C.c_int\n\nclibsignal.recstalta.argtypes = [\n np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,\n flags='C_CONTIGUOUS'),\n np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,\n flags='C_CONTIGUOUS'),\n C.c_int, C.c_int, C.c_int]\nclibsignal.recstalta.restype = C.c_void_p\n\nclibsignal.ppick.argtypes = [\n np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,\n flags='C_CONTIGUOUS'),\n C.c_int, C.POINTER(C.c_int), C.c_char_p, C.c_float, C.c_int, C.c_int,\n C.c_float, C.c_float, C.c_int, C.c_int, C.POINTER(C.c_float)]\nclibsignal.ppick.restype = C.c_int\n\nclibsignal.ar_picker.argtypes = [\n np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,\n flags='C_CONTIGUOUS'),\n np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,\n flags='C_CONTIGUOUS'),\n np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,\n flags='C_CONTIGUOUS'),\n C.c_int, C.c_float, C.c_float, C.c_float, C.c_float, C.c_float,\n C.c_float, C.c_float, C.c_int, C.c_int, C.POINTER(C.c_float),\n C.POINTER(C.c_float), C.c_double, C.c_double, C.c_int]\nclibsignal.ar_picker.restypes = C.c_int\n\nclibsignal.utl_geo_km.argtypes = [C.c_double, C.c_double, C.c_double,\n C.POINTER(C.c_double),\n C.POINTER(C.c_double)]\nclibsignal.utl_geo_km.restype = C.c_void_p\n\nhead_stalta_t = np.dtype([\n ('N', np.uint32),\n ('nsta', np.uint32),\n ('nlta', np.uint32),\n], align=True)\n\nclibsignal.stalta.argtypes = [\n np.ctypeslib.ndpointer(dtype=head_stalta_t, ndim=1,\n flags='C_CONTIGUOUS'),\n np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,\n flags='C_CONTIGUOUS'),\n np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,\n flags='C_CONTIGUOUS'),\n]\nclibsignal.stalta.restype = C.c_int\n\nclibsignal.hermite_interpolation.argtypes = [\n np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,\n flags='C_CONTIGUOUS'),\n np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,\n flags='C_CONTIGUOUS'),\n np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,\n flags='C_CONTIGUOUS'),\n np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,\n flags='C_CONTIGUOUS'),\n C.c_int, C.c_int, C.c_double, C.c_double]\nclibsignal.hermite_interpolation.restype = C.c_void_p\n\nclibsignal.lanczos_resample.argtypes = [\n # y_in\n 
np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,\n flags='C_CONTIGUOUS'),\n # y_out\n np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,\n flags='C_CONTIGUOUS'),\n # dt\n C.c_double,\n # offset\n C.c_double,\n # len_in\n C.c_int,\n # len_out,\n C.c_int,\n # a,\n C.c_int,\n # window\n C.c_int]\nclibsignal.lanczos_resample.restype = None\n\nclibsignal.calculate_kernel.argtypes = [\n # double *x\n np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,\n flags='C_CONTIGUOUS'),\n # double *y\n np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,\n flags='C_CONTIGUOUS'),\n # int len\n C.c_int,\n # int a,\n C.c_int,\n # int return_type,\n C.c_int,\n # enum lanczos_window_type window\n C.c_int]\nclibsignal.calculate_kernel.restype = None\n\nclibsignal.aic_simple.argtypes = [\n # double *aic, output\n np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,\n flags='C_CONTIGUOUS'),\n # double *arr, input\n np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,\n flags='C_CONTIGUOUS'),\n # arr size\n C.c_uint32]\nclibsignal.recstalta.restype = C.c_void_p\n\nSTALEN = 64\nNETLEN = 64\nCHALEN = 64\nLOCIDLEN = 64\n\n\nclass C_COMPLEX(C.Structure): # noqa\n _fields_ = [(\"real\", C.c_double),\n (\"imag\", C.c_double)]\n\n\nclass RESPONSE(C.Structure):\n pass\n\n\nRESPONSE._fields_ = [(\"station\", C.c_char * STALEN),\n (\"network\", C.c_char * NETLEN),\n (\"locid\", C.c_char * LOCIDLEN),\n (\"channel\", C.c_char * CHALEN),\n (\"rvec\", C.POINTER(C_COMPLEX)),\n (\"nfreqs\", C.c_int),\n (\"freqs\", C.POINTER(C.c_double)),\n (\"next\", C.POINTER(RESPONSE))]\n\nclibevresp.evresp.argtypes = [\n C.c_char_p,\n C.c_char_p,\n C.c_char_p,\n C.c_char_p,\n C.c_char_p,\n C.c_char_p,\n C.c_char_p,\n np.ctypeslib.ndpointer(dtype=np.float64,\n ndim=1,\n flags='C_CONTIGUOUS'),\n C.c_int,\n C.c_char_p,\n C.c_char_p,\n C.c_int,\n C.c_int,\n C.c_int,\n C.c_int]\nclibevresp.evresp.restype = C.POINTER(RESPONSE)\n\nclibevresp.free_response.argtypes = [C.POINTER(RESPONSE)]\nclibevresp.free_response.restype = C.c_void_p\n", "path": "obspy/signal/headers.py"}]} | 3,481 | 176 |
gh_patches_debug_13711 | rasdani/github-patches | git_diff | modin-project__modin-1146 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update pandas version to 1.0.3
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup, find_packages
2 import versioneer
3
4 with open("README.md", "r") as fh:
5 long_description = fh.read()
6
7 dask_deps = ["dask>=2.1.0", "distributed>=2.3.2"]
8 ray_deps = ["ray==0.8.0"]
9
10 setup(
11 name="modin",
12 version=versioneer.get_version(),
13 cmdclass=versioneer.get_cmdclass(),
14 description="Modin: Make your pandas code run faster by changing one line of code.",
15 packages=find_packages(),
16 url="https://github.com/modin-project/modin",
17 long_description=long_description,
18 long_description_content_type="text/markdown",
19 install_requires=["pandas==1.0.1", "packaging"],
20 extras_require={
21 # can be installed by pip install modin[dask]
22 "dask": dask_deps,
23 "ray": ray_deps,
24 "all": dask_deps + ray_deps,
25 },
26 python_requires=">=3.5",
27 )
28
```
Path: `modin/pandas/__init__.py`
Content:
```
1 import pandas
2
3 __pandas_version__ = "1.0.1"
4
5 if pandas.__version__ != __pandas_version__:
6 import warnings
7
8 warnings.warn(
9 "The pandas version installed does not match the required pandas version in "
10 "Modin. This may cause undesired side effects!".format(__pandas_version__)
11 )
12
13 from pandas import (
14 eval,
15 unique,
16 value_counts,
17 cut,
18 to_numeric,
19 factorize,
20 test,
21 qcut,
22 date_range,
23 period_range,
24 Index,
25 MultiIndex,
26 CategoricalIndex,
27 bdate_range,
28 DatetimeIndex,
29 Timedelta,
30 Timestamp,
31 to_timedelta,
32 set_eng_float_format,
33 options,
34 set_option,
35 NaT,
36 PeriodIndex,
37 Categorical,
38 Interval,
39 UInt8Dtype,
40 UInt16Dtype,
41 UInt32Dtype,
42 UInt64Dtype,
43 SparseDtype,
44 Int8Dtype,
45 Int16Dtype,
46 Int32Dtype,
47 Int64Dtype,
48 StringDtype,
49 BooleanDtype,
50 CategoricalDtype,
51 DatetimeTZDtype,
52 IntervalDtype,
53 PeriodDtype,
54 RangeIndex,
55 Int64Index,
56 UInt64Index,
57 Float64Index,
58 TimedeltaIndex,
59 IntervalIndex,
60 IndexSlice,
61 Grouper,
62 array,
63 Period,
64 show_versions,
65 DateOffset,
66 timedelta_range,
67 infer_freq,
68 interval_range,
69 ExcelWriter,
70 datetime,
71 NamedAgg,
72 NA,
73 )
74 import threading
75 import os
76 import types
77 import sys
78 import multiprocessing
79
80 from .. import __version__
81 from .concat import concat
82 from .dataframe import DataFrame
83 from .datetimes import to_datetime
84 from .io import (
85 read_csv,
86 read_parquet,
87 read_json,
88 read_html,
89 read_clipboard,
90 read_excel,
91 read_hdf,
92 read_feather,
93 read_stata,
94 read_sas,
95 read_pickle,
96 read_sql,
97 read_gbq,
98 read_table,
99 read_fwf,
100 read_sql_table,
101 read_sql_query,
102 read_spss,
103 ExcelFile,
104 to_pickle,
105 HDFStore,
106 json_normalize,
107 read_orc,
108 )
109 from .reshape import get_dummies, melt, crosstab, lreshape, wide_to_long
110 from .series import Series
111 from .general import (
112 isna,
113 isnull,
114 merge,
115 merge_asof,
116 merge_ordered,
117 pivot_table,
118 notnull,
119 notna,
120 pivot,
121 )
122 from .plotting import Plotting as plotting
123 from .. import __execution_engine__ as execution_engine
124
125 # Set this so that Pandas doesn't try to multithread by itself
126 os.environ["OMP_NUM_THREADS"] = "1"
127 num_cpus = 1
128
129
130 def initialize_ray():
131 import ray
132
133 """Initializes ray based on environment variables and internal defaults."""
134 if threading.current_thread().name == "MainThread":
135 import secrets
136
137 plasma_directory = None
138 num_cpus = os.environ.get("MODIN_CPUS", None) or multiprocessing.cpu_count()
139 cluster = os.environ.get("MODIN_RAY_CLUSTER", None)
140 redis_address = os.environ.get("MODIN_REDIS_ADDRESS", None)
141 redis_password = secrets.token_hex(16)
142 if cluster == "True" and redis_address is not None:
143 # We only start ray in a cluster setting for the head node.
144 ray.init(
145 num_cpus=int(num_cpus),
146 include_webui=False,
147 ignore_reinit_error=True,
148 redis_address=redis_address,
149 redis_password=redis_password,
150 logging_level=100,
151 )
152 elif cluster is None:
153 object_store_memory = os.environ.get("MODIN_MEMORY", None)
154 if os.environ.get("MODIN_OUT_OF_CORE", "False").title() == "True":
155 from tempfile import gettempdir
156
157 plasma_directory = gettempdir()
158 # We may have already set the memory from the environment variable, we don't
159 # want to overwrite that value if we have.
160 if object_store_memory is None:
161 # Round down to the nearest Gigabyte.
162 mem_bytes = ray.utils.get_system_memory() // 10 ** 9 * 10 ** 9
163 # Default to 8x memory for out of core
164 object_store_memory = 8 * mem_bytes
165 # In case anything failed above, we can still improve the memory for Modin.
166 if object_store_memory is None:
167 # Round down to the nearest Gigabyte.
168 object_store_memory = int(
169 0.6 * ray.utils.get_system_memory() // 10 ** 9 * 10 ** 9
170 )
171 # If the memory pool is smaller than 2GB, just use the default in ray.
172 if object_store_memory == 0:
173 object_store_memory = None
174 else:
175 object_store_memory = int(object_store_memory)
176 ray.init(
177 num_cpus=int(num_cpus),
178 include_webui=False,
179 ignore_reinit_error=True,
180 plasma_directory=plasma_directory,
181 object_store_memory=object_store_memory,
182 redis_address=redis_address,
183 redis_password=redis_password,
184 logging_level=100,
185 memory=object_store_memory,
186 )
187 # Register custom serializer for method objects to avoid warning message.
188 # We serialize `MethodType` objects when we use AxisPartition operations.
189 ray.register_custom_serializer(types.MethodType, use_pickle=True)
190
191 # Register a fix import function to run on all_workers including the driver.
192 # This is a hack solution to fix #647, #746
193 def move_stdlib_ahead_of_site_packages(*args):
194 site_packages_path = None
195 site_packages_path_index = -1
196 for i, path in enumerate(sys.path):
197 if sys.exec_prefix in path and path.endswith("site-packages"):
198 site_packages_path = path
199 site_packages_path_index = i
200 # break on first found
201 break
202
203 if site_packages_path is not None:
204 # stdlib packages layout as follows:
205 # - python3.x
206 # - typing.py
207 # - site-packages/
208 # - pandas
209 # So extracting the dirname of the site_packages can point us
210 # to the directory containing standard libraries.
211 sys.path.insert(
212 site_packages_path_index, os.path.dirname(site_packages_path)
213 )
214
215 move_stdlib_ahead_of_site_packages()
216 ray.worker.global_worker.run_function_on_all_workers(
217 move_stdlib_ahead_of_site_packages
218 )
219
220
221 if execution_engine == "Ray":
222 import ray
223
224 initialize_ray()
225 num_cpus = ray.cluster_resources()["CPU"]
226 elif execution_engine == "Dask": # pragma: no cover
227 from distributed.client import get_client
228 import warnings
229
230 if threading.current_thread().name == "MainThread":
231 warnings.warn("The Dask Engine for Modin is experimental.")
232 try:
233 client = get_client()
234 except ValueError:
235 from distributed import Client
236
237 num_cpus = os.environ.get("MODIN_CPUS", None) or multiprocessing.cpu_count()
238 client = Client(n_workers=int(num_cpus))
239 elif execution_engine != "Python":
240 raise ImportError("Unrecognized execution engine: {}.".format(execution_engine))
241
242 DEFAULT_NPARTITIONS = max(4, int(num_cpus))
243
244 __all__ = [
245 "DataFrame",
246 "Series",
247 "read_csv",
248 "read_parquet",
249 "read_json",
250 "read_html",
251 "read_clipboard",
252 "read_excel",
253 "read_hdf",
254 "read_feather",
255 "read_stata",
256 "read_sas",
257 "read_pickle",
258 "read_sql",
259 "read_gbq",
260 "read_table",
261 "read_spss",
262 "read_orc",
263 "json_normalize",
264 "concat",
265 "eval",
266 "unique",
267 "value_counts",
268 "cut",
269 "to_numeric",
270 "factorize",
271 "test",
272 "qcut",
273 "to_datetime",
274 "get_dummies",
275 "isna",
276 "isnull",
277 "merge",
278 "pivot_table",
279 "date_range",
280 "Index",
281 "MultiIndex",
282 "Series",
283 "bdate_range",
284 "period_range",
285 "DatetimeIndex",
286 "to_timedelta",
287 "set_eng_float_format",
288 "options",
289 "set_option",
290 "CategoricalIndex",
291 "Timedelta",
292 "Timestamp",
293 "NaT",
294 "PeriodIndex",
295 "Categorical",
296 "__version__",
297 "melt",
298 "crosstab",
299 "plotting",
300 "Interval",
301 "UInt8Dtype",
302 "UInt16Dtype",
303 "UInt32Dtype",
304 "UInt64Dtype",
305 "SparseDtype",
306 "Int8Dtype",
307 "Int16Dtype",
308 "Int32Dtype",
309 "Int64Dtype",
310 "CategoricalDtype",
311 "DatetimeTZDtype",
312 "IntervalDtype",
313 "PeriodDtype",
314 "BooleanDtype",
315 "StringDtype",
316 "NA",
317 "RangeIndex",
318 "Int64Index",
319 "UInt64Index",
320 "Float64Index",
321 "TimedeltaIndex",
322 "IntervalIndex",
323 "IndexSlice",
324 "Grouper",
325 "array",
326 "Period",
327 "show_versions",
328 "DateOffset",
329 "timedelta_range",
330 "infer_freq",
331 "interval_range",
332 "ExcelWriter",
333 "read_fwf",
334 "read_sql_table",
335 "read_sql_query",
336 "ExcelFile",
337 "to_pickle",
338 "HDFStore",
339 "lreshape",
340 "wide_to_long",
341 "merge_asof",
342 "merge_ordered",
343 "notnull",
344 "notna",
345 "pivot",
346 "datetime",
347 "NamedAgg",
348 "DEFAULT_NPARTITIONS",
349 ]
350
351 del pandas
352
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/modin/pandas/__init__.py b/modin/pandas/__init__.py
--- a/modin/pandas/__init__.py
+++ b/modin/pandas/__init__.py
@@ -1,6 +1,6 @@
import pandas
-__pandas_version__ = "1.0.1"
+__pandas_version__ = "1.0.3"
if pandas.__version__ != __pandas_version__:
import warnings
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -16,7 +16,7 @@
url="https://github.com/modin-project/modin",
long_description=long_description,
long_description_content_type="text/markdown",
- install_requires=["pandas==1.0.1", "packaging"],
+ install_requires=["pandas==1.0.3", "packaging"],
extras_require={
# can be installed by pip install modin[dask]
"dask": dask_deps,
| {"golden_diff": "diff --git a/modin/pandas/__init__.py b/modin/pandas/__init__.py\n--- a/modin/pandas/__init__.py\n+++ b/modin/pandas/__init__.py\n@@ -1,6 +1,6 @@\n import pandas\n \n-__pandas_version__ = \"1.0.1\"\n+__pandas_version__ = \"1.0.3\"\n \n if pandas.__version__ != __pandas_version__:\n import warnings\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -16,7 +16,7 @@\n url=\"https://github.com/modin-project/modin\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n- install_requires=[\"pandas==1.0.1\", \"packaging\"],\n+ install_requires=[\"pandas==1.0.3\", \"packaging\"],\n extras_require={\n # can be installed by pip install modin[dask]\n \"dask\": dask_deps,\n", "issue": "Update pandas version to 1.0.3\n\n", "before_files": [{"content": "from setuptools import setup, find_packages\nimport versioneer\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\ndask_deps = [\"dask>=2.1.0\", \"distributed>=2.3.2\"]\nray_deps = [\"ray==0.8.0\"]\n\nsetup(\n name=\"modin\",\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n description=\"Modin: Make your pandas code run faster by changing one line of code.\",\n packages=find_packages(),\n url=\"https://github.com/modin-project/modin\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n install_requires=[\"pandas==1.0.1\", \"packaging\"],\n extras_require={\n # can be installed by pip install modin[dask]\n \"dask\": dask_deps,\n \"ray\": ray_deps,\n \"all\": dask_deps + ray_deps,\n },\n python_requires=\">=3.5\",\n)\n", "path": "setup.py"}, {"content": "import pandas\n\n__pandas_version__ = \"1.0.1\"\n\nif pandas.__version__ != __pandas_version__:\n import warnings\n\n warnings.warn(\n \"The pandas version installed does not match the required pandas version in \"\n \"Modin. This may cause undesired side effects!\".format(__pandas_version__)\n )\n\nfrom pandas import (\n eval,\n unique,\n value_counts,\n cut,\n to_numeric,\n factorize,\n test,\n qcut,\n date_range,\n period_range,\n Index,\n MultiIndex,\n CategoricalIndex,\n bdate_range,\n DatetimeIndex,\n Timedelta,\n Timestamp,\n to_timedelta,\n set_eng_float_format,\n options,\n set_option,\n NaT,\n PeriodIndex,\n Categorical,\n Interval,\n UInt8Dtype,\n UInt16Dtype,\n UInt32Dtype,\n UInt64Dtype,\n SparseDtype,\n Int8Dtype,\n Int16Dtype,\n Int32Dtype,\n Int64Dtype,\n StringDtype,\n BooleanDtype,\n CategoricalDtype,\n DatetimeTZDtype,\n IntervalDtype,\n PeriodDtype,\n RangeIndex,\n Int64Index,\n UInt64Index,\n Float64Index,\n TimedeltaIndex,\n IntervalIndex,\n IndexSlice,\n Grouper,\n array,\n Period,\n show_versions,\n DateOffset,\n timedelta_range,\n infer_freq,\n interval_range,\n ExcelWriter,\n datetime,\n NamedAgg,\n NA,\n)\nimport threading\nimport os\nimport types\nimport sys\nimport multiprocessing\n\nfrom .. 
import __version__\nfrom .concat import concat\nfrom .dataframe import DataFrame\nfrom .datetimes import to_datetime\nfrom .io import (\n read_csv,\n read_parquet,\n read_json,\n read_html,\n read_clipboard,\n read_excel,\n read_hdf,\n read_feather,\n read_stata,\n read_sas,\n read_pickle,\n read_sql,\n read_gbq,\n read_table,\n read_fwf,\n read_sql_table,\n read_sql_query,\n read_spss,\n ExcelFile,\n to_pickle,\n HDFStore,\n json_normalize,\n read_orc,\n)\nfrom .reshape import get_dummies, melt, crosstab, lreshape, wide_to_long\nfrom .series import Series\nfrom .general import (\n isna,\n isnull,\n merge,\n merge_asof,\n merge_ordered,\n pivot_table,\n notnull,\n notna,\n pivot,\n)\nfrom .plotting import Plotting as plotting\nfrom .. import __execution_engine__ as execution_engine\n\n# Set this so that Pandas doesn't try to multithread by itself\nos.environ[\"OMP_NUM_THREADS\"] = \"1\"\nnum_cpus = 1\n\n\ndef initialize_ray():\n import ray\n\n \"\"\"Initializes ray based on environment variables and internal defaults.\"\"\"\n if threading.current_thread().name == \"MainThread\":\n import secrets\n\n plasma_directory = None\n num_cpus = os.environ.get(\"MODIN_CPUS\", None) or multiprocessing.cpu_count()\n cluster = os.environ.get(\"MODIN_RAY_CLUSTER\", None)\n redis_address = os.environ.get(\"MODIN_REDIS_ADDRESS\", None)\n redis_password = secrets.token_hex(16)\n if cluster == \"True\" and redis_address is not None:\n # We only start ray in a cluster setting for the head node.\n ray.init(\n num_cpus=int(num_cpus),\n include_webui=False,\n ignore_reinit_error=True,\n redis_address=redis_address,\n redis_password=redis_password,\n logging_level=100,\n )\n elif cluster is None:\n object_store_memory = os.environ.get(\"MODIN_MEMORY\", None)\n if os.environ.get(\"MODIN_OUT_OF_CORE\", \"False\").title() == \"True\":\n from tempfile import gettempdir\n\n plasma_directory = gettempdir()\n # We may have already set the memory from the environment variable, we don't\n # want to overwrite that value if we have.\n if object_store_memory is None:\n # Round down to the nearest Gigabyte.\n mem_bytes = ray.utils.get_system_memory() // 10 ** 9 * 10 ** 9\n # Default to 8x memory for out of core\n object_store_memory = 8 * mem_bytes\n # In case anything failed above, we can still improve the memory for Modin.\n if object_store_memory is None:\n # Round down to the nearest Gigabyte.\n object_store_memory = int(\n 0.6 * ray.utils.get_system_memory() // 10 ** 9 * 10 ** 9\n )\n # If the memory pool is smaller than 2GB, just use the default in ray.\n if object_store_memory == 0:\n object_store_memory = None\n else:\n object_store_memory = int(object_store_memory)\n ray.init(\n num_cpus=int(num_cpus),\n include_webui=False,\n ignore_reinit_error=True,\n plasma_directory=plasma_directory,\n object_store_memory=object_store_memory,\n redis_address=redis_address,\n redis_password=redis_password,\n logging_level=100,\n memory=object_store_memory,\n )\n # Register custom serializer for method objects to avoid warning message.\n # We serialize `MethodType` objects when we use AxisPartition operations.\n ray.register_custom_serializer(types.MethodType, use_pickle=True)\n\n # Register a fix import function to run on all_workers including the driver.\n # This is a hack solution to fix #647, #746\n def move_stdlib_ahead_of_site_packages(*args):\n site_packages_path = None\n site_packages_path_index = -1\n for i, path in enumerate(sys.path):\n if sys.exec_prefix in path and path.endswith(\"site-packages\"):\n 
site_packages_path = path\n site_packages_path_index = i\n # break on first found\n break\n\n if site_packages_path is not None:\n # stdlib packages layout as follows:\n # - python3.x\n # - typing.py\n # - site-packages/\n # - pandas\n # So extracting the dirname of the site_packages can point us\n # to the directory containing standard libraries.\n sys.path.insert(\n site_packages_path_index, os.path.dirname(site_packages_path)\n )\n\n move_stdlib_ahead_of_site_packages()\n ray.worker.global_worker.run_function_on_all_workers(\n move_stdlib_ahead_of_site_packages\n )\n\n\nif execution_engine == \"Ray\":\n import ray\n\n initialize_ray()\n num_cpus = ray.cluster_resources()[\"CPU\"]\nelif execution_engine == \"Dask\": # pragma: no cover\n from distributed.client import get_client\n import warnings\n\n if threading.current_thread().name == \"MainThread\":\n warnings.warn(\"The Dask Engine for Modin is experimental.\")\n try:\n client = get_client()\n except ValueError:\n from distributed import Client\n\n num_cpus = os.environ.get(\"MODIN_CPUS\", None) or multiprocessing.cpu_count()\n client = Client(n_workers=int(num_cpus))\nelif execution_engine != \"Python\":\n raise ImportError(\"Unrecognized execution engine: {}.\".format(execution_engine))\n\nDEFAULT_NPARTITIONS = max(4, int(num_cpus))\n\n__all__ = [\n \"DataFrame\",\n \"Series\",\n \"read_csv\",\n \"read_parquet\",\n \"read_json\",\n \"read_html\",\n \"read_clipboard\",\n \"read_excel\",\n \"read_hdf\",\n \"read_feather\",\n \"read_stata\",\n \"read_sas\",\n \"read_pickle\",\n \"read_sql\",\n \"read_gbq\",\n \"read_table\",\n \"read_spss\",\n \"read_orc\",\n \"json_normalize\",\n \"concat\",\n \"eval\",\n \"unique\",\n \"value_counts\",\n \"cut\",\n \"to_numeric\",\n \"factorize\",\n \"test\",\n \"qcut\",\n \"to_datetime\",\n \"get_dummies\",\n \"isna\",\n \"isnull\",\n \"merge\",\n \"pivot_table\",\n \"date_range\",\n \"Index\",\n \"MultiIndex\",\n \"Series\",\n \"bdate_range\",\n \"period_range\",\n \"DatetimeIndex\",\n \"to_timedelta\",\n \"set_eng_float_format\",\n \"options\",\n \"set_option\",\n \"CategoricalIndex\",\n \"Timedelta\",\n \"Timestamp\",\n \"NaT\",\n \"PeriodIndex\",\n \"Categorical\",\n \"__version__\",\n \"melt\",\n \"crosstab\",\n \"plotting\",\n \"Interval\",\n \"UInt8Dtype\",\n \"UInt16Dtype\",\n \"UInt32Dtype\",\n \"UInt64Dtype\",\n \"SparseDtype\",\n \"Int8Dtype\",\n \"Int16Dtype\",\n \"Int32Dtype\",\n \"Int64Dtype\",\n \"CategoricalDtype\",\n \"DatetimeTZDtype\",\n \"IntervalDtype\",\n \"PeriodDtype\",\n \"BooleanDtype\",\n \"StringDtype\",\n \"NA\",\n \"RangeIndex\",\n \"Int64Index\",\n \"UInt64Index\",\n \"Float64Index\",\n \"TimedeltaIndex\",\n \"IntervalIndex\",\n \"IndexSlice\",\n \"Grouper\",\n \"array\",\n \"Period\",\n \"show_versions\",\n \"DateOffset\",\n \"timedelta_range\",\n \"infer_freq\",\n \"interval_range\",\n \"ExcelWriter\",\n \"read_fwf\",\n \"read_sql_table\",\n \"read_sql_query\",\n \"ExcelFile\",\n \"to_pickle\",\n \"HDFStore\",\n \"lreshape\",\n \"wide_to_long\",\n \"merge_asof\",\n \"merge_ordered\",\n \"notnull\",\n \"notna\",\n \"pivot\",\n \"datetime\",\n \"NamedAgg\",\n \"DEFAULT_NPARTITIONS\",\n]\n\ndel pandas\n", "path": "modin/pandas/__init__.py"}], "after_files": [{"content": "from setuptools import setup, find_packages\nimport versioneer\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\ndask_deps = [\"dask>=2.1.0\", \"distributed>=2.3.2\"]\nray_deps = [\"ray==0.8.0\"]\n\nsetup(\n name=\"modin\",\n version=versioneer.get_version(),\n 
cmdclass=versioneer.get_cmdclass(),\n description=\"Modin: Make your pandas code run faster by changing one line of code.\",\n packages=find_packages(),\n url=\"https://github.com/modin-project/modin\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n install_requires=[\"pandas==1.0.3\", \"packaging\"],\n extras_require={\n # can be installed by pip install modin[dask]\n \"dask\": dask_deps,\n \"ray\": ray_deps,\n \"all\": dask_deps + ray_deps,\n },\n python_requires=\">=3.5\",\n)\n", "path": "setup.py"}, {"content": "import pandas\n\n__pandas_version__ = \"1.0.3\"\n\nif pandas.__version__ != __pandas_version__:\n import warnings\n\n warnings.warn(\n \"The pandas version installed does not match the required pandas version in \"\n \"Modin. This may cause undesired side effects!\".format(__pandas_version__)\n )\n\nfrom pandas import (\n eval,\n unique,\n value_counts,\n cut,\n to_numeric,\n factorize,\n test,\n qcut,\n date_range,\n period_range,\n Index,\n MultiIndex,\n CategoricalIndex,\n bdate_range,\n DatetimeIndex,\n Timedelta,\n Timestamp,\n to_timedelta,\n set_eng_float_format,\n options,\n set_option,\n NaT,\n PeriodIndex,\n Categorical,\n Interval,\n UInt8Dtype,\n UInt16Dtype,\n UInt32Dtype,\n UInt64Dtype,\n SparseDtype,\n Int8Dtype,\n Int16Dtype,\n Int32Dtype,\n Int64Dtype,\n StringDtype,\n BooleanDtype,\n CategoricalDtype,\n DatetimeTZDtype,\n IntervalDtype,\n PeriodDtype,\n RangeIndex,\n Int64Index,\n UInt64Index,\n Float64Index,\n TimedeltaIndex,\n IntervalIndex,\n IndexSlice,\n Grouper,\n array,\n Period,\n show_versions,\n DateOffset,\n timedelta_range,\n infer_freq,\n interval_range,\n ExcelWriter,\n datetime,\n NamedAgg,\n NA,\n)\nimport threading\nimport os\nimport types\nimport sys\nimport multiprocessing\n\nfrom .. import __version__\nfrom .concat import concat\nfrom .dataframe import DataFrame\nfrom .datetimes import to_datetime\nfrom .io import (\n read_csv,\n read_parquet,\n read_json,\n read_html,\n read_clipboard,\n read_excel,\n read_hdf,\n read_feather,\n read_stata,\n read_sas,\n read_pickle,\n read_sql,\n read_gbq,\n read_table,\n read_fwf,\n read_sql_table,\n read_sql_query,\n read_spss,\n ExcelFile,\n to_pickle,\n HDFStore,\n json_normalize,\n read_orc,\n)\nfrom .reshape import get_dummies, melt, crosstab, lreshape, wide_to_long\nfrom .series import Series\nfrom .general import (\n isna,\n isnull,\n merge,\n merge_asof,\n merge_ordered,\n pivot_table,\n notnull,\n notna,\n pivot,\n)\nfrom .plotting import Plotting as plotting\nfrom .. 
import __execution_engine__ as execution_engine\n\n# Set this so that Pandas doesn't try to multithread by itself\nos.environ[\"OMP_NUM_THREADS\"] = \"1\"\nnum_cpus = 1\n\n\ndef initialize_ray():\n import ray\n\n \"\"\"Initializes ray based on environment variables and internal defaults.\"\"\"\n if threading.current_thread().name == \"MainThread\":\n import secrets\n\n plasma_directory = None\n num_cpus = os.environ.get(\"MODIN_CPUS\", None) or multiprocessing.cpu_count()\n cluster = os.environ.get(\"MODIN_RAY_CLUSTER\", None)\n redis_address = os.environ.get(\"MODIN_REDIS_ADDRESS\", None)\n redis_password = secrets.token_hex(16)\n if cluster == \"True\" and redis_address is not None:\n # We only start ray in a cluster setting for the head node.\n ray.init(\n num_cpus=int(num_cpus),\n include_webui=False,\n ignore_reinit_error=True,\n redis_address=redis_address,\n redis_password=redis_password,\n logging_level=100,\n )\n elif cluster is None:\n object_store_memory = os.environ.get(\"MODIN_MEMORY\", None)\n if os.environ.get(\"MODIN_OUT_OF_CORE\", \"False\").title() == \"True\":\n from tempfile import gettempdir\n\n plasma_directory = gettempdir()\n # We may have already set the memory from the environment variable, we don't\n # want to overwrite that value if we have.\n if object_store_memory is None:\n # Round down to the nearest Gigabyte.\n mem_bytes = ray.utils.get_system_memory() // 10 ** 9 * 10 ** 9\n # Default to 8x memory for out of core\n object_store_memory = 8 * mem_bytes\n # In case anything failed above, we can still improve the memory for Modin.\n if object_store_memory is None:\n # Round down to the nearest Gigabyte.\n object_store_memory = int(\n 0.6 * ray.utils.get_system_memory() // 10 ** 9 * 10 ** 9\n )\n # If the memory pool is smaller than 2GB, just use the default in ray.\n if object_store_memory == 0:\n object_store_memory = None\n else:\n object_store_memory = int(object_store_memory)\n ray.init(\n num_cpus=int(num_cpus),\n include_webui=False,\n ignore_reinit_error=True,\n plasma_directory=plasma_directory,\n object_store_memory=object_store_memory,\n redis_address=redis_address,\n redis_password=redis_password,\n logging_level=100,\n memory=object_store_memory,\n )\n # Register custom serializer for method objects to avoid warning message.\n # We serialize `MethodType` objects when we use AxisPartition operations.\n ray.register_custom_serializer(types.MethodType, use_pickle=True)\n\n # Register a fix import function to run on all_workers including the driver.\n # This is a hack solution to fix #647, #746\n def move_stdlib_ahead_of_site_packages(*args):\n site_packages_path = None\n site_packages_path_index = -1\n for i, path in enumerate(sys.path):\n if sys.exec_prefix in path and path.endswith(\"site-packages\"):\n site_packages_path = path\n site_packages_path_index = i\n # break on first found\n break\n\n if site_packages_path is not None:\n # stdlib packages layout as follows:\n # - python3.x\n # - typing.py\n # - site-packages/\n # - pandas\n # So extracting the dirname of the site_packages can point us\n # to the directory containing standard libraries.\n sys.path.insert(\n site_packages_path_index, os.path.dirname(site_packages_path)\n )\n\n move_stdlib_ahead_of_site_packages()\n ray.worker.global_worker.run_function_on_all_workers(\n move_stdlib_ahead_of_site_packages\n )\n\n\nif execution_engine == \"Ray\":\n import ray\n\n initialize_ray()\n num_cpus = ray.cluster_resources()[\"CPU\"]\nelif execution_engine == \"Dask\": # pragma: no cover\n from 
distributed.client import get_client\n import warnings\n\n if threading.current_thread().name == \"MainThread\":\n warnings.warn(\"The Dask Engine for Modin is experimental.\")\n try:\n client = get_client()\n except ValueError:\n from distributed import Client\n\n num_cpus = os.environ.get(\"MODIN_CPUS\", None) or multiprocessing.cpu_count()\n client = Client(n_workers=int(num_cpus))\nelif execution_engine != \"Python\":\n raise ImportError(\"Unrecognized execution engine: {}.\".format(execution_engine))\n\nDEFAULT_NPARTITIONS = max(4, int(num_cpus))\n\n__all__ = [\n \"DataFrame\",\n \"Series\",\n \"read_csv\",\n \"read_parquet\",\n \"read_json\",\n \"read_html\",\n \"read_clipboard\",\n \"read_excel\",\n \"read_hdf\",\n \"read_feather\",\n \"read_stata\",\n \"read_sas\",\n \"read_pickle\",\n \"read_sql\",\n \"read_gbq\",\n \"read_table\",\n \"read_spss\",\n \"read_orc\",\n \"json_normalize\",\n \"concat\",\n \"eval\",\n \"unique\",\n \"value_counts\",\n \"cut\",\n \"to_numeric\",\n \"factorize\",\n \"test\",\n \"qcut\",\n \"to_datetime\",\n \"get_dummies\",\n \"isna\",\n \"isnull\",\n \"merge\",\n \"pivot_table\",\n \"date_range\",\n \"Index\",\n \"MultiIndex\",\n \"Series\",\n \"bdate_range\",\n \"period_range\",\n \"DatetimeIndex\",\n \"to_timedelta\",\n \"set_eng_float_format\",\n \"options\",\n \"set_option\",\n \"CategoricalIndex\",\n \"Timedelta\",\n \"Timestamp\",\n \"NaT\",\n \"PeriodIndex\",\n \"Categorical\",\n \"__version__\",\n \"melt\",\n \"crosstab\",\n \"plotting\",\n \"Interval\",\n \"UInt8Dtype\",\n \"UInt16Dtype\",\n \"UInt32Dtype\",\n \"UInt64Dtype\",\n \"SparseDtype\",\n \"Int8Dtype\",\n \"Int16Dtype\",\n \"Int32Dtype\",\n \"Int64Dtype\",\n \"CategoricalDtype\",\n \"DatetimeTZDtype\",\n \"IntervalDtype\",\n \"PeriodDtype\",\n \"BooleanDtype\",\n \"StringDtype\",\n \"NA\",\n \"RangeIndex\",\n \"Int64Index\",\n \"UInt64Index\",\n \"Float64Index\",\n \"TimedeltaIndex\",\n \"IntervalIndex\",\n \"IndexSlice\",\n \"Grouper\",\n \"array\",\n \"Period\",\n \"show_versions\",\n \"DateOffset\",\n \"timedelta_range\",\n \"infer_freq\",\n \"interval_range\",\n \"ExcelWriter\",\n \"read_fwf\",\n \"read_sql_table\",\n \"read_sql_query\",\n \"ExcelFile\",\n \"to_pickle\",\n \"HDFStore\",\n \"lreshape\",\n \"wide_to_long\",\n \"merge_asof\",\n \"merge_ordered\",\n \"notnull\",\n \"notna\",\n \"pivot\",\n \"datetime\",\n \"NamedAgg\",\n \"DEFAULT_NPARTITIONS\",\n]\n\ndel pandas\n", "path": "modin/pandas/__init__.py"}]} | 3,690 | 226 |
gh_patches_debug_27159 | rasdani/github-patches | git_diff | microsoft__Qcodes-552 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bug: decadac getting and setting voltages fails unpredictably
### Steps to reproduce
1. start Harvard Decadac and import a qcodes instrument:
```
from qcodes.instrument_drivers.Harvard.Decadac import Decadac
sl0 = Decadac('dec_slot_0', port=5, slot=0)
```
2. run
`sl0.ch1_voltage(0)`
3. fail as below; it seems like it's something to do with the mode (which is set in the init, but somehow that isn't reliable). It is also not super reliable to fix by setting the mode to 1, but it seems to work if I set it to 0 and back to 1 a few times...
### Expected behaviour
should set the voltage
### Actual behaviour
```
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-18-e543db036233> in <module>()
----> 1 sl0.ch1_voltage(0)
a:\qcodes\qcodes\instrument\parameter.py in __call__(self, *args)
133 else:
134 if self.has_set:
--> 135 self.set(*args)
136 else:
137 raise NotImplementedError('no set cmd found in' +
a:\qcodes\qcodes\instrument\parameter.py in _validate_and_sweep(self, value)
945 e.args = e.args + (
946 'setting {} to {}'.format(self.full_name, repr(value)),)
--> 947 raise e
948
949 def set_step(self, step, max_val_age=None):
a:\qcodes\qcodes\instrument\parameter.py in _validate_and_sweep(self, value)
929 step_clock = time.perf_counter()
930
--> 931 for step_val in self._sweep_steps(value):
932 self._set(step_val)
933 self._save_val(step_val)
a:\qcodes\qcodes\instrument\parameter.py in _sweep_steps(self, value)
891 state = self._latest()
892 if state['ts'] is None or state['ts'] < oldest_ok_val:
--> 893 start_value = self.get()
894 else:
895 start_value = state['value']
a:\qcodes\qcodes\instrument\parameter.py in get(self)
826 except Exception as e:
827 e.args = e.args + ('getting {}'.format(self.full_name),)
--> 828 raise e
829
830 def _valmapping_get_parser(self, val):
a:\qcodes\qcodes\instrument\parameter.py in get(self)
821 def get(self):
822 try:
--> 823 value = self._get()
824 self._save_val(value)
825 return value
a:\qcodes\qcodes\utils\command.py in __call__(self, *args)
175 raise TypeError(
176 'command takes exactly {} args'.format(self.arg_count))
--> 177 return self.exec_function(*args)
a:\qcodes\qcodes\instrument_drivers\Harvard\Decadac.py in _getvoltage(self, channel)
129 response = temp[::-1]
130
--> 131 rawvoltage = self._code2voltage(response, channel)
132 actualvoltage = rawvoltage - self._offsets[channel]
133
a:\qcodes\qcodes\instrument_drivers\Harvard\Decadac.py in _code2voltage(self, code, channel)
232 """
233
--> 234 code = float(code)
235 translationdict = {1: lambda x: (x+1)*20/2**16-10,
236 2: lambda x: (x+1)*10/2**16,
ValueError: ("could not convert string to float: '0! M 0'", 'getting dec_slot_0_ch1_voltage', 'setting dec_slot_0_ch1_voltage to 0')
```
@WilliamHPNielsen @giulioungaretti
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qcodes/instrument_drivers/Harvard/Decadac.py`
Content:
```
1 import logging
2 from time import sleep
3 from functools import partial
4 from qcodes.instrument.visa import VisaInstrument
5 from qcodes.utils import validators as vals
6
7 log = logging.getLogger(__name__)
8
9
10 class Decadac(VisaInstrument):
11 """
12 The qcodes driver for the Decadac.
13 Each slot on the Deacadac is to be treated as a seperate
14 four-channel instrument.
15
16 Tested with a Decadec firmware revion number 14081 (Decadac 139).
17
18 The message strategy is the following: always keep the queue empty, so
19 that self.visa_handle.ask(XXX) will return the answer to XXX and not
20 some previous event.
21
22
23 Attributes:
24
25 _ramp_state (bool): If True, ramp state is ON. Default False.
26
27 _ramp_time (int): The ramp time in ms. Default 100 ms.
28 """
29
30 def __init__(self, name, port, slot, timeout=2, baudrate=9600,
31 bytesize=8, **kwargs):
32
33 """
34
35 Creates an instance of the Decadac instrument corresponding to one slot
36 on the physical instrument.
37
38 Args:
39 name (str): What this instrument is called locally.
40
41 port (number): The number (only) of the COM port to connect to.
42
43 slot (int): The slot to use.
44
45 timeout (number): Seconds to wait for message response.
46 Default 0.3.
47
48 baudrate (number): The connection baudrate. Default 9600.
49
50 bytesize (number): The connection bytesize. Default 8.
51 """
52
53 address = 'ASRL{:d}::INSTR'.format(port)
54 self.slot = slot
55
56 super().__init__(name, address, timeout=timeout, **kwargs)
57
58 # set instrument operation state variables
59 self._ramp_state = False
60 self._ramp_time = 100
61 self._voltranges = [1, 1, 1, 1]
62 self._offsets = [0, 0, 0, 0]
63
64 # channels
65 for channelno in range(4):
66 self.add_parameter('ch{}_voltage'.format(channelno),
67 get_cmd=partial(self._getvoltage,
68 channel=channelno),
69 set_cmd=partial(self._setvoltage,
70 channel=channelno),
71 label='Voltage',
72 unit='V')
73
74 self.add_parameter('ch{}_voltrange'.format(channelno),
75 get_cmd=partial(self._getvoltrange, channelno),
76 set_cmd=partial(self._setvoltrange, channelno),
77 vals=vals.Enum(1, 2, 3))
78
79 self.add_parameter('ch{}_offset'.format(channelno),
80 get_cmd=partial(self._getoffset, channelno),
81 set_cmd=partial(self._setoffset, channelno),
82 label='Channel {} offset'.format(channelno),
83 unit='V',
84 docstring="""
85 The offset is applied to the channel.
86 E.g. if ch1_offset = 1 and ch_voltage
87 is set to 1, the instrument is told to
88 output 2 volts.
89 """)
90
91 self.add_parameter('mode',
92 label='Output mode',
93 set_cmd='B {}; M {};'.format(self.slot, '{}'),
94 vals=vals.Enum(0, 1),
95 docstring="""
96 The operational mode of the slot.
97 0: output off, 1: output on.
98 """)
99
100 # initialise hardware settings
101 self.mode.set(1)
102
103 def _getoffset(self, n):
104 return self._offsets[n]
105
106 def _setoffset(self, n, val):
107 self._offsets[n] = val
108
109 def _getvoltrange(self, n):
110 return self._voltranges[n]
111
112 def _setvoltrange(self, n, val):
113 self._voltranges[n] = val
114
115 def _getvoltage(self, channel):
116 """
117 Function to query the voltage. Flushes the message queue in that
118 process.
119 """
120
121 # set the relevant channel and slot to query
122 mssg = 'B {:d}; C {:d};'.format(self.slot, channel)
123 mssg += 'd;'
124
125 # a bit of string juggling to extract the voltage
126 rawresponse = self.visa_handle.ask(mssg)
127 temp = rawresponse[::-1]
128 temp = temp[3:temp.upper().find('D')-1]
129 response = temp[::-1]
130
131 rawvoltage = self._code2voltage(response, channel)
132 actualvoltage = rawvoltage - self._offsets[channel]
133
134 return actualvoltage
135
136 def _setvoltage(self, voltage, channel):
137 """
138 Function to set the voltage. Depending on whether self._ramp_state is
139 True or False, this function either ramps from the current voltage to
140 the specified voltage or directly makes the voltage jump there.
141
142 Args:
143 voltage (number): the set voltage.
144 """
145
146 actualvoltage = voltage + self._offsets[channel]
147 code = self._voltage2code(actualvoltage, channel)
148
149 mssg = 'B {:d}; C {:d};'.format(self.slot, channel)
150
151 if not self._ramp_state:
152 mssg += 'D ' + code + ';'
153
154 self.visa_handle.write(mssg)
155
156 # due to a quirk of the Decadac, we spare the user of an error
157 # sometimes encountered on first run
158 try:
159 self.visa_handle.read()
160 except UnicodeDecodeError:
161 log.warning(" Decadac returned nothing and possibly did nothing. " +
162 "Please re-run the command")
163 pass
164
165 if self._ramp_state:
166 currentcode = self._voltage2code(self._getvoltage(channel),
167 channel)
168 slope = int((float(code)-float(currentcode)) /
169 (10*self._ramp_time)*2**16)
170 if slope < 0:
171 limit = 'L'
172 else:
173 limit = 'U'
174
175 script = ['{',
176 '*1:',
177 'M2;',
178 'T 100;', # 1 timestep: 100 micro s
179 limit + code + ';',
180 'S' + str(slope) + ';',
181 'X0;',
182 '}']
183 runcmd = 'X 1;'
184 mssg += ''.join(script) + runcmd
185 self.visa_handle.write(mssg)
186 sleep(0.0015*self._ramp_time) # Required sleep.
187 self.visa_handle.read()
188
189 # reset channel voltage ranges
190 if slope < 0:
191 self.visa_handle.write('L 0;')
192 self.visa_handle.read()
193 else:
194 self.visa_handle.write('U 65535;')
195 self.visa_handle.read()
196
197 def set_ramping(self, state, time=None):
198 """
199 Function to set _ramp_state and _ramp_time.
200
201 Args:
202 state (bool): True sets ramping ON.
203
204 time (Optiona[int]): the ramp time in ms
205 """
206 self._ramp_state = state
207 if time is not None:
208 self._ramp_time = time
209
210 def get_ramping(self):
211 """
212 Queries the value of self._ramp_state and self._ramp_time.
213
214 Returns:
215 str: ramp state information
216 """
217 switch = {True: 'ON',
218 False: 'OFF'}
219 mssg = 'Ramp state: ' + switch[self._ramp_state]
220 mssg += '. Ramp time: {:d} ms.'.format(int(self._ramp_time))
221 return mssg
222
223 def _code2voltage(self, code, channel):
224 """
225 Helper function translating a 32 bit code used internally by
226 the Decadac into a voltage.
227
228 Args:
229 code (str): The code string from the instrument.
230
231 channel (int): The relevant channel.
232 """
233
234 code = float(code)
235 translationdict = {1: lambda x: (x+1)*20/2**16-10,
236 2: lambda x: (x+1)*10/2**16,
237 3: lambda x: (x+1)*10/2**16-10}
238
239 return translationdict[self._voltranges[channel]](code)
240
241 def _voltage2code(self, voltage, channel):
242 """
243 Helper function translating a voltage in V into a 32 bit code used
244 internally by the Decadac.
245
246 Args:
247 voltage (float): The physical voltage.
248
249 channel (int): The relevant channel.
250
251 Returns:
252 code (str): The corresponding voltage code.
253 """
254 translationdict = {1: lambda x: 2**16/20*(x-2**-16+10),
255 2: lambda x: 2**16/10*(x-2**-16),
256 3: lambda x: 2**16/10*(x-2**-16+10)}
257 voltage_float = translationdict[self._voltranges[channel]](voltage)
258 return str(int(voltage_float))
259
260
```
--- END FILES ---
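As an aside, the range-1 voltage/code conversion implemented by `_voltage2code` and `_code2voltage` above can be sanity-checked in isolation. The sketch below only re-states the two range-1 formulas from the file; the function names are made up for illustration and this is not the driver itself:

```python
# Standalone sketch of the Decadac range-1 conversion (illustrative only).
def voltage_to_code(voltage):
    # range 1: -10 V .. +10 V mapped onto a 16-bit code
    return int(2**16 / 20 * (voltage - 2**-16 + 10))

def code_to_voltage(code):
    return (code + 1) * 20 / 2**16 - 10

code = voltage_to_code(0.0)         # 32767
print(code, code_to_voltage(code))  # 32767 0.0 -- round-trips to within one LSB (~0.3 mV)
```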
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/qcodes/instrument_drivers/Harvard/Decadac.py b/qcodes/instrument_drivers/Harvard/Decadac.py
--- a/qcodes/instrument_drivers/Harvard/Decadac.py
+++ b/qcodes/instrument_drivers/Harvard/Decadac.py
@@ -90,15 +90,23 @@
self.add_parameter('mode',
label='Output mode',
- set_cmd='B {}; M {};'.format(self.slot, '{}'),
- vals=vals.Enum(0, 1),
+ set_cmd=partial(self._setmode, self.slot),
+ vals=vals.Enum(0, 2),
docstring="""
The operational mode of the slot.
- 0: output off, 1: output on.
+ 0: output off,
+ 2: 4-channel (low res) mode.
""")
# initialise hardware settings
- self.mode.set(1)
+ self.mode.set(2)
+
+ def _setmode(slot, mode):
+ """
+ set_cmd for the mode parameter
+ """
+ self.visa_handle.write('B {}; M {};'.format(slot, mode)
+ self.visa_handle.read()
def _getoffset(self, n):
return self._offsets[n]
@@ -256,4 +264,3 @@
3: lambda x: 2**16/10*(x-2**-16+10)}
voltage_float = translationdict[self._voltranges[channel]](voltage)
return str(int(voltage_float))
-
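For context, this is a minimal sketch of the `functools.partial` set_cmd pattern the patch above moves to. It is illustrative only: the handle class is a stand-in rather than a real VISA connection, and unlike the patch text it includes the `self` argument and closing parenthesis that a runnable method needs.

```python
from functools import partial

class FakeHandle:
    # stand-in for a VISA handle
    def write(self, msg):
        print('write:', msg)
    def read(self):
        return 'OK'

class Slot:
    def __init__(self, slot):
        self.slot = slot
        self.visa_handle = FakeHandle()
        # bind the slot number now; the one remaining argument is the set value
        self.set_mode = partial(self._setmode, self.slot)

    def _setmode(self, slot, mode):
        self.visa_handle.write('B {}; M {};'.format(slot, mode))
        self.visa_handle.read()

Slot(0).set_mode(2)  # -> write: B 0; M 2;
```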
| {"golden_diff": "diff --git a/qcodes/instrument_drivers/Harvard/Decadac.py b/qcodes/instrument_drivers/Harvard/Decadac.py\n--- a/qcodes/instrument_drivers/Harvard/Decadac.py\n+++ b/qcodes/instrument_drivers/Harvard/Decadac.py\n@@ -90,15 +90,23 @@\n \n self.add_parameter('mode',\n label='Output mode',\n- set_cmd='B {}; M {};'.format(self.slot, '{}'),\n- vals=vals.Enum(0, 1),\n+ set_cmd=partial(self._setmode, self.slot),\n+ vals=vals.Enum(0, 2),\n docstring=\"\"\"\n The operational mode of the slot.\n- 0: output off, 1: output on.\n+ 0: output off,\n+ 2: 4-channel (low res) mode.\n \"\"\")\n \n # initialise hardware settings\n- self.mode.set(1)\n+ self.mode.set(2)\n+\n+ def _setmode(slot, mode):\n+ \"\"\"\n+ set_cmd for the mode parameter\n+ \"\"\"\n+ self.visa_handle.write('B {}; M {};'.format(slot, mode)\n+ self.visa_handle.read()\n \n def _getoffset(self, n):\n return self._offsets[n]\n@@ -256,4 +264,3 @@\n 3: lambda x: 2**16/10*(x-2**-16+10)}\n voltage_float = translationdict[self._voltranges[channel]](voltage)\n return str(int(voltage_float))\n-\n", "issue": "bug: decadac getting and setting voltages fails unpredictably\n### Steps to reproduce\r\n1. start Harvard Decadac and import a qcodes instrument instrument:\r\n\r\n```\r\nfrom qcodes.instrument_drivers.Harvard.Decadac import Decadac\r\nsl0 = Decadac('dec_slot_0', port=5, slot=0)\r\n```\r\n\r\n2. run \r\n\r\n`sl0.ch1_voltage(0)`\r\n\r\n3. fail as below, seems like its something to do with the mode (which is set in the init but somehow that isn't reliable), It is also not super reliable to fix by setting the mode to 1 but seems to work if I set it to 0 and back to 1 a few times...\r\n\r\n### Expected behaviour\r\nshould set the voltage\r\n\r\n### Actual behaviour\r\n```\r\n---------------------------------------------------------------------------\r\nValueError Traceback (most recent call last)\r\n<ipython-input-18-e543db036233> in <module>()\r\n----> 1 sl0.ch1_voltage(0)\r\n\r\na:\\qcodes\\qcodes\\instrument\\parameter.py in __call__(self, *args)\r\n 133 else:\r\n 134 if self.has_set:\r\n--> 135 self.set(*args)\r\n 136 else:\r\n 137 raise NotImplementedError('no set cmd found in' +\r\n\r\na:\\qcodes\\qcodes\\instrument\\parameter.py in _validate_and_sweep(self, value)\r\n 945 e.args = e.args + (\r\n 946 'setting {} to {}'.format(self.full_name, repr(value)),)\r\n--> 947 raise e\r\n 948 \r\n 949 def set_step(self, step, max_val_age=None):\r\n\r\na:\\qcodes\\qcodes\\instrument\\parameter.py in _validate_and_sweep(self, value)\r\n 929 step_clock = time.perf_counter()\r\n 930 \r\n--> 931 for step_val in self._sweep_steps(value):\r\n 932 self._set(step_val)\r\n 933 self._save_val(step_val)\r\n\r\na:\\qcodes\\qcodes\\instrument\\parameter.py in _sweep_steps(self, value)\r\n 891 state = self._latest()\r\n 892 if state['ts'] is None or state['ts'] < oldest_ok_val:\r\n--> 893 start_value = self.get()\r\n 894 else:\r\n 895 start_value = state['value']\r\n\r\na:\\qcodes\\qcodes\\instrument\\parameter.py in get(self)\r\n 826 except Exception as e:\r\n 827 e.args = e.args + ('getting {}'.format(self.full_name),)\r\n--> 828 raise e\r\n 829 \r\n 830 def _valmapping_get_parser(self, val):\r\n\r\na:\\qcodes\\qcodes\\instrument\\parameter.py in get(self)\r\n 821 def get(self):\r\n 822 try:\r\n--> 823 value = self._get()\r\n 824 self._save_val(value)\r\n 825 return value\r\n\r\na:\\qcodes\\qcodes\\utils\\command.py in __call__(self, *args)\r\n 175 raise TypeError(\r\n 176 'command takes exactly {} args'.format(self.arg_count))\r\n--> 177 return 
self.exec_function(*args)\r\n\r\na:\\qcodes\\qcodes\\instrument_drivers\\Harvard\\Decadac.py in _getvoltage(self, channel)\r\n 129 response = temp[::-1]\r\n 130 \r\n--> 131 rawvoltage = self._code2voltage(response, channel)\r\n 132 actualvoltage = rawvoltage - self._offsets[channel]\r\n 133 \r\n\r\na:\\qcodes\\qcodes\\instrument_drivers\\Harvard\\Decadac.py in _code2voltage(self, code, channel)\r\n 232 \"\"\"\r\n 233 \r\n--> 234 code = float(code)\r\n 235 translationdict = {1: lambda x: (x+1)*20/2**16-10,\r\n 236 2: lambda x: (x+1)*10/2**16,\r\n\r\nValueError: (\"could not convert string to float: '0! M 0'\", 'getting dec_slot_0_ch1_voltage', 'setting dec_slot_0_ch1_voltage to 0')\r\n```\r\n\r\n@WilliamHPNielsen @giulioungaretti \n", "before_files": [{"content": "import logging\nfrom time import sleep\nfrom functools import partial\nfrom qcodes.instrument.visa import VisaInstrument\nfrom qcodes.utils import validators as vals\n\nlog = logging.getLogger(__name__)\n\n\nclass Decadac(VisaInstrument):\n \"\"\"\n The qcodes driver for the Decadac.\n Each slot on the Deacadac is to be treated as a seperate\n four-channel instrument.\n\n Tested with a Decadec firmware revion number 14081 (Decadac 139).\n\n The message strategy is the following: always keep the queue empty, so\n that self.visa_handle.ask(XXX) will return the answer to XXX and not\n some previous event.\n\n\n Attributes:\n\n _ramp_state (bool): If True, ramp state is ON. Default False.\n\n _ramp_time (int): The ramp time in ms. Default 100 ms.\n \"\"\"\n\n def __init__(self, name, port, slot, timeout=2, baudrate=9600,\n bytesize=8, **kwargs):\n\n \"\"\"\n\n Creates an instance of the Decadac instrument corresponding to one slot\n on the physical instrument.\n\n Args:\n name (str): What this instrument is called locally.\n\n port (number): The number (only) of the COM port to connect to.\n\n slot (int): The slot to use.\n\n timeout (number): Seconds to wait for message response.\n Default 0.3.\n\n baudrate (number): The connection baudrate. Default 9600.\n\n bytesize (number): The connection bytesize. Default 8.\n \"\"\"\n\n address = 'ASRL{:d}::INSTR'.format(port)\n self.slot = slot\n\n super().__init__(name, address, timeout=timeout, **kwargs)\n\n # set instrument operation state variables\n self._ramp_state = False\n self._ramp_time = 100\n self._voltranges = [1, 1, 1, 1]\n self._offsets = [0, 0, 0, 0]\n\n # channels\n for channelno in range(4):\n self.add_parameter('ch{}_voltage'.format(channelno),\n get_cmd=partial(self._getvoltage,\n channel=channelno),\n set_cmd=partial(self._setvoltage,\n channel=channelno),\n label='Voltage',\n unit='V')\n\n self.add_parameter('ch{}_voltrange'.format(channelno),\n get_cmd=partial(self._getvoltrange, channelno),\n set_cmd=partial(self._setvoltrange, channelno),\n vals=vals.Enum(1, 2, 3))\n\n self.add_parameter('ch{}_offset'.format(channelno),\n get_cmd=partial(self._getoffset, channelno),\n set_cmd=partial(self._setoffset, channelno),\n label='Channel {} offset'.format(channelno),\n unit='V',\n docstring=\"\"\"\n The offset is applied to the channel.\n E.g. 
if ch1_offset = 1 and ch_voltage\n is set to 1, the instrument is told to\n output 2 volts.\n \"\"\")\n\n self.add_parameter('mode',\n label='Output mode',\n set_cmd='B {}; M {};'.format(self.slot, '{}'),\n vals=vals.Enum(0, 1),\n docstring=\"\"\"\n The operational mode of the slot.\n 0: output off, 1: output on.\n \"\"\")\n\n # initialise hardware settings\n self.mode.set(1)\n\n def _getoffset(self, n):\n return self._offsets[n]\n\n def _setoffset(self, n, val):\n self._offsets[n] = val\n\n def _getvoltrange(self, n):\n return self._voltranges[n]\n\n def _setvoltrange(self, n, val):\n self._voltranges[n] = val\n\n def _getvoltage(self, channel):\n \"\"\"\n Function to query the voltage. Flushes the message queue in that\n process.\n \"\"\"\n\n # set the relevant channel and slot to query\n mssg = 'B {:d}; C {:d};'.format(self.slot, channel)\n mssg += 'd;'\n\n # a bit of string juggling to extract the voltage\n rawresponse = self.visa_handle.ask(mssg)\n temp = rawresponse[::-1]\n temp = temp[3:temp.upper().find('D')-1]\n response = temp[::-1]\n\n rawvoltage = self._code2voltage(response, channel)\n actualvoltage = rawvoltage - self._offsets[channel]\n\n return actualvoltage\n\n def _setvoltage(self, voltage, channel):\n \"\"\"\n Function to set the voltage. Depending on whether self._ramp_state is\n True or False, this function either ramps from the current voltage to\n the specified voltage or directly makes the voltage jump there.\n\n Args:\n voltage (number): the set voltage.\n \"\"\"\n\n actualvoltage = voltage + self._offsets[channel]\n code = self._voltage2code(actualvoltage, channel)\n\n mssg = 'B {:d}; C {:d};'.format(self.slot, channel)\n\n if not self._ramp_state:\n mssg += 'D ' + code + ';'\n\n self.visa_handle.write(mssg)\n\n # due to a quirk of the Decadac, we spare the user of an error\n # sometimes encountered on first run\n try:\n self.visa_handle.read()\n except UnicodeDecodeError:\n log.warning(\" Decadac returned nothing and possibly did nothing. \" +\n \"Please re-run the command\")\n pass\n\n if self._ramp_state:\n currentcode = self._voltage2code(self._getvoltage(channel),\n channel)\n slope = int((float(code)-float(currentcode)) /\n (10*self._ramp_time)*2**16)\n if slope < 0:\n limit = 'L'\n else:\n limit = 'U'\n\n script = ['{',\n '*1:',\n 'M2;',\n 'T 100;', # 1 timestep: 100 micro s\n limit + code + ';',\n 'S' + str(slope) + ';',\n 'X0;',\n '}']\n runcmd = 'X 1;'\n mssg += ''.join(script) + runcmd\n self.visa_handle.write(mssg)\n sleep(0.0015*self._ramp_time) # Required sleep.\n self.visa_handle.read()\n\n # reset channel voltage ranges\n if slope < 0:\n self.visa_handle.write('L 0;')\n self.visa_handle.read()\n else:\n self.visa_handle.write('U 65535;')\n self.visa_handle.read()\n\n def set_ramping(self, state, time=None):\n \"\"\"\n Function to set _ramp_state and _ramp_time.\n\n Args:\n state (bool): True sets ramping ON.\n\n time (Optiona[int]): the ramp time in ms\n \"\"\"\n self._ramp_state = state\n if time is not None:\n self._ramp_time = time\n\n def get_ramping(self):\n \"\"\"\n Queries the value of self._ramp_state and self._ramp_time.\n\n Returns:\n str: ramp state information\n \"\"\"\n switch = {True: 'ON',\n False: 'OFF'}\n mssg = 'Ramp state: ' + switch[self._ramp_state]\n mssg += '. 
Ramp time: {:d} ms.'.format(int(self._ramp_time))\n return mssg\n\n def _code2voltage(self, code, channel):\n \"\"\"\n Helper function translating a 32 bit code used internally by\n the Decadac into a voltage.\n\n Args:\n code (str): The code string from the instrument.\n\n channel (int): The relevant channel.\n \"\"\"\n\n code = float(code)\n translationdict = {1: lambda x: (x+1)*20/2**16-10,\n 2: lambda x: (x+1)*10/2**16,\n 3: lambda x: (x+1)*10/2**16-10}\n\n return translationdict[self._voltranges[channel]](code)\n\n def _voltage2code(self, voltage, channel):\n \"\"\"\n Helper function translating a voltage in V into a 32 bit code used\n internally by the Decadac.\n\n Args:\n voltage (float): The physical voltage.\n\n channel (int): The relevant channel.\n\n Returns:\n code (str): The corresponding voltage code.\n \"\"\"\n translationdict = {1: lambda x: 2**16/20*(x-2**-16+10),\n 2: lambda x: 2**16/10*(x-2**-16),\n 3: lambda x: 2**16/10*(x-2**-16+10)}\n voltage_float = translationdict[self._voltranges[channel]](voltage)\n return str(int(voltage_float))\n\n", "path": "qcodes/instrument_drivers/Harvard/Decadac.py"}], "after_files": [{"content": "import logging\nfrom time import sleep\nfrom functools import partial\nfrom qcodes.instrument.visa import VisaInstrument\nfrom qcodes.utils import validators as vals\n\nlog = logging.getLogger(__name__)\n\n\nclass Decadac(VisaInstrument):\n \"\"\"\n The qcodes driver for the Decadac.\n Each slot on the Deacadac is to be treated as a seperate\n four-channel instrument.\n\n Tested with a Decadec firmware revion number 14081 (Decadac 139).\n\n The message strategy is the following: always keep the queue empty, so\n that self.visa_handle.ask(XXX) will return the answer to XXX and not\n some previous event.\n\n\n Attributes:\n\n _ramp_state (bool): If True, ramp state is ON. Default False.\n\n _ramp_time (int): The ramp time in ms. Default 100 ms.\n \"\"\"\n\n def __init__(self, name, port, slot, timeout=2, baudrate=9600,\n bytesize=8, **kwargs):\n\n \"\"\"\n\n Creates an instance of the Decadac instrument corresponding to one slot\n on the physical instrument.\n\n Args:\n name (str): What this instrument is called locally.\n\n port (number): The number (only) of the COM port to connect to.\n\n slot (int): The slot to use.\n\n timeout (number): Seconds to wait for message response.\n Default 0.3.\n\n baudrate (number): The connection baudrate. Default 9600.\n\n bytesize (number): The connection bytesize. Default 8.\n \"\"\"\n\n address = 'ASRL{:d}::INSTR'.format(port)\n self.slot = slot\n\n super().__init__(name, address, timeout=timeout, **kwargs)\n\n # set instrument operation state variables\n self._ramp_state = False\n self._ramp_time = 100\n self._voltranges = [1, 1, 1, 1]\n self._offsets = [0, 0, 0, 0]\n\n # channels\n for channelno in range(4):\n self.add_parameter('ch{}_voltage'.format(channelno),\n get_cmd=partial(self._getvoltage,\n channel=channelno),\n set_cmd=partial(self._setvoltage,\n channel=channelno),\n label='Voltage',\n unit='V')\n\n self.add_parameter('ch{}_voltrange'.format(channelno),\n get_cmd=partial(self._getvoltrange, channelno),\n set_cmd=partial(self._setvoltrange, channelno),\n vals=vals.Enum(1, 2, 3))\n\n self.add_parameter('ch{}_offset'.format(channelno),\n get_cmd=partial(self._getoffset, channelno),\n set_cmd=partial(self._setoffset, channelno),\n label='Channel {} offset'.format(channelno),\n unit='V',\n docstring=\"\"\"\n The offset is applied to the channel.\n E.g. 
if ch1_offset = 1 and ch_voltage\n is set to 1, the instrument is told to\n output 2 volts.\n \"\"\")\n\n self.add_parameter('mode',\n label='Output mode',\n set_cmd=partial(self._setmode, self.slot),\n vals=vals.Enum(0, 2),\n docstring=\"\"\"\n The operational mode of the slot.\n 0: output off,\n 2: 4-channel (low res) mode.\n \"\"\")\n\n # initialise hardware settings\n self.mode.set(2)\n\n def _setmode(slot, mode):\n \"\"\"\n set_cmd for the mode parameter\n \"\"\"\n self.visa_handle.write('B {}; M {};'.format(slot, mode)\n self.visa_handle.read()\n\n def _getoffset(self, n):\n return self._offsets[n]\n\n def _setoffset(self, n, val):\n self._offsets[n] = val\n\n def _getvoltrange(self, n):\n return self._voltranges[n]\n\n def _setvoltrange(self, n, val):\n self._voltranges[n] = val\n\n def _getvoltage(self, channel):\n \"\"\"\n Function to query the voltage. Flushes the message queue in that\n process.\n \"\"\"\n\n # set the relevant channel and slot to query\n mssg = 'B {:d}; C {:d};'.format(self.slot, channel)\n mssg += 'd;'\n\n # a bit of string juggling to extract the voltage\n rawresponse = self.visa_handle.ask(mssg)\n temp = rawresponse[::-1]\n temp = temp[3:temp.upper().find('D')-1]\n response = temp[::-1]\n\n rawvoltage = self._code2voltage(response, channel)\n actualvoltage = rawvoltage - self._offsets[channel]\n\n return actualvoltage\n\n def _setvoltage(self, voltage, channel):\n \"\"\"\n Function to set the voltage. Depending on whether self._ramp_state is\n True or False, this function either ramps from the current voltage to\n the specified voltage or directly makes the voltage jump there.\n\n Args:\n voltage (number): the set voltage.\n \"\"\"\n\n actualvoltage = voltage + self._offsets[channel]\n code = self._voltage2code(actualvoltage, channel)\n\n mssg = 'B {:d}; C {:d};'.format(self.slot, channel)\n\n if not self._ramp_state:\n mssg += 'D ' + code + ';'\n\n self.visa_handle.write(mssg)\n\n # due to a quirk of the Decadac, we spare the user of an error\n # sometimes encountered on first run\n try:\n self.visa_handle.read()\n except UnicodeDecodeError:\n log.warning(\" Decadac returned nothing and possibly did nothing. \" +\n \"Please re-run the command\")\n pass\n\n if self._ramp_state:\n currentcode = self._voltage2code(self._getvoltage(channel),\n channel)\n slope = int((float(code)-float(currentcode)) /\n (10*self._ramp_time)*2**16)\n if slope < 0:\n limit = 'L'\n else:\n limit = 'U'\n\n script = ['{',\n '*1:',\n 'M2;',\n 'T 100;', # 1 timestep: 100 micro s\n limit + code + ';',\n 'S' + str(slope) + ';',\n 'X0;',\n '}']\n runcmd = 'X 1;'\n mssg += ''.join(script) + runcmd\n self.visa_handle.write(mssg)\n sleep(0.0015*self._ramp_time) # Required sleep.\n self.visa_handle.read()\n\n # reset channel voltage ranges\n if slope < 0:\n self.visa_handle.write('L 0;')\n self.visa_handle.read()\n else:\n self.visa_handle.write('U 65535;')\n self.visa_handle.read()\n\n def set_ramping(self, state, time=None):\n \"\"\"\n Function to set _ramp_state and _ramp_time.\n\n Args:\n state (bool): True sets ramping ON.\n\n time (Optiona[int]): the ramp time in ms\n \"\"\"\n self._ramp_state = state\n if time is not None:\n self._ramp_time = time\n\n def get_ramping(self):\n \"\"\"\n Queries the value of self._ramp_state and self._ramp_time.\n\n Returns:\n str: ramp state information\n \"\"\"\n switch = {True: 'ON',\n False: 'OFF'}\n mssg = 'Ramp state: ' + switch[self._ramp_state]\n mssg += '. 
Ramp time: {:d} ms.'.format(int(self._ramp_time))\n return mssg\n\n def _code2voltage(self, code, channel):\n \"\"\"\n Helper function translating a 32 bit code used internally by\n the Decadac into a voltage.\n\n Args:\n code (str): The code string from the instrument.\n\n channel (int): The relevant channel.\n \"\"\"\n\n code = float(code)\n translationdict = {1: lambda x: (x+1)*20/2**16-10,\n 2: lambda x: (x+1)*10/2**16,\n 3: lambda x: (x+1)*10/2**16-10}\n\n return translationdict[self._voltranges[channel]](code)\n\n def _voltage2code(self, voltage, channel):\n \"\"\"\n Helper function translating a voltage in V into a 32 bit code used\n internally by the Decadac.\n\n Args:\n voltage (float): The physical voltage.\n\n channel (int): The relevant channel.\n\n Returns:\n code (str): The corresponding voltage code.\n \"\"\"\n translationdict = {1: lambda x: 2**16/20*(x-2**-16+10),\n 2: lambda x: 2**16/10*(x-2**-16),\n 3: lambda x: 2**16/10*(x-2**-16+10)}\n voltage_float = translationdict[self._voltranges[channel]](voltage)\n return str(int(voltage_float))\n", "path": "qcodes/instrument_drivers/Harvard/Decadac.py"}]} | 4,040 | 362 |
gh_patches_debug_17760 | rasdani/github-patches | git_diff | joke2k__faker-105 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Default locale to language if no territory given.
It would be great if faker, when initialized with only a language code and no territory, fell back to a sensible default.
For example, I currently have to do the following when using something such as "en" instead of "en_US".
``` py
from faker import Factory
from faker import AVAILABLE_LOCALES
locale = 'en'
if locale not in AVAILABLE_LOCALES:
locale = next(l for l in AVAILABLE_LOCALES if l.startswith(locale))
factory = Factory.create(locale)
```
This happens when using dynamic mock data in local development where django sets the locale to "en" because we do not define territories.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `faker/factory.py`
Content:
```
1 from __future__ import unicode_literals
2 from __future__ import absolute_import
3 import sys
4 from faker import DEFAULT_LOCALE, DEFAULT_PROVIDERS, AVAILABLE_LOCALES
5 from faker import Generator
6 from faker import providers as providers_mod
7
8
9 class Factory(object):
10
11 @classmethod
12 def create(cls, locale=None, providers=None, generator=None, **config):
13
14 # fix locale to package name
15 locale = locale.replace('-', '_') if locale else DEFAULT_LOCALE
16 if '_' in locale:
17 locale = locale[:2] + locale[2:].upper()
18 if locale not in AVAILABLE_LOCALES:
19 raise AttributeError('Invalid configuration for faker locale "{0}"'.format(locale))
20
21 providers = providers or DEFAULT_PROVIDERS
22
23 faker = generator or Generator(**config)
24 faker.add_provider(providers_mod.BaseProvider)
25 for provider_name in providers:
26
27 provider_class, lang_found = cls._get_provider_class(provider_name, locale)
28 provider = provider_class(faker)
29 provider.__provider__ = provider_name
30 provider.__lang__ = lang_found
31 faker.add_provider(provider)
32
33 return faker
34
35 @classmethod
36 def _get_provider_class(cls, provider, locale=''):
37
38 provider_class = cls._find_provider_class(provider, locale)
39
40 if provider_class:
41 return provider_class, locale
42
43 if locale and locale != DEFAULT_LOCALE:
44 # fallback to default locale
45 provider_class = cls._find_provider_class(provider, DEFAULT_LOCALE)
46 if provider_class:
47 return provider_class, DEFAULT_LOCALE
48
49 # fallback to no locale
50 provider_class = cls._find_provider_class(provider)
51 if provider_class:
52 return provider_class, None
53
54 raise ValueError('Unable to find provider "{0}" with locale "{1}"'.format(provider, locale))
55
56 @classmethod
57 def _find_provider_class(cls, provider, locale=''):
58
59 path = "{providers}{lang}.{provider}".format(
60 providers=providers_mod.__package__ or providers_mod.__name__,
61 lang='.' + locale if locale else '',
62 provider=provider
63 )
64
65 try:
66 __import__(path)
67 except ImportError:
68 return None
69
70 return sys.modules[path].Provider
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/faker/factory.py b/faker/factory.py
--- a/faker/factory.py
+++ b/faker/factory.py
@@ -1,5 +1,6 @@
from __future__ import unicode_literals
from __future__ import absolute_import
+import locale as pylocale
import sys
from faker import DEFAULT_LOCALE, DEFAULT_PROVIDERS, AVAILABLE_LOCALES
from faker import Generator
@@ -13,8 +14,7 @@
# fix locale to package name
locale = locale.replace('-', '_') if locale else DEFAULT_LOCALE
- if '_' in locale:
- locale = locale[:2] + locale[2:].upper()
+ locale = pylocale.normalize(locale).split('.')[0]
if locale not in AVAILABLE_LOCALES:
raise AttributeError('Invalid configuration for faker locale "{0}"'.format(locale))
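To illustrate why the patch works: Python's standard-library `locale.normalize` expands a bare language code into a full territory-qualified locale, and splitting off the encoding suffix gives the name faker expects. The output shown assumes CPython's bundled alias table; exact aliases can vary between Python versions.

```python
import locale as pylocale

print(pylocale.normalize('en'))                   # e.g. 'en_US.ISO8859-1'
print(pylocale.normalize('en').split('.')[0])     # 'en_US'
print(pylocale.normalize('de_DE').split('.')[0])  # 'de_DE'
```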
| {"golden_diff": "diff --git a/faker/factory.py b/faker/factory.py\n--- a/faker/factory.py\n+++ b/faker/factory.py\n@@ -1,5 +1,6 @@\n from __future__ import unicode_literals\n from __future__ import absolute_import\n+import locale as pylocale\n import sys\n from faker import DEFAULT_LOCALE, DEFAULT_PROVIDERS, AVAILABLE_LOCALES\n from faker import Generator\n@@ -13,8 +14,7 @@\n \n # fix locale to package name\n locale = locale.replace('-', '_') if locale else DEFAULT_LOCALE\n- if '_' in locale:\n- locale = locale[:2] + locale[2:].upper()\n+ locale = pylocale.normalize(locale).split('.')[0]\n if locale not in AVAILABLE_LOCALES:\n raise AttributeError('Invalid configuration for faker locale \"{0}\"'.format(locale))\n", "issue": "Default locale to language if no territory given.\nIt would be great that if faker was initialized with only a locale and no territory, that it would use a sensible default.\n\nFor example I currently have to do the following if using something such as \"en\" instead of \"en_US\". \n\n``` py\nfrom faker import Factory\nfrom faker import AVAILABLE_LOCALES\n\nlocale = 'en'\nif locale not in AVAILABLE_LOCALES:\n locale = next(l for l in AVAILABLE_LOCALES if l.startswith(locale))\n\nfactory = Factory.create(locale)\n```\n\nThis happens when using dynamic mock data in local development where django sets the locale to \"en\" because we do not define territories.\n\n", "before_files": [{"content": "from __future__ import unicode_literals\nfrom __future__ import absolute_import\nimport sys\nfrom faker import DEFAULT_LOCALE, DEFAULT_PROVIDERS, AVAILABLE_LOCALES\nfrom faker import Generator\nfrom faker import providers as providers_mod\n\n\nclass Factory(object):\n\n @classmethod\n def create(cls, locale=None, providers=None, generator=None, **config):\n\n # fix locale to package name\n locale = locale.replace('-', '_') if locale else DEFAULT_LOCALE\n if '_' in locale:\n locale = locale[:2] + locale[2:].upper()\n if locale not in AVAILABLE_LOCALES:\n raise AttributeError('Invalid configuration for faker locale \"{0}\"'.format(locale))\n\n providers = providers or DEFAULT_PROVIDERS\n\n faker = generator or Generator(**config)\n faker.add_provider(providers_mod.BaseProvider)\n for provider_name in providers:\n\n provider_class, lang_found = cls._get_provider_class(provider_name, locale)\n provider = provider_class(faker)\n provider.__provider__ = provider_name\n provider.__lang__ = lang_found\n faker.add_provider(provider)\n\n return faker\n\n @classmethod\n def _get_provider_class(cls, provider, locale=''):\n\n provider_class = cls._find_provider_class(provider, locale)\n\n if provider_class:\n return provider_class, locale\n\n if locale and locale != DEFAULT_LOCALE:\n # fallback to default locale\n provider_class = cls._find_provider_class(provider, DEFAULT_LOCALE)\n if provider_class:\n return provider_class, DEFAULT_LOCALE\n\n # fallback to no locale\n provider_class = cls._find_provider_class(provider)\n if provider_class:\n return provider_class, None\n\n raise ValueError('Unable to find provider \"{0}\" with locale \"{1}\"'.format(provider, locale))\n\n @classmethod\n def _find_provider_class(cls, provider, locale=''):\n\n path = \"{providers}{lang}.{provider}\".format(\n providers=providers_mod.__package__ or providers_mod.__name__,\n lang='.' 
+ locale if locale else '',\n provider=provider\n )\n\n try:\n __import__(path)\n except ImportError:\n return None\n\n return sys.modules[path].Provider\n", "path": "faker/factory.py"}], "after_files": [{"content": "from __future__ import unicode_literals\nfrom __future__ import absolute_import\nimport locale as pylocale\nimport sys\nfrom faker import DEFAULT_LOCALE, DEFAULT_PROVIDERS, AVAILABLE_LOCALES\nfrom faker import Generator\nfrom faker import providers as providers_mod\n\n\nclass Factory(object):\n\n @classmethod\n def create(cls, locale=None, providers=None, generator=None, **config):\n\n # fix locale to package name\n locale = locale.replace('-', '_') if locale else DEFAULT_LOCALE\n locale = pylocale.normalize(locale).split('.')[0]\n if locale not in AVAILABLE_LOCALES:\n raise AttributeError('Invalid configuration for faker locale \"{0}\"'.format(locale))\n\n providers = providers or DEFAULT_PROVIDERS\n\n faker = generator or Generator(**config)\n faker.add_provider(providers_mod.BaseProvider)\n for provider_name in providers:\n\n provider_class, lang_found = cls._get_provider_class(provider_name, locale)\n provider = provider_class(faker)\n provider.__provider__ = provider_name\n provider.__lang__ = lang_found\n faker.add_provider(provider)\n\n return faker\n\n @classmethod\n def _get_provider_class(cls, provider, locale=''):\n\n provider_class = cls._find_provider_class(provider, locale)\n\n if provider_class:\n return provider_class, locale\n\n if locale and locale != DEFAULT_LOCALE:\n # fallback to default locale\n provider_class = cls._find_provider_class(provider, DEFAULT_LOCALE)\n if provider_class:\n return provider_class, DEFAULT_LOCALE\n\n # fallback to no locale\n provider_class = cls._find_provider_class(provider)\n if provider_class:\n return provider_class, None\n\n raise ValueError('Unable to find provider \"{0}\" with locale \"{1}\"'.format(provider, locale))\n\n @classmethod\n def _find_provider_class(cls, provider, locale=''):\n\n path = \"{providers}{lang}.{provider}\".format(\n providers=providers_mod.__package__ or providers_mod.__name__,\n lang='.' + locale if locale else '',\n provider=provider\n )\n\n try:\n __import__(path)\n except ImportError:\n return None\n\n return sys.modules[path].Provider\n", "path": "faker/factory.py"}]} | 1,002 | 186 |
gh_patches_debug_20593 | rasdani/github-patches | git_diff | pyload__pyload-1385 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Feature Request: [UploadedNet] Detect Maintenance Mode
Please update the UploadedNet plugin so that it detects Uploaded.net's maintenance mode.
Adding a timer that re-checks indefinitely every 5 minutes or so might be necessary.
Otherwise all links in the queue are skipped as offline and have to be restarted manually.
Here is the HTML code served while Uploaded.net is in maintenance:
http://paste2.org/OaBy4vZ6
Edit: a check for the title in the page head, "uploaded.net - Maintenance - Wartungsarbeiten", should suffice, I guess.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `module/plugins/hoster/UploadedTo.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 import re
4 import time
5
6 from module.network.RequestFactory import getURL
7 from module.plugins.internal.CaptchaService import ReCaptcha
8 from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
9
10
11 class UploadedTo(SimpleHoster):
12 __name__ = "UploadedTo"
13 __type__ = "hoster"
14 __version__ = "0.86"
15
16 __pattern__ = r'https?://(?:www\.)?(uploaded\.(to|net)|ul\.to)(/file/|/?\?id=|.*?&id=|/)(?P<ID>\w+)'
17 __config__ = [("use_premium", "bool", "Use premium account if available", True)]
18
19 __description__ = """Uploaded.net hoster plugin"""
20 __license__ = "GPLv3"
21 __authors__ = [("Walter Purcaro", "[email protected]")]
22
23
24 DISPOSITION = False
25
26 API_KEY = "lhF2IeeprweDfu9ccWlxXVVypA5nA3EL"
27
28 URL_REPLACEMENTS = [(__pattern__ + ".*", r'http://uploaded.net/file/\g<ID>')]
29
30 LINK_PREMIUM_PATTERN = r'<div class="tfree".*\s*<form method="post" action="(.+?)"'
31
32 WAIT_PATTERN = r'Current waiting period: <span>(\d+)'
33 DL_LIMIT_ERROR = r'You have reached the max. number of possible free downloads for this hour'
34
35
36 @classmethod
37 def apiInfo(cls, url="", get={}, post={}):
38 info = super(UploadedTo, cls).apiInfo(url)
39
40 for _i in xrange(5):
41 html = getURL("http://uploaded.net/api/filemultiple",
42 get={"apikey": cls.API_KEY, 'id_0': re.match(cls.__pattern__, url).group('ID')},
43 decode=True)
44
45 if html != "can't find request":
46 api = html.split(",", 4)
47 if api[0] == "online":
48 info.update({'name': api[4].strip(), 'size': api[2], 'status': 2})
49 else:
50 info['status'] = 1
51 break
52 else:
53 time.sleep(3)
54
55 return info
56
57
58 def setup(self):
59 self.multiDL = self.resumeDownload = self.premium
60 self.chunkLimit = 1 # critical problems with more chunks
61
62
63 def checkErrors(self):
64 if 'var free_enabled = false;' in self.html:
65 self.logError(_("Free-download capacities exhausted"))
66 self.retry(24, 5 * 60)
67
68 elif "limit-size" in self.html:
69 self.fail(_("File too big for free download"))
70
71 elif "limit-slot" in self.html: # Temporary restriction so just wait a bit
72 self.wait(30 * 60, True)
73 self.retry()
74
75 elif "limit-parallel" in self.html:
76 self.fail(_("Cannot download in parallel"))
77
78 elif "limit-dl" in self.html or self.DL_LIMIT_ERROR in self.html: # limit-dl
79 self.wait(3 * 60 * 60, True)
80 self.retry()
81
82 elif '"err":"captcha"' in self.html:
83 self.invalidCaptcha()
84
85 else:
86 m = re.search(self.WAIT_PATTERN, self.html)
87 if m:
88 self.wait(m.group(1))
89
90
91 def handleFree(self, pyfile):
92 self.load("http://uploaded.net/language/en", just_header=True)
93
94 self.html = self.load("http://uploaded.net/js/download.js", decode=True)
95
96 recaptcha = ReCaptcha(self)
97 response, challenge = recaptcha.challenge()
98
99 self.html = self.load("http://uploaded.net/io/ticket/captcha/%s" % self.info['pattern']['ID'],
100 post={'recaptcha_challenge_field': challenge,
101 'recaptcha_response_field' : response})
102
103 if "type:'download'" in self.html:
104 self.correctCaptcha()
105 try:
106 self.link = re.search("url:'(.+?)'", self.html).group(1)
107
108 except Exception:
109 pass
110
111 self.checkErrors()
112
113
114 def checkFile(self, rules={}):
115 if self.checkDownload({'limit-dl': self.DL_LIMIT_ERROR}):
116 self.wait(3 * 60 * 60, True)
117 self.retry()
118
119 return super(UploadedTo, self).checkFile(rules)
120
121
122 getInfo = create_getInfo(UploadedTo)
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/module/plugins/hoster/UploadedTo.py b/module/plugins/hoster/UploadedTo.py
--- a/module/plugins/hoster/UploadedTo.py
+++ b/module/plugins/hoster/UploadedTo.py
@@ -11,7 +11,7 @@
class UploadedTo(SimpleHoster):
__name__ = "UploadedTo"
__type__ = "hoster"
- __version__ = "0.86"
+ __version__ = "0.87"
__pattern__ = r'https?://(?:www\.)?(uploaded\.(to|net)|ul\.to)(/file/|/?\?id=|.*?&id=|/)(?P<ID>\w+)'
__config__ = [("use_premium", "bool", "Use premium account if available", True)]
@@ -27,6 +27,8 @@
URL_REPLACEMENTS = [(__pattern__ + ".*", r'http://uploaded.net/file/\g<ID>')]
+ TEMP_OFFLINE_PATTERN = r'<title>uploaded\.net - Maintenance - Wartungsarbeiten</title>'
+
LINK_PREMIUM_PATTERN = r'<div class="tfree".*\s*<form method="post" action="(.+?)"'
WAIT_PATTERN = r'Current waiting period: <span>(\d+)'
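A quick standalone check (plain `re`, outside the plugin) showing that the added `TEMP_OFFLINE_PATTERN` matches the maintenance page title quoted in the issue:

```python
import re

TEMP_OFFLINE_PATTERN = r'<title>uploaded\.net - Maintenance - Wartungsarbeiten</title>'
html = '<head><title>uploaded.net - Maintenance - Wartungsarbeiten</title></head>'
print(bool(re.search(TEMP_OFFLINE_PATTERN, html)))  # True
```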
| {"golden_diff": "diff --git a/module/plugins/hoster/UploadedTo.py b/module/plugins/hoster/UploadedTo.py\n--- a/module/plugins/hoster/UploadedTo.py\n+++ b/module/plugins/hoster/UploadedTo.py\n@@ -11,7 +11,7 @@\n class UploadedTo(SimpleHoster):\n __name__ = \"UploadedTo\"\n __type__ = \"hoster\"\n- __version__ = \"0.86\"\n+ __version__ = \"0.87\"\n \n __pattern__ = r'https?://(?:www\\.)?(uploaded\\.(to|net)|ul\\.to)(/file/|/?\\?id=|.*?&id=|/)(?P<ID>\\w+)'\n __config__ = [(\"use_premium\", \"bool\", \"Use premium account if available\", True)]\n@@ -27,6 +27,8 @@\n \n URL_REPLACEMENTS = [(__pattern__ + \".*\", r'http://uploaded.net/file/\\g<ID>')]\n \n+ TEMP_OFFLINE_PATTERN = r'<title>uploaded\\.net - Maintenance - Wartungsarbeiten</title>'\n+\n LINK_PREMIUM_PATTERN = r'<div class=\"tfree\".*\\s*<form method=\"post\" action=\"(.+?)\"'\n \n WAIT_PATTERN = r'Current waiting period: <span>(\\d+)'\n", "issue": "Feature Request: [UploadedNet] Detect Maintenance Mode\nPlease update the UploadedNet plugin so it detects uploaded maintenance mode.\n\nAdding a timer to re-check indefinitely every 5 minutes or so might be necessary.\n\nElse all links in the queue are skipped as offline and have to be restarted manually.\n\nHere is the html code if Uploaded is in maintenance\n\nhttp://paste2.org/OaBy4vZ6\n\nEDiT: A check for the head title \"uploaded.net - Maintenance - Wartungsarbeiten\" should suffice, I guess..\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport re\nimport time\n\nfrom module.network.RequestFactory import getURL\nfrom module.plugins.internal.CaptchaService import ReCaptcha\nfrom module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo\n\n\nclass UploadedTo(SimpleHoster):\n __name__ = \"UploadedTo\"\n __type__ = \"hoster\"\n __version__ = \"0.86\"\n\n __pattern__ = r'https?://(?:www\\.)?(uploaded\\.(to|net)|ul\\.to)(/file/|/?\\?id=|.*?&id=|/)(?P<ID>\\w+)'\n __config__ = [(\"use_premium\", \"bool\", \"Use premium account if available\", True)]\n\n __description__ = \"\"\"Uploaded.net hoster plugin\"\"\"\n __license__ = \"GPLv3\"\n __authors__ = [(\"Walter Purcaro\", \"[email protected]\")]\n\n\n DISPOSITION = False\n\n API_KEY = \"lhF2IeeprweDfu9ccWlxXVVypA5nA3EL\"\n\n URL_REPLACEMENTS = [(__pattern__ + \".*\", r'http://uploaded.net/file/\\g<ID>')]\n\n LINK_PREMIUM_PATTERN = r'<div class=\"tfree\".*\\s*<form method=\"post\" action=\"(.+?)\"'\n\n WAIT_PATTERN = r'Current waiting period: <span>(\\d+)'\n DL_LIMIT_ERROR = r'You have reached the max. 
number of possible free downloads for this hour'\n\n\n @classmethod\n def apiInfo(cls, url=\"\", get={}, post={}):\n info = super(UploadedTo, cls).apiInfo(url)\n\n for _i in xrange(5):\n html = getURL(\"http://uploaded.net/api/filemultiple\",\n get={\"apikey\": cls.API_KEY, 'id_0': re.match(cls.__pattern__, url).group('ID')},\n decode=True)\n\n if html != \"can't find request\":\n api = html.split(\",\", 4)\n if api[0] == \"online\":\n info.update({'name': api[4].strip(), 'size': api[2], 'status': 2})\n else:\n info['status'] = 1\n break\n else:\n time.sleep(3)\n\n return info\n\n\n def setup(self):\n self.multiDL = self.resumeDownload = self.premium\n self.chunkLimit = 1 # critical problems with more chunks\n\n\n def checkErrors(self):\n if 'var free_enabled = false;' in self.html:\n self.logError(_(\"Free-download capacities exhausted\"))\n self.retry(24, 5 * 60)\n\n elif \"limit-size\" in self.html:\n self.fail(_(\"File too big for free download\"))\n\n elif \"limit-slot\" in self.html: # Temporary restriction so just wait a bit\n self.wait(30 * 60, True)\n self.retry()\n\n elif \"limit-parallel\" in self.html:\n self.fail(_(\"Cannot download in parallel\"))\n\n elif \"limit-dl\" in self.html or self.DL_LIMIT_ERROR in self.html: # limit-dl\n self.wait(3 * 60 * 60, True)\n self.retry()\n\n elif '\"err\":\"captcha\"' in self.html:\n self.invalidCaptcha()\n\n else:\n m = re.search(self.WAIT_PATTERN, self.html)\n if m:\n self.wait(m.group(1))\n\n\n def handleFree(self, pyfile):\n self.load(\"http://uploaded.net/language/en\", just_header=True)\n\n self.html = self.load(\"http://uploaded.net/js/download.js\", decode=True)\n\n recaptcha = ReCaptcha(self)\n response, challenge = recaptcha.challenge()\n\n self.html = self.load(\"http://uploaded.net/io/ticket/captcha/%s\" % self.info['pattern']['ID'],\n post={'recaptcha_challenge_field': challenge,\n 'recaptcha_response_field' : response})\n\n if \"type:'download'\" in self.html:\n self.correctCaptcha()\n try:\n self.link = re.search(\"url:'(.+?)'\", self.html).group(1)\n\n except Exception:\n pass\n\n self.checkErrors()\n\n\n def checkFile(self, rules={}):\n if self.checkDownload({'limit-dl': self.DL_LIMIT_ERROR}):\n self.wait(3 * 60 * 60, True)\n self.retry()\n\n return super(UploadedTo, self).checkFile(rules)\n\n\ngetInfo = create_getInfo(UploadedTo)\n", "path": "module/plugins/hoster/UploadedTo.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport re\nimport time\n\nfrom module.network.RequestFactory import getURL\nfrom module.plugins.internal.CaptchaService import ReCaptcha\nfrom module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo\n\n\nclass UploadedTo(SimpleHoster):\n __name__ = \"UploadedTo\"\n __type__ = \"hoster\"\n __version__ = \"0.87\"\n\n __pattern__ = r'https?://(?:www\\.)?(uploaded\\.(to|net)|ul\\.to)(/file/|/?\\?id=|.*?&id=|/)(?P<ID>\\w+)'\n __config__ = [(\"use_premium\", \"bool\", \"Use premium account if available\", True)]\n\n __description__ = \"\"\"Uploaded.net hoster plugin\"\"\"\n __license__ = \"GPLv3\"\n __authors__ = [(\"Walter Purcaro\", \"[email protected]\")]\n\n\n DISPOSITION = False\n\n API_KEY = \"lhF2IeeprweDfu9ccWlxXVVypA5nA3EL\"\n\n URL_REPLACEMENTS = [(__pattern__ + \".*\", r'http://uploaded.net/file/\\g<ID>')]\n\n TEMP_OFFLINE_PATTERN = r'<title>uploaded\\.net - Maintenance - Wartungsarbeiten</title>'\n\n LINK_PREMIUM_PATTERN = r'<div class=\"tfree\".*\\s*<form method=\"post\" action=\"(.+?)\"'\n\n WAIT_PATTERN = r'Current waiting period: <span>(\\d+)'\n DL_LIMIT_ERROR = 
r'You have reached the max. number of possible free downloads for this hour'\n\n\n @classmethod\n def apiInfo(cls, url=\"\", get={}, post={}):\n info = super(UploadedTo, cls).apiInfo(url)\n\n for _i in xrange(5):\n html = getURL(\"http://uploaded.net/api/filemultiple\",\n get={\"apikey\": cls.API_KEY, 'id_0': re.match(cls.__pattern__, url).group('ID')},\n decode=True)\n\n if html != \"can't find request\":\n api = html.split(\",\", 4)\n if api[0] == \"online\":\n info.update({'name': api[4].strip(), 'size': api[2], 'status': 2})\n else:\n info['status'] = 1\n break\n else:\n time.sleep(3)\n\n return info\n\n\n def setup(self):\n self.multiDL = self.resumeDownload = self.premium\n self.chunkLimit = 1 # critical problems with more chunks\n\n\n def checkErrors(self):\n if 'var free_enabled = false;' in self.html:\n self.logError(_(\"Free-download capacities exhausted\"))\n self.retry(24, 5 * 60)\n\n elif \"limit-size\" in self.html:\n self.fail(_(\"File too big for free download\"))\n\n elif \"limit-slot\" in self.html: # Temporary restriction so just wait a bit\n self.wait(30 * 60, True)\n self.retry()\n\n elif \"limit-parallel\" in self.html:\n self.fail(_(\"Cannot download in parallel\"))\n\n elif \"limit-dl\" in self.html or self.DL_LIMIT_ERROR in self.html: # limit-dl\n self.wait(3 * 60 * 60, True)\n self.retry()\n\n elif '\"err\":\"captcha\"' in self.html:\n self.invalidCaptcha()\n\n else:\n m = re.search(self.WAIT_PATTERN, self.html)\n if m:\n self.wait(m.group(1))\n\n\n def handleFree(self, pyfile):\n self.load(\"http://uploaded.net/language/en\", just_header=True)\n\n self.html = self.load(\"http://uploaded.net/js/download.js\", decode=True)\n\n recaptcha = ReCaptcha(self)\n response, challenge = recaptcha.challenge()\n\n self.html = self.load(\"http://uploaded.net/io/ticket/captcha/%s\" % self.info['pattern']['ID'],\n post={'recaptcha_challenge_field': challenge,\n 'recaptcha_response_field' : response})\n\n if \"type:'download'\" in self.html:\n self.correctCaptcha()\n try:\n self.link = re.search(\"url:'(.+?)'\", self.html).group(1)\n\n except Exception:\n pass\n\n self.checkErrors()\n\n\n def checkFile(self, rules={}):\n if self.checkDownload({'limit-dl': self.DL_LIMIT_ERROR}):\n self.wait(3 * 60 * 60, True)\n self.retry()\n\n return super(UploadedTo, self).checkFile(rules)\n\n\ngetInfo = create_getInfo(UploadedTo)\n", "path": "module/plugins/hoster/UploadedTo.py"}]} | 1,651 | 298 |
gh_patches_debug_9143 | rasdani/github-patches | git_diff | google-deepmind__dm-haiku-168 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Examples are distributed as part of the package
Hi,
I think the examples folder is currently packaged as part of the package on PyPI. This means that installing haiku will also install the examples as the package `examples`. Should these be excluded from the distribution?
JAX also has examples in their repo, but those are excluded from packaging in
https://github.com/google/jax/blob/main/setup.py#L33
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """Setup for pip package."""
16
17 from setuptools import find_namespace_packages
18 from setuptools import setup
19
20
21 def _get_version():
22 with open('haiku/__init__.py') as fp:
23 for line in fp:
24 if line.startswith('__version__'):
25 g = {}
26 exec(line, g) # pylint: disable=exec-used
27 return g['__version__']
28 raise ValueError('`__version__` not defined in `haiku/__init__.py`')
29
30
31 def _parse_requirements(requirements_txt_path):
32 with open(requirements_txt_path) as fp:
33 return fp.read().splitlines()
34
35
36 _VERSION = _get_version()
37
38 EXTRA_PACKAGES = {
39 'jax': ['jax>=0.1.71'],
40 'jaxlib': ['jaxlib>=0.1.49'],
41 }
42
43 setup(
44 name='dm-haiku',
45 version=_VERSION,
46 url='https://github.com/deepmind/dm-haiku',
47 license='Apache 2.0',
48 author='DeepMind',
49 description='Haiku is a library for building neural networks in JAX.',
50 long_description=open('README.md').read(),
51 long_description_content_type='text/markdown',
52 author_email='[email protected]',
53 # Contained modules and scripts.
54 packages=find_namespace_packages(exclude=['*_test.py']),
55 install_requires=_parse_requirements('requirements.txt'),
56 extras_require=EXTRA_PACKAGES,
57 tests_require=_parse_requirements('requirements-test.txt'),
58 requires_python='>=3.7',
59 include_package_data=True,
60 zip_safe=False,
61 # PyPI package information.
62 classifiers=[
63 'Development Status :: 4 - Beta',
64 'Intended Audience :: Developers',
65 'Intended Audience :: Education',
66 'Intended Audience :: Science/Research',
67 'License :: OSI Approved :: Apache Software License',
68 'Programming Language :: Python :: 3',
69 'Programming Language :: Python :: 3.7',
70 'Programming Language :: Python :: 3.8',
71 'Topic :: Scientific/Engineering :: Mathematics',
72 'Topic :: Software Development :: Libraries :: Python Modules',
73 'Topic :: Software Development :: Libraries',
74 ],
75 )
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -51,7 +51,7 @@
long_description_content_type='text/markdown',
author_email='[email protected]',
# Contained modules and scripts.
- packages=find_namespace_packages(exclude=['*_test.py']),
+ packages=find_namespace_packages(exclude=['*_test.py', 'examples']),
install_requires=_parse_requirements('requirements.txt'),
extras_require=EXTRA_PACKAGES,
tests_require=_parse_requirements('requirements-test.txt'),
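To see the effect of the extra `exclude` entry, `find_namespace_packages` can be called directly from a checkout root; the directory layout in the comments is hypothetical:

```python
from setuptools import find_namespace_packages

# assuming a checkout containing e.g. haiku/, haiku/nets/ and examples/
print(find_namespace_packages(exclude=['*_test.py']))
# e.g. ['haiku', 'haiku.nets', 'examples']
print(find_namespace_packages(exclude=['*_test.py', 'examples']))
# e.g. ['haiku', 'haiku.nets']  -- 'examples' is no longer distributed
```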
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -51,7 +51,7 @@\n long_description_content_type='text/markdown',\n author_email='[email protected]',\n # Contained modules and scripts.\n- packages=find_namespace_packages(exclude=['*_test.py']),\n+ packages=find_namespace_packages(exclude=['*_test.py', 'examples']),\n install_requires=_parse_requirements('requirements.txt'),\n extras_require=EXTRA_PACKAGES,\n tests_require=_parse_requirements('requirements-test.txt'),\n", "issue": "Examples are distributed as part of the package\nHi, \r\n\r\nI think the examples are folder are currently packaged as part of the package on PyPI. This means that installing haiku will also install the examples as the package `examples`. Should these be excluded from the distribution?\r\n\r\nJAX also has examples in their repo, but those are excluded from packaging in \r\n\r\nhttps://github.com/google/jax/blob/main/setup.py#L33\r\n\r\n\n", "before_files": [{"content": "# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Setup for pip package.\"\"\"\n\nfrom setuptools import find_namespace_packages\nfrom setuptools import setup\n\n\ndef _get_version():\n with open('haiku/__init__.py') as fp:\n for line in fp:\n if line.startswith('__version__'):\n g = {}\n exec(line, g) # pylint: disable=exec-used\n return g['__version__']\n raise ValueError('`__version__` not defined in `haiku/__init__.py`')\n\n\ndef _parse_requirements(requirements_txt_path):\n with open(requirements_txt_path) as fp:\n return fp.read().splitlines()\n\n\n_VERSION = _get_version()\n\nEXTRA_PACKAGES = {\n 'jax': ['jax>=0.1.71'],\n 'jaxlib': ['jaxlib>=0.1.49'],\n}\n\nsetup(\n name='dm-haiku',\n version=_VERSION,\n url='https://github.com/deepmind/dm-haiku',\n license='Apache 2.0',\n author='DeepMind',\n description='Haiku is a library for building neural networks in JAX.',\n long_description=open('README.md').read(),\n long_description_content_type='text/markdown',\n author_email='[email protected]',\n # Contained modules and scripts.\n packages=find_namespace_packages(exclude=['*_test.py']),\n install_requires=_parse_requirements('requirements.txt'),\n extras_require=EXTRA_PACKAGES,\n tests_require=_parse_requirements('requirements-test.txt'),\n requires_python='>=3.7',\n include_package_data=True,\n zip_safe=False,\n # PyPI package information.\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Libraries',\n 
],\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Setup for pip package.\"\"\"\n\nfrom setuptools import find_namespace_packages\nfrom setuptools import setup\n\n\ndef _get_version():\n with open('haiku/__init__.py') as fp:\n for line in fp:\n if line.startswith('__version__'):\n g = {}\n exec(line, g) # pylint: disable=exec-used\n return g['__version__']\n raise ValueError('`__version__` not defined in `haiku/__init__.py`')\n\n\ndef _parse_requirements(requirements_txt_path):\n with open(requirements_txt_path) as fp:\n return fp.read().splitlines()\n\n\n_VERSION = _get_version()\n\nEXTRA_PACKAGES = {\n 'jax': ['jax>=0.1.71'],\n 'jaxlib': ['jaxlib>=0.1.49'],\n}\n\nsetup(\n name='dm-haiku',\n version=_VERSION,\n url='https://github.com/deepmind/dm-haiku',\n license='Apache 2.0',\n author='DeepMind',\n description='Haiku is a library for building neural networks in JAX.',\n long_description=open('README.md').read(),\n long_description_content_type='text/markdown',\n author_email='[email protected]',\n # Contained modules and scripts.\n packages=find_namespace_packages(exclude=['*_test.py', 'examples']),\n install_requires=_parse_requirements('requirements.txt'),\n extras_require=EXTRA_PACKAGES,\n tests_require=_parse_requirements('requirements-test.txt'),\n requires_python='>=3.7',\n include_package_data=True,\n zip_safe=False,\n # PyPI package information.\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Libraries',\n ],\n)\n", "path": "setup.py"}]} | 1,096 | 121 |
gh_patches_debug_2119 | rasdani/github-patches | git_diff | qtile__qtile-1578 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
qtile error to load
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libqtile/scripts/qtile.py`
Content:
```
1 # Copyright (c) 2008, Aldo Cortesi. All rights reserved.
2 # Copyright (c) 2011, Florian Mounier
3 #
4 # Permission is hereby granted, free of charge, to any person obtaining a copy
5 # of this software and associated documentation files (the "Software"), to deal
6 # in the Software without restriction, including without limitation the rights
7 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 # copies of the Software, and to permit persons to whom the Software is
9 # furnished to do so, subject to the following conditions:
10 #
11 # The above copyright notice and this permission notice shall be included in
12 # all copies or substantial portions of the Software.
13 #
14 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
20 # SOFTWARE.
21
22 # Set the locale before any widgets or anything are imported, so any widget
23 # whose defaults depend on a reasonable locale sees something reasonable.
24 import locale
25 import logging
26 from os import path, getenv, makedirs
27
28 from libqtile.log_utils import init_log, logger
29 from libqtile import confreader
30 from libqtile.backend.x11 import xcore
31
32 locale.setlocale(locale.LC_ALL, locale.getdefaultlocale()) # type: ignore
33
34 try:
35 import pkg_resources
36 VERSION = pkg_resources.require("qtile")[0].version
37 except (pkg_resources.DistributionNotFound, ImportError):
38 VERSION = 'dev'
39
40
41 def rename_process():
42 """
43 Try to rename the qtile process if py-setproctitle is installed:
44
45 http://code.google.com/p/py-setproctitle/
46
47 Will fail silently if it's not installed. Setting the title lets you do
48 stuff like "killall qtile".
49 """
50 try:
51 import setproctitle
52 setproctitle.setproctitle("qtile")
53 except ImportError:
54 pass
55
56
57 def make_qtile():
58 from argparse import ArgumentParser
59 parser = ArgumentParser(
60 description='A full-featured, pure-Python tiling window manager.',
61 prog='qtile',
62 )
63 parser.add_argument(
64 '--version',
65 action='version',
66 version=VERSION,
67 )
68 parser.add_argument(
69 "-c", "--config",
70 action="store",
71 default=path.expanduser(path.join(
72 getenv('XDG_CONFIG_HOME', '~/.config'), 'qtile', 'config.py')),
73 dest="configfile",
74 help='Use the specified configuration file',
75 )
76 parser.add_argument(
77 "-s", "--socket",
78 action="store",
79 default=None,
80 dest="socket",
81 help='Path of the Qtile IPC socket.'
82 )
83 parser.add_argument(
84 "-n", "--no-spawn",
85 action="store_true",
86 default=False,
87 dest="no_spawn",
88 help='Avoid spawning apps. (Used for restart)'
89 )
90 parser.add_argument(
91 '-l', '--log-level',
92 default='WARNING',
93 dest='log_level',
94 choices=('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'),
95 help='Set qtile log level'
96 )
97 parser.add_argument(
98 '--with-state',
99 default=None,
100 dest='state',
101 help='Pickled QtileState object (typically used only internally)',
102 )
103 options = parser.parse_args()
104 log_level = getattr(logging, options.log_level)
105 init_log(log_level=log_level)
106
107 kore = xcore.XCore()
108 try:
109 if not path.isfile(options.configfile):
110 try:
111 makedirs(path.dirname(options.configfile), exist_ok=True)
112 from shutil import copyfile
113 default_config_path = path.join(path.dirname(__file__),
114 "..",
115 "resources",
116 "default_config.py")
117 copyfile(default_config_path, options.configfile)
118 logger.info('Copied default_config.py to %s', options.configfile)
119 except Exception as e:
120 logger.exception('Failed to copy default_config.py to %s: (%s)',
121 options.configfile, e)
122
123 config = confreader.Config.from_file(kore, options.configfile)
124 except Exception as e:
125 logger.exception('Error while reading config file (%s)', e)
126 config = confreader.Config()
127 from libqtile.widget import TextBox
128 widgets = config.screens[0].bottom.widgets
129 widgets.insert(0, TextBox('Config Err!'))
130
131 # XXX: the import is here because we need to call init_log
132 # before start importing stuff
133 from libqtile.core import session_manager
134 return session_manager.SessionManager(
135 kore,
136 config,
137 fname=options.socket,
138 no_spawn=options.no_spawn,
139 state=options.state,
140 )
141
142
143 def main():
144 rename_process()
145 q = make_qtile()
146 try:
147 q.loop()
148 except Exception:
149 logger.exception('Qtile crashed')
150 logger.info('Exiting...')
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/libqtile/scripts/qtile.py b/libqtile/scripts/qtile.py
--- a/libqtile/scripts/qtile.py
+++ b/libqtile/scripts/qtile.py
@@ -29,7 +29,11 @@
 from libqtile import confreader
 from libqtile.backend.x11 import xcore
 
-locale.setlocale(locale.LC_ALL, locale.getdefaultlocale())  # type: ignore
+try:
+    locale.setlocale(locale.LC_ALL, locale.getdefaultlocale())  # type: ignore
+except locale.Error:
+    pass
+
 
 try:
     import pkg_resources
| {"golden_diff": "diff --git a/libqtile/scripts/qtile.py b/libqtile/scripts/qtile.py\n--- a/libqtile/scripts/qtile.py\n+++ b/libqtile/scripts/qtile.py\n@@ -29,7 +29,11 @@\n from libqtile import confreader\n from libqtile.backend.x11 import xcore\n \n-locale.setlocale(locale.LC_ALL, locale.getdefaultlocale()) # type: ignore\n+try:\n+ locale.setlocale(locale.LC_ALL, locale.getdefaultlocale()) # type: ignore\n+except locale.Error:\n+ pass\n+\n \n try:\n import pkg_resources\n", "issue": "qtile error to load\n\n", "before_files": [{"content": "# Copyright (c) 2008, Aldo Cortesi. All rights reserved.\n# Copyright (c) 2011, Florian Mounier\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n# Set the locale before any widgets or anything are imported, so any widget\n# whose defaults depend on a reasonable locale sees something reasonable.\nimport locale\nimport logging\nfrom os import path, getenv, makedirs\n\nfrom libqtile.log_utils import init_log, logger\nfrom libqtile import confreader\nfrom libqtile.backend.x11 import xcore\n\nlocale.setlocale(locale.LC_ALL, locale.getdefaultlocale()) # type: ignore\n\ntry:\n import pkg_resources\n VERSION = pkg_resources.require(\"qtile\")[0].version\nexcept (pkg_resources.DistributionNotFound, ImportError):\n VERSION = 'dev'\n\n\ndef rename_process():\n \"\"\"\n Try to rename the qtile process if py-setproctitle is installed:\n\n http://code.google.com/p/py-setproctitle/\n\n Will fail silently if it's not installed. Setting the title lets you do\n stuff like \"killall qtile\".\n \"\"\"\n try:\n import setproctitle\n setproctitle.setproctitle(\"qtile\")\n except ImportError:\n pass\n\n\ndef make_qtile():\n from argparse import ArgumentParser\n parser = ArgumentParser(\n description='A full-featured, pure-Python tiling window manager.',\n prog='qtile',\n )\n parser.add_argument(\n '--version',\n action='version',\n version=VERSION,\n )\n parser.add_argument(\n \"-c\", \"--config\",\n action=\"store\",\n default=path.expanduser(path.join(\n getenv('XDG_CONFIG_HOME', '~/.config'), 'qtile', 'config.py')),\n dest=\"configfile\",\n help='Use the specified configuration file',\n )\n parser.add_argument(\n \"-s\", \"--socket\",\n action=\"store\",\n default=None,\n dest=\"socket\",\n help='Path of the Qtile IPC socket.'\n )\n parser.add_argument(\n \"-n\", \"--no-spawn\",\n action=\"store_true\",\n default=False,\n dest=\"no_spawn\",\n help='Avoid spawning apps. 
(Used for restart)'\n )\n parser.add_argument(\n '-l', '--log-level',\n default='WARNING',\n dest='log_level',\n choices=('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'),\n help='Set qtile log level'\n )\n parser.add_argument(\n '--with-state',\n default=None,\n dest='state',\n help='Pickled QtileState object (typically used only internally)',\n )\n options = parser.parse_args()\n log_level = getattr(logging, options.log_level)\n init_log(log_level=log_level)\n\n kore = xcore.XCore()\n try:\n if not path.isfile(options.configfile):\n try:\n makedirs(path.dirname(options.configfile), exist_ok=True)\n from shutil import copyfile\n default_config_path = path.join(path.dirname(__file__),\n \"..\",\n \"resources\",\n \"default_config.py\")\n copyfile(default_config_path, options.configfile)\n logger.info('Copied default_config.py to %s', options.configfile)\n except Exception as e:\n logger.exception('Failed to copy default_config.py to %s: (%s)',\n options.configfile, e)\n\n config = confreader.Config.from_file(kore, options.configfile)\n except Exception as e:\n logger.exception('Error while reading config file (%s)', e)\n config = confreader.Config()\n from libqtile.widget import TextBox\n widgets = config.screens[0].bottom.widgets\n widgets.insert(0, TextBox('Config Err!'))\n\n # XXX: the import is here because we need to call init_log\n # before start importing stuff\n from libqtile.core import session_manager\n return session_manager.SessionManager(\n kore,\n config,\n fname=options.socket,\n no_spawn=options.no_spawn,\n state=options.state,\n )\n\n\ndef main():\n rename_process()\n q = make_qtile()\n try:\n q.loop()\n except Exception:\n logger.exception('Qtile crashed')\n logger.info('Exiting...')\n", "path": "libqtile/scripts/qtile.py"}], "after_files": [{"content": "# Copyright (c) 2008, Aldo Cortesi. All rights reserved.\n# Copyright (c) 2011, Florian Mounier\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n# Set the locale before any widgets or anything are imported, so any widget\n# whose defaults depend on a reasonable locale sees something reasonable.\nimport locale\nimport logging\nfrom os import path, getenv, makedirs\n\nfrom libqtile.log_utils import init_log, logger\nfrom libqtile import confreader\nfrom libqtile.backend.x11 import xcore\n\ntry:\n locale.setlocale(locale.LC_ALL, locale.getdefaultlocale()) # type: ignore\nexcept locale.Error:\n pass\n\n\ntry:\n import pkg_resources\n VERSION = pkg_resources.require(\"qtile\")[0].version\nexcept (pkg_resources.DistributionNotFound, ImportError):\n VERSION = 'dev'\n\n\ndef rename_process():\n \"\"\"\n Try to rename the qtile process if py-setproctitle is installed:\n\n http://code.google.com/p/py-setproctitle/\n\n Will fail silently if it's not installed. Setting the title lets you do\n stuff like \"killall qtile\".\n \"\"\"\n try:\n import setproctitle\n setproctitle.setproctitle(\"qtile\")\n except ImportError:\n pass\n\n\ndef make_qtile():\n from argparse import ArgumentParser\n parser = ArgumentParser(\n description='A full-featured, pure-Python tiling window manager.',\n prog='qtile',\n )\n parser.add_argument(\n '--version',\n action='version',\n version=VERSION,\n )\n parser.add_argument(\n \"-c\", \"--config\",\n action=\"store\",\n default=path.expanduser(path.join(\n getenv('XDG_CONFIG_HOME', '~/.config'), 'qtile', 'config.py')),\n dest=\"configfile\",\n help='Use the specified configuration file',\n )\n parser.add_argument(\n \"-s\", \"--socket\",\n action=\"store\",\n default=None,\n dest=\"socket\",\n help='Path of the Qtile IPC socket.'\n )\n parser.add_argument(\n \"-n\", \"--no-spawn\",\n action=\"store_true\",\n default=False,\n dest=\"no_spawn\",\n help='Avoid spawning apps. 
(Used for restart)'\n )\n parser.add_argument(\n '-l', '--log-level',\n default='WARNING',\n dest='log_level',\n choices=('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'),\n help='Set qtile log level'\n )\n parser.add_argument(\n '--with-state',\n default=None,\n dest='state',\n help='Pickled QtileState object (typically used only internally)',\n )\n options = parser.parse_args()\n log_level = getattr(logging, options.log_level)\n init_log(log_level=log_level)\n\n kore = xcore.XCore()\n try:\n if not path.isfile(options.configfile):\n try:\n makedirs(path.dirname(options.configfile), exist_ok=True)\n from shutil import copyfile\n default_config_path = path.join(path.dirname(__file__),\n \"..\",\n \"resources\",\n \"default_config.py\")\n copyfile(default_config_path, options.configfile)\n logger.info('Copied default_config.py to %s', options.configfile)\n except Exception as e:\n logger.exception('Failed to copy default_config.py to %s: (%s)',\n options.configfile, e)\n\n config = confreader.Config.from_file(kore, options.configfile)\n except Exception as e:\n logger.exception('Error while reading config file (%s)', e)\n config = confreader.Config()\n from libqtile.widget import TextBox\n widgets = config.screens[0].bottom.widgets\n widgets.insert(0, TextBox('Config Err!'))\n\n # XXX: the import is here because we need to call init_log\n # before start importing stuff\n from libqtile.core import session_manager\n return session_manager.SessionManager(\n kore,\n config,\n fname=options.socket,\n no_spawn=options.no_spawn,\n state=options.state,\n )\n\n\ndef main():\n rename_process()\n q = make_qtile()\n try:\n q.loop()\n except Exception:\n logger.exception('Qtile crashed')\n logger.info('Exiting...')\n", "path": "libqtile/scripts/qtile.py"}]} | 1,740 | 135 |
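
Stripped of the qtile-specific wiring, the patch in this record amounts to guarding one `locale.setlocale` call. A minimal standard-library sketch of that guard (not the full qtile entry point) is:

```python
# Guarded locale initialisation: fall back to the default "C" locale instead
# of crashing when the environment advertises a locale that is not installed.
import locale

try:
    locale.setlocale(locale.LC_ALL, locale.getdefaultlocale())
except locale.Error:
    # e.g. an unset or unsupported LANG/LC_* value; silently keep the default.
    pass
```

The same pattern applies to any startup code that has to tolerate hosts with a missing or misconfigured locale, which appears to be what the reported load failure came down to.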
gh_patches_debug_1121 | rasdani/github-patches | git_diff | dotkom__onlineweb4-973 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add appKom to list of committees in dashboard view
AppKom is missing as a committee in the dashboard view. Users can't add a position in that committee.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/authentication/models.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 import datetime
4 import socket
5 import urllib
6 import hashlib
7
8 from django.conf import settings
9 from django.contrib.auth.models import AbstractUser
10 from django.db import models
11 from django.utils.translation import ugettext as _
12 from django.utils import timezone
13 from django.utils.html import strip_tags
14
15 import watson
16 import reversion
17
18 # If this list is changed, remember to check that the year property on
19 # OnlineUser is still correct!
20 FIELD_OF_STUDY_CHOICES = [
21 (0, _(u'Gjest')),
22 (1, _(u'Bachelor i Informatikk (BIT)')),
23 # master degrees take up the interval [10,30]
24 (10, _(u'Software (SW)')),
25 (11, _(u'Informasjonsforvaltning (DIF)')),
26 (12, _(u'Komplekse Datasystemer (KDS)')),
27 (13, _(u'Spillteknologi (SPT)')),
28 (14, _(u'Intelligente Systemer (IRS)')),
29 (15, _(u'Helseinformatikk (MSMEDTEK)')),
30 (30, _(u'Annen mastergrad')),
31 (80, _(u'PhD')),
32 (90, _(u'International')),
33 (100, _(u'Annet Onlinemedlem')),
34 ]
35
36 GENDER_CHOICES = [
37 ("male", _(u"mann")),
38 ("female", _(u"kvinne")),
39 ]
40
41 COMMITTEES = [
42 ('hs', _(u'Hovedstyret')),
43 ('arrkom', _(u'Arrangementskomiteen')),
44 ('bankom', _(u'Bank- og økonomikomiteen')),
45 ('bedkom', _(u'Bedriftskomiteen')),
46 ('dotkom', _(u'Drifts- og utviklingskomiteen')),
47 ('ekskom', _(u'Ekskursjonskomiteen')),
48 ('fagkom', _(u'Fag- og kurskomiteen')),
49 ('jubkom', _(u'Jubileumskomiteen')),
50 ('pangkom', _(u'Pensjonistkomiteen')),
51 ('prokom', _(u'Profil-og aviskomiteen')),
52 ('trikom', _(u'Trivselskomiteen')),
53 ('velkom', _(u'Velkomstkomiteen')),
54 ]
55
56 POSITIONS = [
57 ('medlem', _(u'Medlem')),
58 ('leder', _(u'Leder')),
59 ('nestleder', _(u'Nestleder')),
60 ('okoans', _(u'Økonomiansvarlig')),
61 ]
62
63 class OnlineUser(AbstractUser):
64
65 IMAGE_FOLDER = "images/profiles"
66 IMAGE_EXTENSIONS = ['.jpg', '.jpeg', '.gif', '.png']
67
68 # Online related fields
69 field_of_study = models.SmallIntegerField(_(u"studieretning"), choices=FIELD_OF_STUDY_CHOICES, default=0)
70 started_date = models.DateField(_(u"startet studie"), default=timezone.now().date())
71 compiled = models.BooleanField(_(u"kompilert"), default=False)
72
73 # Infomail
74 infomail = models.BooleanField(_(u"vil ha infomail"), default=False)
75
76 # Address
77 phone_number = models.CharField(_(u"telefonnummer"), max_length=20, blank=True, null=True)
78 address = models.CharField(_(u"adresse"), max_length=100, blank=True, null=True)
79 zip_code = models.CharField(_(u"postnummer"), max_length=4, blank=True, null=True)
80
81 # Other
82 allergies = models.TextField(_(u"allergier"), blank=True, null=True)
83 mark_rules = models.BooleanField(_(u"godtatt prikkeregler"), default=False)
84 rfid = models.CharField(_(u"RFID"), max_length=50, blank=True, null=True)
85 nickname = models.CharField(_(u"nickname"), max_length=50, blank=True, null=True)
86 website = models.URLField(_(u"hjemmeside"), blank=True, null=True)
87 gender = models.CharField(_(u"kjønn"), max_length=10, choices=GENDER_CHOICES, default="male")
88
89 # NTNU credentials
90 ntnu_username = models.CharField(_(u"NTNU-brukernavn"), max_length=10, blank=True, null=True, unique=True)
91
92 # TODO checkbox for forwarding of @online.ntnu.no mail
93
94 @property
95 def is_member(self):
96 """
97 Returns true if the User object is associated with Online.
98 """
99 if self.ntnu_username:
100 if AllowedUsername.objects.filter(username=self.ntnu_username.lower()).filter(expiration_date__gte=timezone.now()).count() > 0:
101 return True
102 return False
103
104 def get_full_name(self):
105 """
106 Returns the first_name plus the last_name, with a space in between.
107 """
108 full_name = u'%s %s' % (self.first_name, self.last_name)
109 return full_name.strip()
110
111 def get_email(self):
112 email = self.get_emails().filter(primary = True)
113 if email:
114 return email[0]
115 return None
116
117 def get_emails(self):
118 return Email.objects.all().filter(user = self)
119
120 def in_group(self, group_name):
121 return reduce(lambda x,y: x or y.name == group_name, self.groups.all(), False)
122
123 @property
124 def year(self):
125 today = timezone.now().date()
126 started = self.started_date
127
128 # We say that a year is 360 days incase we are a bit slower to
129 # add users one year.
130 year = ((today - started).days / 360) + 1
131
132 if self.field_of_study == 0 or self.field_of_study == 100: # others
133 return 0
134 # dont return a bachelor student as 4th or 5th grade
135 elif self.field_of_study == 1: # bachelor
136 if year > 3:
137 return 3
138 return year
139 elif 10 <= self.field_of_study <= 30: # 10-29 is considered master
140 if year >= 2:
141 return 5
142 return 4
143 elif self.field_of_study == 80: # phd
144 return year + 5
145 elif self.field_of_study == 90: # international
146 if year == 1:
147 return 1
148 return 4
149 # If user's field of study is not matched by any of these tests, return -1
150 else:
151 return -1
152
153 @models.permalink
154 def get_absolute_url(self):
155 return ('profiles_view', None, {'username': self.username})
156
157 def __unicode__(self):
158 return self.get_full_name()
159
160 def save(self, *args, **kwargs):
161 if self.ntnu_username == "":
162 self.ntnu_username = None
163 self.username = self.username.lower()
164 super(OnlineUser, self).save(*args, **kwargs)
165
166 def serializable_object(self):
167 if self.privacy.expose_phone_number:
168 phone = self.phone_number
169 else:
170 phone = "Ikke tilgjengelig"
171
172 return {
173 'id': self.id,
174 'phone': strip_tags(phone),
175 'username': strip_tags(self.username),
176 'value': strip_tags(self.get_full_name()), # typeahead
177 'name': strip_tags(self.get_full_name()),
178 'image': self.get_image_url(75),
179 }
180
181 def get_image_url(self, size=50):
182 default = "%s%s_%s.png" % (settings.BASE_URL,
183 settings.DEFAULT_PROFILE_PICTURE_PREFIX, self.gender)
184
185 gravatar_url = "https://www.gravatar.com/avatar/" + hashlib.md5(self.email).hexdigest() + "?"
186 gravatar_url += urllib.urlencode({'d': default, 's':str(size)})
187 return gravatar_url
188
189 class Meta:
190 ordering = ['first_name', 'last_name']
191 verbose_name = _(u"brukerprofil")
192 verbose_name_plural = _(u"brukerprofiler")
193
194
195 reversion.register(OnlineUser)
196
197
198 class Email(models.Model):
199 user = models.ForeignKey(OnlineUser, related_name="email_user")
200 email = models.EmailField(_(u"epostadresse"), unique=True)
201 primary = models.BooleanField(_(u"primær"), default=False)
202 verified = models.BooleanField(_(u"verifisert"), default=False, editable=False)
203
204 def save(self, *args, **kwargs):
205 primary_email = self.user.get_email()
206 if not primary_email:
207 self.primary = True
208 elif primary_email.email != self.email:
209 self.primary = False
210 self.email = self.email.lower()
211 if self.primary:
212 self.user.email = self.email
213 self.user.save()
214 super(Email, self).save(*args, **kwargs)
215
216 def __unicode__(self):
217 return self.email
218
219 class Meta:
220 verbose_name = _(u"epostadresse")
221 verbose_name_plural = _(u"epostadresser")
222
223
224 reversion.register(Email)
225
226
227 class RegisterToken(models.Model):
228 user = models.ForeignKey(OnlineUser, related_name="register_user")
229 email = models.EmailField(_(u"epost"), max_length=254)
230 token = models.CharField(_(u"token"), max_length=32)
231 created = models.DateTimeField(_(u"opprettet dato"), editable=False, auto_now_add=True)
232
233 @property
234 def is_valid(self):
235 valid_period = datetime.timedelta(days=1)
236 now = timezone.now()
237 return now < self.created + valid_period
238
239
240 reversion.register(RegisterToken)
241
242
243 class AllowedUsername(models.Model):
244 """
245 Holds usernames that are considered valid members of Online and the time they expire.
246 """
247 username = models.CharField(_(u"NTNU-brukernavn"), max_length=10, unique=True)
248 registered = models.DateField(_(u"registrert"))
249 note = models.CharField(_(u"notat"), max_length=100)
250 description = models.TextField(_(u"beskrivelse"), blank=True, null=True)
251 expiration_date = models.DateField(_(u"utløpsdato"))
252
253 @property
254 def is_active(self):
255 return timezone.now().date() < self.expiration_date
256
257 def save(self, *args, **kwargs):
258 self.username = self.username.lower()
259 super(AllowedUsername, self).save(*args, **kwargs)
260
261 def __unicode__(self):
262 return self.username
263
264 class Meta:
265 verbose_name = _(u"medlem")
266 verbose_name_plural = _(u"medlemsregister")
267 ordering = (u"username",)
268
269
270 reversion.register(AllowedUsername)
271
272
273 class Position(models.Model):
274 """
275 Contains a users position in the organization from a given year
276 """
277 period = models.CharField(_(u'periode'), max_length=9, default="2013-2014", blank=False)
278 committee = models.CharField(_(u"komite"), max_length=10, choices=COMMITTEES, default="hs")
279 position = models.CharField(_(u"stilling"), max_length=10, choices=POSITIONS, default="medlem")
280 user = models.ForeignKey(OnlineUser, related_name='positions', blank=False)
281
282 @property
283 def print_string(self):
284 return '%s: %s(%s)' % (self.period, self.committee, self.position)
285
286 def __unicode__(self):
287 return self.print_string
288
289 class Meta:
290 verbose_name = _(u'posisjon')
291 verbose_name_plural = _(u'posisjoner')
292 ordering = ('user', 'period', )
293
294
295 reversion.register(Position)
296
297
298 class SpecialPosition(models.Model):
299 """
300 Special object to represent special positions that typically lasts for life.
301 """
302 position = models.CharField(_(u'Posisjon'), max_length=50, blank=False)
303 since_year = models.IntegerField(_(u'Medlem siden'), max_length=4, blank=False)
304 user = models.ForeignKey(OnlineUser, related_name='special_positions', blank=False)
305
306 def __unicode__(self):
307 return '%s, %s' % (self.user.get_full_name(), self.position)
308
309 class Meta:
310 verbose_name = _(u'spesialposisjon')
311 verbose_name_plural = _(u'spesialposisjoner')
312 ordering = ('user', 'since_year',)
313
314
315 reversion.register(SpecialPosition)
316
317
318 # Register OnlineUser in watson index for searching
319 watson.register(OnlineUser, fields=('first_name', 'last_name', 'ntnu_username', 'nickname'))
320
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/apps/authentication/models.py b/apps/authentication/models.py
--- a/apps/authentication/models.py
+++ b/apps/authentication/models.py
@@ -51,6 +51,7 @@
     ('prokom', _(u'Profil-og aviskomiteen')),
     ('trikom', _(u'Trivselskomiteen')),
     ('velkom', _(u'Velkomstkomiteen')),
+    ('appkom', _(u'Applikasjonskomiteen')),
 ]
 
 POSITIONS = [
| {"golden_diff": "diff --git a/apps/authentication/models.py b/apps/authentication/models.py\n--- a/apps/authentication/models.py\n+++ b/apps/authentication/models.py\n@@ -51,6 +51,7 @@\n ('prokom', _(u'Profil-og aviskomiteen')),\n ('trikom', _(u'Trivselskomiteen')),\n ('velkom', _(u'Velkomstkomiteen')),\n+ ('appkom', _(u'Applikasjonskomiteen')),\n ]\n \n POSITIONS = [\n", "issue": "Add appKom to list of committees in dashboard view\nAppKom is missing as a committee in the dashboard view. Users can't add a position in that committee.\n\n\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport datetime\nimport socket\nimport urllib\nimport hashlib\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import AbstractUser\nfrom django.db import models\nfrom django.utils.translation import ugettext as _\nfrom django.utils import timezone\nfrom django.utils.html import strip_tags\n\nimport watson\nimport reversion\n\n# If this list is changed, remember to check that the year property on\n# OnlineUser is still correct!\nFIELD_OF_STUDY_CHOICES = [\n (0, _(u'Gjest')),\n (1, _(u'Bachelor i Informatikk (BIT)')),\n # master degrees take up the interval [10,30]\n (10, _(u'Software (SW)')),\n (11, _(u'Informasjonsforvaltning (DIF)')),\n (12, _(u'Komplekse Datasystemer (KDS)')),\n (13, _(u'Spillteknologi (SPT)')),\n (14, _(u'Intelligente Systemer (IRS)')),\n (15, _(u'Helseinformatikk (MSMEDTEK)')),\n (30, _(u'Annen mastergrad')),\n (80, _(u'PhD')),\n (90, _(u'International')),\n (100, _(u'Annet Onlinemedlem')),\n]\n\nGENDER_CHOICES = [\n (\"male\", _(u\"mann\")),\n (\"female\", _(u\"kvinne\")),\n]\n\nCOMMITTEES = [\n ('hs', _(u'Hovedstyret')),\n ('arrkom', _(u'Arrangementskomiteen')),\n ('bankom', _(u'Bank- og \u00f8konomikomiteen')),\n ('bedkom', _(u'Bedriftskomiteen')),\n ('dotkom', _(u'Drifts- og utviklingskomiteen')),\n ('ekskom', _(u'Ekskursjonskomiteen')),\n ('fagkom', _(u'Fag- og kurskomiteen')),\n ('jubkom', _(u'Jubileumskomiteen')),\n ('pangkom', _(u'Pensjonistkomiteen')),\n ('prokom', _(u'Profil-og aviskomiteen')),\n ('trikom', _(u'Trivselskomiteen')),\n ('velkom', _(u'Velkomstkomiteen')),\n]\n\nPOSITIONS = [\n ('medlem', _(u'Medlem')),\n ('leder', _(u'Leder')),\n ('nestleder', _(u'Nestleder')),\n ('okoans', _(u'\u00d8konomiansvarlig')),\n]\n\nclass OnlineUser(AbstractUser):\n\n IMAGE_FOLDER = \"images/profiles\"\n IMAGE_EXTENSIONS = ['.jpg', '.jpeg', '.gif', '.png']\n \n # Online related fields\n field_of_study = models.SmallIntegerField(_(u\"studieretning\"), choices=FIELD_OF_STUDY_CHOICES, default=0)\n started_date = models.DateField(_(u\"startet studie\"), default=timezone.now().date())\n compiled = models.BooleanField(_(u\"kompilert\"), default=False)\n\n # Infomail\n infomail = models.BooleanField(_(u\"vil ha infomail\"), default=False)\n\n # Address\n phone_number = models.CharField(_(u\"telefonnummer\"), max_length=20, blank=True, null=True)\n address = models.CharField(_(u\"adresse\"), max_length=100, blank=True, null=True)\n zip_code = models.CharField(_(u\"postnummer\"), max_length=4, blank=True, null=True)\n\n # Other\n allergies = models.TextField(_(u\"allergier\"), blank=True, null=True)\n mark_rules = models.BooleanField(_(u\"godtatt prikkeregler\"), default=False)\n rfid = models.CharField(_(u\"RFID\"), max_length=50, blank=True, null=True)\n nickname = models.CharField(_(u\"nickname\"), max_length=50, blank=True, null=True)\n website = models.URLField(_(u\"hjemmeside\"), blank=True, null=True)\n gender = models.CharField(_(u\"kj\u00f8nn\"), 
max_length=10, choices=GENDER_CHOICES, default=\"male\")\n\n # NTNU credentials\n ntnu_username = models.CharField(_(u\"NTNU-brukernavn\"), max_length=10, blank=True, null=True, unique=True)\n\n # TODO checkbox for forwarding of @online.ntnu.no mail\n\n @property\n def is_member(self):\n \"\"\"\n Returns true if the User object is associated with Online.\n \"\"\"\n if self.ntnu_username:\n if AllowedUsername.objects.filter(username=self.ntnu_username.lower()).filter(expiration_date__gte=timezone.now()).count() > 0:\n return True\n return False\n\n def get_full_name(self):\n \"\"\"\n Returns the first_name plus the last_name, with a space in between.\n \"\"\"\n full_name = u'%s %s' % (self.first_name, self.last_name)\n return full_name.strip()\n\n def get_email(self):\n email = self.get_emails().filter(primary = True)\n if email:\n return email[0]\n return None\n\n def get_emails(self):\n return Email.objects.all().filter(user = self)\n\n def in_group(self, group_name):\n return reduce(lambda x,y: x or y.name == group_name, self.groups.all(), False)\n\n @property\n def year(self):\n today = timezone.now().date()\n started = self.started_date\n\n # We say that a year is 360 days incase we are a bit slower to\n # add users one year.\n year = ((today - started).days / 360) + 1\n\n if self.field_of_study == 0 or self.field_of_study == 100: # others\n return 0\n # dont return a bachelor student as 4th or 5th grade\n elif self.field_of_study == 1: # bachelor\n if year > 3:\n return 3\n return year\n elif 10 <= self.field_of_study <= 30: # 10-29 is considered master\n if year >= 2:\n return 5\n return 4\n elif self.field_of_study == 80: # phd\n return year + 5\n elif self.field_of_study == 90: # international\n if year == 1:\n return 1\n return 4\n # If user's field of study is not matched by any of these tests, return -1\n else:\n return -1\n\n @models.permalink\n def get_absolute_url(self):\n return ('profiles_view', None, {'username': self.username})\n\n def __unicode__(self):\n return self.get_full_name()\n\n def save(self, *args, **kwargs):\n if self.ntnu_username == \"\":\n self.ntnu_username = None\n self.username = self.username.lower()\n super(OnlineUser, self).save(*args, **kwargs)\n\n def serializable_object(self):\n if self.privacy.expose_phone_number:\n phone = self.phone_number\n else:\n phone = \"Ikke tilgjengelig\"\n\n return {\n 'id': self.id,\n 'phone': strip_tags(phone),\n 'username': strip_tags(self.username),\n 'value': strip_tags(self.get_full_name()), # typeahead\n 'name': strip_tags(self.get_full_name()),\n 'image': self.get_image_url(75),\n }\n\n def get_image_url(self, size=50):\n default = \"%s%s_%s.png\" % (settings.BASE_URL,\n settings.DEFAULT_PROFILE_PICTURE_PREFIX, self.gender)\n\n gravatar_url = \"https://www.gravatar.com/avatar/\" + hashlib.md5(self.email).hexdigest() + \"?\"\n gravatar_url += urllib.urlencode({'d': default, 's':str(size)})\n return gravatar_url\n\n class Meta:\n ordering = ['first_name', 'last_name']\n verbose_name = _(u\"brukerprofil\")\n verbose_name_plural = _(u\"brukerprofiler\")\n\n\nreversion.register(OnlineUser)\n\n\nclass Email(models.Model):\n user = models.ForeignKey(OnlineUser, related_name=\"email_user\")\n email = models.EmailField(_(u\"epostadresse\"), unique=True)\n primary = models.BooleanField(_(u\"prim\u00e6r\"), default=False)\n verified = models.BooleanField(_(u\"verifisert\"), default=False, editable=False)\n\n def save(self, *args, **kwargs):\n primary_email = self.user.get_email()\n if not primary_email:\n self.primary = 
True\n elif primary_email.email != self.email:\n self.primary = False\n self.email = self.email.lower()\n if self.primary:\n self.user.email = self.email\n self.user.save()\n super(Email, self).save(*args, **kwargs)\n\n def __unicode__(self):\n return self.email\n\n class Meta:\n verbose_name = _(u\"epostadresse\")\n verbose_name_plural = _(u\"epostadresser\")\n\n\nreversion.register(Email)\n\n\nclass RegisterToken(models.Model):\n user = models.ForeignKey(OnlineUser, related_name=\"register_user\")\n email = models.EmailField(_(u\"epost\"), max_length=254)\n token = models.CharField(_(u\"token\"), max_length=32)\n created = models.DateTimeField(_(u\"opprettet dato\"), editable=False, auto_now_add=True)\n\n @property\n def is_valid(self):\n valid_period = datetime.timedelta(days=1)\n now = timezone.now()\n return now < self.created + valid_period \n\n\nreversion.register(RegisterToken)\n\n\nclass AllowedUsername(models.Model):\n \"\"\"\n Holds usernames that are considered valid members of Online and the time they expire.\n \"\"\"\n username = models.CharField(_(u\"NTNU-brukernavn\"), max_length=10, unique=True)\n registered = models.DateField(_(u\"registrert\"))\n note = models.CharField(_(u\"notat\"), max_length=100)\n description = models.TextField(_(u\"beskrivelse\"), blank=True, null=True)\n expiration_date = models.DateField(_(u\"utl\u00f8psdato\"))\n\n @property\n def is_active(self):\n return timezone.now().date() < self.expiration_date\n\n def save(self, *args, **kwargs):\n self.username = self.username.lower()\n super(AllowedUsername, self).save(*args, **kwargs)\n\n def __unicode__(self):\n return self.username\n\n class Meta:\n verbose_name = _(u\"medlem\")\n verbose_name_plural = _(u\"medlemsregister\")\n ordering = (u\"username\",)\n\n\nreversion.register(AllowedUsername)\n\n\nclass Position(models.Model):\n \"\"\"\n Contains a users position in the organization from a given year\n \"\"\"\n period = models.CharField(_(u'periode'), max_length=9, default=\"2013-2014\", blank=False)\n committee = models.CharField(_(u\"komite\"), max_length=10, choices=COMMITTEES, default=\"hs\")\n position = models.CharField(_(u\"stilling\"), max_length=10, choices=POSITIONS, default=\"medlem\")\n user = models.ForeignKey(OnlineUser, related_name='positions', blank=False)\n\n @property\n def print_string(self):\n return '%s: %s(%s)' % (self.period, self.committee, self.position)\n\n def __unicode__(self):\n return self.print_string\n\n class Meta:\n verbose_name = _(u'posisjon')\n verbose_name_plural = _(u'posisjoner')\n ordering = ('user', 'period', )\n\n\nreversion.register(Position)\n\n\nclass SpecialPosition(models.Model):\n \"\"\"\n Special object to represent special positions that typically lasts for life.\n \"\"\"\n position = models.CharField(_(u'Posisjon'), max_length=50, blank=False)\n since_year = models.IntegerField(_(u'Medlem siden'), max_length=4, blank=False)\n user = models.ForeignKey(OnlineUser, related_name='special_positions', blank=False)\n\n def __unicode__(self):\n return '%s, %s' % (self.user.get_full_name(), self.position)\n\n class Meta:\n verbose_name = _(u'spesialposisjon')\n verbose_name_plural = _(u'spesialposisjoner')\n ordering = ('user', 'since_year',)\n\n\nreversion.register(SpecialPosition)\n\n\n# Register OnlineUser in watson index for searching\nwatson.register(OnlineUser, fields=('first_name', 'last_name', 'ntnu_username', 'nickname'))\n", "path": "apps/authentication/models.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport 
datetime\nimport socket\nimport urllib\nimport hashlib\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import AbstractUser\nfrom django.db import models\nfrom django.utils.translation import ugettext as _\nfrom django.utils import timezone\nfrom django.utils.html import strip_tags\n\nimport watson\n\n\n# If this list is changed, remember to check that the year property on\n# OnlineUser is still correct!\nFIELD_OF_STUDY_CHOICES = [\n (0, _(u'Gjest')),\n (1, _(u'Bachelor i Informatikk (BIT)')),\n # master degrees take up the interval [10,30]\n (10, _(u'Software (SW)')),\n (11, _(u'Informasjonsforvaltning (DIF)')),\n (12, _(u'Komplekse Datasystemer (KDS)')),\n (13, _(u'Spillteknologi (SPT)')),\n (14, _(u'Intelligente Systemer (IRS)')),\n (15, _(u'Helseinformatikk (MSMEDTEK)')),\n (30, _(u'Annen mastergrad')),\n (80, _(u'PhD')),\n (90, _(u'International')),\n (100, _(u'Annet Onlinemedlem')),\n]\n\nGENDER_CHOICES = [\n (\"male\", _(u\"mann\")),\n (\"female\", _(u\"kvinne\")),\n]\n\nCOMMITTEES = [\n ('hs', _(u'Hovedstyret')),\n ('arrkom', _(u'Arrangementskomiteen')),\n ('bankom', _(u'Bank- og \u00f8konomikomiteen')),\n ('bedkom', _(u'Bedriftskomiteen')),\n ('dotkom', _(u'Drifts- og utviklingskomiteen')),\n ('ekskom', _(u'Ekskursjonskomiteen')),\n ('fagkom', _(u'Fag- og kurskomiteen')),\n ('jubkom', _(u'Jubileumskomiteen')),\n ('pangkom', _(u'Pensjonistkomiteen')),\n ('prokom', _(u'Profil-og aviskomiteen')),\n ('trikom', _(u'Trivselskomiteen')),\n ('velkom', _(u'Velkomstkomiteen')),\n ('appkom', _(u'Applikasjonskomiteen')),\n]\n\nPOSITIONS = [\n ('medlem', _(u'Medlem')),\n ('leder', _(u'Leder')),\n ('nestleder', _(u'Nestleder')),\n ('okoans', _(u'\u00d8konomiansvarlig')),\n]\n\nclass OnlineUser(AbstractUser):\n\n IMAGE_FOLDER = \"images/profiles\"\n IMAGE_EXTENSIONS = ['.jpg', '.jpeg', '.gif', '.png']\n \n # Online related fields\n field_of_study = models.SmallIntegerField(_(u\"studieretning\"), choices=FIELD_OF_STUDY_CHOICES, default=0)\n started_date = models.DateField(_(u\"startet studie\"), default=timezone.now().date())\n compiled = models.BooleanField(_(u\"kompilert\"), default=False)\n\n # Email\n infomail = models.BooleanField(_(u\"vil ha infomail\"), default=True)\n\n # Address\n phone_number = models.CharField(_(u\"telefonnummer\"), max_length=20, blank=True, null=True)\n address = models.CharField(_(u\"adresse\"), max_length=100, blank=True, null=True)\n zip_code = models.CharField(_(u\"postnummer\"), max_length=4, blank=True, null=True)\n\n # Other\n allergies = models.TextField(_(u\"allergier\"), blank=True, null=True)\n mark_rules = models.BooleanField(_(u\"godtatt prikkeregler\"), default=False)\n rfid = models.CharField(_(u\"RFID\"), max_length=50, blank=True, null=True)\n nickname = models.CharField(_(u\"nickname\"), max_length=50, blank=True, null=True)\n website = models.URLField(_(u\"hjemmeside\"), blank=True, null=True)\n gender = models.CharField(_(u\"kj\u00f8nn\"), max_length=10, choices=GENDER_CHOICES, default=\"male\")\n\n # NTNU credentials\n ntnu_username = models.CharField(_(u\"NTNU-brukernavn\"), max_length=10, blank=True, null=True, unique=True)\n\n # TODO checkbox for forwarding of @online.ntnu.no mail\n\n @property\n def is_member(self):\n \"\"\"\n Returns true if the User object is associated with Online.\n \"\"\"\n if self.ntnu_username:\n if AllowedUsername.objects.filter(username=self.ntnu_username.lower()).filter(expiration_date__gte=timezone.now()).count() > 0:\n return True\n return False\n\n def get_full_name(self):\n \"\"\"\n Returns 
the first_name plus the last_name, with a space in between.\n \"\"\"\n full_name = u'%s %s' % (self.first_name, self.last_name)\n return full_name.strip()\n\n def get_email(self):\n email = self.get_emails().filter(primary = True)\n if email:\n return email[0]\n return None\n\n def get_emails(self):\n return Email.objects.all().filter(user = self)\n\n def in_group(self, group_name):\n return reduce(lambda x,y: x or y.name == group_name, self.groups.all(), False)\n\n @property\n def year(self):\n today = timezone.now().date()\n started = self.started_date\n\n # We say that a year is 360 days incase we are a bit slower to\n # add users one year.\n year = ((today - started).days / 360) + 1\n\n if self.field_of_study == 0 or self.field_of_study == 100: # others\n return 0\n # dont return a bachelor student as 4th or 5th grade\n elif self.field_of_study == 1: # bachelor\n if year > 3:\n return 3\n return year\n elif 10 <= self.field_of_study <= 30: # 10-29 is considered master\n if year >= 2:\n return 5\n return 4\n elif self.field_of_study == 80: # phd\n return year + 5\n elif self.field_of_study == 90: # international\n if year == 1:\n return 1\n return 4\n # If user's field of study is not matched by any of these tests, return -1\n else:\n return -1\n\n @models.permalink\n def get_absolute_url(self):\n return ('profiles_view', None, {'username': self.username})\n\n def __unicode__(self):\n return self.get_full_name()\n\n def save(self, *args, **kwargs):\n if self.ntnu_username == \"\":\n self.ntnu_username = None\n super(OnlineUser, self).save(*args, **kwargs)\n\n def serializable_object(self):\n if self.privacy.expose_phone_number:\n phone = self.phone_number\n else:\n phone = \"Ikke tilgjengelig\"\n\n return {\n 'id': self.id,\n 'phone': strip_tags(phone),\n 'username': strip_tags(self.username),\n 'value': strip_tags(self.get_full_name()), # typeahead\n 'name': strip_tags(self.get_full_name()),\n 'image': self.get_image_url(75),\n }\n\n def get_image_url(self, size=50):\n default = \"%s%s_%s.png\" % (settings.BASE_URL,\n settings.DEFAULT_PROFILE_PICTURE_PREFIX, self.gender)\n\n gravatar_url = \"https://www.gravatar.com/avatar/\" + hashlib.md5(self.email).hexdigest() + \"?\"\n gravatar_url += urllib.urlencode({'d': default, 's':str(size)})\n return gravatar_url\n\n class Meta:\n ordering = ['first_name', 'last_name']\n verbose_name = _(u\"brukerprofil\")\n verbose_name_plural = _(u\"brukerprofiler\")\n\n\nclass Email(models.Model):\n user = models.ForeignKey(OnlineUser, related_name=\"email_user\")\n email = models.EmailField(_(u\"epostadresse\"), unique=True)\n primary = models.BooleanField(_(u\"prim\u00e6r\"), default=False)\n verified = models.BooleanField(_(u\"verifisert\"), default=False, editable=False)\n\n def save(self, *args, **kwargs):\n primary_email = self.user.get_email()\n if not primary_email:\n self.primary = True\n elif primary_email.email != self.email:\n self.primary = False\n self.email = self.email.lower()\n if self.primary:\n self.user.email = self.email\n self.user.save()\n super(Email, self).save(*args, **kwargs)\n\n def __unicode__(self):\n return self.email\n\n class Meta:\n verbose_name = _(u\"epostadresse\")\n verbose_name_plural = _(u\"epostadresser\")\n\n\nclass RegisterToken(models.Model):\n user = models.ForeignKey(OnlineUser, related_name=\"register_user\")\n email = models.EmailField(_(u\"epost\"), max_length=254)\n token = models.CharField(_(u\"token\"), max_length=32)\n created = models.DateTimeField(_(u\"opprettet dato\"), editable=False, 
auto_now_add=True)\n\n @property\n def is_valid(self):\n valid_period = datetime.timedelta(days=1)\n now = timezone.now()\n return now < self.created + valid_period \n\n\nclass AllowedUsername(models.Model):\n \"\"\"\n Holds usernames that are considered valid members of Online and the time they expire.\n \"\"\"\n username = models.CharField(_(u\"NTNU-brukernavn\"), max_length=10, unique=True)\n registered = models.DateField(_(u\"registrert\"))\n note = models.CharField(_(u\"notat\"), max_length=100)\n description = models.TextField(_(u\"beskrivelse\"), blank=True, null=True)\n expiration_date = models.DateField(_(u\"utl\u00f8psdato\"))\n\n @property\n def is_active(self):\n return timezone.now().date() < self.expiration_date\n\n def save(self, *args, **kwargs):\n self.username = self.username.lower()\n super(AllowedUsername, self).save(*args, **kwargs)\n\n def __unicode__(self):\n return self.username\n\n class Meta:\n verbose_name = _(u\"medlem\")\n verbose_name_plural = _(u\"medlemsregister\")\n ordering = (u\"username\",)\n\n\nclass Position(models.Model):\n \"\"\"\n Contains a users position in the organization from a given year\n \"\"\"\n period = models.CharField(_(u'periode'), max_length=9, default=\"2013-2014\", blank=False)\n committee = models.CharField(_(u\"komite\"), max_length=10, choices=COMMITTEES, default=\"hs\")\n position = models.CharField(_(u\"stilling\"), max_length=10, choices=POSITIONS, default=\"medlem\")\n user = models.ForeignKey(OnlineUser, related_name='positions', blank=False)\n\n @property\n def print_string(self):\n return '%s: %s(%s)' % (self.period, self.committee, self.position)\n\n def __unicode__(self):\n return self.print_string\n\n class Meta:\n verbose_name = _(u'posisjon')\n verbose_name_plural = _(u'posisjoner')\n ordering = ('user', 'period', )\n\n\nclass SpecialPosition(models.Model):\n \"\"\"\n Special object to represent special positions that typically lasts for life.\n \"\"\"\n position = models.CharField(_(u'Posisjon'), max_length=50, blank=False)\n since_year = models.IntegerField(_(u'Medlem siden'), max_length=4, blank=False)\n user = models.ForeignKey(OnlineUser, related_name='special_positions', blank=False)\n\n def __unicode__(self):\n return '%s, %s' % (self.user.get_full_name(), self.position)\n\n class Meta:\n verbose_name = _(u'spesialposisjon')\n verbose_name_plural = _(u'spesialposisjoner')\n ordering = ('user', 'since_year',)\n\n\n\n# Register OnlineUser in watson index for searching\nwatson.register(OnlineUser, fields=('first_name', 'last_name', 'ntnu_username', 'nickname'))\n", "path": "apps/authentication/models.py"}]} | 4,068 | 114 |
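
The fix in this record is a one-line addition to a Django `choices` list: the dashboard's committee dropdown is driven entirely by `COMMITTEES`, so the missing committee only needs one more `(value, label)` tuple. A reduced sketch (with the translation helper stubbed out so the snippet runs outside a configured Django project) is:

```python
# Committee choices as (stored value, human-readable label) pairs; adding the
# missing committee is just one more tuple in this list.
_ = lambda s: s  # stand-in for django.utils.translation.ugettext

COMMITTEES = [
    ('hs', _(u'Hovedstyret')),
    ('velkom', _(u'Velkomstkomiteen')),
    ('appkom', _(u'Applikasjonskomiteen')),  # the entry the issue asked for
]
```

In the model above the same list feeds the `committee` field on `Position`, which is why a single new tuple is enough to make the committee selectable.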
gh_patches_debug_9071 | rasdani/github-patches | git_diff | wagtail__wagtail-4341 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Please set up the Wagtail logo to appear as the favicon in readthedocs.
When I have multiple readthedocs tabs open side-by-side, I can't tell which tab is for which set of documentation. [Here is an example of a site that has done this](http://linuxmint-installation-guide.readthedocs.io/en/latest/).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Wagtail documentation build configuration file, created by
4 # sphinx-quickstart on Tue Jan 14 17:38:55 2014.
5 #
6 # This file is execfile()d with the current directory set to its
7 # containing dir.
8 #
9 # Note that not all possible configuration values are present in this
10 # autogenerated file.
11 #
12 # All configuration values have a default; values that are commented out
13 # serve to show the default.
14
15 import sys
16 import os
17
18
19 # on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
20 on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
21
22 if not on_rtd: # only import and set the theme if we're building docs locally
23 import sphinx_rtd_theme
24 html_theme = 'sphinx_rtd_theme'
25 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
26
27 # If extensions (or modules to document with autodoc) are in another directory,
28 # add these directories to sys.path here. If the directory is relative to the
29 # documentation root, use os.path.abspath to make it absolute, like shown here.
30 sys.path.insert(0, os.path.abspath('..'))
31
32 # Autodoc may need to import some models modules which require django settings
33 # be configured
34 os.environ['DJANGO_SETTINGS_MODULE'] = 'wagtail.tests.settings'
35 import django
36 django.setup()
37
38 # Use SQLite3 database engine so it doesn't attempt to use psycopg2 on RTD
39 os.environ['DATABASE_ENGINE'] = 'django.db.backends.sqlite3'
40
41
42 # -- General configuration ------------------------------------------------
43
44 # If your documentation needs a minimal Sphinx version, state it here.
45 #needs_sphinx = '1.0'
46
47 # Add any Sphinx extension module names here, as strings. They can be
48 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
49 # ones.
50 extensions = [
51 'sphinx.ext.autodoc',
52 ]
53
54 if not on_rtd:
55 extensions.append('sphinxcontrib.spelling')
56
57 # Add any paths that contain templates here, relative to this directory.
58 templates_path = ['_templates']
59
60 # The suffix of source filenames.
61 source_suffix = '.rst'
62
63 # The encoding of source files.
64 #source_encoding = 'utf-8-sig'
65
66 # The master toctree document.
67 master_doc = 'index'
68
69 # General information about the project.
70 project = u'Wagtail'
71 copyright = u'2015, Torchbox'
72
73 # The version info for the project you're documenting, acts as replacement for
74 # |version| and |release|, also used in various other places throughout the
75 # built documents.
76
77 # Get Wagtail version
78 from wagtail import __version__, VERSION
79
80 # The short X.Y version.
81 version = '{}.{}'.format(VERSION[0], VERSION[1])
82 # The full version, including alpha/beta/rc tags.
83 release = __version__
84
85 # The language for content autogenerated by Sphinx. Refer to documentation
86 # for a list of supported languages.
87 #language = None
88
89 # There are two options for replacing |today|: either, you set today to some
90 # non-false value, then it is used:
91 #today = ''
92 # Else, today_fmt is used as the format for a strftime call.
93 #today_fmt = '%B %d, %Y'
94
95 # List of patterns, relative to source directory, that match files and
96 # directories to ignore when looking for source files.
97 exclude_patterns = ['_build']
98
99 # The reST default role (used for this markup: `text`) to use for all
100 # documents.
101 #default_role = None
102
103 # If true, '()' will be appended to :func: etc. cross-reference text.
104 #add_function_parentheses = True
105
106 # If true, the current module name will be prepended to all description
107 # unit titles (such as .. function::).
108 #add_module_names = True
109
110 # If true, sectionauthor and moduleauthor directives will be shown in the
111 # output. They are ignored by default.
112 #show_authors = False
113
114 # The name of the Pygments (syntax highlighting) style to use.
115 pygments_style = 'sphinx'
116
117 # A list of ignored prefixes for module index sorting.
118 #modindex_common_prefix = []
119
120 # If true, keep warnings as "system message" paragraphs in the built documents.
121 #keep_warnings = False
122
123
124 # splhinxcontrib.spelling settings
125
126 spelling_lang = 'en_GB'
127 spelling_word_list_filename='spelling_wordlist.txt'
128
129
130 # -- Options for HTML output ----------------------------------------------
131
132
133 # Theme options are theme-specific and customize the look and feel of a theme
134 # further. For a list of options available for each theme, see the
135 # documentation.
136 #html_theme_options = {}
137
138
139
140 # The name for this set of Sphinx documents. If None, it defaults to
141 # "<project> v<release> documentation".
142 #html_title = None
143
144 # A shorter title for the navigation bar. Default is the same as html_title.
145 #html_short_title = None
146
147 # The name of an image file (relative to this directory) to place at the top
148 # of the sidebar.
149 html_logo = 'logo.png'
150
151 # The name of an image file (within the static path) to use as favicon of the
152 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
153 # pixels large.
154 #html_favicon = None
155
156 # Add any paths that contain custom static files (such as style sheets) here,
157 # relative to this directory. They are copied after the builtin static files,
158 # so a file named "default.css" will overwrite the builtin "default.css".
159 html_static_path = ['_static']
160
161 # Add any extra paths that contain custom files (such as robots.txt or
162 # .htaccess) here, relative to this directory. These files are copied
163 # directly to the root of the documentation.
164 #html_extra_path = []
165
166 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
167 # using the given strftime format.
168 #html_last_updated_fmt = '%b %d, %Y'
169
170 # If true, SmartyPants will be used to convert quotes and dashes to
171 # typographically correct entities.
172 #html_use_smartypants = True
173
174 # Custom sidebar templates, maps document names to template names.
175 #html_sidebars = {}
176
177 # Additional templates that should be rendered to pages, maps page names to
178 # template names.
179 #html_additional_pages = {}
180
181 # If false, no module index is generated.
182 #html_domain_indices = True
183
184 # If false, no index is generated.
185 #html_use_index = True
186
187 # If true, the index is split into individual pages for each letter.
188 #html_split_index = False
189
190 # If true, links to the reST sources are added to the pages.
191 #html_show_sourcelink = True
192
193 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
194 #html_show_sphinx = True
195
196 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
197 #html_show_copyright = True
198
199 # If true, an OpenSearch description file will be output, and all pages will
200 # contain a <link> tag referring to it. The value of this option must be the
201 # base URL from which the finished HTML is served.
202 #html_use_opensearch = ''
203
204 # This is the file name suffix for HTML files (e.g. ".xhtml").
205 #html_file_suffix = None
206
207 # Output file base name for HTML help builder.
208 htmlhelp_basename = 'Wagtaildoc'
209
210
211 # -- Options for LaTeX output ---------------------------------------------
212
213 latex_elements = {
214 # The paper size ('letterpaper' or 'a4paper').
215 #'papersize': 'letterpaper',
216
217 # The font size ('10pt', '11pt' or '12pt').
218 #'pointsize': '10pt',
219
220 # Additional stuff for the LaTeX preamble.
221 #'preamble': '',
222 }
223
224 # Grouping the document tree into LaTeX files. List of tuples
225 # (source start file, target name, title,
226 # author, documentclass [howto, manual, or own class]).
227 latex_documents = [
228 ('index', 'Wagtail.tex', u'Wagtail Documentation',
229 u'Torchbox', 'manual'),
230 ]
231
232 # The name of an image file (relative to this directory) to place at the top of
233 # the title page.
234 #latex_logo = None
235
236 # For "manual" documents, if this is true, then toplevel headings are parts,
237 # not chapters.
238 #latex_use_parts = False
239
240 # If true, show page references after internal links.
241 #latex_show_pagerefs = False
242
243 # If true, show URL addresses after external links.
244 #latex_show_urls = False
245
246 # Documents to append as an appendix to all manuals.
247 #latex_appendices = []
248
249 # If false, no module index is generated.
250 #latex_domain_indices = True
251
252
253 # -- Options for manual page output ---------------------------------------
254
255 # One entry per manual page. List of tuples
256 # (source start file, name, description, authors, manual section).
257 man_pages = [
258 ('index', 'wagtail', u'Wagtail Documentation',
259 [u'Torchbox'], 1)
260 ]
261
262 # If true, show URL addresses after external links.
263 #man_show_urls = False
264
265
266 # -- Options for Texinfo output -------------------------------------------
267
268 # Grouping the document tree into Texinfo files. List of tuples
269 # (source start file, target name, title, author,
270 # dir menu entry, description, category)
271 texinfo_documents = [
272 ('index', 'Wagtail', u'Wagtail Documentation',
273 u'Torchbox', 'Wagtail', 'One line description of project.',
274 'Miscellaneous'),
275 ]
276
277 # Documents to append as an appendix to all manuals.
278 #texinfo_appendices = []
279
280 # If false, no module index is generated.
281 #texinfo_domain_indices = True
282
283 # How to display URL addresses: 'footnote', 'no', or 'inline'.
284 #texinfo_show_urls = 'footnote'
285
286 # If true, do not generate a @detailmenu in the "Top" node's menu.
287 #texinfo_no_detailmenu = False
288
289
290 def setup(app):
291 app.add_stylesheet('css/custom.css')
292
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -151,7 +151,7 @@
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
-#html_favicon = None
+html_favicon = 'favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -151,7 +151,7 @@\n # The name of an image file (within the static path) to use as favicon of the\n # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n # pixels large.\n-#html_favicon = None\n+html_favicon = 'favicon.ico'\n \n # Add any paths that contain custom static files (such as style sheets) here,\n # relative to this directory. They are copied after the builtin static files,\n", "issue": "Please set up the Wagtail logo to appear as the favicon in readthedocs.\nWhen I have multiple readthedocs tabs open side-by-side, I can't tell which tab is for which set of documentation. [Here is an example of a site that has done this](http://linuxmint-installation-guide.readthedocs.io/en/latest/).\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Wagtail documentation build configuration file, created by\n# sphinx-quickstart on Tue Jan 14 17:38:55 2014.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nimport os\n\n\n# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\nif not on_rtd: # only import and set the theme if we're building docs locally\n import sphinx_rtd_theme\n html_theme = 'sphinx_rtd_theme'\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath('..'))\n\n# Autodoc may need to import some models modules which require django settings\n# be configured\nos.environ['DJANGO_SETTINGS_MODULE'] = 'wagtail.tests.settings'\nimport django\ndjango.setup()\n\n# Use SQLite3 database engine so it doesn't attempt to use psycopg2 on RTD\nos.environ['DATABASE_ENGINE'] = 'django.db.backends.sqlite3'\n\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n]\n\nif not on_rtd:\n extensions.append('sphinxcontrib.spelling')\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'Wagtail'\ncopyright = u'2015, Torchbox'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n\n# Get Wagtail version\nfrom wagtail import __version__, VERSION\n\n# The short X.Y version.\nversion = '{}.{}'.format(VERSION[0], VERSION[1])\n# The full version, including alpha/beta/rc tags.\nrelease = __version__\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n\n# splhinxcontrib.spelling settings\n\nspelling_lang = 'en_GB'\nspelling_word_list_filename='spelling_wordlist.txt'\n\n\n# -- Options for HTML output ----------------------------------------------\n\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = 'logo.png'\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n#html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Wagtaildoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n ('index', 'Wagtail.tex', u'Wagtail Documentation',\n u'Torchbox', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'wagtail', u'Wagtail Documentation',\n [u'Torchbox'], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'Wagtail', u'Wagtail Documentation',\n u'Torchbox', 'Wagtail', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n\n\ndef setup(app):\n app.add_stylesheet('css/custom.css')\n", "path": "docs/conf.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Wagtail documentation build configuration file, created by\n# sphinx-quickstart on Tue Jan 14 17:38:55 2014.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nimport os\n\n\n# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\nif not on_rtd: # only import and set the theme if we're building docs locally\n import sphinx_rtd_theme\n html_theme = 'sphinx_rtd_theme'\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath('..'))\n\n# Autodoc may need to import some models modules which require django settings\n# be configured\nos.environ['DJANGO_SETTINGS_MODULE'] = 'wagtail.tests.settings'\nimport django\ndjango.setup()\n\n# Use SQLite3 database engine so it doesn't attempt to use psycopg2 on RTD\nos.environ['DATABASE_ENGINE'] = 'django.db.backends.sqlite3'\n\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n]\n\nif not on_rtd:\n extensions.append('sphinxcontrib.spelling')\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'Wagtail'\ncopyright = u'2015, Torchbox'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n\n# Get Wagtail version\nfrom wagtail import __version__, VERSION\n\n# The short X.Y version.\nversion = '{}.{}'.format(VERSION[0], VERSION[1])\n# The full version, including alpha/beta/rc tags.\nrelease = __version__\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n\n# splhinxcontrib.spelling settings\n\nspelling_lang = 'en_GB'\nspelling_word_list_filename='spelling_wordlist.txt'\n\n\n# -- Options for HTML output ----------------------------------------------\n\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = 'logo.png'\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\nhtml_favicon = 'favicon.ico'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. 
These files are copied\n# directly to the root of the documentation.\n#html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Wagtaildoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n ('index', 'Wagtail.tex', u'Wagtail Documentation',\n u'Torchbox', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'wagtail', u'Wagtail Documentation',\n [u'Torchbox'], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'Wagtail', u'Wagtail Documentation',\n u'Torchbox', 'Wagtail', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n\n\ndef setup(app):\n app.add_stylesheet('css/custom.css')\n", "path": "docs/conf.py"}]} | 3,372 | 142 |
gh_patches_debug_20420 | rasdani/github-patches | git_diff | dynaconf__dynaconf-877 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[bug] Dynaconf doesn't load configuration file if cwd doesn't exist
**Describe the bug**
When the current working directory has been removed, dynaconf refuses to load configuration files.
**To Reproduce**
Steps to reproduce the behavior:
1. Having the following folder structure
<!-- Describe or use the command `$ tree -v` and paste below -->
<details>
<summary> Project structure </summary>
```bash
# /tmp/dyn.yaml
# /home/user/bug_dynaconf/
# app.py
# script.sh
```
</details>
2. Having the following config files:
<!-- Please adjust if you are using different files and formats! -->
<details>
<summary> Config files </summary>
**/tmp/dyn.yaml**
```yaml
offset: 24
```
</details>
3. Having the following app code:
<details>
<summary> Code </summary>
**app.py**
```python
from dynaconf import Dynaconf
settings = Dynaconf(
settings_files=["/tmp/dyn.yaml"]
)
print(settings.offset)
settings.validators.validate()
print(type(settings.offset))
```
</details>
4. Executing under the following environment
<details>
<summary> Execution </summary>
**script.sh**
```bash
#!/bin/bash -x
python3 -m venv venv
source venv/bin/activate
pip install dynaconf==3.1.12
PARENT=$(realpath .)
mkdir nonexistent_dir
cd nonexistent_dir
rm -r ../nonexistent_dir
python $PARENT/app.py
```
</details>
**Expected behavior**
The `app.py` script should have printed the type of `offset`, which is an `int`.
**Actual behavior**
~~~Python
Traceback (most recent call last):
File "/home/mtarral/debug_dynaconf/app.py", line 7, in <module>
print(settings.offset)
File "/home/mtarral/debug_dynaconf/venv/lib/python3.8/site-packages/dynaconf/base.py", line 138, in __getattr__
value = getattr(self._wrapped, name)
File "/home/mtarral/debug_dynaconf/venv/lib/python3.8/site-packages/dynaconf/base.py", line 300, in __getattribute__
return super().__getattribute__(name)
AttributeError: 'Settings' object has no attribute 'OFFSET'
~~~
**Environment (please complete the following information):**
- OS: ubuntu 20.04
- Dynaconf Version 3.1.12
**Additional context**
Following https://github.com/dynaconf/dynaconf/issues/853, I tried to repro with 3.1.12, and found this issue now.
Thanks for dynaconf !
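
A minimal sketch of the underlying behaviour (assuming Linux and standard CPython; the temporary-directory dance is only for illustration): once the working directory has been deleted, `os.getcwd()` itself raises `FileNotFoundError`, and `find_file()` catches that and returns an empty string before it ever reaches the `os.path.isabs(filename)` shortcut, so even the absolute `/tmp/dyn.yaml` is skipped.

```python
import os
import tempfile

# Recreate the situation from script.sh without dynaconf: chdir into a
# directory and then delete it (allowed on Linux).
doomed = tempfile.mkdtemp()
os.chdir(doomed)
os.rmdir(doomed)

try:
    os.getcwd()
except FileNotFoundError:
    # This is the exception find_file() swallows, returning "" and never
    # checking whether the requested settings file is an absolute path.
    print("os.getcwd() raises once the cwd no longer exists")
```

This suggests that checking `os.path.isabs(filename)` before calling `os.getcwd()` would keep absolute settings paths loadable even from a deleted directory.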
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dynaconf/utils/files.py`
Content:
```
1 from __future__ import annotations
2
3 import inspect
4 import io
5 import os
6
7 from dynaconf.utils import deduplicate
8
9
10 def _walk_to_root(path, break_at=None):
11 """
12 Directories starting from the given directory up to the root or break_at
13 """
14 if not os.path.exists(path): # pragma: no cover
15 raise OSError("Starting path not found")
16
17 if os.path.isfile(path): # pragma: no cover
18 path = os.path.dirname(path)
19
20 last_dir = None
21 current_dir = os.path.abspath(path)
22 paths = []
23 while last_dir != current_dir:
24 paths.append(current_dir)
25 paths.append(os.path.join(current_dir, "config"))
26 if break_at and current_dir == os.path.abspath(break_at): # noqa
27 break
28 parent_dir = os.path.abspath(os.path.join(current_dir, os.path.pardir))
29 last_dir, current_dir = current_dir, parent_dir
30 return paths
31
32
33 SEARCHTREE = []
34
35
36 def find_file(filename=".env", project_root=None, skip_files=None, **kwargs):
37 """Search in increasingly higher folders for the given file
38 Returns path to the file if found, or an empty string otherwise.
39
40 This function will build a `search_tree` based on:
41
42 - Project_root if specified
43 - Invoked script location and its parents until root
44 - Current working directory
45
46 For each path in the `search_tree` it will also look for an
47 additional `./config` folder.
48 """
49 search_tree = []
50 try:
51 work_dir = os.getcwd()
52 except FileNotFoundError:
53 return ""
54 skip_files = skip_files or []
55
56 # If filename is an absolute path and exists, just return it
57 # if the absolute path does not exist, return empty string so
58 # that it can be joined and avoid IoError
59 if os.path.isabs(filename):
60 return filename if os.path.exists(filename) else ""
61
62 if project_root is not None:
63 search_tree.extend(_walk_to_root(project_root, break_at=work_dir))
64
65 script_dir = os.path.dirname(os.path.abspath(inspect.stack()[-1].filename))
66
67 # Path to invoked script and recursively to root with its ./config dirs
68 search_tree.extend(_walk_to_root(script_dir))
69
70 # Path to where Python interpreter was invoked and recursively to root
71 search_tree.extend(_walk_to_root(work_dir))
72
73 # Don't look the same place twice
74 search_tree = deduplicate(search_tree)
75
76 global SEARCHTREE
77 SEARCHTREE[:] = search_tree
78
79 for dirname in search_tree:
80 check_path = os.path.join(dirname, filename)
81 if check_path in skip_files:
82 continue
83 if os.path.exists(check_path):
84 return check_path # First found will return
85
86 # return empty string if not found so it can still be joined in os.path
87 return ""
88
89
90 def read_file(path, **kwargs):
91 content = ""
92 with open(path, **kwargs) as open_file:
93 content = open_file.read().strip()
94 return content
95
96
97 def get_local_filename(filename):
98 """Takes a filename like `settings.toml` and returns `settings.local.toml`
99
100 Arguments:
101 filename {str} -- The filename or complete path
102
103 Returns:
104 [str] -- The same name or path with `.local.` added.
105 """
106 name, _, extension = os.path.basename(str(filename)).rpartition(
107 os.path.extsep
108 )
109
110 return os.path.join(
111 os.path.dirname(str(filename)), f"{name}.local.{extension}"
112 )
113
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dynaconf/utils/files.py b/dynaconf/utils/files.py
--- a/dynaconf/utils/files.py
+++ b/dynaconf/utils/files.py
@@ -46,6 +46,12 @@
For each path in the `search_tree` it will also look for an
additional `./config` folder.
"""
+ # If filename is an absolute path and exists, just return it
+ # if the absolute path does not exist, return empty string so
+ # that it can be joined and avoid IoError
+ if os.path.isabs(filename):
+ return filename if os.path.exists(filename) else ""
+
search_tree = []
try:
work_dir = os.getcwd()
@@ -53,12 +59,6 @@
return ""
skip_files = skip_files or []
- # If filename is an absolute path and exists, just return it
- # if the absolute path does not exist, return empty string so
- # that it can be joined and avoid IoError
- if os.path.isabs(filename):
- return filename if os.path.exists(filename) else ""
-
if project_root is not None:
search_tree.extend(_walk_to_root(project_root, break_at=work_dir))
| {"golden_diff": "diff --git a/dynaconf/utils/files.py b/dynaconf/utils/files.py\n--- a/dynaconf/utils/files.py\n+++ b/dynaconf/utils/files.py\n@@ -46,6 +46,12 @@\n For each path in the `search_tree` it will also look for an\n additional `./config` folder.\n \"\"\"\n+ # If filename is an absolute path and exists, just return it\n+ # if the absolute path does not exist, return empty string so\n+ # that it can be joined and avoid IoError\n+ if os.path.isabs(filename):\n+ return filename if os.path.exists(filename) else \"\"\n+\n search_tree = []\n try:\n work_dir = os.getcwd()\n@@ -53,12 +59,6 @@\n return \"\"\n skip_files = skip_files or []\n \n- # If filename is an absolute path and exists, just return it\n- # if the absolute path does not exist, return empty string so\n- # that it can be joined and avoid IoError\n- if os.path.isabs(filename):\n- return filename if os.path.exists(filename) else \"\"\n-\n if project_root is not None:\n search_tree.extend(_walk_to_root(project_root, break_at=work_dir))\n", "issue": "[bug] Dynaconf doesn't load configuration file if cwd doesn't exist\n**Describe the bug**\r\nWhen the current workdir directory has been removed, dynaconf refuses to load configuration files.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n\r\n1. Having the following folder structure\r\n\r\n<!-- Describe or use the command `$ tree -v` and paste below -->\r\n\r\n<details>\r\n<summary> Project structure </summary>\r\n\r\n```bash\r\n\r\n# /tmp/dyn.yaml\r\n# /home/user/bug_dynaconf/\r\n# app.py\r\n# script.sh\r\n```\r\n</details>\r\n\r\n\r\n2. Having the following config files:\r\n\r\n<!-- Please adjust if you are using different files and formats! -->\r\n\r\n<details>\r\n<summary> Config files </summary>\r\n\r\n**/tmp/dyn.yaml**\r\n```yaml\r\noffset: 24\r\n```\r\n</details>\r\n\r\n3. Having the following app code:\r\n\r\n<details>\r\n<summary> Code </summary>\r\n\r\n**app.py**\r\n```python\r\n\r\nfrom dynaconf import Dynaconf\r\n\r\nsettings = Dynaconf(\r\n settings_files=[\"/tmp/dyn.yaml\"]\r\n)\r\n\r\nprint(settings.offset)\r\n\r\nsettings.validators.validate()\r\n\r\nprint(type(settings.offset))\r\n\r\n```\r\n\r\n</details>\r\n\r\n4. 
Executing under the following environment\r\n\r\n<details>\r\n<summary> Execution </summary>\r\n\r\n**script.sh**\r\n```bash\r\n#!/bin/bash -x\r\n\r\npython3 -m venv venv\r\nsource venv/bin/activate\r\npip install dynaconf==3.1.12\r\nPARENT=$(realpath .)\r\nmkdir nonexistent_dir\r\ncd nonexistent_dir\r\nrm -r ../nonexistent_dir\r\npython $PARENT/app.py\r\n```\r\n\r\n</details>\r\n\r\n**Expected behavior**\r\nthe `app.py` should have printed the type of `offset`, which is an `int`\r\n\r\n**Actual behavior**\r\n~~~Python\r\nTraceback (most recent call last):\r\n File \"/home/mtarral/debug_dynaconf/app.py\", line 7, in <module>\r\n print(settings.offset)\r\n File \"/home/mtarral/debug_dynaconf/venv/lib/python3.8/site-packages/dynaconf/base.py\", line 138, in __getattr__\r\n value = getattr(self._wrapped, name)\r\n File \"/home/mtarral/debug_dynaconf/venv/lib/python3.8/site-packages/dynaconf/base.py\", line 300, in __getattribute__\r\n return super().__getattribute__(name)\r\nAttributeError: 'Settings' object has no attribute 'OFFSET'\r\n~~~\r\n\r\n**Environment (please complete the following information):**\r\n - OS: ubuntu 20.04\r\n - Dynaconf Version 3.1.12\r\n\r\n**Additional context**\r\nFollowing https://github.com/dynaconf/dynaconf/issues/853, I tried to repro with 3.1.12, and found this issue now.\r\n\r\nThanks for dynaconf !\r\n\n[bug] Dynaconf doesn't load configuration file if cwd doesn't exist\n**Describe the bug**\r\nWhen the current workdir directory has been removed, dynaconf refuses to load configuration files.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n\r\n1. Having the following folder structure\r\n\r\n<!-- Describe or use the command `$ tree -v` and paste below -->\r\n\r\n<details>\r\n<summary> Project structure </summary>\r\n\r\n```bash\r\n\r\n# /tmp/dyn.yaml\r\n# /home/user/bug_dynaconf/\r\n# app.py\r\n# script.sh\r\n```\r\n</details>\r\n\r\n\r\n2. Having the following config files:\r\n\r\n<!-- Please adjust if you are using different files and formats! -->\r\n\r\n<details>\r\n<summary> Config files </summary>\r\n\r\n**/tmp/dyn.yaml**\r\n```yaml\r\noffset: 24\r\n```\r\n</details>\r\n\r\n3. Having the following app code:\r\n\r\n<details>\r\n<summary> Code </summary>\r\n\r\n**app.py**\r\n```python\r\n\r\nfrom dynaconf import Dynaconf\r\n\r\nsettings = Dynaconf(\r\n settings_files=[\"/tmp/dyn.yaml\"]\r\n)\r\n\r\nprint(settings.offset)\r\n\r\nsettings.validators.validate()\r\n\r\nprint(type(settings.offset))\r\n\r\n```\r\n\r\n</details>\r\n\r\n4. 
Executing under the following environment\r\n\r\n<details>\r\n<summary> Execution </summary>\r\n\r\n**script.sh**\r\n```bash\r\n#!/bin/bash -x\r\n\r\npython3 -m venv venv\r\nsource venv/bin/activate\r\npip install dynaconf==3.1.12\r\nPARENT=$(realpath .)\r\nmkdir nonexistent_dir\r\ncd nonexistent_dir\r\nrm -r ../nonexistent_dir\r\npython $PARENT/app.py\r\n```\r\n\r\n</details>\r\n\r\n**Expected behavior**\r\nthe `app.py` should have printed the type of `offset`, which is an `int`\r\n\r\n**Actual behavior**\r\n~~~Python\r\nTraceback (most recent call last):\r\n File \"/home/mtarral/debug_dynaconf/app.py\", line 7, in <module>\r\n print(settings.offset)\r\n File \"/home/mtarral/debug_dynaconf/venv/lib/python3.8/site-packages/dynaconf/base.py\", line 138, in __getattr__\r\n value = getattr(self._wrapped, name)\r\n File \"/home/mtarral/debug_dynaconf/venv/lib/python3.8/site-packages/dynaconf/base.py\", line 300, in __getattribute__\r\n return super().__getattribute__(name)\r\nAttributeError: 'Settings' object has no attribute 'OFFSET'\r\n~~~\r\n\r\n**Environment (please complete the following information):**\r\n - OS: ubuntu 20.04\r\n - Dynaconf Version 3.1.12\r\n\r\n**Additional context**\r\nFollowing https://github.com/dynaconf/dynaconf/issues/853, I tried to repro with 3.1.12, and found this issue now.\r\n\r\nThanks for dynaconf !\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport inspect\nimport io\nimport os\n\nfrom dynaconf.utils import deduplicate\n\n\ndef _walk_to_root(path, break_at=None):\n \"\"\"\n Directories starting from the given directory up to the root or break_at\n \"\"\"\n if not os.path.exists(path): # pragma: no cover\n raise OSError(\"Starting path not found\")\n\n if os.path.isfile(path): # pragma: no cover\n path = os.path.dirname(path)\n\n last_dir = None\n current_dir = os.path.abspath(path)\n paths = []\n while last_dir != current_dir:\n paths.append(current_dir)\n paths.append(os.path.join(current_dir, \"config\"))\n if break_at and current_dir == os.path.abspath(break_at): # noqa\n break\n parent_dir = os.path.abspath(os.path.join(current_dir, os.path.pardir))\n last_dir, current_dir = current_dir, parent_dir\n return paths\n\n\nSEARCHTREE = []\n\n\ndef find_file(filename=\".env\", project_root=None, skip_files=None, **kwargs):\n \"\"\"Search in increasingly higher folders for the given file\n Returns path to the file if found, or an empty string otherwise.\n\n This function will build a `search_tree` based on:\n\n - Project_root if specified\n - Invoked script location and its parents until root\n - Current working directory\n\n For each path in the `search_tree` it will also look for an\n additional `./config` folder.\n \"\"\"\n search_tree = []\n try:\n work_dir = os.getcwd()\n except FileNotFoundError:\n return \"\"\n skip_files = skip_files or []\n\n # If filename is an absolute path and exists, just return it\n # if the absolute path does not exist, return empty string so\n # that it can be joined and avoid IoError\n if os.path.isabs(filename):\n return filename if os.path.exists(filename) else \"\"\n\n if project_root is not None:\n search_tree.extend(_walk_to_root(project_root, break_at=work_dir))\n\n script_dir = os.path.dirname(os.path.abspath(inspect.stack()[-1].filename))\n\n # Path to invoked script and recursively to root with its ./config dirs\n search_tree.extend(_walk_to_root(script_dir))\n\n # Path to where Python interpreter was invoked and recursively to root\n search_tree.extend(_walk_to_root(work_dir))\n\n # 
Don't look the same place twice\n search_tree = deduplicate(search_tree)\n\n global SEARCHTREE\n SEARCHTREE[:] = search_tree\n\n for dirname in search_tree:\n check_path = os.path.join(dirname, filename)\n if check_path in skip_files:\n continue\n if os.path.exists(check_path):\n return check_path # First found will return\n\n # return empty string if not found so it can still be joined in os.path\n return \"\"\n\n\ndef read_file(path, **kwargs):\n content = \"\"\n with open(path, **kwargs) as open_file:\n content = open_file.read().strip()\n return content\n\n\ndef get_local_filename(filename):\n \"\"\"Takes a filename like `settings.toml` and returns `settings.local.toml`\n\n Arguments:\n filename {str} -- The filename or complete path\n\n Returns:\n [str] -- The same name or path with `.local.` added.\n \"\"\"\n name, _, extension = os.path.basename(str(filename)).rpartition(\n os.path.extsep\n )\n\n return os.path.join(\n os.path.dirname(str(filename)), f\"{name}.local.{extension}\"\n )\n", "path": "dynaconf/utils/files.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport inspect\nimport io\nimport os\n\nfrom dynaconf.utils import deduplicate\n\n\ndef _walk_to_root(path, break_at=None):\n \"\"\"\n Directories starting from the given directory up to the root or break_at\n \"\"\"\n if not os.path.exists(path): # pragma: no cover\n raise OSError(\"Starting path not found\")\n\n if os.path.isfile(path): # pragma: no cover\n path = os.path.dirname(path)\n\n last_dir = None\n current_dir = os.path.abspath(path)\n paths = []\n while last_dir != current_dir:\n paths.append(current_dir)\n paths.append(os.path.join(current_dir, \"config\"))\n if break_at and current_dir == os.path.abspath(break_at): # noqa\n break\n parent_dir = os.path.abspath(os.path.join(current_dir, os.path.pardir))\n last_dir, current_dir = current_dir, parent_dir\n return paths\n\n\nSEARCHTREE = []\n\n\ndef find_file(filename=\".env\", project_root=None, skip_files=None, **kwargs):\n \"\"\"Search in increasingly higher folders for the given file\n Returns path to the file if found, or an empty string otherwise.\n\n This function will build a `search_tree` based on:\n\n - Project_root if specified\n - Invoked script location and its parents until root\n - Current working directory\n\n For each path in the `search_tree` it will also look for an\n additional `./config` folder.\n \"\"\"\n # If filename is an absolute path and exists, just return it\n # if the absolute path does not exist, return empty string so\n # that it can be joined and avoid IoError\n if os.path.isabs(filename):\n return filename if os.path.exists(filename) else \"\"\n\n search_tree = []\n try:\n work_dir = os.getcwd()\n except FileNotFoundError:\n return \"\"\n skip_files = skip_files or []\n\n if project_root is not None:\n search_tree.extend(_walk_to_root(project_root, break_at=work_dir))\n\n script_dir = os.path.dirname(os.path.abspath(inspect.stack()[-1].filename))\n\n # Path to invoked script and recursively to root with its ./config dirs\n search_tree.extend(_walk_to_root(script_dir))\n\n # Path to where Python interpreter was invoked and recursively to root\n search_tree.extend(_walk_to_root(work_dir))\n\n # Don't look the same place twice\n search_tree = deduplicate(search_tree)\n\n global SEARCHTREE\n SEARCHTREE[:] = search_tree\n\n for dirname in search_tree:\n check_path = os.path.join(dirname, filename)\n if check_path in skip_files:\n continue\n if os.path.exists(check_path):\n return check_path # First found will 
return\n\n # return empty string if not found so it can still be joined in os.path\n return \"\"\n\n\ndef read_file(path, **kwargs):\n content = \"\"\n with open(path, **kwargs) as open_file:\n content = open_file.read().strip()\n return content\n\n\ndef get_local_filename(filename):\n \"\"\"Takes a filename like `settings.toml` and returns `settings.local.toml`\n\n Arguments:\n filename {str} -- The filename or complete path\n\n Returns:\n [str] -- The same name or path with `.local.` added.\n \"\"\"\n name, _, extension = os.path.basename(str(filename)).rpartition(\n os.path.extsep\n )\n\n return os.path.join(\n os.path.dirname(str(filename)), f\"{name}.local.{extension}\"\n )\n", "path": "dynaconf/utils/files.py"}]} | 2,475 | 275 |
gh_patches_debug_12875 | rasdani/github-patches | git_diff | ray-project__ray-7114 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Turn off OMP_NUM_THREADS warnings?
Can we just turn off the warnings on each ray.init? We can't force everyone to set the environment variable.
https://github.com/ray-project/ray/blob/3f99be8dad5e0e1abfaede1f25753a0af74f1648/python/ray/__init__.py#L16-L21
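
A user-side sketch in the meantime (assuming only the snippet linked above and the standard `logging` module; nothing here is ray-specific API beyond `ray.init()`): the message comes from the module-level `ray` logger at import time, so either pre-setting the variable or raising that logger's level before `import ray` silences it.

```python
import logging
import os

# Both knobs must be set before `import ray`, because the warning fires
# during import when OMP_NUM_THREADS is not already in the environment.

# Option 1: set the variable yourself so ray's check is skipped entirely.
os.environ.setdefault("OMP_NUM_THREADS", "1")

# Option 2: filter the "ray" logger above WARNING.
logging.getLogger("ray").setLevel(logging.ERROR)

import ray  # noqa: E402

ray.init()
```

Either route avoids patching ray itself; downgrading the message to `logger.debug` inside `ray/__init__.py` would make the workaround unnecessary for everyone.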
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/ray/__init__.py`
Content:
```
1 import os
2 import logging
3 from os.path import dirname
4 import sys
5
6 logger = logging.getLogger(__name__)
7
8 # MUST add pickle5 to the import path because it will be imported by some
9 # raylet modules.
10
11 if "pickle5" in sys.modules:
12 raise ImportError("Ray must be imported before pickle5 because Ray "
13 "requires a specific version of pickle5 (which is "
14 "packaged along with Ray).")
15
16 if "OMP_NUM_THREADS" not in os.environ:
17 logger.warning("[ray] Forcing OMP_NUM_THREADS=1 to avoid performance "
18 "degradation with many workers (issue #6998). You can "
19 "override this by explicitly setting OMP_NUM_THREADS.")
20 os.environ["OMP_NUM_THREADS"] = "1"
21
22 # Add the directory containing pickle5 to the Python path so that we find the
23 # pickle5 version packaged with ray and not a pre-existing pickle5.
24 pickle5_path = os.path.join(
25 os.path.abspath(os.path.dirname(__file__)), "pickle5_files")
26 sys.path.insert(0, pickle5_path)
27
28 # Importing psutil & setproctitle. Must be before ray._raylet is initialized.
29 thirdparty_files = os.path.join(
30 os.path.abspath(os.path.dirname(__file__)), "thirdparty_files")
31 sys.path.insert(0, thirdparty_files)
32
33 # Expose ray ABI symbols which may be dependent by other shared
34 # libraries such as _streaming.so. See BUILD.bazel:_raylet
35 so_path = os.path.join(dirname(__file__), "_raylet.so")
36 if os.path.exists(so_path):
37 import ctypes
38 from ctypes import CDLL
39 CDLL(so_path, ctypes.RTLD_GLOBAL)
40
41 # MUST import ray._raylet before pyarrow to initialize some global variables.
42 # It seems the library related to memory allocation in pyarrow will destroy the
43 # initialization of grpc if we import pyarrow at first.
44 # NOTE(JoeyJiang): See https://github.com/ray-project/ray/issues/5219 for more
45 # details.
46 import ray._raylet # noqa: E402
47
48 if "pyarrow" in sys.modules:
49 raise ImportError("Ray must be imported before pyarrow because Ray "
50 "requires a specific version of pyarrow (which is "
51 "packaged along with Ray).")
52
53 # Add the directory containing pyarrow to the Python path so that we find the
54 # pyarrow version packaged with ray and not a pre-existing pyarrow.
55 pyarrow_path = os.path.join(
56 os.path.abspath(os.path.dirname(__file__)), "pyarrow_files")
57 sys.path.insert(0, pyarrow_path)
58
59 # See https://github.com/ray-project/ray/issues/131.
60 helpful_message = """
61
62 If you are using Anaconda, try fixing this problem by running:
63
64 conda install libgcc
65 """
66
67 try:
68 import pyarrow # noqa: F401
69
70 # pyarrow is not imported inside of _raylet because of the issue described
71 # above. In order for Cython to compile _raylet, pyarrow is set to None
72 # in _raylet instead, so we give _raylet a real reference to it here.
73 # We first do the attribute checks here so that building the documentation
74 # succeeds without fully installing ray..
75 # TODO(edoakes): Fix this.
76 if hasattr(ray, "_raylet") and hasattr(ray._raylet, "pyarrow"):
77 ray._raylet.pyarrow = pyarrow
78 except ImportError as e:
79 if ((hasattr(e, "msg") and isinstance(e.msg, str)
80 and ("libstdc++" in e.msg or "CXX" in e.msg))):
81 # This code path should be taken with Python 3.
82 e.msg += helpful_message
83 elif (hasattr(e, "message") and isinstance(e.message, str)
84 and ("libstdc++" in e.message or "CXX" in e.message)):
85 # This code path should be taken with Python 2.
86 condition = (hasattr(e, "args") and isinstance(e.args, tuple)
87 and len(e.args) == 1 and isinstance(e.args[0], str))
88 if condition:
89 e.args = (e.args[0] + helpful_message, )
90 else:
91 if not hasattr(e, "args"):
92 e.args = ()
93 elif not isinstance(e.args, tuple):
94 e.args = (e.args, )
95 e.args += (helpful_message, )
96 raise
97
98 from ray._raylet import (
99 ActorCheckpointID,
100 ActorClassID,
101 ActorID,
102 ClientID,
103 Config as _Config,
104 JobID,
105 WorkerID,
106 FunctionID,
107 ObjectID,
108 TaskID,
109 UniqueID,
110 Language,
111 ) # noqa: E402
112
113 _config = _Config()
114
115 from ray.profiling import profile # noqa: E402
116 from ray.state import (jobs, nodes, actors, tasks, objects, timeline,
117 object_transfer_timeline, cluster_resources,
118 available_resources, errors) # noqa: E402
119 from ray.worker import (
120 LOCAL_MODE,
121 SCRIPT_MODE,
122 WORKER_MODE,
123 connect,
124 disconnect,
125 get,
126 get_gpu_ids,
127 get_resource_ids,
128 get_webui_url,
129 init,
130 is_initialized,
131 put,
132 register_custom_serializer,
133 remote,
134 shutdown,
135 show_in_webui,
136 wait,
137 ) # noqa: E402
138 import ray.internal # noqa: E402
139 import ray.projects # noqa: E402
140 # We import ray.actor because some code is run in actor.py which initializes
141 # some functions in the worker.
142 import ray.actor # noqa: F401
143 from ray.actor import method # noqa: E402
144 from ray.runtime_context import _get_runtime_context # noqa: E402
145 from ray.cross_language import java_function, java_actor_class # noqa: E402
146
147 # Ray version string.
148 __version__ = "0.9.0.dev0"
149
150 __all__ = [
151 "jobs",
152 "nodes",
153 "actors",
154 "tasks",
155 "objects",
156 "timeline",
157 "object_transfer_timeline",
158 "cluster_resources",
159 "available_resources",
160 "errors",
161 "LOCAL_MODE",
162 "PYTHON_MODE",
163 "SCRIPT_MODE",
164 "WORKER_MODE",
165 "__version__",
166 "_config",
167 "_get_runtime_context",
168 "actor",
169 "connect",
170 "disconnect",
171 "get",
172 "get_gpu_ids",
173 "get_resource_ids",
174 "get_webui_url",
175 "init",
176 "internal",
177 "is_initialized",
178 "method",
179 "profile",
180 "projects",
181 "put",
182 "register_custom_serializer",
183 "remote",
184 "shutdown",
185 "show_in_webui",
186 "wait",
187 "Language",
188 "java_function",
189 "java_actor_class",
190 ]
191
192 # ID types
193 __all__ += [
194 "ActorCheckpointID",
195 "ActorClassID",
196 "ActorID",
197 "ClientID",
198 "JobID",
199 "WorkerID",
200 "FunctionID",
201 "ObjectID",
202 "TaskID",
203 "UniqueID",
204 ]
205
206 import ctypes # noqa: E402
207 # Windows only
208 if hasattr(ctypes, "windll"):
209 # Makes sure that all child processes die when we die. Also makes sure that
210 # fatal crashes result in process termination rather than an error dialog
211 # (the latter is annoying since we have a lot of processes). This is done
212 # by associating all child processes with a "job" object that imposes this
213 # behavior.
214 (lambda kernel32: (lambda job: (lambda n: kernel32.SetInformationJobObject(job, 9, "\0" * 17 + chr(0x8 | 0x4 | 0x20) + "\0" * (n - 18), n))(0x90 if ctypes.sizeof(ctypes.c_void_p) > ctypes.sizeof(ctypes.c_int) else 0x70) and kernel32.AssignProcessToJobObject(job, ctypes.c_void_p(kernel32.GetCurrentProcess())))(ctypes.c_void_p(kernel32.CreateJobObjectW(None, None))) if kernel32 is not None else None)(ctypes.windll.kernel32) # noqa: E501
215
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/python/ray/__init__.py b/python/ray/__init__.py
--- a/python/ray/__init__.py
+++ b/python/ray/__init__.py
@@ -14,9 +14,9 @@
"packaged along with Ray).")
if "OMP_NUM_THREADS" not in os.environ:
- logger.warning("[ray] Forcing OMP_NUM_THREADS=1 to avoid performance "
- "degradation with many workers (issue #6998). You can "
- "override this by explicitly setting OMP_NUM_THREADS.")
+ logger.debug("[ray] Forcing OMP_NUM_THREADS=1 to avoid performance "
+ "degradation with many workers (issue #6998). You can "
+ "override this by explicitly setting OMP_NUM_THREADS.")
os.environ["OMP_NUM_THREADS"] = "1"
# Add the directory containing pickle5 to the Python path so that we find the
| {"golden_diff": "diff --git a/python/ray/__init__.py b/python/ray/__init__.py\n--- a/python/ray/__init__.py\n+++ b/python/ray/__init__.py\n@@ -14,9 +14,9 @@\n \"packaged along with Ray).\")\n \n if \"OMP_NUM_THREADS\" not in os.environ:\n- logger.warning(\"[ray] Forcing OMP_NUM_THREADS=1 to avoid performance \"\n- \"degradation with many workers (issue #6998). You can \"\n- \"override this by explicitly setting OMP_NUM_THREADS.\")\n+ logger.debug(\"[ray] Forcing OMP_NUM_THREADS=1 to avoid performance \"\n+ \"degradation with many workers (issue #6998). You can \"\n+ \"override this by explicitly setting OMP_NUM_THREADS.\")\n os.environ[\"OMP_NUM_THREADS\"] = \"1\"\n \n # Add the directory containing pickle5 to the Python path so that we find the\n", "issue": "Turn off OMP_NUM_THREADS warnings? \nCan we just turn off the warnings on each ray.init? We can't force everyone to set the environment variable. \r\n\r\nhttps://github.com/ray-project/ray/blob/3f99be8dad5e0e1abfaede1f25753a0af74f1648/python/ray/__init__.py#L16-L21\n", "before_files": [{"content": "import os\nimport logging\nfrom os.path import dirname\nimport sys\n\nlogger = logging.getLogger(__name__)\n\n# MUST add pickle5 to the import path because it will be imported by some\n# raylet modules.\n\nif \"pickle5\" in sys.modules:\n raise ImportError(\"Ray must be imported before pickle5 because Ray \"\n \"requires a specific version of pickle5 (which is \"\n \"packaged along with Ray).\")\n\nif \"OMP_NUM_THREADS\" not in os.environ:\n logger.warning(\"[ray] Forcing OMP_NUM_THREADS=1 to avoid performance \"\n \"degradation with many workers (issue #6998). You can \"\n \"override this by explicitly setting OMP_NUM_THREADS.\")\n os.environ[\"OMP_NUM_THREADS\"] = \"1\"\n\n# Add the directory containing pickle5 to the Python path so that we find the\n# pickle5 version packaged with ray and not a pre-existing pickle5.\npickle5_path = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), \"pickle5_files\")\nsys.path.insert(0, pickle5_path)\n\n# Importing psutil & setproctitle. Must be before ray._raylet is initialized.\nthirdparty_files = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), \"thirdparty_files\")\nsys.path.insert(0, thirdparty_files)\n\n# Expose ray ABI symbols which may be dependent by other shared\n# libraries such as _streaming.so. 
See BUILD.bazel:_raylet\nso_path = os.path.join(dirname(__file__), \"_raylet.so\")\nif os.path.exists(so_path):\n import ctypes\n from ctypes import CDLL\n CDLL(so_path, ctypes.RTLD_GLOBAL)\n\n# MUST import ray._raylet before pyarrow to initialize some global variables.\n# It seems the library related to memory allocation in pyarrow will destroy the\n# initialization of grpc if we import pyarrow at first.\n# NOTE(JoeyJiang): See https://github.com/ray-project/ray/issues/5219 for more\n# details.\nimport ray._raylet # noqa: E402\n\nif \"pyarrow\" in sys.modules:\n raise ImportError(\"Ray must be imported before pyarrow because Ray \"\n \"requires a specific version of pyarrow (which is \"\n \"packaged along with Ray).\")\n\n# Add the directory containing pyarrow to the Python path so that we find the\n# pyarrow version packaged with ray and not a pre-existing pyarrow.\npyarrow_path = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), \"pyarrow_files\")\nsys.path.insert(0, pyarrow_path)\n\n# See https://github.com/ray-project/ray/issues/131.\nhelpful_message = \"\"\"\n\nIf you are using Anaconda, try fixing this problem by running:\n\n conda install libgcc\n\"\"\"\n\ntry:\n import pyarrow # noqa: F401\n\n # pyarrow is not imported inside of _raylet because of the issue described\n # above. In order for Cython to compile _raylet, pyarrow is set to None\n # in _raylet instead, so we give _raylet a real reference to it here.\n # We first do the attribute checks here so that building the documentation\n # succeeds without fully installing ray..\n # TODO(edoakes): Fix this.\n if hasattr(ray, \"_raylet\") and hasattr(ray._raylet, \"pyarrow\"):\n ray._raylet.pyarrow = pyarrow\nexcept ImportError as e:\n if ((hasattr(e, \"msg\") and isinstance(e.msg, str)\n and (\"libstdc++\" in e.msg or \"CXX\" in e.msg))):\n # This code path should be taken with Python 3.\n e.msg += helpful_message\n elif (hasattr(e, \"message\") and isinstance(e.message, str)\n and (\"libstdc++\" in e.message or \"CXX\" in e.message)):\n # This code path should be taken with Python 2.\n condition = (hasattr(e, \"args\") and isinstance(e.args, tuple)\n and len(e.args) == 1 and isinstance(e.args[0], str))\n if condition:\n e.args = (e.args[0] + helpful_message, )\n else:\n if not hasattr(e, \"args\"):\n e.args = ()\n elif not isinstance(e.args, tuple):\n e.args = (e.args, )\n e.args += (helpful_message, )\n raise\n\nfrom ray._raylet import (\n ActorCheckpointID,\n ActorClassID,\n ActorID,\n ClientID,\n Config as _Config,\n JobID,\n WorkerID,\n FunctionID,\n ObjectID,\n TaskID,\n UniqueID,\n Language,\n) # noqa: E402\n\n_config = _Config()\n\nfrom ray.profiling import profile # noqa: E402\nfrom ray.state import (jobs, nodes, actors, tasks, objects, timeline,\n object_transfer_timeline, cluster_resources,\n available_resources, errors) # noqa: E402\nfrom ray.worker import (\n LOCAL_MODE,\n SCRIPT_MODE,\n WORKER_MODE,\n connect,\n disconnect,\n get,\n get_gpu_ids,\n get_resource_ids,\n get_webui_url,\n init,\n is_initialized,\n put,\n register_custom_serializer,\n remote,\n shutdown,\n show_in_webui,\n wait,\n) # noqa: E402\nimport ray.internal # noqa: E402\nimport ray.projects # noqa: E402\n# We import ray.actor because some code is run in actor.py which initializes\n# some functions in the worker.\nimport ray.actor # noqa: F401\nfrom ray.actor import method # noqa: E402\nfrom ray.runtime_context import _get_runtime_context # noqa: E402\nfrom ray.cross_language import java_function, java_actor_class # noqa: E402\n\n# Ray 
version string.\n__version__ = \"0.9.0.dev0\"\n\n__all__ = [\n \"jobs\",\n \"nodes\",\n \"actors\",\n \"tasks\",\n \"objects\",\n \"timeline\",\n \"object_transfer_timeline\",\n \"cluster_resources\",\n \"available_resources\",\n \"errors\",\n \"LOCAL_MODE\",\n \"PYTHON_MODE\",\n \"SCRIPT_MODE\",\n \"WORKER_MODE\",\n \"__version__\",\n \"_config\",\n \"_get_runtime_context\",\n \"actor\",\n \"connect\",\n \"disconnect\",\n \"get\",\n \"get_gpu_ids\",\n \"get_resource_ids\",\n \"get_webui_url\",\n \"init\",\n \"internal\",\n \"is_initialized\",\n \"method\",\n \"profile\",\n \"projects\",\n \"put\",\n \"register_custom_serializer\",\n \"remote\",\n \"shutdown\",\n \"show_in_webui\",\n \"wait\",\n \"Language\",\n \"java_function\",\n \"java_actor_class\",\n]\n\n# ID types\n__all__ += [\n \"ActorCheckpointID\",\n \"ActorClassID\",\n \"ActorID\",\n \"ClientID\",\n \"JobID\",\n \"WorkerID\",\n \"FunctionID\",\n \"ObjectID\",\n \"TaskID\",\n \"UniqueID\",\n]\n\nimport ctypes # noqa: E402\n# Windows only\nif hasattr(ctypes, \"windll\"):\n # Makes sure that all child processes die when we die. Also makes sure that\n # fatal crashes result in process termination rather than an error dialog\n # (the latter is annoying since we have a lot of processes). This is done\n # by associating all child processes with a \"job\" object that imposes this\n # behavior.\n (lambda kernel32: (lambda job: (lambda n: kernel32.SetInformationJobObject(job, 9, \"\\0\" * 17 + chr(0x8 | 0x4 | 0x20) + \"\\0\" * (n - 18), n))(0x90 if ctypes.sizeof(ctypes.c_void_p) > ctypes.sizeof(ctypes.c_int) else 0x70) and kernel32.AssignProcessToJobObject(job, ctypes.c_void_p(kernel32.GetCurrentProcess())))(ctypes.c_void_p(kernel32.CreateJobObjectW(None, None))) if kernel32 is not None else None)(ctypes.windll.kernel32) # noqa: E501\n", "path": "python/ray/__init__.py"}], "after_files": [{"content": "import os\nimport logging\nfrom os.path import dirname\nimport sys\n\nlogger = logging.getLogger(__name__)\n\n# MUST add pickle5 to the import path because it will be imported by some\n# raylet modules.\n\nif \"pickle5\" in sys.modules:\n raise ImportError(\"Ray must be imported before pickle5 because Ray \"\n \"requires a specific version of pickle5 (which is \"\n \"packaged along with Ray).\")\n\nif \"OMP_NUM_THREADS\" not in os.environ:\n logger.debug(\"[ray] Forcing OMP_NUM_THREADS=1 to avoid performance \"\n \"degradation with many workers (issue #6998). You can \"\n \"override this by explicitly setting OMP_NUM_THREADS.\")\n os.environ[\"OMP_NUM_THREADS\"] = \"1\"\n\n# Add the directory containing pickle5 to the Python path so that we find the\n# pickle5 version packaged with ray and not a pre-existing pickle5.\npickle5_path = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), \"pickle5_files\")\nsys.path.insert(0, pickle5_path)\n\n# Importing psutil & setproctitle. Must be before ray._raylet is initialized.\nthirdparty_files = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), \"thirdparty_files\")\nsys.path.insert(0, thirdparty_files)\n\n# Expose ray ABI symbols which may be dependent by other shared\n# libraries such as _streaming.so. 
See BUILD.bazel:_raylet\nso_path = os.path.join(dirname(__file__), \"_raylet.so\")\nif os.path.exists(so_path):\n import ctypes\n from ctypes import CDLL\n CDLL(so_path, ctypes.RTLD_GLOBAL)\n\n# MUST import ray._raylet before pyarrow to initialize some global variables.\n# It seems the library related to memory allocation in pyarrow will destroy the\n# initialization of grpc if we import pyarrow at first.\n# NOTE(JoeyJiang): See https://github.com/ray-project/ray/issues/5219 for more\n# details.\nimport ray._raylet # noqa: E402\n\nif \"pyarrow\" in sys.modules:\n raise ImportError(\"Ray must be imported before pyarrow because Ray \"\n \"requires a specific version of pyarrow (which is \"\n \"packaged along with Ray).\")\n\n# Add the directory containing pyarrow to the Python path so that we find the\n# pyarrow version packaged with ray and not a pre-existing pyarrow.\npyarrow_path = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), \"pyarrow_files\")\nsys.path.insert(0, pyarrow_path)\n\n# See https://github.com/ray-project/ray/issues/131.\nhelpful_message = \"\"\"\n\nIf you are using Anaconda, try fixing this problem by running:\n\n conda install libgcc\n\"\"\"\n\ntry:\n import pyarrow # noqa: F401\n\n # pyarrow is not imported inside of _raylet because of the issue described\n # above. In order for Cython to compile _raylet, pyarrow is set to None\n # in _raylet instead, so we give _raylet a real reference to it here.\n # We first do the attribute checks here so that building the documentation\n # succeeds without fully installing ray..\n # TODO(edoakes): Fix this.\n if hasattr(ray, \"_raylet\") and hasattr(ray._raylet, \"pyarrow\"):\n ray._raylet.pyarrow = pyarrow\nexcept ImportError as e:\n if ((hasattr(e, \"msg\") and isinstance(e.msg, str)\n and (\"libstdc++\" in e.msg or \"CXX\" in e.msg))):\n # This code path should be taken with Python 3.\n e.msg += helpful_message\n elif (hasattr(e, \"message\") and isinstance(e.message, str)\n and (\"libstdc++\" in e.message or \"CXX\" in e.message)):\n # This code path should be taken with Python 2.\n condition = (hasattr(e, \"args\") and isinstance(e.args, tuple)\n and len(e.args) == 1 and isinstance(e.args[0], str))\n if condition:\n e.args = (e.args[0] + helpful_message, )\n else:\n if not hasattr(e, \"args\"):\n e.args = ()\n elif not isinstance(e.args, tuple):\n e.args = (e.args, )\n e.args += (helpful_message, )\n raise\n\nfrom ray._raylet import (\n ActorCheckpointID,\n ActorClassID,\n ActorID,\n ClientID,\n Config as _Config,\n JobID,\n WorkerID,\n FunctionID,\n ObjectID,\n TaskID,\n UniqueID,\n Language,\n) # noqa: E402\n\n_config = _Config()\n\nfrom ray.profiling import profile # noqa: E402\nfrom ray.state import (jobs, nodes, actors, tasks, objects, timeline,\n object_transfer_timeline, cluster_resources,\n available_resources, errors) # noqa: E402\nfrom ray.worker import (\n LOCAL_MODE,\n SCRIPT_MODE,\n WORKER_MODE,\n connect,\n disconnect,\n get,\n get_gpu_ids,\n get_resource_ids,\n get_webui_url,\n init,\n is_initialized,\n put,\n register_custom_serializer,\n remote,\n shutdown,\n show_in_webui,\n wait,\n) # noqa: E402\nimport ray.internal # noqa: E402\nimport ray.projects # noqa: E402\n# We import ray.actor because some code is run in actor.py which initializes\n# some functions in the worker.\nimport ray.actor # noqa: F401\nfrom ray.actor import method # noqa: E402\nfrom ray.runtime_context import _get_runtime_context # noqa: E402\nfrom ray.cross_language import java_function, java_actor_class # noqa: E402\n\n# Ray 
version string.\n__version__ = \"0.9.0.dev0\"\n\n__all__ = [\n \"jobs\",\n \"nodes\",\n \"actors\",\n \"tasks\",\n \"objects\",\n \"timeline\",\n \"object_transfer_timeline\",\n \"cluster_resources\",\n \"available_resources\",\n \"errors\",\n \"LOCAL_MODE\",\n \"PYTHON_MODE\",\n \"SCRIPT_MODE\",\n \"WORKER_MODE\",\n \"__version__\",\n \"_config\",\n \"_get_runtime_context\",\n \"actor\",\n \"connect\",\n \"disconnect\",\n \"get\",\n \"get_gpu_ids\",\n \"get_resource_ids\",\n \"get_webui_url\",\n \"init\",\n \"internal\",\n \"is_initialized\",\n \"method\",\n \"profile\",\n \"projects\",\n \"put\",\n \"register_custom_serializer\",\n \"remote\",\n \"shutdown\",\n \"show_in_webui\",\n \"wait\",\n \"Language\",\n \"java_function\",\n \"java_actor_class\",\n]\n\n# ID types\n__all__ += [\n \"ActorCheckpointID\",\n \"ActorClassID\",\n \"ActorID\",\n \"ClientID\",\n \"JobID\",\n \"WorkerID\",\n \"FunctionID\",\n \"ObjectID\",\n \"TaskID\",\n \"UniqueID\",\n]\n\nimport ctypes # noqa: E402\n# Windows only\nif hasattr(ctypes, \"windll\"):\n # Makes sure that all child processes die when we die. Also makes sure that\n # fatal crashes result in process termination rather than an error dialog\n # (the latter is annoying since we have a lot of processes). This is done\n # by associating all child processes with a \"job\" object that imposes this\n # behavior.\n (lambda kernel32: (lambda job: (lambda n: kernel32.SetInformationJobObject(job, 9, \"\\0\" * 17 + chr(0x8 | 0x4 | 0x20) + \"\\0\" * (n - 18), n))(0x90 if ctypes.sizeof(ctypes.c_void_p) > ctypes.sizeof(ctypes.c_int) else 0x70) and kernel32.AssignProcessToJobObject(job, ctypes.c_void_p(kernel32.GetCurrentProcess())))(ctypes.c_void_p(kernel32.CreateJobObjectW(None, None))) if kernel32 is not None else None)(ctypes.windll.kernel32) # noqa: E501\n", "path": "python/ray/__init__.py"}]} | 2,715 | 205 |
gh_patches_debug_9754 | rasdani/github-patches | git_diff | graspologic-org__graspologic-968 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] mug2vec assumes undirected graphs but doesn't check for this
## Expected Behavior
Should run on a set of directed graphs
## Actual Behavior
Breaks on this line
https://github.com/microsoft/graspologic/blob/2125f27bc3f2739f4f2c784d5b700417df63c5d7/graspologic/embed/mug2vec.py#L141
since `omni_embedding` is a tuple
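For illustration, here is a minimal sketch of the return-type difference behind this failure. The random directed graphs below are hypothetical, and the exact output shapes depend on graspologic's `OmnibusEmbed` API:

```python
import numpy as np
from graspologic.embed import OmnibusEmbed

rng = np.random.default_rng(0)
graphs = []
for _ in range(3):
    g = rng.integers(0, 2, size=(20, 20)).astype(float)
    np.fill_diagonal(g, 0)  # asymmetric, loopless adjacency matrices, i.e. directed graphs
    graphs.append(g)

out = OmnibusEmbed(n_elbows=2).fit_transform(graphs)
print(type(out))  # tuple of (left, right) embeddings for directed input,
                  # so `omnibus_embedding.shape[-1]` in mug2vec fails

out = OmnibusEmbed(n_elbows=2, concat=True).fit_transform(graphs)
print(out.shape)  # a single ndarray once concat=True is passed
```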
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `graspologic/embed/mug2vec.py`
Content:
```
1 # Copyright (c) Microsoft Corporation and contributors.
2 # Licensed under the MIT License.
3
4 from typing import Any, Optional
5
6 import numpy as np
7 from sklearn.base import BaseEstimator
8 from typing_extensions import Literal
9
10 from graspologic.types import List
11
12 from ..types import GraphRepresentation
13 from ..utils import pass_to_ranks
14 from .mds import ClassicalMDS
15 from .omni import OmnibusEmbed
16
17
18 class mug2vec(BaseEstimator):
19 r"""
20 Multigraphs-2-vectors (mug2vec).
21
22 mug2vec is a sequence of three algorithms that learns a feature vector for each
23 input graph.
24
25 Steps:
26
27 1. Pass to ranks - ranks all edge weights from smallest to largest valued edges
28 then normalize by a constant.
29
30 2. Omnibus embedding - jointly learns a low dimensional matrix representation for
31 all graphs under the random dot product model (RDPG).
32
33 3. Classical MDS (cMDS) - learns a feature vector for each graph by computing
34 Euclidean distance between each pair of graph embeddings from omnibus embedding,
35 followed by an eigen decomposition.
36
37 Parameters
38 ----------
39 pass_to_ranks: {'simple-nonzero' (default), 'simple-all', 'zero-boost'} string, or None
40
41 - 'simple-nonzero'
42 assigns ranks to all non-zero edges, settling ties using
43 the average. Ranks are then scaled by
44 :math:`\frac{rank(\text{non-zero edges})}{\text{total non-zero edges} + 1}`
45 - 'simple-all'
46 assigns ranks to all non-zero edges, settling ties using
47 the average. Ranks are then scaled by
48 :math:`\frac{rank(\text{non-zero edges})}{n^2 + 1}`
49 where n is the number of nodes
50 - 'zero-boost'
51 preserves the edge weight for all 0s, but ranks the other
52 edges as if the ranks of all 0 edges has been assigned. If there are
53 10 0-valued edges, the lowest non-zero edge gets weight 11 / (number
54 of possible edges). Ties settled by the average of the weight that those
55 edges would have received. Number of possible edges is determined
56 by the type of graph (loopless or looped, directed or undirected).
57 - None
58 No pass to ranks applied.
59
60 omnibus_components, cmds_components : int or None, default = None
61 Desired dimensionality of output data. If "full",
62 ``n_components`` must be ``<= min(X.shape)``. Otherwise, ``n_components`` must be
63 ``< min(X.shape)``. If None, then optimal dimensions will be chosen by
64 :func:`~graspologic.embed.select_dimension` using ``n_elbows`` argument.
65
66 omnibus_n_elbows, cmds_n_elbows: int, optional, default: 2
67 If ``n_components`` is None, then compute the optimal embedding dimension using
68 :func:`~graspologic.embed.select_dimension`. Otherwise, ignored.
69
70 svd_seed : int or None (default ``None``)
71 Allows you to seed the randomized svd solver used in the Omnibus embedding
72 for deterministic, albeit pseudo-randomized behavior.
73
74 Attributes
75 ----------
76 omnibus_n_components_ : int
77 Equals the parameter ``n_components``. If input ``n_components`` was None,
78 then equals the optimal embedding dimension.
79
80 cmds_n_components_ : int
81 Equals the parameter ``n_components``. If input ``n_components`` was None,
82 then equals the optimal embedding dimension.
83
84 embeddings_ : array, shape (n_components, n_features)
85 Embeddings from the pipeline. Each graph is a point in ``n_features``
86 dimensions.
87
88 See also
89 --------
90 graspologic.utils.pass_to_ranks
91 graspologic.embed.OmnibusEmbed
92 graspologic.embed.ClassicalMDS
93 graspologic.embed.select_dimension
94 """
95
96 def __init__(
97 self,
98 pass_to_ranks: Literal[
99 "simple-nonzero", "simple-all", "zero-boost"
100 ] = "simple-nonzero",
101 omnibus_components: Optional[int] = None,
102 omnibus_n_elbows: int = 2,
103 cmds_components: Optional[int] = None,
104 cmds_n_elbows: int = 2,
105 svd_seed: Optional[int] = None,
106 ) -> None:
107 self.pass_to_ranks = pass_to_ranks
108 self.omnibus_components = omnibus_components
109 self.omnibus_n_elbows = omnibus_n_elbows
110 self.cmds_components = cmds_components
111 self.cmds_n_elbows = cmds_n_elbows
112 self.svd_seed = svd_seed
113
114 def _check_inputs(self) -> None:
115 variables = self.get_params()
116 variables.pop("pass_to_ranks")
117
118 for name, val in variables.items():
119 if val is not None:
120 if not isinstance(val, int):
121 msg = "{} must be an int or None.".format(name)
122 raise ValueError(msg)
123 elif val <= 0:
124 msg = "{} must be > 0.".format(name)
125 raise ValueError(msg)
126
127 def fit(
128 self, graphs: List[GraphRepresentation], y: Optional[Any] = None
129 ) -> "mug2vec":
130 """
131 Computes a vector for each graph.
132
133 Parameters
134 ----------
135 graphs : list of nx.Graph or ndarray, or ndarray
136 If list of nx.Graph, each Graph must contain same number of nodes.
137 If list of ndarray, each array must have shape (n_vertices, n_vertices).
138 If ndarray, then array must have shape (n_graphs, n_vertices, n_vertices).
139
140 y : Ignored
141
142 Returns
143 -------
144 self : returns an instance of self.
145 """
146 # Check these prior to PTR just in case
147 self._check_inputs()
148
149 if self.pass_to_ranks is not None:
150 graphs = [pass_to_ranks(g, self.pass_to_ranks) for g in graphs]
151
152 omni = OmnibusEmbed(
153 n_components=self.omnibus_components,
154 n_elbows=self.omnibus_n_elbows,
155 svd_seed=self.svd_seed,
156 )
157 omnibus_embedding = omni.fit_transform(graphs)
158
159 self.omnibus_n_components_ = omnibus_embedding.shape[-1]
160
161 cmds = ClassicalMDS(
162 n_components=self.cmds_components,
163 n_elbows=self.cmds_n_elbows,
164 svd_seed=self.svd_seed,
165 )
166 self.embeddings_: np.ndarray = cmds.fit_transform(omnibus_embedding)
167 self.cmds_components_ = self.embeddings_.shape[-1]
168
169 return self
170
171 def fit_transform(
172 self, graphs: List[GraphRepresentation], y: Optional[Any] = None
173 ) -> np.ndarray:
174 """
175 Computes a vector for each graph.
176
177 Parameters
178 ----------
179 graphs : list of nx.Graph or ndarray, or ndarray
180 If list of nx.Graph, each Graph must contain same number of nodes.
181 If list of ndarray, each array must have shape (n_vertices, n_vertices).
182 If ndarray, then array must have shape (n_graphs, n_vertices, n_vertices).
183
184 y : Ignored
185
186 Returns
187 -------
188 embeddings : embeddings generated by fit.
189 """
190 self.fit(graphs)
191
192 return self.embeddings_
193
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/graspologic/embed/mug2vec.py b/graspologic/embed/mug2vec.py
--- a/graspologic/embed/mug2vec.py
+++ b/graspologic/embed/mug2vec.py
@@ -153,10 +153,11 @@
n_components=self.omnibus_components,
n_elbows=self.omnibus_n_elbows,
svd_seed=self.svd_seed,
+ concat=True,
)
omnibus_embedding = omni.fit_transform(graphs)
- self.omnibus_n_components_ = omnibus_embedding.shape[-1]
+ self.omnibus_n_components_ = len(omni.singular_values_)
cmds = ClassicalMDS(
n_components=self.cmds_components,
| {"golden_diff": "diff --git a/graspologic/embed/mug2vec.py b/graspologic/embed/mug2vec.py\n--- a/graspologic/embed/mug2vec.py\n+++ b/graspologic/embed/mug2vec.py\n@@ -153,10 +153,11 @@\n n_components=self.omnibus_components,\n n_elbows=self.omnibus_n_elbows,\n svd_seed=self.svd_seed,\n+ concat=True,\n )\n omnibus_embedding = omni.fit_transform(graphs)\n \n- self.omnibus_n_components_ = omnibus_embedding.shape[-1]\n+ self.omnibus_n_components_ = len(omni.singular_values_)\n \n cmds = ClassicalMDS(\n n_components=self.cmds_components,\n", "issue": "[BUG] mug2vec assumes undirected graphs but doesn't check for this\n## Expected Behavior\r\nShould run on a set of directed graphs\r\n\r\n## Actual Behavior\r\nBreaks on this line \r\nhttps://github.com/microsoft/graspologic/blob/2125f27bc3f2739f4f2c784d5b700417df63c5d7/graspologic/embed/mug2vec.py#L141\r\nsince `omni_embedding` is a tuple \r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation and contributors.\n# Licensed under the MIT License.\n\nfrom typing import Any, Optional\n\nimport numpy as np\nfrom sklearn.base import BaseEstimator\nfrom typing_extensions import Literal\n\nfrom graspologic.types import List\n\nfrom ..types import GraphRepresentation\nfrom ..utils import pass_to_ranks\nfrom .mds import ClassicalMDS\nfrom .omni import OmnibusEmbed\n\n\nclass mug2vec(BaseEstimator):\n r\"\"\"\n Multigraphs-2-vectors (mug2vec).\n\n mug2vec is a sequence of three algorithms that learns a feature vector for each\n input graph.\n\n Steps:\n\n 1. Pass to ranks - ranks all edge weights from smallest to largest valued edges\n then normalize by a constant.\n\n 2. Omnibus embedding - jointly learns a low dimensional matrix representation for\n all graphs under the random dot product model (RDPG).\n\n 3. Classical MDS (cMDS) - learns a feature vector for each graph by computing\n Euclidean distance between each pair of graph embeddings from omnibus embedding,\n followed by an eigen decomposition.\n\n Parameters\n ----------\n pass_to_ranks: {'simple-nonzero' (default), 'simple-all', 'zero-boost'} string, or None\n\n - 'simple-nonzero'\n assigns ranks to all non-zero edges, settling ties using\n the average. Ranks are then scaled by\n :math:`\\frac{rank(\\text{non-zero edges})}{\\text{total non-zero edges} + 1}`\n - 'simple-all'\n assigns ranks to all non-zero edges, settling ties using\n the average. Ranks are then scaled by\n :math:`\\frac{rank(\\text{non-zero edges})}{n^2 + 1}`\n where n is the number of nodes\n - 'zero-boost'\n preserves the edge weight for all 0s, but ranks the other\n edges as if the ranks of all 0 edges has been assigned. If there are\n 10 0-valued edges, the lowest non-zero edge gets weight 11 / (number\n of possible edges). Ties settled by the average of the weight that those\n edges would have received. Number of possible edges is determined\n by the type of graph (loopless or looped, directed or undirected).\n - None\n No pass to ranks applied.\n\n omnibus_components, cmds_components : int or None, default = None\n Desired dimensionality of output data. If \"full\",\n ``n_components`` must be ``<= min(X.shape)``. Otherwise, ``n_components`` must be\n ``< min(X.shape)``. If None, then optimal dimensions will be chosen by\n :func:`~graspologic.embed.select_dimension` using ``n_elbows`` argument.\n\n omnibus_n_elbows, cmds_n_elbows: int, optional, default: 2\n If ``n_components`` is None, then compute the optimal embedding dimension using\n :func:`~graspologic.embed.select_dimension`. 
Otherwise, ignored.\n\n svd_seed : int or None (default ``None``)\n Allows you to seed the randomized svd solver used in the Omnibus embedding\n for deterministic, albeit pseudo-randomized behavior.\n\n Attributes\n ----------\n omnibus_n_components_ : int\n Equals the parameter ``n_components``. If input ``n_components`` was None,\n then equals the optimal embedding dimension.\n\n cmds_n_components_ : int\n Equals the parameter ``n_components``. If input ``n_components`` was None,\n then equals the optimal embedding dimension.\n\n embeddings_ : array, shape (n_components, n_features)\n Embeddings from the pipeline. Each graph is a point in ``n_features``\n dimensions.\n\n See also\n --------\n graspologic.utils.pass_to_ranks\n graspologic.embed.OmnibusEmbed\n graspologic.embed.ClassicalMDS\n graspologic.embed.select_dimension\n \"\"\"\n\n def __init__(\n self,\n pass_to_ranks: Literal[\n \"simple-nonzero\", \"simple-all\", \"zero-boost\"\n ] = \"simple-nonzero\",\n omnibus_components: Optional[int] = None,\n omnibus_n_elbows: int = 2,\n cmds_components: Optional[int] = None,\n cmds_n_elbows: int = 2,\n svd_seed: Optional[int] = None,\n ) -> None:\n self.pass_to_ranks = pass_to_ranks\n self.omnibus_components = omnibus_components\n self.omnibus_n_elbows = omnibus_n_elbows\n self.cmds_components = cmds_components\n self.cmds_n_elbows = cmds_n_elbows\n self.svd_seed = svd_seed\n\n def _check_inputs(self) -> None:\n variables = self.get_params()\n variables.pop(\"pass_to_ranks\")\n\n for name, val in variables.items():\n if val is not None:\n if not isinstance(val, int):\n msg = \"{} must be an int or None.\".format(name)\n raise ValueError(msg)\n elif val <= 0:\n msg = \"{} must be > 0.\".format(name)\n raise ValueError(msg)\n\n def fit(\n self, graphs: List[GraphRepresentation], y: Optional[Any] = None\n ) -> \"mug2vec\":\n \"\"\"\n Computes a vector for each graph.\n\n Parameters\n ----------\n graphs : list of nx.Graph or ndarray, or ndarray\n If list of nx.Graph, each Graph must contain same number of nodes.\n If list of ndarray, each array must have shape (n_vertices, n_vertices).\n If ndarray, then array must have shape (n_graphs, n_vertices, n_vertices).\n\n y : Ignored\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n # Check these prior to PTR just in case\n self._check_inputs()\n\n if self.pass_to_ranks is not None:\n graphs = [pass_to_ranks(g, self.pass_to_ranks) for g in graphs]\n\n omni = OmnibusEmbed(\n n_components=self.omnibus_components,\n n_elbows=self.omnibus_n_elbows,\n svd_seed=self.svd_seed,\n )\n omnibus_embedding = omni.fit_transform(graphs)\n\n self.omnibus_n_components_ = omnibus_embedding.shape[-1]\n\n cmds = ClassicalMDS(\n n_components=self.cmds_components,\n n_elbows=self.cmds_n_elbows,\n svd_seed=self.svd_seed,\n )\n self.embeddings_: np.ndarray = cmds.fit_transform(omnibus_embedding)\n self.cmds_components_ = self.embeddings_.shape[-1]\n\n return self\n\n def fit_transform(\n self, graphs: List[GraphRepresentation], y: Optional[Any] = None\n ) -> np.ndarray:\n \"\"\"\n Computes a vector for each graph.\n\n Parameters\n ----------\n graphs : list of nx.Graph or ndarray, or ndarray\n If list of nx.Graph, each Graph must contain same number of nodes.\n If list of ndarray, each array must have shape (n_vertices, n_vertices).\n If ndarray, then array must have shape (n_graphs, n_vertices, n_vertices).\n\n y : Ignored\n\n Returns\n -------\n embeddings : embeddings generated by fit.\n \"\"\"\n self.fit(graphs)\n\n return self.embeddings_\n", 
"path": "graspologic/embed/mug2vec.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation and contributors.\n# Licensed under the MIT License.\n\nfrom typing import Any, Optional\n\nimport numpy as np\nfrom sklearn.base import BaseEstimator\nfrom typing_extensions import Literal\n\nfrom graspologic.types import List\n\nfrom ..types import GraphRepresentation\nfrom ..utils import pass_to_ranks\nfrom .mds import ClassicalMDS\nfrom .omni import OmnibusEmbed\n\n\nclass mug2vec(BaseEstimator):\n r\"\"\"\n Multigraphs-2-vectors (mug2vec).\n\n mug2vec is a sequence of three algorithms that learns a feature vector for each\n input graph.\n\n Steps:\n\n 1. Pass to ranks - ranks all edge weights from smallest to largest valued edges\n then normalize by a constant.\n\n 2. Omnibus embedding - jointly learns a low dimensional matrix representation for\n all graphs under the random dot product model (RDPG).\n\n 3. Classical MDS (cMDS) - learns a feature vector for each graph by computing\n Euclidean distance between each pair of graph embeddings from omnibus embedding,\n followed by an eigen decomposition.\n\n Parameters\n ----------\n pass_to_ranks: {'simple-nonzero' (default), 'simple-all', 'zero-boost'} string, or None\n\n - 'simple-nonzero'\n assigns ranks to all non-zero edges, settling ties using\n the average. Ranks are then scaled by\n :math:`\\frac{rank(\\text{non-zero edges})}{\\text{total non-zero edges} + 1}`\n - 'simple-all'\n assigns ranks to all non-zero edges, settling ties using\n the average. Ranks are then scaled by\n :math:`\\frac{rank(\\text{non-zero edges})}{n^2 + 1}`\n where n is the number of nodes\n - 'zero-boost'\n preserves the edge weight for all 0s, but ranks the other\n edges as if the ranks of all 0 edges has been assigned. If there are\n 10 0-valued edges, the lowest non-zero edge gets weight 11 / (number\n of possible edges). Ties settled by the average of the weight that those\n edges would have received. Number of possible edges is determined\n by the type of graph (loopless or looped, directed or undirected).\n - None\n No pass to ranks applied.\n\n omnibus_components, cmds_components : int or None, default = None\n Desired dimensionality of output data. If \"full\",\n ``n_components`` must be ``<= min(X.shape)``. Otherwise, ``n_components`` must be\n ``< min(X.shape)``. If None, then optimal dimensions will be chosen by\n :func:`~graspologic.embed.select_dimension` using ``n_elbows`` argument.\n\n omnibus_n_elbows, cmds_n_elbows: int, optional, default: 2\n If ``n_components`` is None, then compute the optimal embedding dimension using\n :func:`~graspologic.embed.select_dimension`. Otherwise, ignored.\n\n svd_seed : int or None (default ``None``)\n Allows you to seed the randomized svd solver used in the Omnibus embedding\n for deterministic, albeit pseudo-randomized behavior.\n\n Attributes\n ----------\n omnibus_n_components_ : int\n Equals the parameter ``n_components``. If input ``n_components`` was None,\n then equals the optimal embedding dimension.\n\n cmds_n_components_ : int\n Equals the parameter ``n_components``. If input ``n_components`` was None,\n then equals the optimal embedding dimension.\n\n embeddings_ : array, shape (n_components, n_features)\n Embeddings from the pipeline. 
Each graph is a point in ``n_features``\n dimensions.\n\n See also\n --------\n graspologic.utils.pass_to_ranks\n graspologic.embed.OmnibusEmbed\n graspologic.embed.ClassicalMDS\n graspologic.embed.select_dimension\n \"\"\"\n\n def __init__(\n self,\n pass_to_ranks: Literal[\n \"simple-nonzero\", \"simple-all\", \"zero-boost\"\n ] = \"simple-nonzero\",\n omnibus_components: Optional[int] = None,\n omnibus_n_elbows: int = 2,\n cmds_components: Optional[int] = None,\n cmds_n_elbows: int = 2,\n svd_seed: Optional[int] = None,\n ) -> None:\n self.pass_to_ranks = pass_to_ranks\n self.omnibus_components = omnibus_components\n self.omnibus_n_elbows = omnibus_n_elbows\n self.cmds_components = cmds_components\n self.cmds_n_elbows = cmds_n_elbows\n self.svd_seed = svd_seed\n\n def _check_inputs(self) -> None:\n variables = self.get_params()\n variables.pop(\"pass_to_ranks\")\n\n for name, val in variables.items():\n if val is not None:\n if not isinstance(val, int):\n msg = \"{} must be an int or None.\".format(name)\n raise ValueError(msg)\n elif val <= 0:\n msg = \"{} must be > 0.\".format(name)\n raise ValueError(msg)\n\n def fit(\n self, graphs: List[GraphRepresentation], y: Optional[Any] = None\n ) -> \"mug2vec\":\n \"\"\"\n Computes a vector for each graph.\n\n Parameters\n ----------\n graphs : list of nx.Graph or ndarray, or ndarray\n If list of nx.Graph, each Graph must contain same number of nodes.\n If list of ndarray, each array must have shape (n_vertices, n_vertices).\n If ndarray, then array must have shape (n_graphs, n_vertices, n_vertices).\n\n y : Ignored\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n # Check these prior to PTR just in case\n self._check_inputs()\n\n if self.pass_to_ranks is not None:\n graphs = [pass_to_ranks(g, self.pass_to_ranks) for g in graphs]\n\n omni = OmnibusEmbed(\n n_components=self.omnibus_components,\n n_elbows=self.omnibus_n_elbows,\n svd_seed=self.svd_seed,\n concat=True,\n )\n omnibus_embedding = omni.fit_transform(graphs)\n\n self.omnibus_n_components_ = len(omni.singular_values_)\n\n cmds = ClassicalMDS(\n n_components=self.cmds_components,\n n_elbows=self.cmds_n_elbows,\n svd_seed=self.svd_seed,\n )\n self.embeddings_: np.ndarray = cmds.fit_transform(omnibus_embedding)\n self.cmds_components_ = self.embeddings_.shape[-1]\n\n return self\n\n def fit_transform(\n self, graphs: List[GraphRepresentation], y: Optional[Any] = None\n ) -> np.ndarray:\n \"\"\"\n Computes a vector for each graph.\n\n Parameters\n ----------\n graphs : list of nx.Graph or ndarray, or ndarray\n If list of nx.Graph, each Graph must contain same number of nodes.\n If list of ndarray, each array must have shape (n_vertices, n_vertices).\n If ndarray, then array must have shape (n_graphs, n_vertices, n_vertices).\n\n y : Ignored\n\n Returns\n -------\n embeddings : embeddings generated by fit.\n \"\"\"\n self.fit(graphs)\n\n return self.embeddings_\n", "path": "graspologic/embed/mug2vec.py"}]} | 2,469 | 165 |
gh_patches_debug_4210 | rasdani/github-patches | git_diff | huggingface__transformers-8049 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BlenderbotSmallTokenizer throws tuple index out of range error for stopword
Using transformers==3.4.0
Script used:
```
from transformers import BlenderbotSmallTokenizer, BlenderbotForConditionalGeneration
mname = 'facebook/blenderbot-90M'
tokenizer = BlenderbotSmallTokenizer.from_pretrained(mname)
sentence = "."
tokenizer(sentence)['input_ids']
```
This throws `IndexError: tuple index out of range`
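The traceback can be reproduced with just the whitespace preprocessing from `bpe()` shown in the file below: padding the punctuation leaves an empty-string token, and indexing into an empty tuple raises the error. A sketch of that failure mode (not a fix):

```python
import regex as re

token = re.sub(r"([.,!?()])", r" \1", ".")  # "." becomes " ."
tokens = token.split(" ")                   # ['', '.'], note the empty string
word = tuple(tokens[0])                     # () for the empty token
word[-1]                                    # IndexError: tuple index out of range
```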
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/transformers/tokenization_blenderbot.py`
Content:
```
1 #!/usr/bin/env python3
2 # coding=utf-8
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 #
5 # This source code is licensed under the MIT license found in the;
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16 # LICENSE file in the root directory of this source tree.
17 """"BlenderbotTokenizer and BlenderbotSmallTokenizer"""
18 import json
19 import os
20 from typing import Dict, List, Optional, Tuple
21
22 import regex as re
23
24 from .tokenization_roberta import RobertaTokenizer
25 from .tokenization_utils import PreTrainedTokenizer
26 from .utils import logging
27
28
29 logger = logging.get_logger(__name__)
30
31
32 VOCAB_FILES_NAMES = {
33 "vocab_file": "vocab.json",
34 "merges_file": "merges.txt",
35 # "tokenizer_config_file": "tokenizer_config.json",
36 }
37 CKPT_3B = "facebook/blenderbot-3B"
38
39
40 class BlenderbotTokenizer(RobertaTokenizer):
41 r"""
42 Construct a Blenderbot tokenizer.
43
44 :class:`~transformers.Blenderbot` is nearly identical to :class:`~transformers.RobertaTokenizer` and runs
45 end-to-end tokenization: punctuation splitting and wordpiece. The only difference is that it doesnt add BOS
46 token to the beginning of sequences.
47
48 Refer to superclass :class:`~transformers.RobertaTokenizer` for usage examples and documentation concerning
49 parameters.
50 """
51 vocab_files_names = {
52 "vocab_file": "vocab.json",
53 "merges_file": "merges.txt",
54 "tokenizer_config_file": "tokenizer_config.json",
55 }
56 pretrained_vocab_files_map = {
57 "vocab_file": {CKPT_3B: "https://cdn.huggingface.co/facebook/blenderbot-3B/vocab.json"},
58 "merges_file": {CKPT_3B: "https://cdn.huggingface.co/facebook/blenderbot-3B/merges.txt"},
59 "tokenizer_config_file": {CKPT_3B: "https://cdn.huggingface.co/facebook/blenderbot-3B/tokenizer_config.json"},
60 }
61 max_model_input_sizes = {"facebook/blenderbot-3B": 128}
62
63 def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None):
64 """
65 Build model inputs from a sequence or a pair of sequence for sequence classification tasks
66 by concatenating and adding special tokens.
67 A Blenderbot sequence has the following format:
68
69 - single sequence: `` X </s>``
70
71 Args:
72 token_ids_0 (:obj:`List[int]`):
73 List of IDs to which the special tokens will be added
74 token_ids_1 (:obj:`List[int]`, `optional`):
75 Will be ignored
76
77 Returns:
78 :obj:`List[int]`: list of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
79 """
80 return token_ids_0 + [self.eos_token_id]
81
82
83 def get_pairs(word):
84 """Return set of symbol pairs in a word.
85
86 Word is represented as tuple of symbols (symbols being variable-length strings).
87 """
88 pairs = set()
89 prev_char = word[0]
90 for char in word[1:]:
91 pairs.add((prev_char, char))
92 prev_char = char
93
94 pairs = set(pairs)
95 return pairs
96
97
98 class BlenderbotSmallTokenizer(PreTrainedTokenizer):
99 """
100 Constructs a Blenderbot-90M tokenizer based on BPE (Byte-Pair-Encoding)
101
102 This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods. Users
103 should refer to the superclass for more information regarding methods.
104
105 Args:
106 vocab_file (:obj:`str`):
107 File containing the vocabulary.
108 merges_file (:obj:`str`):
109 Path to the merges file.
110 bos_token (:obj:`str`, `optional`, defaults to :obj:`"__start__"`):
111 The beginning of sentence token.
112 eos_token (:obj:`str`, `optional`, defaults to :obj:`"__end__"`):
113 The end of sentence token.
114 unk_token (:obj:`str`, `optional`, defaults to :obj:`"__unk__"`):
115 The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.
116 pad_token (:obj:`str`, `optional`, defaults to :obj:`"__pad__"`):
117 The token used for padding, for example when batching sequences of different lengths.
118 **kwargs
119 Additional keyword arguments passed along to :class:`~transformers.PreTrainedTokenizer`
120 """
121
122 vocab_files_names = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
123 pretrained_vocab_files_map = {
124 "vocab_file": {"facebook/blenderbot-90M": "https://cdn.huggingface.co/facebook/blenderbot-90M/vocab.json"},
125 "merges_file": {"facebook/blenderbot-90M": "https://cdn.huggingface.co/facebook/blenderbot-90M/merges.txt"},
126 }
127 max_model_input_sizes = {"facebook/blenderbot-90M": 512}
128
129 def __init__(
130 self,
131 vocab_file,
132 merges_file,
133 bos_token="__start__",
134 eos_token="__end__",
135 unk_token="__unk__",
136 pad_token="__null",
137 **kwargs
138 ):
139 super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
140
141 with open(vocab_file, encoding="utf-8") as vocab_handle:
142 self.encoder = json.load(vocab_handle)
143 self.decoder = {v: k for k, v in self.encoder.items()}
144 with open(merges_file, encoding="utf-8") as merges_handle:
145 merges = merges_handle.read().split("\n")[1:-1]
146 merges = [tuple(merge.split()) for merge in merges]
147 self.bpe_ranks = dict(zip(merges, range(len(merges))))
148 self.cache = {}
149
150 @property
151 def vocab_size(self) -> int:
152 return len(self.encoder)
153
154 def get_vocab(self) -> Dict:
155 return dict(self.encoder, **self.added_tokens_encoder)
156
157 def bpe(self, token: str) -> str:
158 if token in self.cache:
159 return self.cache[token]
160 token = re.sub("([.,!?()])", r" \1", token)
161 token = re.sub("(')", r" \1 ", token)
162 token = re.sub("\s{2,}", " ", token)
163 if "\n" in token:
164 token = token.replace("\n", " __newln__")
165
166 tokens = token.split(" ")
167 words = []
168 for token in tokens:
169 token = token.lower()
170 word = tuple(token)
171 word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
172 pairs = get_pairs(word)
173
174 if not pairs:
175 words.append(token)
176 continue
177
178 while True:
179 bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
180 if bigram not in self.bpe_ranks:
181 break
182 first, second = bigram
183 new_word = []
184 i = 0
185
186 while i < len(word):
187 try:
188 j = word.index(first, i)
189 new_word.extend(word[i:j])
190 i = j
191 except ValueError:
192 new_word.extend(word[i:])
193 break
194
195 if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
196 new_word.append(first + second)
197 i += 2
198 else:
199 new_word.append(word[i])
200 i += 1
201 new_word = tuple(new_word)
202 word = new_word
203 if len(word) == 1:
204 break
205 else:
206 pairs = get_pairs(word)
207 word = "@@ ".join(word)
208 word = word[:-4]
209
210 self.cache[token] = word
211 words.append(word)
212 return " ".join(words)
213
214 def _tokenize(self, text: str) -> List[str]:
215 """ Split a string into tokens using BPE."""
216 split_tokens = []
217
218 words = re.findall(r"\S+\n?", text)
219
220 for token in words:
221 split_tokens.extend([t for t in self.bpe(token).split(" ")])
222 return split_tokens
223
224 def _convert_token_to_id(self, token: str) -> int:
225 """ Converts a token to an id using the vocab. """
226 token = token.lower()
227 return self.encoder.get(token, self.encoder.get(self.unk_token))
228
229 def _convert_id_to_token(self, index: int) -> str:
230 """Converts an index (integer) in a token (str) using the vocab."""
231 return self.decoder.get(index, self.unk_token)
232
233 def convert_tokens_to_string(self, tokens: List[str]) -> str:
234 """ Converts a sequence of tokens in a single string. """
235 out_string = " ".join(tokens).replace("@@ ", "").strip()
236 return out_string
237
238 def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
239 if not os.path.isdir(save_directory):
240 logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
241 return
242 vocab_file = os.path.join(
243 save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
244 )
245 merge_file = os.path.join(
246 save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
247 )
248
249 with open(vocab_file, "w", encoding="utf-8") as f:
250 f.write(json.dumps(self.encoder, ensure_ascii=False))
251
252 index = 0
253 with open(merge_file, "w", encoding="utf-8") as writer:
254 writer.write("#version: 0.2\n")
255 for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
256 if index != token_index:
257 logger.warning(
258 "Saving vocabulary to {}: BPE merge indices are not consecutive."
259 " Please check that the tokenizer is not corrupted!".format(merge_file)
260 )
261 index = token_index
262 writer.write(" ".join(bpe_tokens) + "\n")
263 index += 1
264
265 return vocab_file, merge_file
266
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/transformers/tokenization_blenderbot.py b/src/transformers/tokenization_blenderbot.py
--- a/src/transformers/tokenization_blenderbot.py
+++ b/src/transformers/tokenization_blenderbot.py
@@ -166,6 +166,9 @@
tokens = token.split(" ")
words = []
for token in tokens:
+ if not len(token):
+ continue
+
token = token.lower()
word = tuple(token)
word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
| {"golden_diff": "diff --git a/src/transformers/tokenization_blenderbot.py b/src/transformers/tokenization_blenderbot.py\n--- a/src/transformers/tokenization_blenderbot.py\n+++ b/src/transformers/tokenization_blenderbot.py\n@@ -166,6 +166,9 @@\n tokens = token.split(\" \")\n words = []\n for token in tokens:\n+ if not len(token):\n+ continue\n+\n token = token.lower()\n word = tuple(token)\n word = tuple(list(word[:-1]) + [word[-1] + \"</w>\"])\n", "issue": "BlenderbotSmallTokenizer throws tuple index out of range error for stopword\nUsing transformers==3.4.0\r\n\r\nScript used:\r\n```\r\n\r\nfrom transformers import BlenderbotSmallTokenizer, BlenderbotForConditionalGeneration\r\nmname = 'facebook/blenderbot-90M'\r\ntokenizer = BlenderbotSmallTokenizer.from_pretrained(mname)\r\n\r\nsentence = \".\"\r\ntokenizer(sentence)['input_ids']\r\n```\r\n\r\nThis throws `IndexError: tuple index out of range`\n", "before_files": [{"content": "#!/usr/bin/env python3\n# coding=utf-8\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the;\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# LICENSE file in the root directory of this source tree.\n\"\"\"\"BlenderbotTokenizer and BlenderbotSmallTokenizer\"\"\"\nimport json\nimport os\nfrom typing import Dict, List, Optional, Tuple\n\nimport regex as re\n\nfrom .tokenization_roberta import RobertaTokenizer\nfrom .tokenization_utils import PreTrainedTokenizer\nfrom .utils import logging\n\n\nlogger = logging.get_logger(__name__)\n\n\nVOCAB_FILES_NAMES = {\n \"vocab_file\": \"vocab.json\",\n \"merges_file\": \"merges.txt\",\n # \"tokenizer_config_file\": \"tokenizer_config.json\",\n}\nCKPT_3B = \"facebook/blenderbot-3B\"\n\n\nclass BlenderbotTokenizer(RobertaTokenizer):\n r\"\"\"\n Construct a Blenderbot tokenizer.\n\n :class:`~transformers.Blenderbot` is nearly identical to :class:`~transformers.RobertaTokenizer` and runs\n end-to-end tokenization: punctuation splitting and wordpiece. 
The only difference is that it doesnt add BOS\n token to the beginning of sequences.\n\n Refer to superclass :class:`~transformers.RobertaTokenizer` for usage examples and documentation concerning\n parameters.\n \"\"\"\n vocab_files_names = {\n \"vocab_file\": \"vocab.json\",\n \"merges_file\": \"merges.txt\",\n \"tokenizer_config_file\": \"tokenizer_config.json\",\n }\n pretrained_vocab_files_map = {\n \"vocab_file\": {CKPT_3B: \"https://cdn.huggingface.co/facebook/blenderbot-3B/vocab.json\"},\n \"merges_file\": {CKPT_3B: \"https://cdn.huggingface.co/facebook/blenderbot-3B/merges.txt\"},\n \"tokenizer_config_file\": {CKPT_3B: \"https://cdn.huggingface.co/facebook/blenderbot-3B/tokenizer_config.json\"},\n }\n max_model_input_sizes = {\"facebook/blenderbot-3B\": 128}\n\n def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None):\n \"\"\"\n Build model inputs from a sequence or a pair of sequence for sequence classification tasks\n by concatenating and adding special tokens.\n A Blenderbot sequence has the following format:\n\n - single sequence: `` X </s>``\n\n Args:\n token_ids_0 (:obj:`List[int]`):\n List of IDs to which the special tokens will be added\n token_ids_1 (:obj:`List[int]`, `optional`):\n Will be ignored\n\n Returns:\n :obj:`List[int]`: list of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.\n \"\"\"\n return token_ids_0 + [self.eos_token_id]\n\n\ndef get_pairs(word):\n \"\"\"Return set of symbol pairs in a word.\n\n Word is represented as tuple of symbols (symbols being variable-length strings).\n \"\"\"\n pairs = set()\n prev_char = word[0]\n for char in word[1:]:\n pairs.add((prev_char, char))\n prev_char = char\n\n pairs = set(pairs)\n return pairs\n\n\nclass BlenderbotSmallTokenizer(PreTrainedTokenizer):\n \"\"\"\n Constructs a Blenderbot-90M tokenizer based on BPE (Byte-Pair-Encoding)\n\n This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods. Users\n should refer to the superclass for more information regarding methods.\n\n Args:\n vocab_file (:obj:`str`):\n File containing the vocabulary.\n merges_file (:obj:`str`):\n Path to the merges file.\n bos_token (:obj:`str`, `optional`, defaults to :obj:`\"__start__\"`):\n The beginning of sentence token.\n eos_token (:obj:`str`, `optional`, defaults to :obj:`\"__end__\"`):\n The end of sentence token.\n unk_token (:obj:`str`, `optional`, defaults to :obj:`\"__unk__\"`):\n The unknown token. 
A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.\n pad_token (:obj:`str`, `optional`, defaults to :obj:`\"__pad__\"`):\n The token used for padding, for example when batching sequences of different lengths.\n **kwargs\n Additional keyword arguments passed along to :class:`~transformers.PreTrainedTokenizer`\n \"\"\"\n\n vocab_files_names = {\"vocab_file\": \"vocab.json\", \"merges_file\": \"merges.txt\"}\n pretrained_vocab_files_map = {\n \"vocab_file\": {\"facebook/blenderbot-90M\": \"https://cdn.huggingface.co/facebook/blenderbot-90M/vocab.json\"},\n \"merges_file\": {\"facebook/blenderbot-90M\": \"https://cdn.huggingface.co/facebook/blenderbot-90M/merges.txt\"},\n }\n max_model_input_sizes = {\"facebook/blenderbot-90M\": 512}\n\n def __init__(\n self,\n vocab_file,\n merges_file,\n bos_token=\"__start__\",\n eos_token=\"__end__\",\n unk_token=\"__unk__\",\n pad_token=\"__null\",\n **kwargs\n ):\n super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)\n\n with open(vocab_file, encoding=\"utf-8\") as vocab_handle:\n self.encoder = json.load(vocab_handle)\n self.decoder = {v: k for k, v in self.encoder.items()}\n with open(merges_file, encoding=\"utf-8\") as merges_handle:\n merges = merges_handle.read().split(\"\\n\")[1:-1]\n merges = [tuple(merge.split()) for merge in merges]\n self.bpe_ranks = dict(zip(merges, range(len(merges))))\n self.cache = {}\n\n @property\n def vocab_size(self) -> int:\n return len(self.encoder)\n\n def get_vocab(self) -> Dict:\n return dict(self.encoder, **self.added_tokens_encoder)\n\n def bpe(self, token: str) -> str:\n if token in self.cache:\n return self.cache[token]\n token = re.sub(\"([.,!?()])\", r\" \\1\", token)\n token = re.sub(\"(')\", r\" \\1 \", token)\n token = re.sub(\"\\s{2,}\", \" \", token)\n if \"\\n\" in token:\n token = token.replace(\"\\n\", \" __newln__\")\n\n tokens = token.split(\" \")\n words = []\n for token in tokens:\n token = token.lower()\n word = tuple(token)\n word = tuple(list(word[:-1]) + [word[-1] + \"</w>\"])\n pairs = get_pairs(word)\n\n if not pairs:\n words.append(token)\n continue\n\n while True:\n bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float(\"inf\")))\n if bigram not in self.bpe_ranks:\n break\n first, second = bigram\n new_word = []\n i = 0\n\n while i < len(word):\n try:\n j = word.index(first, i)\n new_word.extend(word[i:j])\n i = j\n except ValueError:\n new_word.extend(word[i:])\n break\n\n if word[i] == first and i < len(word) - 1 and word[i + 1] == second:\n new_word.append(first + second)\n i += 2\n else:\n new_word.append(word[i])\n i += 1\n new_word = tuple(new_word)\n word = new_word\n if len(word) == 1:\n break\n else:\n pairs = get_pairs(word)\n word = \"@@ \".join(word)\n word = word[:-4]\n\n self.cache[token] = word\n words.append(word)\n return \" \".join(words)\n\n def _tokenize(self, text: str) -> List[str]:\n \"\"\" Split a string into tokens using BPE.\"\"\"\n split_tokens = []\n\n words = re.findall(r\"\\S+\\n?\", text)\n\n for token in words:\n split_tokens.extend([t for t in self.bpe(token).split(\" \")])\n return split_tokens\n\n def _convert_token_to_id(self, token: str) -> int:\n \"\"\" Converts a token to an id using the vocab. 
\"\"\"\n token = token.lower()\n return self.encoder.get(token, self.encoder.get(self.unk_token))\n\n def _convert_id_to_token(self, index: int) -> str:\n \"\"\"Converts an index (integer) in a token (str) using the vocab.\"\"\"\n return self.decoder.get(index, self.unk_token)\n\n def convert_tokens_to_string(self, tokens: List[str]) -> str:\n \"\"\" Converts a sequence of tokens in a single string. \"\"\"\n out_string = \" \".join(tokens).replace(\"@@ \", \"\").strip()\n return out_string\n\n def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:\n if not os.path.isdir(save_directory):\n logger.error(\"Vocabulary path ({}) should be a directory\".format(save_directory))\n return\n vocab_file = os.path.join(\n save_directory, (filename_prefix + \"-\" if filename_prefix else \"\") + VOCAB_FILES_NAMES[\"vocab_file\"]\n )\n merge_file = os.path.join(\n save_directory, (filename_prefix + \"-\" if filename_prefix else \"\") + VOCAB_FILES_NAMES[\"merges_file\"]\n )\n\n with open(vocab_file, \"w\", encoding=\"utf-8\") as f:\n f.write(json.dumps(self.encoder, ensure_ascii=False))\n\n index = 0\n with open(merge_file, \"w\", encoding=\"utf-8\") as writer:\n writer.write(\"#version: 0.2\\n\")\n for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):\n if index != token_index:\n logger.warning(\n \"Saving vocabulary to {}: BPE merge indices are not consecutive.\"\n \" Please check that the tokenizer is not corrupted!\".format(merge_file)\n )\n index = token_index\n writer.write(\" \".join(bpe_tokens) + \"\\n\")\n index += 1\n\n return vocab_file, merge_file\n", "path": "src/transformers/tokenization_blenderbot.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# coding=utf-8\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the;\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# LICENSE file in the root directory of this source tree.\n\"\"\"\"BlenderbotTokenizer and BlenderbotSmallTokenizer\"\"\"\nimport json\nimport os\nfrom typing import Dict, List, Optional, Tuple\n\nimport regex as re\n\nfrom .tokenization_roberta import RobertaTokenizer\nfrom .tokenization_utils import PreTrainedTokenizer\nfrom .utils import logging\n\n\nlogger = logging.get_logger(__name__)\n\n\nVOCAB_FILES_NAMES = {\n \"vocab_file\": \"vocab.json\",\n \"merges_file\": \"merges.txt\",\n # \"tokenizer_config_file\": \"tokenizer_config.json\",\n}\nCKPT_3B = \"facebook/blenderbot-3B\"\n\n\nclass BlenderbotTokenizer(RobertaTokenizer):\n r\"\"\"\n Construct a Blenderbot tokenizer.\n\n :class:`~transformers.Blenderbot` is nearly identical to :class:`~transformers.RobertaTokenizer` and runs\n end-to-end tokenization: punctuation splitting and wordpiece. 
The only difference is that it doesnt add BOS\n token to the beginning of sequences.\n\n Refer to superclass :class:`~transformers.RobertaTokenizer` for usage examples and documentation concerning\n parameters.\n \"\"\"\n vocab_files_names = {\n \"vocab_file\": \"vocab.json\",\n \"merges_file\": \"merges.txt\",\n \"tokenizer_config_file\": \"tokenizer_config.json\",\n }\n pretrained_vocab_files_map = {\n \"vocab_file\": {CKPT_3B: \"https://cdn.huggingface.co/facebook/blenderbot-3B/vocab.json\"},\n \"merges_file\": {CKPT_3B: \"https://cdn.huggingface.co/facebook/blenderbot-3B/merges.txt\"},\n \"tokenizer_config_file\": {CKPT_3B: \"https://cdn.huggingface.co/facebook/blenderbot-3B/tokenizer_config.json\"},\n }\n max_model_input_sizes = {\"facebook/blenderbot-3B\": 128}\n\n def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None):\n \"\"\"\n Build model inputs from a sequence or a pair of sequence for sequence classification tasks\n by concatenating and adding special tokens.\n A Blenderbot sequence has the following format:\n\n - single sequence: `` X </s>``\n\n Args:\n token_ids_0 (:obj:`List[int]`):\n List of IDs to which the special tokens will be added\n token_ids_1 (:obj:`List[int]`, `optional`):\n Will be ignored\n\n Returns:\n :obj:`List[int]`: list of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.\n \"\"\"\n return token_ids_0 + [self.eos_token_id]\n\n\ndef get_pairs(word):\n \"\"\"Return set of symbol pairs in a word.\n\n Word is represented as tuple of symbols (symbols being variable-length strings).\n \"\"\"\n pairs = set()\n prev_char = word[0]\n for char in word[1:]:\n pairs.add((prev_char, char))\n prev_char = char\n\n pairs = set(pairs)\n return pairs\n\n\nclass BlenderbotSmallTokenizer(PreTrainedTokenizer):\n \"\"\"\n Constructs a Blenderbot-90M tokenizer based on BPE (Byte-Pair-Encoding)\n\n This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods. Users\n should refer to the superclass for more information regarding methods.\n\n Args:\n vocab_file (:obj:`str`):\n File containing the vocabulary.\n merges_file (:obj:`str`):\n Path to the merges file.\n bos_token (:obj:`str`, `optional`, defaults to :obj:`\"__start__\"`):\n The beginning of sentence token.\n eos_token (:obj:`str`, `optional`, defaults to :obj:`\"__end__\"`):\n The end of sentence token.\n unk_token (:obj:`str`, `optional`, defaults to :obj:`\"__unk__\"`):\n The unknown token. 
A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.\n pad_token (:obj:`str`, `optional`, defaults to :obj:`\"__pad__\"`):\n The token used for padding, for example when batching sequences of different lengths.\n **kwargs\n Additional keyword arguments passed along to :class:`~transformers.PreTrainedTokenizer`\n \"\"\"\n\n vocab_files_names = {\"vocab_file\": \"vocab.json\", \"merges_file\": \"merges.txt\"}\n pretrained_vocab_files_map = {\n \"vocab_file\": {\"facebook/blenderbot-90M\": \"https://cdn.huggingface.co/facebook/blenderbot-90M/vocab.json\"},\n \"merges_file\": {\"facebook/blenderbot-90M\": \"https://cdn.huggingface.co/facebook/blenderbot-90M/merges.txt\"},\n }\n max_model_input_sizes = {\"facebook/blenderbot-90M\": 512}\n\n def __init__(\n self,\n vocab_file,\n merges_file,\n bos_token=\"__start__\",\n eos_token=\"__end__\",\n unk_token=\"__unk__\",\n pad_token=\"__null\",\n **kwargs\n ):\n super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)\n\n with open(vocab_file, encoding=\"utf-8\") as vocab_handle:\n self.encoder = json.load(vocab_handle)\n self.decoder = {v: k for k, v in self.encoder.items()}\n with open(merges_file, encoding=\"utf-8\") as merges_handle:\n merges = merges_handle.read().split(\"\\n\")[1:-1]\n merges = [tuple(merge.split()) for merge in merges]\n self.bpe_ranks = dict(zip(merges, range(len(merges))))\n self.cache = {}\n\n @property\n def vocab_size(self) -> int:\n return len(self.encoder)\n\n def get_vocab(self) -> Dict:\n return dict(self.encoder, **self.added_tokens_encoder)\n\n def bpe(self, token: str) -> str:\n if token in self.cache:\n return self.cache[token]\n token = re.sub(\"([.,!?()])\", r\" \\1\", token)\n token = re.sub(\"(')\", r\" \\1 \", token)\n token = re.sub(\"\\s{2,}\", \" \", token)\n if \"\\n\" in token:\n token = token.replace(\"\\n\", \" __newln__\")\n\n tokens = token.split(\" \")\n words = []\n for token in tokens:\n if not len(token):\n continue\n\n token = token.lower()\n word = tuple(token)\n word = tuple(list(word[:-1]) + [word[-1] + \"</w>\"])\n pairs = get_pairs(word)\n\n if not pairs:\n words.append(token)\n continue\n\n while True:\n bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float(\"inf\")))\n if bigram not in self.bpe_ranks:\n break\n first, second = bigram\n new_word = []\n i = 0\n\n while i < len(word):\n try:\n j = word.index(first, i)\n new_word.extend(word[i:j])\n i = j\n except ValueError:\n new_word.extend(word[i:])\n break\n\n if word[i] == first and i < len(word) - 1 and word[i + 1] == second:\n new_word.append(first + second)\n i += 2\n else:\n new_word.append(word[i])\n i += 1\n new_word = tuple(new_word)\n word = new_word\n if len(word) == 1:\n break\n else:\n pairs = get_pairs(word)\n word = \"@@ \".join(word)\n word = word[:-4]\n\n self.cache[token] = word\n words.append(word)\n return \" \".join(words)\n\n def _tokenize(self, text: str) -> List[str]:\n \"\"\" Split a string into tokens using BPE.\"\"\"\n split_tokens = []\n\n words = re.findall(r\"\\S+\\n?\", text)\n\n for token in words:\n split_tokens.extend([t for t in self.bpe(token).split(\" \")])\n return split_tokens\n\n def _convert_token_to_id(self, token: str) -> int:\n \"\"\" Converts a token to an id using the vocab. 
\"\"\"\n token = token.lower()\n return self.encoder.get(token, self.encoder.get(self.unk_token))\n\n def _convert_id_to_token(self, index: int) -> str:\n \"\"\"Converts an index (integer) in a token (str) using the vocab.\"\"\"\n return self.decoder.get(index, self.unk_token)\n\n def convert_tokens_to_string(self, tokens: List[str]) -> str:\n \"\"\" Converts a sequence of tokens in a single string. \"\"\"\n out_string = \" \".join(tokens).replace(\"@@ \", \"\").strip()\n return out_string\n\n def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:\n if not os.path.isdir(save_directory):\n logger.error(\"Vocabulary path ({}) should be a directory\".format(save_directory))\n return\n vocab_file = os.path.join(\n save_directory, (filename_prefix + \"-\" if filename_prefix else \"\") + VOCAB_FILES_NAMES[\"vocab_file\"]\n )\n merge_file = os.path.join(\n save_directory, (filename_prefix + \"-\" if filename_prefix else \"\") + VOCAB_FILES_NAMES[\"merges_file\"]\n )\n\n with open(vocab_file, \"w\", encoding=\"utf-8\") as f:\n f.write(json.dumps(self.encoder, ensure_ascii=False))\n\n index = 0\n with open(merge_file, \"w\", encoding=\"utf-8\") as writer:\n writer.write(\"#version: 0.2\\n\")\n for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):\n if index != token_index:\n logger.warning(\n \"Saving vocabulary to {}: BPE merge indices are not consecutive.\"\n \" Please check that the tokenizer is not corrupted!\".format(merge_file)\n )\n index = token_index\n writer.write(\" \".join(bpe_tokens) + \"\\n\")\n index += 1\n\n return vocab_file, merge_file\n", "path": "src/transformers/tokenization_blenderbot.py"}]} | 3,449 | 127 |
gh_patches_debug_6420 | rasdani/github-patches | git_diff | deepset-ai__haystack-3170 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Join Nodes produce incorrect results if preceded by another JoinNode
**Describe the bug**
We have a pipeline made up of four Retrievers, whose results are combined in two JoinDocuments nodes, then another JoinDocuments node to combine the results of those prior JoinDocuments. Weights are specified for each Join node.
However, while the Retriever outputs are correct, the sequence of JoinNodes does not produce all of the resulting documents.
We got around this by using just one JoinDocuments node with all four inputs, but are reporting this bug because it could appear in other pipelines.
**Error message**
See example pipeline in https://github.com/JeffRisberg/HaystackPipelineTest. There is no error message; however, the results in the pipeline are missing documents.
**Expected behavior**
The resulting set of documents from any sequence of Joins should be the same as using JoinNode with all inputs.
**Additional context**
In the example pipeline at https://github.com/JeffRisberg/HaystackPipelineTest, I've simplified the problem down to using a simple InMemoryDocumentStore and a FilterRetriever, so that we have control over the documents. The results are clearly wrong.
**To Reproduce**
Download https://github.com/JeffRisberg/HaystackPipelineTest and run main.py. All 16 documents should be printed, but only 4 are.
**FAQ Check**
- [yes] Have you had a look at [our new FAQ page](https://haystack.deepset.ai/overview/faq)?
**System:**
- OS: MacOS
- GPU/CPU: CPU
- Haystack version (commit or version number): 1.6.0
- DocumentStore: InMemoryDocumentStore
- Reader: none
 - Retriever: FilterRetriever
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `haystack/nodes/base.py`
Content:
```
1 from __future__ import annotations
2 from typing import Any, Optional, Dict, List, Tuple, Union, Callable, Type
3
4 from copy import deepcopy
5 from abc import ABC, abstractmethod
6 from functools import wraps
7 import inspect
8 import logging
9
10 from haystack.schema import Document, MultiLabel
11 from haystack.errors import PipelineSchemaError
12 from haystack.telemetry import send_custom_event
13 from haystack.utils import args_to_kwargs
14
15
16 logger = logging.getLogger(__name__)
17
18
19 def exportable_to_yaml(init_func):
20 """
21 Decorator that saves the init parameters of a node that later can
22 be used with exporting YAML configuration of a Pipeline. We ensure
23 that only params passed to the __init__ function of the implementation
24 are saved, ignoring calls to the ancestors.
25 """
26
27 @wraps(init_func)
28 def wrapper_exportable_to_yaml(self, *args, **kwargs):
29
30 # Create the configuration dictionary if it doesn't exist yet
31 if not self._component_config:
32 self._component_config = {"params": {}, "type": type(self).__name__}
33
34 # NOTE: inner classes constructor's __qualname__ will include the outer class' name,
35 # e.g. "OuterClass.InnerClass.__init__". We then take only the last two parts of the
36 # fully qualified name, in the previous example that would be "InnerClass.__init__"
37 name_components = init_func.__qualname__.split(".")
38 # Reconstruct the inner class' __qualname__ and compare with the __qualname__ of the implementation class.
39 # If the number of components is wrong, let the IndexError bubble up, there's nothing we can do anyways.
40 if f"{name_components[-2]}.{name_components[-1]}" == f"{self.__class__.__name__}.{init_func.__name__}":
41 # Store all the input parameters in self._component_config
42 args_as_kwargs = args_to_kwargs(args, init_func)
43 params = {**args_as_kwargs, **kwargs}
44 for k, v in params.items():
45 self._component_config["params"][k] = v
46
47 # Call the actuall __init__ function with all the arguments
48 init_func(self, *args, **kwargs)
49
50 return wrapper_exportable_to_yaml
51
52
53 class BaseComponent(ABC):
54 """
55 A base class for implementing nodes in a Pipeline.
56 """
57
58 outgoing_edges: int
59 _subclasses: dict = {}
60 _component_config: dict = {}
61
62 def __init__(self):
63 # a small subset of the component's parameters is sent in an event after applying filters defined in haystack.telemetry.NonPrivateParameters
64 component_params = self._component_config.get("params", {})
65 send_custom_event(event=f"{type(self).__name__} initialized", payload=component_params)
66 self.outgoing_edges = self._calculate_outgoing_edges(component_params=component_params)
67
68 # __init_subclass__ is invoked when a subclass of BaseComponent is _imported_
69 # (not instantiated). It works approximately as a metaclass.
70 def __init_subclass__(cls, **kwargs):
71
72 super().__init_subclass__(**kwargs)
73
74 # Each component must specify the number of outgoing edges (= different outputs).
75 # During pipeline validation this number is compared to the requested number of output edges.
76 if not hasattr(cls, "outgoing_edges"):
77 raise ValueError(
78 "BaseComponent subclasses must define the outgoing_edges class attribute. "
79 "If this number depends on the component's parameters, make sure to override the _calculate_outgoing_edges() method. "
80 "See https://haystack.deepset.ai/pipeline_nodes/custom-nodes for more information."
81 )
82
83 # Automatically registers all the init parameters in
84 # an instance attribute called `_component_config`,
85 # used to save this component to YAML. See exportable_to_yaml()
86 cls.__init__ = exportable_to_yaml(cls.__init__)
87
88 # Keeps track of all available subclasses by name.
89 # Enables generic load() for all specific component implementations.
90 # Registers abstract classes and base classes too.
91 cls._subclasses[cls.__name__] = cls
92
93 @property
94 def name(self) -> Optional[str]:
95 return self._component_config.get("name", None)
96
97 @name.setter
98 def name(self, value: str):
99 self._component_config["name"] = value
100
101 @property
102 def utilized_components(self) -> List[BaseComponent]:
103 if "params" not in self._component_config:
104 return list()
105 return [param for param in self._component_config["params"].values() if isinstance(param, BaseComponent)]
106
107 @property
108 def type(self) -> str:
109 return self._component_config["type"]
110
111 def get_params(self, return_defaults: bool = False) -> Dict[str, Any]:
112 component_signature = dict(inspect.signature(self.__class__).parameters)
113 params: Dict[str, Any] = {}
114 for key, value in self._component_config["params"].items():
115 if value != component_signature[key].default or return_defaults:
116 params[key] = value
117 if return_defaults:
118 for key, param in component_signature.items():
119 if key not in params:
120 params[key] = param.default
121 return params
122
123 @classmethod
124 def get_subclass(cls, component_type: str) -> Type[BaseComponent]:
125 if component_type not in cls._subclasses.keys():
126 raise PipelineSchemaError(f"Haystack component with the name '{component_type}' not found.")
127 subclass = cls._subclasses[component_type]
128 return subclass
129
130 @classmethod
131 def _calculate_outgoing_edges(cls, component_params: Dict[str, Any]) -> int:
132 """
133 Returns the number of outgoing edges for an instance of the component class given its component params.
134
135 In some cases (e.g. RouteDocuments) the number of outgoing edges is not static but rather depends on its component params.
136 Setting the number of outgoing edges inside the constructor would not be sufficient, since it is already required for validating the pipeline when there is no instance yet.
137 Hence, this method is responsible for calculating the number of outgoing edges
138 - during pipeline validation
139 - to set the effective instance value of `outgoing_edges`.
140
141 Override this method if the number of outgoing edges depends on the component params.
142 If not overridden, returns the number of outgoing edges as defined in the component class.
143
144 :param component_params: parameters to pass to the __init__() of the component.
145 """
146 return cls.outgoing_edges
147
148 @classmethod
149 def _create_instance(cls, component_type: str, component_params: Dict[str, Any], name: Optional[str] = None):
150 """
151 Returns an instance of the given subclass of BaseComponent.
152
153 :param component_type: name of the component class to load.
154 :param component_params: parameters to pass to the __init__() of the component.
155 :param name: name of the component instance
156 """
157 subclass = cls.get_subclass(component_type)
158 instance = subclass(**component_params)
159 instance.name = name
160 return instance
161
162 @abstractmethod
163 def run(
164 self,
165 query: Optional[str] = None,
166 file_paths: Optional[List[str]] = None,
167 labels: Optional[MultiLabel] = None,
168 documents: Optional[List[Document]] = None,
169 meta: Optional[dict] = None,
170 ) -> Tuple[Dict, str]:
171 """
172 Method that will be executed when the node in the graph is called.
173
174 The argument that are passed can vary between different types of nodes
175 (e.g. retriever nodes expect different args than a reader node)
176
177
178 See an example for an implementation in haystack/reader/base/BaseReader.py
179 :return:
180 """
181 pass
182
183 @abstractmethod
184 def run_batch(
185 self,
186 queries: Optional[Union[str, List[str]]] = None,
187 file_paths: Optional[List[str]] = None,
188 labels: Optional[Union[MultiLabel, List[MultiLabel]]] = None,
189 documents: Optional[Union[List[Document], List[List[Document]]]] = None,
190 meta: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None,
191 params: Optional[dict] = None,
192 debug: Optional[bool] = None,
193 ):
194 pass
195
196 def _dispatch_run(self, **kwargs) -> Tuple[Dict, str]:
197 """
198 The Pipelines call this method when run() is executed. This method in turn executes the _dispatch_run_general()
199 method with the correct run method.
200 """
201 return self._dispatch_run_general(self.run, **kwargs)
202
203 def _dispatch_run_batch(self, **kwargs):
204 """
205 The Pipelines call this method when run_batch() is executed. This method in turn executes the
206 _dispatch_run_general() method with the correct run method.
207 """
208 return self._dispatch_run_general(self.run_batch, **kwargs)
209
210 def _dispatch_run_general(self, run_method: Callable, **kwargs):
211 """
212 This method takes care of the following:
213 - inspect run_method's signature to validate if all necessary arguments are available
214 - pop `debug` and sets them on the instance to control debug output
215 - call run_method with the corresponding arguments and gather output
216 - collate `_debug` information if present
217 - merge component output with the preceding output and pass it on to the subsequent Component in the Pipeline
218 """
219 arguments = deepcopy(kwargs)
220 params = arguments.get("params") or {}
221
222 run_signature_args = inspect.signature(run_method).parameters.keys()
223
224 run_params: Dict[str, Any] = {}
225 for key, value in params.items():
226 if key == self.name: # targeted params for this node
227 if isinstance(value, dict):
228 # Extract debug attributes
229 if "debug" in value.keys():
230 self.debug = value.pop("debug")
231
232 for _k, _v in value.items():
233 if _k not in run_signature_args:
234 raise Exception(f"Invalid parameter '{_k}' for the node '{self.name}'.")
235
236 run_params.update(**value)
237 elif key in run_signature_args: # global params
238 run_params[key] = value
239
240 run_inputs = {}
241 for key, value in arguments.items():
242 if key in run_signature_args:
243 run_inputs[key] = value
244
245 output, stream = run_method(**run_inputs, **run_params)
246
247 # Collect debug information
248 debug_info = {}
249 if getattr(self, "debug", None):
250 # Include input
251 debug_info["input"] = {**run_inputs, **run_params}
252 debug_info["input"]["debug"] = self.debug
253 # Include output, exclude _debug to avoid recursion
254 filtered_output = {key: value for key, value in output.items() if key != "_debug"}
255 debug_info["output"] = filtered_output
256 # Include custom debug info
257 custom_debug = output.get("_debug", {})
258 if custom_debug:
259 debug_info["runtime"] = custom_debug
260
261 # append _debug information from nodes
262 all_debug = arguments.get("_debug", {})
263 if debug_info:
264 all_debug[self.name] = debug_info
265 if all_debug:
266 output["_debug"] = all_debug
267
268 # add "extra" args that were not used by the node
269 for k, v in arguments.items():
270 if k not in output.keys():
271 output[k] = v
272
273 output["params"] = params
274 return output, stream
275
276
277 class RootNode(BaseComponent):
278 """
279 RootNode feeds inputs together with corresponding params to a Pipeline.
280 """
281
282 outgoing_edges = 1
283
284 def run(self): # type: ignore
285 return {}, "output_1"
286
287 def run_batch(self): # type: ignore
288 return {}, "output_1"
289
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/haystack/nodes/base.py b/haystack/nodes/base.py
--- a/haystack/nodes/base.py
+++ b/haystack/nodes/base.py
@@ -265,9 +265,9 @@
if all_debug:
output["_debug"] = all_debug
- # add "extra" args that were not used by the node
+ # add "extra" args that were not used by the node, but not the 'inputs' value
for k, v in arguments.items():
- if k not in output.keys():
+ if k not in output.keys() and k != "inputs":
output[k] = v
output["params"] = params
| {"golden_diff": "diff --git a/haystack/nodes/base.py b/haystack/nodes/base.py\n--- a/haystack/nodes/base.py\n+++ b/haystack/nodes/base.py\n@@ -265,9 +265,9 @@\n if all_debug:\n output[\"_debug\"] = all_debug\n \n- # add \"extra\" args that were not used by the node\n+ # add \"extra\" args that were not used by the node, but not the 'inputs' value\n for k, v in arguments.items():\n- if k not in output.keys():\n+ if k not in output.keys() and k != \"inputs\":\n output[k] = v\n \n output[\"params\"] = params\n", "issue": "Join Nodes produce incorrect results if preceded by another JoinNode\n**Describe the bug**\r\nWe have a pipeline made up of four Retrievers, whose results are combined in two JoinDocuments nodes, then another JoinDocuments node to combine the results of those prior JoinDocuments. Weights are specified for each Join node.\r\n\r\nHowever, while the Retriever outputs are correct, the sequence of JoinNodes does not product all of the resulting documents.\r\n\r\nWe got around this by using just one JoinDocument node with all four inputs, but are reporting this bug because it could appear in other pipelines.\r\n\r\n**Error message**\r\nSee example pipeline in https://github.com/JeffRisberg/HaystackPipelineTest. There is no error message, however the results in the pipeline are missing documents.\r\n\r\n**Expected behavior**\r\nThe resulting set of documents from any sequence of Joins should be the same as using JoinNode with all inputs.\r\n\r\n**Additional context**\r\nIn the example pipeline at https://github.com/JeffRisberg/HaystackPipelineTest, I've simplified the problem down to using a simple InMemoryDocumentStore and a FilterRetriever, so that we have control over the documents. The results are clearly wrong.\r\n\r\n**To Reproduce**\r\nDownload https://github.com/JeffRisberg/HaystackPipelineTest and run main.py. All 16 documents should be printed, but only 4 are.\r\n\r\n**FAQ Check**\r\n- [yes] Have you had a look at [our new FAQ page](https://haystack.deepset.ai/overview/faq)?\r\n\r\n**System:**\r\n - OS: MacOS\r\n - GPU/CPU: CPU\r\n - Haystack version (commit or version number): 1.6.0\r\n - DocumentStore: InMemoryDocumentStore\r\n - Reader: none\r\n - Retriever: FilterRetreiver\r\n\n", "before_files": [{"content": "from __future__ import annotations\nfrom typing import Any, Optional, Dict, List, Tuple, Union, Callable, Type\n\nfrom copy import deepcopy\nfrom abc import ABC, abstractmethod\nfrom functools import wraps\nimport inspect\nimport logging\n\nfrom haystack.schema import Document, MultiLabel\nfrom haystack.errors import PipelineSchemaError\nfrom haystack.telemetry import send_custom_event\nfrom haystack.utils import args_to_kwargs\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef exportable_to_yaml(init_func):\n \"\"\"\n Decorator that saves the init parameters of a node that later can\n be used with exporting YAML configuration of a Pipeline. We ensure\n that only params passed to the __init__ function of the implementation\n are saved, ignoring calls to the ancestors.\n \"\"\"\n\n @wraps(init_func)\n def wrapper_exportable_to_yaml(self, *args, **kwargs):\n\n # Create the configuration dictionary if it doesn't exist yet\n if not self._component_config:\n self._component_config = {\"params\": {}, \"type\": type(self).__name__}\n\n # NOTE: inner classes constructor's __qualname__ will include the outer class' name,\n # e.g. \"OuterClass.InnerClass.__init__\". 
We then take only the last two parts of the\n # fully qualified name, in the previous example that would be \"InnerClass.__init__\"\n name_components = init_func.__qualname__.split(\".\")\n # Reconstruct the inner class' __qualname__ and compare with the __qualname__ of the implementation class.\n # If the number of components is wrong, let the IndexError bubble up, there's nothing we can do anyways.\n if f\"{name_components[-2]}.{name_components[-1]}\" == f\"{self.__class__.__name__}.{init_func.__name__}\":\n # Store all the input parameters in self._component_config\n args_as_kwargs = args_to_kwargs(args, init_func)\n params = {**args_as_kwargs, **kwargs}\n for k, v in params.items():\n self._component_config[\"params\"][k] = v\n\n # Call the actuall __init__ function with all the arguments\n init_func(self, *args, **kwargs)\n\n return wrapper_exportable_to_yaml\n\n\nclass BaseComponent(ABC):\n \"\"\"\n A base class for implementing nodes in a Pipeline.\n \"\"\"\n\n outgoing_edges: int\n _subclasses: dict = {}\n _component_config: dict = {}\n\n def __init__(self):\n # a small subset of the component's parameters is sent in an event after applying filters defined in haystack.telemetry.NonPrivateParameters\n component_params = self._component_config.get(\"params\", {})\n send_custom_event(event=f\"{type(self).__name__} initialized\", payload=component_params)\n self.outgoing_edges = self._calculate_outgoing_edges(component_params=component_params)\n\n # __init_subclass__ is invoked when a subclass of BaseComponent is _imported_\n # (not instantiated). It works approximately as a metaclass.\n def __init_subclass__(cls, **kwargs):\n\n super().__init_subclass__(**kwargs)\n\n # Each component must specify the number of outgoing edges (= different outputs).\n # During pipeline validation this number is compared to the requested number of output edges.\n if not hasattr(cls, \"outgoing_edges\"):\n raise ValueError(\n \"BaseComponent subclasses must define the outgoing_edges class attribute. \"\n \"If this number depends on the component's parameters, make sure to override the _calculate_outgoing_edges() method. \"\n \"See https://haystack.deepset.ai/pipeline_nodes/custom-nodes for more information.\"\n )\n\n # Automatically registers all the init parameters in\n # an instance attribute called `_component_config`,\n # used to save this component to YAML. 
See exportable_to_yaml()\n cls.__init__ = exportable_to_yaml(cls.__init__)\n\n # Keeps track of all available subclasses by name.\n # Enables generic load() for all specific component implementations.\n # Registers abstract classes and base classes too.\n cls._subclasses[cls.__name__] = cls\n\n @property\n def name(self) -> Optional[str]:\n return self._component_config.get(\"name\", None)\n\n @name.setter\n def name(self, value: str):\n self._component_config[\"name\"] = value\n\n @property\n def utilized_components(self) -> List[BaseComponent]:\n if \"params\" not in self._component_config:\n return list()\n return [param for param in self._component_config[\"params\"].values() if isinstance(param, BaseComponent)]\n\n @property\n def type(self) -> str:\n return self._component_config[\"type\"]\n\n def get_params(self, return_defaults: bool = False) -> Dict[str, Any]:\n component_signature = dict(inspect.signature(self.__class__).parameters)\n params: Dict[str, Any] = {}\n for key, value in self._component_config[\"params\"].items():\n if value != component_signature[key].default or return_defaults:\n params[key] = value\n if return_defaults:\n for key, param in component_signature.items():\n if key not in params:\n params[key] = param.default\n return params\n\n @classmethod\n def get_subclass(cls, component_type: str) -> Type[BaseComponent]:\n if component_type not in cls._subclasses.keys():\n raise PipelineSchemaError(f\"Haystack component with the name '{component_type}' not found.\")\n subclass = cls._subclasses[component_type]\n return subclass\n\n @classmethod\n def _calculate_outgoing_edges(cls, component_params: Dict[str, Any]) -> int:\n \"\"\"\n Returns the number of outgoing edges for an instance of the component class given its component params.\n\n In some cases (e.g. RouteDocuments) the number of outgoing edges is not static but rather depends on its component params.\n Setting the number of outgoing edges inside the constructor would not be sufficient, since it is already required for validating the pipeline when there is no instance yet.\n Hence, this method is responsible for calculating the number of outgoing edges\n - during pipeline validation\n - to set the effective instance value of `outgoing_edges`.\n\n Override this method if the number of outgoing edges depends on the component params.\n If not overridden, returns the number of outgoing edges as defined in the component class.\n\n :param component_params: parameters to pass to the __init__() of the component.\n \"\"\"\n return cls.outgoing_edges\n\n @classmethod\n def _create_instance(cls, component_type: str, component_params: Dict[str, Any], name: Optional[str] = None):\n \"\"\"\n Returns an instance of the given subclass of BaseComponent.\n\n :param component_type: name of the component class to load.\n :param component_params: parameters to pass to the __init__() of the component.\n :param name: name of the component instance\n \"\"\"\n subclass = cls.get_subclass(component_type)\n instance = subclass(**component_params)\n instance.name = name\n return instance\n\n @abstractmethod\n def run(\n self,\n query: Optional[str] = None,\n file_paths: Optional[List[str]] = None,\n labels: Optional[MultiLabel] = None,\n documents: Optional[List[Document]] = None,\n meta: Optional[dict] = None,\n ) -> Tuple[Dict, str]:\n \"\"\"\n Method that will be executed when the node in the graph is called.\n\n The argument that are passed can vary between different types of nodes\n (e.g. 
retriever nodes expect different args than a reader node)\n\n\n See an example for an implementation in haystack/reader/base/BaseReader.py\n :return:\n \"\"\"\n pass\n\n @abstractmethod\n def run_batch(\n self,\n queries: Optional[Union[str, List[str]]] = None,\n file_paths: Optional[List[str]] = None,\n labels: Optional[Union[MultiLabel, List[MultiLabel]]] = None,\n documents: Optional[Union[List[Document], List[List[Document]]]] = None,\n meta: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None,\n params: Optional[dict] = None,\n debug: Optional[bool] = None,\n ):\n pass\n\n def _dispatch_run(self, **kwargs) -> Tuple[Dict, str]:\n \"\"\"\n The Pipelines call this method when run() is executed. This method in turn executes the _dispatch_run_general()\n method with the correct run method.\n \"\"\"\n return self._dispatch_run_general(self.run, **kwargs)\n\n def _dispatch_run_batch(self, **kwargs):\n \"\"\"\n The Pipelines call this method when run_batch() is executed. This method in turn executes the\n _dispatch_run_general() method with the correct run method.\n \"\"\"\n return self._dispatch_run_general(self.run_batch, **kwargs)\n\n def _dispatch_run_general(self, run_method: Callable, **kwargs):\n \"\"\"\n This method takes care of the following:\n - inspect run_method's signature to validate if all necessary arguments are available\n - pop `debug` and sets them on the instance to control debug output\n - call run_method with the corresponding arguments and gather output\n - collate `_debug` information if present\n - merge component output with the preceding output and pass it on to the subsequent Component in the Pipeline\n \"\"\"\n arguments = deepcopy(kwargs)\n params = arguments.get(\"params\") or {}\n\n run_signature_args = inspect.signature(run_method).parameters.keys()\n\n run_params: Dict[str, Any] = {}\n for key, value in params.items():\n if key == self.name: # targeted params for this node\n if isinstance(value, dict):\n # Extract debug attributes\n if \"debug\" in value.keys():\n self.debug = value.pop(\"debug\")\n\n for _k, _v in value.items():\n if _k not in run_signature_args:\n raise Exception(f\"Invalid parameter '{_k}' for the node '{self.name}'.\")\n\n run_params.update(**value)\n elif key in run_signature_args: # global params\n run_params[key] = value\n\n run_inputs = {}\n for key, value in arguments.items():\n if key in run_signature_args:\n run_inputs[key] = value\n\n output, stream = run_method(**run_inputs, **run_params)\n\n # Collect debug information\n debug_info = {}\n if getattr(self, \"debug\", None):\n # Include input\n debug_info[\"input\"] = {**run_inputs, **run_params}\n debug_info[\"input\"][\"debug\"] = self.debug\n # Include output, exclude _debug to avoid recursion\n filtered_output = {key: value for key, value in output.items() if key != \"_debug\"}\n debug_info[\"output\"] = filtered_output\n # Include custom debug info\n custom_debug = output.get(\"_debug\", {})\n if custom_debug:\n debug_info[\"runtime\"] = custom_debug\n\n # append _debug information from nodes\n all_debug = arguments.get(\"_debug\", {})\n if debug_info:\n all_debug[self.name] = debug_info\n if all_debug:\n output[\"_debug\"] = all_debug\n\n # add \"extra\" args that were not used by the node\n for k, v in arguments.items():\n if k not in output.keys():\n output[k] = v\n\n output[\"params\"] = params\n return output, stream\n\n\nclass RootNode(BaseComponent):\n \"\"\"\n RootNode feeds inputs together with corresponding params to a Pipeline.\n \"\"\"\n\n outgoing_edges 
= 1\n\n def run(self): # type: ignore\n return {}, \"output_1\"\n\n def run_batch(self): # type: ignore\n return {}, \"output_1\"\n", "path": "haystack/nodes/base.py"}], "after_files": [{"content": "from __future__ import annotations\nfrom typing import Any, Optional, Dict, List, Tuple, Union, Callable, Type\n\nfrom copy import deepcopy\nfrom abc import ABC, abstractmethod\nfrom functools import wraps\nimport inspect\nimport logging\n\nfrom haystack.schema import Document, MultiLabel\nfrom haystack.errors import PipelineSchemaError\nfrom haystack.telemetry import send_custom_event\nfrom haystack.utils import args_to_kwargs\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef exportable_to_yaml(init_func):\n \"\"\"\n Decorator that saves the init parameters of a node that later can\n be used with exporting YAML configuration of a Pipeline. We ensure\n that only params passed to the __init__ function of the implementation\n are saved, ignoring calls to the ancestors.\n \"\"\"\n\n @wraps(init_func)\n def wrapper_exportable_to_yaml(self, *args, **kwargs):\n\n # Create the configuration dictionary if it doesn't exist yet\n if not self._component_config:\n self._component_config = {\"params\": {}, \"type\": type(self).__name__}\n\n # NOTE: inner classes constructor's __qualname__ will include the outer class' name,\n # e.g. \"OuterClass.InnerClass.__init__\". We then take only the last two parts of the\n # fully qualified name, in the previous example that would be \"InnerClass.__init__\"\n name_components = init_func.__qualname__.split(\".\")\n # Reconstruct the inner class' __qualname__ and compare with the __qualname__ of the implementation class.\n # If the number of components is wrong, let the IndexError bubble up, there's nothing we can do anyways.\n if f\"{name_components[-2]}.{name_components[-1]}\" == f\"{self.__class__.__name__}.{init_func.__name__}\":\n # Store all the input parameters in self._component_config\n args_as_kwargs = args_to_kwargs(args, init_func)\n params = {**args_as_kwargs, **kwargs}\n for k, v in params.items():\n self._component_config[\"params\"][k] = v\n\n # Call the actuall __init__ function with all the arguments\n init_func(self, *args, **kwargs)\n\n return wrapper_exportable_to_yaml\n\n\nclass BaseComponent(ABC):\n \"\"\"\n A base class for implementing nodes in a Pipeline.\n \"\"\"\n\n outgoing_edges: int\n _subclasses: dict = {}\n _component_config: dict = {}\n\n def __init__(self):\n # a small subset of the component's parameters is sent in an event after applying filters defined in haystack.telemetry.NonPrivateParameters\n component_params = self._component_config.get(\"params\", {})\n send_custom_event(event=f\"{type(self).__name__} initialized\", payload=component_params)\n self.outgoing_edges = self._calculate_outgoing_edges(component_params=component_params)\n\n # __init_subclass__ is invoked when a subclass of BaseComponent is _imported_\n # (not instantiated). It works approximately as a metaclass.\n def __init_subclass__(cls, **kwargs):\n\n super().__init_subclass__(**kwargs)\n\n # Each component must specify the number of outgoing edges (= different outputs).\n # During pipeline validation this number is compared to the requested number of output edges.\n if not hasattr(cls, \"outgoing_edges\"):\n raise ValueError(\n \"BaseComponent subclasses must define the outgoing_edges class attribute. \"\n \"If this number depends on the component's parameters, make sure to override the _calculate_outgoing_edges() method. 
\"\n \"See https://haystack.deepset.ai/pipeline_nodes/custom-nodes for more information.\"\n )\n\n # Automatically registers all the init parameters in\n # an instance attribute called `_component_config`,\n # used to save this component to YAML. See exportable_to_yaml()\n cls.__init__ = exportable_to_yaml(cls.__init__)\n\n # Keeps track of all available subclasses by name.\n # Enables generic load() for all specific component implementations.\n # Registers abstract classes and base classes too.\n cls._subclasses[cls.__name__] = cls\n\n @property\n def name(self) -> Optional[str]:\n return self._component_config.get(\"name\", None)\n\n @name.setter\n def name(self, value: str):\n self._component_config[\"name\"] = value\n\n @property\n def utilized_components(self) -> List[BaseComponent]:\n if \"params\" not in self._component_config:\n return list()\n return [param for param in self._component_config[\"params\"].values() if isinstance(param, BaseComponent)]\n\n @property\n def type(self) -> str:\n return self._component_config[\"type\"]\n\n def get_params(self, return_defaults: bool = False) -> Dict[str, Any]:\n component_signature = dict(inspect.signature(self.__class__).parameters)\n params: Dict[str, Any] = {}\n for key, value in self._component_config[\"params\"].items():\n if value != component_signature[key].default or return_defaults:\n params[key] = value\n if return_defaults:\n for key, param in component_signature.items():\n if key not in params:\n params[key] = param.default\n return params\n\n @classmethod\n def get_subclass(cls, component_type: str) -> Type[BaseComponent]:\n if component_type not in cls._subclasses.keys():\n raise PipelineSchemaError(f\"Haystack component with the name '{component_type}' not found.\")\n subclass = cls._subclasses[component_type]\n return subclass\n\n @classmethod\n def _calculate_outgoing_edges(cls, component_params: Dict[str, Any]) -> int:\n \"\"\"\n Returns the number of outgoing edges for an instance of the component class given its component params.\n\n In some cases (e.g. 
RouteDocuments) the number of outgoing edges is not static but rather depends on its component params.\n Setting the number of outgoing edges inside the constructor would not be sufficient, since it is already required for validating the pipeline when there is no instance yet.\n Hence, this method is responsible for calculating the number of outgoing edges\n - during pipeline validation\n - to set the effective instance value of `outgoing_edges`.\n\n Override this method if the number of outgoing edges depends on the component params.\n If not overridden, returns the number of outgoing edges as defined in the component class.\n\n :param component_params: parameters to pass to the __init__() of the component.\n \"\"\"\n return cls.outgoing_edges\n\n @classmethod\n def _create_instance(cls, component_type: str, component_params: Dict[str, Any], name: Optional[str] = None):\n \"\"\"\n Returns an instance of the given subclass of BaseComponent.\n\n :param component_type: name of the component class to load.\n :param component_params: parameters to pass to the __init__() of the component.\n :param name: name of the component instance\n \"\"\"\n subclass = cls.get_subclass(component_type)\n instance = subclass(**component_params)\n instance.name = name\n return instance\n\n @abstractmethod\n def run(\n self,\n query: Optional[str] = None,\n file_paths: Optional[List[str]] = None,\n labels: Optional[MultiLabel] = None,\n documents: Optional[List[Document]] = None,\n meta: Optional[dict] = None,\n ) -> Tuple[Dict, str]:\n \"\"\"\n Method that will be executed when the node in the graph is called.\n\n The argument that are passed can vary between different types of nodes\n (e.g. retriever nodes expect different args than a reader node)\n\n\n See an example for an implementation in haystack/reader/base/BaseReader.py\n :return:\n \"\"\"\n pass\n\n @abstractmethod\n def run_batch(\n self,\n queries: Optional[Union[str, List[str]]] = None,\n file_paths: Optional[List[str]] = None,\n labels: Optional[Union[MultiLabel, List[MultiLabel]]] = None,\n documents: Optional[Union[List[Document], List[List[Document]]]] = None,\n meta: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None,\n params: Optional[dict] = None,\n debug: Optional[bool] = None,\n ):\n pass\n\n def _dispatch_run(self, **kwargs) -> Tuple[Dict, str]:\n \"\"\"\n The Pipelines call this method when run() is executed. This method in turn executes the _dispatch_run_general()\n method with the correct run method.\n \"\"\"\n return self._dispatch_run_general(self.run, **kwargs)\n\n def _dispatch_run_batch(self, **kwargs):\n \"\"\"\n The Pipelines call this method when run_batch() is executed. 
This method in turn executes the\n _dispatch_run_general() method with the correct run method.\n \"\"\"\n return self._dispatch_run_general(self.run_batch, **kwargs)\n\n def _dispatch_run_general(self, run_method: Callable, **kwargs):\n \"\"\"\n This method takes care of the following:\n - inspect run_method's signature to validate if all necessary arguments are available\n - pop `debug` and sets them on the instance to control debug output\n - call run_method with the corresponding arguments and gather output\n - collate `_debug` information if present\n - merge component output with the preceding output and pass it on to the subsequent Component in the Pipeline\n \"\"\"\n arguments = deepcopy(kwargs)\n params = arguments.get(\"params\") or {}\n\n run_signature_args = inspect.signature(run_method).parameters.keys()\n\n run_params: Dict[str, Any] = {}\n for key, value in params.items():\n if key == self.name: # targeted params for this node\n if isinstance(value, dict):\n # Extract debug attributes\n if \"debug\" in value.keys():\n self.debug = value.pop(\"debug\")\n\n for _k, _v in value.items():\n if _k not in run_signature_args:\n raise Exception(f\"Invalid parameter '{_k}' for the node '{self.name}'.\")\n\n run_params.update(**value)\n elif key in run_signature_args: # global params\n run_params[key] = value\n\n run_inputs = {}\n for key, value in arguments.items():\n if key in run_signature_args:\n run_inputs[key] = value\n\n output, stream = run_method(**run_inputs, **run_params)\n\n # Collect debug information\n debug_info = {}\n if getattr(self, \"debug\", None):\n # Include input\n debug_info[\"input\"] = {**run_inputs, **run_params}\n debug_info[\"input\"][\"debug\"] = self.debug\n # Include output, exclude _debug to avoid recursion\n filtered_output = {key: value for key, value in output.items() if key != \"_debug\"}\n debug_info[\"output\"] = filtered_output\n # Include custom debug info\n custom_debug = output.get(\"_debug\", {})\n if custom_debug:\n debug_info[\"runtime\"] = custom_debug\n\n # append _debug information from nodes\n all_debug = arguments.get(\"_debug\", {})\n if debug_info:\n all_debug[self.name] = debug_info\n if all_debug:\n output[\"_debug\"] = all_debug\n\n # add \"extra\" args that were not used by the node, but not the 'inputs' value\n for k, v in arguments.items():\n if k not in output.keys() and k != \"inputs\":\n output[k] = v\n\n output[\"params\"] = params\n return output, stream\n\n\nclass RootNode(BaseComponent):\n \"\"\"\n RootNode feeds inputs together with corresponding params to a Pipeline.\n \"\"\"\n\n outgoing_edges = 1\n\n def run(self): # type: ignore\n return {}, \"output_1\"\n\n def run_batch(self): # type: ignore\n return {}, \"output_1\"\n", "path": "haystack/nodes/base.py"}]} | 3,951 | 156 |
gh_patches_debug_27520 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-contrib-378 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
OT Trace propagator fails with exception when no headers are present
**Describe your environment**
Latest version (main branch) of both opentelemetry-python and opentelemetry-python-contrib
**Steps to reproduce**
```python
from opentelemetry.propagators.ot_trace import OTTracePropagator
from opentelemetry.propagators.textmap import DictGetter
getter = DictGetter()
carrier = {}
propagator = OTTracePropagator()
propagator.extract(getter, carrier)
```
results in:
```
Traceback (most recent call last):
File "repro.py", line 8, in <module>
propagator.extract(getter, {})
File "(...)/opentelemetry-python-contrib/propagator/opentelemetry-propagator-ot-trace/src/opentelemetry/propagators/ot_trace/__init__.py", line 76, in extract
and _valid_extract_traceid.fullmatch(traceid) is not None
TypeError: expected string or bytes-like object
```
**What is the expected behavior?**
Propagator extraction returns an unmodified context due to lack of relevant headers in the carrier.
> If a value can not be parsed from the carrier, for a cross-cutting concern, the implementation MUST NOT throw an exception and MUST NOT store a new value in the Context, in order to preserve any previously existing valid value.
_from [OTel specification](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/context/api-propagators.md#extract)_
**What is the actual behavior?**
Propagator extraction fails with an exception when no headers are present in the carrier.
**Additional context**
N/A
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `propagator/opentelemetry-propagator-ot-trace/src/opentelemetry/propagators/ot_trace/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from re import compile as re_compile
16 from typing import Iterable, Optional
17
18 from opentelemetry.baggage import get_all, set_baggage
19 from opentelemetry.context import Context
20 from opentelemetry.propagators.textmap import (
21 Getter,
22 Setter,
23 TextMapPropagator,
24 TextMapPropagatorT,
25 )
26 from opentelemetry.trace import (
27 INVALID_SPAN_ID,
28 INVALID_TRACE_ID,
29 NonRecordingSpan,
30 SpanContext,
31 TraceFlags,
32 get_current_span,
33 set_span_in_context,
34 )
35
36 OT_TRACE_ID_HEADER = "ot-tracer-traceid"
37 OT_SPAN_ID_HEADER = "ot-tracer-spanid"
38 OT_SAMPLED_HEADER = "ot-tracer-sampled"
39 OT_BAGGAGE_PREFIX = "ot-baggage-"
40
41 _valid_header_name = re_compile(r"[\w_^`!#$%&'*+.|~]+")
42 _valid_header_value = re_compile(r"[\t\x20-\x7e\x80-\xff]+")
43 _valid_extract_traceid = re_compile(r"[0-9a-f]{1,32}")
44 _valid_extract_spanid = re_compile(r"[0-9a-f]{1,16}")
45
46
47 class OTTracePropagator(TextMapPropagator):
48 """Propagator for the OTTrace HTTP header format"""
49
50 def extract(
51 self,
52 getter: Getter[TextMapPropagatorT],
53 carrier: TextMapPropagatorT,
54 context: Optional[Context] = None,
55 ) -> Context:
56
57 traceid = _extract_first_element(
58 getter.get(carrier, OT_TRACE_ID_HEADER)
59 )
60
61 spanid = _extract_first_element(getter.get(carrier, OT_SPAN_ID_HEADER))
62
63 sampled = _extract_first_element(
64 getter.get(carrier, OT_SAMPLED_HEADER)
65 )
66
67 if sampled == "true":
68 traceflags = TraceFlags.SAMPLED
69 else:
70 traceflags = TraceFlags.DEFAULT
71
72 if (
73 traceid != INVALID_TRACE_ID
74 and _valid_extract_traceid.fullmatch(traceid) is not None
75 and spanid != INVALID_SPAN_ID
76 and _valid_extract_spanid.fullmatch(spanid) is not None
77 ):
78 context = set_span_in_context(
79 NonRecordingSpan(
80 SpanContext(
81 trace_id=int(traceid, 16),
82 span_id=int(spanid, 16),
83 is_remote=True,
84 trace_flags=traceflags,
85 )
86 ),
87 context,
88 )
89
90 baggage = get_all(context) or {}
91
92 for key in getter.keys(carrier):
93
94 if not key.startswith(OT_BAGGAGE_PREFIX):
95 continue
96
97 baggage[
98 key[len(OT_BAGGAGE_PREFIX) :]
99 ] = _extract_first_element(getter.get(carrier, key))
100
101 for key, value in baggage.items():
102 context = set_baggage(key, value, context)
103
104 return context
105
106 def inject(
107 self,
108 set_in_carrier: Setter[TextMapPropagatorT],
109 carrier: TextMapPropagatorT,
110 context: Optional[Context] = None,
111 ) -> None:
112
113 span_context = get_current_span(context).get_span_context()
114
115 if span_context.trace_id == INVALID_TRACE_ID:
116 return
117
118 set_in_carrier(
119 carrier, OT_TRACE_ID_HEADER, hex(span_context.trace_id)[2:][-16:]
120 )
121 set_in_carrier(
122 carrier, OT_SPAN_ID_HEADER, hex(span_context.span_id)[2:][-16:],
123 )
124
125 if span_context.trace_flags == TraceFlags.SAMPLED:
126 traceflags = "true"
127 else:
128 traceflags = "false"
129
130 set_in_carrier(carrier, OT_SAMPLED_HEADER, traceflags)
131
132 baggage = get_all(context)
133
134 if not baggage:
135 return
136
137 for header_name, header_value in baggage.items():
138
139 if (
140 _valid_header_name.fullmatch(header_name) is None
141 or _valid_header_value.fullmatch(header_value) is None
142 ):
143 continue
144
145 set_in_carrier(
146 carrier,
147 "".join([OT_BAGGAGE_PREFIX, header_name]),
148 header_value,
149 )
150
151 @property
152 def fields(self):
153 """Returns a set with the fields set in `inject`.
154
155 See
156 `opentelemetry.propagators.textmap.TextMapPropagator.fields`
157 """
158 return {
159 OT_TRACE_ID_HEADER,
160 OT_SPAN_ID_HEADER,
161 OT_SAMPLED_HEADER,
162 }
163
164
165 def _extract_first_element(
166 items: Iterable[TextMapPropagatorT],
167 ) -> Optional[TextMapPropagatorT]:
168 if items is None:
169 return None
170 return next(iter(items), None)
171
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/propagator/opentelemetry-propagator-ot-trace/src/opentelemetry/propagators/ot_trace/__init__.py b/propagator/opentelemetry-propagator-ot-trace/src/opentelemetry/propagators/ot_trace/__init__.py
--- a/propagator/opentelemetry-propagator-ot-trace/src/opentelemetry/propagators/ot_trace/__init__.py
+++ b/propagator/opentelemetry-propagator-ot-trace/src/opentelemetry/propagators/ot_trace/__init__.py
@@ -13,7 +13,7 @@
# limitations under the License.
from re import compile as re_compile
-from typing import Iterable, Optional
+from typing import Any, Iterable, Optional
from opentelemetry.baggage import get_all, set_baggage
from opentelemetry.context import Context
@@ -55,10 +55,12 @@
) -> Context:
traceid = _extract_first_element(
- getter.get(carrier, OT_TRACE_ID_HEADER)
+ getter.get(carrier, OT_TRACE_ID_HEADER), INVALID_TRACE_ID
)
- spanid = _extract_first_element(getter.get(carrier, OT_SPAN_ID_HEADER))
+ spanid = _extract_first_element(
+ getter.get(carrier, OT_SPAN_ID_HEADER), INVALID_SPAN_ID
+ )
sampled = _extract_first_element(
getter.get(carrier, OT_SAMPLED_HEADER)
@@ -163,8 +165,8 @@
def _extract_first_element(
- items: Iterable[TextMapPropagatorT],
+ items: Iterable[TextMapPropagatorT], default: Any = None,
) -> Optional[TextMapPropagatorT]:
if items is None:
- return None
+ return default
return next(iter(items), None)
| {"golden_diff": "diff --git a/propagator/opentelemetry-propagator-ot-trace/src/opentelemetry/propagators/ot_trace/__init__.py b/propagator/opentelemetry-propagator-ot-trace/src/opentelemetry/propagators/ot_trace/__init__.py\n--- a/propagator/opentelemetry-propagator-ot-trace/src/opentelemetry/propagators/ot_trace/__init__.py\n+++ b/propagator/opentelemetry-propagator-ot-trace/src/opentelemetry/propagators/ot_trace/__init__.py\n@@ -13,7 +13,7 @@\n # limitations under the License.\n \n from re import compile as re_compile\n-from typing import Iterable, Optional\n+from typing import Any, Iterable, Optional\n \n from opentelemetry.baggage import get_all, set_baggage\n from opentelemetry.context import Context\n@@ -55,10 +55,12 @@\n ) -> Context:\n \n traceid = _extract_first_element(\n- getter.get(carrier, OT_TRACE_ID_HEADER)\n+ getter.get(carrier, OT_TRACE_ID_HEADER), INVALID_TRACE_ID\n )\n \n- spanid = _extract_first_element(getter.get(carrier, OT_SPAN_ID_HEADER))\n+ spanid = _extract_first_element(\n+ getter.get(carrier, OT_SPAN_ID_HEADER), INVALID_SPAN_ID\n+ )\n \n sampled = _extract_first_element(\n getter.get(carrier, OT_SAMPLED_HEADER)\n@@ -163,8 +165,8 @@\n \n \n def _extract_first_element(\n- items: Iterable[TextMapPropagatorT],\n+ items: Iterable[TextMapPropagatorT], default: Any = None,\n ) -> Optional[TextMapPropagatorT]:\n if items is None:\n- return None\n+ return default\n return next(iter(items), None)\n", "issue": "OT Trace propagator fails with exception when no headers are present\n**Describe your environment**\r\nLatest version (main branch) of both opentelemetry-python and opentelemetry-python-contrib\r\n\r\n**Steps to reproduce**\r\n```python\r\nfrom opentelemetry.propagators.ot_trace import OTTracePropagator\r\nfrom opentelemetry.propagators.textmap import DictGetter\r\n\r\ngetter = DictGetter()\r\ncarrier = {}\r\n\r\npropagator = OTTracePropagator()\r\npropagator.extract(getter, carrier)\r\n```\r\nresults in:\r\n```\r\nTraceback (most recent call last):\r\n File \"repro.py\", line 8, in <module>\r\n propagator.extract(getter, {})\r\n File \"(...)/opentelemetry-python-contrib/propagator/opentelemetry-propagator-ot-trace/src/opentelemetry/propagators/ot_trace/__init__.py\", line 76, in extract\r\n and _valid_extract_traceid.fullmatch(traceid) is not None\r\nTypeError: expected string or bytes-like object\r\n```\r\n**What is the expected behavior?**\r\nPropagator extraction returns an unmodified context due to lack of relevant headers in the carrier.\r\n> If a value can not be parsed from the carrier, for a cross-cutting concern, the implementation MUST NOT throw an exception and MUST NOT store a new value in the Context, in order to preserve any previously existing valid value.\r\n\r\n_from [OTel specification](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/context/api-propagators.md#extract)_\r\n\r\n**What is the actual behavior?**\r\nPropagator extraction fails with an exception when no headers are present in the carrier.\r\n\r\n**Additional context**\r\nN/A\r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR 
CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom re import compile as re_compile\nfrom typing import Iterable, Optional\n\nfrom opentelemetry.baggage import get_all, set_baggage\nfrom opentelemetry.context import Context\nfrom opentelemetry.propagators.textmap import (\n Getter,\n Setter,\n TextMapPropagator,\n TextMapPropagatorT,\n)\nfrom opentelemetry.trace import (\n INVALID_SPAN_ID,\n INVALID_TRACE_ID,\n NonRecordingSpan,\n SpanContext,\n TraceFlags,\n get_current_span,\n set_span_in_context,\n)\n\nOT_TRACE_ID_HEADER = \"ot-tracer-traceid\"\nOT_SPAN_ID_HEADER = \"ot-tracer-spanid\"\nOT_SAMPLED_HEADER = \"ot-tracer-sampled\"\nOT_BAGGAGE_PREFIX = \"ot-baggage-\"\n\n_valid_header_name = re_compile(r\"[\\w_^`!#$%&'*+.|~]+\")\n_valid_header_value = re_compile(r\"[\\t\\x20-\\x7e\\x80-\\xff]+\")\n_valid_extract_traceid = re_compile(r\"[0-9a-f]{1,32}\")\n_valid_extract_spanid = re_compile(r\"[0-9a-f]{1,16}\")\n\n\nclass OTTracePropagator(TextMapPropagator):\n \"\"\"Propagator for the OTTrace HTTP header format\"\"\"\n\n def extract(\n self,\n getter: Getter[TextMapPropagatorT],\n carrier: TextMapPropagatorT,\n context: Optional[Context] = None,\n ) -> Context:\n\n traceid = _extract_first_element(\n getter.get(carrier, OT_TRACE_ID_HEADER)\n )\n\n spanid = _extract_first_element(getter.get(carrier, OT_SPAN_ID_HEADER))\n\n sampled = _extract_first_element(\n getter.get(carrier, OT_SAMPLED_HEADER)\n )\n\n if sampled == \"true\":\n traceflags = TraceFlags.SAMPLED\n else:\n traceflags = TraceFlags.DEFAULT\n\n if (\n traceid != INVALID_TRACE_ID\n and _valid_extract_traceid.fullmatch(traceid) is not None\n and spanid != INVALID_SPAN_ID\n and _valid_extract_spanid.fullmatch(spanid) is not None\n ):\n context = set_span_in_context(\n NonRecordingSpan(\n SpanContext(\n trace_id=int(traceid, 16),\n span_id=int(spanid, 16),\n is_remote=True,\n trace_flags=traceflags,\n )\n ),\n context,\n )\n\n baggage = get_all(context) or {}\n\n for key in getter.keys(carrier):\n\n if not key.startswith(OT_BAGGAGE_PREFIX):\n continue\n\n baggage[\n key[len(OT_BAGGAGE_PREFIX) :]\n ] = _extract_first_element(getter.get(carrier, key))\n\n for key, value in baggage.items():\n context = set_baggage(key, value, context)\n\n return context\n\n def inject(\n self,\n set_in_carrier: Setter[TextMapPropagatorT],\n carrier: TextMapPropagatorT,\n context: Optional[Context] = None,\n ) -> None:\n\n span_context = get_current_span(context).get_span_context()\n\n if span_context.trace_id == INVALID_TRACE_ID:\n return\n\n set_in_carrier(\n carrier, OT_TRACE_ID_HEADER, hex(span_context.trace_id)[2:][-16:]\n )\n set_in_carrier(\n carrier, OT_SPAN_ID_HEADER, hex(span_context.span_id)[2:][-16:],\n )\n\n if span_context.trace_flags == TraceFlags.SAMPLED:\n traceflags = \"true\"\n else:\n traceflags = \"false\"\n\n set_in_carrier(carrier, OT_SAMPLED_HEADER, traceflags)\n\n baggage = get_all(context)\n\n if not baggage:\n return\n\n for header_name, header_value in baggage.items():\n\n if (\n _valid_header_name.fullmatch(header_name) is None\n or _valid_header_value.fullmatch(header_value) is None\n ):\n continue\n\n set_in_carrier(\n carrier,\n \"\".join([OT_BAGGAGE_PREFIX, header_name]),\n header_value,\n )\n\n @property\n def fields(self):\n \"\"\"Returns a set with the fields set in `inject`.\n\n See\n `opentelemetry.propagators.textmap.TextMapPropagator.fields`\n \"\"\"\n return {\n OT_TRACE_ID_HEADER,\n OT_SPAN_ID_HEADER,\n 
OT_SAMPLED_HEADER,\n }\n\n\ndef _extract_first_element(\n items: Iterable[TextMapPropagatorT],\n) -> Optional[TextMapPropagatorT]:\n if items is None:\n return None\n return next(iter(items), None)\n", "path": "propagator/opentelemetry-propagator-ot-trace/src/opentelemetry/propagators/ot_trace/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom re import compile as re_compile\nfrom typing import Any, Iterable, Optional\n\nfrom opentelemetry.baggage import get_all, set_baggage\nfrom opentelemetry.context import Context\nfrom opentelemetry.propagators.textmap import (\n Getter,\n Setter,\n TextMapPropagator,\n TextMapPropagatorT,\n)\nfrom opentelemetry.trace import (\n INVALID_SPAN_ID,\n INVALID_TRACE_ID,\n NonRecordingSpan,\n SpanContext,\n TraceFlags,\n get_current_span,\n set_span_in_context,\n)\n\nOT_TRACE_ID_HEADER = \"ot-tracer-traceid\"\nOT_SPAN_ID_HEADER = \"ot-tracer-spanid\"\nOT_SAMPLED_HEADER = \"ot-tracer-sampled\"\nOT_BAGGAGE_PREFIX = \"ot-baggage-\"\n\n_valid_header_name = re_compile(r\"[\\w_^`!#$%&'*+.|~]+\")\n_valid_header_value = re_compile(r\"[\\t\\x20-\\x7e\\x80-\\xff]+\")\n_valid_extract_traceid = re_compile(r\"[0-9a-f]{1,32}\")\n_valid_extract_spanid = re_compile(r\"[0-9a-f]{1,16}\")\n\n\nclass OTTracePropagator(TextMapPropagator):\n \"\"\"Propagator for the OTTrace HTTP header format\"\"\"\n\n def extract(\n self,\n getter: Getter[TextMapPropagatorT],\n carrier: TextMapPropagatorT,\n context: Optional[Context] = None,\n ) -> Context:\n\n traceid = _extract_first_element(\n getter.get(carrier, OT_TRACE_ID_HEADER), INVALID_TRACE_ID\n )\n\n spanid = _extract_first_element(\n getter.get(carrier, OT_SPAN_ID_HEADER), INVALID_SPAN_ID\n )\n\n sampled = _extract_first_element(\n getter.get(carrier, OT_SAMPLED_HEADER)\n )\n\n if sampled == \"true\":\n traceflags = TraceFlags.SAMPLED\n else:\n traceflags = TraceFlags.DEFAULT\n\n if (\n traceid != INVALID_TRACE_ID\n and _valid_extract_traceid.fullmatch(traceid) is not None\n and spanid != INVALID_SPAN_ID\n and _valid_extract_spanid.fullmatch(spanid) is not None\n ):\n context = set_span_in_context(\n NonRecordingSpan(\n SpanContext(\n trace_id=int(traceid, 16),\n span_id=int(spanid, 16),\n is_remote=True,\n trace_flags=traceflags,\n )\n ),\n context,\n )\n\n baggage = get_all(context) or {}\n\n for key in getter.keys(carrier):\n\n if not key.startswith(OT_BAGGAGE_PREFIX):\n continue\n\n baggage[\n key[len(OT_BAGGAGE_PREFIX) :]\n ] = _extract_first_element(getter.get(carrier, key))\n\n for key, value in baggage.items():\n context = set_baggage(key, value, context)\n\n return context\n\n def inject(\n self,\n set_in_carrier: Setter[TextMapPropagatorT],\n carrier: TextMapPropagatorT,\n context: Optional[Context] = None,\n ) -> None:\n\n span_context = get_current_span(context).get_span_context()\n\n if span_context.trace_id == INVALID_TRACE_ID:\n return\n\n set_in_carrier(\n carrier, OT_TRACE_ID_HEADER, hex(span_context.trace_id)[2:][-16:]\n )\n 
set_in_carrier(\n carrier, OT_SPAN_ID_HEADER, hex(span_context.span_id)[2:][-16:],\n )\n\n if span_context.trace_flags == TraceFlags.SAMPLED:\n traceflags = \"true\"\n else:\n traceflags = \"false\"\n\n set_in_carrier(carrier, OT_SAMPLED_HEADER, traceflags)\n\n baggage = get_all(context)\n\n if not baggage:\n return\n\n for header_name, header_value in baggage.items():\n\n if (\n _valid_header_name.fullmatch(header_name) is None\n or _valid_header_value.fullmatch(header_value) is None\n ):\n continue\n\n set_in_carrier(\n carrier,\n \"\".join([OT_BAGGAGE_PREFIX, header_name]),\n header_value,\n )\n\n @property\n def fields(self):\n \"\"\"Returns a set with the fields set in `inject`.\n\n See\n `opentelemetry.propagators.textmap.TextMapPropagator.fields`\n \"\"\"\n return {\n OT_TRACE_ID_HEADER,\n OT_SPAN_ID_HEADER,\n OT_SAMPLED_HEADER,\n }\n\n\ndef _extract_first_element(\n items: Iterable[TextMapPropagatorT], default: Any = None,\n) -> Optional[TextMapPropagatorT]:\n if items is None:\n return default\n return next(iter(items), None)\n", "path": "propagator/opentelemetry-propagator-ot-trace/src/opentelemetry/propagators/ot_trace/__init__.py"}]} | 2,205 | 415 |
gh_patches_debug_21177 | rasdani/github-patches | git_diff | Flexget__Flexget-2474 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unhandled error in plugin argenteam
<!---
Before opening an issue, verify:
- Is this a feature request? Post it on https://feathub.com/Flexget/Flexget
- Did you recently upgrade? Look at the Change Log and Upgrade Actions to make sure that you don't need to make any changes to your config https://flexget.com/ChangeLog https://flexget.com/UpgradeActions
- Are you running FlexGet as a daemon? Stop it completely and then start it again https://flexget.com/CLI/daemon
- Did you search to see if the issue already exists? https://github.com/Flexget/Flexget/issues
- Did you fill out the issue template as completely as possible?
The issue template is here because it helps to ensure you submitted all the necessary information the first time, and allows us to more quickly review issues. Please fill it out correctly and do not ignore it, no matter how irrelevant you think it may be. Thanks in advance for your help with this!
--->
### Expected behavior:
<!---
Get the list of shows from Trakt and download the needed shows.
--->
### Actual behavior:
It gets the list of shows from Trakt and starts searching for each show using the plug-ins. It then crashes.
### Steps to reproduce:
- Step 1: It happens every time it runs.
#### Config:
```
variables: secrets.yml
templates:
tv:
configure_series:
settings:
quality: 720p+
identified_by: ep
from:
trakt_list:
account: "{? trakt.account ?}"
list: "{? trakt.series ?}"
type: shows
reject:
regexp:
reject:
- \b(s|d)ub(s|bed|lado|titulado)?\b
- \bhc\b
- \bdual\b
- \b3d\b
- \bfre(nch)?\b
- \bita(lian)?\b
- \bspa(nish)?\b
- \bger(man)?\b
- \bcastellano\b
- \brus(sian)?\b
- \btamil\b
- \bhindi\b
- \bh265\b
- \bx265\b
content_filter:
reject:
- '*.rar'
- '*.zip'
- '*.exe'
place_torrent:
utorrent:
url: "{? utorrent.url ?}"
username: "{? utorrent.user ?}"
password: "{? utorrent.pass ?}"
path: "{? utorrent.path ?}"
pushbullet:
notify:
entries:
title: "[flexget] Downloading"
message: "{{title}}"
via:
- pushbullet:
api_key: "{? pushbullet.api ?}"
tasks:
follow show from ep:
seen: local
trakt_list:
account: "{? trakt.account ?}"
list: "{? trakt.series ?}"
type: episodes
accept_all: yes
set_series_begin: yes
list_remove:
- trakt_list:
account: "{? trakt.account ?}"
list: "{? trakt.series ?}"
type: episodes
list_add:
- trakt_list:
account: "{? trakt.account ?}"
list: "{? trakt.series ?}"
type: shows
get shows:
template:
- tv
- reject
- place_torrent
- pushbullet
discover:
what:
- next_series_episodes:
from_start: yes
from:
- piratebay:
category: highres tv
- limetorrents:
category: tv
- rarbg:
category: [1, 18, 41, 49]
- argenteam:
force_subtitles: no
```
#### Log:
<details>
<summary>(click to expand)</summary>
Debug Log - https://pastebin.com/bTr4qX6a
```
2019-03-16 05:40 VERBOSE trakt_list get shows Retrieving `shows` list `720p_list`
2019-03-16 05:40 VERBOSE discover get shows Discovering 26 titles ...
2019-03-16 05:40 INFO discover get shows Ignoring interval because of --discover-now
2019-03-16 05:40 INFO task get shows Plugin next_series_episodes has requested task to be ran again after execution has completed. Reason: Look for next season
2019-03-16 05:40 VERBOSE discover get shows The Big Bang Theory (2007) S12E18 hasn't been released yet (Expected: 2019-04-04 00:00:00)
2019-03-16 05:40 VERBOSE discover get shows Modern Family (2009) S10E18 hasn't been released yet (Expected: 2019-03-20 00:00:00)
2019-03-16 05:40 VERBOSE discover get shows Game of Thrones (2011) S08E01 hasn't been released yet (Expected: 2019-04-14 00:00:00)
2019-03-16 05:40 VERBOSE discover get shows The Goldbergs (2013) S06E19 hasn't been released yet (Expected: 2019-03-20 00:00:00)
2019-03-16 05:40 VERBOSE discover get shows Schooled (2019) S01E09 hasn't been released yet (Expected: 2019-03-20 00:00:00)
2019-03-16 05:41 VERBOSE discover get shows Searching for `Shameless (2011) S04E04` with plugin `piratebay` (1 of 10)
2019-03-16 05:41 VERBOSE discover get shows Searching for `Shameless (2011) S04E04` with plugin `limetorrents` (1 of 10)
2019-03-16 05:41 VERBOSE discover get shows Searching for `Shameless (2011) S04E04` with plugin `rarbg` (1 of 10)
2019-03-16 05:41 VERBOSE discover get shows Searching for `Shameless (2011) S04E04` with plugin `argenteam` (1 of 10)
2019-03-16 05:41 CRITICAL task get shows BUG: Unhandled error in plugin discover: list index out of range
2019-03-16 05:41 CRITICAL manager get shows An unexpected crash has occurred. Writing crash report to C:\Users\Amishman\flexget\crash_report.2019.03.16.054135803000.log. Please verify you are running the latest version of flexget by using "flexget -V" from CLI or by using version_checker plugin at http://flexget.com/wiki/Plugins/version_checker. You are currently using version 2.20.12
2019-03-16 05:41 WARNING task get shows Aborting task (plugin: discover)
```
</details>
### Additional information:
- FlexGet version: 2.20.12
- Python version: 2.7.13
- Installation method: pip
- Using daemon (yes/no): no
- OS and version: Windows 7 Pro
- Link to crash log:
[crash_report.2019.03.16.054135803000.log](https://github.com/Flexget/Flexget/files/2973746/crash_report.2019.03.16.054135803000.log)
<!---
In config and debug/crash logs, remember to redact any personal or sensitive information such as passwords, API keys, private URLs and so on.
Please verify that the following data is present before submitting your issue:
- Link to a paste service or paste above the relevant config (preferably full config, including templates if present). Please make sure the paste does not expire, if possible.
- Link to a paste service or paste above debug-level logs of the relevant task/s (use `flexget -L debug execute --tasks <Task_name>`).
- FlexGet version (use `flexget -V` to get it).
- Full Python version, for example `2.7.11` (use `python -V` to get it). Note that FlexGet is not supported for use with Python v3.0, 3.1, 3.2 or 3.6.
- Installation method (pip, git install, etc).
- Whether or not you're running FlexGet as a daemon.
- OS and version.
- Attach crash log if one was generated, in addition to the debug-level log. It can be found in the directory with your config file.
--->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flexget/components/sites/sites/argenteam.py`
Content:
```
1 from __future__ import unicode_literals, division, absolute_import
2 from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
3
4 import logging
5
6 from requests import RequestException
7
8 from flexget import plugin
9 from flexget.entry import Entry
10 from flexget.event import event
11 from flexget.components.sites.utils import normalize_scene
12
13 log = logging.getLogger('argenteam')
14
15
16 class SearchArgenteam(object):
17 """ Argenteam
18 Search plugin which gives results from www.argenteam.net, latin american (Argentina) web.
19
20 Configuration:
21 - force_subtitles: [yes/no] #Force download release with subtitles made by aRGENTeaM. Default is yes
22
23 Example
24 argenteam:
25 force_subtitles: yes
26 """
27
28 schema = {
29 'type': 'object',
30 'properties': {'force_subtitles': {'type': 'boolean', 'default': True}},
31 "additionalProperties": False,
32 }
33
34 base_url = 'http://www.argenteam.net/api/v1/'
35
36 @plugin.internet(log)
37 def search(self, task, entry, config):
38 """
39 Search for releases
40 """
41
42 entries = set()
43
44 for search_string in entry.get('search_strings', [entry['title']]):
45
46 try:
47 params = {'q': normalize_scene(search_string)}
48 resp = task.requests.get(self.base_url + 'search', params=params)
49 log.debug('Requesting: %s', resp.url)
50 response = resp.json()
51 except RequestException as e:
52 log.error('Argenteam request failed: %s', e)
53 return
54
55 if not response:
56 log.debug('Empty response from Argenteam')
57 continue
58
59 if not response.get('total'):
60 log.debug('No results found for %s', search_string)
61 continue
62
63 results = response.get('results')
64 if results[0]['type'] == 'tvshow':
65 log.error('Argenteam type tvshow not supported yet.')
66 continue
67
68 url = '{}{}?id={}'.format(self.base_url, results[0]['type'], results[0]['id'])
69 try:
70 resp = task.requests.get(url)
71 log.debug('Requesting releases for: %s', url)
72 response = resp.json()
73 except RequestException as e:
74 log.error('Argenteam request failed: %s', e)
75 return
76
77 for release in response['releases']:
78 for torrent in release['torrents']:
79 if (
80 config.get('force_subtitles')
81 and release['subtitles']
82 or not config.get('force_subtitles')
83 ):
84 e = Entry()
85
86 e['title'] = ' '.join(
87 (
88 search_string,
89 release['source'],
90 release['codec'],
91 release['team'],
92 release['tags'],
93 )
94 )
95 e['url'] = torrent['uri']
96
97 # Save aRGENTeaM subtitle URL for this release
98 if 'subtitles' in release:
99 e['argenteam_subtitle'] = release['subtitles'][0]['uri']
100 log.debug('Argenteam subtitle found: %s', e['argenteam_subtitle'])
101
102 if 'tvdb' in response:
103 e['tvdb_id'] = response['tvdb']
104 if 'info' in response and 'imdb' in response['info']:
105 e['imdb_id'] = response['info']['imdb']
106
107 entries.add(e)
108
109 return entries
110
111
112 @event('plugin.register')
113 def register_plugin():
114 plugin.register(SearchArgenteam, 'argenteam', interfaces=['search'], api_ver=2)
115
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/flexget/components/sites/sites/argenteam.py b/flexget/components/sites/sites/argenteam.py
--- a/flexget/components/sites/sites/argenteam.py
+++ b/flexget/components/sites/sites/argenteam.py
@@ -74,6 +74,7 @@
log.error('Argenteam request failed: %s', e)
return
+ log.debug('%s releases found.', len(response['releases']))
for release in response['releases']:
for torrent in release['torrents']:
if (
@@ -95,7 +96,7 @@
e['url'] = torrent['uri']
# Save aRGENTeaM subtitle URL for this release
- if 'subtitles' in release:
+ if 'subtitles' in release and len(release['subtitles']) > 0:
e['argenteam_subtitle'] = release['subtitles'][0]['uri']
log.debug('Argenteam subtitle found: %s', e['argenteam_subtitle'])
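For context on the patch above: the `list index out of range` crash in the log comes from `release['subtitles'][0]` being evaluated when the API returns a release whose `subtitles` field is an empty list, so the guard checks the list length before indexing. A minimal stand-alone sketch of that guard, using hypothetical release payloads rather than Flexget's real API objects:

```python
# Hypothetical payloads shaped like entries of the Argenteam API's "releases" list.
release_with_subs = {'subtitles': [{'uri': 'http://example.org/sub.srt'}]}
release_without_subs = {'subtitles': []}  # this shape triggered the IndexError


def first_subtitle_uri(release):
    # Guard against both a missing key and an empty list before taking [0].
    if 'subtitles' in release and len(release['subtitles']) > 0:
        return release['subtitles'][0]['uri']
    return None


assert first_subtitle_uri(release_with_subs) == 'http://example.org/sub.srt'
assert first_subtitle_uri(release_without_subs) is None
```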
| {"golden_diff": "diff --git a/flexget/components/sites/sites/argenteam.py b/flexget/components/sites/sites/argenteam.py\n--- a/flexget/components/sites/sites/argenteam.py\n+++ b/flexget/components/sites/sites/argenteam.py\n@@ -74,6 +74,7 @@\n log.error('Argenteam request failed: %s', e)\n return\n \n+ log.debug('%s releases found.', len(response['releases']))\n for release in response['releases']:\n for torrent in release['torrents']:\n if (\n@@ -95,7 +96,7 @@\n e['url'] = torrent['uri']\n \n # Save aRGENTeaM subtitle URL for this release\n- if 'subtitles' in release:\n+ if 'subtitles' in release and len(release['subtitles']) > 0:\n e['argenteam_subtitle'] = release['subtitles'][0]['uri']\n log.debug('Argenteam subtitle found: %s', e['argenteam_subtitle'])\n", "issue": "Unhandled error in plugin argenteam\n<!---\r\nBefore opening an issue, verify:\r\n\r\n- Is this a feature request? Post it on https://feathub.com/Flexget/Flexget\r\n- Did you recently upgrade? Look at the Change Log and Upgrade Actions to make sure that you don't need to make any changes to your config https://flexget.com/ChangeLog https://flexget.com/UpgradeActions\r\n- Are you running FlexGet as a daemon? Stop it completely and then start it again https://flexget.com/CLI/daemon\r\n- Did you search to see if the issue already exists? https://github.com/Flexget/Flexget/issues\r\n- Did you fill out the issue template as completely as possible?\r\n\r\nThe issue template is here because it helps to ensure you submitted all the necessary information the first time, and allows us to more quickly review issues. Please fill it out correctly and do not ignore it, no matter how irrelevant you think it may be. Thanks in advance for your help with this!\r\n--->\r\n### Expected behavior:\r\n<!---\r\nGet list of shows from Trakt and download needed shows,\r\n--->\r\n\r\n### Actual behavior:\r\nIt gets the list of shows from Trakt and starts searching for each show using the plug-ins. It then crashes.\r\n### Steps to reproduce:\r\n- Step 1: It happens every time it runs.\r\n\r\n#### Config:\r\n```\r\nvariables: secrets.yml\r\n\r\ntemplates:\r\n\r\n\r\n tv:\r\n configure_series:\r\n settings:\r\n quality: 720p+\r\n identified_by: ep\r\n from:\r\n trakt_list:\r\n account: \"{? trakt.account ?}\"\r\n list: \"{? trakt.series ?}\"\r\n type: shows\r\n\r\n\r\n\r\n reject:\r\n regexp:\r\n reject:\r\n - \\b(s|d)ub(s|bed|lado|titulado)?\\b\r\n - \\bhc\\b\r\n - \\bdual\\b\r\n - \\b3d\\b\r\n - \\bfre(nch)?\\b\r\n - \\bita(lian)?\\b\r\n - \\bspa(nish)?\\b\r\n - \\bger(man)?\\b\r\n - \\bcastellano\\b\r\n - \\brus(sian)?\\b\r\n - \\btamil\\b\r\n - \\bhindi\\b\r\n - \\bh265\\b\r\n - \\bx265\\b\r\n content_filter:\r\n reject:\r\n - '*.rar'\r\n - '*.zip'\r\n - '*.exe'\r\n\r\n\r\n\r\n place_torrent:\r\n utorrent:\r\n url: \"{? utorrent.url ?}\"\r\n username: \"{? utorrent.user ?}\"\r\n password: \"{? utorrent.pass ?}\"\r\n path: \"{? utorrent.path ?}\"\r\n\r\n\r\n\r\n pushbullet:\r\n notify:\r\n entries:\r\n title: \"[flexget] Downloading\"\r\n message: \"{{title}}\"\r\n via:\r\n - pushbullet:\r\n api_key: \"{? pushbullet.api ?}\"\r\n\r\n\r\n\r\n\r\n\r\ntasks:\r\n\r\n\r\n follow show from ep:\r\n seen: local\r\n trakt_list:\r\n account: \"{? trakt.account ?}\"\r\n list: \"{? trakt.series ?}\"\r\n type: episodes\r\n accept_all: yes\r\n set_series_begin: yes\r\n list_remove:\r\n - trakt_list:\r\n account: \"{? trakt.account ?}\"\r\n list: \"{? trakt.series ?}\"\r\n type: episodes\r\n list_add:\r\n - trakt_list:\r\n account: \"{? 
trakt.account ?}\"\r\n list: \"{? trakt.series ?}\"\r\n type: shows\r\n\r\n\r\n\r\n get shows:\r\n template:\r\n - tv\r\n - reject\r\n - place_torrent\r\n - pushbullet\r\n discover:\r\n what:\r\n - next_series_episodes:\r\n from_start: yes\r\n from:\r\n - piratebay:\r\n category: highres tv\r\n - limetorrents:\r\n category: tv\r\n - rarbg:\r\n category: [1, 18, 41, 49]\r\n - argenteam:\r\n force_subtitles: no\r\n```\r\n \r\n#### Log:\r\n<details>\r\n <summary>(click to expand)</summary>\r\n\r\n\r\nDebug Log - https://pastebin.com/bTr4qX6a\r\n```\r\n2019-03-16 05:40 VERBOSE trakt_list get shows Retrieving `shows` list `720p_list`\r\n2019-03-16 05:40 VERBOSE discover get shows Discovering 26 titles ...\r\n2019-03-16 05:40 INFO discover get shows Ignoring interval because of --discover-now\r\n2019-03-16 05:40 INFO task get shows Plugin next_series_episodes has requested task to be ran again after execution has completed. Reason: Look for next season\r\n2019-03-16 05:40 VERBOSE discover get shows The Big Bang Theory (2007) S12E18 hasn't been released yet (Expected: 2019-04-04 00:00:00)\r\n2019-03-16 05:40 VERBOSE discover get shows Modern Family (2009) S10E18 hasn't been released yet (Expected: 2019-03-20 00:00:00)\r\n2019-03-16 05:40 VERBOSE discover get shows Game of Thrones (2011) S08E01 hasn't been released yet (Expected: 2019-04-14 00:00:00)\r\n2019-03-16 05:40 VERBOSE discover get shows The Goldbergs (2013) S06E19 hasn't been released yet (Expected: 2019-03-20 00:00:00)\r\n2019-03-16 05:40 VERBOSE discover get shows Schooled (2019) S01E09 hasn't been released yet (Expected: 2019-03-20 00:00:00)\r\n2019-03-16 05:41 VERBOSE discover get shows Searching for `Shameless (2011) S04E04` with plugin `piratebay` (1 of 10)\r\n2019-03-16 05:41 VERBOSE discover get shows Searching for `Shameless (2011) S04E04` with plugin `limetorrents` (1 of 10)\r\n2019-03-16 05:41 VERBOSE discover get shows Searching for `Shameless (2011) S04E04` with plugin `rarbg` (1 of 10)\r\n2019-03-16 05:41 VERBOSE discover get shows Searching for `Shameless (2011) S04E04` with plugin `argenteam` (1 of 10)\r\n2019-03-16 05:41 CRITICAL task get shows BUG: Unhandled error in plugin discover: list index out of range\r\n2019-03-16 05:41 CRITICAL manager get shows An unexpected crash has occurred. Writing crash report to C:\\Users\\Amishman\\flexget\\crash_report.2019.03.16.054135803000.log. Please verify you are running the latest version of flexget by using \"flexget -V\" from CLI or by using version_checker plugin at http://flexget.com/wiki/Plugins/version_checker. You are currently using version 2.20.12\r\n2019-03-16 05:41 WARNING task get shows Aborting task (plugin: discover)\r\n```\r\n</details>\r\n\r\n### Additional information:\r\n\r\n- FlexGet version: 2.20.12\r\n- Python version: 2.7.13\r\n- Installation method: pip\r\n- Using daemon (yes/no): no\r\n- OS and version: Windows 7 Pro\r\n- Link to crash log:\r\n[crash_report.2019.03.16.054135803000.log](https://github.com/Flexget/Flexget/files/2973746/crash_report.2019.03.16.054135803000.log)\r\n\r\n\r\n<!---\r\nIn config and debug/crash logs, remember to redact any personal or sensitive information such as passwords, API keys, private URLs and so on.\r\n\r\nPlease verify that the following data is present before submitting your issue:\r\n\r\n- Link to a paste service or paste above the relevant config (preferably full config, including templates if present). 
Please make sure the paste does not expire, if possible.\r\n- Link to a paste service or paste above debug-level logs of the relevant task/s (use `flexget -L debug execute --tasks <Task_name>`).\r\n- FlexGet version (use `flexget -V` to get it).\r\n- Full Python version, for example `2.7.11` (use `python -V` to get it). Note that FlexGet is not supported for use with Python v3.0, 3.1, 3.2 or 3.6.\r\n- Installation method (pip, git install, etc).\r\n- Whether or not you're running FlexGet as a daemon.\r\n- OS and version.\r\n- Attach crash log if one was generated, in addition to the debug-level log. It can be found in the directory with your config file.\r\n--->\r\n\n", "before_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\n\nimport logging\n\nfrom requests import RequestException\n\nfrom flexget import plugin\nfrom flexget.entry import Entry\nfrom flexget.event import event\nfrom flexget.components.sites.utils import normalize_scene\n\nlog = logging.getLogger('argenteam')\n\n\nclass SearchArgenteam(object):\n \"\"\" Argenteam\n Search plugin which gives results from www.argenteam.net, latin american (Argentina) web.\n\n Configuration:\n - force_subtitles: [yes/no] #Force download release with subtitles made by aRGENTeaM. Default is yes\n\n Example\n argenteam:\n force_subtitles: yes\n \"\"\"\n\n schema = {\n 'type': 'object',\n 'properties': {'force_subtitles': {'type': 'boolean', 'default': True}},\n \"additionalProperties\": False,\n }\n\n base_url = 'http://www.argenteam.net/api/v1/'\n\n @plugin.internet(log)\n def search(self, task, entry, config):\n \"\"\"\n Search for releases\n \"\"\"\n\n entries = set()\n\n for search_string in entry.get('search_strings', [entry['title']]):\n\n try:\n params = {'q': normalize_scene(search_string)}\n resp = task.requests.get(self.base_url + 'search', params=params)\n log.debug('Requesting: %s', resp.url)\n response = resp.json()\n except RequestException as e:\n log.error('Argenteam request failed: %s', e)\n return\n\n if not response:\n log.debug('Empty response from Argenteam')\n continue\n\n if not response.get('total'):\n log.debug('No results found for %s', search_string)\n continue\n\n results = response.get('results')\n if results[0]['type'] == 'tvshow':\n log.error('Argenteam type tvshow not supported yet.')\n continue\n\n url = '{}{}?id={}'.format(self.base_url, results[0]['type'], results[0]['id'])\n try:\n resp = task.requests.get(url)\n log.debug('Requesting releases for: %s', url)\n response = resp.json()\n except RequestException as e:\n log.error('Argenteam request failed: %s', e)\n return\n\n for release in response['releases']:\n for torrent in release['torrents']:\n if (\n config.get('force_subtitles')\n and release['subtitles']\n or not config.get('force_subtitles')\n ):\n e = Entry()\n\n e['title'] = ' '.join(\n (\n search_string,\n release['source'],\n release['codec'],\n release['team'],\n release['tags'],\n )\n )\n e['url'] = torrent['uri']\n\n # Save aRGENTeaM subtitle URL for this release\n if 'subtitles' in release:\n e['argenteam_subtitle'] = release['subtitles'][0]['uri']\n log.debug('Argenteam subtitle found: %s', e['argenteam_subtitle'])\n\n if 'tvdb' in response:\n e['tvdb_id'] = response['tvdb']\n if 'info' in response and 'imdb' in response['info']:\n e['imdb_id'] = response['info']['imdb']\n\n entries.add(e)\n\n return entries\n\n\n@event('plugin.register')\ndef register_plugin():\n 
plugin.register(SearchArgenteam, 'argenteam', interfaces=['search'], api_ver=2)\n", "path": "flexget/components/sites/sites/argenteam.py"}], "after_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\n\nimport logging\n\nfrom requests import RequestException\n\nfrom flexget import plugin\nfrom flexget.entry import Entry\nfrom flexget.event import event\nfrom flexget.components.sites.utils import normalize_scene\n\nlog = logging.getLogger('argenteam')\n\n\nclass SearchArgenteam(object):\n \"\"\" Argenteam\n Search plugin which gives results from www.argenteam.net, latin american (Argentina) web.\n\n Configuration:\n - force_subtitles: [yes/no] #Force download release with subtitles made by aRGENTeaM. Default is yes\n\n Example\n argenteam:\n force_subtitles: yes\n \"\"\"\n\n schema = {\n 'type': 'object',\n 'properties': {'force_subtitles': {'type': 'boolean', 'default': True}},\n \"additionalProperties\": False,\n }\n\n base_url = 'http://www.argenteam.net/api/v1/'\n\n @plugin.internet(log)\n def search(self, task, entry, config):\n \"\"\"\n Search for releases\n \"\"\"\n\n entries = set()\n\n for search_string in entry.get('search_strings', [entry['title']]):\n\n try:\n params = {'q': normalize_scene(search_string)}\n resp = task.requests.get(self.base_url + 'search', params=params)\n log.debug('Requesting: %s', resp.url)\n response = resp.json()\n except RequestException as e:\n log.error('Argenteam request failed: %s', e)\n return\n\n if not response:\n log.debug('Empty response from Argenteam')\n continue\n\n if not response.get('total'):\n log.debug('No results found for %s', search_string)\n continue\n\n results = response.get('results')\n if results[0]['type'] == 'tvshow':\n log.error('Argenteam type tvshow not supported yet.')\n continue\n\n url = '{}{}?id={}'.format(self.base_url, results[0]['type'], results[0]['id'])\n try:\n resp = task.requests.get(url)\n log.debug('Requesting releases for: %s', url)\n response = resp.json()\n except RequestException as e:\n log.error('Argenteam request failed: %s', e)\n return\n\n log.debug('%s releases found.', len(response['releases']))\n for release in response['releases']:\n for torrent in release['torrents']:\n if (\n config.get('force_subtitles')\n and release['subtitles']\n or not config.get('force_subtitles')\n ):\n e = Entry()\n\n e['title'] = ' '.join(\n (\n search_string,\n release['source'],\n release['codec'],\n release['team'],\n release['tags'],\n )\n )\n e['url'] = torrent['uri']\n\n # Save aRGENTeaM subtitle URL for this release\n if 'subtitles' in release and len(release['subtitles']) > 0:\n e['argenteam_subtitle'] = release['subtitles'][0]['uri']\n log.debug('Argenteam subtitle found: %s', e['argenteam_subtitle'])\n\n if 'tvdb' in response:\n e['tvdb_id'] = response['tvdb']\n if 'info' in response and 'imdb' in response['info']:\n e['imdb_id'] = response['info']['imdb']\n\n entries.add(e)\n\n return entries\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(SearchArgenteam, 'argenteam', interfaces=['search'], api_ver=2)\n", "path": "flexget/components/sites/sites/argenteam.py"}]} | 3,492 | 228 |
gh_patches_debug_3650 | rasdani/github-patches | git_diff | internetarchive__openlibrary-7836 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Members of `/usergroup/read-only` can edit covers
<!-- What problem are we solving? What does the experience look like today? What are the symptoms? -->
Members of the `read-only` usergroup are still able to upload cover images and URLs via the manage covers modal.
### Evidence / Screenshot (if possible)
### Relevant url?
<!-- `https://openlibrary.org/...` -->
### Steps to Reproduce
<!-- What steps caused you to find the bug? -->
While logged-in as a member of the read-only group:
1. Go to any book page
2. Attempt to add a new cover via the manage covers modal
<!-- What actually happened after these steps? What did you expect to happen? -->
* Actual: The cover is updated.
* Expected: The update fails gracefully.
### Details
- **Logged in (Y/N)?**Y
- **Browser type/version?**Any
- **Operating system?**Any
- **Environment (prod/dev/local)?** prod
<!-- If not sure, put prod -->
### Proposal & Constraints
<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->
Check for membership in the read-only group before persisting the new cover. If submitter is a member, do not store the image.
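A minimal sketch of that guard, placed at the top of the cover/photo `POST` handlers before any coverstore request is made (`is_read_only()` is assumed here to report membership in `/usergroup/read-only`; substitute the real membership check if the account model names it differently):

```python
import web

from openlibrary import accounts


def reject_read_only_uploads():
    # Call at the start of add_cover.POST / add_photo.POST.
    user = accounts.get_current_user()
    if user and user.is_read_only():
        raise web.forbidden(message="Patron not permitted to upload images")
```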
### Related files
<!-- Files related to this issue; this is super useful for new contributors who might want to help! If you're not sure, leave this blank; a maintainer will add them. -->
POST handler for manage cover form: https://github.com/internetarchive/openlibrary/blob/master/openlibrary/plugins/upstream/covers.py#L34-L53
### Stakeholders
<!-- @ tag stakeholders of this bug -->
@seabelis
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openlibrary/plugins/upstream/covers.py`
Content:
```
1 """Handle book cover/author photo upload.
2 """
3 from logging import getLogger
4
5 import requests
6 import web
7 from io import BytesIO
8
9 from infogami.utils import delegate
10 from infogami.utils.view import safeint
11 from openlibrary import accounts
12 from openlibrary.plugins.upstream.models import Image
13 from openlibrary.plugins.upstream.utils import (
14 get_coverstore_url,
15 get_coverstore_public_url,
16 render_template,
17 )
18
19 logger = getLogger("openlibrary.plugins.upstream.covers")
20
21
22 def setup():
23 pass
24
25
26 class add_cover(delegate.page):
27 path = r"(/books/OL\d+M)/add-cover"
28 cover_category = "b"
29
30 def GET(self, key):
31 book = web.ctx.site.get(key)
32 return render_template('covers/add', book)
33
34 def POST(self, key):
35 book = web.ctx.site.get(key)
36 if not book:
37 raise web.notfound("")
38
39 i = web.input(file={}, url="")
40
41 # remove references to field storage objects
42 web.ctx.pop("_fieldstorage", None)
43
44 data = self.upload(key, i)
45
46 if coverid := data.get('id'):
47 if isinstance(i.url, bytes):
48 i.url = i.url.decode("utf-8")
49 self.save(book, coverid, url=i.url)
50 cover = Image(web.ctx.site, "b", coverid)
51 return render_template("covers/saved", cover)
52 else:
53 return render_template("covers/add", book, {'url': i.url}, data)
54
55 def upload(self, key, i):
56 """Uploads a cover to coverstore and returns the response."""
57 olid = key.split("/")[-1]
58
59 if i.file is not None and hasattr(i.file, 'value'):
60 data = i.file.value
61 else:
62 data = None
63
64 if i.url and i.url.strip() == "https://":
65 i.url = ""
66
67 user = accounts.get_current_user()
68 params = {
69 "author": user and user.key,
70 "source_url": i.url,
71 "olid": olid,
72 "ip": web.ctx.ip,
73 }
74
75 upload_url = f'{get_coverstore_url()}/{self.cover_category}/upload2'
76
77 if upload_url.startswith("//"):
78 upload_url = "http:" + upload_url
79
80 try:
81 files = {'data': BytesIO(data)}
82 response = requests.post(upload_url, data=params, files=files)
83 return web.storage(response.json())
84 except requests.HTTPError as e:
85 logger.exception("Covers upload failed")
86 return web.storage({'error': str(e)})
87
88 def save(self, book, coverid, url=None):
89 book.covers = [coverid] + [cover.id for cover in book.get_covers()]
90 book._save(
91 f'{get_coverstore_public_url()}/b/id/{coverid}-S.jpg',
92 action="add-cover",
93 data={"url": url},
94 )
95
96
97 class add_work_cover(add_cover):
98 path = r"(/works/OL\d+W)/add-cover"
99 cover_category = "w"
100
101 def upload(self, key, i):
102 if "coverid" in i and safeint(i.coverid):
103 return web.storage(id=int(i.coverid))
104 else:
105 return add_cover.upload(self, key, i)
106
107
108 class add_photo(add_cover):
109 path = r"(/authors/OL\d+A)/add-photo"
110 cover_category = "a"
111
112 def save(self, author, photoid, url=None):
113 author.photos = [photoid] + [photo.id for photo in author.get_photos()]
114 author._save("Added new photo", action="add-photo", data={"url": url})
115
116
117 class manage_covers(delegate.page):
118 path = r"(/books/OL\d+M)/manage-covers"
119
120 def GET(self, key):
121 book = web.ctx.site.get(key)
122 if not book:
123 raise web.notfound()
124 return render_template("covers/manage", key, self.get_images(book))
125
126 def get_images(self, book):
127 return book.get_covers()
128
129 def get_image(self, book):
130 return book.get_cover()
131
132 def save_images(self, book, covers):
133 book.covers = covers
134 book._save('Update covers')
135
136 def POST(self, key):
137 book = web.ctx.site.get(key)
138 if not book:
139 raise web.notfound()
140
141 images = web.input(image=[]).image
142 if '-' in images:
143 images = [int(id) for id in images[: images.index('-')]]
144 self.save_images(book, images)
145 return render_template("covers/saved", self.get_image(book), showinfo=False)
146 else:
147 # ERROR
148 pass
149
150
151 class manage_work_covers(manage_covers):
152 path = r"(/works/OL\d+W)/manage-covers"
153
154
155 class manage_photos(manage_covers):
156 path = r"(/authors/OL\d+A)/manage-photos"
157
158 def get_images(self, author):
159 return author.get_photos()
160
161 def get_image(self, author):
162 return author.get_photo()
163
164 def save_images(self, author, photos):
165 author.photos = photos
166 author._save('Update photos')
167
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/openlibrary/plugins/upstream/covers.py b/openlibrary/plugins/upstream/covers.py
--- a/openlibrary/plugins/upstream/covers.py
+++ b/openlibrary/plugins/upstream/covers.py
@@ -36,6 +36,10 @@
if not book:
raise web.notfound("")
+ user = accounts.get_current_user()
+ if user and user.is_read_only():
+ raise web.forbidden(message="Patron not permitted to upload images")
+
i = web.input(file={}, url="")
# remove references to field storage objects
| {"golden_diff": "diff --git a/openlibrary/plugins/upstream/covers.py b/openlibrary/plugins/upstream/covers.py\n--- a/openlibrary/plugins/upstream/covers.py\n+++ b/openlibrary/plugins/upstream/covers.py\n@@ -36,6 +36,10 @@\n if not book:\n raise web.notfound(\"\")\n \n+ user = accounts.get_current_user()\n+ if user and user.is_read_only():\n+ raise web.forbidden(message=\"Patron not permitted to upload images\")\n+\n i = web.input(file={}, url=\"\")\n \n # remove references to field storage objects\n", "issue": "Members of `/usergroup/read-only` can edit covers\n<!-- What problem are we solving? What does the experience look like today? What are the symptoms? -->\r\nMembers of the `read-only` usergroup are still able to upload cover images and URLs via the manage covers modal.\r\n\r\n### Evidence / Screenshot (if possible)\r\n\r\n### Relevant url?\r\n<!-- `https://openlibrary.org/...` -->\r\n\r\n### Steps to Reproduce\r\n<!-- What steps caused you to find the bug? -->\r\nWhile logged-in as a member of the read-only group:\r\n1. Go to any book page\r\n2. Attempt to add a new cover via the manage covers modal\r\n\r\n<!-- What actually happened after these steps? What did you expect to happen? -->\r\n* Actual: The cover is updated.\r\n* Expected: The update fails gracefully.\r\n\r\n### Details\r\n\r\n- **Logged in (Y/N)?**Y\r\n- **Browser type/version?**Any\r\n- **Operating system?**Any\r\n- **Environment (prod/dev/local)?** prod\r\n<!-- If not sure, put prod -->\r\n\r\n### Proposal & Constraints\r\n<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->\r\nCheck for membership in the read-only group before persisting the new cover. If submitter is a member, do not store the image.\r\n\r\n### Related files\r\n<!-- Files related to this issue; this is super useful for new contributors who might want to help! If you're not sure, leave this blank; a maintainer will add them. 
-->\r\nPOST handler for manage cover form: https://github.com/internetarchive/openlibrary/blob/master/openlibrary/plugins/upstream/covers.py#L34-L53\r\n\r\n\r\n### Stakeholders\r\n<!-- @ tag stakeholders of this bug -->\r\n@seabelis \n", "before_files": [{"content": "\"\"\"Handle book cover/author photo upload.\n\"\"\"\nfrom logging import getLogger\n\nimport requests\nimport web\nfrom io import BytesIO\n\nfrom infogami.utils import delegate\nfrom infogami.utils.view import safeint\nfrom openlibrary import accounts\nfrom openlibrary.plugins.upstream.models import Image\nfrom openlibrary.plugins.upstream.utils import (\n get_coverstore_url,\n get_coverstore_public_url,\n render_template,\n)\n\nlogger = getLogger(\"openlibrary.plugins.upstream.covers\")\n\n\ndef setup():\n pass\n\n\nclass add_cover(delegate.page):\n path = r\"(/books/OL\\d+M)/add-cover\"\n cover_category = \"b\"\n\n def GET(self, key):\n book = web.ctx.site.get(key)\n return render_template('covers/add', book)\n\n def POST(self, key):\n book = web.ctx.site.get(key)\n if not book:\n raise web.notfound(\"\")\n\n i = web.input(file={}, url=\"\")\n\n # remove references to field storage objects\n web.ctx.pop(\"_fieldstorage\", None)\n\n data = self.upload(key, i)\n\n if coverid := data.get('id'):\n if isinstance(i.url, bytes):\n i.url = i.url.decode(\"utf-8\")\n self.save(book, coverid, url=i.url)\n cover = Image(web.ctx.site, \"b\", coverid)\n return render_template(\"covers/saved\", cover)\n else:\n return render_template(\"covers/add\", book, {'url': i.url}, data)\n\n def upload(self, key, i):\n \"\"\"Uploads a cover to coverstore and returns the response.\"\"\"\n olid = key.split(\"/\")[-1]\n\n if i.file is not None and hasattr(i.file, 'value'):\n data = i.file.value\n else:\n data = None\n\n if i.url and i.url.strip() == \"https://\":\n i.url = \"\"\n\n user = accounts.get_current_user()\n params = {\n \"author\": user and user.key,\n \"source_url\": i.url,\n \"olid\": olid,\n \"ip\": web.ctx.ip,\n }\n\n upload_url = f'{get_coverstore_url()}/{self.cover_category}/upload2'\n\n if upload_url.startswith(\"//\"):\n upload_url = \"http:\" + upload_url\n\n try:\n files = {'data': BytesIO(data)}\n response = requests.post(upload_url, data=params, files=files)\n return web.storage(response.json())\n except requests.HTTPError as e:\n logger.exception(\"Covers upload failed\")\n return web.storage({'error': str(e)})\n\n def save(self, book, coverid, url=None):\n book.covers = [coverid] + [cover.id for cover in book.get_covers()]\n book._save(\n f'{get_coverstore_public_url()}/b/id/{coverid}-S.jpg',\n action=\"add-cover\",\n data={\"url\": url},\n )\n\n\nclass add_work_cover(add_cover):\n path = r\"(/works/OL\\d+W)/add-cover\"\n cover_category = \"w\"\n\n def upload(self, key, i):\n if \"coverid\" in i and safeint(i.coverid):\n return web.storage(id=int(i.coverid))\n else:\n return add_cover.upload(self, key, i)\n\n\nclass add_photo(add_cover):\n path = r\"(/authors/OL\\d+A)/add-photo\"\n cover_category = \"a\"\n\n def save(self, author, photoid, url=None):\n author.photos = [photoid] + [photo.id for photo in author.get_photos()]\n author._save(\"Added new photo\", action=\"add-photo\", data={\"url\": url})\n\n\nclass manage_covers(delegate.page):\n path = r\"(/books/OL\\d+M)/manage-covers\"\n\n def GET(self, key):\n book = web.ctx.site.get(key)\n if not book:\n raise web.notfound()\n return render_template(\"covers/manage\", key, self.get_images(book))\n\n def get_images(self, book):\n return book.get_covers()\n\n def get_image(self, 
book):\n return book.get_cover()\n\n def save_images(self, book, covers):\n book.covers = covers\n book._save('Update covers')\n\n def POST(self, key):\n book = web.ctx.site.get(key)\n if not book:\n raise web.notfound()\n\n images = web.input(image=[]).image\n if '-' in images:\n images = [int(id) for id in images[: images.index('-')]]\n self.save_images(book, images)\n return render_template(\"covers/saved\", self.get_image(book), showinfo=False)\n else:\n # ERROR\n pass\n\n\nclass manage_work_covers(manage_covers):\n path = r\"(/works/OL\\d+W)/manage-covers\"\n\n\nclass manage_photos(manage_covers):\n path = r\"(/authors/OL\\d+A)/manage-photos\"\n\n def get_images(self, author):\n return author.get_photos()\n\n def get_image(self, author):\n return author.get_photo()\n\n def save_images(self, author, photos):\n author.photos = photos\n author._save('Update photos')\n", "path": "openlibrary/plugins/upstream/covers.py"}], "after_files": [{"content": "\"\"\"Handle book cover/author photo upload.\n\"\"\"\nfrom logging import getLogger\n\nimport requests\nimport web\nfrom io import BytesIO\n\nfrom infogami.utils import delegate\nfrom infogami.utils.view import safeint\nfrom openlibrary import accounts\nfrom openlibrary.plugins.upstream.models import Image\nfrom openlibrary.plugins.upstream.utils import (\n get_coverstore_url,\n get_coverstore_public_url,\n render_template,\n)\n\nlogger = getLogger(\"openlibrary.plugins.upstream.covers\")\n\n\ndef setup():\n pass\n\n\nclass add_cover(delegate.page):\n path = r\"(/books/OL\\d+M)/add-cover\"\n cover_category = \"b\"\n\n def GET(self, key):\n book = web.ctx.site.get(key)\n return render_template('covers/add', book)\n\n def POST(self, key):\n book = web.ctx.site.get(key)\n if not book:\n raise web.notfound(\"\")\n\n user = accounts.get_current_user()\n if user and user.is_read_only():\n raise web.forbidden(message=\"Patron not permitted to upload images\")\n\n i = web.input(file={}, url=\"\")\n\n # remove references to field storage objects\n web.ctx.pop(\"_fieldstorage\", None)\n\n data = self.upload(key, i)\n\n if coverid := data.get('id'):\n if isinstance(i.url, bytes):\n i.url = i.url.decode(\"utf-8\")\n self.save(book, coverid, url=i.url)\n cover = Image(web.ctx.site, \"b\", coverid)\n return render_template(\"covers/saved\", cover)\n else:\n return render_template(\"covers/add\", book, {'url': i.url}, data)\n\n def upload(self, key, i):\n \"\"\"Uploads a cover to coverstore and returns the response.\"\"\"\n olid = key.split(\"/\")[-1]\n\n if i.file is not None and hasattr(i.file, 'value'):\n data = i.file.value\n else:\n data = None\n\n if i.url and i.url.strip() == \"https://\":\n i.url = \"\"\n\n user = accounts.get_current_user()\n params = {\n \"author\": user and user.key,\n \"source_url\": i.url,\n \"olid\": olid,\n \"ip\": web.ctx.ip,\n }\n\n upload_url = f'{get_coverstore_url()}/{self.cover_category}/upload2'\n\n if upload_url.startswith(\"//\"):\n upload_url = \"http:\" + upload_url\n\n try:\n files = {'data': BytesIO(data)}\n response = requests.post(upload_url, data=params, files=files)\n return web.storage(response.json())\n except requests.HTTPError as e:\n logger.exception(\"Covers upload failed\")\n return web.storage({'error': str(e)})\n\n def save(self, book, coverid, url=None):\n book.covers = [coverid] + [cover.id for cover in book.get_covers()]\n book._save(\n f'{get_coverstore_public_url()}/b/id/{coverid}-S.jpg',\n action=\"add-cover\",\n data={\"url\": url},\n )\n\n\nclass add_work_cover(add_cover):\n path = 
r\"(/works/OL\\d+W)/add-cover\"\n cover_category = \"w\"\n\n def upload(self, key, i):\n if \"coverid\" in i and safeint(i.coverid):\n return web.storage(id=int(i.coverid))\n else:\n return add_cover.upload(self, key, i)\n\n\nclass add_photo(add_cover):\n path = r\"(/authors/OL\\d+A)/add-photo\"\n cover_category = \"a\"\n\n def save(self, author, photoid, url=None):\n author.photos = [photoid] + [photo.id for photo in author.get_photos()]\n author._save(\"Added new photo\", action=\"add-photo\", data={\"url\": url})\n\n\nclass manage_covers(delegate.page):\n path = r\"(/books/OL\\d+M)/manage-covers\"\n\n def GET(self, key):\n book = web.ctx.site.get(key)\n if not book:\n raise web.notfound()\n return render_template(\"covers/manage\", key, self.get_images(book))\n\n def get_images(self, book):\n return book.get_covers()\n\n def get_image(self, book):\n return book.get_cover()\n\n def save_images(self, book, covers):\n book.covers = covers\n book._save('Update covers')\n\n def POST(self, key):\n book = web.ctx.site.get(key)\n if not book:\n raise web.notfound()\n\n images = web.input(image=[]).image\n if '-' in images:\n images = [int(id) for id in images[: images.index('-')]]\n self.save_images(book, images)\n return render_template(\"covers/saved\", self.get_image(book), showinfo=False)\n else:\n # ERROR\n pass\n\n\nclass manage_work_covers(manage_covers):\n path = r\"(/works/OL\\d+W)/manage-covers\"\n\n\nclass manage_photos(manage_covers):\n path = r\"(/authors/OL\\d+A)/manage-photos\"\n\n def get_images(self, author):\n return author.get_photos()\n\n def get_image(self, author):\n return author.get_photo()\n\n def save_images(self, author, photos):\n author.photos = photos\n author._save('Update photos')\n", "path": "openlibrary/plugins/upstream/covers.py"}]} | 2,146 | 124 |
gh_patches_debug_11280 | rasdani/github-patches | git_diff | scverse__scanpy-1856 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Increase visibility of ecosystem page
As discussed at the last meeting
- [ ] Document process for adding entries (note this on top of ecosystem page)
- [ ] Link from external
- [ ] Clarify goals/ differences b/w ecosystem and external
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scanpy/external/__init__.py`
Content:
```
1 from . import tl
2 from . import pl
3 from . import pp
4 from . import exporting
5
6 import sys
7 from .. import _utils
8
9 _utils.annotate_doc_types(sys.modules[__name__], 'scanpy')
10 del sys, _utils
11
12
13 __doc__ = """\
14 External API
15 ============
16
17
18 Import Scanpy's wrappers to external tools as::
19
20 import scanpy.external as sce
21
22 If you'd like to see your tool included here, please open a `pull request <https://github.com/theislab/scanpy>`_!
23
24 Preprocessing: PP
25 ------------------
26
27 Data integration
28 ~~~~~~~~~~~~~~~~
29
30 .. autosummary::
31 :toctree: .
32
33 pp.bbknn
34 pp.harmony_integrate
35 pp.mnn_correct
36 pp.scanorama_integrate
37
38
39 Sample demultiplexing, Doublet detection
40 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
41
42 .. autosummary::
43 :toctree: .
44
45 pp.scrublet
46 pp.scrublet_simulate_doublets
47 pl.scrublet_score_distribution
48 pp.hashsolo
49
50 Imputation
51 ~~~~~~~~~~
52
53 Note that the fundamental limitations of imputation are still under `debate
54 <https://github.com/theislab/scanpy/issues/189>`__.
55
56 .. autosummary::
57 :toctree: .
58
59 pp.dca
60 pp.magic
61
62
63 Tools: TL
64 ----------
65
66 Embeddings
67 ~~~~~~~~~~
68
69 .. autosummary::
70 :toctree: .
71
72 tl.phate
73 tl.palantir
74 tl.trimap
75 tl.sam
76
77 Clustering and trajectory inference
78 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
79
80 .. autosummary::
81 :toctree: .
82
83 tl.phenograph
84 tl.harmony_timeseries
85 tl.wishbone
86 tl.palantir
87 tl.palantir_results
88
89 Gene scores, Cell cycle
90 ~~~~~~~~~~~~~~~~~~~~~~~
91
92 .. autosummary::
93 :toctree: .
94
95 tl.sandbag
96 tl.cyclone
97
98
99 Plotting: PL
100 ------------
101
102 .. autosummary::
103 :toctree: .
104
105 pl.phate
106 pl.trimap
107 pl.sam
108 pl.wishbone_marker_trajectory
109
110 Exporting
111 ---------
112
113 .. autosummary::
114 :toctree: .
115
116 exporting.spring_project
117 exporting.cellbrowser
118 """
119
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scanpy/external/__init__.py b/scanpy/external/__init__.py
--- a/scanpy/external/__init__.py
+++ b/scanpy/external/__init__.py
@@ -14,12 +14,15 @@
External API
============
+.. note::
+ More tools that integrate well with scanpy and anndata can be found on the :doc:`ecosystem page <../ecosystem>`.
Import Scanpy's wrappers to external tools as::
import scanpy.external as sce
-If you'd like to see your tool included here, please open a `pull request <https://github.com/theislab/scanpy>`_!
+If you'd like to include a tool here, consider making a pull request (:doc:`instructions <../dev/external-tools>`).
+If the tool already uses `scanpy` or `anndata`, it may fit better in the :doc:`ecosystem page <../ecosystem>`.
Preprocessing: PP
------------------
| {"golden_diff": "diff --git a/scanpy/external/__init__.py b/scanpy/external/__init__.py\n--- a/scanpy/external/__init__.py\n+++ b/scanpy/external/__init__.py\n@@ -14,12 +14,15 @@\n External API\n ============\n \n+.. note::\n+ More tools that integrate well with scanpy and anndata can be found on the :doc:`ecosystem page <../ecosystem>`.\n \n Import Scanpy's wrappers to external tools as::\n \n import scanpy.external as sce\n \n-If you'd like to see your tool included here, please open a `pull request <https://github.com/theislab/scanpy>`_!\n+If you'd like to include a tool here, consider making a pull request (:doc:`instructions <../dev/external-tools>`).\n+If the tool already uses `scanpy` or `anndata`, it may fit better in the :doc:`ecosystem page <../ecosystem>`.\n \n Preprocessing: PP\n ------------------\n", "issue": "Increase visibility of ecosystem page\nAs discussed at last meeting\r\n\r\n- [ ] Document process for adding entries (note this on top of ecosystem page)\r\n- [ ] Link from external\r\n- [ ] Clarify goals/ differences b/w ecosystem and external\n", "before_files": [{"content": "from . import tl\nfrom . import pl\nfrom . import pp\nfrom . import exporting\n\nimport sys\nfrom .. import _utils\n\n_utils.annotate_doc_types(sys.modules[__name__], 'scanpy')\ndel sys, _utils\n\n\n__doc__ = \"\"\"\\\nExternal API\n============\n\n\nImport Scanpy's wrappers to external tools as::\n\n import scanpy.external as sce\n\nIf you'd like to see your tool included here, please open a `pull request <https://github.com/theislab/scanpy>`_!\n\nPreprocessing: PP\n------------------\n\nData integration\n~~~~~~~~~~~~~~~~\n\n.. autosummary::\n :toctree: .\n\n pp.bbknn\n pp.harmony_integrate\n pp.mnn_correct\n pp.scanorama_integrate\n\n\nSample demultiplexing, Doublet detection\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. autosummary::\n :toctree: .\n\n pp.scrublet\n pp.scrublet_simulate_doublets\n pl.scrublet_score_distribution\n pp.hashsolo\n\nImputation\n~~~~~~~~~~\n\nNote that the fundamental limitations of imputation are still under `debate\n<https://github.com/theislab/scanpy/issues/189>`__.\n\n.. autosummary::\n :toctree: .\n\n pp.dca\n pp.magic\n\n\nTools: TL\n----------\n\nEmbeddings\n~~~~~~~~~~\n\n.. autosummary::\n :toctree: .\n\n tl.phate\n tl.palantir\n tl.trimap\n tl.sam\n\nClustering and trajectory inference\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. autosummary::\n :toctree: .\n\n tl.phenograph\n tl.harmony_timeseries\n tl.wishbone\n tl.palantir\n tl.palantir_results\n\nGene scores, Cell cycle\n~~~~~~~~~~~~~~~~~~~~~~~\n\n.. autosummary::\n :toctree: .\n\n tl.sandbag\n tl.cyclone\n\n\nPlotting: PL\n------------\n\n.. autosummary::\n :toctree: .\n\n pl.phate\n pl.trimap\n pl.sam\n pl.wishbone_marker_trajectory\n\nExporting\n---------\n\n.. autosummary::\n :toctree: .\n\n exporting.spring_project\n exporting.cellbrowser\n\"\"\"\n", "path": "scanpy/external/__init__.py"}], "after_files": [{"content": "from . import tl\nfrom . import pl\nfrom . import pp\nfrom . import exporting\n\nimport sys\nfrom .. import _utils\n\n_utils.annotate_doc_types(sys.modules[__name__], 'scanpy')\ndel sys, _utils\n\n\n__doc__ = \"\"\"\\\nExternal API\n============\n\n.. 
note::\n More tools that integrate well with scanpy and anndata can be found on the :doc:`ecosystem page <../ecosystem>`.\n\nImport Scanpy's wrappers to external tools as::\n\n import scanpy.external as sce\n\nIf you'd like to include a tool here, consider making a pull request (:doc:`instructions <../dev/external-tools>`).\nIf the tool already uses `scanpy` or `anndata`, it may fit better in the :doc:`ecosystem page <../ecosystem>`.\n\nPreprocessing: PP\n------------------\n\nData integration\n~~~~~~~~~~~~~~~~\n\n.. autosummary::\n :toctree: .\n\n pp.bbknn\n pp.harmony_integrate\n pp.mnn_correct\n pp.scanorama_integrate\n\n\nSample demultiplexing, Doublet detection\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. autosummary::\n :toctree: .\n\n pp.scrublet\n pp.scrublet_simulate_doublets\n pl.scrublet_score_distribution\n pp.hashsolo\n\nImputation\n~~~~~~~~~~\n\nNote that the fundamental limitations of imputation are still under `debate\n<https://github.com/theislab/scanpy/issues/189>`__.\n\n.. autosummary::\n :toctree: .\n\n pp.dca\n pp.magic\n\n\nTools: TL\n----------\n\nEmbeddings\n~~~~~~~~~~\n\n.. autosummary::\n :toctree: .\n\n tl.phate\n tl.palantir\n tl.trimap\n tl.sam\n\nClustering and trajectory inference\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. autosummary::\n :toctree: .\n\n tl.phenograph\n tl.harmony_timeseries\n tl.wishbone\n tl.palantir\n tl.palantir_results\n\nGene scores, Cell cycle\n~~~~~~~~~~~~~~~~~~~~~~~\n\n.. autosummary::\n :toctree: .\n\n tl.sandbag\n tl.cyclone\n\n\nPlotting: PL\n------------\n\n.. autosummary::\n :toctree: .\n\n pl.phate\n pl.trimap\n pl.sam\n pl.wishbone_marker_trajectory\n\nExporting\n---------\n\n.. autosummary::\n :toctree: .\n\n exporting.spring_project\n exporting.cellbrowser\n\"\"\"\n", "path": "scanpy/external/__init__.py"}]} | 1,087 | 227 |
gh_patches_debug_37868 | rasdani/github-patches | git_diff | privacyidea__privacyidea-1524 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Change yubico POST request to GET request
See: https://community.privacyidea.org/t/yubico-auth-not-working-since-a-few-hours-quickfix/977/3
We need to change the POST request to the yubicloud to a GET request.
*bummer*
--- END ISSUE ---
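In `requests` terms, the change boils down to swapping the form-encoded POST body for a GET with query parameters. A minimal stand-alone sketch of that call shape (an illustration, not necessarily the project's actual patch):

```python
import requests

YUBICO_URL = "https://api.yubico.com/wsapi/2.0/verify"


def verify_otp(params):
    # `params` is the same dict the token class builds below (nonce, otp, id, h);
    # requests.get() encodes it into the query string instead of a POST body.
    return requests.get(YUBICO_URL, params=params)
```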
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `privacyidea/lib/tokens/yubicotoken.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # privacyIDEA is a fork of LinOTP
4 # May 08, 2014 Cornelius Kölbel
5 # License: AGPLv3
6 # contact: http://www.privacyidea.org
7 #
8 # 2017-11-24 Cornelius Kölbel <[email protected]>
9 # Generate the nonce on an HSM
10 # 2016-04-04 Cornelius Kölbel <[email protected]>
11 # Use central yubico_api_signature function
12 # 2015-01-28 Rewrite during flask migration
13 # Change to use requests module
14 # Cornelius Kölbel <[email protected]>
15 #
16 #
17 # Copyright (C) 2010 - 2014 LSE Leading Security Experts GmbH
18 # License: LSE
19 # contact: http://www.linotp.org
20 # http://www.lsexperts.de
21 # [email protected]
22 #
23 # This code is free software; you can redistribute it and/or
24 # modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
25 # License as published by the Free Software Foundation; either
26 # version 3 of the License, or any later version.
27 #
28 # This code is distributed in the hope that it will be useful,
29 # but WITHOUT ANY WARRANTY; without even the implied warranty of
30 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
31 # GNU AFFERO GENERAL PUBLIC LICENSE for more details.
32 #
33 # You should have received a copy of the GNU Affero General Public
34 # License along with this program. If not, see <http://www.gnu.org/licenses/>.
35 #
36 __doc__ = """
37 This is the implementation of the yubico token type.
38 Authentication requests are forwarded to the Yubico Cloud service YubiCloud.
39
40 The code is tested in tests/test_lib_tokens_yubico
41 """
42 import logging
43 from privacyidea.lib.decorators import check_token_locked
44 import traceback
45 import requests
46 from privacyidea.api.lib.utils import getParam
47 from privacyidea.lib.crypto import geturandom
48 from privacyidea.lib.config import get_from_config
49 from privacyidea.lib.log import log_with
50 from privacyidea.lib.tokenclass import TokenClass, TOKENKIND
51 from privacyidea.lib.tokens.yubikeytoken import (yubico_check_api_signature,
52 yubico_api_signature)
53 import os
54 import binascii
55 from privacyidea.lib import _
56
57 YUBICO_LEN_ID = 12
58 YUBICO_LEN_OTP = 44
59 YUBICO_URL = "https://api.yubico.com/wsapi/2.0/verify"
60 DEFAULT_CLIENT_ID = 20771
61 DEFAULT_API_KEY = "9iE9DRkPHQDJbAFFC31/dum5I54="
62
63 optional = True
64 required = False
65
66 log = logging.getLogger(__name__)
67
68
69 class YubicoTokenClass(TokenClass):
70
71 def __init__(self, db_token):
72 TokenClass.__init__(self, db_token)
73 self.set_type(u"yubico")
74 self.tokenid = ""
75
76 @staticmethod
77 def get_class_type():
78 return "yubico"
79
80 @staticmethod
81 def get_class_prefix():
82 return "UBCM"
83
84 @staticmethod
85 @log_with(log)
86 def get_class_info(key=None, ret='all'):
87 """
88 :param key: subsection identifier
89 :type key: string
90 :param ret: default return value, if nothing is found
91 :type ret: user defined
92 :return: subsection if key exists or user defined
93 :rtype: dict or string
94 """
95 res = {'type': 'yubico',
96 'title': 'Yubico Token',
97 'description': _('Yubikey Cloud mode: Forward authentication '
98 'request to YubiCloud.'),
99 'user': ['enroll'],
100 # This tokentype is enrollable in the UI for...
101 'ui_enroll': ["admin", "user"],
102 'policy' : {},
103 }
104
105 if key:
106 ret = res.get(key, {})
107 else:
108 if ret == 'all':
109 ret = res
110 return ret
111
112 def update(self, param):
113 tokenid = getParam(param, "yubico.tokenid", required)
114 if len(tokenid) < YUBICO_LEN_ID:
115 log.error("The tokenid needs to be {0:d} characters long!".format(YUBICO_LEN_ID))
116 raise Exception("The Yubikey token ID needs to be {0:d} characters long!".format(YUBICO_LEN_ID))
117
118 if len(tokenid) > YUBICO_LEN_ID:
119 tokenid = tokenid[:YUBICO_LEN_ID]
120 self.tokenid = tokenid
121 # overwrite the maybe wrong lenght given at the command line
122 param['otplen'] = 44
123 TokenClass.update(self, param)
124 self.add_tokeninfo("yubico.tokenid", self.tokenid)
125 self.add_tokeninfo("tokenkind", TOKENKIND.HARDWARE)
126
127 @log_with(log)
128 @check_token_locked
129 def check_otp(self, anOtpVal, counter=None, window=None, options=None):
130 """
131 Here we contact the Yubico Cloud server to validate the OtpVal.
132 """
133 res = -1
134
135 apiId = get_from_config("yubico.id", DEFAULT_CLIENT_ID)
136 apiKey = get_from_config("yubico.secret", DEFAULT_API_KEY)
137 yubico_url = get_from_config("yubico.url", YUBICO_URL)
138
139 if apiKey == DEFAULT_API_KEY or apiId == DEFAULT_CLIENT_ID:
140 log.warning("Usage of default apiKey or apiId not recommended!")
141 log.warning("Please register your own apiKey and apiId at "
142 "yubico website!")
143 log.warning("Configure of apiKey and apiId at the "
144 "privacyidea manage config menu!")
145
146 tokenid = self.get_tokeninfo("yubico.tokenid")
147 if len(anOtpVal) < 12:
148 log.warning("The otpval is too short: {0!r}".format(anOtpVal))
149 elif anOtpVal[:12] != tokenid:
150 log.warning("The tokenid in the OTP value does not match "
151 "the assigned token!")
152 else:
153 nonce = geturandom(20, hex=True)
154 p = {'nonce': nonce,
155 'otp': anOtpVal,
156 'id': apiId}
157 # Also send the signature to the yubico server
158 p["h"] = yubico_api_signature(p, apiKey)
159
160 try:
161 r = requests.post(yubico_url,
162 data=p)
163
164 if r.status_code == requests.codes.ok:
165 response = r.text
166 elements = response.split()
167 data = {}
168 for elem in elements:
169 k, v = elem.split("=", 1)
170 data[k] = v
171 result = data.get("status")
172 return_nonce = data.get("nonce")
173 # check signature:
174 signature_valid = yubico_check_api_signature(data, apiKey)
175
176 if not signature_valid:
177 log.error("The hash of the return from the yubico "
178 "authentication server ({0!s}) "
179 "does not match the data!".format(yubico_url))
180
181 if nonce != return_nonce:
182 log.error("The returned nonce does not match "
183 "the sent nonce!")
184
185 if result == "OK":
186 res = 1
187 if nonce != return_nonce or not signature_valid:
188 log.warning("Nonce and Hash do not match.")
189 res = -2
190 else:
191 # possible results are listed here:
192 # https://github.com/Yubico/yubikey-val/wiki/ValidationProtocolV20
193 log.warning("failed with {0!r}".format(result))
194
195 except Exception as ex:
196 log.error("Error getting response from Yubico Cloud Server"
197 " (%r): %r" % (yubico_url, ex))
198 log.debug("{0!s}".format(traceback.format_exc()))
199
200 return res
201
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/privacyidea/lib/tokens/yubicotoken.py b/privacyidea/lib/tokens/yubicotoken.py
--- a/privacyidea/lib/tokens/yubicotoken.py
+++ b/privacyidea/lib/tokens/yubicotoken.py
@@ -5,6 +5,8 @@
# License: AGPLv3
# contact: http://www.privacyidea.org
#
+# 2019-03-21 Cornelius Kölbel <[email protected]>
+# Change POST to GET request
# 2017-11-24 Cornelius Kölbel <[email protected]>
# Generate the nonce on an HSM
# 2016-04-04 Cornelius Kölbel <[email protected]>
@@ -50,13 +52,16 @@
from privacyidea.lib.tokenclass import TokenClass, TOKENKIND
from privacyidea.lib.tokens.yubikeytoken import (yubico_check_api_signature,
yubico_api_signature)
-import os
-import binascii
+from six.moves.urllib.parse import urlencode
from privacyidea.lib import _
YUBICO_LEN_ID = 12
YUBICO_LEN_OTP = 44
YUBICO_URL = "https://api.yubico.com/wsapi/2.0/verify"
+# The Yubico API requires GET requests. See: https://developers.yubico.com/yubikey-val/Validation_Protocol_V2.0.html
+# Previously we used POST requests.
+# If you want to have the old behaviour, you can set this to True
+DO_YUBICO_POST = False
DEFAULT_CLIENT_ID = 20771
DEFAULT_API_KEY = "9iE9DRkPHQDJbAFFC31/dum5I54="
@@ -135,6 +140,7 @@
apiId = get_from_config("yubico.id", DEFAULT_CLIENT_ID)
apiKey = get_from_config("yubico.secret", DEFAULT_API_KEY)
yubico_url = get_from_config("yubico.url", YUBICO_URL)
+ do_yubico_post = get_from_config("yubico.do_post", DO_YUBICO_POST)
if apiKey == DEFAULT_API_KEY or apiId == DEFAULT_CLIENT_ID:
log.warning("Usage of default apiKey or apiId not recommended!")
@@ -158,8 +164,12 @@
p["h"] = yubico_api_signature(p, apiKey)
try:
- r = requests.post(yubico_url,
- data=p)
+ if do_yubico_post:
+ r = requests.post(yubico_url,
+ data=p)
+ else:
+ r = requests.get(yubico_url,
+ params=urlencode(p))
if r.status_code == requests.codes.ok:
response = r.text
| {"golden_diff": "diff --git a/privacyidea/lib/tokens/yubicotoken.py b/privacyidea/lib/tokens/yubicotoken.py\n--- a/privacyidea/lib/tokens/yubicotoken.py\n+++ b/privacyidea/lib/tokens/yubicotoken.py\n@@ -5,6 +5,8 @@\n # License: AGPLv3\n # contact: http://www.privacyidea.org\n #\n+# 2019-03-21 Cornelius K\u00f6lbel <[email protected]>\n+# Change POST to GET request\n # 2017-11-24 Cornelius K\u00f6lbel <[email protected]>\n # Generate the nonce on an HSM\n # 2016-04-04 Cornelius K\u00f6lbel <[email protected]>\n@@ -50,13 +52,16 @@\n from privacyidea.lib.tokenclass import TokenClass, TOKENKIND\n from privacyidea.lib.tokens.yubikeytoken import (yubico_check_api_signature,\n yubico_api_signature)\n-import os\n-import binascii\n+from six.moves.urllib.parse import urlencode\n from privacyidea.lib import _\n \n YUBICO_LEN_ID = 12\n YUBICO_LEN_OTP = 44\n YUBICO_URL = \"https://api.yubico.com/wsapi/2.0/verify\"\n+# The Yubico API requires GET requests. See: https://developers.yubico.com/yubikey-val/Validation_Protocol_V2.0.html\n+# Previously we used POST requests.\n+# If you want to have the old behaviour, you can set this to True\n+DO_YUBICO_POST = False\n DEFAULT_CLIENT_ID = 20771\n DEFAULT_API_KEY = \"9iE9DRkPHQDJbAFFC31/dum5I54=\"\n \n@@ -135,6 +140,7 @@\n apiId = get_from_config(\"yubico.id\", DEFAULT_CLIENT_ID)\n apiKey = get_from_config(\"yubico.secret\", DEFAULT_API_KEY)\n yubico_url = get_from_config(\"yubico.url\", YUBICO_URL)\n+ do_yubico_post = get_from_config(\"yubico.do_post\", DO_YUBICO_POST)\n \n if apiKey == DEFAULT_API_KEY or apiId == DEFAULT_CLIENT_ID:\n log.warning(\"Usage of default apiKey or apiId not recommended!\")\n@@ -158,8 +164,12 @@\n p[\"h\"] = yubico_api_signature(p, apiKey)\n \n try:\n- r = requests.post(yubico_url,\n- data=p)\n+ if do_yubico_post:\n+ r = requests.post(yubico_url,\n+ data=p)\n+ else:\n+ r = requests.get(yubico_url,\n+ params=urlencode(p))\n \n if r.status_code == requests.codes.ok:\n response = r.text\n", "issue": "Change yubico POST request to GET request\nSee: https://community.privacyidea.org/t/yubico-auth-not-working-since-a-few-hours-quickfix/977/3\r\n\r\nWe need to change the POST request to the yubicloud to a GET request.\r\n\r\n*bummer*\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# privacyIDEA is a fork of LinOTP\n# May 08, 2014 Cornelius K\u00f6lbel\n# License: AGPLv3\n# contact: http://www.privacyidea.org\n#\n# 2017-11-24 Cornelius K\u00f6lbel <[email protected]>\n# Generate the nonce on an HSM\n# 2016-04-04 Cornelius K\u00f6lbel <[email protected]>\n# Use central yubico_api_signature function\n# 2015-01-28 Rewrite during flask migration\n# Change to use requests module\n# Cornelius K\u00f6lbel <[email protected]>\n#\n#\n# Copyright (C) 2010 - 2014 LSE Leading Security Experts GmbH\n# License: LSE\n# contact: http://www.linotp.org\n# http://www.lsexperts.de\n# [email protected]\n#\n# This code is free software; you can redistribute it and/or\n# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n# License as published by the Free Software Foundation; either\n# version 3 of the License, or any later version.\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU AFFERO GENERAL PUBLIC LICENSE for more details.\n#\n# You should have received a copy of the GNU Affero General Public\n# License along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n__doc__ = \"\"\"\nThis is the implementation of the yubico token type.\nAuthentication requests are forwarded to the Yubico Cloud service YubiCloud.\n\nThe code is tested in tests/test_lib_tokens_yubico\n\"\"\"\nimport logging\nfrom privacyidea.lib.decorators import check_token_locked\nimport traceback\nimport requests\nfrom privacyidea.api.lib.utils import getParam\nfrom privacyidea.lib.crypto import geturandom\nfrom privacyidea.lib.config import get_from_config\nfrom privacyidea.lib.log import log_with\nfrom privacyidea.lib.tokenclass import TokenClass, TOKENKIND\nfrom privacyidea.lib.tokens.yubikeytoken import (yubico_check_api_signature,\n yubico_api_signature)\nimport os\nimport binascii\nfrom privacyidea.lib import _\n\nYUBICO_LEN_ID = 12\nYUBICO_LEN_OTP = 44\nYUBICO_URL = \"https://api.yubico.com/wsapi/2.0/verify\"\nDEFAULT_CLIENT_ID = 20771\nDEFAULT_API_KEY = \"9iE9DRkPHQDJbAFFC31/dum5I54=\"\n\noptional = True\nrequired = False\n\nlog = logging.getLogger(__name__)\n\n\nclass YubicoTokenClass(TokenClass):\n\n def __init__(self, db_token):\n TokenClass.__init__(self, db_token)\n self.set_type(u\"yubico\")\n self.tokenid = \"\"\n\n @staticmethod\n def get_class_type():\n return \"yubico\"\n\n @staticmethod\n def get_class_prefix():\n return \"UBCM\"\n\n @staticmethod\n @log_with(log)\n def get_class_info(key=None, ret='all'):\n \"\"\"\n :param key: subsection identifier\n :type key: string\n :param ret: default return value, if nothing is found\n :type ret: user defined\n :return: subsection if key exists or user defined\n :rtype: dict or string\n \"\"\"\n res = {'type': 'yubico',\n 'title': 'Yubico Token',\n 'description': _('Yubikey Cloud mode: Forward authentication '\n 'request to YubiCloud.'),\n 'user': ['enroll'],\n # This tokentype is enrollable in the UI for...\n 'ui_enroll': [\"admin\", \"user\"],\n 'policy' : {},\n }\n\n if key:\n ret = res.get(key, {})\n else:\n if ret == 'all':\n ret = res\n return ret\n\n def update(self, param):\n tokenid = getParam(param, \"yubico.tokenid\", required)\n if len(tokenid) < YUBICO_LEN_ID:\n log.error(\"The tokenid needs to be {0:d} characters long!\".format(YUBICO_LEN_ID))\n raise Exception(\"The Yubikey token ID needs to be {0:d} characters long!\".format(YUBICO_LEN_ID))\n\n if len(tokenid) > YUBICO_LEN_ID:\n tokenid = tokenid[:YUBICO_LEN_ID]\n self.tokenid = tokenid\n # overwrite the maybe wrong lenght given at the command line\n param['otplen'] = 44\n TokenClass.update(self, param)\n self.add_tokeninfo(\"yubico.tokenid\", self.tokenid)\n self.add_tokeninfo(\"tokenkind\", TOKENKIND.HARDWARE)\n\n @log_with(log)\n @check_token_locked\n def check_otp(self, anOtpVal, counter=None, window=None, options=None):\n \"\"\"\n Here we contact the Yubico Cloud server to validate the OtpVal.\n \"\"\"\n res = -1\n\n apiId = get_from_config(\"yubico.id\", DEFAULT_CLIENT_ID)\n apiKey = get_from_config(\"yubico.secret\", DEFAULT_API_KEY)\n yubico_url = get_from_config(\"yubico.url\", YUBICO_URL)\n\n if apiKey == DEFAULT_API_KEY or apiId == DEFAULT_CLIENT_ID:\n log.warning(\"Usage of default apiKey or apiId not recommended!\")\n log.warning(\"Please register your own apiKey and apiId at \"\n \"yubico website!\")\n log.warning(\"Configure of apiKey and apiId at the \"\n \"privacyidea manage config menu!\")\n\n tokenid = self.get_tokeninfo(\"yubico.tokenid\")\n if len(anOtpVal) < 12:\n log.warning(\"The otpval is too short: {0!r}\".format(anOtpVal))\n elif anOtpVal[:12] != tokenid:\n log.warning(\"The tokenid 
in the OTP value does not match \"\n \"the assigned token!\")\n else:\n nonce = geturandom(20, hex=True)\n p = {'nonce': nonce,\n 'otp': anOtpVal,\n 'id': apiId}\n # Also send the signature to the yubico server\n p[\"h\"] = yubico_api_signature(p, apiKey)\n\n try:\n r = requests.post(yubico_url,\n data=p)\n\n if r.status_code == requests.codes.ok:\n response = r.text\n elements = response.split()\n data = {}\n for elem in elements:\n k, v = elem.split(\"=\", 1)\n data[k] = v\n result = data.get(\"status\")\n return_nonce = data.get(\"nonce\")\n # check signature:\n signature_valid = yubico_check_api_signature(data, apiKey)\n\n if not signature_valid:\n log.error(\"The hash of the return from the yubico \"\n \"authentication server ({0!s}) \"\n \"does not match the data!\".format(yubico_url))\n\n if nonce != return_nonce:\n log.error(\"The returned nonce does not match \"\n \"the sent nonce!\")\n\n if result == \"OK\":\n res = 1\n if nonce != return_nonce or not signature_valid:\n log.warning(\"Nonce and Hash do not match.\")\n res = -2\n else:\n # possible results are listed here:\n # https://github.com/Yubico/yubikey-val/wiki/ValidationProtocolV20\n log.warning(\"failed with {0!r}\".format(result))\n\n except Exception as ex:\n log.error(\"Error getting response from Yubico Cloud Server\"\n \" (%r): %r\" % (yubico_url, ex))\n log.debug(\"{0!s}\".format(traceback.format_exc()))\n\n return res\n", "path": "privacyidea/lib/tokens/yubicotoken.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# privacyIDEA is a fork of LinOTP\n# May 08, 2014 Cornelius K\u00f6lbel\n# License: AGPLv3\n# contact: http://www.privacyidea.org\n#\n# 2019-03-21 Cornelius K\u00f6lbel <[email protected]>\n# Change POST to GET request\n# 2017-11-24 Cornelius K\u00f6lbel <[email protected]>\n# Generate the nonce on an HSM\n# 2016-04-04 Cornelius K\u00f6lbel <[email protected]>\n# Use central yubico_api_signature function\n# 2015-01-28 Rewrite during flask migration\n# Change to use requests module\n# Cornelius K\u00f6lbel <[email protected]>\n#\n#\n# Copyright (C) 2010 - 2014 LSE Leading Security Experts GmbH\n# License: LSE\n# contact: http://www.linotp.org\n# http://www.lsexperts.de\n# [email protected]\n#\n# This code is free software; you can redistribute it and/or\n# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n# License as published by the Free Software Foundation; either\n# version 3 of the License, or any later version.\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU AFFERO GENERAL PUBLIC LICENSE for more details.\n#\n# You should have received a copy of the GNU Affero General Public\n# License along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n__doc__ = \"\"\"\nThis is the implementation of the yubico token type.\nAuthentication requests are forwarded to the Yubico Cloud service YubiCloud.\n\nThe code is tested in tests/test_lib_tokens_yubico\n\"\"\"\nimport logging\nfrom privacyidea.lib.decorators import check_token_locked\nimport traceback\nimport requests\nfrom privacyidea.api.lib.utils import getParam\nfrom privacyidea.lib.crypto import geturandom\nfrom privacyidea.lib.config import get_from_config\nfrom privacyidea.lib.log import log_with\nfrom privacyidea.lib.tokenclass import TokenClass, TOKENKIND\nfrom privacyidea.lib.tokens.yubikeytoken import (yubico_check_api_signature,\n yubico_api_signature)\nfrom six.moves.urllib.parse import urlencode\nfrom privacyidea.lib import _\n\nYUBICO_LEN_ID = 12\nYUBICO_LEN_OTP = 44\nYUBICO_URL = \"https://api.yubico.com/wsapi/2.0/verify\"\n# The Yubico API requires GET requests. See: https://developers.yubico.com/yubikey-val/Validation_Protocol_V2.0.html\n# Previously we used POST requests.\n# If you want to have the old behaviour, you can set this to True\nDO_YUBICO_POST = False\nDEFAULT_CLIENT_ID = 20771\nDEFAULT_API_KEY = \"9iE9DRkPHQDJbAFFC31/dum5I54=\"\n\noptional = True\nrequired = False\n\nlog = logging.getLogger(__name__)\n\n\nclass YubicoTokenClass(TokenClass):\n\n def __init__(self, db_token):\n TokenClass.__init__(self, db_token)\n self.set_type(u\"yubico\")\n self.tokenid = \"\"\n\n @staticmethod\n def get_class_type():\n return \"yubico\"\n\n @staticmethod\n def get_class_prefix():\n return \"UBCM\"\n\n @staticmethod\n @log_with(log)\n def get_class_info(key=None, ret='all'):\n \"\"\"\n :param key: subsection identifier\n :type key: string\n :param ret: default return value, if nothing is found\n :type ret: user defined\n :return: subsection if key exists or user defined\n :rtype: dict or string\n \"\"\"\n res = {'type': 'yubico',\n 'title': 'Yubico Token',\n 'description': _('Yubikey Cloud mode: Forward authentication '\n 'request to YubiCloud.'),\n 'user': ['enroll'],\n # This tokentype is enrollable in the UI for...\n 'ui_enroll': [\"admin\", \"user\"],\n 'policy' : {},\n }\n\n if key:\n ret = res.get(key, {})\n else:\n if ret == 'all':\n ret = res\n return ret\n\n def update(self, param):\n tokenid = getParam(param, \"yubico.tokenid\", required)\n if len(tokenid) < YUBICO_LEN_ID:\n log.error(\"The tokenid needs to be {0:d} characters long!\".format(YUBICO_LEN_ID))\n raise Exception(\"The Yubikey token ID needs to be {0:d} characters long!\".format(YUBICO_LEN_ID))\n\n if len(tokenid) > YUBICO_LEN_ID:\n tokenid = tokenid[:YUBICO_LEN_ID]\n self.tokenid = tokenid\n # overwrite the maybe wrong lenght given at the command line\n param['otplen'] = 44\n TokenClass.update(self, param)\n self.add_tokeninfo(\"yubico.tokenid\", self.tokenid)\n self.add_tokeninfo(\"tokenkind\", TOKENKIND.HARDWARE)\n\n @log_with(log)\n @check_token_locked\n def check_otp(self, anOtpVal, counter=None, window=None, options=None):\n \"\"\"\n Here we contact the Yubico Cloud server to validate the OtpVal.\n \"\"\"\n res = -1\n\n apiId = get_from_config(\"yubico.id\", DEFAULT_CLIENT_ID)\n apiKey = get_from_config(\"yubico.secret\", DEFAULT_API_KEY)\n yubico_url = get_from_config(\"yubico.url\", YUBICO_URL)\n do_yubico_post = get_from_config(\"yubico.do_post\", DO_YUBICO_POST)\n\n if apiKey == DEFAULT_API_KEY or apiId == DEFAULT_CLIENT_ID:\n log.warning(\"Usage of default apiKey or apiId not recommended!\")\n log.warning(\"Please register your own apiKey and 
apiId at \"\n \"yubico website!\")\n log.warning(\"Configure of apiKey and apiId at the \"\n \"privacyidea manage config menu!\")\n\n tokenid = self.get_tokeninfo(\"yubico.tokenid\")\n if len(anOtpVal) < 12:\n log.warning(\"The otpval is too short: {0!r}\".format(anOtpVal))\n elif anOtpVal[:12] != tokenid:\n log.warning(\"The tokenid in the OTP value does not match \"\n \"the assigned token!\")\n else:\n nonce = geturandom(20, hex=True)\n p = {'nonce': nonce,\n 'otp': anOtpVal,\n 'id': apiId}\n # Also send the signature to the yubico server\n p[\"h\"] = yubico_api_signature(p, apiKey)\n\n try:\n if do_yubico_post:\n r = requests.post(yubico_url,\n data=p)\n else:\n r = requests.get(yubico_url,\n params=urlencode(p))\n\n if r.status_code == requests.codes.ok:\n response = r.text\n elements = response.split()\n data = {}\n for elem in elements:\n k, v = elem.split(\"=\", 1)\n data[k] = v\n result = data.get(\"status\")\n return_nonce = data.get(\"nonce\")\n # check signature:\n signature_valid = yubico_check_api_signature(data, apiKey)\n\n if not signature_valid:\n log.error(\"The hash of the return from the yubico \"\n \"authentication server ({0!s}) \"\n \"does not match the data!\".format(yubico_url))\n\n if nonce != return_nonce:\n log.error(\"The returned nonce does not match \"\n \"the sent nonce!\")\n\n if result == \"OK\":\n res = 1\n if nonce != return_nonce or not signature_valid:\n log.warning(\"Nonce and Hash do not match.\")\n res = -2\n else:\n # possible results are listed here:\n # https://github.com/Yubico/yubikey-val/wiki/ValidationProtocolV20\n log.warning(\"failed with {0!r}\".format(result))\n\n except Exception as ex:\n log.error(\"Error getting response from Yubico Cloud Server\"\n \" (%r): %r\" % (yubico_url, ex))\n log.debug(\"{0!s}\".format(traceback.format_exc()))\n\n return res\n", "path": "privacyidea/lib/tokens/yubicotoken.py"}]} | 2,612 | 661 |
gh_patches_debug_17241 | rasdani/github-patches | git_diff | microsoft__torchgeo-1713 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
HTTP internal server error when trying to download ``AbovegroundLiveWoodyBiomassDensity``
### Description
There is an ``urllib.error.HTTPError: HTTP Error 500: Internal Server Error`` occuring . This issue occures when compiling from github and installing using pip3. It is occuring for`` AbovegroundLiveWoodyBiomassDensity`` which is a module from ``torchgeo.datasets.agb_live_woody_density``
### Steps to reproduce
python
``
from torchgeo.datasets.agb_live_woody_density import AbovegroundLiveWoodyBiomassDensity
ALWBD = AbovegroundLiveWoodyBiomassDensity(paths="~/test", download=True)
``
### Version
0.6.0.dev0 and 0.5.0
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchgeo/datasets/agb_live_woody_density.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 """Aboveground Live Woody Biomass Density dataset."""
5
6 import json
7 import os
8 from collections.abc import Iterable
9 from typing import Any, Callable, Optional, Union
10
11 import matplotlib.pyplot as plt
12 from matplotlib.figure import Figure
13 from rasterio.crs import CRS
14
15 from .geo import RasterDataset
16 from .utils import download_url
17
18
19 class AbovegroundLiveWoodyBiomassDensity(RasterDataset):
20 """Aboveground Live Woody Biomass Density dataset.
21
22 The `Aboveground Live Woody Biomass Density dataset
23 <https://data.globalforestwatch.org/datasets/gfw::aboveground-live-woody
24 -biomass-density/about>`_
25 is a global-scale, wall-to-wall map of aboveground biomass at ~30m resolution
26 for the year 2000.
27
28 Dataset features:
29
30 * Masks with per pixel live woody biomass density estimates in megagrams
31 biomass per hectare at ~30m resolution (~40,000x40,0000 px)
32
33 Dataset format:
34
35 * geojson file that contains download links to tif files
36 * single-channel geotiffs with the pixel values representing biomass density
37
38 If you use this dataset in your research, please give credit to:
39
40 * `Global Forest Watch <https://data.globalforestwatch.org/>`_
41
42 .. versionadded:: 0.3
43 """
44
45 is_image = False
46
47 url = (
48 "https://opendata.arcgis.com/api/v3/datasets/3e8736c8866b458687"
49 "e00d40c9f00bce_0/downloads/data?format=geojson&spatialRefId=4326"
50 )
51
52 base_filename = "Aboveground_Live_Woody_Biomass_Density.geojson"
53
54 filename_glob = "*N_*E.*"
55 filename_regex = r"""^
56 (?P<latitude>[0-9][0-9][A-Z])_
57 (?P<longitude>[0-9][0-9][0-9][A-Z])*
58 """
59
60 def __init__(
61 self,
62 paths: Union[str, Iterable[str]] = "data",
63 crs: Optional[CRS] = None,
64 res: Optional[float] = None,
65 transforms: Optional[Callable[[dict[str, Any]], dict[str, Any]]] = None,
66 download: bool = False,
67 cache: bool = True,
68 ) -> None:
69 """Initialize a new Dataset instance.
70
71 Args:
72 paths: one or more root directories to search or files to load
73 crs: :term:`coordinate reference system (CRS)` to warp to
74 (defaults to the CRS of the first file found)
75 res: resolution of the dataset in units of CRS
76 (defaults to the resolution of the first file found)
77 transforms: a function/transform that takes an input sample
78 and returns a transformed version
79 download: if True, download dataset and store it in the root directory
80 cache: if True, cache file handle to speed up repeated sampling
81
82 Raises:
83 FileNotFoundError: if no files are found in ``paths``
84
85 .. versionchanged:: 0.5
86 *root* was renamed to *paths*.
87 """
88 self.paths = paths
89 self.download = download
90
91 self._verify()
92
93 super().__init__(paths, crs, res, transforms=transforms, cache=cache)
94
95 def _verify(self) -> None:
96 """Verify the integrity of the dataset.
97
98 Raises:
99 RuntimeError: if dataset is missing
100 """
101 # Check if the extracted files already exist
102 if self.files:
103 return
104
105 # Check if the user requested to download the dataset
106 if not self.download:
107 raise RuntimeError(
108 f"Dataset not found in `paths={self.paths!r}` and `download=False`, "
109 "either specify a different `root` directory or use `download=True` "
110 "to automatically download the dataset."
111 )
112
113 # Download the dataset
114 self._download()
115
116 def _download(self) -> None:
117 """Download the dataset."""
118 assert isinstance(self.paths, str)
119 download_url(self.url, self.paths, self.base_filename)
120
121 with open(os.path.join(self.paths, self.base_filename)) as f:
122 content = json.load(f)
123
124 for item in content["features"]:
125 download_url(
126 item["properties"]["download"],
127 self.paths,
128 item["properties"]["tile_id"] + ".tif",
129 )
130
131 def plot(
132 self,
133 sample: dict[str, Any],
134 show_titles: bool = True,
135 suptitle: Optional[str] = None,
136 ) -> Figure:
137 """Plot a sample from the dataset.
138
139 Args:
140 sample: a sample returned by :meth:`RasterDataset.__getitem__`
141 show_titles: flag indicating whether to show titles above each panel
142 suptitle: optional string to use as a suptitle
143
144 Returns:
145 a matplotlib Figure with the rendered sample
146 """
147 mask = sample["mask"].squeeze()
148 ncols = 1
149
150 showing_predictions = "prediction" in sample
151 if showing_predictions:
152 pred = sample["prediction"].squeeze()
153 ncols = 2
154
155 fig, axs = plt.subplots(nrows=1, ncols=ncols, figsize=(ncols * 4, 4))
156
157 if showing_predictions:
158 axs[0].imshow(mask)
159 axs[0].axis("off")
160 axs[1].imshow(pred)
161 axs[1].axis("off")
162 if show_titles:
163 axs[0].set_title("Mask")
164 axs[1].set_title("Prediction")
165 else:
166 axs.imshow(mask)
167 axs.axis("off")
168 if show_titles:
169 axs.set_title("Mask")
170
171 if suptitle is not None:
172 plt.suptitle(suptitle)
173
174 return fig
175
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torchgeo/datasets/agb_live_woody_density.py b/torchgeo/datasets/agb_live_woody_density.py
--- a/torchgeo/datasets/agb_live_woody_density.py
+++ b/torchgeo/datasets/agb_live_woody_density.py
@@ -44,10 +44,7 @@
is_image = False
- url = (
- "https://opendata.arcgis.com/api/v3/datasets/3e8736c8866b458687"
- "e00d40c9f00bce_0/downloads/data?format=geojson&spatialRefId=4326"
- )
+ url = "https://opendata.arcgis.com/api/v3/datasets/e4bdbe8d6d8d4e32ace7d36a4aec7b93_0/downloads/data?format=geojson&spatialRefId=4326" # noqa: E501
base_filename = "Aboveground_Live_Woody_Biomass_Density.geojson"
@@ -123,7 +120,7 @@
for item in content["features"]:
download_url(
- item["properties"]["download"],
+ item["properties"]["Mg_px_1_download"],
self.paths,
item["properties"]["tile_id"] + ".tif",
)
| {"golden_diff": "diff --git a/torchgeo/datasets/agb_live_woody_density.py b/torchgeo/datasets/agb_live_woody_density.py\n--- a/torchgeo/datasets/agb_live_woody_density.py\n+++ b/torchgeo/datasets/agb_live_woody_density.py\n@@ -44,10 +44,7 @@\n \n is_image = False\n \n- url = (\n- \"https://opendata.arcgis.com/api/v3/datasets/3e8736c8866b458687\"\n- \"e00d40c9f00bce_0/downloads/data?format=geojson&spatialRefId=4326\"\n- )\n+ url = \"https://opendata.arcgis.com/api/v3/datasets/e4bdbe8d6d8d4e32ace7d36a4aec7b93_0/downloads/data?format=geojson&spatialRefId=4326\" # noqa: E501\n \n base_filename = \"Aboveground_Live_Woody_Biomass_Density.geojson\"\n \n@@ -123,7 +120,7 @@\n \n for item in content[\"features\"]:\n download_url(\n- item[\"properties\"][\"download\"],\n+ item[\"properties\"][\"Mg_px_1_download\"],\n self.paths,\n item[\"properties\"][\"tile_id\"] + \".tif\",\n )\n", "issue": "HTTP internal server error when trying to download ``AbovegroundLiveWoodyBiomassDensity``\n### Description\n\nThere is an ``urllib.error.HTTPError: HTTP Error 500: Internal Server Error`` occuring . This issue occures when compiling from github and installing using pip3. It is occuring for`` AbovegroundLiveWoodyBiomassDensity`` which is a module from ``torchgeo.datasets.agb_live_woody_density``\n\n### Steps to reproduce\n\npython\r\n``\r\nfrom torchgeo.datasets.agb_live_woody_density import AbovegroundLiveWoodyBiomassDensity \r\nALWBD = AbovegroundLiveWoodyBiomassDensity(paths=\"~/test\", download=True)\r\n``\r\n\n\n### Version\n\n0.6.0.dev0 and 0.5.0\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"Aboveground Live Woody Biomass Density dataset.\"\"\"\n\nimport json\nimport os\nfrom collections.abc import Iterable\nfrom typing import Any, Callable, Optional, Union\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.figure import Figure\nfrom rasterio.crs import CRS\n\nfrom .geo import RasterDataset\nfrom .utils import download_url\n\n\nclass AbovegroundLiveWoodyBiomassDensity(RasterDataset):\n \"\"\"Aboveground Live Woody Biomass Density dataset.\n\n The `Aboveground Live Woody Biomass Density dataset\n <https://data.globalforestwatch.org/datasets/gfw::aboveground-live-woody\n -biomass-density/about>`_\n is a global-scale, wall-to-wall map of aboveground biomass at ~30m resolution\n for the year 2000.\n\n Dataset features:\n\n * Masks with per pixel live woody biomass density estimates in megagrams\n biomass per hectare at ~30m resolution (~40,000x40,0000 px)\n\n Dataset format:\n\n * geojson file that contains download links to tif files\n * single-channel geotiffs with the pixel values representing biomass density\n\n If you use this dataset in your research, please give credit to:\n\n * `Global Forest Watch <https://data.globalforestwatch.org/>`_\n\n .. 
versionadded:: 0.3\n \"\"\"\n\n is_image = False\n\n url = (\n \"https://opendata.arcgis.com/api/v3/datasets/3e8736c8866b458687\"\n \"e00d40c9f00bce_0/downloads/data?format=geojson&spatialRefId=4326\"\n )\n\n base_filename = \"Aboveground_Live_Woody_Biomass_Density.geojson\"\n\n filename_glob = \"*N_*E.*\"\n filename_regex = r\"\"\"^\n (?P<latitude>[0-9][0-9][A-Z])_\n (?P<longitude>[0-9][0-9][0-9][A-Z])*\n \"\"\"\n\n def __init__(\n self,\n paths: Union[str, Iterable[str]] = \"data\",\n crs: Optional[CRS] = None,\n res: Optional[float] = None,\n transforms: Optional[Callable[[dict[str, Any]], dict[str, Any]]] = None,\n download: bool = False,\n cache: bool = True,\n ) -> None:\n \"\"\"Initialize a new Dataset instance.\n\n Args:\n paths: one or more root directories to search or files to load\n crs: :term:`coordinate reference system (CRS)` to warp to\n (defaults to the CRS of the first file found)\n res: resolution of the dataset in units of CRS\n (defaults to the resolution of the first file found)\n transforms: a function/transform that takes an input sample\n and returns a transformed version\n download: if True, download dataset and store it in the root directory\n cache: if True, cache file handle to speed up repeated sampling\n\n Raises:\n FileNotFoundError: if no files are found in ``paths``\n\n .. versionchanged:: 0.5\n *root* was renamed to *paths*.\n \"\"\"\n self.paths = paths\n self.download = download\n\n self._verify()\n\n super().__init__(paths, crs, res, transforms=transforms, cache=cache)\n\n def _verify(self) -> None:\n \"\"\"Verify the integrity of the dataset.\n\n Raises:\n RuntimeError: if dataset is missing\n \"\"\"\n # Check if the extracted files already exist\n if self.files:\n return\n\n # Check if the user requested to download the dataset\n if not self.download:\n raise RuntimeError(\n f\"Dataset not found in `paths={self.paths!r}` and `download=False`, \"\n \"either specify a different `root` directory or use `download=True` \"\n \"to automatically download the dataset.\"\n )\n\n # Download the dataset\n self._download()\n\n def _download(self) -> None:\n \"\"\"Download the dataset.\"\"\"\n assert isinstance(self.paths, str)\n download_url(self.url, self.paths, self.base_filename)\n\n with open(os.path.join(self.paths, self.base_filename)) as f:\n content = json.load(f)\n\n for item in content[\"features\"]:\n download_url(\n item[\"properties\"][\"download\"],\n self.paths,\n item[\"properties\"][\"tile_id\"] + \".tif\",\n )\n\n def plot(\n self,\n sample: dict[str, Any],\n show_titles: bool = True,\n suptitle: Optional[str] = None,\n ) -> Figure:\n \"\"\"Plot a sample from the dataset.\n\n Args:\n sample: a sample returned by :meth:`RasterDataset.__getitem__`\n show_titles: flag indicating whether to show titles above each panel\n suptitle: optional string to use as a suptitle\n\n Returns:\n a matplotlib Figure with the rendered sample\n \"\"\"\n mask = sample[\"mask\"].squeeze()\n ncols = 1\n\n showing_predictions = \"prediction\" in sample\n if showing_predictions:\n pred = sample[\"prediction\"].squeeze()\n ncols = 2\n\n fig, axs = plt.subplots(nrows=1, ncols=ncols, figsize=(ncols * 4, 4))\n\n if showing_predictions:\n axs[0].imshow(mask)\n axs[0].axis(\"off\")\n axs[1].imshow(pred)\n axs[1].axis(\"off\")\n if show_titles:\n axs[0].set_title(\"Mask\")\n axs[1].set_title(\"Prediction\")\n else:\n axs.imshow(mask)\n axs.axis(\"off\")\n if show_titles:\n axs.set_title(\"Mask\")\n\n if suptitle is not None:\n plt.suptitle(suptitle)\n\n return fig\n", "path": 
"torchgeo/datasets/agb_live_woody_density.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"Aboveground Live Woody Biomass Density dataset.\"\"\"\n\nimport json\nimport os\nfrom collections.abc import Iterable\nfrom typing import Any, Callable, Optional, Union\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.figure import Figure\nfrom rasterio.crs import CRS\n\nfrom .geo import RasterDataset\nfrom .utils import download_url\n\n\nclass AbovegroundLiveWoodyBiomassDensity(RasterDataset):\n \"\"\"Aboveground Live Woody Biomass Density dataset.\n\n The `Aboveground Live Woody Biomass Density dataset\n <https://data.globalforestwatch.org/datasets/gfw::aboveground-live-woody\n -biomass-density/about>`_\n is a global-scale, wall-to-wall map of aboveground biomass at ~30m resolution\n for the year 2000.\n\n Dataset features:\n\n * Masks with per pixel live woody biomass density estimates in megagrams\n biomass per hectare at ~30m resolution (~40,000x40,0000 px)\n\n Dataset format:\n\n * geojson file that contains download links to tif files\n * single-channel geotiffs with the pixel values representing biomass density\n\n If you use this dataset in your research, please give credit to:\n\n * `Global Forest Watch <https://data.globalforestwatch.org/>`_\n\n .. versionadded:: 0.3\n \"\"\"\n\n is_image = False\n\n url = \"https://opendata.arcgis.com/api/v3/datasets/e4bdbe8d6d8d4e32ace7d36a4aec7b93_0/downloads/data?format=geojson&spatialRefId=4326\" # noqa: E501\n\n base_filename = \"Aboveground_Live_Woody_Biomass_Density.geojson\"\n\n filename_glob = \"*N_*E.*\"\n filename_regex = r\"\"\"^\n (?P<latitude>[0-9][0-9][A-Z])_\n (?P<longitude>[0-9][0-9][0-9][A-Z])*\n \"\"\"\n\n def __init__(\n self,\n paths: Union[str, Iterable[str]] = \"data\",\n crs: Optional[CRS] = None,\n res: Optional[float] = None,\n transforms: Optional[Callable[[dict[str, Any]], dict[str, Any]]] = None,\n download: bool = False,\n cache: bool = True,\n ) -> None:\n \"\"\"Initialize a new Dataset instance.\n\n Args:\n paths: one or more root directories to search or files to load\n crs: :term:`coordinate reference system (CRS)` to warp to\n (defaults to the CRS of the first file found)\n res: resolution of the dataset in units of CRS\n (defaults to the resolution of the first file found)\n transforms: a function/transform that takes an input sample\n and returns a transformed version\n download: if True, download dataset and store it in the root directory\n cache: if True, cache file handle to speed up repeated sampling\n\n Raises:\n FileNotFoundError: if no files are found in ``paths``\n\n .. 
versionchanged:: 0.5\n *root* was renamed to *paths*.\n \"\"\"\n self.paths = paths\n self.download = download\n\n self._verify()\n\n super().__init__(paths, crs, res, transforms=transforms, cache=cache)\n\n def _verify(self) -> None:\n \"\"\"Verify the integrity of the dataset.\n\n Raises:\n RuntimeError: if dataset is missing\n \"\"\"\n # Check if the extracted files already exist\n if self.files:\n return\n\n # Check if the user requested to download the dataset\n if not self.download:\n raise RuntimeError(\n f\"Dataset not found in `paths={self.paths!r}` and `download=False`, \"\n \"either specify a different `root` directory or use `download=True` \"\n \"to automatically download the dataset.\"\n )\n\n # Download the dataset\n self._download()\n\n def _download(self) -> None:\n \"\"\"Download the dataset.\"\"\"\n assert isinstance(self.paths, str)\n download_url(self.url, self.paths, self.base_filename)\n\n with open(os.path.join(self.paths, self.base_filename)) as f:\n content = json.load(f)\n\n for item in content[\"features\"]:\n download_url(\n item[\"properties\"][\"Mg_px_1_download\"],\n self.paths,\n item[\"properties\"][\"tile_id\"] + \".tif\",\n )\n\n def plot(\n self,\n sample: dict[str, Any],\n show_titles: bool = True,\n suptitle: Optional[str] = None,\n ) -> Figure:\n \"\"\"Plot a sample from the dataset.\n\n Args:\n sample: a sample returned by :meth:`RasterDataset.__getitem__`\n show_titles: flag indicating whether to show titles above each panel\n suptitle: optional string to use as a suptitle\n\n Returns:\n a matplotlib Figure with the rendered sample\n \"\"\"\n mask = sample[\"mask\"].squeeze()\n ncols = 1\n\n showing_predictions = \"prediction\" in sample\n if showing_predictions:\n pred = sample[\"prediction\"].squeeze()\n ncols = 2\n\n fig, axs = plt.subplots(nrows=1, ncols=ncols, figsize=(ncols * 4, 4))\n\n if showing_predictions:\n axs[0].imshow(mask)\n axs[0].axis(\"off\")\n axs[1].imshow(pred)\n axs[1].axis(\"off\")\n if show_titles:\n axs[0].set_title(\"Mask\")\n axs[1].set_title(\"Prediction\")\n else:\n axs.imshow(mask)\n axs.axis(\"off\")\n if show_titles:\n axs.set_title(\"Mask\")\n\n if suptitle is not None:\n plt.suptitle(suptitle)\n\n return fig\n", "path": "torchgeo/datasets/agb_live_woody_density.py"}]} | 2,168 | 320 |
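The torchgeo fix above replaces two stale references: the ArcGIS dataset id in `url` (the old endpoint now answers HTTP 500) and the per-tile key in the index geojson, which moves from `properties["download"]` to `properties["Mg_px_1_download"]`. A rough standalone version of the patched download loop might look like the sketch below, with plain `requests` standing in for torchgeo's `download_url` helper and the output directory chosen arbitrarily; streaming writes are used here only because the tiles are large.

```python
# Sketch under the assumptions above; the URL and property names are taken
# from the patched dataset class, everything else is illustrative.
import os

import requests

INDEX_URL = (
    "https://opendata.arcgis.com/api/v3/datasets/"
    "e4bdbe8d6d8d4e32ace7d36a4aec7b93_0/downloads/data"
    "?format=geojson&spatialRefId=4326"
)


def download_tiles(root: str = "data") -> None:
    os.makedirs(root, exist_ok=True)
    index = requests.get(INDEX_URL, timeout=120).json()
    for feature in index["features"]:
        props = feature["properties"]
        tile_url = props["Mg_px_1_download"]   # was props["download"] before the fix
        out_path = os.path.join(root, props["tile_id"] + ".tif")
        with requests.get(tile_url, stream=True, timeout=120) as r:
            r.raise_for_status()
            with open(out_path, "wb") as f:
                for chunk in r.iter_content(chunk_size=1 << 20):
                    f.write(chunk)
```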
gh_patches_debug_14389 | rasdani/github-patches | git_diff | scrapy__scrapy-6322 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make the build reproducible
Whilst working on the [Reproducible Builds](https://reproducible-builds.org/) effort I noticed that scrapy could not be built reproducibly
This is due to the documentation embedding the current build year in the generated files, therefore making the build vary depending on when you build it. The fix is to use [`SOURCE_DATE_EPOCH`](https://reproducible-builds.org/specs/source-date-epoch/) if it is exported to the surrounding environment.
I originally filed this in Debian as bug [#983852](https://bugs.debian.org/983852).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 # Scrapy documentation build configuration file, created by
2 # sphinx-quickstart on Mon Nov 24 12:02:52 2008.
3 #
4 # This file is execfile()d with the current directory set to its containing dir.
5 #
6 # The contents of this file are pickled, so don't put values in the namespace
7 # that aren't pickleable (module imports are okay, they're removed automatically).
8 #
9 # All configuration values have a default; values that are commented out
10 # serve to show the default.
11
12 import sys
13 from datetime import datetime
14 from pathlib import Path
15
16 # If your extensions are in another directory, add it here. If the directory
17 # is relative to the documentation root, use Path.absolute to make it absolute.
18 sys.path.append(str(Path(__file__).parent / "_ext"))
19 sys.path.insert(0, str(Path(__file__).parent.parent))
20
21
22 # General configuration
23 # ---------------------
24
25 # Add any Sphinx extension module names here, as strings. They can be extensions
26 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
27 extensions = [
28 "hoverxref.extension",
29 "notfound.extension",
30 "scrapydocs",
31 "sphinx.ext.autodoc",
32 "sphinx.ext.coverage",
33 "sphinx.ext.intersphinx",
34 "sphinx.ext.viewcode",
35 ]
36
37 # Add any paths that contain templates here, relative to this directory.
38 templates_path = ["_templates"]
39
40 # The suffix of source filenames.
41 source_suffix = ".rst"
42
43 # The encoding of source files.
44 # source_encoding = 'utf-8'
45
46 # The master toctree document.
47 master_doc = "index"
48
49 # General information about the project.
50 project = "Scrapy"
51 copyright = f"2008–{datetime.now().year}, Scrapy developers"
52
53 # The version info for the project you're documenting, acts as replacement for
54 # |version| and |release|, also used in various other places throughout the
55 # built documents.
56 #
57 # The short X.Y version.
58 try:
59 import scrapy
60
61 version = ".".join(map(str, scrapy.version_info[:2]))
62 release = scrapy.__version__
63 except ImportError:
64 version = ""
65 release = ""
66
67 # The language for content autogenerated by Sphinx. Refer to documentation
68 # for a list of supported languages.
69 language = "en"
70
71 # There are two options for replacing |today|: either, you set today to some
72 # non-false value, then it is used:
73 # today = ''
74 # Else, today_fmt is used as the format for a strftime call.
75 # today_fmt = '%B %d, %Y'
76
77 # List of documents that shouldn't be included in the build.
78 # unused_docs = []
79
80 exclude_patterns = ["build"]
81
82 # List of directories, relative to source directory, that shouldn't be searched
83 # for source files.
84 exclude_trees = [".build"]
85
86 # The reST default role (used for this markup: `text`) to use for all documents.
87 # default_role = None
88
89 # If true, '()' will be appended to :func: etc. cross-reference text.
90 # add_function_parentheses = True
91
92 # If true, the current module name will be prepended to all description
93 # unit titles (such as .. function::).
94 # add_module_names = True
95
96 # If true, sectionauthor and moduleauthor directives will be shown in the
97 # output. They are ignored by default.
98 # show_authors = False
99
100 # The name of the Pygments (syntax highlighting) style to use.
101 pygments_style = "sphinx"
102
103 # List of Sphinx warnings that will not be raised
104 suppress_warnings = ["epub.unknown_project_files"]
105
106
107 # Options for HTML output
108 # -----------------------
109
110 # The theme to use for HTML and HTML Help pages. See the documentation for
111 # a list of builtin themes.
112 html_theme = "sphinx_rtd_theme"
113
114 # Theme options are theme-specific and customize the look and feel of a theme
115 # further. For a list of options available for each theme, see the
116 # documentation.
117 # html_theme_options = {}
118
119 # Add any paths that contain custom themes here, relative to this directory.
120 # Add path to the RTD explicitly to robustify builds (otherwise might
121 # fail in a clean Debian build env)
122 import sphinx_rtd_theme
123
124 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
125
126 # The style sheet to use for HTML and HTML Help pages. A file of that name
127 # must exist either in Sphinx' static/ path, or in one of the custom paths
128 # given in html_static_path.
129 # html_style = 'scrapydoc.css'
130
131 # The name for this set of Sphinx documents. If None, it defaults to
132 # "<project> v<release> documentation".
133 # html_title = None
134
135 # A shorter title for the navigation bar. Default is the same as html_title.
136 # html_short_title = None
137
138 # The name of an image file (relative to this directory) to place at the top
139 # of the sidebar.
140 # html_logo = None
141
142 # The name of an image file (within the static path) to use as favicon of the
143 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
144 # pixels large.
145 # html_favicon = None
146
147 # Add any paths that contain custom static files (such as style sheets) here,
148 # relative to this directory. They are copied after the builtin static files,
149 # so a file named "default.css" will overwrite the builtin "default.css".
150 html_static_path = ["_static"]
151
152 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
153 # using the given strftime format.
154 html_last_updated_fmt = "%b %d, %Y"
155
156 # Custom sidebar templates, maps document names to template names.
157 # html_sidebars = {}
158
159 # Additional templates that should be rendered to pages, maps page names to
160 # template names.
161 # html_additional_pages = {}
162
163 # If false, no module index is generated.
164 # html_use_modindex = True
165
166 # If false, no index is generated.
167 # html_use_index = True
168
169 # If true, the index is split into individual pages for each letter.
170 # html_split_index = False
171
172 # If true, the reST sources are included in the HTML build as _sources/<name>.
173 html_copy_source = True
174
175 # If true, an OpenSearch description file will be output, and all pages will
176 # contain a <link> tag referring to it. The value of this option must be the
177 # base URL from which the finished HTML is served.
178 # html_use_opensearch = ''
179
180 # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
181 # html_file_suffix = ''
182
183 # Output file base name for HTML help builder.
184 htmlhelp_basename = "Scrapydoc"
185
186 html_css_files = [
187 "custom.css",
188 ]
189
190
191 # Options for LaTeX output
192 # ------------------------
193
194 # The paper size ('letter' or 'a4').
195 # latex_paper_size = 'letter'
196
197 # The font size ('10pt', '11pt' or '12pt').
198 # latex_font_size = '10pt'
199
200 # Grouping the document tree into LaTeX files. List of tuples
201 # (source start file, target name, title, author, document class [howto/manual]).
202 latex_documents = [
203 ("index", "Scrapy.tex", "Scrapy Documentation", "Scrapy developers", "manual"),
204 ]
205
206 # The name of an image file (relative to this directory) to place at the top of
207 # the title page.
208 # latex_logo = None
209
210 # For "manual" documents, if this is true, then toplevel headings are parts,
211 # not chapters.
212 # latex_use_parts = False
213
214 # Additional stuff for the LaTeX preamble.
215 # latex_preamble = ''
216
217 # Documents to append as an appendix to all manuals.
218 # latex_appendices = []
219
220 # If false, no module index is generated.
221 # latex_use_modindex = True
222
223
224 # Options for the linkcheck builder
225 # ---------------------------------
226
227 # A list of regular expressions that match URIs that should not be checked when
228 # doing a linkcheck build.
229 linkcheck_ignore = [
230 r"http://localhost:\d+",
231 "http://hg.scrapy.org",
232 "http://directory.google.com/",
233 ]
234
235
236 # Options for the Coverage extension
237 # ----------------------------------
238 coverage_ignore_pyobjects = [
239 # Contract’s add_pre_hook and add_post_hook are not documented because
240 # they should be transparent to contract developers, for whom pre_hook and
241 # post_hook should be the actual concern.
242 r"\bContract\.add_(pre|post)_hook$",
243 # ContractsManager is an internal class, developers are not expected to
244 # interact with it directly in any way.
245 r"\bContractsManager\b$",
246 # For default contracts we only want to document their general purpose in
247 # their __init__ method, the methods they reimplement to achieve that purpose
248 # should be irrelevant to developers using those contracts.
249 r"\w+Contract\.(adjust_request_args|(pre|post)_process)$",
250 # Methods of downloader middlewares are not documented, only the classes
251 # themselves, since downloader middlewares are controlled through Scrapy
252 # settings.
253 r"^scrapy\.downloadermiddlewares\.\w*?\.(\w*?Middleware|DownloaderStats)\.",
254 # Base classes of downloader middlewares are implementation details that
255 # are not meant for users.
256 r"^scrapy\.downloadermiddlewares\.\w*?\.Base\w*?Middleware",
257 # Private exception used by the command-line interface implementation.
258 r"^scrapy\.exceptions\.UsageError",
259 # Methods of BaseItemExporter subclasses are only documented in
260 # BaseItemExporter.
261 r"^scrapy\.exporters\.(?!BaseItemExporter\b)\w*?\.",
262 # Extension behavior is only modified through settings. Methods of
263 # extension classes, as well as helper functions, are implementation
264 # details that are not documented.
265 r"^scrapy\.extensions\.[a-z]\w*?\.[A-Z]\w*?\.", # methods
266 r"^scrapy\.extensions\.[a-z]\w*?\.[a-z]", # helper functions
267 # Never documented before, and deprecated now.
268 r"^scrapy\.linkextractors\.FilteringLinkExtractor$",
269 # Implementation detail of LxmlLinkExtractor
270 r"^scrapy\.linkextractors\.lxmlhtml\.LxmlParserLinkExtractor",
271 ]
272
273
274 # Options for the InterSphinx extension
275 # -------------------------------------
276
277 intersphinx_mapping = {
278 "attrs": ("https://www.attrs.org/en/stable/", None),
279 "coverage": ("https://coverage.readthedocs.io/en/latest", None),
280 "cryptography": ("https://cryptography.io/en/latest/", None),
281 "cssselect": ("https://cssselect.readthedocs.io/en/latest", None),
282 "itemloaders": ("https://itemloaders.readthedocs.io/en/latest/", None),
283 "pytest": ("https://docs.pytest.org/en/latest", None),
284 "python": ("https://docs.python.org/3", None),
285 "sphinx": ("https://www.sphinx-doc.org/en/master", None),
286 "tox": ("https://tox.wiki/en/latest/", None),
287 "twisted": ("https://docs.twisted.org/en/stable/", None),
288 "twistedapi": ("https://docs.twisted.org/en/stable/api/", None),
289 "w3lib": ("https://w3lib.readthedocs.io/en/latest", None),
290 }
291 intersphinx_disabled_reftypes = []
292
293
294 # Options for sphinx-hoverxref options
295 # ------------------------------------
296
297 hoverxref_auto_ref = True
298 hoverxref_role_types = {
299 "class": "tooltip",
300 "command": "tooltip",
301 "confval": "tooltip",
302 "hoverxref": "tooltip",
303 "mod": "tooltip",
304 "ref": "tooltip",
305 "reqmeta": "tooltip",
306 "setting": "tooltip",
307 "signal": "tooltip",
308 }
309 hoverxref_roles = ["command", "reqmeta", "setting", "signal"]
310
311
312 def setup(app):
313 app.connect("autodoc-skip-member", maybe_skip_member)
314
315
316 def maybe_skip_member(app, what, name, obj, skip, options):
317 if not skip:
318 # autodocs was generating a text "alias of" for the following members
319 # https://github.com/sphinx-doc/sphinx/issues/4422
320 return name in {"default_item_class", "default_selector_class"}
321 return skip
322
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -10,7 +10,6 @@
# serve to show the default.
import sys
-from datetime import datetime
from pathlib import Path
# If your extensions are in another directory, add it here. If the directory
@@ -48,7 +47,7 @@
# General information about the project.
project = "Scrapy"
-copyright = f"2008–{datetime.now().year}, Scrapy developers"
+copyright = "Scrapy developers"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -10,7 +10,6 @@\n # serve to show the default.\n \n import sys\n-from datetime import datetime\n from pathlib import Path\n \n # If your extensions are in another directory, add it here. If the directory\n@@ -48,7 +47,7 @@\n \n # General information about the project.\n project = \"Scrapy\"\n-copyright = f\"2008\u2013{datetime.now().year}, Scrapy developers\"\n+copyright = \"Scrapy developers\"\n \n # The version info for the project you're documenting, acts as replacement for\n # |version| and |release|, also used in various other places throughout the\n", "issue": "Make the build reproducible\nWhilst working on the [Reproducible Builds](https://reproducible-builds.org/) effort I noticed that scrapy could not be built reproducibly\r\n\r\nThis is due to the documentation embedding the current build year in the generated files, therefore making the build vary depending on when you build it. The fix is to use [`SOURCE_DATE_EPOCH`](https://reproducible-builds.org/specs/source-date-epoch/) if it is exported to the surrounding environment.\r\n\r\nI originally filed this in Debian as bug [#983852](https://bugs.debian.org/983852).\n", "before_files": [{"content": "# Scrapy documentation build configuration file, created by\n# sphinx-quickstart on Mon Nov 24 12:02:52 2008.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# The contents of this file are pickled, so don't put values in the namespace\n# that aren't pickleable (module imports are okay, they're removed automatically).\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nfrom datetime import datetime\nfrom pathlib import Path\n\n# If your extensions are in another directory, add it here. If the directory\n# is relative to the documentation root, use Path.absolute to make it absolute.\nsys.path.append(str(Path(__file__).parent / \"_ext\"))\nsys.path.insert(0, str(Path(__file__).parent.parent))\n\n\n# General configuration\n# ---------------------\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n \"hoverxref.extension\",\n \"notfound.extension\",\n \"scrapydocs\",\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.viewcode\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix of source filenames.\nsource_suffix = \".rst\"\n\n# The encoding of source files.\n# source_encoding = 'utf-8'\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"Scrapy\"\ncopyright = f\"2008\u2013{datetime.now().year}, Scrapy developers\"\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\ntry:\n import scrapy\n\n version = \".\".join(map(str, scrapy.version_info[:2]))\n release = scrapy.__version__\nexcept ImportError:\n version = \"\"\n release = \"\"\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\nlanguage = \"en\"\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n# today = ''\n# Else, today_fmt is used as the format for a strftime call.\n# today_fmt = '%B %d, %Y'\n\n# List of documents that shouldn't be included in the build.\n# unused_docs = []\n\nexclude_patterns = [\"build\"]\n\n# List of directories, relative to source directory, that shouldn't be searched\n# for source files.\nexclude_trees = [\".build\"]\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n# default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n# add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n# show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# List of Sphinx warnings that will not be raised\nsuppress_warnings = [\"epub.unknown_project_files\"]\n\n\n# Options for HTML output\n# -----------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n# html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# Add path to the RTD explicitly to robustify builds (otherwise might\n# fail in a clean Debian build env)\nimport sphinx_rtd_theme\n\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# The style sheet to use for HTML and HTML Help pages. A file of that name\n# must exist either in Sphinx' static/ path, or in one of the custom paths\n# given in html_static_path.\n# html_style = 'scrapydoc.css'\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n# html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n# html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n# html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n# html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\nhtml_last_updated_fmt = \"%b %d, %Y\"\n\n# Custom sidebar templates, maps document names to template names.\n# html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n# html_additional_pages = {}\n\n# If false, no module index is generated.\n# html_use_modindex = True\n\n# If false, no index is generated.\n# html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n# html_split_index = False\n\n# If true, the reST sources are included in the HTML build as _sources/<name>.\nhtml_copy_source = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n# html_use_opensearch = ''\n\n# If nonempty, this is the file name suffix for HTML files (e.g. \".xhtml\").\n# html_file_suffix = ''\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"Scrapydoc\"\n\nhtml_css_files = [\n \"custom.css\",\n]\n\n\n# Options for LaTeX output\n# ------------------------\n\n# The paper size ('letter' or 'a4').\n# latex_paper_size = 'letter'\n\n# The font size ('10pt', '11pt' or '12pt').\n# latex_font_size = '10pt'\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, document class [howto/manual]).\nlatex_documents = [\n (\"index\", \"Scrapy.tex\", \"Scrapy Documentation\", \"Scrapy developers\", \"manual\"),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n# latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n# latex_use_parts = False\n\n# Additional stuff for the LaTeX preamble.\n# latex_preamble = ''\n\n# Documents to append as an appendix to all manuals.\n# latex_appendices = []\n\n# If false, no module index is generated.\n# latex_use_modindex = True\n\n\n# Options for the linkcheck builder\n# ---------------------------------\n\n# A list of regular expressions that match URIs that should not be checked when\n# doing a linkcheck build.\nlinkcheck_ignore = [\n r\"http://localhost:\\d+\",\n \"http://hg.scrapy.org\",\n \"http://directory.google.com/\",\n]\n\n\n# Options for the Coverage extension\n# ----------------------------------\ncoverage_ignore_pyobjects = [\n # Contract\u2019s add_pre_hook and add_post_hook are not documented because\n # they should be transparent to contract developers, for whom pre_hook and\n # post_hook should be the actual concern.\n r\"\\bContract\\.add_(pre|post)_hook$\",\n # ContractsManager is an internal class, developers are not expected to\n # interact with it directly in any way.\n r\"\\bContractsManager\\b$\",\n # For default contracts we only want to document their general purpose in\n # their __init__ method, the methods they reimplement to achieve that purpose\n # should be irrelevant to developers using those contracts.\n r\"\\w+Contract\\.(adjust_request_args|(pre|post)_process)$\",\n # Methods of downloader middlewares are not documented, only the classes\n # themselves, since downloader middlewares are controlled through Scrapy\n # settings.\n 
r\"^scrapy\\.downloadermiddlewares\\.\\w*?\\.(\\w*?Middleware|DownloaderStats)\\.\",\n # Base classes of downloader middlewares are implementation details that\n # are not meant for users.\n r\"^scrapy\\.downloadermiddlewares\\.\\w*?\\.Base\\w*?Middleware\",\n # Private exception used by the command-line interface implementation.\n r\"^scrapy\\.exceptions\\.UsageError\",\n # Methods of BaseItemExporter subclasses are only documented in\n # BaseItemExporter.\n r\"^scrapy\\.exporters\\.(?!BaseItemExporter\\b)\\w*?\\.\",\n # Extension behavior is only modified through settings. Methods of\n # extension classes, as well as helper functions, are implementation\n # details that are not documented.\n r\"^scrapy\\.extensions\\.[a-z]\\w*?\\.[A-Z]\\w*?\\.\", # methods\n r\"^scrapy\\.extensions\\.[a-z]\\w*?\\.[a-z]\", # helper functions\n # Never documented before, and deprecated now.\n r\"^scrapy\\.linkextractors\\.FilteringLinkExtractor$\",\n # Implementation detail of LxmlLinkExtractor\n r\"^scrapy\\.linkextractors\\.lxmlhtml\\.LxmlParserLinkExtractor\",\n]\n\n\n# Options for the InterSphinx extension\n# -------------------------------------\n\nintersphinx_mapping = {\n \"attrs\": (\"https://www.attrs.org/en/stable/\", None),\n \"coverage\": (\"https://coverage.readthedocs.io/en/latest\", None),\n \"cryptography\": (\"https://cryptography.io/en/latest/\", None),\n \"cssselect\": (\"https://cssselect.readthedocs.io/en/latest\", None),\n \"itemloaders\": (\"https://itemloaders.readthedocs.io/en/latest/\", None),\n \"pytest\": (\"https://docs.pytest.org/en/latest\", None),\n \"python\": (\"https://docs.python.org/3\", None),\n \"sphinx\": (\"https://www.sphinx-doc.org/en/master\", None),\n \"tox\": (\"https://tox.wiki/en/latest/\", None),\n \"twisted\": (\"https://docs.twisted.org/en/stable/\", None),\n \"twistedapi\": (\"https://docs.twisted.org/en/stable/api/\", None),\n \"w3lib\": (\"https://w3lib.readthedocs.io/en/latest\", None),\n}\nintersphinx_disabled_reftypes = []\n\n\n# Options for sphinx-hoverxref options\n# ------------------------------------\n\nhoverxref_auto_ref = True\nhoverxref_role_types = {\n \"class\": \"tooltip\",\n \"command\": \"tooltip\",\n \"confval\": \"tooltip\",\n \"hoverxref\": \"tooltip\",\n \"mod\": \"tooltip\",\n \"ref\": \"tooltip\",\n \"reqmeta\": \"tooltip\",\n \"setting\": \"tooltip\",\n \"signal\": \"tooltip\",\n}\nhoverxref_roles = [\"command\", \"reqmeta\", \"setting\", \"signal\"]\n\n\ndef setup(app):\n app.connect(\"autodoc-skip-member\", maybe_skip_member)\n\n\ndef maybe_skip_member(app, what, name, obj, skip, options):\n if not skip:\n # autodocs was generating a text \"alias of\" for the following members\n # https://github.com/sphinx-doc/sphinx/issues/4422\n return name in {\"default_item_class\", \"default_selector_class\"}\n return skip\n", "path": "docs/conf.py"}], "after_files": [{"content": "# Scrapy documentation build configuration file, created by\n# sphinx-quickstart on Mon Nov 24 12:02:52 2008.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# The contents of this file are pickled, so don't put values in the namespace\n# that aren't pickleable (module imports are okay, they're removed automatically).\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nfrom pathlib import Path\n\n# If your extensions are in another directory, add it here. 
If the directory\n# is relative to the documentation root, use Path.absolute to make it absolute.\nsys.path.append(str(Path(__file__).parent / \"_ext\"))\nsys.path.insert(0, str(Path(__file__).parent.parent))\n\n\n# General configuration\n# ---------------------\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n \"hoverxref.extension\",\n \"notfound.extension\",\n \"scrapydocs\",\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.viewcode\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix of source filenames.\nsource_suffix = \".rst\"\n\n# The encoding of source files.\n# source_encoding = 'utf-8'\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"Scrapy\"\ncopyright = \"Scrapy developers\"\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\ntry:\n import scrapy\n\n version = \".\".join(map(str, scrapy.version_info[:2]))\n release = scrapy.__version__\nexcept ImportError:\n version = \"\"\n release = \"\"\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\nlanguage = \"en\"\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n# today = ''\n# Else, today_fmt is used as the format for a strftime call.\n# today_fmt = '%B %d, %Y'\n\n# List of documents that shouldn't be included in the build.\n# unused_docs = []\n\nexclude_patterns = [\"build\"]\n\n# List of directories, relative to source directory, that shouldn't be searched\n# for source files.\nexclude_trees = [\".build\"]\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n# default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n# add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n# show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# List of Sphinx warnings that will not be raised\nsuppress_warnings = [\"epub.unknown_project_files\"]\n\n\n# Options for HTML output\n# -----------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n# html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# Add path to the RTD explicitly to robustify builds (otherwise might\n# fail in a clean Debian build env)\nimport sphinx_rtd_theme\n\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# The style sheet to use for HTML and HTML Help pages. 
A file of that name\n# must exist either in Sphinx' static/ path, or in one of the custom paths\n# given in html_static_path.\n# html_style = 'scrapydoc.css'\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n# html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n# html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n# html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n# html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\nhtml_last_updated_fmt = \"%b %d, %Y\"\n\n# Custom sidebar templates, maps document names to template names.\n# html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n# html_additional_pages = {}\n\n# If false, no module index is generated.\n# html_use_modindex = True\n\n# If false, no index is generated.\n# html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n# html_split_index = False\n\n# If true, the reST sources are included in the HTML build as _sources/<name>.\nhtml_copy_source = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n# html_use_opensearch = ''\n\n# If nonempty, this is the file name suffix for HTML files (e.g. \".xhtml\").\n# html_file_suffix = ''\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"Scrapydoc\"\n\nhtml_css_files = [\n \"custom.css\",\n]\n\n\n# Options for LaTeX output\n# ------------------------\n\n# The paper size ('letter' or 'a4').\n# latex_paper_size = 'letter'\n\n# The font size ('10pt', '11pt' or '12pt').\n# latex_font_size = '10pt'\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title, author, document class [howto/manual]).\nlatex_documents = [\n (\"index\", \"Scrapy.tex\", \"Scrapy Documentation\", \"Scrapy developers\", \"manual\"),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n# latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n# latex_use_parts = False\n\n# Additional stuff for the LaTeX preamble.\n# latex_preamble = ''\n\n# Documents to append as an appendix to all manuals.\n# latex_appendices = []\n\n# If false, no module index is generated.\n# latex_use_modindex = True\n\n\n# Options for the linkcheck builder\n# ---------------------------------\n\n# A list of regular expressions that match URIs that should not be checked when\n# doing a linkcheck build.\nlinkcheck_ignore = [\n r\"http://localhost:\\d+\",\n \"http://hg.scrapy.org\",\n \"http://directory.google.com/\",\n]\n\n\n# Options for the Coverage extension\n# ----------------------------------\ncoverage_ignore_pyobjects = [\n # Contract\u2019s add_pre_hook and add_post_hook are not documented because\n # they should be transparent to contract developers, for whom pre_hook and\n # post_hook should be the actual concern.\n r\"\\bContract\\.add_(pre|post)_hook$\",\n # ContractsManager is an internal class, developers are not expected to\n # interact with it directly in any way.\n r\"\\bContractsManager\\b$\",\n # For default contracts we only want to document their general purpose in\n # their __init__ method, the methods they reimplement to achieve that purpose\n # should be irrelevant to developers using those contracts.\n r\"\\w+Contract\\.(adjust_request_args|(pre|post)_process)$\",\n # Methods of downloader middlewares are not documented, only the classes\n # themselves, since downloader middlewares are controlled through Scrapy\n # settings.\n r\"^scrapy\\.downloadermiddlewares\\.\\w*?\\.(\\w*?Middleware|DownloaderStats)\\.\",\n # Base classes of downloader middlewares are implementation details that\n # are not meant for users.\n r\"^scrapy\\.downloadermiddlewares\\.\\w*?\\.Base\\w*?Middleware\",\n # Private exception used by the command-line interface implementation.\n r\"^scrapy\\.exceptions\\.UsageError\",\n # Methods of BaseItemExporter subclasses are only documented in\n # BaseItemExporter.\n r\"^scrapy\\.exporters\\.(?!BaseItemExporter\\b)\\w*?\\.\",\n # Extension behavior is only modified through settings. 
Methods of\n # extension classes, as well as helper functions, are implementation\n # details that are not documented.\n r\"^scrapy\\.extensions\\.[a-z]\\w*?\\.[A-Z]\\w*?\\.\", # methods\n r\"^scrapy\\.extensions\\.[a-z]\\w*?\\.[a-z]\", # helper functions\n # Never documented before, and deprecated now.\n r\"^scrapy\\.linkextractors\\.FilteringLinkExtractor$\",\n # Implementation detail of LxmlLinkExtractor\n r\"^scrapy\\.linkextractors\\.lxmlhtml\\.LxmlParserLinkExtractor\",\n]\n\n\n# Options for the InterSphinx extension\n# -------------------------------------\n\nintersphinx_mapping = {\n \"attrs\": (\"https://www.attrs.org/en/stable/\", None),\n \"coverage\": (\"https://coverage.readthedocs.io/en/latest\", None),\n \"cryptography\": (\"https://cryptography.io/en/latest/\", None),\n \"cssselect\": (\"https://cssselect.readthedocs.io/en/latest\", None),\n \"itemloaders\": (\"https://itemloaders.readthedocs.io/en/latest/\", None),\n \"pytest\": (\"https://docs.pytest.org/en/latest\", None),\n \"python\": (\"https://docs.python.org/3\", None),\n \"sphinx\": (\"https://www.sphinx-doc.org/en/master\", None),\n \"tox\": (\"https://tox.wiki/en/latest/\", None),\n \"twisted\": (\"https://docs.twisted.org/en/stable/\", None),\n \"twistedapi\": (\"https://docs.twisted.org/en/stable/api/\", None),\n \"w3lib\": (\"https://w3lib.readthedocs.io/en/latest\", None),\n}\nintersphinx_disabled_reftypes = []\n\n\n# Options for sphinx-hoverxref options\n# ------------------------------------\n\nhoverxref_auto_ref = True\nhoverxref_role_types = {\n \"class\": \"tooltip\",\n \"command\": \"tooltip\",\n \"confval\": \"tooltip\",\n \"hoverxref\": \"tooltip\",\n \"mod\": \"tooltip\",\n \"ref\": \"tooltip\",\n \"reqmeta\": \"tooltip\",\n \"setting\": \"tooltip\",\n \"signal\": \"tooltip\",\n}\nhoverxref_roles = [\"command\", \"reqmeta\", \"setting\", \"signal\"]\n\n\ndef setup(app):\n app.connect(\"autodoc-skip-member\", maybe_skip_member)\n\n\ndef maybe_skip_member(app, what, name, obj, skip, options):\n if not skip:\n # autodocs was generating a text \"alias of\" for the following members\n # https://github.com/sphinx-doc/sphinx/issues/4422\n return name in {\"default_item_class\", \"default_selector_class\"}\n return skip\n", "path": "docs/conf.py"}]} | 3,997 | 163 |
gh_patches_debug_18187 | rasdani/github-patches | git_diff | praw-dev__praw-782 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Explain "PRAW is not thread safe" in the documentation.
It would be great to provide an example of why it is not thread safe.
Relevant comment:
https://www.reddit.com/r/redditdev/comments/63ugl5/praw_mulitprocessing_handler_prawhandler_is_not_a/dfx9oet/?context=3
--- END ISSUE ---
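As an illustrative aside on the issue above (not part of the original report): PRAW is not thread safe in part because a single `praw.Reddit` instance shares one underlying session and rate-limit state across all calls. A minimal hedged sketch of the usual workaround, giving each thread its own instance, is below; the credentials are placeholders and the pattern is an assumption about typical usage rather than documented API behaviour.

```python
import threading

import praw


def worker(name: str) -> None:
    # One Reddit instance per thread; sharing a single instance across threads
    # can interleave access to its shared session and rate-limit bookkeeping.
    reddit = praw.Reddit(
        client_id="CLIENT_ID",          # placeholder
        client_secret="CLIENT_SECRET",  # placeholder
        user_agent=f"threading-sketch:{name}",
    )
    print(name, reddit.subreddit("redditdev").display_name)


threads = [threading.Thread(target=worker, args=(f"t{i}",)) for i in range(3)]
for t in threads:
    t.start()
for t in threads:
    t.join()
```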
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 """praw setup.py"""
2
3 import re
4 from codecs import open
5 from os import path
6 from setuptools import find_packages, setup
7
8
9 PACKAGE_NAME = 'praw'
10 HERE = path.abspath(path.dirname(__file__))
11 with open(path.join(HERE, 'README.rst'), encoding='utf-8') as fp:
12 README = fp.read()
13 with open(path.join(HERE, PACKAGE_NAME, 'const.py'),
14 encoding='utf-8') as fp:
15 VERSION = re.search("__version__ = '([^']+)'", fp.read()).group(1)
16
17
18 setup(name=PACKAGE_NAME,
19 author='Bryce Boe',
20 author_email='[email protected]',
21 classifiers=[
22 'Development Status :: 5 - Production/Stable',
23 'Environment :: Console',
24 'Intended Audience :: Developers',
25 'License :: OSI Approved :: BSD License',
26 'Natural Language :: English',
27 'Operating System :: OS Independent',
28 'Programming Language :: Python',
29 'Programming Language :: Python :: 2.7',
30 'Programming Language :: Python :: 3',
31 'Programming Language :: Python :: 3.3',
32 'Programming Language :: Python :: 3.4',
33 'Programming Language :: Python :: 3.5',
34 'Programming Language :: Python :: 3.6',
35 'Programming Language :: Python :: Implementation :: CPython',
36 'Topic :: Utilities'],
37 description=('PRAW, an acronym for `Python Reddit API Wrapper`, is a '
38 'python package that allows for simple access to '
39 'reddit\'s API.'),
40 install_requires=['prawcore >=0.9.0, <0.10',
41 'update_checker >=0.16'],
42 keywords='reddit api wrapper',
43 license='Simplified BSD License',
44 long_description=README,
45 package_data={'': ['LICENSE.txt'], PACKAGE_NAME: ['*.ini']},
46 packages=find_packages(exclude=['tests', 'tests.*']),
47 setup_requires=['pytest-runner >=2.1'],
48 tests_require=['betamax >=0.8, <0.9',
49 'betamax-matchers >=0.3.0, <0.4',
50 'betamax-serializers >=0.2, <0.3',
51 'mock >=0.8',
52 'pytest >=2.7.3',
53 'six >=1.10'],
54 test_suite='tests',
55 url='https://praw.readthedocs.org/',
56 version=VERSION)
57
```
Path: `docs/conf.py`
Content:
```
1 import os
2 import sys
3 sys.path.insert(0, '..')
4
5 from praw import __version__
6
7 copyright = '2016, Bryce Boe'
8 exclude_patterns = ['_build']
9 extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
10 html_static_path = ['_static']
11 html_theme = 'sphinx_rtd_theme'
12 html_theme_options = {
13 'collapse_navigation': True
14 }
15 html_use_smartypants = True
16 htmlhelp_basename = 'PRAW'
17 intersphinx_mapping = {'python': ('https://docs.python.org/3.6', None)}
18 master_doc = 'index'
19 nitpicky = True
20 project = 'PRAW'
21 pygments_style = 'sphinx'
22 release = __version__
23 source_suffix = '.rst'
24 suppress_warnings = ['image.nonlocal_uri']
25 version = '.'.join(__version__.split('.', 2)[:2])
26
27
28 # Use RTD theme locally
29 if not os.environ.get('READTHEDOCS'):
30 import sphinx_rtd_theme
31 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
32
33
34 def skip(app, what, name, obj, skip, options):
35 if name in {'__call__', '__contains__', '__getitem__', '__init__',
36 '__iter__', '__len__'}:
37 return False
38 return skip
39
40
41 def setup(app):
42 app.connect('autodoc-skip-member', skip)
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -4,7 +4,7 @@
from praw import __version__
-copyright = '2016, Bryce Boe'
+copyright = '2017, Bryce Boe'
exclude_patterns = ['_build']
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
html_static_path = ['_static']
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -37,7 +37,7 @@
description=('PRAW, an acronym for `Python Reddit API Wrapper`, is a '
'python package that allows for simple access to '
'reddit\'s API.'),
- install_requires=['prawcore >=0.9.0, <0.10',
+ install_requires=['prawcore >=0.10.1, <0.11',
'update_checker >=0.16'],
keywords='reddit api wrapper',
license='Simplified BSD License',
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -4,7 +4,7 @@\n \n from praw import __version__\n \n-copyright = '2016, Bryce Boe'\n+copyright = '2017, Bryce Boe'\n exclude_patterns = ['_build']\n extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']\n html_static_path = ['_static']\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -37,7 +37,7 @@\n description=('PRAW, an acronym for `Python Reddit API Wrapper`, is a '\n 'python package that allows for simple access to '\n 'reddit\\'s API.'),\n- install_requires=['prawcore >=0.9.0, <0.10',\n+ install_requires=['prawcore >=0.10.1, <0.11',\n 'update_checker >=0.16'],\n keywords='reddit api wrapper',\n license='Simplified BSD License',\n", "issue": "Explain \"PRAW is not thread safe\" in the documentation.\nIt would be great to provide an example of why it is not thread safe.\r\n\r\nRelevant comment:\r\n\r\nhttps://www.reddit.com/r/redditdev/comments/63ugl5/praw_mulitprocessing_handler_prawhandler_is_not_a/dfx9oet/?context=3\n", "before_files": [{"content": "\"\"\"praw setup.py\"\"\"\n\nimport re\nfrom codecs import open\nfrom os import path\nfrom setuptools import find_packages, setup\n\n\nPACKAGE_NAME = 'praw'\nHERE = path.abspath(path.dirname(__file__))\nwith open(path.join(HERE, 'README.rst'), encoding='utf-8') as fp:\n README = fp.read()\nwith open(path.join(HERE, PACKAGE_NAME, 'const.py'),\n encoding='utf-8') as fp:\n VERSION = re.search(\"__version__ = '([^']+)'\", fp.read()).group(1)\n\n\nsetup(name=PACKAGE_NAME,\n author='Bryce Boe',\n author_email='[email protected]',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Topic :: Utilities'],\n description=('PRAW, an acronym for `Python Reddit API Wrapper`, is a '\n 'python package that allows for simple access to '\n 'reddit\\'s API.'),\n install_requires=['prawcore >=0.9.0, <0.10',\n 'update_checker >=0.16'],\n keywords='reddit api wrapper',\n license='Simplified BSD License',\n long_description=README,\n package_data={'': ['LICENSE.txt'], PACKAGE_NAME: ['*.ini']},\n packages=find_packages(exclude=['tests', 'tests.*']),\n setup_requires=['pytest-runner >=2.1'],\n tests_require=['betamax >=0.8, <0.9',\n 'betamax-matchers >=0.3.0, <0.4',\n 'betamax-serializers >=0.2, <0.3',\n 'mock >=0.8',\n 'pytest >=2.7.3',\n 'six >=1.10'],\n test_suite='tests',\n url='https://praw.readthedocs.org/',\n version=VERSION)\n", "path": "setup.py"}, {"content": "import os\nimport sys\nsys.path.insert(0, '..')\n\nfrom praw import __version__\n\ncopyright = '2016, Bryce Boe'\nexclude_patterns = ['_build']\nextensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']\nhtml_static_path = ['_static']\nhtml_theme = 'sphinx_rtd_theme'\nhtml_theme_options = {\n 'collapse_navigation': True\n}\nhtml_use_smartypants = True\nhtmlhelp_basename = 'PRAW'\nintersphinx_mapping = {'python': ('https://docs.python.org/3.6', None)}\nmaster_doc = 'index'\nnitpicky = True\nproject = 
'PRAW'\npygments_style = 'sphinx'\nrelease = __version__\nsource_suffix = '.rst'\nsuppress_warnings = ['image.nonlocal_uri']\nversion = '.'.join(__version__.split('.', 2)[:2])\n\n\n# Use RTD theme locally\nif not os.environ.get('READTHEDOCS'):\n import sphinx_rtd_theme\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n\ndef skip(app, what, name, obj, skip, options):\n if name in {'__call__', '__contains__', '__getitem__', '__init__',\n '__iter__', '__len__'}:\n return False\n return skip\n\n\ndef setup(app):\n app.connect('autodoc-skip-member', skip)\n", "path": "docs/conf.py"}], "after_files": [{"content": "\"\"\"praw setup.py\"\"\"\n\nimport re\nfrom codecs import open\nfrom os import path\nfrom setuptools import find_packages, setup\n\n\nPACKAGE_NAME = 'praw'\nHERE = path.abspath(path.dirname(__file__))\nwith open(path.join(HERE, 'README.rst'), encoding='utf-8') as fp:\n README = fp.read()\nwith open(path.join(HERE, PACKAGE_NAME, 'const.py'),\n encoding='utf-8') as fp:\n VERSION = re.search(\"__version__ = '([^']+)'\", fp.read()).group(1)\n\n\nsetup(name=PACKAGE_NAME,\n author='Bryce Boe',\n author_email='[email protected]',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Topic :: Utilities'],\n description=('PRAW, an acronym for `Python Reddit API Wrapper`, is a '\n 'python package that allows for simple access to '\n 'reddit\\'s API.'),\n install_requires=['prawcore >=0.10.1, <0.11',\n 'update_checker >=0.16'],\n keywords='reddit api wrapper',\n license='Simplified BSD License',\n long_description=README,\n package_data={'': ['LICENSE.txt'], PACKAGE_NAME: ['*.ini']},\n packages=find_packages(exclude=['tests', 'tests.*']),\n setup_requires=['pytest-runner >=2.1'],\n tests_require=['betamax >=0.8, <0.9',\n 'betamax-matchers >=0.3.0, <0.4',\n 'betamax-serializers >=0.2, <0.3',\n 'mock >=0.8',\n 'pytest >=2.7.3',\n 'six >=1.10'],\n test_suite='tests',\n url='https://praw.readthedocs.org/',\n version=VERSION)\n", "path": "setup.py"}, {"content": "import os\nimport sys\nsys.path.insert(0, '..')\n\nfrom praw import __version__\n\ncopyright = '2017, Bryce Boe'\nexclude_patterns = ['_build']\nextensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']\nhtml_static_path = ['_static']\nhtml_theme = 'sphinx_rtd_theme'\nhtml_theme_options = {\n 'collapse_navigation': True\n}\nhtml_use_smartypants = True\nhtmlhelp_basename = 'PRAW'\nintersphinx_mapping = {'python': ('https://docs.python.org/3.6', None)}\nmaster_doc = 'index'\nnitpicky = True\nproject = 'PRAW'\npygments_style = 'sphinx'\nrelease = __version__\nsource_suffix = '.rst'\nsuppress_warnings = ['image.nonlocal_uri']\nversion = '.'.join(__version__.split('.', 2)[:2])\n\n\n# Use RTD theme locally\nif not os.environ.get('READTHEDOCS'):\n import sphinx_rtd_theme\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n\ndef skip(app, what, name, obj, skip, options):\n if name in {'__call__', '__contains__', '__getitem__', '__init__',\n '__iter__', '__len__'}:\n return 
False\n return skip\n\n\ndef setup(app):\n app.connect('autodoc-skip-member', skip)\n", "path": "docs/conf.py"}]} | 1,350 | 237 |
gh_patches_debug_24913 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-6564 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bupa_gb spider is including closed branches
The bupa_gb.py spider is returning a number of closed practices, as these still have web pages and are still listed in the sitemap. Current examples include:
https://www.bupa.co.uk/dental/dental-care/practices/harleston
https://www.bupa.co.uk/dental/dental-care/practices/leckhampton
I think these can be reliably detected by checking if the name ends (case-insensitively) with "closed". So I'd suggest we drop any whose name matches /closed$/i. There are about 12 of these in total out of 388 branches.
(Sorry, I can no longer run the code on my computer to implement and test this myself, since I'm unable to install a recent enough version of Python.)
--- END ISSUE ---
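As a hedged sketch of the detection rule proposed above (the helper name and exact semantics are assumptions, not code from the spider):

```python
import re

CLOSED_RE = re.compile(r"closed$", re.IGNORECASE)


def is_closed_branch(name: str) -> bool:
    # Matches names such as "Harleston Dental Care - Closed" or "... CLOSED".
    return bool(CLOSED_RE.search(name.strip()))


assert is_closed_branch("Leckhampton Dental Practice - CLOSED")
assert not is_closed_branch("Harleston Dental Care")
```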
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/items.py`
Content:
```
1 # Define here the models for your scraped items
2 #
3 # See documentation in:
4 # http://doc.scrapy.org/en/latest/topics/items.html
5
6 import scrapy
7
8
9 class Feature(scrapy.Item):
10 lat = scrapy.Field()
11 lon = scrapy.Field()
12 geometry = scrapy.Field()
13 name = scrapy.Field()
14 branch = scrapy.Field()
15 addr_full = scrapy.Field()
16 housenumber = scrapy.Field()
17 street = scrapy.Field()
18 street_address = scrapy.Field()
19 city = scrapy.Field()
20 state = scrapy.Field()
21 postcode = scrapy.Field()
22 country = scrapy.Field()
23 phone = scrapy.Field()
24 email = scrapy.Field()
25 website = scrapy.Field()
26 twitter = scrapy.Field()
27 facebook = scrapy.Field()
28 opening_hours = scrapy.Field()
29 image = scrapy.Field()
30 ref = scrapy.Field()
31 brand = scrapy.Field()
32 brand_wikidata = scrapy.Field()
33 operator = scrapy.Field()
34 operator_wikidata = scrapy.Field()
35 located_in = scrapy.Field()
36 located_in_wikidata = scrapy.Field()
37 nsi_id = scrapy.Field()
38 extras = scrapy.Field()
39
40 def __init__(self, *args, **kwargs):
41 super().__init__(*args, **kwargs)
42 if not self._values.get("extras"):
43 self.__setitem__("extras", {})
44
45
46 def get_lat_lon(item: Feature) -> (float, float):
47 if geometry := item.get("geometry"):
48 if isinstance(geometry, dict):
49 if geometry.get("type") == "Point":
50 if coords := geometry.get("coordinates"):
51 try:
52 return float(coords[1]), float(coords[0])
53 except (TypeError, ValueError):
54 item["geometry"] = None
55 else:
56 try:
57 return float(item.get("lat")), float(item.get("lon"))
58 except (TypeError, ValueError):
59 pass
60 return None
61
62
63 def set_lat_lon(item: Feature, lat: float, lon: float):
64 item.pop("lat", None)
65 item.pop("lon", None)
66 if lat and lon:
67 item["geometry"] = {
68 "type": "Point",
69 "coordinates": [lon, lat],
70 }
71 else:
72 item["geometry"] = None
73
74
75 def add_social_media(item: Feature, service: str, account: str):
76 service = service.lower()
77 if service in item.fields:
78 item[service] = account
79 else:
80 item["extras"][f"contact:{service}"] = account
81
```
Path: `locations/spiders/bupa_gb.py`
Content:
```
1 from scrapy.spiders import SitemapSpider
2
3 from locations.categories import Categories
4 from locations.structured_data_spider import StructuredDataSpider
5
6
7 class BupaGBSpider(SitemapSpider, StructuredDataSpider):
8 name = "bupa_gb"
9 item_attributes = {"brand": "Bupa", "brand_wikidata": "Q931628", "extras": Categories.DENTIST.value}
10 sitemap_urls = ["https://www.bupa.co.uk/robots.txt"]
11 sitemap_rules = [(r"/practices/([-\w]+)$", "parse_sd")]
12
13 def post_process_item(self, item, response, ld_data, **kwargs):
14 if "Total Dental Care" in item["name"]:
15 item["brand"] = "Total Dental Care"
16 yield item
17
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/items.py b/locations/items.py
--- a/locations/items.py
+++ b/locations/items.py
@@ -2,6 +2,7 @@
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
+from datetime import datetime
import scrapy
@@ -78,3 +79,7 @@
item[service] = account
else:
item["extras"][f"contact:{service}"] = account
+
+
+def set_closed(item: Feature, end_date: datetime = None):
+ item["extras"]["end_date"] = end_date.strftime("%Y-%m-%d") if end_date else "yes"
diff --git a/locations/spiders/bupa_gb.py b/locations/spiders/bupa_gb.py
--- a/locations/spiders/bupa_gb.py
+++ b/locations/spiders/bupa_gb.py
@@ -1,6 +1,7 @@
from scrapy.spiders import SitemapSpider
from locations.categories import Categories
+from locations.items import set_closed
from locations.structured_data_spider import StructuredDataSpider
@@ -13,4 +14,8 @@
def post_process_item(self, item, response, ld_data, **kwargs):
if "Total Dental Care" in item["name"]:
item["brand"] = "Total Dental Care"
+
+ if item["name"].lower().endswith(" - closed"):
+ set_closed(item)
+
yield item
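As a hedged usage sketch of the `set_closed` helper introduced by the patch above (assuming the patched `locations/items.py`; the sample values are made up):

```python
from datetime import datetime

from locations.items import Feature, set_closed

item = Feature(name="Harleston Dental Care - Closed", ref="harleston")
set_closed(item)                         # no date known: extras["end_date"] = "yes"
set_closed(item, datetime(2024, 1, 31))  # with a date: extras["end_date"] = "2024-01-31"
print(item["extras"]["end_date"])
```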
| {"golden_diff": "diff --git a/locations/items.py b/locations/items.py\n--- a/locations/items.py\n+++ b/locations/items.py\n@@ -2,6 +2,7 @@\n #\n # See documentation in:\n # http://doc.scrapy.org/en/latest/topics/items.html\n+from datetime import datetime\n \n import scrapy\n \n@@ -78,3 +79,7 @@\n item[service] = account\n else:\n item[\"extras\"][f\"contact:{service}\"] = account\n+\n+\n+def set_closed(item: Feature, end_date: datetime = None):\n+ item[\"extras\"][\"end_date\"] = end_date.strftime(\"%Y-%m-%d\") if end_date else \"yes\"\ndiff --git a/locations/spiders/bupa_gb.py b/locations/spiders/bupa_gb.py\n--- a/locations/spiders/bupa_gb.py\n+++ b/locations/spiders/bupa_gb.py\n@@ -1,6 +1,7 @@\n from scrapy.spiders import SitemapSpider\n \n from locations.categories import Categories\n+from locations.items import set_closed\n from locations.structured_data_spider import StructuredDataSpider\n \n \n@@ -13,4 +14,8 @@\n def post_process_item(self, item, response, ld_data, **kwargs):\n if \"Total Dental Care\" in item[\"name\"]:\n item[\"brand\"] = \"Total Dental Care\"\n+\n+ if item[\"name\"].lower().endswith(\" - closed\"):\n+ set_closed(item)\n+\n yield item\n", "issue": "bupa_gb spider is including closed branches\nThe bupa_gb.py spider is returning a number of closed practices, as these still have web pages and are still listed in the sitemap. Current examples include:\r\n\r\nhttps://www.bupa.co.uk/dental/dental-care/practices/harleston\r\nhttps://www.bupa.co.uk/dental/dental-care/practices/leckhampton\r\n\r\nI think these can be reliably detected by checking if the name ends (case-insensitively) with \"closed\". So I'd suggest we drop any whose name matches /closed$/i. There are about 12 of these in total out of 388 branches.\r\n\r\n(Sorry, I can no longer run the code on my computer to implement and test this myself, since I'm unable to install a recent enough version of Python.)\n", "before_files": [{"content": "# Define here the models for your scraped items\n#\n# See documentation in:\n# http://doc.scrapy.org/en/latest/topics/items.html\n\nimport scrapy\n\n\nclass Feature(scrapy.Item):\n lat = scrapy.Field()\n lon = scrapy.Field()\n geometry = scrapy.Field()\n name = scrapy.Field()\n branch = scrapy.Field()\n addr_full = scrapy.Field()\n housenumber = scrapy.Field()\n street = scrapy.Field()\n street_address = scrapy.Field()\n city = scrapy.Field()\n state = scrapy.Field()\n postcode = scrapy.Field()\n country = scrapy.Field()\n phone = scrapy.Field()\n email = scrapy.Field()\n website = scrapy.Field()\n twitter = scrapy.Field()\n facebook = scrapy.Field()\n opening_hours = scrapy.Field()\n image = scrapy.Field()\n ref = scrapy.Field()\n brand = scrapy.Field()\n brand_wikidata = scrapy.Field()\n operator = scrapy.Field()\n operator_wikidata = scrapy.Field()\n located_in = scrapy.Field()\n located_in_wikidata = scrapy.Field()\n nsi_id = scrapy.Field()\n extras = scrapy.Field()\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if not self._values.get(\"extras\"):\n self.__setitem__(\"extras\", {})\n\n\ndef get_lat_lon(item: Feature) -> (float, float):\n if geometry := item.get(\"geometry\"):\n if isinstance(geometry, dict):\n if geometry.get(\"type\") == \"Point\":\n if coords := geometry.get(\"coordinates\"):\n try:\n return float(coords[1]), float(coords[0])\n except (TypeError, ValueError):\n item[\"geometry\"] = None\n else:\n try:\n return float(item.get(\"lat\")), float(item.get(\"lon\"))\n except (TypeError, ValueError):\n pass\n return 
None\n\n\ndef set_lat_lon(item: Feature, lat: float, lon: float):\n item.pop(\"lat\", None)\n item.pop(\"lon\", None)\n if lat and lon:\n item[\"geometry\"] = {\n \"type\": \"Point\",\n \"coordinates\": [lon, lat],\n }\n else:\n item[\"geometry\"] = None\n\n\ndef add_social_media(item: Feature, service: str, account: str):\n service = service.lower()\n if service in item.fields:\n item[service] = account\n else:\n item[\"extras\"][f\"contact:{service}\"] = account\n", "path": "locations/items.py"}, {"content": "from scrapy.spiders import SitemapSpider\n\nfrom locations.categories import Categories\nfrom locations.structured_data_spider import StructuredDataSpider\n\n\nclass BupaGBSpider(SitemapSpider, StructuredDataSpider):\n name = \"bupa_gb\"\n item_attributes = {\"brand\": \"Bupa\", \"brand_wikidata\": \"Q931628\", \"extras\": Categories.DENTIST.value}\n sitemap_urls = [\"https://www.bupa.co.uk/robots.txt\"]\n sitemap_rules = [(r\"/practices/([-\\w]+)$\", \"parse_sd\")]\n\n def post_process_item(self, item, response, ld_data, **kwargs):\n if \"Total Dental Care\" in item[\"name\"]:\n item[\"brand\"] = \"Total Dental Care\"\n yield item\n", "path": "locations/spiders/bupa_gb.py"}], "after_files": [{"content": "# Define here the models for your scraped items\n#\n# See documentation in:\n# http://doc.scrapy.org/en/latest/topics/items.html\nfrom datetime import datetime\n\nimport scrapy\n\n\nclass Feature(scrapy.Item):\n lat = scrapy.Field()\n lon = scrapy.Field()\n geometry = scrapy.Field()\n name = scrapy.Field()\n branch = scrapy.Field()\n addr_full = scrapy.Field()\n housenumber = scrapy.Field()\n street = scrapy.Field()\n street_address = scrapy.Field()\n city = scrapy.Field()\n state = scrapy.Field()\n postcode = scrapy.Field()\n country = scrapy.Field()\n phone = scrapy.Field()\n email = scrapy.Field()\n website = scrapy.Field()\n twitter = scrapy.Field()\n facebook = scrapy.Field()\n opening_hours = scrapy.Field()\n image = scrapy.Field()\n ref = scrapy.Field()\n brand = scrapy.Field()\n brand_wikidata = scrapy.Field()\n operator = scrapy.Field()\n operator_wikidata = scrapy.Field()\n located_in = scrapy.Field()\n located_in_wikidata = scrapy.Field()\n nsi_id = scrapy.Field()\n extras = scrapy.Field()\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if not self._values.get(\"extras\"):\n self.__setitem__(\"extras\", {})\n\n\ndef get_lat_lon(item: Feature) -> (float, float):\n if geometry := item.get(\"geometry\"):\n if isinstance(geometry, dict):\n if geometry.get(\"type\") == \"Point\":\n if coords := geometry.get(\"coordinates\"):\n try:\n return float(coords[1]), float(coords[0])\n except (TypeError, ValueError):\n item[\"geometry\"] = None\n else:\n try:\n return float(item.get(\"lat\")), float(item.get(\"lon\"))\n except (TypeError, ValueError):\n pass\n return None\n\n\ndef set_lat_lon(item: Feature, lat: float, lon: float):\n item.pop(\"lat\", None)\n item.pop(\"lon\", None)\n if lat and lon:\n item[\"geometry\"] = {\n \"type\": \"Point\",\n \"coordinates\": [lon, lat],\n }\n else:\n item[\"geometry\"] = None\n\n\ndef add_social_media(item: Feature, service: str, account: str):\n service = service.lower()\n if service in item.fields:\n item[service] = account\n else:\n item[\"extras\"][f\"contact:{service}\"] = account\n\n\ndef set_closed(item: Feature, end_date: datetime = None):\n item[\"extras\"][\"end_date\"] = end_date.strftime(\"%Y-%m-%d\") if end_date else \"yes\"\n", "path": "locations/items.py"}, {"content": "from scrapy.spiders import 
SitemapSpider\n\nfrom locations.categories import Categories\nfrom locations.items import set_closed\nfrom locations.structured_data_spider import StructuredDataSpider\n\n\nclass BupaGBSpider(SitemapSpider, StructuredDataSpider):\n name = \"bupa_gb\"\n item_attributes = {\"brand\": \"Bupa\", \"brand_wikidata\": \"Q931628\", \"extras\": Categories.DENTIST.value}\n sitemap_urls = [\"https://www.bupa.co.uk/robots.txt\"]\n sitemap_rules = [(r\"/practices/([-\\w]+)$\", \"parse_sd\")]\n\n def post_process_item(self, item, response, ld_data, **kwargs):\n if \"Total Dental Care\" in item[\"name\"]:\n item[\"brand\"] = \"Total Dental Care\"\n\n if item[\"name\"].lower().endswith(\" - closed\"):\n set_closed(item)\n\n yield item\n", "path": "locations/spiders/bupa_gb.py"}]} | 1,318 | 318 |
gh_patches_debug_33731 | rasdani/github-patches | git_diff | pyodide__pyodide-872 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow Micropip to download from relative urls
I think to allow relative urls, we'd only need to change the check here:
https://github.com/iodide-project/pyodide/blob/3a06f5dfcb9b536e9ece1f68f6963717acf82486/packages/micropip/micropip/micropip.py#L201
to `if "/" in requirement` or `if requirement.endswith(".whl")`.
Also, the documentation of `micropip.install` is a bit lacking. It'd be good to add an explanation of how `micropip` decides what a URL is.
But for instance, it could be helpful to indicate the case where `url` is `"some_package-vers-py3-none-any.whl"`: does this expect a file `some_package-vers-py3-none-any.whl` to be in the current directory?
It would be good to mention that all wheels need to be named according to PEP 427 too.
https://www.python.org/dev/peps/pep-0427/#file-name-convention
Redundancies / differences between pyodide.loadPackage and micropip.install?
It seems to me that `pyodide.loadPackage` and `micropip.install` have significant redundancies in their functionality. Is there any difference in their purpose? Which one is better? Could they be merged? If not, it would be good to add a very explicit explanation of their differences to the docs.
--- END ISSUE ---
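As an illustrative aside (not part of the original issue): the proposed check and the PEP 427 file-name convention can be sketched as below. The parsing mirrors the `_parse_wheel_url` logic shown later in this record, but the standalone helper names here are assumptions.

```python
def looks_like_wheel(requirement: str) -> bool:
    # The issue suggests treating anything ending in ".whl" as a URL,
    # which also covers relative URLs such as "wheels/pkg-1.0-py3-none-any.whl".
    return requirement.endswith(".whl")


def split_wheel_filename(filename: str):
    # PEP 427: {name}-{version}-{python tag}-{abi tag}-{platform tag}.whl
    stem = filename[: -len(".whl")]
    tokens = stem.split("-")
    if len(tokens) < 5:
        raise ValueError(f"{filename} is not a valid wheel file name.")
    version, python_tag, abi_tag, platform = tokens[-4:]
    return "-".join(tokens[:-4]), version, python_tag, abi_tag, platform


print(split_wheel_filename("some_package-0.1.0-py3-none-any.whl"))
```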
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `packages/micropip/micropip/micropip.py`
Content:
```
1 try:
2 from js import Promise, XMLHttpRequest
3 except ImportError:
4 XMLHttpRequest = None
5
6 try:
7 from js import pyodide as js_pyodide
8 except ImportError:
9
10 class js_pyodide: # type: ignore
11 """A mock object to allow import of this package outside pyodide"""
12
13 class _module:
14 class packages:
15 dependencies = [] # type: ignore
16
17
18 import hashlib
19 import importlib
20 import io
21 import json
22 from pathlib import Path
23 import zipfile
24 from typing import Dict, Any, Union, List, Tuple
25
26 from distlib import markers, util, version
27
28
29 def _nullop(*args):
30 return
31
32
33 # Provide implementations of HTTP fetching for in-browser and out-of-browser to
34 # make testing easier
35 if XMLHttpRequest is not None:
36 import pyodide # noqa
37
38 def _get_url(url):
39 req = XMLHttpRequest.new()
40 req.open("GET", url, False)
41 req.send(None)
42 return io.StringIO(req.response)
43
44 def _get_url_async(url, cb):
45 req = XMLHttpRequest.new()
46 req.open("GET", url, True)
47 req.responseType = "arraybuffer"
48
49 def callback(e):
50 if req.readyState == 4:
51 cb(io.BytesIO(req.response))
52
53 req.onreadystatechange = callback
54 req.send(None)
55
56 # In practice, this is the `site-packages` directory.
57 WHEEL_BASE = Path(__file__).parent
58 else:
59 # Outside the browser
60 from urllib.request import urlopen
61
62 def _get_url(url):
63 with urlopen(url) as fd:
64 content = fd.read()
65 return io.BytesIO(content)
66
67 def _get_url_async(url, cb):
68 cb(_get_url(url))
69
70 WHEEL_BASE = Path(".") / "wheels"
71
72
73 def _get_pypi_json(pkgname):
74 url = f"https://pypi.org/pypi/{pkgname}/json"
75 fd = _get_url(url)
76 return json.load(fd)
77
78
79 def _parse_wheel_url(url: str) -> Tuple[str, Dict[str, Any], str]:
80 """Parse wheels url and extract available metadata
81
82 See https://www.python.org/dev/peps/pep-0427/#file-name-convention
83 """
84 file_name = Path(url).name
85 # also strip '.whl' extension.
86 wheel_name = Path(url).stem
87 tokens = wheel_name.split("-")
88 # TODO: support optional build tags in the filename (cf PEP 427)
89 if len(tokens) < 5:
90 raise ValueError(f"{file_name} is not a valid wheel file name.")
91 version, python_tag, abi_tag, platform = tokens[-4:]
92 name = "-".join(tokens[:-4])
93 wheel = {
94 "digests": None, # checksums not available
95 "filename": file_name,
96 "packagetype": "bdist_wheel",
97 "python_version": python_tag,
98 "abi_tag": abi_tag,
99 "platform": platform,
100 "url": url,
101 }
102
103 return name, wheel, version
104
105
106 class _WheelInstaller:
107 def extract_wheel(self, fd):
108 with zipfile.ZipFile(fd) as zf:
109 zf.extractall(WHEEL_BASE)
110
111 def validate_wheel(self, data, fileinfo):
112 if fileinfo.get("digests") is None:
113 # No checksums available, e.g. because installing
114 # from a different location than PyPi.
115 return
116 sha256 = fileinfo["digests"]["sha256"]
117 m = hashlib.sha256()
118 m.update(data.getvalue())
119 if m.hexdigest() != sha256:
120 raise ValueError("Contents don't match hash")
121
122 def __call__(self, name, fileinfo, resolve, reject):
123 url = self.fetch_wheel(name, fileinfo)
124
125 def callback(wheel):
126 try:
127 self.validate_wheel(wheel, fileinfo)
128 self.extract_wheel(wheel)
129 except Exception as e:
130 reject(str(e))
131 else:
132 resolve()
133
134 _get_url_async(url, callback)
135
136
137 class _RawWheelInstaller(_WheelInstaller):
138 def fetch_wheel(self, name, fileinfo):
139 return fileinfo["url"]
140
141
142 class _PackageManager:
143 version_scheme = version.get_scheme("normalized")
144
145 def __init__(self):
146 self.builtin_packages = {}
147 self.builtin_packages.update(js_pyodide._module.packages.dependencies)
148 self.installed_packages = {}
149
150 def install(
151 self,
152 requirements: Union[str, List[str]],
153 ctx=None,
154 wheel_installer=None,
155 resolve=_nullop,
156 reject=_nullop,
157 ):
158 try:
159 if ctx is None:
160 ctx = {"extra": None}
161
162 if wheel_installer is None:
163 wheel_installer = _RawWheelInstaller()
164
165 complete_ctx = dict(markers.DEFAULT_CONTEXT)
166 complete_ctx.update(ctx)
167
168 if isinstance(requirements, str):
169 requirements = [requirements]
170
171 transaction: Dict[str, Any] = {
172 "wheels": [],
173 "pyodide_packages": set(),
174 "locked": dict(self.installed_packages),
175 }
176 for requirement in requirements:
177 self.add_requirement(requirement, complete_ctx, transaction)
178 except Exception as e:
179 reject(str(e))
180
181 resolve_count = [len(transaction["wheels"])]
182
183 def do_resolve(*args):
184 resolve_count[0] -= 1
185 if resolve_count[0] == 0:
186 resolve(f'Installed {", ".join(self.installed_packages.keys())}')
187
188 # Install built-in packages
189 pyodide_packages = transaction["pyodide_packages"]
190 if len(pyodide_packages):
191 resolve_count[0] += 1
192 self.installed_packages.update(dict((k, None) for k in pyodide_packages))
193 js_pyodide.loadPackage(list(pyodide_packages)).then(do_resolve)
194
195 # Now install PyPI packages
196 for name, wheel, ver in transaction["wheels"]:
197 wheel_installer(name, wheel, do_resolve, reject)
198 self.installed_packages[name] = ver
199
200 def add_requirement(self, requirement: str, ctx, transaction):
201 if requirement.startswith(("http://", "https://")):
202 # custom download location
203 name, wheel, version = _parse_wheel_url(requirement)
204 transaction["wheels"].append((name, wheel, version))
205 return
206
207 req = util.parse_requirement(requirement)
208
209 # If it's a Pyodide package, use that instead of the one on PyPI
210 if req.name in self.builtin_packages:
211 transaction["pyodide_packages"].add(req.name)
212 return
213
214 if req.marker:
215 if not markers.evaluator.evaluate(req.marker, ctx):
216 return
217
218 matcher = self.version_scheme.matcher(req.requirement)
219
220 # If we already have something that will work, don't
221 # fetch again
222 for name, ver in transaction["locked"].items():
223 if name == req.name:
224 if matcher.match(ver):
225 break
226 else:
227 raise ValueError(
228 f"Requested '{requirement}', "
229 f"but {name}=={ver} is already installed"
230 )
231 else:
232 metadata = _get_pypi_json(req.name)
233 wheel, ver = self.find_wheel(metadata, req)
234 transaction["locked"][req.name] = ver
235
236 recurs_reqs = metadata.get("info", {}).get("requires_dist") or []
237 for recurs_req in recurs_reqs:
238 self.add_requirement(recurs_req, ctx, transaction)
239
240 transaction["wheels"].append((req.name, wheel, ver))
241
242 def find_wheel(self, metadata, req):
243 releases = []
244 for ver, files in metadata.get("releases", {}).items():
245 ver = self.version_scheme.suggest(ver)
246 if ver is not None:
247 releases.append((ver, files))
248
249 def version_number(release):
250 return version.NormalizedVersion(release[0])
251
252 releases = sorted(releases, key=version_number, reverse=True)
253 matcher = self.version_scheme.matcher(req.requirement)
254 for ver, meta in releases:
255 if matcher.match(ver):
256 for fileinfo in meta:
257 if fileinfo["filename"].endswith("py3-none-any.whl"):
258 return fileinfo, ver
259
260 raise ValueError(f"Couldn't find a pure Python 3 wheel for '{req.requirement}'")
261
262
263 # Make PACKAGE_MANAGER singleton
264 PACKAGE_MANAGER = _PackageManager()
265 del _PackageManager
266
267
268 def install(requirements: Union[str, List[str]]):
269 """Install the given package and all of its dependencies.
270
271 This only works for pure Python wheels or for packages built
272 in pyodide. If a package is not found in the pyodide repository
273 it will be loaded from PyPi.
274
275 Parameters
276 ----------
277 requirements
278 a requirements or a list of requirements to install.
279 Can be composed either of
280
281 - package names, as defined in pyodide repository or on PyPi
282 - URLs pointing to pure Python wheels. The file name of such wheels
283 end with ``none-any.whl``.
284
285 Returns
286 -------
287 a Promise that resolves when all packages have downloaded and installed.
288 """
289
290 def do_install(resolve, reject):
291 PACKAGE_MANAGER.install(requirements, resolve=resolve, reject=reject)
292 importlib.invalidate_caches()
293
294 return Promise.new(do_install)
295
296
297 __all__ = ["install"]
298
299
300 if __name__ == "__main__":
301 install("snowballstemmer")
302
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/packages/micropip/micropip/micropip.py b/packages/micropip/micropip/micropip.py
--- a/packages/micropip/micropip/micropip.py
+++ b/packages/micropip/micropip/micropip.py
@@ -198,7 +198,7 @@
self.installed_packages[name] = ver
def add_requirement(self, requirement: str, ctx, transaction):
- if requirement.startswith(("http://", "https://")):
+ if requirement.endswith(".whl"):
# custom download location
name, wheel, version = _parse_wheel_url(requirement)
transaction["wheels"].append((name, wheel, version))
@@ -268,23 +268,28 @@
def install(requirements: Union[str, List[str]]):
"""Install the given package and all of its dependencies.
- This only works for pure Python wheels or for packages built
- in pyodide. If a package is not found in the pyodide repository
- it will be loaded from PyPi.
+ See :ref:`loading packages <loading_packages>` for more information.
+
+ This only works for packages that are either pure Python or for packages with
+ C extensions that are built in pyodide. If a pure Python package is not found
+ in the pyodide repository it will be loaded from PyPi.
Parameters
----------
requirements
- a requirements or a list of requirements to install.
- Can be composed either of
+ A requirement or list of requirements to install.
+ Each requirement is a string.
+
+ - If the requirement ends in ".whl", the file will be interpreted as a url.
+ The file must be a wheel named in compliance with the
+ [PEP 427 naming convention](https://www.python.org/dev/peps/pep-0427/#file-format)
- - package names, as defined in pyodide repository or on PyPi
- - URLs pointing to pure Python wheels. The file name of such wheels
- end with ``none-any.whl``.
+ - A package name. A package by this name must either be present in the pyodide
+ repository at `languagePluginUrl` or on PyPi.
Returns
-------
- a Promise that resolves when all packages have downloaded and installed.
+ A Promise that resolves when all packages have been downloaded and installed.
"""
def do_install(resolve, reject):
| {"golden_diff": "diff --git a/packages/micropip/micropip/micropip.py b/packages/micropip/micropip/micropip.py\n--- a/packages/micropip/micropip/micropip.py\n+++ b/packages/micropip/micropip/micropip.py\n@@ -198,7 +198,7 @@\n self.installed_packages[name] = ver\n \n def add_requirement(self, requirement: str, ctx, transaction):\n- if requirement.startswith((\"http://\", \"https://\")):\n+ if requirement.endswith(\".whl\"):\n # custom download location\n name, wheel, version = _parse_wheel_url(requirement)\n transaction[\"wheels\"].append((name, wheel, version))\n@@ -268,23 +268,28 @@\n def install(requirements: Union[str, List[str]]):\n \"\"\"Install the given package and all of its dependencies.\n \n- This only works for pure Python wheels or for packages built\n- in pyodide. If a package is not found in the pyodide repository\n- it will be loaded from PyPi.\n+ See :ref:`loading packages <loading_packages>` for more information.\n+\n+ This only works for packages that are either pure Python or for packages with\n+ C extensions that are built in pyodide. If a pure Python package is not found\n+ in the pyodide repository it will be loaded from PyPi.\n \n Parameters\n ----------\n requirements\n- a requirements or a list of requirements to install.\n- Can be composed either of\n+ A requirement or list of requirements to install.\n+ Each requirement is a string.\n+\n+ - If the requirement ends in \".whl\", the file will be interpreted as a url.\n+ The file must be a wheel named in compliance with the\n+ [PEP 427 naming convention](https://www.python.org/dev/peps/pep-0427/#file-format)\n \n- - package names, as defined in pyodide repository or on PyPi\n- - URLs pointing to pure Python wheels. The file name of such wheels\n- end with ``none-any.whl``.\n+ - A package name. A package by this name must either be present in the pyodide\n+ repository at `languagePluginUrl` or on PyPi.\n \n Returns\n -------\n- a Promise that resolves when all packages have downloaded and installed.\n+ A Promise that resolves when all packages have been downloaded and installed.\n \"\"\"\n \n def do_install(resolve, reject):\n", "issue": "Allow Micropip to download from relative urls\nI think to allow relative urls, we'd only need to change the check here:\r\n\r\nhttps://github.com/iodide-project/pyodide/blob/3a06f5dfcb9b536e9ece1f68f6963717acf82486/packages/micropip/micropip/micropip.py#L201\r\n\r\nto `if \"/\" in requirement` or `if requirement.endswith(\".whl\")`.\r\n\r\nAlso, the documentation of `micropip.install` is a bit lacking. It'd be good to add an explanation of how `micropip` decides what a url is.\r\nBut for instance, it could be helpful to indicate the case where `url` is `\"some_package-vers-py3-none-any.whl\"`: does this expect a file `some_package-vers-py3-none-any.whl` to be in the current directory?\r\n\r\nWould be good to mention that all wheels need to be named according to pep 427 too.\r\nhttps://www.python.org/dev/peps/pep-0427/#file-name-convention\nRedundancies / differences between pyodide.loadPackage and micropip.install?\nIt seems to me that `pyodide.loadPackage` and `micropip.install` have significant redundancies in their functionality. Is there any difference in their purpose? Which one is better? Could they be merged? 
If not, it would be good to add a very explicit explanation of their differences to the docs.\n", "before_files": [{"content": "try:\n from js import Promise, XMLHttpRequest\nexcept ImportError:\n XMLHttpRequest = None\n\ntry:\n from js import pyodide as js_pyodide\nexcept ImportError:\n\n class js_pyodide: # type: ignore\n \"\"\"A mock object to allow import of this package outside pyodide\"\"\"\n\n class _module:\n class packages:\n dependencies = [] # type: ignore\n\n\nimport hashlib\nimport importlib\nimport io\nimport json\nfrom pathlib import Path\nimport zipfile\nfrom typing import Dict, Any, Union, List, Tuple\n\nfrom distlib import markers, util, version\n\n\ndef _nullop(*args):\n return\n\n\n# Provide implementations of HTTP fetching for in-browser and out-of-browser to\n# make testing easier\nif XMLHttpRequest is not None:\n import pyodide # noqa\n\n def _get_url(url):\n req = XMLHttpRequest.new()\n req.open(\"GET\", url, False)\n req.send(None)\n return io.StringIO(req.response)\n\n def _get_url_async(url, cb):\n req = XMLHttpRequest.new()\n req.open(\"GET\", url, True)\n req.responseType = \"arraybuffer\"\n\n def callback(e):\n if req.readyState == 4:\n cb(io.BytesIO(req.response))\n\n req.onreadystatechange = callback\n req.send(None)\n\n # In practice, this is the `site-packages` directory.\n WHEEL_BASE = Path(__file__).parent\nelse:\n # Outside the browser\n from urllib.request import urlopen\n\n def _get_url(url):\n with urlopen(url) as fd:\n content = fd.read()\n return io.BytesIO(content)\n\n def _get_url_async(url, cb):\n cb(_get_url(url))\n\n WHEEL_BASE = Path(\".\") / \"wheels\"\n\n\ndef _get_pypi_json(pkgname):\n url = f\"https://pypi.org/pypi/{pkgname}/json\"\n fd = _get_url(url)\n return json.load(fd)\n\n\ndef _parse_wheel_url(url: str) -> Tuple[str, Dict[str, Any], str]:\n \"\"\"Parse wheels url and extract available metadata\n\n See https://www.python.org/dev/peps/pep-0427/#file-name-convention\n \"\"\"\n file_name = Path(url).name\n # also strip '.whl' extension.\n wheel_name = Path(url).stem\n tokens = wheel_name.split(\"-\")\n # TODO: support optional build tags in the filename (cf PEP 427)\n if len(tokens) < 5:\n raise ValueError(f\"{file_name} is not a valid wheel file name.\")\n version, python_tag, abi_tag, platform = tokens[-4:]\n name = \"-\".join(tokens[:-4])\n wheel = {\n \"digests\": None, # checksums not available\n \"filename\": file_name,\n \"packagetype\": \"bdist_wheel\",\n \"python_version\": python_tag,\n \"abi_tag\": abi_tag,\n \"platform\": platform,\n \"url\": url,\n }\n\n return name, wheel, version\n\n\nclass _WheelInstaller:\n def extract_wheel(self, fd):\n with zipfile.ZipFile(fd) as zf:\n zf.extractall(WHEEL_BASE)\n\n def validate_wheel(self, data, fileinfo):\n if fileinfo.get(\"digests\") is None:\n # No checksums available, e.g. 
because installing\n # from a different location than PyPi.\n return\n sha256 = fileinfo[\"digests\"][\"sha256\"]\n m = hashlib.sha256()\n m.update(data.getvalue())\n if m.hexdigest() != sha256:\n raise ValueError(\"Contents don't match hash\")\n\n def __call__(self, name, fileinfo, resolve, reject):\n url = self.fetch_wheel(name, fileinfo)\n\n def callback(wheel):\n try:\n self.validate_wheel(wheel, fileinfo)\n self.extract_wheel(wheel)\n except Exception as e:\n reject(str(e))\n else:\n resolve()\n\n _get_url_async(url, callback)\n\n\nclass _RawWheelInstaller(_WheelInstaller):\n def fetch_wheel(self, name, fileinfo):\n return fileinfo[\"url\"]\n\n\nclass _PackageManager:\n version_scheme = version.get_scheme(\"normalized\")\n\n def __init__(self):\n self.builtin_packages = {}\n self.builtin_packages.update(js_pyodide._module.packages.dependencies)\n self.installed_packages = {}\n\n def install(\n self,\n requirements: Union[str, List[str]],\n ctx=None,\n wheel_installer=None,\n resolve=_nullop,\n reject=_nullop,\n ):\n try:\n if ctx is None:\n ctx = {\"extra\": None}\n\n if wheel_installer is None:\n wheel_installer = _RawWheelInstaller()\n\n complete_ctx = dict(markers.DEFAULT_CONTEXT)\n complete_ctx.update(ctx)\n\n if isinstance(requirements, str):\n requirements = [requirements]\n\n transaction: Dict[str, Any] = {\n \"wheels\": [],\n \"pyodide_packages\": set(),\n \"locked\": dict(self.installed_packages),\n }\n for requirement in requirements:\n self.add_requirement(requirement, complete_ctx, transaction)\n except Exception as e:\n reject(str(e))\n\n resolve_count = [len(transaction[\"wheels\"])]\n\n def do_resolve(*args):\n resolve_count[0] -= 1\n if resolve_count[0] == 0:\n resolve(f'Installed {\", \".join(self.installed_packages.keys())}')\n\n # Install built-in packages\n pyodide_packages = transaction[\"pyodide_packages\"]\n if len(pyodide_packages):\n resolve_count[0] += 1\n self.installed_packages.update(dict((k, None) for k in pyodide_packages))\n js_pyodide.loadPackage(list(pyodide_packages)).then(do_resolve)\n\n # Now install PyPI packages\n for name, wheel, ver in transaction[\"wheels\"]:\n wheel_installer(name, wheel, do_resolve, reject)\n self.installed_packages[name] = ver\n\n def add_requirement(self, requirement: str, ctx, transaction):\n if requirement.startswith((\"http://\", \"https://\")):\n # custom download location\n name, wheel, version = _parse_wheel_url(requirement)\n transaction[\"wheels\"].append((name, wheel, version))\n return\n\n req = util.parse_requirement(requirement)\n\n # If it's a Pyodide package, use that instead of the one on PyPI\n if req.name in self.builtin_packages:\n transaction[\"pyodide_packages\"].add(req.name)\n return\n\n if req.marker:\n if not markers.evaluator.evaluate(req.marker, ctx):\n return\n\n matcher = self.version_scheme.matcher(req.requirement)\n\n # If we already have something that will work, don't\n # fetch again\n for name, ver in transaction[\"locked\"].items():\n if name == req.name:\n if matcher.match(ver):\n break\n else:\n raise ValueError(\n f\"Requested '{requirement}', \"\n f\"but {name}=={ver} is already installed\"\n )\n else:\n metadata = _get_pypi_json(req.name)\n wheel, ver = self.find_wheel(metadata, req)\n transaction[\"locked\"][req.name] = ver\n\n recurs_reqs = metadata.get(\"info\", {}).get(\"requires_dist\") or []\n for recurs_req in recurs_reqs:\n self.add_requirement(recurs_req, ctx, transaction)\n\n transaction[\"wheels\"].append((req.name, wheel, ver))\n\n def find_wheel(self, metadata, req):\n 
releases = []\n for ver, files in metadata.get(\"releases\", {}).items():\n ver = self.version_scheme.suggest(ver)\n if ver is not None:\n releases.append((ver, files))\n\n def version_number(release):\n return version.NormalizedVersion(release[0])\n\n releases = sorted(releases, key=version_number, reverse=True)\n matcher = self.version_scheme.matcher(req.requirement)\n for ver, meta in releases:\n if matcher.match(ver):\n for fileinfo in meta:\n if fileinfo[\"filename\"].endswith(\"py3-none-any.whl\"):\n return fileinfo, ver\n\n raise ValueError(f\"Couldn't find a pure Python 3 wheel for '{req.requirement}'\")\n\n\n# Make PACKAGE_MANAGER singleton\nPACKAGE_MANAGER = _PackageManager()\ndel _PackageManager\n\n\ndef install(requirements: Union[str, List[str]]):\n \"\"\"Install the given package and all of its dependencies.\n\n This only works for pure Python wheels or for packages built\n in pyodide. If a package is not found in the pyodide repository\n it will be loaded from PyPi.\n\n Parameters\n ----------\n requirements\n a requirements or a list of requirements to install.\n Can be composed either of\n\n - package names, as defined in pyodide repository or on PyPi\n - URLs pointing to pure Python wheels. The file name of such wheels\n end with ``none-any.whl``.\n\n Returns\n -------\n a Promise that resolves when all packages have downloaded and installed.\n \"\"\"\n\n def do_install(resolve, reject):\n PACKAGE_MANAGER.install(requirements, resolve=resolve, reject=reject)\n importlib.invalidate_caches()\n\n return Promise.new(do_install)\n\n\n__all__ = [\"install\"]\n\n\nif __name__ == \"__main__\":\n install(\"snowballstemmer\")\n", "path": "packages/micropip/micropip/micropip.py"}], "after_files": [{"content": "try:\n from js import Promise, XMLHttpRequest\nexcept ImportError:\n XMLHttpRequest = None\n\ntry:\n from js import pyodide as js_pyodide\nexcept ImportError:\n\n class js_pyodide: # type: ignore\n \"\"\"A mock object to allow import of this package outside pyodide\"\"\"\n\n class _module:\n class packages:\n dependencies = [] # type: ignore\n\n\nimport hashlib\nimport importlib\nimport io\nimport json\nfrom pathlib import Path\nimport zipfile\nfrom typing import Dict, Any, Union, List, Tuple\n\nfrom distlib import markers, util, version\n\n\ndef _nullop(*args):\n return\n\n\n# Provide implementations of HTTP fetching for in-browser and out-of-browser to\n# make testing easier\nif XMLHttpRequest is not None:\n import pyodide # noqa\n\n def _get_url(url):\n req = XMLHttpRequest.new()\n req.open(\"GET\", url, False)\n req.send(None)\n return io.StringIO(req.response)\n\n def _get_url_async(url, cb):\n req = XMLHttpRequest.new()\n req.open(\"GET\", url, True)\n req.responseType = \"arraybuffer\"\n\n def callback(e):\n if req.readyState == 4:\n cb(io.BytesIO(req.response))\n\n req.onreadystatechange = callback\n req.send(None)\n\n # In practice, this is the `site-packages` directory.\n WHEEL_BASE = Path(__file__).parent\nelse:\n # Outside the browser\n from urllib.request import urlopen\n\n def _get_url(url):\n with urlopen(url) as fd:\n content = fd.read()\n return io.BytesIO(content)\n\n def _get_url_async(url, cb):\n cb(_get_url(url))\n\n WHEEL_BASE = Path(\".\") / \"wheels\"\n\n\ndef _get_pypi_json(pkgname):\n url = f\"https://pypi.org/pypi/{pkgname}/json\"\n fd = _get_url(url)\n return json.load(fd)\n\n\ndef _parse_wheel_url(url: str) -> Tuple[str, Dict[str, Any], str]:\n \"\"\"Parse wheels url and extract available metadata\n\n See 
https://www.python.org/dev/peps/pep-0427/#file-name-convention\n \"\"\"\n file_name = Path(url).name\n # also strip '.whl' extension.\n wheel_name = Path(url).stem\n tokens = wheel_name.split(\"-\")\n # TODO: support optional build tags in the filename (cf PEP 427)\n if len(tokens) < 5:\n raise ValueError(f\"{file_name} is not a valid wheel file name.\")\n version, python_tag, abi_tag, platform = tokens[-4:]\n name = \"-\".join(tokens[:-4])\n wheel = {\n \"digests\": None, # checksums not available\n \"filename\": file_name,\n \"packagetype\": \"bdist_wheel\",\n \"python_version\": python_tag,\n \"abi_tag\": abi_tag,\n \"platform\": platform,\n \"url\": url,\n }\n\n return name, wheel, version\n\n\nclass _WheelInstaller:\n def extract_wheel(self, fd):\n with zipfile.ZipFile(fd) as zf:\n zf.extractall(WHEEL_BASE)\n\n def validate_wheel(self, data, fileinfo):\n if fileinfo.get(\"digests\") is None:\n # No checksums available, e.g. because installing\n # from a different location than PyPi.\n return\n sha256 = fileinfo[\"digests\"][\"sha256\"]\n m = hashlib.sha256()\n m.update(data.getvalue())\n if m.hexdigest() != sha256:\n raise ValueError(\"Contents don't match hash\")\n\n def __call__(self, name, fileinfo, resolve, reject):\n url = self.fetch_wheel(name, fileinfo)\n\n def callback(wheel):\n try:\n self.validate_wheel(wheel, fileinfo)\n self.extract_wheel(wheel)\n except Exception as e:\n reject(str(e))\n else:\n resolve()\n\n _get_url_async(url, callback)\n\n\nclass _RawWheelInstaller(_WheelInstaller):\n def fetch_wheel(self, name, fileinfo):\n return fileinfo[\"url\"]\n\n\nclass _PackageManager:\n version_scheme = version.get_scheme(\"normalized\")\n\n def __init__(self):\n self.builtin_packages = {}\n self.builtin_packages.update(js_pyodide._module.packages.dependencies)\n self.installed_packages = {}\n\n def install(\n self,\n requirements: Union[str, List[str]],\n ctx=None,\n wheel_installer=None,\n resolve=_nullop,\n reject=_nullop,\n ):\n try:\n if ctx is None:\n ctx = {\"extra\": None}\n\n if wheel_installer is None:\n wheel_installer = _RawWheelInstaller()\n\n complete_ctx = dict(markers.DEFAULT_CONTEXT)\n complete_ctx.update(ctx)\n\n if isinstance(requirements, str):\n requirements = [requirements]\n\n transaction: Dict[str, Any] = {\n \"wheels\": [],\n \"pyodide_packages\": set(),\n \"locked\": dict(self.installed_packages),\n }\n for requirement in requirements:\n self.add_requirement(requirement, complete_ctx, transaction)\n except Exception as e:\n reject(str(e))\n\n resolve_count = [len(transaction[\"wheels\"])]\n\n def do_resolve(*args):\n resolve_count[0] -= 1\n if resolve_count[0] == 0:\n resolve(f'Installed {\", \".join(self.installed_packages.keys())}')\n\n # Install built-in packages\n pyodide_packages = transaction[\"pyodide_packages\"]\n if len(pyodide_packages):\n resolve_count[0] += 1\n self.installed_packages.update(dict((k, None) for k in pyodide_packages))\n js_pyodide.loadPackage(list(pyodide_packages)).then(do_resolve)\n\n # Now install PyPI packages\n for name, wheel, ver in transaction[\"wheels\"]:\n wheel_installer(name, wheel, do_resolve, reject)\n self.installed_packages[name] = ver\n\n def add_requirement(self, requirement: str, ctx, transaction):\n if requirement.endswith(\".whl\"):\n # custom download location\n name, wheel, version = _parse_wheel_url(requirement)\n transaction[\"wheels\"].append((name, wheel, version))\n return\n\n req = util.parse_requirement(requirement)\n\n # If it's a Pyodide package, use that instead of the one on PyPI\n if 
req.name in self.builtin_packages:\n transaction[\"pyodide_packages\"].add(req.name)\n return\n\n if req.marker:\n if not markers.evaluator.evaluate(req.marker, ctx):\n return\n\n matcher = self.version_scheme.matcher(req.requirement)\n\n # If we already have something that will work, don't\n # fetch again\n for name, ver in transaction[\"locked\"].items():\n if name == req.name:\n if matcher.match(ver):\n break\n else:\n raise ValueError(\n f\"Requested '{requirement}', \"\n f\"but {name}=={ver} is already installed\"\n )\n else:\n metadata = _get_pypi_json(req.name)\n wheel, ver = self.find_wheel(metadata, req)\n transaction[\"locked\"][req.name] = ver\n\n recurs_reqs = metadata.get(\"info\", {}).get(\"requires_dist\") or []\n for recurs_req in recurs_reqs:\n self.add_requirement(recurs_req, ctx, transaction)\n\n transaction[\"wheels\"].append((req.name, wheel, ver))\n\n def find_wheel(self, metadata, req):\n releases = []\n for ver, files in metadata.get(\"releases\", {}).items():\n ver = self.version_scheme.suggest(ver)\n if ver is not None:\n releases.append((ver, files))\n\n def version_number(release):\n return version.NormalizedVersion(release[0])\n\n releases = sorted(releases, key=version_number, reverse=True)\n matcher = self.version_scheme.matcher(req.requirement)\n for ver, meta in releases:\n if matcher.match(ver):\n for fileinfo in meta:\n if fileinfo[\"filename\"].endswith(\"py3-none-any.whl\"):\n return fileinfo, ver\n\n raise ValueError(f\"Couldn't find a pure Python 3 wheel for '{req.requirement}'\")\n\n\n# Make PACKAGE_MANAGER singleton\nPACKAGE_MANAGER = _PackageManager()\ndel _PackageManager\n\n\ndef install(requirements: Union[str, List[str]]):\n \"\"\"Install the given package and all of its dependencies.\n\n See :ref:`loading packages <loading_packages>` for more information.\n\n This only works for packages that are either pure Python or for packages with\n C extensions that are built in pyodide. If a pure Python package is not found\n in the pyodide repository it will be loaded from PyPi.\n\n Parameters\n ----------\n requirements\n A requirement or list of requirements to install.\n Each requirement is a string.\n\n - If the requirement ends in \".whl\", the file will be interpreted as a url.\n The file must be a wheel named in compliance with the\n [PEP 427 naming convention](https://www.python.org/dev/peps/pep-0427/#file-format)\n\n - A package name. A package by this name must either be present in the pyodide\n repository at `languagePluginUrl` or on PyPi.\n\n Returns\n -------\n A Promise that resolves when all packages have been downloaded and installed.\n \"\"\"\n\n def do_install(resolve, reject):\n PACKAGE_MANAGER.install(requirements, resolve=resolve, reject=reject)\n importlib.invalidate_caches()\n\n return Promise.new(do_install)\n\n\n__all__ = [\"install\"]\n\n\nif __name__ == \"__main__\":\n install(\"snowballstemmer\")\n", "path": "packages/micropip/micropip/micropip.py"}]} | 3,477 | 566 |
gh_patches_debug_4358 | rasdani/github-patches | git_diff | pallets__click-1832 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Reconsider the choice of adding a newline after multi-line option definitions
First, thanks for your work.
I ask you to reconsider the feature introduced with PR https://github.com/pallets/click/pull/1081.
1. Adding a newline only to some options feels inconsistent and leads to weird-looking, "non-uniform" help strings. It's even worse when you use an extension that adds new help sections (e.g. [Cloup](https://github.com/janluke/cloup) for option groups), since some sections are either not clearly demarcated or demarcated too strongly. It looks like a complete mess.
2. I'm pretty sure it's non-standard. Why should it be the default?
As a consequence, it feels like a bug. I mistook it for a bug myself. Another developer reported it as a bug (issue https://github.com/pallets/click/issues/1559). The few people I asked don't like it and consider it a problem worth the effort of writing additional code to get rid of it. Most people in the original issue (https://github.com/pallets/click/issues/1075) are for an all-or-nothing behavior and described the current behavior as inconsistent as well.
Here are some alternative proposals.
1. Remove the feature.
2. Make it possible but non-default. Two solutions here:
1. Add a parameter to `HelpFormatter`. It can be made as simple as a boolean or as complex as a "list item separation strategy". A user can pass a custom factory function as `Context.formatter_class` (which probably could be called `formatter_factory`).
   2. Refactor `HelpFormatter` to make it easy for people to override without copying, pasting and modifying the current code of `HelpFormatter.write_dl`.
Thank you again.
--- END ISSUE ---
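For concreteness, proposal 2.ii can be illustrated with a sketch: a hypothetical `CompactHelpFormatter` that copies `write_dl` from the file listed below and simply drops the final blank-line branch. The need to copy the whole method is exactly the pain point that proposal wants to remove. All names come from `click.formatting` (plus the private `term_len` helper, assumed to stay importable), and the subclass could then be wired in through the `Context.formatter_class` hook mentioned in proposal 2.i.
```python
from click.formatting import HelpFormatter, iter_rows, measure_table, wrap_text
from click._compat import term_len  # private helper, used only for this sketch


class CompactHelpFormatter(HelpFormatter):
    """Help formatter that never writes a blank line after multi-line help."""

    def write_dl(self, rows, col_max=30, col_spacing=2):
        rows = list(rows)
        widths = measure_table(rows)
        if len(widths) != 2:
            raise TypeError("Expected two columns for definition list")

        first_col = min(widths[0], col_max) + col_spacing

        for first, second in iter_rows(rows, len(widths)):
            self.write(f"{'':>{self.current_indent}}{first}")
            if not second:
                self.write("\n")
                continue
            if term_len(first) <= first_col - col_spacing:
                self.write(" " * (first_col - term_len(first)))
            else:
                self.write("\n")
                self.write(" " * (first_col + self.current_indent))

            text_width = max(self.width - first_col - 2, 10)
            wrapped_text = wrap_text(second, text_width, preserve_paragraphs=True)
            lines = wrapped_text.splitlines()

            if lines:
                self.write(f"{lines[0]}\n")
                for line in lines[1:]:
                    self.write(f"{'':>{first_col + self.current_indent}}{line}\n")
                # upstream write_dl adds an extra self.write("\n") here when
                # len(lines) > 1; leaving it out removes the separator line
            else:
                self.write("\n")
```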
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/click/formatting.py`
Content:
```
1 import typing as t
2 from contextlib import contextmanager
3
4 from ._compat import term_len
5 from .parser import split_opt
6
7 # Can force a width. This is used by the test system
8 FORCED_WIDTH: t.Optional[int] = None
9
10
11 def measure_table(rows):
12 widths = {}
13 for row in rows:
14 for idx, col in enumerate(row):
15 widths[idx] = max(widths.get(idx, 0), term_len(col))
16 return tuple(y for x, y in sorted(widths.items()))
17
18
19 def iter_rows(rows, col_count):
20 for row in rows:
21 row = tuple(row)
22 yield row + ("",) * (col_count - len(row))
23
24
25 def wrap_text(
26 text, width=78, initial_indent="", subsequent_indent="", preserve_paragraphs=False
27 ):
28 """A helper function that intelligently wraps text. By default, it
29 assumes that it operates on a single paragraph of text but if the
30 `preserve_paragraphs` parameter is provided it will intelligently
31 handle paragraphs (defined by two empty lines).
32
33 If paragraphs are handled, a paragraph can be prefixed with an empty
34 line containing the ``\\b`` character (``\\x08``) to indicate that
35 no rewrapping should happen in that block.
36
37 :param text: the text that should be rewrapped.
38 :param width: the maximum width for the text.
39 :param initial_indent: the initial indent that should be placed on the
40 first line as a string.
41 :param subsequent_indent: the indent string that should be placed on
42 each consecutive line.
43 :param preserve_paragraphs: if this flag is set then the wrapping will
44 intelligently handle paragraphs.
45 """
46 from ._textwrap import TextWrapper
47
48 text = text.expandtabs()
49 wrapper = TextWrapper(
50 width,
51 initial_indent=initial_indent,
52 subsequent_indent=subsequent_indent,
53 replace_whitespace=False,
54 )
55 if not preserve_paragraphs:
56 return wrapper.fill(text)
57
58 p = []
59 buf = []
60 indent = None
61
62 def _flush_par():
63 if not buf:
64 return
65 if buf[0].strip() == "\b":
66 p.append((indent or 0, True, "\n".join(buf[1:])))
67 else:
68 p.append((indent or 0, False, " ".join(buf)))
69 del buf[:]
70
71 for line in text.splitlines():
72 if not line:
73 _flush_par()
74 indent = None
75 else:
76 if indent is None:
77 orig_len = term_len(line)
78 line = line.lstrip()
79 indent = orig_len - term_len(line)
80 buf.append(line)
81 _flush_par()
82
83 rv = []
84 for indent, raw, text in p:
85 with wrapper.extra_indent(" " * indent):
86 if raw:
87 rv.append(wrapper.indent_only(text))
88 else:
89 rv.append(wrapper.fill(text))
90
91 return "\n\n".join(rv)
92
93
94 class HelpFormatter:
95 """This class helps with formatting text-based help pages. It's
96 usually just needed for very special internal cases, but it's also
97 exposed so that developers can write their own fancy outputs.
98
99 At present, it always writes into memory.
100
101 :param indent_increment: the additional increment for each level.
102 :param width: the width for the text. This defaults to the terminal
103 width clamped to a maximum of 78.
104 """
105
106 def __init__(self, indent_increment=2, width=None, max_width=None):
107 import shutil
108
109 self.indent_increment = indent_increment
110 if max_width is None:
111 max_width = 80
112 if width is None:
113 width = FORCED_WIDTH
114 if width is None:
115 width = max(min(shutil.get_terminal_size().columns, max_width) - 2, 50)
116 self.width = width
117 self.current_indent = 0
118 self.buffer = []
119
120 def write(self, string):
121 """Writes a unicode string into the internal buffer."""
122 self.buffer.append(string)
123
124 def indent(self):
125 """Increases the indentation."""
126 self.current_indent += self.indent_increment
127
128 def dedent(self):
129 """Decreases the indentation."""
130 self.current_indent -= self.indent_increment
131
132 def write_usage(self, prog, args="", prefix="Usage: "):
133 """Writes a usage line into the buffer.
134
135 :param prog: the program name.
136 :param args: whitespace separated list of arguments.
137 :param prefix: the prefix for the first line.
138 """
139 usage_prefix = f"{prefix:>{self.current_indent}}{prog} "
140 text_width = self.width - self.current_indent
141
142 if text_width >= (term_len(usage_prefix) + 20):
143 # The arguments will fit to the right of the prefix.
144 indent = " " * term_len(usage_prefix)
145 self.write(
146 wrap_text(
147 args,
148 text_width,
149 initial_indent=usage_prefix,
150 subsequent_indent=indent,
151 )
152 )
153 else:
154 # The prefix is too long, put the arguments on the next line.
155 self.write(usage_prefix)
156 self.write("\n")
157 indent = " " * (max(self.current_indent, term_len(prefix)) + 4)
158 self.write(
159 wrap_text(
160 args, text_width, initial_indent=indent, subsequent_indent=indent
161 )
162 )
163
164 self.write("\n")
165
166 def write_heading(self, heading):
167 """Writes a heading into the buffer."""
168 self.write(f"{'':>{self.current_indent}}{heading}:\n")
169
170 def write_paragraph(self):
171 """Writes a paragraph into the buffer."""
172 if self.buffer:
173 self.write("\n")
174
175 def write_text(self, text):
176 """Writes re-indented text into the buffer. This rewraps and
177 preserves paragraphs.
178 """
179 text_width = max(self.width - self.current_indent, 11)
180 indent = " " * self.current_indent
181 self.write(
182 wrap_text(
183 text,
184 text_width,
185 initial_indent=indent,
186 subsequent_indent=indent,
187 preserve_paragraphs=True,
188 )
189 )
190 self.write("\n")
191
192 def write_dl(self, rows, col_max=30, col_spacing=2):
193 """Writes a definition list into the buffer. This is how options
194 and commands are usually formatted.
195
196 :param rows: a list of two item tuples for the terms and values.
197 :param col_max: the maximum width of the first column.
198 :param col_spacing: the number of spaces between the first and
199 second column.
200 """
201 rows = list(rows)
202 widths = measure_table(rows)
203 if len(widths) != 2:
204 raise TypeError("Expected two columns for definition list")
205
206 first_col = min(widths[0], col_max) + col_spacing
207
208 for first, second in iter_rows(rows, len(widths)):
209 self.write(f"{'':>{self.current_indent}}{first}")
210 if not second:
211 self.write("\n")
212 continue
213 if term_len(first) <= first_col - col_spacing:
214 self.write(" " * (first_col - term_len(first)))
215 else:
216 self.write("\n")
217 self.write(" " * (first_col + self.current_indent))
218
219 text_width = max(self.width - first_col - 2, 10)
220 wrapped_text = wrap_text(second, text_width, preserve_paragraphs=True)
221 lines = wrapped_text.splitlines()
222
223 if lines:
224 self.write(f"{lines[0]}\n")
225
226 for line in lines[1:]:
227 self.write(f"{'':>{first_col + self.current_indent}}{line}\n")
228
229 if len(lines) > 1:
230 # separate long help from next option
231 self.write("\n")
232 else:
233 self.write("\n")
234
235 @contextmanager
236 def section(self, name):
237 """Helpful context manager that writes a paragraph, a heading,
238 and the indents.
239
240 :param name: the section name that is written as heading.
241 """
242 self.write_paragraph()
243 self.write_heading(name)
244 self.indent()
245 try:
246 yield
247 finally:
248 self.dedent()
249
250 @contextmanager
251 def indentation(self):
252 """A context manager that increases the indentation."""
253 self.indent()
254 try:
255 yield
256 finally:
257 self.dedent()
258
259 def getvalue(self):
260 """Returns the buffer contents."""
261 return "".join(self.buffer)
262
263
264 def join_options(options):
265 """Given a list of option strings this joins them in the most appropriate
266 way and returns them in the form ``(formatted_string,
267 any_prefix_is_slash)`` where the second item in the tuple is a flag that
268 indicates if any of the option prefixes was a slash.
269 """
270 rv = []
271 any_prefix_is_slash = False
272 for opt in options:
273 prefix = split_opt(opt)[0]
274 if prefix == "/":
275 any_prefix_is_slash = True
276 rv.append((len(prefix), opt))
277
278 rv.sort(key=lambda x: x[0])
279
280 rv = ", ".join(x[1] for x in rv)
281 return rv, any_prefix_is_slash
282
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/click/formatting.py b/src/click/formatting.py
--- a/src/click/formatting.py
+++ b/src/click/formatting.py
@@ -225,10 +225,6 @@
for line in lines[1:]:
self.write(f"{'':>{first_col + self.current_indent}}{line}\n")
-
- if len(lines) > 1:
- # separate long help from next option
- self.write("\n")
else:
self.write("\n")
| {"golden_diff": "diff --git a/src/click/formatting.py b/src/click/formatting.py\n--- a/src/click/formatting.py\n+++ b/src/click/formatting.py\n@@ -225,10 +225,6 @@\n \n for line in lines[1:]:\n self.write(f\"{'':>{first_col + self.current_indent}}{line}\\n\")\n-\n- if len(lines) > 1:\n- # separate long help from next option\n- self.write(\"\\n\")\n else:\n self.write(\"\\n\")\n", "issue": "Reconsider the choice of adding a newline after multi-line option definitions\nFirst, thanks for your work.\r\n\r\nI ask you to reconsider the feature introduced with PR https://github.com/pallets/click/pull/1081.\r\n\r\n1. Adding a newline only to some options feels inconsistent and leads to a weird-looking \"non-uniform\" help strings. It's even worse when you use an extension that adds new help sections (e.g. [Cloup](https://github.com/janluke/cloup) for option groups), since some sections are not clearly demarked or too much demarked. It looks like a complete mess.\r\n\r\n2. I'm pretty sure it's non-standard. Why should it be the default?\r\n\r\nAs a consequence, it feels like a bug. I did mistake it for a bug. Another developer reported it as a bug (issue https://github.com/pallets/click/issues/1559). The few people I asked don't like it and consider it a problem worth the effort of writing additional code to get rid of it. Most people in the original issue (https://github.com/pallets/click/issues/1075) are for an all-or-nothing behavior and described the current behviour as inconsistent as well.\r\n\r\nHere's some alternative proposals.\r\n1. Remove the feature.\r\n2. Make it possible but non-default. Two solutions here:\r\n 1. Add a parameter to `HelpFormatter`. It can be made as simple as a boolean or as complex as a \"list item separation strategy\". A user can pass a custom factory function as `Context.formatter_class` (which probably could be called `formatter_factory`).\r\n 2. Refactor `HelpFormatter` to make it easy for people to override without copying, pasting and modiyfing the current code of `HelpFormatter.write_dl`.\r\n\r\nThank you again.\r\n\n", "before_files": [{"content": "import typing as t\nfrom contextlib import contextmanager\n\nfrom ._compat import term_len\nfrom .parser import split_opt\n\n# Can force a width. This is used by the test system\nFORCED_WIDTH: t.Optional[int] = None\n\n\ndef measure_table(rows):\n widths = {}\n for row in rows:\n for idx, col in enumerate(row):\n widths[idx] = max(widths.get(idx, 0), term_len(col))\n return tuple(y for x, y in sorted(widths.items()))\n\n\ndef iter_rows(rows, col_count):\n for row in rows:\n row = tuple(row)\n yield row + (\"\",) * (col_count - len(row))\n\n\ndef wrap_text(\n text, width=78, initial_indent=\"\", subsequent_indent=\"\", preserve_paragraphs=False\n):\n \"\"\"A helper function that intelligently wraps text. 
By default, it\n assumes that it operates on a single paragraph of text but if the\n `preserve_paragraphs` parameter is provided it will intelligently\n handle paragraphs (defined by two empty lines).\n\n If paragraphs are handled, a paragraph can be prefixed with an empty\n line containing the ``\\\\b`` character (``\\\\x08``) to indicate that\n no rewrapping should happen in that block.\n\n :param text: the text that should be rewrapped.\n :param width: the maximum width for the text.\n :param initial_indent: the initial indent that should be placed on the\n first line as a string.\n :param subsequent_indent: the indent string that should be placed on\n each consecutive line.\n :param preserve_paragraphs: if this flag is set then the wrapping will\n intelligently handle paragraphs.\n \"\"\"\n from ._textwrap import TextWrapper\n\n text = text.expandtabs()\n wrapper = TextWrapper(\n width,\n initial_indent=initial_indent,\n subsequent_indent=subsequent_indent,\n replace_whitespace=False,\n )\n if not preserve_paragraphs:\n return wrapper.fill(text)\n\n p = []\n buf = []\n indent = None\n\n def _flush_par():\n if not buf:\n return\n if buf[0].strip() == \"\\b\":\n p.append((indent or 0, True, \"\\n\".join(buf[1:])))\n else:\n p.append((indent or 0, False, \" \".join(buf)))\n del buf[:]\n\n for line in text.splitlines():\n if not line:\n _flush_par()\n indent = None\n else:\n if indent is None:\n orig_len = term_len(line)\n line = line.lstrip()\n indent = orig_len - term_len(line)\n buf.append(line)\n _flush_par()\n\n rv = []\n for indent, raw, text in p:\n with wrapper.extra_indent(\" \" * indent):\n if raw:\n rv.append(wrapper.indent_only(text))\n else:\n rv.append(wrapper.fill(text))\n\n return \"\\n\\n\".join(rv)\n\n\nclass HelpFormatter:\n \"\"\"This class helps with formatting text-based help pages. It's\n usually just needed for very special internal cases, but it's also\n exposed so that developers can write their own fancy outputs.\n\n At present, it always writes into memory.\n\n :param indent_increment: the additional increment for each level.\n :param width: the width for the text. 
This defaults to the terminal\n width clamped to a maximum of 78.\n \"\"\"\n\n def __init__(self, indent_increment=2, width=None, max_width=None):\n import shutil\n\n self.indent_increment = indent_increment\n if max_width is None:\n max_width = 80\n if width is None:\n width = FORCED_WIDTH\n if width is None:\n width = max(min(shutil.get_terminal_size().columns, max_width) - 2, 50)\n self.width = width\n self.current_indent = 0\n self.buffer = []\n\n def write(self, string):\n \"\"\"Writes a unicode string into the internal buffer.\"\"\"\n self.buffer.append(string)\n\n def indent(self):\n \"\"\"Increases the indentation.\"\"\"\n self.current_indent += self.indent_increment\n\n def dedent(self):\n \"\"\"Decreases the indentation.\"\"\"\n self.current_indent -= self.indent_increment\n\n def write_usage(self, prog, args=\"\", prefix=\"Usage: \"):\n \"\"\"Writes a usage line into the buffer.\n\n :param prog: the program name.\n :param args: whitespace separated list of arguments.\n :param prefix: the prefix for the first line.\n \"\"\"\n usage_prefix = f\"{prefix:>{self.current_indent}}{prog} \"\n text_width = self.width - self.current_indent\n\n if text_width >= (term_len(usage_prefix) + 20):\n # The arguments will fit to the right of the prefix.\n indent = \" \" * term_len(usage_prefix)\n self.write(\n wrap_text(\n args,\n text_width,\n initial_indent=usage_prefix,\n subsequent_indent=indent,\n )\n )\n else:\n # The prefix is too long, put the arguments on the next line.\n self.write(usage_prefix)\n self.write(\"\\n\")\n indent = \" \" * (max(self.current_indent, term_len(prefix)) + 4)\n self.write(\n wrap_text(\n args, text_width, initial_indent=indent, subsequent_indent=indent\n )\n )\n\n self.write(\"\\n\")\n\n def write_heading(self, heading):\n \"\"\"Writes a heading into the buffer.\"\"\"\n self.write(f\"{'':>{self.current_indent}}{heading}:\\n\")\n\n def write_paragraph(self):\n \"\"\"Writes a paragraph into the buffer.\"\"\"\n if self.buffer:\n self.write(\"\\n\")\n\n def write_text(self, text):\n \"\"\"Writes re-indented text into the buffer. This rewraps and\n preserves paragraphs.\n \"\"\"\n text_width = max(self.width - self.current_indent, 11)\n indent = \" \" * self.current_indent\n self.write(\n wrap_text(\n text,\n text_width,\n initial_indent=indent,\n subsequent_indent=indent,\n preserve_paragraphs=True,\n )\n )\n self.write(\"\\n\")\n\n def write_dl(self, rows, col_max=30, col_spacing=2):\n \"\"\"Writes a definition list into the buffer. 
This is how options\n and commands are usually formatted.\n\n :param rows: a list of two item tuples for the terms and values.\n :param col_max: the maximum width of the first column.\n :param col_spacing: the number of spaces between the first and\n second column.\n \"\"\"\n rows = list(rows)\n widths = measure_table(rows)\n if len(widths) != 2:\n raise TypeError(\"Expected two columns for definition list\")\n\n first_col = min(widths[0], col_max) + col_spacing\n\n for first, second in iter_rows(rows, len(widths)):\n self.write(f\"{'':>{self.current_indent}}{first}\")\n if not second:\n self.write(\"\\n\")\n continue\n if term_len(first) <= first_col - col_spacing:\n self.write(\" \" * (first_col - term_len(first)))\n else:\n self.write(\"\\n\")\n self.write(\" \" * (first_col + self.current_indent))\n\n text_width = max(self.width - first_col - 2, 10)\n wrapped_text = wrap_text(second, text_width, preserve_paragraphs=True)\n lines = wrapped_text.splitlines()\n\n if lines:\n self.write(f\"{lines[0]}\\n\")\n\n for line in lines[1:]:\n self.write(f\"{'':>{first_col + self.current_indent}}{line}\\n\")\n\n if len(lines) > 1:\n # separate long help from next option\n self.write(\"\\n\")\n else:\n self.write(\"\\n\")\n\n @contextmanager\n def section(self, name):\n \"\"\"Helpful context manager that writes a paragraph, a heading,\n and the indents.\n\n :param name: the section name that is written as heading.\n \"\"\"\n self.write_paragraph()\n self.write_heading(name)\n self.indent()\n try:\n yield\n finally:\n self.dedent()\n\n @contextmanager\n def indentation(self):\n \"\"\"A context manager that increases the indentation.\"\"\"\n self.indent()\n try:\n yield\n finally:\n self.dedent()\n\n def getvalue(self):\n \"\"\"Returns the buffer contents.\"\"\"\n return \"\".join(self.buffer)\n\n\ndef join_options(options):\n \"\"\"Given a list of option strings this joins them in the most appropriate\n way and returns them in the form ``(formatted_string,\n any_prefix_is_slash)`` where the second item in the tuple is a flag that\n indicates if any of the option prefixes was a slash.\n \"\"\"\n rv = []\n any_prefix_is_slash = False\n for opt in options:\n prefix = split_opt(opt)[0]\n if prefix == \"/\":\n any_prefix_is_slash = True\n rv.append((len(prefix), opt))\n\n rv.sort(key=lambda x: x[0])\n\n rv = \", \".join(x[1] for x in rv)\n return rv, any_prefix_is_slash\n", "path": "src/click/formatting.py"}], "after_files": [{"content": "import typing as t\nfrom contextlib import contextmanager\n\nfrom ._compat import term_len\nfrom .parser import split_opt\n\n# Can force a width. This is used by the test system\nFORCED_WIDTH: t.Optional[int] = None\n\n\ndef measure_table(rows):\n widths = {}\n for row in rows:\n for idx, col in enumerate(row):\n widths[idx] = max(widths.get(idx, 0), term_len(col))\n return tuple(y for x, y in sorted(widths.items()))\n\n\ndef iter_rows(rows, col_count):\n for row in rows:\n row = tuple(row)\n yield row + (\"\",) * (col_count - len(row))\n\n\ndef wrap_text(\n text, width=78, initial_indent=\"\", subsequent_indent=\"\", preserve_paragraphs=False\n):\n \"\"\"A helper function that intelligently wraps text. 
By default, it\n assumes that it operates on a single paragraph of text but if the\n `preserve_paragraphs` parameter is provided it will intelligently\n handle paragraphs (defined by two empty lines).\n\n If paragraphs are handled, a paragraph can be prefixed with an empty\n line containing the ``\\\\b`` character (``\\\\x08``) to indicate that\n no rewrapping should happen in that block.\n\n :param text: the text that should be rewrapped.\n :param width: the maximum width for the text.\n :param initial_indent: the initial indent that should be placed on the\n first line as a string.\n :param subsequent_indent: the indent string that should be placed on\n each consecutive line.\n :param preserve_paragraphs: if this flag is set then the wrapping will\n intelligently handle paragraphs.\n \"\"\"\n from ._textwrap import TextWrapper\n\n text = text.expandtabs()\n wrapper = TextWrapper(\n width,\n initial_indent=initial_indent,\n subsequent_indent=subsequent_indent,\n replace_whitespace=False,\n )\n if not preserve_paragraphs:\n return wrapper.fill(text)\n\n p = []\n buf = []\n indent = None\n\n def _flush_par():\n if not buf:\n return\n if buf[0].strip() == \"\\b\":\n p.append((indent or 0, True, \"\\n\".join(buf[1:])))\n else:\n p.append((indent or 0, False, \" \".join(buf)))\n del buf[:]\n\n for line in text.splitlines():\n if not line:\n _flush_par()\n indent = None\n else:\n if indent is None:\n orig_len = term_len(line)\n line = line.lstrip()\n indent = orig_len - term_len(line)\n buf.append(line)\n _flush_par()\n\n rv = []\n for indent, raw, text in p:\n with wrapper.extra_indent(\" \" * indent):\n if raw:\n rv.append(wrapper.indent_only(text))\n else:\n rv.append(wrapper.fill(text))\n\n return \"\\n\\n\".join(rv)\n\n\nclass HelpFormatter:\n \"\"\"This class helps with formatting text-based help pages. It's\n usually just needed for very special internal cases, but it's also\n exposed so that developers can write their own fancy outputs.\n\n At present, it always writes into memory.\n\n :param indent_increment: the additional increment for each level.\n :param width: the width for the text. 
This defaults to the terminal\n width clamped to a maximum of 78.\n \"\"\"\n\n def __init__(self, indent_increment=2, width=None, max_width=None):\n import shutil\n\n self.indent_increment = indent_increment\n if max_width is None:\n max_width = 80\n if width is None:\n width = FORCED_WIDTH\n if width is None:\n width = max(min(shutil.get_terminal_size().columns, max_width) - 2, 50)\n self.width = width\n self.current_indent = 0\n self.buffer = []\n\n def write(self, string):\n \"\"\"Writes a unicode string into the internal buffer.\"\"\"\n self.buffer.append(string)\n\n def indent(self):\n \"\"\"Increases the indentation.\"\"\"\n self.current_indent += self.indent_increment\n\n def dedent(self):\n \"\"\"Decreases the indentation.\"\"\"\n self.current_indent -= self.indent_increment\n\n def write_usage(self, prog, args=\"\", prefix=\"Usage: \"):\n \"\"\"Writes a usage line into the buffer.\n\n :param prog: the program name.\n :param args: whitespace separated list of arguments.\n :param prefix: the prefix for the first line.\n \"\"\"\n usage_prefix = f\"{prefix:>{self.current_indent}}{prog} \"\n text_width = self.width - self.current_indent\n\n if text_width >= (term_len(usage_prefix) + 20):\n # The arguments will fit to the right of the prefix.\n indent = \" \" * term_len(usage_prefix)\n self.write(\n wrap_text(\n args,\n text_width,\n initial_indent=usage_prefix,\n subsequent_indent=indent,\n )\n )\n else:\n # The prefix is too long, put the arguments on the next line.\n self.write(usage_prefix)\n self.write(\"\\n\")\n indent = \" \" * (max(self.current_indent, term_len(prefix)) + 4)\n self.write(\n wrap_text(\n args, text_width, initial_indent=indent, subsequent_indent=indent\n )\n )\n\n self.write(\"\\n\")\n\n def write_heading(self, heading):\n \"\"\"Writes a heading into the buffer.\"\"\"\n self.write(f\"{'':>{self.current_indent}}{heading}:\\n\")\n\n def write_paragraph(self):\n \"\"\"Writes a paragraph into the buffer.\"\"\"\n if self.buffer:\n self.write(\"\\n\")\n\n def write_text(self, text):\n \"\"\"Writes re-indented text into the buffer. This rewraps and\n preserves paragraphs.\n \"\"\"\n text_width = max(self.width - self.current_indent, 11)\n indent = \" \" * self.current_indent\n self.write(\n wrap_text(\n text,\n text_width,\n initial_indent=indent,\n subsequent_indent=indent,\n preserve_paragraphs=True,\n )\n )\n self.write(\"\\n\")\n\n def write_dl(self, rows, col_max=30, col_spacing=2):\n \"\"\"Writes a definition list into the buffer. 
This is how options\n and commands are usually formatted.\n\n :param rows: a list of two item tuples for the terms and values.\n :param col_max: the maximum width of the first column.\n :param col_spacing: the number of spaces between the first and\n second column.\n \"\"\"\n rows = list(rows)\n widths = measure_table(rows)\n if len(widths) != 2:\n raise TypeError(\"Expected two columns for definition list\")\n\n first_col = min(widths[0], col_max) + col_spacing\n\n for first, second in iter_rows(rows, len(widths)):\n self.write(f\"{'':>{self.current_indent}}{first}\")\n if not second:\n self.write(\"\\n\")\n continue\n if term_len(first) <= first_col - col_spacing:\n self.write(\" \" * (first_col - term_len(first)))\n else:\n self.write(\"\\n\")\n self.write(\" \" * (first_col + self.current_indent))\n\n text_width = max(self.width - first_col - 2, 10)\n wrapped_text = wrap_text(second, text_width, preserve_paragraphs=True)\n lines = wrapped_text.splitlines()\n\n if lines:\n self.write(f\"{lines[0]}\\n\")\n\n for line in lines[1:]:\n self.write(f\"{'':>{first_col + self.current_indent}}{line}\\n\")\n else:\n self.write(\"\\n\")\n\n @contextmanager\n def section(self, name):\n \"\"\"Helpful context manager that writes a paragraph, a heading,\n and the indents.\n\n :param name: the section name that is written as heading.\n \"\"\"\n self.write_paragraph()\n self.write_heading(name)\n self.indent()\n try:\n yield\n finally:\n self.dedent()\n\n @contextmanager\n def indentation(self):\n \"\"\"A context manager that increases the indentation.\"\"\"\n self.indent()\n try:\n yield\n finally:\n self.dedent()\n\n def getvalue(self):\n \"\"\"Returns the buffer contents.\"\"\"\n return \"\".join(self.buffer)\n\n\ndef join_options(options):\n \"\"\"Given a list of option strings this joins them in the most appropriate\n way and returns them in the form ``(formatted_string,\n any_prefix_is_slash)`` where the second item in the tuple is a flag that\n indicates if any of the option prefixes was a slash.\n \"\"\"\n rv = []\n any_prefix_is_slash = False\n for opt in options:\n prefix = split_opt(opt)[0]\n if prefix == \"/\":\n any_prefix_is_slash = True\n rv.append((len(prefix), opt))\n\n rv.sort(key=lambda x: x[0])\n\n rv = \", \".join(x[1] for x in rv)\n return rv, any_prefix_is_slash\n", "path": "src/click/formatting.py"}]} | 3,398 | 117 |
gh_patches_debug_39544 | rasdani/github-patches | git_diff | RedHatInsights__insights-core-1873 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Need to enhance parser "ls_parser" to parse the output of command 'ls -laRZ <dir-name>' in RHEL-8
The parser **ls_parser** needs to be enhanced to support parsing the output of the `ls` command with the `Z` option (for the SELinux context) on a RHEL8 system. In RHEL8, additional fields have been introduced in the output of `ls -laRZ`. For example:
RHEL7:
```
#/bin/ls -laRZ /var/lib/nova/instances
/var/lib/nova/instances:
drwxr-xr-x. nova nova system_u:object_r:nova_var_lib_t:s0 .
drwxr-xr-x. nova nova system_u:object_r:nova_var_lib_t:s0 ..
drwxr-xr-x. root root unconfined_u:object_r:nova_var_lib_t:s0 xxxx-xxxx-xxxx-xxxx
```
RHEL8:
```
#/bin/ls -laRZ /var/lib/nova/instances
/var/lib/nova/instances:
total 0
drwxr-xr-x. 3 root root unconfined_u:object_r:var_lib_t:s0 50 Apr 8 16:41 .
drwxr-xr-x. 3 root root unconfined_u:object_r:var_lib_t:s0 23 Apr 8 16:29 ..
drwxr-xr-x. 2 root root unconfined_u:object_r:var_lib_t:s0 54 Apr 8 16:41 xxxx-xxxx-xxxx-xxxx
```
--- END ISSUE ---
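Compared with the RHEL7 form, each RHEL8 entry adds a hard-link count, a size and a timestamp around the SELinux context, so a parser has to peel off three extra fields before reaching the file name. A rough, standalone sketch of that split (not the insights-core implementation itself; the fixed 12-character date column and a simple four-part SELinux context are assumptions):
```python
def parse_rhel8_selinux_line(line):
    """Split one RHEL8 ``ls -laRZ`` entry into its fields (illustration only)."""
    # e.g. "drwxr-xr-x. 2 root root unconfined_u:object_r:var_lib_t:s0 54 Apr 10 16:41 inst-1"
    perms, links, owner, group, rest = line.split(None, 4)
    context, size, rest = rest.split(None, 2)
    se_fields = context.split(":", 3) + [None] * 4   # pad in case mls is missing
    date, name = rest[:12], rest[13:]                # ls prints a 12-character date column
    return {
        "type": perms[0],
        "perms": perms[1:],
        "links": int(links),
        "owner": owner,
        "group": group,
        "se_user": se_fields[0],
        "se_role": se_fields[1],
        "se_type": se_fields[2],
        "se_mls": se_fields[3],
        "size": int(size),
        "date": date,
        "name": name,
    }


print(parse_rhel8_selinux_line(
    "drwxr-xr-x. 2 root root unconfined_u:object_r:var_lib_t:s0 54 Apr 10 16:41 xxxx-xxxx-xxxx-xxxx"
))
```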
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `insights/core/ls_parser.py`
Content:
```
1 """
2 This module contains logic for parsing ls output. It attempts to handle
3 output when selinux is enabled or disabled and also skip "bad" lines.
4 """
5 import six
6
7
8 def parse_path(path):
9 """
10 Convert possible symbolic link into a source -> target pair.
11
12 Args:
13 path (str): The path portion of an ls output line.
14
15 Returns:
16 A (path, link) tuple where path is always populated and link is a non
17 empty string if the original path is a symoblic link.
18 """
19 path, _, link = path.partition(" -> ")
20 return path, link
21
22
23 def parse_non_selinux(parts):
24 """
25 Parse part of an ls output line that isn't selinux.
26
27 Args:
28 parts (list): A four element list of strings representing the initial
29 parts of an ls line after the permission bits. The parts are link
30 count, owner, group, and everything else.
31
32 Returns:
33 A dict containing links, owner, group, date, and name. If the line
34 represented a device, major and minor numbers are included. Otherwise,
35 size is included. If the raw name was a symbolic link, link is
36 included.
37 """
38 links, owner, group, last = parts
39 result = {
40 "links": int(links),
41 "owner": owner,
42 "group": group,
43 }
44
45 # device numbers only go to 256.
46 # If a comma is in the first four characters, the next two elements are
47 # major and minor device numbers. Otherwise, the next element is the size.
48 if "," in last[:4]:
49 major, minor, rest = last.split(None, 2)
50 result["major"] = int(major.rstrip(","))
51 result["minor"] = int(minor)
52 else:
53 size, rest = last.split(None, 1)
54 result["size"] = int(size)
55
56 # The date part is always 12 characters regardless of content.
57 result["date"] = rest[:12]
58
59 # Jump over the date and the following space to get the path part.
60 path, link = parse_path(rest[13:])
61 result["name"] = path
62 if link:
63 result["link"] = link
64
65 return result
66
67
68 def parse_selinux(parts):
69 """
70 Parse part of an ls output line that is selinux.
71
72 Args:
73 parts (list): A four element list of strings representing the initial
74 parts of an ls line after the permission bits. The parts are owner
75 group, selinux info, and the path.
76
77 Returns:
78 A dict containing owner, group, se_user, se_role, se_type, se_mls, and
79 name. If the raw name was a symbolic link, link is always included.
80
81 """
82
83 owner, group = parts[:2]
84 selinux = parts[2].split(":")
85 lsel = len(selinux)
86 path, link = parse_path(parts[-1])
87 result = {
88 "owner": owner,
89 "group": group,
90 "se_user": selinux[0],
91 "se_role": selinux[1] if lsel > 1 else None,
92 "se_type": selinux[2] if lsel > 2 else None,
93 "se_mls": selinux[3] if lsel > 3 else None,
94 "name": path
95 }
96 if link:
97 result["link"] = link
98 return result
99
100
101 PASS_KEYS = set(["name", "total"])
102 DELAYED_KEYS = ["entries", "files", "dirs", "specials"]
103
104
105 class Directory(dict):
106 def __init__(self, name, total, body):
107 data = dict.fromkeys(DELAYED_KEYS)
108 data["name"] = name
109 data["total"] = total
110 self.body = body
111 self.loaded = False
112 super(Directory, self).__init__(data)
113
114 def iteritems(self):
115 if not self.loaded:
116 self._load()
117 return six.iteritems(super(Directory, self))
118
119 def items(self):
120 if not self.loaded:
121 self._load()
122 return super(Directory, self).items()
123
124 def values(self):
125 if not self.loaded:
126 self._load()
127 return super(Directory, self).values()
128
129 def get(self, key, default=None):
130 if not self.loaded:
131 self._load()
132 return super(Directory, self).get(key, default)
133
134 def _load(self):
135 dirs = []
136 ents = {}
137 files = []
138 specials = []
139 for line in self.body:
140 parts = line.split(None, 4)
141 perms = parts[0]
142 typ = perms[0]
143 entry = {
144 "type": typ,
145 "perms": perms[1:]
146 }
147 if parts[1][0].isdigit():
148 rest = parse_non_selinux(parts[1:])
149 else:
150 rest = parse_selinux(parts[1:])
151
152 # Update our entry and put it into the correct buckets
153 # based on its type.
154 entry.update(rest)
155 entry["raw_entry"] = line
156 entry["dir"] = self["name"]
157 nm = entry["name"]
158 ents[nm] = entry
159 if typ not in "bcd":
160 files.append(nm)
161 elif typ == "d":
162 dirs.append(nm)
163 elif typ in "bc":
164 specials.append(nm)
165
166 self.update({"entries": ents,
167 "files": files,
168 "dirs": dirs,
169 "specials": specials})
170
171 self.loaded = True
172 del self.body
173
174 def __getitem__(self, key):
175 if self.loaded or key in PASS_KEYS:
176 return super(Directory, self).__getitem__(key)
177 self._load()
178 return super(Directory, self).__getitem__(key)
179
180
181 def parse(lines, root=None):
182 """
183 Parses a list of lines from ls into dictionaries representing their
184 components.
185
186 Args:
187 lines (list): A list of lines generated by ls.
188 root (str): The directory name to be used for ls output stanzas that
189 don't have a name.
190
191 Returns:
192 A dictionary representing the ls output. It's keyed by the path
193 containing each ls stanza.
194 """
195 doc = {}
196 entries = []
197 name = None
198 total = None
199 for line in lines:
200 line = line.strip()
201 if not line:
202 continue
203 if line and line[0] == "/" and line[-1] == ":":
204 if name is None:
205 name = line[:-1]
206 if entries:
207 d = Directory(name, total or len(entries), entries)
208 doc[root] = d
209 total = None
210 entries = []
211 else:
212 d = Directory(name, total or len(entries), entries)
213 doc[name or root] = d
214 total = None
215 entries = []
216 name = line[:-1]
217 continue
218 if line.startswith("total"):
219 total = int(line.split(None, 1)[1])
220 continue
221 entries.append(line)
222 name = name or root
223 doc[name] = Directory(name, total or len(entries), entries)
224 return doc
225
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/insights/core/ls_parser.py b/insights/core/ls_parser.py
--- a/insights/core/ls_parser.py
+++ b/insights/core/ls_parser.py
@@ -76,7 +76,7 @@
Returns:
A dict containing owner, group, se_user, se_role, se_type, se_mls, and
- name. If the raw name was a symbolic link, link is always included.
+ name. If the raw name was a symbolic link, link is also included.
"""
@@ -98,6 +98,47 @@
return result
+def parse_rhel8_selinux(parts):
+ """
+ Parse part of an ls output line that is selinux on RHEL8.
+
+ Args:
+ parts (list): A four element list of strings representing the initial
+ parts of an ls line after the permission bits. The parts are link
+ count, owner, group, and everything else
+
+ Returns:
+ A dict containing links, owner, group, se_user, se_role, se_type,
+ se_mls, size, date, and name. If the raw name was a symbolic link,
+ link is also included.
+
+ """
+
+ links, owner, group, last = parts
+
+ selinux = parts[3].split(":")
+ lsel = len(selinux)
+ selinux, size, last = parts[-1].split(None, 2)
+ selinux = selinux.split(":")
+ date = last[:12]
+ path, link = parse_path(last[13:])
+ result = {
+ "links": int(links),
+ "owner": owner,
+ "group": group,
+ "se_user": selinux[0],
+ "se_role": selinux[1] if lsel > 1 else None,
+ "se_type": selinux[2] if lsel > 2 else None,
+ "se_mls": selinux[3] if lsel > 3 else None,
+ "size": int(size),
+ "name": path,
+ "date": date,
+ }
+ if link:
+ result["link"] = link
+ return result
+
+
PASS_KEYS = set(["name", "total"])
DELAYED_KEYS = ["entries", "files", "dirs", "specials"]
@@ -137,6 +178,10 @@
files = []
specials = []
for line in self.body:
+ # we can't split(None, 5) here b/c rhel 6/7 selinux lines only have
+ # 4 parts before the path, and the path itself could contain
+ # spaces. Unfortunately, this means we have to split the line again
+ # below
parts = line.split(None, 4)
perms = parts[0]
typ = perms[0]
@@ -145,7 +190,13 @@
"perms": perms[1:]
}
if parts[1][0].isdigit():
- rest = parse_non_selinux(parts[1:])
+ # We have to split the line again to see if this is a RHEL8
+ # selinux stanza. This assumes that the context section will
+ # always have at least two pieces separated by ':'.
+ if ":" in line.split()[4]:
+ rest = parse_rhel8_selinux(parts[1:])
+ else:
+ rest = parse_non_selinux(parts[1:])
else:
rest = parse_selinux(parts[1:])
@@ -220,5 +271,6 @@
continue
entries.append(line)
name = name or root
- doc[name] = Directory(name, total or len(entries), entries)
+ total = total if total is not None else len(entries)
+ doc[name] = Directory(name, total, entries)
return doc
| {"golden_diff": "diff --git a/insights/core/ls_parser.py b/insights/core/ls_parser.py\n--- a/insights/core/ls_parser.py\n+++ b/insights/core/ls_parser.py\n@@ -76,7 +76,7 @@\n \n Returns:\n A dict containing owner, group, se_user, se_role, se_type, se_mls, and\n- name. If the raw name was a symbolic link, link is always included.\n+ name. If the raw name was a symbolic link, link is also included.\n \n \"\"\"\n \n@@ -98,6 +98,47 @@\n return result\n \n \n+def parse_rhel8_selinux(parts):\n+ \"\"\"\n+ Parse part of an ls output line that is selinux on RHEL8.\n+\n+ Args:\n+ parts (list): A four element list of strings representing the initial\n+ parts of an ls line after the permission bits. The parts are link\n+ count, owner, group, and everything else\n+\n+ Returns:\n+ A dict containing links, owner, group, se_user, se_role, se_type,\n+ se_mls, size, date, and name. If the raw name was a symbolic link,\n+ link is also included.\n+\n+ \"\"\"\n+\n+ links, owner, group, last = parts\n+\n+ selinux = parts[3].split(\":\")\n+ lsel = len(selinux)\n+ selinux, size, last = parts[-1].split(None, 2)\n+ selinux = selinux.split(\":\")\n+ date = last[:12]\n+ path, link = parse_path(last[13:])\n+ result = {\n+ \"links\": int(links),\n+ \"owner\": owner,\n+ \"group\": group,\n+ \"se_user\": selinux[0],\n+ \"se_role\": selinux[1] if lsel > 1 else None,\n+ \"se_type\": selinux[2] if lsel > 2 else None,\n+ \"se_mls\": selinux[3] if lsel > 3 else None,\n+ \"size\": int(size),\n+ \"name\": path,\n+ \"date\": date,\n+ }\n+ if link:\n+ result[\"link\"] = link\n+ return result\n+\n+\n PASS_KEYS = set([\"name\", \"total\"])\n DELAYED_KEYS = [\"entries\", \"files\", \"dirs\", \"specials\"]\n \n@@ -137,6 +178,10 @@\n files = []\n specials = []\n for line in self.body:\n+ # we can't split(None, 5) here b/c rhel 6/7 selinux lines only have\n+ # 4 parts before the path, and the path itself could contain\n+ # spaces. Unfortunately, this means we have to split the line again\n+ # below\n parts = line.split(None, 4)\n perms = parts[0]\n typ = perms[0]\n@@ -145,7 +190,13 @@\n \"perms\": perms[1:]\n }\n if parts[1][0].isdigit():\n- rest = parse_non_selinux(parts[1:])\n+ # We have to split the line again to see if this is a RHEL8\n+ # selinux stanza. This assumes that the context section will\n+ # always have at least two pieces separated by ':'.\n+ if \":\" in line.split()[4]:\n+ rest = parse_rhel8_selinux(parts[1:])\n+ else:\n+ rest = parse_non_selinux(parts[1:])\n else:\n rest = parse_selinux(parts[1:])\n \n@@ -220,5 +271,6 @@\n continue\n entries.append(line)\n name = name or root\n- doc[name] = Directory(name, total or len(entries), entries)\n+ total = total if total is not None else len(entries)\n+ doc[name] = Directory(name, total, entries)\n return doc\n", "issue": "Need to enhance parser \"ls_parser\" to parse the output of command 'ls -laRZ <dir-name>' in RHEL-8 \nThe parser **ls_parser** needs to be enhanced to support parsing the output of command `ls` with an option `Z` (for selinux context) in a RHEL8 system. In RHEL8, additional fields have been introduced in the output of command `ls -laRZ`. For example:\r\n\r\nRHEL7: \r\n\r\n```\r\n#/bin/ls -laRZ /var/lib/nova/instances\r\n/var/lib/nova/instances:\r\ndrwxr-xr-x. nova nova system_u:object_r:nova_var_lib_t:s0 .\r\ndrwxr-xr-x. nova nova system_u:object_r:nova_var_lib_t:s0 ..\r\ndrwxr-xr-x. 
root root unconfined_u:object_r:nova_var_lib_t:s0 xxxx-xxxx-xxxx-xxxx\r\n```\r\n\r\nRHEL8: \r\n\r\n```\r\n#/bin/ls -laRZ /var/lib/nova/instances\r\n/var/lib/nova/instances:\r\ntotal 0\r\ndrwxr-xr-x. 3 root root unconfined_u:object_r:var_lib_t:s0 50 Apr 8 16:41 .\r\ndrwxr-xr-x. 3 root root unconfined_u:object_r:var_lib_t:s0 23 Apr 8 16:29 ..\r\ndrwxr-xr-x. 2 root root unconfined_u:object_r:var_lib_t:s0 54 Apr 8 16:41 xxxx-xxxx-xxxx-xxxx\r\n```\n", "before_files": [{"content": "\"\"\"\nThis module contains logic for parsing ls output. It attempts to handle\noutput when selinux is enabled or disabled and also skip \"bad\" lines.\n\"\"\"\nimport six\n\n\ndef parse_path(path):\n \"\"\"\n Convert possible symbolic link into a source -> target pair.\n\n Args:\n path (str): The path portion of an ls output line.\n\n Returns:\n A (path, link) tuple where path is always populated and link is a non\n empty string if the original path is a symoblic link.\n \"\"\"\n path, _, link = path.partition(\" -> \")\n return path, link\n\n\ndef parse_non_selinux(parts):\n \"\"\"\n Parse part of an ls output line that isn't selinux.\n\n Args:\n parts (list): A four element list of strings representing the initial\n parts of an ls line after the permission bits. The parts are link\n count, owner, group, and everything else.\n\n Returns:\n A dict containing links, owner, group, date, and name. If the line\n represented a device, major and minor numbers are included. Otherwise,\n size is included. If the raw name was a symbolic link, link is\n included.\n \"\"\"\n links, owner, group, last = parts\n result = {\n \"links\": int(links),\n \"owner\": owner,\n \"group\": group,\n }\n\n # device numbers only go to 256.\n # If a comma is in the first four characters, the next two elements are\n # major and minor device numbers. Otherwise, the next element is the size.\n if \",\" in last[:4]:\n major, minor, rest = last.split(None, 2)\n result[\"major\"] = int(major.rstrip(\",\"))\n result[\"minor\"] = int(minor)\n else:\n size, rest = last.split(None, 1)\n result[\"size\"] = int(size)\n\n # The date part is always 12 characters regardless of content.\n result[\"date\"] = rest[:12]\n\n # Jump over the date and the following space to get the path part.\n path, link = parse_path(rest[13:])\n result[\"name\"] = path\n if link:\n result[\"link\"] = link\n\n return result\n\n\ndef parse_selinux(parts):\n \"\"\"\n Parse part of an ls output line that is selinux.\n\n Args:\n parts (list): A four element list of strings representing the initial\n parts of an ls line after the permission bits. The parts are owner\n group, selinux info, and the path.\n\n Returns:\n A dict containing owner, group, se_user, se_role, se_type, se_mls, and\n name. 
If the raw name was a symbolic link, link is always included.\n\n \"\"\"\n\n owner, group = parts[:2]\n selinux = parts[2].split(\":\")\n lsel = len(selinux)\n path, link = parse_path(parts[-1])\n result = {\n \"owner\": owner,\n \"group\": group,\n \"se_user\": selinux[0],\n \"se_role\": selinux[1] if lsel > 1 else None,\n \"se_type\": selinux[2] if lsel > 2 else None,\n \"se_mls\": selinux[3] if lsel > 3 else None,\n \"name\": path\n }\n if link:\n result[\"link\"] = link\n return result\n\n\nPASS_KEYS = set([\"name\", \"total\"])\nDELAYED_KEYS = [\"entries\", \"files\", \"dirs\", \"specials\"]\n\n\nclass Directory(dict):\n def __init__(self, name, total, body):\n data = dict.fromkeys(DELAYED_KEYS)\n data[\"name\"] = name\n data[\"total\"] = total\n self.body = body\n self.loaded = False\n super(Directory, self).__init__(data)\n\n def iteritems(self):\n if not self.loaded:\n self._load()\n return six.iteritems(super(Directory, self))\n\n def items(self):\n if not self.loaded:\n self._load()\n return super(Directory, self).items()\n\n def values(self):\n if not self.loaded:\n self._load()\n return super(Directory, self).values()\n\n def get(self, key, default=None):\n if not self.loaded:\n self._load()\n return super(Directory, self).get(key, default)\n\n def _load(self):\n dirs = []\n ents = {}\n files = []\n specials = []\n for line in self.body:\n parts = line.split(None, 4)\n perms = parts[0]\n typ = perms[0]\n entry = {\n \"type\": typ,\n \"perms\": perms[1:]\n }\n if parts[1][0].isdigit():\n rest = parse_non_selinux(parts[1:])\n else:\n rest = parse_selinux(parts[1:])\n\n # Update our entry and put it into the correct buckets\n # based on its type.\n entry.update(rest)\n entry[\"raw_entry\"] = line\n entry[\"dir\"] = self[\"name\"]\n nm = entry[\"name\"]\n ents[nm] = entry\n if typ not in \"bcd\":\n files.append(nm)\n elif typ == \"d\":\n dirs.append(nm)\n elif typ in \"bc\":\n specials.append(nm)\n\n self.update({\"entries\": ents,\n \"files\": files,\n \"dirs\": dirs,\n \"specials\": specials})\n\n self.loaded = True\n del self.body\n\n def __getitem__(self, key):\n if self.loaded or key in PASS_KEYS:\n return super(Directory, self).__getitem__(key)\n self._load()\n return super(Directory, self).__getitem__(key)\n\n\ndef parse(lines, root=None):\n \"\"\"\n Parses a list of lines from ls into dictionaries representing their\n components.\n\n Args:\n lines (list): A list of lines generated by ls.\n root (str): The directory name to be used for ls output stanzas that\n don't have a name.\n\n Returns:\n A dictionary representing the ls output. It's keyed by the path\n containing each ls stanza.\n \"\"\"\n doc = {}\n entries = []\n name = None\n total = None\n for line in lines:\n line = line.strip()\n if not line:\n continue\n if line and line[0] == \"/\" and line[-1] == \":\":\n if name is None:\n name = line[:-1]\n if entries:\n d = Directory(name, total or len(entries), entries)\n doc[root] = d\n total = None\n entries = []\n else:\n d = Directory(name, total or len(entries), entries)\n doc[name or root] = d\n total = None\n entries = []\n name = line[:-1]\n continue\n if line.startswith(\"total\"):\n total = int(line.split(None, 1)[1])\n continue\n entries.append(line)\n name = name or root\n doc[name] = Directory(name, total or len(entries), entries)\n return doc\n", "path": "insights/core/ls_parser.py"}], "after_files": [{"content": "\"\"\"\nThis module contains logic for parsing ls output. 
It attempts to handle\noutput when selinux is enabled or disabled and also skip \"bad\" lines.\n\"\"\"\nimport six\n\n\ndef parse_path(path):\n \"\"\"\n Convert possible symbolic link into a source -> target pair.\n\n Args:\n path (str): The path portion of an ls output line.\n\n Returns:\n A (path, link) tuple where path is always populated and link is a non\n empty string if the original path is a symoblic link.\n \"\"\"\n path, _, link = path.partition(\" -> \")\n return path, link\n\n\ndef parse_non_selinux(parts):\n \"\"\"\n Parse part of an ls output line that isn't selinux.\n\n Args:\n parts (list): A four element list of strings representing the initial\n parts of an ls line after the permission bits. The parts are link\n count, owner, group, and everything else.\n\n Returns:\n A dict containing links, owner, group, date, and name. If the line\n represented a device, major and minor numbers are included. Otherwise,\n size is included. If the raw name was a symbolic link, link is\n included.\n \"\"\"\n links, owner, group, last = parts\n result = {\n \"links\": int(links),\n \"owner\": owner,\n \"group\": group,\n }\n\n # device numbers only go to 256.\n # If a comma is in the first four characters, the next two elements are\n # major and minor device numbers. Otherwise, the next element is the size.\n if \",\" in last[:4]:\n major, minor, rest = last.split(None, 2)\n result[\"major\"] = int(major.rstrip(\",\"))\n result[\"minor\"] = int(minor)\n else:\n size, rest = last.split(None, 1)\n result[\"size\"] = int(size)\n\n # The date part is always 12 characters regardless of content.\n result[\"date\"] = rest[:12]\n\n # Jump over the date and the following space to get the path part.\n path, link = parse_path(rest[13:])\n result[\"name\"] = path\n if link:\n result[\"link\"] = link\n\n return result\n\n\ndef parse_selinux(parts):\n \"\"\"\n Parse part of an ls output line that is selinux.\n\n Args:\n parts (list): A four element list of strings representing the initial\n parts of an ls line after the permission bits. The parts are owner\n group, selinux info, and the path.\n\n Returns:\n A dict containing owner, group, se_user, se_role, se_type, se_mls, and\n name. If the raw name was a symbolic link, link is also included.\n\n \"\"\"\n\n owner, group = parts[:2]\n selinux = parts[2].split(\":\")\n lsel = len(selinux)\n path, link = parse_path(parts[-1])\n result = {\n \"owner\": owner,\n \"group\": group,\n \"se_user\": selinux[0],\n \"se_role\": selinux[1] if lsel > 1 else None,\n \"se_type\": selinux[2] if lsel > 2 else None,\n \"se_mls\": selinux[3] if lsel > 3 else None,\n \"name\": path\n }\n if link:\n result[\"link\"] = link\n return result\n\n\ndef parse_rhel8_selinux(parts):\n \"\"\"\n Parse part of an ls output line that is selinux on RHEL8.\n\n Args:\n parts (list): A four element list of strings representing the initial\n parts of an ls line after the permission bits. The parts are link\n count, owner, group, and everything else\n\n Returns:\n A dict containing links, owner, group, se_user, se_role, se_type,\n se_mls, size, date, and name. 
If the raw name was a symbolic link,\n link is also included.\n\n \"\"\"\n\n links, owner, group, last = parts\n\n selinux = parts[3].split(\":\")\n lsel = len(selinux)\n selinux, size, last = parts[-1].split(None, 2)\n selinux = selinux.split(\":\")\n date = last[:12]\n path, link = parse_path(last[13:])\n result = {\n \"links\": int(links),\n \"owner\": owner,\n \"group\": group,\n \"se_user\": selinux[0],\n \"se_role\": selinux[1] if lsel > 1 else None,\n \"se_type\": selinux[2] if lsel > 2 else None,\n \"se_mls\": selinux[3] if lsel > 3 else None,\n \"size\": int(size),\n \"name\": path,\n \"date\": date,\n }\n if link:\n result[\"link\"] = link\n return result\n\n\nPASS_KEYS = set([\"name\", \"total\"])\nDELAYED_KEYS = [\"entries\", \"files\", \"dirs\", \"specials\"]\n\n\nclass Directory(dict):\n def __init__(self, name, total, body):\n data = dict.fromkeys(DELAYED_KEYS)\n data[\"name\"] = name\n data[\"total\"] = total\n self.body = body\n self.loaded = False\n super(Directory, self).__init__(data)\n\n def iteritems(self):\n if not self.loaded:\n self._load()\n return six.iteritems(super(Directory, self))\n\n def items(self):\n if not self.loaded:\n self._load()\n return super(Directory, self).items()\n\n def values(self):\n if not self.loaded:\n self._load()\n return super(Directory, self).values()\n\n def get(self, key, default=None):\n if not self.loaded:\n self._load()\n return super(Directory, self).get(key, default)\n\n def _load(self):\n dirs = []\n ents = {}\n files = []\n specials = []\n for line in self.body:\n # we can't split(None, 5) here b/c rhel 6/7 selinux lines only have\n # 4 parts before the path, and the path itself could contain\n # spaces. Unfortunately, this means we have to split the line again\n # below\n parts = line.split(None, 4)\n perms = parts[0]\n typ = perms[0]\n entry = {\n \"type\": typ,\n \"perms\": perms[1:]\n }\n if parts[1][0].isdigit():\n # We have to split the line again to see if this is a RHEL8\n # selinux stanza. This assumes that the context section will\n # always have at least two pieces separated by ':'.\n if \":\" in line.split()[4]:\n rest = parse_rhel8_selinux(parts[1:])\n else:\n rest = parse_non_selinux(parts[1:])\n else:\n rest = parse_selinux(parts[1:])\n\n # Update our entry and put it into the correct buckets\n # based on its type.\n entry.update(rest)\n entry[\"raw_entry\"] = line\n entry[\"dir\"] = self[\"name\"]\n nm = entry[\"name\"]\n ents[nm] = entry\n if typ not in \"bcd\":\n files.append(nm)\n elif typ == \"d\":\n dirs.append(nm)\n elif typ in \"bc\":\n specials.append(nm)\n\n self.update({\"entries\": ents,\n \"files\": files,\n \"dirs\": dirs,\n \"specials\": specials})\n\n self.loaded = True\n del self.body\n\n def __getitem__(self, key):\n if self.loaded or key in PASS_KEYS:\n return super(Directory, self).__getitem__(key)\n self._load()\n return super(Directory, self).__getitem__(key)\n\n\ndef parse(lines, root=None):\n \"\"\"\n Parses a list of lines from ls into dictionaries representing their\n components.\n\n Args:\n lines (list): A list of lines generated by ls.\n root (str): The directory name to be used for ls output stanzas that\n don't have a name.\n\n Returns:\n A dictionary representing the ls output. 
It's keyed by the path\n containing each ls stanza.\n \"\"\"\n doc = {}\n entries = []\n name = None\n total = None\n for line in lines:\n line = line.strip()\n if not line:\n continue\n if line and line[0] == \"/\" and line[-1] == \":\":\n if name is None:\n name = line[:-1]\n if entries:\n d = Directory(name, total or len(entries), entries)\n doc[root] = d\n total = None\n entries = []\n else:\n d = Directory(name, total or len(entries), entries)\n doc[name or root] = d\n total = None\n entries = []\n name = line[:-1]\n continue\n if line.startswith(\"total\"):\n total = int(line.split(None, 1)[1])\n continue\n entries.append(line)\n name = name or root\n total = total if total is not None else len(entries)\n doc[name] = Directory(name, total, entries)\n return doc\n", "path": "insights/core/ls_parser.py"}]} | 2,757 | 885 |
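A hands-on way to see what the new `parse_rhel8_selinux` helper in the record above is doing is to split one of the issue's RHEL 8 sample lines by hand. The sketch below is a hypothetical, self-contained illustration rather than the insights code itself, and it assumes the usual fixed-width 12-character `ls -l` date field (day padded to two columns, e.g. "Apr  8 16:41").

```python
# Hypothetical sketch (not the insights parser): pulling apart a RHEL 8 `ls -laRZ`
# line like the one quoted in the issue. The date field is assumed to be the usual
# fixed-width 12 characters, with the day padded to two columns.
line = ("drwxr-xr-x. 2 root root unconfined_u:object_r:var_lib_t:s0 "
        "54 Apr  8 16:41 xxxx-xxxx-xxxx-xxxx")

perms, links, owner, group, rest = line.split(None, 4)
context, size, rest = rest.split(None, 2)   # SELinux context, size, "date + name"
date = rest[:12]                            # fixed-width date field
name = rest[13:]                            # skip the space after the date

se_user, se_role, se_type, se_mls = context.split(":")
entry = {
    "type": perms[0],
    "perms": perms[1:],
    "links": int(links),
    "owner": owner,
    "group": group,
    "se_user": se_user,
    "se_role": se_role,
    "se_type": se_type,
    "se_mls": se_mls,
    "size": int(size),
    "date": date,
    "name": name,
}
print(entry["size"], entry["date"], entry["name"])  # 54 Apr  8 16:41 xxxx-xxxx-xxxx-xxxx
```

The dispatch added by the patch (`":" in line.split()[4]`) works because on these RHEL 8 lines the fifth whitespace-separated field is the SELinux context, whereas plain `ls -la` output has a size or device number in that position.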
gh_patches_debug_32 | rasdani/github-patches | git_diff | mlflow__mlflow-2797 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[SETUP-BUG] ResolvePackageNotFound - python=3.5.2
Thank you for submitting an issue. Please refer to our [issue policy](https://www.github.com/mlflow/mlflow/blob/master/ISSUE_POLICY.md)
for information on what types of issues we address.
Please fill in this template and do not delete it unless you are sure your issue is outside its scope.
### System information
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Ubuntu 16.04
- **MLflow installed from (source or binary)**: binary (pip install mlflow)
- **MLflow version (run ``mlflow --version``)**: 1.2.0
- **Python version**: 3.5.2
- **Exact command to reproduce**: mlflow models build-docker -m /path/to/model -n "my-model"
### Describe the problem
mlflow models build-docker -m /path/to/model -n "my-model"
### Other info / logs
Warning: you have pip-installed dependencies in your environment file, but you do not list pip itself as one of your conda dependencies. Conda may not use the correct pip to install your packages, and they may end up in the wrong place. Please add an explicit pip dependency. I'm adding one for you, but still nagging you.
Collecting package metadata (repodata.json): ...working... done
Solving environment: ...working... failed
ResolvePackageNotFound:
- python=3.5.2
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/miniconda/lib/python3.7/site-packages/mlflow/models/container/__init__.py", line 102, in _install_pyfunc_deps
raise Exception("Failed to create model environment.")
Exception: Failed to create model environment.
creating and activating custom environment
The command '/bin/sh -c python -c 'from mlflow.models.container import _install_pyfunc_deps; _install_pyfunc_deps("/opt/ml/model", install_mlflow=False)'' returned a non-zero code: 1
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlflow/utils/environment.py`
Content:
```
1 import yaml
2
3 from mlflow.utils import PYTHON_VERSION
4
5 _conda_header = """\
6 name: mlflow-env
7 channels:
8 - defaults
9 """
10
11
12 def _mlflow_conda_env(path=None, additional_conda_deps=None, additional_pip_deps=None,
13 additional_conda_channels=None, install_mlflow=True):
14 """
15 Creates a Conda environment with the specified package channels and dependencies. If there are
16 any pip dependencies, including from the install_mlflow parameter, then pip will be added to
17 the conda dependencies. This is done to ensure that the pip inside the conda environment is
18 used to install the pip dependencies.
19
20 :param path: Local filesystem path where the conda env file is to be written. If unspecified,
21 the conda env will not be written to the filesystem; it will still be returned
22 in dictionary format.
23 :param additional_conda_deps: List of additional conda dependencies passed as strings.
24 :param additional_pip_deps: List of additional pip dependencies passed as strings.
25 :param additional_conda_channels: List of additional conda channels to search when resolving
26 packages.
27 :return: ``None`` if ``path`` is specified. Otherwise, the a dictionary representation of the
28 Conda environment.
29 """
30 pip_deps = (["mlflow"] if install_mlflow else []) + (
31 additional_pip_deps if additional_pip_deps else [])
32 conda_deps = (additional_conda_deps if additional_conda_deps else []) + (
33 ["pip"] if pip_deps else [])
34
35 env = yaml.safe_load(_conda_header)
36 env["dependencies"] = ["python={}".format(PYTHON_VERSION)]
37 if conda_deps is not None:
38 env["dependencies"] += conda_deps
39 env["dependencies"].append({"pip": pip_deps})
40 if additional_conda_channels is not None:
41 env["channels"] += additional_conda_channels
42
43 if path is not None:
44 with open(path, "w") as out:
45 yaml.safe_dump(env, stream=out, default_flow_style=False)
46 return None
47 else:
48 return env
49
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mlflow/utils/environment.py b/mlflow/utils/environment.py
--- a/mlflow/utils/environment.py
+++ b/mlflow/utils/environment.py
@@ -6,6 +6,7 @@
name: mlflow-env
channels:
- defaults
+ - conda-forge
"""
| {"golden_diff": "diff --git a/mlflow/utils/environment.py b/mlflow/utils/environment.py\n--- a/mlflow/utils/environment.py\n+++ b/mlflow/utils/environment.py\n@@ -6,6 +6,7 @@\n name: mlflow-env\n channels:\n - defaults\n+ - conda-forge\n \"\"\"\n", "issue": "[SETUP-BUG] ResolvePackageNotFound - python=3.5.2\nThank you for submitting an issue. Please refer to our [issue policy](https://www.github.com/mlflow/mlflow/blob/master/ISSUE_POLICY.md)\r\nfor information on what types of issues we address.\r\n \r\nPlease fill in this template and do not delete it unless you are sure your issue is outside its scope.\r\n\r\n### System information\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Ubuntu 16.04\r\n- **MLflow installed from (source or binary)**: binary (pip install mlflow)\r\n- **MLflow version (run ``mlflow --version``)**: 1.2.0\r\n- **Python version**: 3.5.2\r\n- **Exact command to reproduce**: mlflow models build-docker -m /path/to/model -n \"my-model\" \r\n\r\n### Describe the problem\r\nmlflow models build-docker -m /path/to/model -n \"my-model\"\r\n\r\n### Other info / logs\r\nWarning: you have pip-installed dependencies in your environment file, but you do not list pip itself as one of your condadependencies. Conda may not use the correct pip to install your packages, and they may end up in the wrong place. Pleaseadd an explicit pip dependency. I'm adding one for you, but still nagging you.\r\nCollecting package metadata (repodata.json): ...working... done\r\nSolving environment: ...working... failed\r\n\r\nResolvePackageNotFound:\r\n - python=3.5.2\r\n\r\nTraceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\n File \"/miniconda/lib/python3.7/site-packages/mlflow/models/container/__init__.py\", line 102, in _install_pyfunc_deps\r\n raise Exception(\"Failed to create model environment.\")\r\nException: Failed to create model environment.\r\ncreating and activating custom environment\r\nThe command '/bin/sh -c python -c 'from mlflow.models.container import _install_pyfunc_deps; _install_pyfunc_deps(\"/opt/ml/model\", install_mlflow=False)'' returned a non-zero code: 1\r\n\n", "before_files": [{"content": "import yaml\n\nfrom mlflow.utils import PYTHON_VERSION\n\n_conda_header = \"\"\"\\\nname: mlflow-env\nchannels:\n - defaults\n\"\"\"\n\n\ndef _mlflow_conda_env(path=None, additional_conda_deps=None, additional_pip_deps=None,\n additional_conda_channels=None, install_mlflow=True):\n \"\"\"\n Creates a Conda environment with the specified package channels and dependencies. If there are\n any pip dependencies, including from the install_mlflow parameter, then pip will be added to\n the conda dependencies. This is done to ensure that the pip inside the conda environment is\n used to install the pip dependencies.\n\n :param path: Local filesystem path where the conda env file is to be written. If unspecified,\n the conda env will not be written to the filesystem; it will still be returned\n in dictionary format.\n :param additional_conda_deps: List of additional conda dependencies passed as strings.\n :param additional_pip_deps: List of additional pip dependencies passed as strings.\n :param additional_conda_channels: List of additional conda channels to search when resolving\n packages.\n :return: ``None`` if ``path`` is specified. 
Otherwise, the a dictionary representation of the\n Conda environment.\n \"\"\"\n pip_deps = ([\"mlflow\"] if install_mlflow else []) + (\n additional_pip_deps if additional_pip_deps else [])\n conda_deps = (additional_conda_deps if additional_conda_deps else []) + (\n [\"pip\"] if pip_deps else [])\n\n env = yaml.safe_load(_conda_header)\n env[\"dependencies\"] = [\"python={}\".format(PYTHON_VERSION)]\n if conda_deps is not None:\n env[\"dependencies\"] += conda_deps\n env[\"dependencies\"].append({\"pip\": pip_deps})\n if additional_conda_channels is not None:\n env[\"channels\"] += additional_conda_channels\n\n if path is not None:\n with open(path, \"w\") as out:\n yaml.safe_dump(env, stream=out, default_flow_style=False)\n return None\n else:\n return env\n", "path": "mlflow/utils/environment.py"}], "after_files": [{"content": "import yaml\n\nfrom mlflow.utils import PYTHON_VERSION\n\n_conda_header = \"\"\"\\\nname: mlflow-env\nchannels:\n - defaults\n - conda-forge\n\"\"\"\n\n\ndef _mlflow_conda_env(path=None, additional_conda_deps=None, additional_pip_deps=None,\n additional_conda_channels=None, install_mlflow=True):\n \"\"\"\n Creates a Conda environment with the specified package channels and dependencies. If there are\n any pip dependencies, including from the install_mlflow parameter, then pip will be added to\n the conda dependencies. This is done to ensure that the pip inside the conda environment is\n used to install the pip dependencies.\n\n :param path: Local filesystem path where the conda env file is to be written. If unspecified,\n the conda env will not be written to the filesystem; it will still be returned\n in dictionary format.\n :param additional_conda_deps: List of additional conda dependencies passed as strings.\n :param additional_pip_deps: List of additional pip dependencies passed as strings.\n :param additional_conda_channels: List of additional conda channels to search when resolving\n packages.\n :return: ``None`` if ``path`` is specified. Otherwise, the a dictionary representation of the\n Conda environment.\n \"\"\"\n pip_deps = ([\"mlflow\"] if install_mlflow else []) + (\n additional_pip_deps if additional_pip_deps else [])\n conda_deps = (additional_conda_deps if additional_conda_deps else []) + (\n [\"pip\"] if pip_deps else [])\n\n env = yaml.safe_load(_conda_header)\n env[\"dependencies\"] = [\"python={}\".format(PYTHON_VERSION)]\n if conda_deps is not None:\n env[\"dependencies\"] += conda_deps\n env[\"dependencies\"].append({\"pip\": pip_deps})\n if additional_conda_channels is not None:\n env[\"channels\"] += additional_conda_channels\n\n if path is not None:\n with open(path, \"w\") as out:\n yaml.safe_dump(env, stream=out, default_flow_style=False)\n return None\n else:\n return env\n", "path": "mlflow/utils/environment.py"}]} | 1,264 | 63 |
gh_patches_debug_37559 | rasdani/github-patches | git_diff | yt-dlp__yt-dlp-1202 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Site Request] 7plus add login/pass to access 720p resolution
<!--
######################################################################
WARNING!
IGNORING THE FOLLOWING TEMPLATE WILL RESULT IN ISSUE CLOSED AS INCOMPLETE
######################################################################
-->
## Checklist
<!--
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is 2021.09.25. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
- Search the bugtracker for similar site feature requests: https://github.com/yt-dlp/yt-dlp. DO NOT post duplicates.
- Finally, put x into all relevant boxes like this [x] (Dont forget to delete the empty space)
-->
- [x] I'm reporting a site feature request
- [x] I've verified that I'm running yt-dlp version **2021.09.25**
- [x] I've searched the bugtracker for similar site feature requests including closed ones
## Description
<!--
Provide an explanation of your site feature request in an arbitrary form. Please make sure the description is worded well enough to be understood, see https://github.com/ytdl-org/youtube-dl#is-the-description-of-the-issue-itself-sufficient. Provide any additional information, suggested solution and as much context and examples as possible.
-->
7Plus has 720p resolution available behind a login/pass. Current site functionality only pulls 540p.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `yt_dlp/extractor/sevenplus.py`
Content:
```
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 import re
5
6 from .brightcove import BrightcoveNewIE
7 from ..compat import (
8 compat_HTTPError,
9 compat_str,
10 )
11 from ..utils import (
12 ExtractorError,
13 try_get,
14 update_url_query,
15 )
16
17
18 class SevenPlusIE(BrightcoveNewIE):
19 IE_NAME = '7plus'
20 _VALID_URL = r'https?://(?:www\.)?7plus\.com\.au/(?P<path>[^?]+\?.*?\bepisode-id=(?P<id>[^&#]+))'
21 _TESTS = [{
22 'url': 'https://7plus.com.au/MTYS?episode-id=MTYS7-003',
23 'info_dict': {
24 'id': 'MTYS7-003',
25 'ext': 'mp4',
26 'title': 'S7 E3 - Wind Surf',
27 'description': 'md5:29c6a69f21accda7601278f81b46483d',
28 'uploader_id': '5303576322001',
29 'upload_date': '20171201',
30 'timestamp': 1512106377,
31 'series': 'Mighty Ships',
32 'season_number': 7,
33 'episode_number': 3,
34 'episode': 'Wind Surf',
35 },
36 'params': {
37 'format': 'bestvideo',
38 'skip_download': True,
39 }
40 }, {
41 'url': 'https://7plus.com.au/UUUU?episode-id=AUMS43-001',
42 'only_matching': True,
43 }]
44
45 def _real_extract(self, url):
46 path, episode_id = self._match_valid_url(url).groups()
47
48 try:
49 media = self._download_json(
50 'https://videoservice.swm.digital/playback', episode_id, query={
51 'appId': '7plus',
52 'deviceType': 'web',
53 'platformType': 'web',
54 'accountId': 5303576322001,
55 'referenceId': 'ref:' + episode_id,
56 'deliveryId': 'csai',
57 'videoType': 'vod',
58 })['media']
59 except ExtractorError as e:
60 if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
61 raise ExtractorError(self._parse_json(
62 e.cause.read().decode(), episode_id)[0]['error_code'], expected=True)
63 raise
64
65 for source in media.get('sources', {}):
66 src = source.get('src')
67 if not src:
68 continue
69 source['src'] = update_url_query(src, {'rule': ''})
70
71 info = self._parse_brightcove_metadata(media, episode_id)
72
73 content = self._download_json(
74 'https://component-cdn.swm.digital/content/' + path,
75 episode_id, headers={
76 'market-id': 4,
77 }, fatal=False) or {}
78 for item in content.get('items', {}):
79 if item.get('componentData', {}).get('componentType') == 'infoPanel':
80 for src_key, dst_key in [('title', 'title'), ('shortSynopsis', 'description')]:
81 value = item.get(src_key)
82 if value:
83 info[dst_key] = value
84 info['series'] = try_get(
85 item, lambda x: x['seriesLogo']['name'], compat_str)
86 mobj = re.search(r'^S(\d+)\s+E(\d+)\s+-\s+(.+)$', info['title'])
87 if mobj:
88 info.update({
89 'season_number': int(mobj.group(1)),
90 'episode_number': int(mobj.group(2)),
91 'episode': mobj.group(3),
92 })
93
94 return info
95
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/yt_dlp/extractor/sevenplus.py b/yt_dlp/extractor/sevenplus.py
--- a/yt_dlp/extractor/sevenplus.py
+++ b/yt_dlp/extractor/sevenplus.py
@@ -1,6 +1,7 @@
# coding: utf-8
from __future__ import unicode_literals
+import json
import re
from .brightcove import BrightcoveNewIE
@@ -42,9 +43,52 @@
'only_matching': True,
}]
+ def _real_initialize(self):
+ self.token = None
+
+ cookies = self._get_cookies('https://7plus.com.au')
+ api_key = next((x for x in cookies if x.startswith('glt_')), '')[4:]
+ if not api_key: # Cookies are signed out, skip login
+ return
+
+ login_resp = self._download_json(
+ 'https://login.7plus.com.au/accounts.getJWT', None, 'Logging in', fatal=False,
+ query={
+ 'APIKey': api_key,
+ 'sdk': 'js_latest',
+ 'login_token': cookies[f'glt_{api_key}'].value,
+ 'authMode': 'cookie',
+ 'pageURL': 'https://7plus.com.au/',
+ 'sdkBuild': '12471',
+ 'format': 'json',
+ }) or {}
+
+ if 'errorMessage' in login_resp:
+ self.report_warning(f'Unable to login: 7plus said: {login_resp["errorMessage"]}')
+ return
+ id_token = login_resp.get('id_token')
+ if not id_token:
+ self.report_warning('Unable to login: Could not extract id token')
+ return
+
+ token_resp = self._download_json(
+ 'https://7plus.com.au/auth/token', None, 'Getting auth token', fatal=False,
+ headers={'Content-Type': 'application/json'}, data=json.dumps({
+ 'idToken': id_token,
+ 'platformId': 'web',
+ 'regSource': '7plus',
+ }).encode('utf-8')) or {}
+ self.token = token_resp.get('token')
+ if not self.token:
+ self.report_warning('Unable to log in: Could not extract auth token')
+
def _real_extract(self, url):
path, episode_id = self._match_valid_url(url).groups()
+ headers = {}
+ if self.token:
+ headers['Authorization'] = f'Bearer {self.token}'
+
try:
media = self._download_json(
'https://videoservice.swm.digital/playback', episode_id, query={
@@ -55,7 +99,7 @@
'referenceId': 'ref:' + episode_id,
'deliveryId': 'csai',
'videoType': 'vod',
- })['media']
+ }, headers=headers)['media']
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
raise ExtractorError(self._parse_json(
| {"golden_diff": "diff --git a/yt_dlp/extractor/sevenplus.py b/yt_dlp/extractor/sevenplus.py\n--- a/yt_dlp/extractor/sevenplus.py\n+++ b/yt_dlp/extractor/sevenplus.py\n@@ -1,6 +1,7 @@\n # coding: utf-8\n from __future__ import unicode_literals\n \n+import json\n import re\n \n from .brightcove import BrightcoveNewIE\n@@ -42,9 +43,52 @@\n 'only_matching': True,\n }]\n \n+ def _real_initialize(self):\n+ self.token = None\n+\n+ cookies = self._get_cookies('https://7plus.com.au')\n+ api_key = next((x for x in cookies if x.startswith('glt_')), '')[4:]\n+ if not api_key: # Cookies are signed out, skip login\n+ return\n+\n+ login_resp = self._download_json(\n+ 'https://login.7plus.com.au/accounts.getJWT', None, 'Logging in', fatal=False,\n+ query={\n+ 'APIKey': api_key,\n+ 'sdk': 'js_latest',\n+ 'login_token': cookies[f'glt_{api_key}'].value,\n+ 'authMode': 'cookie',\n+ 'pageURL': 'https://7plus.com.au/',\n+ 'sdkBuild': '12471',\n+ 'format': 'json',\n+ }) or {}\n+\n+ if 'errorMessage' in login_resp:\n+ self.report_warning(f'Unable to login: 7plus said: {login_resp[\"errorMessage\"]}')\n+ return\n+ id_token = login_resp.get('id_token')\n+ if not id_token:\n+ self.report_warning('Unable to login: Could not extract id token')\n+ return\n+\n+ token_resp = self._download_json(\n+ 'https://7plus.com.au/auth/token', None, 'Getting auth token', fatal=False,\n+ headers={'Content-Type': 'application/json'}, data=json.dumps({\n+ 'idToken': id_token,\n+ 'platformId': 'web',\n+ 'regSource': '7plus',\n+ }).encode('utf-8')) or {}\n+ self.token = token_resp.get('token')\n+ if not self.token:\n+ self.report_warning('Unable to log in: Could not extract auth token')\n+\n def _real_extract(self, url):\n path, episode_id = self._match_valid_url(url).groups()\n \n+ headers = {}\n+ if self.token:\n+ headers['Authorization'] = f'Bearer {self.token}'\n+\n try:\n media = self._download_json(\n 'https://videoservice.swm.digital/playback', episode_id, query={\n@@ -55,7 +99,7 @@\n 'referenceId': 'ref:' + episode_id,\n 'deliveryId': 'csai',\n 'videoType': 'vod',\n- })['media']\n+ }, headers=headers)['media']\n except ExtractorError as e:\n if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:\n raise ExtractorError(self._parse_json(\n", "issue": "[Site Request] 7plus add login/pass to access 720p resolution\n<!--\r\n\r\n######################################################################\r\n WARNING!\r\n IGNORING THE FOLLOWING TEMPLATE WILL RESULT IN ISSUE CLOSED AS INCOMPLETE\r\n######################################################################\r\n\r\n-->\r\n\r\n\r\n## Checklist\r\n\r\n<!--\r\nCarefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:\r\n- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is 2021.09.25. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.\r\n- Search the bugtracker for similar site feature requests: https://github.com/yt-dlp/yt-dlp. DO NOT post duplicates.\r\n- Finally, put x into all relevant boxes like this [x] (Dont forget to delete the empty space)\r\n-->\r\n\r\n- [x] I'm reporting a site feature request\r\n- [x] I've verified that I'm running yt-dlp version **2021.09.25**\r\n- [x] I've searched the bugtracker for similar site feature requests including closed ones\r\n\r\n\r\n## Description\r\n\r\n<!--\r\nProvide an explanation of your site feature request in an arbitrary form. 
Please make sure the description is worded well enough to be understood, see https://github.com/ytdl-org/youtube-dl#is-the-description-of-the-issue-itself-sufficient. Provide any additional information, suggested solution and as much context and examples as possible.\r\n-->\r\n\r\n7Plus has 720p resolution available behind a login/pass. Current site functionality only pulls 540p. \n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport re\n\nfrom .brightcove import BrightcoveNewIE\nfrom ..compat import (\n compat_HTTPError,\n compat_str,\n)\nfrom ..utils import (\n ExtractorError,\n try_get,\n update_url_query,\n)\n\n\nclass SevenPlusIE(BrightcoveNewIE):\n IE_NAME = '7plus'\n _VALID_URL = r'https?://(?:www\\.)?7plus\\.com\\.au/(?P<path>[^?]+\\?.*?\\bepisode-id=(?P<id>[^&#]+))'\n _TESTS = [{\n 'url': 'https://7plus.com.au/MTYS?episode-id=MTYS7-003',\n 'info_dict': {\n 'id': 'MTYS7-003',\n 'ext': 'mp4',\n 'title': 'S7 E3 - Wind Surf',\n 'description': 'md5:29c6a69f21accda7601278f81b46483d',\n 'uploader_id': '5303576322001',\n 'upload_date': '20171201',\n 'timestamp': 1512106377,\n 'series': 'Mighty Ships',\n 'season_number': 7,\n 'episode_number': 3,\n 'episode': 'Wind Surf',\n },\n 'params': {\n 'format': 'bestvideo',\n 'skip_download': True,\n }\n }, {\n 'url': 'https://7plus.com.au/UUUU?episode-id=AUMS43-001',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n path, episode_id = self._match_valid_url(url).groups()\n\n try:\n media = self._download_json(\n 'https://videoservice.swm.digital/playback', episode_id, query={\n 'appId': '7plus',\n 'deviceType': 'web',\n 'platformType': 'web',\n 'accountId': 5303576322001,\n 'referenceId': 'ref:' + episode_id,\n 'deliveryId': 'csai',\n 'videoType': 'vod',\n })['media']\n except ExtractorError as e:\n if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:\n raise ExtractorError(self._parse_json(\n e.cause.read().decode(), episode_id)[0]['error_code'], expected=True)\n raise\n\n for source in media.get('sources', {}):\n src = source.get('src')\n if not src:\n continue\n source['src'] = update_url_query(src, {'rule': ''})\n\n info = self._parse_brightcove_metadata(media, episode_id)\n\n content = self._download_json(\n 'https://component-cdn.swm.digital/content/' + path,\n episode_id, headers={\n 'market-id': 4,\n }, fatal=False) or {}\n for item in content.get('items', {}):\n if item.get('componentData', {}).get('componentType') == 'infoPanel':\n for src_key, dst_key in [('title', 'title'), ('shortSynopsis', 'description')]:\n value = item.get(src_key)\n if value:\n info[dst_key] = value\n info['series'] = try_get(\n item, lambda x: x['seriesLogo']['name'], compat_str)\n mobj = re.search(r'^S(\\d+)\\s+E(\\d+)\\s+-\\s+(.+)$', info['title'])\n if mobj:\n info.update({\n 'season_number': int(mobj.group(1)),\n 'episode_number': int(mobj.group(2)),\n 'episode': mobj.group(3),\n })\n\n return info\n", "path": "yt_dlp/extractor/sevenplus.py"}], "after_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport json\nimport re\n\nfrom .brightcove import BrightcoveNewIE\nfrom ..compat import (\n compat_HTTPError,\n compat_str,\n)\nfrom ..utils import (\n ExtractorError,\n try_get,\n update_url_query,\n)\n\n\nclass SevenPlusIE(BrightcoveNewIE):\n IE_NAME = '7plus'\n _VALID_URL = r'https?://(?:www\\.)?7plus\\.com\\.au/(?P<path>[^?]+\\?.*?\\bepisode-id=(?P<id>[^&#]+))'\n _TESTS = [{\n 'url': 'https://7plus.com.au/MTYS?episode-id=MTYS7-003',\n 'info_dict': {\n 
'id': 'MTYS7-003',\n 'ext': 'mp4',\n 'title': 'S7 E3 - Wind Surf',\n 'description': 'md5:29c6a69f21accda7601278f81b46483d',\n 'uploader_id': '5303576322001',\n 'upload_date': '20171201',\n 'timestamp': 1512106377,\n 'series': 'Mighty Ships',\n 'season_number': 7,\n 'episode_number': 3,\n 'episode': 'Wind Surf',\n },\n 'params': {\n 'format': 'bestvideo',\n 'skip_download': True,\n }\n }, {\n 'url': 'https://7plus.com.au/UUUU?episode-id=AUMS43-001',\n 'only_matching': True,\n }]\n\n def _real_initialize(self):\n self.token = None\n\n cookies = self._get_cookies('https://7plus.com.au')\n api_key = next((x for x in cookies if x.startswith('glt_')), '')[4:]\n if not api_key: # Cookies are signed out, skip login\n return\n\n login_resp = self._download_json(\n 'https://login.7plus.com.au/accounts.getJWT', None, 'Logging in', fatal=False,\n query={\n 'APIKey': api_key,\n 'sdk': 'js_latest',\n 'login_token': cookies[f'glt_{api_key}'].value,\n 'authMode': 'cookie',\n 'pageURL': 'https://7plus.com.au/',\n 'sdkBuild': '12471',\n 'format': 'json',\n }) or {}\n\n if 'errorMessage' in login_resp:\n self.report_warning(f'Unable to login: 7plus said: {login_resp[\"errorMessage\"]}')\n return\n id_token = login_resp.get('id_token')\n if not id_token:\n self.report_warning('Unable to login: Could not extract id token')\n return\n\n token_resp = self._download_json(\n 'https://7plus.com.au/auth/token', None, 'Getting auth token', fatal=False,\n headers={'Content-Type': 'application/json'}, data=json.dumps({\n 'idToken': id_token,\n 'platformId': 'web',\n 'regSource': '7plus',\n }).encode('utf-8')) or {}\n self.token = token_resp.get('token')\n if not self.token:\n self.report_warning('Unable to log in: Could not extract auth token')\n\n def _real_extract(self, url):\n path, episode_id = self._match_valid_url(url).groups()\n\n headers = {}\n if self.token:\n headers['Authorization'] = f'Bearer {self.token}'\n\n try:\n media = self._download_json(\n 'https://videoservice.swm.digital/playback', episode_id, query={\n 'appId': '7plus',\n 'deviceType': 'web',\n 'platformType': 'web',\n 'accountId': 5303576322001,\n 'referenceId': 'ref:' + episode_id,\n 'deliveryId': 'csai',\n 'videoType': 'vod',\n }, headers=headers)['media']\n except ExtractorError as e:\n if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:\n raise ExtractorError(self._parse_json(\n e.cause.read().decode(), episode_id)[0]['error_code'], expected=True)\n raise\n\n for source in media.get('sources', {}):\n src = source.get('src')\n if not src:\n continue\n source['src'] = update_url_query(src, {'rule': ''})\n\n info = self._parse_brightcove_metadata(media, episode_id)\n\n content = self._download_json(\n 'https://component-cdn.swm.digital/content/' + path,\n episode_id, headers={\n 'market-id': 4,\n }, fatal=False) or {}\n for item in content.get('items', {}):\n if item.get('componentData', {}).get('componentType') == 'infoPanel':\n for src_key, dst_key in [('title', 'title'), ('shortSynopsis', 'description')]:\n value = item.get(src_key)\n if value:\n info[dst_key] = value\n info['series'] = try_get(\n item, lambda x: x['seriesLogo']['name'], compat_str)\n mobj = re.search(r'^S(\\d+)\\s+E(\\d+)\\s+-\\s+(.+)$', info['title'])\n if mobj:\n info.update({\n 'season_number': int(mobj.group(1)),\n 'episode_number': int(mobj.group(2)),\n 'episode': mobj.group(3),\n })\n\n return info\n", "path": "yt_dlp/extractor/sevenplus.py"}]} | 1,679 | 698 |
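For reference, the login flow that the 7plus patch wires into `_real_initialize` can be reproduced outside yt-dlp. The sketch below re-expresses the same three steps with `requests` (read the Gigya `glt_<APIKey>` cookie, exchange it for an `id_token`, then trade that for a bearer token); it is an approximation rather than the extractor's code, and the two placeholder values are assumptions to be filled in from a logged-in browser session.

```python
# Rough, hypothetical reproduction of the patch's login flow using requests.
import json
import requests

api_key = "EXAMPLE_GIGYA_API_KEY"          # placeholder: suffix of the glt_* cookie name
login_token = "EXAMPLE_GLT_COOKIE_VALUE"   # placeholder: value of that glt_<api_key> cookie

jwt_resp = requests.get(
    "https://login.7plus.com.au/accounts.getJWT",
    params={
        "APIKey": api_key,
        "sdk": "js_latest",
        "login_token": login_token,
        "authMode": "cookie",
        "pageURL": "https://7plus.com.au/",
        "sdkBuild": "12471",
        "format": "json",
    },
).json()

token = requests.post(
    "https://7plus.com.au/auth/token",
    headers={"Content-Type": "application/json"},
    data=json.dumps({
        "idToken": jwt_resp.get("id_token"),
        "platformId": "web",
        "regSource": "7plus",
    }),
).json().get("token")

# The playback request then carries the bearer token, which is what unlocks 720p.
headers = {"Authorization": f"Bearer {token}"}
```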
gh_patches_debug_25239 | rasdani/github-patches | git_diff | learningequality__kolibri-1604 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DateTimeTzFields are serialized with incorrect times
Example saved this time in a `ChannelMetaDataCache` object (at around 14:00 local time)
`2017-06-06 14:44:12.582879(America/Los_Angeles)`
But it gets serialized as
`2017-06-06T07:44:12.582-07:00`
Expected is
`2017-06-06T14:44:12.582-07:00`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kolibri/core/serializers.py`
Content:
```
1 from django.utils import timezone
2 from rest_framework.serializers import DateTimeField, ModelSerializer
3 from .fields import DateTimeTzField as DjangoDateTimeTzField
4
5 class DateTimeTzField(DateTimeField):
6
7 def to_internal_value(self, data):
8 data = super(DateTimeTzField, self).to_internal_value(data)
9 tz = timezone.get_current_timezone()
10 return data.astimezone(tz)
11
12
13 serializer_field_mapping = {
14 DjangoDateTimeTzField: DateTimeTzField,
15 }
16
17 serializer_field_mapping.update(ModelSerializer.serializer_field_mapping)
18
19 class KolibriModelSerializer(ModelSerializer):
20
21 serializer_field_mapping = serializer_field_mapping
22
```
Path: `kolibri/core/fields.py`
Content:
```
1 import datetime
2 import re
3
4 import pytz
5 from django.db.models.fields import Field
6 from django.utils import timezone
7
8 date_time_format = "%Y-%m-%d %H:%M:%S.%f"
9 tz_format = "({tz})"
10 tz_regex = re.compile("\(([^\)]+)\)")
11 db_storage_string = "{date_time_string}{tz_string}"
12
13 def parse_timezonestamp(value):
14 if tz_regex.search(value):
15 tz = pytz.timezone(tz_regex.search(value).groups()[0])
16 else:
17 tz = timezone.get_current_timezone()
18 utc_value = tz_regex.sub('', value)
19 value = datetime.datetime.strptime(utc_value, date_time_format)
20 value = timezone.make_aware(value, pytz.utc)
21 return value.astimezone(tz)
22
23 def create_timezonestamp(value):
24 if value.tzinfo:
25 tz = value.tzinfo.zone
26 else:
27 tz = timezone.get_current_timezone().zone
28 date_time_string = value.strftime(date_time_format)
29 tz_string = tz_format.format(tz=tz)
30 value = db_storage_string.format(date_time_string=date_time_string, tz_string=tz_string)
31 return value
32
33 class DateTimeTzField(Field):
34 """
35 A field that stores datetime information as a char in this format:
36
37 %Y-%m-%d %H:%M:%S.%f(<tzinfo>)
38
39 It reads a timezone aware datetime object, and extracts the timezone zone information
40 then parses the datetime into the format above with the timezone information appended.
41
42 As this is ISO formatted, alphabetic sorting should still allow for proper queries
43 against this in the database. Mostly engineered for SQLite usage.
44 """
45
46 def db_type(self, connection):
47 return "varchar"
48
49 def from_db_value(self, value, expression, connection, context):
50 if value is None:
51 return value
52 return parse_timezonestamp(value)
53
54 def to_python(self, value):
55 if isinstance(value, datetime.datetime):
56 return value
57
58 if value is None:
59 return value
60
61 return parse_timezonestamp(value)
62
63 def get_prep_value(self, value):
64 # Casts datetimes into the format expected by the backend
65 if value is None:
66 return value
67 return create_timezonestamp(value)
68
69 def get_db_prep_value(self, value, connection, prepared=False):
70 if not prepared:
71 value = self.get_prep_value(value)
72 return value
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kolibri/core/fields.py b/kolibri/core/fields.py
--- a/kolibri/core/fields.py
+++ b/kolibri/core/fields.py
@@ -25,7 +25,8 @@
tz = value.tzinfo.zone
else:
tz = timezone.get_current_timezone().zone
- date_time_string = value.strftime(date_time_format)
+ value = timezone.make_aware(value, timezone.get_current_timezone())
+ date_time_string = value.astimezone(pytz.utc).strftime(date_time_format)
tz_string = tz_format.format(tz=tz)
value = db_storage_string.format(date_time_string=date_time_string, tz_string=tz_string)
return value
diff --git a/kolibri/core/serializers.py b/kolibri/core/serializers.py
--- a/kolibri/core/serializers.py
+++ b/kolibri/core/serializers.py
@@ -1,3 +1,5 @@
+import pytz
+
from django.utils import timezone
from rest_framework.serializers import DateTimeField, ModelSerializer
from .fields import DateTimeTzField as DjangoDateTimeTzField
@@ -7,6 +9,8 @@
def to_internal_value(self, data):
data = super(DateTimeTzField, self).to_internal_value(data)
tz = timezone.get_current_timezone()
+ if not data.tzinfo:
+ data = timezone.make_aware(data, pytz.utc)
return data.astimezone(tz)
| {"golden_diff": "diff --git a/kolibri/core/fields.py b/kolibri/core/fields.py\n--- a/kolibri/core/fields.py\n+++ b/kolibri/core/fields.py\n@@ -25,7 +25,8 @@\n tz = value.tzinfo.zone\n else:\n tz = timezone.get_current_timezone().zone\n- date_time_string = value.strftime(date_time_format)\n+ value = timezone.make_aware(value, timezone.get_current_timezone())\n+ date_time_string = value.astimezone(pytz.utc).strftime(date_time_format)\n tz_string = tz_format.format(tz=tz)\n value = db_storage_string.format(date_time_string=date_time_string, tz_string=tz_string)\n return value\ndiff --git a/kolibri/core/serializers.py b/kolibri/core/serializers.py\n--- a/kolibri/core/serializers.py\n+++ b/kolibri/core/serializers.py\n@@ -1,3 +1,5 @@\n+import pytz\n+\n from django.utils import timezone\n from rest_framework.serializers import DateTimeField, ModelSerializer\n from .fields import DateTimeTzField as DjangoDateTimeTzField\n@@ -7,6 +9,8 @@\n def to_internal_value(self, data):\n data = super(DateTimeTzField, self).to_internal_value(data)\n tz = timezone.get_current_timezone()\n+ if not data.tzinfo:\n+ data = timezone.make_aware(data, pytz.utc)\n return data.astimezone(tz)\n", "issue": "DateTimeTzFields are serialized with incorrect times\nExample saved this time in a `ChannelMetaDataCache` object (at around 14:00 local time)\r\n\r\n`2017-06-06 14:44:12.582879(America/Los_Angeles)`\r\n\r\nBut it gets serialized as\r\n\r\n`2017-06-06T07:44:12.582-07:00`\r\n\r\nExpected is\r\n\r\n`2017-06-06T14:44:12.582-07:00`\r\n\n", "before_files": [{"content": "from django.utils import timezone\nfrom rest_framework.serializers import DateTimeField, ModelSerializer\nfrom .fields import DateTimeTzField as DjangoDateTimeTzField\n\nclass DateTimeTzField(DateTimeField):\n\n def to_internal_value(self, data):\n data = super(DateTimeTzField, self).to_internal_value(data)\n tz = timezone.get_current_timezone()\n return data.astimezone(tz)\n\n\nserializer_field_mapping = {\n DjangoDateTimeTzField: DateTimeTzField,\n}\n\nserializer_field_mapping.update(ModelSerializer.serializer_field_mapping)\n\nclass KolibriModelSerializer(ModelSerializer):\n\n serializer_field_mapping = serializer_field_mapping\n", "path": "kolibri/core/serializers.py"}, {"content": "import datetime\nimport re\n\nimport pytz\nfrom django.db.models.fields import Field\nfrom django.utils import timezone\n\ndate_time_format = \"%Y-%m-%d %H:%M:%S.%f\"\ntz_format = \"({tz})\"\ntz_regex = re.compile(\"\\(([^\\)]+)\\)\")\ndb_storage_string = \"{date_time_string}{tz_string}\"\n\ndef parse_timezonestamp(value):\n if tz_regex.search(value):\n tz = pytz.timezone(tz_regex.search(value).groups()[0])\n else:\n tz = timezone.get_current_timezone()\n utc_value = tz_regex.sub('', value)\n value = datetime.datetime.strptime(utc_value, date_time_format)\n value = timezone.make_aware(value, pytz.utc)\n return value.astimezone(tz)\n\ndef create_timezonestamp(value):\n if value.tzinfo:\n tz = value.tzinfo.zone\n else:\n tz = timezone.get_current_timezone().zone\n date_time_string = value.strftime(date_time_format)\n tz_string = tz_format.format(tz=tz)\n value = db_storage_string.format(date_time_string=date_time_string, tz_string=tz_string)\n return value\n\nclass DateTimeTzField(Field):\n \"\"\"\n A field that stores datetime information as a char in this format:\n\n %Y-%m-%d %H:%M:%S.%f(<tzinfo>)\n\n It reads a timezone aware datetime object, and extracts the timezone zone information\n then parses the datetime into the format above with the timezone information 
appended.\n\n As this is ISO formatted, alphabetic sorting should still allow for proper queries\n against this in the database. Mostly engineered for SQLite usage.\n \"\"\"\n\n def db_type(self, connection):\n return \"varchar\"\n\n def from_db_value(self, value, expression, connection, context):\n if value is None:\n return value\n return parse_timezonestamp(value)\n\n def to_python(self, value):\n if isinstance(value, datetime.datetime):\n return value\n\n if value is None:\n return value\n\n return parse_timezonestamp(value)\n\n def get_prep_value(self, value):\n # Casts datetimes into the format expected by the backend\n if value is None:\n return value\n return create_timezonestamp(value)\n\n def get_db_prep_value(self, value, connection, prepared=False):\n if not prepared:\n value = self.get_prep_value(value)\n return value\n", "path": "kolibri/core/fields.py"}], "after_files": [{"content": "import pytz\n\nfrom django.utils import timezone\nfrom rest_framework.serializers import DateTimeField, ModelSerializer\nfrom .fields import DateTimeTzField as DjangoDateTimeTzField\n\nclass DateTimeTzField(DateTimeField):\n\n def to_internal_value(self, data):\n data = super(DateTimeTzField, self).to_internal_value(data)\n tz = timezone.get_current_timezone()\n if not data.tzinfo:\n data = timezone.make_aware(data, pytz.utc)\n return data.astimezone(tz)\n\n\nserializer_field_mapping = {\n DjangoDateTimeTzField: DateTimeTzField,\n}\n\nserializer_field_mapping.update(ModelSerializer.serializer_field_mapping)\n\nclass KolibriModelSerializer(ModelSerializer):\n\n serializer_field_mapping = serializer_field_mapping\n", "path": "kolibri/core/serializers.py"}, {"content": "import datetime\nimport re\n\nimport pytz\nfrom django.db.models.fields import Field\nfrom django.utils import timezone\n\ndate_time_format = \"%Y-%m-%d %H:%M:%S.%f\"\ntz_format = \"({tz})\"\ntz_regex = re.compile(\"\\(([^\\)]+)\\)\")\ndb_storage_string = \"{date_time_string}{tz_string}\"\n\ndef parse_timezonestamp(value):\n if tz_regex.search(value):\n tz = pytz.timezone(tz_regex.search(value).groups()[0])\n else:\n tz = timezone.get_current_timezone()\n utc_value = tz_regex.sub('', value)\n value = datetime.datetime.strptime(utc_value, date_time_format)\n value = timezone.make_aware(value, pytz.utc)\n return value.astimezone(tz)\n\ndef create_timezonestamp(value):\n if value.tzinfo:\n tz = value.tzinfo.zone\n else:\n tz = timezone.get_current_timezone().zone\n value = timezone.make_aware(value, timezone.get_current_timezone())\n date_time_string = value.astimezone(pytz.utc).strftime(date_time_format)\n tz_string = tz_format.format(tz=tz)\n value = db_storage_string.format(date_time_string=date_time_string, tz_string=tz_string)\n return value\n\nclass DateTimeTzField(Field):\n \"\"\"\n A field that stores datetime information as a char in this format:\n\n %Y-%m-%d %H:%M:%S.%f(<tzinfo>)\n\n It reads a timezone aware datetime object, and extracts the timezone zone information\n then parses the datetime into the format above with the timezone information appended.\n\n As this is ISO formatted, alphabetic sorting should still allow for proper queries\n against this in the database. 
Mostly engineered for SQLite usage.\n \"\"\"\n\n def db_type(self, connection):\n return \"varchar\"\n\n def from_db_value(self, value, expression, connection, context):\n if value is None:\n return value\n return parse_timezonestamp(value)\n\n def to_python(self, value):\n if isinstance(value, datetime.datetime):\n return value\n\n if value is None:\n return value\n\n return parse_timezonestamp(value)\n\n def get_prep_value(self, value):\n # Casts datetimes into the format expected by the backend\n if value is None:\n return value\n return create_timezonestamp(value)\n\n def get_db_prep_value(self, value, connection, prepared=False):\n if not prepared:\n value = self.get_prep_value(value)\n return value\n", "path": "kolibri/core/fields.py"}]} | 1,256 | 325 |
gh_patches_debug_2535 | rasdani/github-patches | git_diff | python__peps-2229 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Don't auto-add inline links to ref section & rm if empty, per #2130
First step to implementing #2130 , as agreed with @gvanrossum and the PEP editor team.
When building, don't add redundant footnotes and references entries for URLs that are already directly linked inline. This avoids an unnecessary, potentially confusing footnote for each link, and taking up additional space in the references section for no real benefit, plus simplifies the build code and should improve build time, especially for Sphinx. Furthermore, if the references section is empty (not including now-invisible link targets), remove it, as we did before (but in a more robust manner). This allows past and future PEPs to still use inline references with targets stored in the references section, while avoiding showing a now-empty references section.
These are both implemented for Sphinx and the legacy builder, and I visually inspected a variety of PEPs with various cases with both builders to ensure the desired results were achieved, and there were no obvious side effects from this change.
Following merging this PR, following the plan outlined in #2130 , I'll proceed with one updating the meta-PEP docs in PEP 0, PEP 1 and PEP 12 to reflect the revised policy of using standard reST links (inline or with separate targets) rather than the references section, and follow that with a PR updating the relative handful of references in the other active meta-PEPs, for consistency.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pep_sphinx_extensions/pep_processor/transforms/pep_footer.py`
Content:
```
1 import datetime
2 from pathlib import Path
3 import subprocess
4
5 from docutils import nodes
6 from docutils import transforms
7
8
9 class PEPFooter(transforms.Transform):
10 """Footer transforms for PEPs.
11
12 - Removes the References section if it is empty when rendered.
13 - Creates a link to the (GitHub) source text.
14
15 Source Link:
16 Create the link to the source file from the document source path,
17 and append the text to the end of the document.
18
19 """
20
21 # Uses same priority as docutils.transforms.TargetNotes
22 default_priority = 520
23
24 def apply(self) -> None:
25 pep_source_path = Path(self.document["source"])
26 if not pep_source_path.match("pep-*"):
27 return # not a PEP file, exit early
28
29 # Iterate through sections from the end of the document
30 for section in reversed(self.document[0]):
31 if not isinstance(section, nodes.section):
32 continue
33 title_words = section[0].astext().lower().split()
34 if "references" in title_words:
35 # Remove references section if there are no displayed
36 # footnotes (it only has title & link target nodes)
37 if all(isinstance(ref_node, (nodes.title, nodes.target))
38 for ref_node in section):
39 section.parent.remove(section)
40 break
41
42 # Add link to source text and last modified date
43 if pep_source_path.stem != "pep-0000":
44 self.document += _add_source_link(pep_source_path)
45 self.document += _add_commit_history_info(pep_source_path)
46
47
48 def _add_source_link(pep_source_path: Path) -> nodes.paragraph:
49 """Add link to source text on VCS (GitHub)"""
50 source_link = f"https://github.com/python/peps/blob/main/{pep_source_path.name}"
51 link_node = nodes.reference("", source_link, refuri=source_link)
52 return nodes.paragraph("", "Source: ", link_node)
53
54
55 def _add_commit_history_info(pep_source_path: Path) -> nodes.paragraph:
56 """Use local git history to find last modified date."""
57 try:
58 since_epoch = LAST_MODIFIED_TIMES[pep_source_path.name]
59 except KeyError:
60 return nodes.paragraph()
61
62 iso_time = datetime.datetime.utcfromtimestamp(since_epoch).isoformat(sep=" ")
63 commit_link = f"https://github.com/python/peps/commits/main/{pep_source_path.name}"
64 link_node = nodes.reference("", f"{iso_time} GMT", refuri=commit_link)
65 return nodes.paragraph("", "Last modified: ", link_node)
66
67
68 def _get_last_modified_timestamps():
69 # get timestamps and changed files from all commits (without paging results)
70 args = ["git", "--no-pager", "log", "--format=#%at", "--name-only"]
71 with subprocess.Popen(args, stdout=subprocess.PIPE) as process:
72 all_modified = process.stdout.read().decode("utf-8")
73 process.stdout.close()
74 if process.wait(): # non-zero return code
75 return {}
76
77 # set up the dictionary with the *current* files
78 last_modified = {path.name: 0 for path in Path().glob("pep-*") if path.suffix in {".txt", ".rst"}}
79
80 # iterate through newest to oldest, updating per file timestamps
81 change_sets = all_modified.removeprefix("#").split("#")
82 for change_set in change_sets:
83 timestamp, files = change_set.split("\n", 1)
84 for file in files.strip().split("\n"):
85 if file.startswith("pep-") and file[-3:] in {"txt", "rst"}:
86 if last_modified.get(file) == 0:
87 try:
88 last_modified[file] = float(timestamp)
89 except ValueError:
90 pass # if float conversion fails
91
92 return last_modified
93
94
95 LAST_MODIFIED_TIMES = _get_last_modified_timestamps()
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pep_sphinx_extensions/pep_processor/transforms/pep_footer.py b/pep_sphinx_extensions/pep_processor/transforms/pep_footer.py
--- a/pep_sphinx_extensions/pep_processor/transforms/pep_footer.py
+++ b/pep_sphinx_extensions/pep_processor/transforms/pep_footer.py
@@ -18,8 +18,8 @@
"""
- # Uses same priority as docutils.transforms.TargetNotes
- default_priority = 520
+ # Set low priority so ref targets aren't removed before they are needed
+ default_priority = 999
def apply(self) -> None:
pep_source_path = Path(self.document["source"])
| {"golden_diff": "diff --git a/pep_sphinx_extensions/pep_processor/transforms/pep_footer.py b/pep_sphinx_extensions/pep_processor/transforms/pep_footer.py\n--- a/pep_sphinx_extensions/pep_processor/transforms/pep_footer.py\n+++ b/pep_sphinx_extensions/pep_processor/transforms/pep_footer.py\n@@ -18,8 +18,8 @@\n \n \"\"\"\n \n- # Uses same priority as docutils.transforms.TargetNotes\n- default_priority = 520\n+ # Set low priority so ref targets aren't removed before they are needed\n+ default_priority = 999\n \n def apply(self) -> None:\n pep_source_path = Path(self.document[\"source\"])\n", "issue": "Don't auto-add inline links to ref section & rm if empty, per #2130\nFirst step to implementing #2130 , as agreed with @gvanrossum and the PEP editor team.\r\n\r\nWhen building, don't add redundant footnotes and references entries for URLs that are already directly linked inline. This avoids an unnecessary, potentially confusing footnote for each link, and taking up additional space in the references section for no real benefit, plus simplifies the build code and should improve build time, especially for Sphinx. Furthermore, if the references section is empty (not including now-invisible link targets), remove it, as we did before (but in a more robust manner). This allows past and future PEPs to still use inline references with targets stored in the references section, while avoiding showing a now-empty references section.\r\n\r\nThese are both implemented for Sphinx and the legacy builder, and I visually inspected a variety of PEPs with various cases with both builders to ensure the desired results were achieved, and there were no obvious side effects from this change.\r\n\r\nFollowing merging this PR, following the plan outlined in #2130 , I'll proceed with one updating the meta-PEP docs in PEP 0, PEP 1 and PEP 12 to reflect the revised policy of using standard reST links (inline or with separate targets) rather than the references section, and follow that with a PR updating the relative handful of references in the other active meta-PEPs, for consistency.\n", "before_files": [{"content": "import datetime\nfrom pathlib import Path\nimport subprocess\n\nfrom docutils import nodes\nfrom docutils import transforms\n\n\nclass PEPFooter(transforms.Transform):\n \"\"\"Footer transforms for PEPs.\n\n - Removes the References section if it is empty when rendered.\n - Creates a link to the (GitHub) source text.\n\n Source Link:\n Create the link to the source file from the document source path,\n and append the text to the end of the document.\n\n \"\"\"\n\n # Uses same priority as docutils.transforms.TargetNotes\n default_priority = 520\n\n def apply(self) -> None:\n pep_source_path = Path(self.document[\"source\"])\n if not pep_source_path.match(\"pep-*\"):\n return # not a PEP file, exit early\n\n # Iterate through sections from the end of the document\n for section in reversed(self.document[0]):\n if not isinstance(section, nodes.section):\n continue\n title_words = section[0].astext().lower().split()\n if \"references\" in title_words:\n # Remove references section if there are no displayed\n # footnotes (it only has title & link target nodes)\n if all(isinstance(ref_node, (nodes.title, nodes.target))\n for ref_node in section):\n section.parent.remove(section)\n break\n\n # Add link to source text and last modified date\n if pep_source_path.stem != \"pep-0000\":\n self.document += _add_source_link(pep_source_path)\n self.document += _add_commit_history_info(pep_source_path)\n\n\ndef 
_add_source_link(pep_source_path: Path) -> nodes.paragraph:\n \"\"\"Add link to source text on VCS (GitHub)\"\"\"\n source_link = f\"https://github.com/python/peps/blob/main/{pep_source_path.name}\"\n link_node = nodes.reference(\"\", source_link, refuri=source_link)\n return nodes.paragraph(\"\", \"Source: \", link_node)\n\n\ndef _add_commit_history_info(pep_source_path: Path) -> nodes.paragraph:\n \"\"\"Use local git history to find last modified date.\"\"\"\n try:\n since_epoch = LAST_MODIFIED_TIMES[pep_source_path.name]\n except KeyError:\n return nodes.paragraph()\n\n iso_time = datetime.datetime.utcfromtimestamp(since_epoch).isoformat(sep=\" \")\n commit_link = f\"https://github.com/python/peps/commits/main/{pep_source_path.name}\"\n link_node = nodes.reference(\"\", f\"{iso_time} GMT\", refuri=commit_link)\n return nodes.paragraph(\"\", \"Last modified: \", link_node)\n\n\ndef _get_last_modified_timestamps():\n # get timestamps and changed files from all commits (without paging results)\n args = [\"git\", \"--no-pager\", \"log\", \"--format=#%at\", \"--name-only\"]\n with subprocess.Popen(args, stdout=subprocess.PIPE) as process:\n all_modified = process.stdout.read().decode(\"utf-8\")\n process.stdout.close()\n if process.wait(): # non-zero return code\n return {}\n\n # set up the dictionary with the *current* files\n last_modified = {path.name: 0 for path in Path().glob(\"pep-*\") if path.suffix in {\".txt\", \".rst\"}}\n\n # iterate through newest to oldest, updating per file timestamps\n change_sets = all_modified.removeprefix(\"#\").split(\"#\")\n for change_set in change_sets:\n timestamp, files = change_set.split(\"\\n\", 1)\n for file in files.strip().split(\"\\n\"):\n if file.startswith(\"pep-\") and file[-3:] in {\"txt\", \"rst\"}:\n if last_modified.get(file) == 0:\n try:\n last_modified[file] = float(timestamp)\n except ValueError:\n pass # if float conversion fails\n\n return last_modified\n\n\nLAST_MODIFIED_TIMES = _get_last_modified_timestamps()\n", "path": "pep_sphinx_extensions/pep_processor/transforms/pep_footer.py"}], "after_files": [{"content": "import datetime\nfrom pathlib import Path\nimport subprocess\n\nfrom docutils import nodes\nfrom docutils import transforms\n\n\nclass PEPFooter(transforms.Transform):\n \"\"\"Footer transforms for PEPs.\n\n - Removes the References section if it is empty when rendered.\n - Creates a link to the (GitHub) source text.\n\n Source Link:\n Create the link to the source file from the document source path,\n and append the text to the end of the document.\n\n \"\"\"\n\n # Set low priority so ref targets aren't removed before they are needed\n default_priority = 999\n\n def apply(self) -> None:\n pep_source_path = Path(self.document[\"source\"])\n if not pep_source_path.match(\"pep-*\"):\n return # not a PEP file, exit early\n\n # Iterate through sections from the end of the document\n for section in reversed(self.document[0]):\n if not isinstance(section, nodes.section):\n continue\n title_words = section[0].astext().lower().split()\n if \"references\" in title_words:\n # Remove references section if there are no displayed\n # footnotes (it only has title & link target nodes)\n if all(isinstance(ref_node, (nodes.title, nodes.target))\n for ref_node in section):\n section.parent.remove(section)\n break\n\n # Add link to source text and last modified date\n if pep_source_path.stem != \"pep-0000\":\n self.document += _add_source_link(pep_source_path)\n self.document += _add_commit_history_info(pep_source_path)\n\n\ndef 
_add_source_link(pep_source_path: Path) -> nodes.paragraph:\n \"\"\"Add link to source text on VCS (GitHub)\"\"\"\n source_link = f\"https://github.com/python/peps/blob/main/{pep_source_path.name}\"\n link_node = nodes.reference(\"\", source_link, refuri=source_link)\n return nodes.paragraph(\"\", \"Source: \", link_node)\n\n\ndef _add_commit_history_info(pep_source_path: Path) -> nodes.paragraph:\n \"\"\"Use local git history to find last modified date.\"\"\"\n try:\n since_epoch = LAST_MODIFIED_TIMES[pep_source_path.name]\n except KeyError:\n return nodes.paragraph()\n\n iso_time = datetime.datetime.utcfromtimestamp(since_epoch).isoformat(sep=\" \")\n commit_link = f\"https://github.com/python/peps/commits/main/{pep_source_path.name}\"\n link_node = nodes.reference(\"\", f\"{iso_time} GMT\", refuri=commit_link)\n return nodes.paragraph(\"\", \"Last modified: \", link_node)\n\n\ndef _get_last_modified_timestamps():\n # get timestamps and changed files from all commits (without paging results)\n args = [\"git\", \"--no-pager\", \"log\", \"--format=#%at\", \"--name-only\"]\n with subprocess.Popen(args, stdout=subprocess.PIPE) as process:\n all_modified = process.stdout.read().decode(\"utf-8\")\n process.stdout.close()\n if process.wait(): # non-zero return code\n return {}\n\n # set up the dictionary with the *current* files\n last_modified = {path.name: 0 for path in Path().glob(\"pep-*\") if path.suffix in {\".txt\", \".rst\"}}\n\n # iterate through newest to oldest, updating per file timestamps\n change_sets = all_modified.removeprefix(\"#\").split(\"#\")\n for change_set in change_sets:\n timestamp, files = change_set.split(\"\\n\", 1)\n for file in files.strip().split(\"\\n\"):\n if file.startswith(\"pep-\") and file[-3:] in {\"txt\", \"rst\"}:\n if last_modified.get(file) == 0:\n try:\n last_modified[file] = float(timestamp)\n except ValueError:\n pass # if float conversion fails\n\n return last_modified\n\n\nLAST_MODIFIED_TIMES = _get_last_modified_timestamps()\n", "path": "pep_sphinx_extensions/pep_processor/transforms/pep_footer.py"}]} | 1,604 | 165 |
gh_patches_debug_10524 | rasdani/github-patches | git_diff | Gallopsled__pwntools-2191 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
readthedocs.org builds are broken
The docs fail to build for a while, but differently since https://github.com/Gallopsled/pwntools/pull/2179. They're still built using Python 2.7, which had problems installing rpyc and is now missing the toml package before running the setup.py.
https://readthedocs.org/projects/pwntools/builds/
You could switch to Python 3 or try adding the `toml; python_version<'3.4'` package to the `docs/requirements.txt` file for a starter? I think only a maintainer with access to the configs can fix this.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwn/toplevel.py`
Content:
```
1 # Get all the modules from pwnlib
2 import collections
3 import logging
4 import math
5 import operator
6 import os
7 import platform
8 import re
9 import socks
10 import signal
11 import string
12 import struct
13 import subprocess
14 import sys
15 import tempfile
16 import threading
17 import time
18
19 import colored_traceback
20 from pprint import pprint
21
22 import pwnlib
23 from pwnlib import *
24 from pwnlib.asm import *
25 from pwnlib.context import Thread
26 from pwnlib.context import context, LocalContext
27 from pwnlib.dynelf import DynELF
28 from pwnlib.encoders import *
29 from pwnlib.elf.corefile import Core, Corefile, Coredump
30 from pwnlib.elf.elf import ELF, load
31 from pwnlib.encoders import *
32 from pwnlib.exception import PwnlibException
33 from pwnlib.gdb import attach, debug_assembly, debug_shellcode
34 from pwnlib.filepointer import *
35 from pwnlib.filesystem import *
36 from pwnlib.flag import *
37 from pwnlib.fmtstr import FmtStr, fmtstr_payload, fmtstr_split
38 from pwnlib.log import getLogger
39 from pwnlib.memleak import MemLeak, RelativeMemLeak
40 from pwnlib.regsort import *
41 from pwnlib.replacements import *
42 from pwnlib.rop import ROP
43 from pwnlib.rop.call import AppendedArgument
44 from pwnlib.rop.srop import SigreturnFrame
45 from pwnlib.rop.ret2dlresolve import Ret2dlresolvePayload
46 from pwnlib.runner import *
47 from pwnlib.term.readline import str_input
48 from pwnlib.timeout import Timeout
49 from pwnlib.tubes.listen import listen
50 from pwnlib.tubes.process import process, PTY, PIPE, STDOUT
51 from pwnlib.tubes.remote import remote, tcp, udp, connect
52 from pwnlib.tubes.serialtube import serialtube
53 from pwnlib.tubes.server import server
54 from pwnlib.tubes.ssh import ssh
55 from pwnlib.tubes.tube import tube
56 from pwnlib.ui import *
57 from pwnlib.util import crc
58 from pwnlib.util import iters
59 from pwnlib.util import net
60 from pwnlib.util import proc
61 from pwnlib.util import safeeval
62 from pwnlib.util.crc import BitPolynom
63 from pwnlib.util.cyclic import *
64 from pwnlib.util.fiddling import *
65 from pwnlib.util.getdents import *
66 from pwnlib.util.hashes import *
67 from pwnlib.util.lists import *
68 from pwnlib.util.misc import *
69 from pwnlib.util.packing import *
70 from pwnlib.util.proc import pidof
71 from pwnlib.util.sh_string import sh_string, sh_prepare, sh_command_with
72 from pwnlib.util.splash import *
73 from pwnlib.util.web import *
74
75 # Promote these modules, so that "from pwn import *" will let you access them
76
77 from six.moves import cPickle as pickle, cStringIO as StringIO
78 from six import BytesIO
79
80 log = getLogger("pwnlib.exploit")
81 error = log.error
82 warning = log.warning
83 warn = log.warning
84 info = log.info
85 debug = log.debug
86 success = log.success
87
88 colored_traceback.add_hook()
89
90 # Equivalence with the default behavior of "from import *"
91 # __all__ = [x for x in tuple(globals()) if not x.startswith('_')]
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pwn/toplevel.py b/pwn/toplevel.py
--- a/pwn/toplevel.py
+++ b/pwn/toplevel.py
@@ -16,7 +16,6 @@
import threading
import time
-import colored_traceback
from pprint import pprint
import pwnlib
@@ -85,7 +84,12 @@
debug = log.debug
success = log.success
-colored_traceback.add_hook()
+try:
+ import colored_traceback
+except ImportError:
+ pass
+else:
+ colored_traceback.add_hook()
# Equivalence with the default behavior of "from import *"
# __all__ = [x for x in tuple(globals()) if not x.startswith('_')]
| {"golden_diff": "diff --git a/pwn/toplevel.py b/pwn/toplevel.py\n--- a/pwn/toplevel.py\n+++ b/pwn/toplevel.py\n@@ -16,7 +16,6 @@\n import threading\n import time\n \n-import colored_traceback\n from pprint import pprint\n \n import pwnlib\n@@ -85,7 +84,12 @@\n debug = log.debug\n success = log.success\n \n-colored_traceback.add_hook()\n+try:\n+ import colored_traceback\n+except ImportError:\n+ pass\n+else:\n+ colored_traceback.add_hook()\n \n # Equivalence with the default behavior of \"from import *\"\n # __all__ = [x for x in tuple(globals()) if not x.startswith('_')]\n", "issue": "readthedocs.org builds are broken\nThe docs fail to build for a while, but differently since https://github.com/Gallopsled/pwntools/pull/2179. They're still built using Python 2.7, which had problems installing rpyc and is now missing the toml package before running the setup.py.\r\n\r\nhttps://readthedocs.org/projects/pwntools/builds/\r\n\r\nYou could switch to Python 3 or try adding the `toml; python_version<'3.4'` package to the `docs/requirements.txt` file for a starter? I think only a maintainer with access to the configs can fix this.\n", "before_files": [{"content": "# Get all the modules from pwnlib\nimport collections\nimport logging\nimport math\nimport operator\nimport os\nimport platform\nimport re\nimport socks\nimport signal\nimport string\nimport struct\nimport subprocess\nimport sys\nimport tempfile\nimport threading\nimport time\n\nimport colored_traceback\nfrom pprint import pprint\n\nimport pwnlib\nfrom pwnlib import *\nfrom pwnlib.asm import *\nfrom pwnlib.context import Thread\nfrom pwnlib.context import context, LocalContext\nfrom pwnlib.dynelf import DynELF\nfrom pwnlib.encoders import *\nfrom pwnlib.elf.corefile import Core, Corefile, Coredump\nfrom pwnlib.elf.elf import ELF, load\nfrom pwnlib.encoders import *\nfrom pwnlib.exception import PwnlibException\nfrom pwnlib.gdb import attach, debug_assembly, debug_shellcode\nfrom pwnlib.filepointer import *\nfrom pwnlib.filesystem import *\nfrom pwnlib.flag import *\nfrom pwnlib.fmtstr import FmtStr, fmtstr_payload, fmtstr_split\nfrom pwnlib.log import getLogger\nfrom pwnlib.memleak import MemLeak, RelativeMemLeak\nfrom pwnlib.regsort import *\nfrom pwnlib.replacements import *\nfrom pwnlib.rop import ROP\nfrom pwnlib.rop.call import AppendedArgument\nfrom pwnlib.rop.srop import SigreturnFrame\nfrom pwnlib.rop.ret2dlresolve import Ret2dlresolvePayload\nfrom pwnlib.runner import *\nfrom pwnlib.term.readline import str_input\nfrom pwnlib.timeout import Timeout\nfrom pwnlib.tubes.listen import listen\nfrom pwnlib.tubes.process import process, PTY, PIPE, STDOUT\nfrom pwnlib.tubes.remote import remote, tcp, udp, connect\nfrom pwnlib.tubes.serialtube import serialtube\nfrom pwnlib.tubes.server import server\nfrom pwnlib.tubes.ssh import ssh\nfrom pwnlib.tubes.tube import tube\nfrom pwnlib.ui import *\nfrom pwnlib.util import crc\nfrom pwnlib.util import iters\nfrom pwnlib.util import net\nfrom pwnlib.util import proc\nfrom pwnlib.util import safeeval\nfrom pwnlib.util.crc import BitPolynom\nfrom pwnlib.util.cyclic import *\nfrom pwnlib.util.fiddling import *\nfrom pwnlib.util.getdents import *\nfrom pwnlib.util.hashes import *\nfrom pwnlib.util.lists import *\nfrom pwnlib.util.misc import *\nfrom pwnlib.util.packing import *\nfrom pwnlib.util.proc import pidof\nfrom pwnlib.util.sh_string import sh_string, sh_prepare, sh_command_with\nfrom pwnlib.util.splash import *\nfrom pwnlib.util.web import *\n\n# Promote these modules, so that \"from pwn 
import *\" will let you access them\n\nfrom six.moves import cPickle as pickle, cStringIO as StringIO\nfrom six import BytesIO\n\nlog = getLogger(\"pwnlib.exploit\")\nerror = log.error\nwarning = log.warning\nwarn = log.warning\ninfo = log.info\ndebug = log.debug\nsuccess = log.success\n\ncolored_traceback.add_hook()\n\n# Equivalence with the default behavior of \"from import *\"\n# __all__ = [x for x in tuple(globals()) if not x.startswith('_')]\n", "path": "pwn/toplevel.py"}], "after_files": [{"content": "# Get all the modules from pwnlib\nimport collections\nimport logging\nimport math\nimport operator\nimport os\nimport platform\nimport re\nimport socks\nimport signal\nimport string\nimport struct\nimport subprocess\nimport sys\nimport tempfile\nimport threading\nimport time\n\nfrom pprint import pprint\n\nimport pwnlib\nfrom pwnlib import *\nfrom pwnlib.asm import *\nfrom pwnlib.context import Thread\nfrom pwnlib.context import context, LocalContext\nfrom pwnlib.dynelf import DynELF\nfrom pwnlib.encoders import *\nfrom pwnlib.elf.corefile import Core, Corefile, Coredump\nfrom pwnlib.elf.elf import ELF, load\nfrom pwnlib.encoders import *\nfrom pwnlib.exception import PwnlibException\nfrom pwnlib.gdb import attach, debug_assembly, debug_shellcode\nfrom pwnlib.filepointer import *\nfrom pwnlib.filesystem import *\nfrom pwnlib.flag import *\nfrom pwnlib.fmtstr import FmtStr, fmtstr_payload, fmtstr_split\nfrom pwnlib.log import getLogger\nfrom pwnlib.memleak import MemLeak, RelativeMemLeak\nfrom pwnlib.regsort import *\nfrom pwnlib.replacements import *\nfrom pwnlib.rop import ROP\nfrom pwnlib.rop.call import AppendedArgument\nfrom pwnlib.rop.srop import SigreturnFrame\nfrom pwnlib.rop.ret2dlresolve import Ret2dlresolvePayload\nfrom pwnlib.runner import *\nfrom pwnlib.term.readline import str_input\nfrom pwnlib.timeout import Timeout\nfrom pwnlib.tubes.listen import listen\nfrom pwnlib.tubes.process import process, PTY, PIPE, STDOUT\nfrom pwnlib.tubes.remote import remote, tcp, udp, connect\nfrom pwnlib.tubes.serialtube import serialtube\nfrom pwnlib.tubes.server import server\nfrom pwnlib.tubes.ssh import ssh\nfrom pwnlib.tubes.tube import tube\nfrom pwnlib.ui import *\nfrom pwnlib.util import crc\nfrom pwnlib.util import iters\nfrom pwnlib.util import net\nfrom pwnlib.util import proc\nfrom pwnlib.util import safeeval\nfrom pwnlib.util.crc import BitPolynom\nfrom pwnlib.util.cyclic import *\nfrom pwnlib.util.fiddling import *\nfrom pwnlib.util.getdents import *\nfrom pwnlib.util.hashes import *\nfrom pwnlib.util.lists import *\nfrom pwnlib.util.misc import *\nfrom pwnlib.util.packing import *\nfrom pwnlib.util.proc import pidof\nfrom pwnlib.util.sh_string import sh_string, sh_prepare, sh_command_with\nfrom pwnlib.util.splash import *\nfrom pwnlib.util.web import *\n\n# Promote these modules, so that \"from pwn import *\" will let you access them\n\nfrom six.moves import cPickle as pickle, cStringIO as StringIO\nfrom six import BytesIO\n\nlog = getLogger(\"pwnlib.exploit\")\nerror = log.error\nwarning = log.warning\nwarn = log.warning\ninfo = log.info\ndebug = log.debug\nsuccess = log.success\n\ntry:\n import colored_traceback\nexcept ImportError:\n pass\nelse:\n colored_traceback.add_hook()\n\n# Equivalence with the default behavior of \"from import *\"\n# __all__ = [x for x in tuple(globals()) if not x.startswith('_')]\n", "path": "pwn/toplevel.py"}]} | 1,314 | 158 |
gh_patches_debug_11600 | rasdani/github-patches | git_diff | nautobot__nautobot-4260 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Set NAUTOBOT_INSTALLATION_METRICS_ENABLED=false in the dockerfile dev stage
<!--
NOTE: This template is for use by maintainers only. Please do not submit
an issue using this template unless you have been specifically asked to
do so.
-->
### Proposed Changes
This should propagate to all of the app dev instances, since they are based on the Nautobot dev images. Make sure to unset this in the final stage.
<!-- Provide justification for the proposed change(s). -->
### Justification
We don't want to send metrics for dev environments.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `development/nautobot_config.py`
Content:
```
1 """Nautobot development configuration file."""
2 import os
3
4 from nautobot.core.settings import * # noqa: F403
5 from nautobot.core.settings_funcs import is_truthy
6
7 #
8 # Debugging defaults to True rather than False for the development environment
9 #
10 DEBUG = is_truthy(os.getenv("NAUTOBOT_DEBUG", "True"))
11
12 # Django Debug Toolbar - enabled only when debugging
13 if DEBUG:
14 if "debug_toolbar" not in INSTALLED_APPS: # noqa: F405
15 INSTALLED_APPS.append("debug_toolbar") # noqa: F405
16 if "debug_toolbar.middleware.DebugToolbarMiddleware" not in MIDDLEWARE: # noqa: F405
17 MIDDLEWARE.insert(0, "debug_toolbar.middleware.DebugToolbarMiddleware") # noqa: F405
18 # By default the toolbar only displays when the request is coming from one of INTERNAL_IPS.
19 # For the Docker dev environment, we don't know in advance what that IP may be, so override to skip that check
20 DEBUG_TOOLBAR_CONFIG = {"SHOW_TOOLBAR_CALLBACK": lambda _request: DEBUG}
21
22 #
23 # Logging for the development environment, taking into account the redefinition of DEBUG above
24 #
25
26 LOG_LEVEL = "DEBUG" if DEBUG else "INFO"
27 LOGGING["loggers"]["nautobot"]["handlers"] = ["verbose_console" if DEBUG else "normal_console"] # noqa: F405
28 LOGGING["loggers"]["nautobot"]["level"] = LOG_LEVEL # noqa: F405
29
30 #
31 # Plugins
32 #
33
34 PLUGINS = [
35 "example_plugin",
36 ]
37
38
39 #
40 # Development Environment for SSO
41 # Configure `invoke.yml` based on example for SSO development environment
42 #
43
44 # OIDC Dev ENV
45 if is_truthy(os.getenv("ENABLE_OIDC", "False")):
46 import requests
47
48 AUTHENTICATION_BACKENDS = (
49 "social_core.backends.keycloak.KeycloakOAuth2",
50 "nautobot.core.authentication.ObjectPermissionBackend",
51 )
52 SOCIAL_AUTH_KEYCLOAK_KEY = "nautobot"
53 SOCIAL_AUTH_KEYCLOAK_SECRET = "7b1c3527-8702-4742-af69-2b74ee5742e8"
54 SOCIAL_AUTH_KEYCLOAK_PUBLIC_KEY = requests.get("http://keycloak:8087/realms/nautobot/", timeout=15).json()[
55 "public_key"
56 ]
57 SOCIAL_AUTH_KEYCLOAK_AUTHORIZATION_URL = "http://localhost:8087/realms/nautobot/protocol/openid-connect/auth"
58 SOCIAL_AUTH_KEYCLOAK_ACCESS_TOKEN_URL = "http://keycloak:8087/realms/nautobot/protocol/openid-connect/token"
59 SOCIAL_AUTH_KEYCLOAK_VERIFY_SSL = False
60
61 METRICS_ENABLED = True
62
63 CELERY_WORKER_PROMETHEUS_PORTS = [8080]
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/development/nautobot_config.py b/development/nautobot_config.py
--- a/development/nautobot_config.py
+++ b/development/nautobot_config.py
@@ -19,6 +19,9 @@
# For the Docker dev environment, we don't know in advance what that IP may be, so override to skip that check
DEBUG_TOOLBAR_CONFIG = {"SHOW_TOOLBAR_CALLBACK": lambda _request: DEBUG}
+# Do *not* send anonymized install metrics when post_upgrade or send_installation_metrics management commands are run
+INSTALLATION_METRICS_ENABLED = is_truthy(os.getenv("NAUTOBOT_INSTALLATION_METRICS_ENABLED", "False"))
+
#
# Logging for the development environment, taking into account the redefinition of DEBUG above
#
| {"golden_diff": "diff --git a/development/nautobot_config.py b/development/nautobot_config.py\n--- a/development/nautobot_config.py\n+++ b/development/nautobot_config.py\n@@ -19,6 +19,9 @@\n # For the Docker dev environment, we don't know in advance what that IP may be, so override to skip that check\n DEBUG_TOOLBAR_CONFIG = {\"SHOW_TOOLBAR_CALLBACK\": lambda _request: DEBUG}\n \n+# Do *not* send anonymized install metrics when post_upgrade or send_installation_metrics management commands are run\n+INSTALLATION_METRICS_ENABLED = is_truthy(os.getenv(\"NAUTOBOT_INSTALLATION_METRICS_ENABLED\", \"False\"))\n+\n #\n # Logging for the development environment, taking into account the redefinition of DEBUG above\n #\n", "issue": "Set NAUTOBOT_INSTALLATION_METRICS_ENABLED=false in the dockerfile dev stage\n<!--\r\n NOTE: This template is for use by maintainers only. Please do not submit\r\n an issue using this template unless you have been specifically asked to\r\n do so.\r\n-->\r\n### Proposed Changes\r\n\r\nThis should propagate to all of the app dev instances since they base off of nautobot dev images. Make sure to unset this in the final stage\r\n\r\n<!-- Provide justification for the proposed change(s). -->\r\n### Justification\r\n\r\nWe don't want to send metrics for dev environments\n", "before_files": [{"content": "\"\"\"Nautobot development configuration file.\"\"\"\nimport os\n\nfrom nautobot.core.settings import * # noqa: F403\nfrom nautobot.core.settings_funcs import is_truthy\n\n#\n# Debugging defaults to True rather than False for the development environment\n#\nDEBUG = is_truthy(os.getenv(\"NAUTOBOT_DEBUG\", \"True\"))\n\n# Django Debug Toolbar - enabled only when debugging\nif DEBUG:\n if \"debug_toolbar\" not in INSTALLED_APPS: # noqa: F405\n INSTALLED_APPS.append(\"debug_toolbar\") # noqa: F405\n if \"debug_toolbar.middleware.DebugToolbarMiddleware\" not in MIDDLEWARE: # noqa: F405\n MIDDLEWARE.insert(0, \"debug_toolbar.middleware.DebugToolbarMiddleware\") # noqa: F405\n # By default the toolbar only displays when the request is coming from one of INTERNAL_IPS.\n # For the Docker dev environment, we don't know in advance what that IP may be, so override to skip that check\n DEBUG_TOOLBAR_CONFIG = {\"SHOW_TOOLBAR_CALLBACK\": lambda _request: DEBUG}\n\n#\n# Logging for the development environment, taking into account the redefinition of DEBUG above\n#\n\nLOG_LEVEL = \"DEBUG\" if DEBUG else \"INFO\"\nLOGGING[\"loggers\"][\"nautobot\"][\"handlers\"] = [\"verbose_console\" if DEBUG else \"normal_console\"] # noqa: F405\nLOGGING[\"loggers\"][\"nautobot\"][\"level\"] = LOG_LEVEL # noqa: F405\n\n#\n# Plugins\n#\n\nPLUGINS = [\n \"example_plugin\",\n]\n\n\n#\n# Development Environment for SSO\n# Configure `invoke.yml` based on example for SSO development environment\n#\n\n# OIDC Dev ENV\nif is_truthy(os.getenv(\"ENABLE_OIDC\", \"False\")):\n import requests\n\n AUTHENTICATION_BACKENDS = (\n \"social_core.backends.keycloak.KeycloakOAuth2\",\n \"nautobot.core.authentication.ObjectPermissionBackend\",\n )\n SOCIAL_AUTH_KEYCLOAK_KEY = \"nautobot\"\n SOCIAL_AUTH_KEYCLOAK_SECRET = \"7b1c3527-8702-4742-af69-2b74ee5742e8\"\n SOCIAL_AUTH_KEYCLOAK_PUBLIC_KEY = requests.get(\"http://keycloak:8087/realms/nautobot/\", timeout=15).json()[\n \"public_key\"\n ]\n SOCIAL_AUTH_KEYCLOAK_AUTHORIZATION_URL = \"http://localhost:8087/realms/nautobot/protocol/openid-connect/auth\"\n SOCIAL_AUTH_KEYCLOAK_ACCESS_TOKEN_URL = \"http://keycloak:8087/realms/nautobot/protocol/openid-connect/token\"\n 
SOCIAL_AUTH_KEYCLOAK_VERIFY_SSL = False\n\nMETRICS_ENABLED = True\n\nCELERY_WORKER_PROMETHEUS_PORTS = [8080]\n", "path": "development/nautobot_config.py"}], "after_files": [{"content": "\"\"\"Nautobot development configuration file.\"\"\"\nimport os\n\nfrom nautobot.core.settings import * # noqa: F403\nfrom nautobot.core.settings_funcs import is_truthy\n\n#\n# Debugging defaults to True rather than False for the development environment\n#\nDEBUG = is_truthy(os.getenv(\"NAUTOBOT_DEBUG\", \"True\"))\n\n# Django Debug Toolbar - enabled only when debugging\nif DEBUG:\n if \"debug_toolbar\" not in INSTALLED_APPS: # noqa: F405\n INSTALLED_APPS.append(\"debug_toolbar\") # noqa: F405\n if \"debug_toolbar.middleware.DebugToolbarMiddleware\" not in MIDDLEWARE: # noqa: F405\n MIDDLEWARE.insert(0, \"debug_toolbar.middleware.DebugToolbarMiddleware\") # noqa: F405\n # By default the toolbar only displays when the request is coming from one of INTERNAL_IPS.\n # For the Docker dev environment, we don't know in advance what that IP may be, so override to skip that check\n DEBUG_TOOLBAR_CONFIG = {\"SHOW_TOOLBAR_CALLBACK\": lambda _request: DEBUG}\n\n# Do *not* send anonymized install metrics when post_upgrade or send_installation_metrics management commands are run\nINSTALLATION_METRICS_ENABLED = is_truthy(os.getenv(\"NAUTOBOT_INSTALLATION_METRICS_ENABLED\", \"False\"))\n\n#\n# Logging for the development environment, taking into account the redefinition of DEBUG above\n#\n\nLOG_LEVEL = \"DEBUG\" if DEBUG else \"INFO\"\nLOGGING[\"loggers\"][\"nautobot\"][\"handlers\"] = [\"verbose_console\" if DEBUG else \"normal_console\"] # noqa: F405\nLOGGING[\"loggers\"][\"nautobot\"][\"level\"] = LOG_LEVEL # noqa: F405\n\n#\n# Plugins\n#\n\nPLUGINS = [\n \"example_plugin\",\n]\n\n\n#\n# Development Environment for SSO\n# Configure `invoke.yml` based on example for SSO development environment\n#\n\n# OIDC Dev ENV\nif is_truthy(os.getenv(\"ENABLE_OIDC\", \"False\")):\n import requests\n\n AUTHENTICATION_BACKENDS = (\n \"social_core.backends.keycloak.KeycloakOAuth2\",\n \"nautobot.core.authentication.ObjectPermissionBackend\",\n )\n SOCIAL_AUTH_KEYCLOAK_KEY = \"nautobot\"\n SOCIAL_AUTH_KEYCLOAK_SECRET = \"7b1c3527-8702-4742-af69-2b74ee5742e8\"\n SOCIAL_AUTH_KEYCLOAK_PUBLIC_KEY = requests.get(\"http://keycloak:8087/realms/nautobot/\", timeout=15).json()[\n \"public_key\"\n ]\n SOCIAL_AUTH_KEYCLOAK_AUTHORIZATION_URL = \"http://localhost:8087/realms/nautobot/protocol/openid-connect/auth\"\n SOCIAL_AUTH_KEYCLOAK_ACCESS_TOKEN_URL = \"http://keycloak:8087/realms/nautobot/protocol/openid-connect/token\"\n SOCIAL_AUTH_KEYCLOAK_VERIFY_SSL = False\n\nMETRICS_ENABLED = True\n\nCELERY_WORKER_PROMETHEUS_PORTS = [8080]\n", "path": "development/nautobot_config.py"}]} | 1,135 | 166 |
gh_patches_debug_7368 | rasdani/github-patches | git_diff | learningequality__kolibri-9397 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fresh Kolibri dev environment fails to install properly
## Observed behavior
I took these steps:
- Clone Kolibri to fresh local repo
- Ensure `develop` is checked out
- `python -m venv venv`
- `source venv/bin/activate`
- `pip install -r` every file in the `requirements/` directory
- `pip install -e .`
- `yarn install`
- `yarn devserver`
Then I get the error noted below. Seems like Django isn't being installed correctly or something. I've copied the `venv` from another worktree in and the branch works as expected until I `pip install...` everything again.
There was a recent bump to Morango in the requirements.txt but I can't imagine why that would cause this problem.
## Errors and logs
<!--
Relevant logs from:
* the command line
* ~/.kolibri/logs/kolibri.txt
* the browser console
Please wrap errors in triple backticks for clean formatting like this:
```
01:10 info: something happened
01:12 error: something bad happened
```
-->
```
╰─ yarn devserver
yarn run v1.22.15
$ run-p python-devserver lint-frontend:watch:format hashi-dev watch
> [email protected] hashi-dev
> yarn workspace hashi run dev
> [email protected] watch
> kolibri-tools build dev --file ./build_tools/build_plugins.txt --cache
> [email protected] python-devserver
> kolibri start --debug --foreground --port=8000 --settings=kolibri.deployment.default.settings.dev
> [email protected] lint-frontend:watch:format
> yarn run lint-frontend --monitor --write
$ kolibri-tools lint --pattern '{kolibri*/**/assets,packages,build_tools}/**/*.{js,vue,scss,less,css}' --ignore '**/dist/**,**/node_modules/**,**/static/**,**/kolibri-core-for-export/**' --monitor --write
$ yarn run build-base --mode=development --watch
$ webpack --config ./webpack.config.js --mode=development --watch
INFO Option DEBUG in section [Server] being overridden by environment variable KOLIBRI_DEBUG
INFO Option DEBUG_LOG_DATABASE in section [Server] being overridden by environment variable KOLIBRI_DEBUG_LOG_DATABASE
INFO Option RUN_MODE in section [Deployment] being overridden by environment variable KOLIBRI_RUN_MODE
/home/jacob/kill-this-kolibri/venv/lib/python3.6/site-packages/psycopg2/__init__.py:144: UserWarning: The psycopg2 wheel package will be renamed from release 2.8; in order to keep installing from binary please use "pip install psycopg2-binary" instead. For details see: <http://initd.org/psycopg/docs/install.html#binary-install-from-pypi>.
""")
Error: Traceback (most recent call last):
File "/home/jacob/kill-this-kolibri/kolibri/utils/cli.py", line 193, in invoke
initialize(**get_initialize_params())
File "/home/jacob/kill-this-kolibri/kolibri/utils/main.py", line 279, in initialize
_setup_django()
File "/home/jacob/kill-this-kolibri/kolibri/utils/main.py", line 153, in _setup_django
django.setup()
File "/home/jacob/kill-this-kolibri/venv/lib/python3.6/site-packages/django/__init__.py", line 27, in setup
apps.populate(settings.INSTALLED_APPS)
File "/home/jacob/kill-this-kolibri/venv/lib/python3.6/site-packages/django/apps/registry.py", line 85, in populate
app_config = AppConfig.create(entry)
File "/home/jacob/kill-this-kolibri/venv/lib/python3.6/site-packages/django/apps/config.py", line 94, in create
module = import_module(entry)
File "/home/jacob/.pyenv/versions/3.6.12/lib/python3.6/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 994, in _gcd_import
File "<frozen importlib._bootstrap>", line 971, in _find_and_load
File "<frozen importlib._bootstrap>", line 953, in _find_and_load_unlocked
ModuleNotFoundError: No module named 'django_extensions'
ERROR: "python-devserver" exited with 1.
error Command failed with exit code 1.
info Visit https://yarnpkg.com/en/docs/cli/run for documentation about this command.
```
## Expected behavior
<!--
Description of what behavior was expected but did not occur
-->
Server should start up without issue.
## User-facing consequences…
<!--
Implications and real-world consequences for learners, coaches, admins, and other users of the application
-->
Devs cannot create new working local environments.
## Steps to reproduce
<!--
Precise steps that someone else can follow in order to see this behavior
-->
See the above.
## Context
<!--
Tell us about your environment, including:
* Kolibri version
* Operating system
* Browser
-->
`develop`
Fedora
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kolibri/deployment/default/settings/dev.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import print_function
3 from __future__ import unicode_literals
4
5 import os
6
7 from .base import * # noqa isort:skip @UnusedWildImport
8
9 DEBUG = True
10
11 # Settings might be tuples, so switch to lists
12 INSTALLED_APPS = list(INSTALLED_APPS) + ["drf_yasg", "django_extensions"] # noqa F405
13 webpack_middleware = "kolibri.core.webpack.middleware.WebpackErrorHandler"
14 no_login_popup_middleware = (
15 "kolibri.core.auth.middleware.XhrPreventLoginPromptMiddleware"
16 )
17 MIDDLEWARE = list(MIDDLEWARE) + [ # noqa F405
18 webpack_middleware,
19 no_login_popup_middleware,
20 ]
21
22 INTERNAL_IPS = ["127.0.0.1"]
23
24 ROOT_URLCONF = "kolibri.deployment.default.dev_urls"
25
26 DEVELOPER_MODE = True
27 os.environ.update({"KOLIBRI_DEVELOPER_MODE": "True"})
28
29 try:
30 process_cache = CACHES["process_cache"] # noqa F405
31 except KeyError:
32 process_cache = None
33
34 # Create a memcache for each cache
35 CACHES = {
36 key: {"BACKEND": "django.core.cache.backends.locmem.LocMemCache"}
37 for key in CACHES # noqa F405
38 }
39
40 if process_cache:
41 CACHES["process_cache"] = process_cache
42
43
44 REST_FRAMEWORK = {
45 "UNAUTHENTICATED_USER": "kolibri.core.auth.models.KolibriAnonymousUser",
46 "DEFAULT_AUTHENTICATION_CLASSES": [
47 # Activate basic auth for external API testing tools
48 "rest_framework.authentication.BasicAuthentication",
49 "rest_framework.authentication.SessionAuthentication",
50 ],
51 "DEFAULT_RENDERER_CLASSES": (
52 "rest_framework.renderers.JSONRenderer",
53 "rest_framework.renderers.BrowsableAPIRenderer",
54 ),
55 "EXCEPTION_HANDLER": "kolibri.core.utils.exception_handler.custom_exception_handler",
56 }
57
58 SWAGGER_SETTINGS = {"DEFAULT_INFO": "kolibri.deployment.default.dev_urls.api_info"}
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kolibri/deployment/default/settings/dev.py b/kolibri/deployment/default/settings/dev.py
--- a/kolibri/deployment/default/settings/dev.py
+++ b/kolibri/deployment/default/settings/dev.py
@@ -9,7 +9,7 @@
DEBUG = True
# Settings might be tuples, so switch to lists
-INSTALLED_APPS = list(INSTALLED_APPS) + ["drf_yasg", "django_extensions"] # noqa F405
+INSTALLED_APPS = list(INSTALLED_APPS) + ["drf_yasg"] # noqa F405
webpack_middleware = "kolibri.core.webpack.middleware.WebpackErrorHandler"
no_login_popup_middleware = (
"kolibri.core.auth.middleware.XhrPreventLoginPromptMiddleware"
| {"golden_diff": "diff --git a/kolibri/deployment/default/settings/dev.py b/kolibri/deployment/default/settings/dev.py\n--- a/kolibri/deployment/default/settings/dev.py\n+++ b/kolibri/deployment/default/settings/dev.py\n@@ -9,7 +9,7 @@\n DEBUG = True\n \n # Settings might be tuples, so switch to lists\n-INSTALLED_APPS = list(INSTALLED_APPS) + [\"drf_yasg\", \"django_extensions\"] # noqa F405\n+INSTALLED_APPS = list(INSTALLED_APPS) + [\"drf_yasg\"] # noqa F405\n webpack_middleware = \"kolibri.core.webpack.middleware.WebpackErrorHandler\"\n no_login_popup_middleware = (\n \"kolibri.core.auth.middleware.XhrPreventLoginPromptMiddleware\"\n", "issue": "Fresh Kolibri dev environment fails to install properly\n## Observed behavior\r\n\r\nI took these steps:\r\n\r\n- Clone Kolibri to fresh local repo\r\n- Ensure `develop` is checked out\r\n- `python -m venv venv`\r\n- `source venv/bin/activate` \r\n- `pip install -r` every file in the `requirements/` directory\r\n- `pip install -e .`\r\n- `yarn install`\r\n- `yarn devserver`\r\n\r\nThen I get the error noted below. Seems like Django isn't being installed correctly or something. I've copied the `venv` from another worktree in and the branch works as expected until I `pip install...` everything again.\r\n\r\nThere was a recent bump to Morango in the requirements.txt but I can't imagine why that would cause this problem.\r\n\r\n## Errors and logs\r\n<!--\r\nRelevant logs from:\r\n * the command line\r\n * ~/.kolibri/logs/kolibri.txt\r\n * the browser console\r\n\r\nPlease wrap errors in triple backticks for clean formatting like this:\r\n```\r\n01:10 info: something happened\r\n01:12 error: something bad happened\r\n```\r\n-->\r\n\r\n```\r\n\u2570\u2500 yarn devserver \r\nyarn run v1.22.15\r\n$ run-p python-devserver lint-frontend:watch:format hashi-dev watch\r\n\r\n> [email protected] hashi-dev\r\n> yarn workspace hashi run dev\r\n\r\n\r\n> [email protected] watch\r\n> kolibri-tools build dev --file ./build_tools/build_plugins.txt --cache\r\n\r\n\r\n> [email protected] python-devserver\r\n> kolibri start --debug --foreground --port=8000 --settings=kolibri.deployment.default.settings.dev\r\n\r\n\r\n> [email protected] lint-frontend:watch:format\r\n> yarn run lint-frontend --monitor --write\r\n\r\n$ kolibri-tools lint --pattern '{kolibri*/**/assets,packages,build_tools}/**/*.{js,vue,scss,less,css}' --ignore '**/dist/**,**/node_modules/**,**/static/**,**/kolibri-core-for-export/**' --monitor --write\r\n$ yarn run build-base --mode=development --watch\r\n$ webpack --config ./webpack.config.js --mode=development --watch\r\nINFO Option DEBUG in section [Server] being overridden by environment variable KOLIBRI_DEBUG\r\nINFO Option DEBUG_LOG_DATABASE in section [Server] being overridden by environment variable KOLIBRI_DEBUG_LOG_DATABASE\r\nINFO Option RUN_MODE in section [Deployment] being overridden by environment variable KOLIBRI_RUN_MODE\r\n/home/jacob/kill-this-kolibri/venv/lib/python3.6/site-packages/psycopg2/__init__.py:144: UserWarning: The psycopg2 wheel package will be renamed from release 2.8; in order to keep installing from binary please use \"pip install psycopg2-binary\" instead. 
For details see: <http://initd.org/psycopg/docs/install.html#binary-install-from-pypi>.\r\n \"\"\")\r\nError: Traceback (most recent call last):\r\n File \"/home/jacob/kill-this-kolibri/kolibri/utils/cli.py\", line 193, in invoke\r\n initialize(**get_initialize_params())\r\n File \"/home/jacob/kill-this-kolibri/kolibri/utils/main.py\", line 279, in initialize\r\n _setup_django()\r\n File \"/home/jacob/kill-this-kolibri/kolibri/utils/main.py\", line 153, in _setup_django\r\n django.setup()\r\n File \"/home/jacob/kill-this-kolibri/venv/lib/python3.6/site-packages/django/__init__.py\", line 27, in setup\r\n apps.populate(settings.INSTALLED_APPS)\r\n File \"/home/jacob/kill-this-kolibri/venv/lib/python3.6/site-packages/django/apps/registry.py\", line 85, in populate\r\n app_config = AppConfig.create(entry)\r\n File \"/home/jacob/kill-this-kolibri/venv/lib/python3.6/site-packages/django/apps/config.py\", line 94, in create\r\n module = import_module(entry)\r\n File \"/home/jacob/.pyenv/versions/3.6.12/lib/python3.6/importlib/__init__.py\", line 126, in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\n File \"<frozen importlib._bootstrap>\", line 994, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 971, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 953, in _find_and_load_unlocked\r\nModuleNotFoundError: No module named 'django_extensions'\r\n\r\nERROR: \"python-devserver\" exited with 1.\r\nerror Command failed with exit code 1.\r\ninfo Visit https://yarnpkg.com/en/docs/cli/run for documentation about this command.\r\n```\r\n\r\n## Expected behavior\r\n<!--\r\nDescription of what behavior was expected but did not occur\r\n-->\r\n\r\nServer should start up without issue.\r\n\r\n## User-facing consequences\u2026\r\n\r\n<!--\r\nImplications and real-world consequences for learners, coaches, admins, and other users of the application\r\n-->\r\n\r\nDevs cannot create new working local environments.\r\n\r\n## Steps to reproduce\r\n<!--\r\nPrecise steps that someone else can follow in order to see this behavior\r\n-->\r\n\r\nSee the above.\r\n\r\n## Context\r\n<!--\r\nTell us about your environment, including:\r\n * Kolibri version\r\n * Operating system\r\n * Browser\r\n-->\r\n\r\n`develop`\r\nFedora\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport os\n\nfrom .base import * # noqa isort:skip @UnusedWildImport\n\nDEBUG = True\n\n# Settings might be tuples, so switch to lists\nINSTALLED_APPS = list(INSTALLED_APPS) + [\"drf_yasg\", \"django_extensions\"] # noqa F405\nwebpack_middleware = \"kolibri.core.webpack.middleware.WebpackErrorHandler\"\nno_login_popup_middleware = (\n \"kolibri.core.auth.middleware.XhrPreventLoginPromptMiddleware\"\n)\nMIDDLEWARE = list(MIDDLEWARE) + [ # noqa F405\n webpack_middleware,\n no_login_popup_middleware,\n]\n\nINTERNAL_IPS = [\"127.0.0.1\"]\n\nROOT_URLCONF = \"kolibri.deployment.default.dev_urls\"\n\nDEVELOPER_MODE = True\nos.environ.update({\"KOLIBRI_DEVELOPER_MODE\": \"True\"})\n\ntry:\n process_cache = CACHES[\"process_cache\"] # noqa F405\nexcept KeyError:\n process_cache = None\n\n# Create a memcache for each cache\nCACHES = {\n key: {\"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\"}\n for key in CACHES # noqa F405\n}\n\nif process_cache:\n CACHES[\"process_cache\"] = process_cache\n\n\nREST_FRAMEWORK = {\n \"UNAUTHENTICATED_USER\": 
\"kolibri.core.auth.models.KolibriAnonymousUser\",\n \"DEFAULT_AUTHENTICATION_CLASSES\": [\n # Activate basic auth for external API testing tools\n \"rest_framework.authentication.BasicAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n ],\n \"DEFAULT_RENDERER_CLASSES\": (\n \"rest_framework.renderers.JSONRenderer\",\n \"rest_framework.renderers.BrowsableAPIRenderer\",\n ),\n \"EXCEPTION_HANDLER\": \"kolibri.core.utils.exception_handler.custom_exception_handler\",\n}\n\nSWAGGER_SETTINGS = {\"DEFAULT_INFO\": \"kolibri.deployment.default.dev_urls.api_info\"}\n", "path": "kolibri/deployment/default/settings/dev.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport os\n\nfrom .base import * # noqa isort:skip @UnusedWildImport\n\nDEBUG = True\n\n# Settings might be tuples, so switch to lists\nINSTALLED_APPS = list(INSTALLED_APPS) + [\"drf_yasg\"] # noqa F405\nwebpack_middleware = \"kolibri.core.webpack.middleware.WebpackErrorHandler\"\nno_login_popup_middleware = (\n \"kolibri.core.auth.middleware.XhrPreventLoginPromptMiddleware\"\n)\nMIDDLEWARE = list(MIDDLEWARE) + [ # noqa F405\n webpack_middleware,\n no_login_popup_middleware,\n]\n\nINTERNAL_IPS = [\"127.0.0.1\"]\n\nROOT_URLCONF = \"kolibri.deployment.default.dev_urls\"\n\nDEVELOPER_MODE = True\nos.environ.update({\"KOLIBRI_DEVELOPER_MODE\": \"True\"})\n\ntry:\n process_cache = CACHES[\"process_cache\"] # noqa F405\nexcept KeyError:\n process_cache = None\n\n# Create a memcache for each cache\nCACHES = {\n key: {\"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\"}\n for key in CACHES # noqa F405\n}\n\nif process_cache:\n CACHES[\"process_cache\"] = process_cache\n\n\nREST_FRAMEWORK = {\n \"UNAUTHENTICATED_USER\": \"kolibri.core.auth.models.KolibriAnonymousUser\",\n \"DEFAULT_AUTHENTICATION_CLASSES\": [\n # Activate basic auth for external API testing tools\n \"rest_framework.authentication.BasicAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n ],\n \"DEFAULT_RENDERER_CLASSES\": (\n \"rest_framework.renderers.JSONRenderer\",\n \"rest_framework.renderers.BrowsableAPIRenderer\",\n ),\n \"EXCEPTION_HANDLER\": \"kolibri.core.utils.exception_handler.custom_exception_handler\",\n}\n\nSWAGGER_SETTINGS = {\"DEFAULT_INFO\": \"kolibri.deployment.default.dev_urls.api_info\"}\n", "path": "kolibri/deployment/default/settings/dev.py"}]} | 2,032 | 168 |
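Editor's note on the Kolibri entry above: the merged fix simply drops `django_extensions` from the development `INSTALLED_APPS`, since the package is not a declared dependency. As a purely hypothetical alternative (not what the golden diff does), the app could instead be enabled only when the package is importable. The sketch below uses the standard-library `importlib.util.find_spec`; the short `INSTALLED_APPS` list is a stand-in for the apps the real settings module inherits from its base settings.

```python
# Hypothetical sketch: enable django_extensions only when it is installed,
# instead of listing it unconditionally in the dev settings.
import importlib.util

INSTALLED_APPS = ["drf_yasg"]  # stand-in for the apps inherited from base settings

if importlib.util.find_spec("django_extensions") is not None:
    INSTALLED_APPS.append("django_extensions")

print(INSTALLED_APPS)
```

The merged change avoids the conditional entirely, which keeps the dev settings deterministic across environments.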
gh_patches_debug_14152 | rasdani/github-patches | git_diff | scrapy__scrapy-5412 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove Python 2 code from WrappedRequest
[The WrappedRequest class](https://github.com/scrapy/scrapy/blob/06f3d12c1208c380f9f1a16cb36ba2dfa3c244c5/scrapy/http/cookies.py#L148) has methods that can be removed, as [they were only used in Python 3.3 and earlier](https://docs.python.org/3/library/http.cookiejar.html#http.cookiejar.CookieJar.extract_cookies).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/http/cookies.py`
Content:
```
1 import re
2 import time
3 from http.cookiejar import CookieJar as _CookieJar, DefaultCookiePolicy
4
5 from scrapy.utils.httpobj import urlparse_cached
6 from scrapy.utils.python import to_unicode
7
8
9 # Defined in the http.cookiejar module, but undocumented:
10 # https://github.com/python/cpython/blob/v3.9.0/Lib/http/cookiejar.py#L527
11 IPV4_RE = re.compile(r"\.\d+$", re.ASCII)
12
13
14 class CookieJar:
15 def __init__(self, policy=None, check_expired_frequency=10000):
16 self.policy = policy or DefaultCookiePolicy()
17 self.jar = _CookieJar(self.policy)
18 self.jar._cookies_lock = _DummyLock()
19 self.check_expired_frequency = check_expired_frequency
20 self.processed = 0
21
22 def extract_cookies(self, response, request):
23 wreq = WrappedRequest(request)
24 wrsp = WrappedResponse(response)
25 return self.jar.extract_cookies(wrsp, wreq)
26
27 def add_cookie_header(self, request):
28 wreq = WrappedRequest(request)
29 self.policy._now = self.jar._now = int(time.time())
30
31 # the cookiejar implementation iterates through all domains
32 # instead we restrict to potential matches on the domain
33 req_host = urlparse_cached(request).hostname
34 if not req_host:
35 return
36
37 if not IPV4_RE.search(req_host):
38 hosts = potential_domain_matches(req_host)
39 if '.' not in req_host:
40 hosts += [req_host + ".local"]
41 else:
42 hosts = [req_host]
43
44 cookies = []
45 for host in hosts:
46 if host in self.jar._cookies:
47 cookies += self.jar._cookies_for_domain(host, wreq)
48
49 attrs = self.jar._cookie_attrs(cookies)
50 if attrs:
51 if not wreq.has_header("Cookie"):
52 wreq.add_unredirected_header("Cookie", "; ".join(attrs))
53
54 self.processed += 1
55 if self.processed % self.check_expired_frequency == 0:
56 # This is still quite inefficient for large number of cookies
57 self.jar.clear_expired_cookies()
58
59 @property
60 def _cookies(self):
61 return self.jar._cookies
62
63 def clear_session_cookies(self, *args, **kwargs):
64 return self.jar.clear_session_cookies(*args, **kwargs)
65
66 def clear(self, domain=None, path=None, name=None):
67 return self.jar.clear(domain, path, name)
68
69 def __iter__(self):
70 return iter(self.jar)
71
72 def __len__(self):
73 return len(self.jar)
74
75 def set_policy(self, pol):
76 return self.jar.set_policy(pol)
77
78 def make_cookies(self, response, request):
79 wreq = WrappedRequest(request)
80 wrsp = WrappedResponse(response)
81 return self.jar.make_cookies(wrsp, wreq)
82
83 def set_cookie(self, cookie):
84 self.jar.set_cookie(cookie)
85
86 def set_cookie_if_ok(self, cookie, request):
87 self.jar.set_cookie_if_ok(cookie, WrappedRequest(request))
88
89
90 def potential_domain_matches(domain):
91 """Potential domain matches for a cookie
92
93 >>> potential_domain_matches('www.example.com')
94 ['www.example.com', 'example.com', '.www.example.com', '.example.com']
95
96 """
97 matches = [domain]
98 try:
99 start = domain.index('.') + 1
100 end = domain.rindex('.')
101 while start < end:
102 matches.append(domain[start:])
103 start = domain.index('.', start) + 1
104 except ValueError:
105 pass
106 return matches + ['.' + d for d in matches]
107
108
109 class _DummyLock:
110 def acquire(self):
111 pass
112
113 def release(self):
114 pass
115
116
117 class WrappedRequest:
118 """Wraps a scrapy Request class with methods defined by urllib2.Request class to interact with CookieJar class
119
120 see http://docs.python.org/library/urllib2.html#urllib2.Request
121 """
122
123 def __init__(self, request):
124 self.request = request
125
126 def get_full_url(self):
127 return self.request.url
128
129 def get_host(self):
130 return urlparse_cached(self.request).netloc
131
132 def get_type(self):
133 return urlparse_cached(self.request).scheme
134
135 def is_unverifiable(self):
136 """Unverifiable should indicate whether the request is unverifiable, as defined by RFC 2965.
137
138 It defaults to False. An unverifiable request is one whose URL the user did not have the
139 option to approve. For example, if the request is for an image in an
140 HTML document, and the user had no option to approve the automatic
141 fetching of the image, this should be true.
142 """
143 return self.request.meta.get('is_unverifiable', False)
144
145 def get_origin_req_host(self):
146 return urlparse_cached(self.request).hostname
147
148 # python3 uses attributes instead of methods
149 @property
150 def full_url(self):
151 return self.get_full_url()
152
153 @property
154 def host(self):
155 return self.get_host()
156
157 @property
158 def type(self):
159 return self.get_type()
160
161 @property
162 def unverifiable(self):
163 return self.is_unverifiable()
164
165 @property
166 def origin_req_host(self):
167 return self.get_origin_req_host()
168
169 def has_header(self, name):
170 return name in self.request.headers
171
172 def get_header(self, name, default=None):
173 return to_unicode(self.request.headers.get(name, default),
174 errors='replace')
175
176 def header_items(self):
177 return [
178 (to_unicode(k, errors='replace'),
179 [to_unicode(x, errors='replace') for x in v])
180 for k, v in self.request.headers.items()
181 ]
182
183 def add_unredirected_header(self, name, value):
184 self.request.headers.appendlist(name, value)
185
186
187 class WrappedResponse:
188
189 def __init__(self, response):
190 self.response = response
191
192 def info(self):
193 return self
194
195 def get_all(self, name, default=None):
196 return [to_unicode(v, errors='replace')
197 for v in self.response.headers.getlist(name)]
198
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scrapy/http/cookies.py b/scrapy/http/cookies.py
--- a/scrapy/http/cookies.py
+++ b/scrapy/http/cookies.py
@@ -142,10 +142,6 @@
"""
return self.request.meta.get('is_unverifiable', False)
- def get_origin_req_host(self):
- return urlparse_cached(self.request).hostname
-
- # python3 uses attributes instead of methods
@property
def full_url(self):
return self.get_full_url()
@@ -164,7 +160,7 @@
@property
def origin_req_host(self):
- return self.get_origin_req_host()
+ return urlparse_cached(self.request).hostname
def has_header(self, name):
return name in self.request.headers
| {"golden_diff": "diff --git a/scrapy/http/cookies.py b/scrapy/http/cookies.py\n--- a/scrapy/http/cookies.py\n+++ b/scrapy/http/cookies.py\n@@ -142,10 +142,6 @@\n \"\"\"\n return self.request.meta.get('is_unverifiable', False)\n \n- def get_origin_req_host(self):\n- return urlparse_cached(self.request).hostname\n-\n- # python3 uses attributes instead of methods\n @property\n def full_url(self):\n return self.get_full_url()\n@@ -164,7 +160,7 @@\n \n @property\n def origin_req_host(self):\n- return self.get_origin_req_host()\n+ return urlparse_cached(self.request).hostname\n \n def has_header(self, name):\n return name in self.request.headers\n", "issue": "Remove Python 2 code from WrappedRequest\n[The WrappedRequest class](https://github.com/scrapy/scrapy/blob/06f3d12c1208c380f9f1a16cb36ba2dfa3c244c5/scrapy/http/cookies.py#L148) has methods that can be removed, as [they were only used in Python 3.3 and earlier](https://docs.python.org/3/library/http.cookiejar.html#http.cookiejar.CookieJar.extract_cookies).\n", "before_files": [{"content": "import re\nimport time\nfrom http.cookiejar import CookieJar as _CookieJar, DefaultCookiePolicy\n\nfrom scrapy.utils.httpobj import urlparse_cached\nfrom scrapy.utils.python import to_unicode\n\n\n# Defined in the http.cookiejar module, but undocumented:\n# https://github.com/python/cpython/blob/v3.9.0/Lib/http/cookiejar.py#L527\nIPV4_RE = re.compile(r\"\\.\\d+$\", re.ASCII)\n\n\nclass CookieJar:\n def __init__(self, policy=None, check_expired_frequency=10000):\n self.policy = policy or DefaultCookiePolicy()\n self.jar = _CookieJar(self.policy)\n self.jar._cookies_lock = _DummyLock()\n self.check_expired_frequency = check_expired_frequency\n self.processed = 0\n\n def extract_cookies(self, response, request):\n wreq = WrappedRequest(request)\n wrsp = WrappedResponse(response)\n return self.jar.extract_cookies(wrsp, wreq)\n\n def add_cookie_header(self, request):\n wreq = WrappedRequest(request)\n self.policy._now = self.jar._now = int(time.time())\n\n # the cookiejar implementation iterates through all domains\n # instead we restrict to potential matches on the domain\n req_host = urlparse_cached(request).hostname\n if not req_host:\n return\n\n if not IPV4_RE.search(req_host):\n hosts = potential_domain_matches(req_host)\n if '.' 
not in req_host:\n hosts += [req_host + \".local\"]\n else:\n hosts = [req_host]\n\n cookies = []\n for host in hosts:\n if host in self.jar._cookies:\n cookies += self.jar._cookies_for_domain(host, wreq)\n\n attrs = self.jar._cookie_attrs(cookies)\n if attrs:\n if not wreq.has_header(\"Cookie\"):\n wreq.add_unredirected_header(\"Cookie\", \"; \".join(attrs))\n\n self.processed += 1\n if self.processed % self.check_expired_frequency == 0:\n # This is still quite inefficient for large number of cookies\n self.jar.clear_expired_cookies()\n\n @property\n def _cookies(self):\n return self.jar._cookies\n\n def clear_session_cookies(self, *args, **kwargs):\n return self.jar.clear_session_cookies(*args, **kwargs)\n\n def clear(self, domain=None, path=None, name=None):\n return self.jar.clear(domain, path, name)\n\n def __iter__(self):\n return iter(self.jar)\n\n def __len__(self):\n return len(self.jar)\n\n def set_policy(self, pol):\n return self.jar.set_policy(pol)\n\n def make_cookies(self, response, request):\n wreq = WrappedRequest(request)\n wrsp = WrappedResponse(response)\n return self.jar.make_cookies(wrsp, wreq)\n\n def set_cookie(self, cookie):\n self.jar.set_cookie(cookie)\n\n def set_cookie_if_ok(self, cookie, request):\n self.jar.set_cookie_if_ok(cookie, WrappedRequest(request))\n\n\ndef potential_domain_matches(domain):\n \"\"\"Potential domain matches for a cookie\n\n >>> potential_domain_matches('www.example.com')\n ['www.example.com', 'example.com', '.www.example.com', '.example.com']\n\n \"\"\"\n matches = [domain]\n try:\n start = domain.index('.') + 1\n end = domain.rindex('.')\n while start < end:\n matches.append(domain[start:])\n start = domain.index('.', start) + 1\n except ValueError:\n pass\n return matches + ['.' + d for d in matches]\n\n\nclass _DummyLock:\n def acquire(self):\n pass\n\n def release(self):\n pass\n\n\nclass WrappedRequest:\n \"\"\"Wraps a scrapy Request class with methods defined by urllib2.Request class to interact with CookieJar class\n\n see http://docs.python.org/library/urllib2.html#urllib2.Request\n \"\"\"\n\n def __init__(self, request):\n self.request = request\n\n def get_full_url(self):\n return self.request.url\n\n def get_host(self):\n return urlparse_cached(self.request).netloc\n\n def get_type(self):\n return urlparse_cached(self.request).scheme\n\n def is_unverifiable(self):\n \"\"\"Unverifiable should indicate whether the request is unverifiable, as defined by RFC 2965.\n\n It defaults to False. An unverifiable request is one whose URL the user did not have the\n option to approve. 
For example, if the request is for an image in an\n HTML document, and the user had no option to approve the automatic\n fetching of the image, this should be true.\n \"\"\"\n return self.request.meta.get('is_unverifiable', False)\n\n def get_origin_req_host(self):\n return urlparse_cached(self.request).hostname\n\n # python3 uses attributes instead of methods\n @property\n def full_url(self):\n return self.get_full_url()\n\n @property\n def host(self):\n return self.get_host()\n\n @property\n def type(self):\n return self.get_type()\n\n @property\n def unverifiable(self):\n return self.is_unverifiable()\n\n @property\n def origin_req_host(self):\n return self.get_origin_req_host()\n\n def has_header(self, name):\n return name in self.request.headers\n\n def get_header(self, name, default=None):\n return to_unicode(self.request.headers.get(name, default),\n errors='replace')\n\n def header_items(self):\n return [\n (to_unicode(k, errors='replace'),\n [to_unicode(x, errors='replace') for x in v])\n for k, v in self.request.headers.items()\n ]\n\n def add_unredirected_header(self, name, value):\n self.request.headers.appendlist(name, value)\n\n\nclass WrappedResponse:\n\n def __init__(self, response):\n self.response = response\n\n def info(self):\n return self\n\n def get_all(self, name, default=None):\n return [to_unicode(v, errors='replace')\n for v in self.response.headers.getlist(name)]\n", "path": "scrapy/http/cookies.py"}], "after_files": [{"content": "import re\nimport time\nfrom http.cookiejar import CookieJar as _CookieJar, DefaultCookiePolicy\n\nfrom scrapy.utils.httpobj import urlparse_cached\nfrom scrapy.utils.python import to_unicode\n\n\n# Defined in the http.cookiejar module, but undocumented:\n# https://github.com/python/cpython/blob/v3.9.0/Lib/http/cookiejar.py#L527\nIPV4_RE = re.compile(r\"\\.\\d+$\", re.ASCII)\n\n\nclass CookieJar:\n def __init__(self, policy=None, check_expired_frequency=10000):\n self.policy = policy or DefaultCookiePolicy()\n self.jar = _CookieJar(self.policy)\n self.jar._cookies_lock = _DummyLock()\n self.check_expired_frequency = check_expired_frequency\n self.processed = 0\n\n def extract_cookies(self, response, request):\n wreq = WrappedRequest(request)\n wrsp = WrappedResponse(response)\n return self.jar.extract_cookies(wrsp, wreq)\n\n def add_cookie_header(self, request):\n wreq = WrappedRequest(request)\n self.policy._now = self.jar._now = int(time.time())\n\n # the cookiejar implementation iterates through all domains\n # instead we restrict to potential matches on the domain\n req_host = urlparse_cached(request).hostname\n if not req_host:\n return\n\n if not IPV4_RE.search(req_host):\n hosts = potential_domain_matches(req_host)\n if '.' 
not in req_host:\n hosts += [req_host + \".local\"]\n else:\n hosts = [req_host]\n\n cookies = []\n for host in hosts:\n if host in self.jar._cookies:\n cookies += self.jar._cookies_for_domain(host, wreq)\n\n attrs = self.jar._cookie_attrs(cookies)\n if attrs:\n if not wreq.has_header(\"Cookie\"):\n wreq.add_unredirected_header(\"Cookie\", \"; \".join(attrs))\n\n self.processed += 1\n if self.processed % self.check_expired_frequency == 0:\n # This is still quite inefficient for large number of cookies\n self.jar.clear_expired_cookies()\n\n @property\n def _cookies(self):\n return self.jar._cookies\n\n def clear_session_cookies(self, *args, **kwargs):\n return self.jar.clear_session_cookies(*args, **kwargs)\n\n def clear(self, domain=None, path=None, name=None):\n return self.jar.clear(domain, path, name)\n\n def __iter__(self):\n return iter(self.jar)\n\n def __len__(self):\n return len(self.jar)\n\n def set_policy(self, pol):\n return self.jar.set_policy(pol)\n\n def make_cookies(self, response, request):\n wreq = WrappedRequest(request)\n wrsp = WrappedResponse(response)\n return self.jar.make_cookies(wrsp, wreq)\n\n def set_cookie(self, cookie):\n self.jar.set_cookie(cookie)\n\n def set_cookie_if_ok(self, cookie, request):\n self.jar.set_cookie_if_ok(cookie, WrappedRequest(request))\n\n\ndef potential_domain_matches(domain):\n \"\"\"Potential domain matches for a cookie\n\n >>> potential_domain_matches('www.example.com')\n ['www.example.com', 'example.com', '.www.example.com', '.example.com']\n\n \"\"\"\n matches = [domain]\n try:\n start = domain.index('.') + 1\n end = domain.rindex('.')\n while start < end:\n matches.append(domain[start:])\n start = domain.index('.', start) + 1\n except ValueError:\n pass\n return matches + ['.' + d for d in matches]\n\n\nclass _DummyLock:\n def acquire(self):\n pass\n\n def release(self):\n pass\n\n\nclass WrappedRequest:\n \"\"\"Wraps a scrapy Request class with methods defined by urllib2.Request class to interact with CookieJar class\n\n see http://docs.python.org/library/urllib2.html#urllib2.Request\n \"\"\"\n\n def __init__(self, request):\n self.request = request\n\n def get_full_url(self):\n return self.request.url\n\n def get_host(self):\n return urlparse_cached(self.request).netloc\n\n def get_type(self):\n return urlparse_cached(self.request).scheme\n\n def is_unverifiable(self):\n \"\"\"Unverifiable should indicate whether the request is unverifiable, as defined by RFC 2965.\n\n It defaults to False. An unverifiable request is one whose URL the user did not have the\n option to approve. 
For example, if the request is for an image in an\n HTML document, and the user had no option to approve the automatic\n fetching of the image, this should be true.\n \"\"\"\n return self.request.meta.get('is_unverifiable', False)\n\n @property\n def full_url(self):\n return self.get_full_url()\n\n @property\n def host(self):\n return self.get_host()\n\n @property\n def type(self):\n return self.get_type()\n\n @property\n def unverifiable(self):\n return self.is_unverifiable()\n\n @property\n def origin_req_host(self):\n return urlparse_cached(self.request).hostname\n\n def has_header(self, name):\n return name in self.request.headers\n\n def get_header(self, name, default=None):\n return to_unicode(self.request.headers.get(name, default),\n errors='replace')\n\n def header_items(self):\n return [\n (to_unicode(k, errors='replace'),\n [to_unicode(x, errors='replace') for x in v])\n for k, v in self.request.headers.items()\n ]\n\n def add_unredirected_header(self, name, value):\n self.request.headers.appendlist(name, value)\n\n\nclass WrappedResponse:\n\n def __init__(self, response):\n self.response = response\n\n def info(self):\n return self\n\n def get_all(self, name, default=None):\n return [to_unicode(v, errors='replace')\n for v in self.response.headers.getlist(name)]\n", "path": "scrapy/http/cookies.py"}]} | 2,191 | 177 |
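Editor's note on the Scrapy entry above: the removal is safe because, as the issue points out, modern `http.cookiejar` reads `origin_req_host` from the request object as an attribute (here backed by the `origin_req_host` property), so the `get_origin_req_host` method and the "python3 uses attributes instead of methods" comment were dead code. Below is a rough sanity check, assuming a Scrapy checkout with the patch applied; the example URL is arbitrary.

```python
# Rough check of the patched WrappedRequest surface (assumes the diff above is applied).
from scrapy.http import Request
from scrapy.http.cookies import WrappedRequest

wreq = WrappedRequest(Request("https://www.example.com/index.html"))

assert wreq.origin_req_host == "www.example.com"  # attribute, as http.cookiejar expects
assert not hasattr(wreq, "get_origin_req_host")   # the Python 2-era getter is gone
```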
gh_patches_debug_17128 | rasdani/github-patches | git_diff | google__flax-965 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
When jax_enable_x64 is set Adam promotes everything to float64
### Problem you have encountered:
When `jax_enable_x64` is set, Adam's `apply_gradient` method will promote all float32 arrays to float64, potentially unexpectedly degrading performance.
This is due to jax's wonky type promotion semantics. The offending line is:
https://github.com/google/flax/blob/3e36db3e5e3b8e6e1777d612f270e7948238aa9c/flax/optim/adam.py#L82
which promotes like:
```python
jnp.array([0], dtype=jnp.int32) + 1. # == DeviceArray([1.], dtype=float64)
```
and then cascades from there promoting everything to float64
### What you expected to happen:
Arrays should retain their dtypes on optimizer updates.
### Logs, error messages, etc:
### Steps to reproduce:
```python
from jax.config import config
config.update("jax_enable_x64", True)
import jax.numpy as jnp
import flax
opt = flax.optim.Adam(1e-3).create(
{"x": jnp.zeros(10, dtype=jnp.float32)})
assert opt.target["x"].dtype == jnp.float32
opt = opt.apply_gradient({"x": jnp.zeros(10, dtype=jnp.float32)})
# This fails, since dtype was promoted to float64
assert opt.target["x"].dtype == jnp.float32
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flax/optim/lamb.py`
Content:
```
1 # Copyright 2021 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from .. import struct
16
17 from jax import lax
18 import jax.numpy as jnp
19
20 import numpy as onp
21
22 from .base import OptimizerDef
23
24 @struct.dataclass
25 class _LAMBHyperParams:
26 learning_rate: onp.ndarray
27 beta1: onp.ndarray
28 beta2: onp.ndarray
29 weight_decay: onp.ndarray
30 eps: onp.ndarray
31
32
33 @struct.dataclass
34 class _LAMBParamState:
35 grad_ema: onp.ndarray
36 grad_sq_ema: onp.ndarray
37
38
39 class LAMB(OptimizerDef):
40 """Layerwise adaptive moments for batch (LAMB) optimizer.
41
42 See https://arxiv.org/abs/1904.00962
43 """
44
45 def __init__(self, learning_rate=None, beta1=0.9, beta2=0.999, weight_decay=0,
46 eps=1e-6):
47 """Constructor for the LAMB optimizer.
48
49 Args:
50 learning_rate: the step size used to update the parameters.
51 beta1: the coefficient used for the moving average of the gradient
52 (default: 0.9).
53 beta2: the coefficient used for the moving average of the squared gradient
54 (default: 0.999).
55 weight_decay: weight decay coefficient to apply
56 eps: epsilon used for Adam update computation (default: 1e-6).
57 """
58
59 hyper_params = _LAMBHyperParams(
60 learning_rate, beta1, beta2, weight_decay, eps)
61 super().__init__(hyper_params)
62
63 def init_param_state(self, param):
64 return _LAMBParamState(jnp.zeros_like(param), jnp.zeros_like(param))
65
66 def apply_param_gradient(self, step, hyper_params, param, state, grad):
67 assert hyper_params.learning_rate is not None, 'no learning rate provided.'
68 beta1 = hyper_params.beta1
69 beta2 = hyper_params.beta2
70 weight_decay = hyper_params.weight_decay
71 learning_rate = hyper_params.learning_rate
72
73 grad_sq = lax.square(grad)
74 grad_ema = beta1 * state.grad_ema + (1. - beta1) * grad
75 grad_sq_ema = beta2 * state.grad_sq_ema + (1. - beta2) * grad_sq
76
77 t = step + 1.
78 grad_ema_corr = grad_ema / (1. - beta1 ** t)
79 grad_sq_ema_corr = grad_sq_ema / (1. - beta2 ** t)
80
81 update = grad_ema_corr / (jnp.sqrt(grad_sq_ema_corr) + hyper_params.eps)
82
83 if weight_decay != 0.0:
84 update += weight_decay * param
85
86 param_norm = jnp.linalg.norm(param)
87 update_norm = jnp.linalg.norm(update)
88 trust_ratio = jnp.where(
89 param_norm + update_norm > 0., param_norm / update_norm, 1.)
90
91 new_param = param - trust_ratio * learning_rate * update
92 new_state = _LAMBParamState(grad_ema, grad_sq_ema)
93 return new_param, new_state
94
```
Path: `flax/optim/adam.py`
Content:
```
1 # Copyright 2021 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from .. import struct
16
17 import jax.numpy as jnp
18 from jax import lax
19
20 import numpy as onp
21
22 from .base import OptimizerDef
23
24
25 @struct.dataclass
26 class _AdamHyperParams:
27 learning_rate: onp.ndarray
28 beta1: onp.ndarray
29 beta2: onp.ndarray
30 eps: onp.ndarray
31 weight_decay: onp.ndarray
32
33
34 @struct.dataclass
35 class _AdamParamState:
36 grad_ema: onp.ndarray
37 grad_sq_ema: onp.ndarray
38
39
40 class Adam(OptimizerDef):
41 """Adam optimizer.
42
43 Implements Adam - a stochastic gradient descent method (SGD) that computes
44 individual adaptive learning rates for different parameters from estimates of
45 first- and second-order moments of the gradients.
46
47 Reference: [Adam: A Method
48 for Stochastic Optimization](https://arxiv.org/abs/1412.6980v8) (Kingma and
49 Ba, 2014).
50
51 Attributes:
52 learning_rate: The learning rate — the step size used to update the
53 parameters.
54 beta1: The exponentian decay rate for the 1st moment estimates. The
55 coefficient used to calculate the first moments of the gradients (the
56 moving average of the gradient) (default: 0.9).
57 beta2: The exponentian decay rate for the 2nd moment estimates. The
58 coefficient used to calculate the second moments of the gradients (the
59 moving average of the gradient magnitude) (default: 0.999).
60 eps: A small scalar added to the gradient magnitude estimate to improve
61 numerical stability (default: 1e-8).
62 weight_decay: The learning rate decay (default: 0.0).
63 """
64
65 def __init__(self,
66 learning_rate=None,
67 beta1=0.9,
68 beta2=0.999,
69 eps=1e-8,
70 weight_decay=0.0):
71 """Constructor for the Adam optimizer.
72
73 Args:
74 learning_rate: The step size used to update the parameters.
75 beta1: The coefficient used for the moving average of the
76 gradient (default: 0.9).
77 beta2: The coefficient used for the moving average of the
78 gradient magnitude (default: 0.999).
79 eps: The term added to the gradient magnitude estimate for
80 numerical stability (default: 1e-8).
81 weight_decay: AdamW style weight decay rate
82 (relative to learning rate) (default: 0.0).
83 """
84 hyper_params = _AdamHyperParams(learning_rate, beta1, beta2, eps,
85 weight_decay)
86 super().__init__(hyper_params)
87
88 def init_param_state(self, param):
89 return _AdamParamState(jnp.zeros_like(param), jnp.zeros_like(param))
90
91 def apply_param_gradient(self, step, hyper_params, param, state, grad):
92 assert hyper_params.learning_rate is not None, 'no learning rate provided.'
93 beta1 = hyper_params.beta1
94 beta2 = hyper_params.beta2
95 weight_decay = hyper_params.weight_decay
96 grad_sq = lax.square(grad)
97 grad_ema = beta1 * state.grad_ema + (1. - beta1) * grad
98 grad_sq_ema = beta2 * state.grad_sq_ema + (1. - beta2) * grad_sq
99
100 # bias correction
101 t = step + 1.
102 grad_ema_corr = grad_ema / (1 - beta1 ** t)
103 grad_sq_ema_corr = grad_sq_ema / (1 - beta2 ** t)
104
105 denom = jnp.sqrt(grad_sq_ema_corr) + hyper_params.eps
106 new_param = param - hyper_params.learning_rate * grad_ema_corr / denom
107 new_param -= hyper_params.learning_rate * weight_decay * param
108 new_state = _AdamParamState(grad_ema, grad_sq_ema)
109 return new_param, new_state
110
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/flax/optim/adam.py b/flax/optim/adam.py
--- a/flax/optim/adam.py
+++ b/flax/optim/adam.py
@@ -98,7 +98,7 @@
grad_sq_ema = beta2 * state.grad_sq_ema + (1. - beta2) * grad_sq
# bias correction
- t = step + 1.
+ t = jnp.array(step + 1, lax.dtype(param.dtype))
grad_ema_corr = grad_ema / (1 - beta1 ** t)
grad_sq_ema_corr = grad_sq_ema / (1 - beta2 ** t)
diff --git a/flax/optim/lamb.py b/flax/optim/lamb.py
--- a/flax/optim/lamb.py
+++ b/flax/optim/lamb.py
@@ -74,7 +74,7 @@
grad_ema = beta1 * state.grad_ema + (1. - beta1) * grad
grad_sq_ema = beta2 * state.grad_sq_ema + (1. - beta2) * grad_sq
- t = step + 1.
+ t = jnp.array(step + 1, lax.dtype(param.dtype))
grad_ema_corr = grad_ema / (1. - beta1 ** t)
grad_sq_ema_corr = grad_sq_ema / (1. - beta2 ** t)
| {"golden_diff": "diff --git a/flax/optim/adam.py b/flax/optim/adam.py\n--- a/flax/optim/adam.py\n+++ b/flax/optim/adam.py\n@@ -98,7 +98,7 @@\n grad_sq_ema = beta2 * state.grad_sq_ema + (1. - beta2) * grad_sq\n \n # bias correction\n- t = step + 1.\n+ t = jnp.array(step + 1, lax.dtype(param.dtype))\n grad_ema_corr = grad_ema / (1 - beta1 ** t)\n grad_sq_ema_corr = grad_sq_ema / (1 - beta2 ** t)\n \ndiff --git a/flax/optim/lamb.py b/flax/optim/lamb.py\n--- a/flax/optim/lamb.py\n+++ b/flax/optim/lamb.py\n@@ -74,7 +74,7 @@\n grad_ema = beta1 * state.grad_ema + (1. - beta1) * grad\n grad_sq_ema = beta2 * state.grad_sq_ema + (1. - beta2) * grad_sq\n \n- t = step + 1.\n+ t = jnp.array(step + 1, lax.dtype(param.dtype))\n grad_ema_corr = grad_ema / (1. - beta1 ** t)\n grad_sq_ema_corr = grad_sq_ema / (1. - beta2 ** t)\n", "issue": "When jax_enable_x64 is set Adam promotes everything to float64\n\r\n### Problem you have encountered:\r\n\r\nWhen `jax_enable_x64` is set, Adam's `apply_gradient` method will promote all float32 arrays to float64, potentially unexpectedly degrading performance.\r\n\r\nThis is due to jax's wonky type promotion semantics. The offending line is:\r\nhttps://github.com/google/flax/blob/3e36db3e5e3b8e6e1777d612f270e7948238aa9c/flax/optim/adam.py#L82\r\n\r\nwhich promotes like:\r\n```python\r\njnp.array([0], dtype=jnp.int32) + 1. # == DeviceArray([1.], dtype=float64)\r\n```\r\nand then cascades from there promoting everything to float64\r\n\r\n### What you expected to happen:\r\n\r\nArrays should retain their dtypes on optimizer updates.\r\n\r\n### Logs, error messages, etc:\r\n\r\n\r\n### Steps to reproduce:\r\n\r\n```python\r\nfrom jax.config import config\r\nconfig.update(\"jax_enable_x64\", True)\r\n\r\nimport jax.numpy as jnp\r\nimport flax\r\n\r\nopt = flax.optim.Adam(1e-3).create(\r\n {\"x\": jnp.zeros(10, dtype=jnp.float32)})\r\n\r\nassert opt.target[\"x\"].dtype == jnp.float32\r\n\r\nopt = opt.apply_gradient({\"x\": jnp.zeros(10, dtype=jnp.float32)})\r\n\r\n# This fails, since dtype was promoted to float64\r\nassert opt.target[\"x\"].dtype == jnp.float32\r\n```\n", "before_files": [{"content": "# Copyright 2021 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom .. 
import struct\n\nfrom jax import lax\nimport jax.numpy as jnp\n\nimport numpy as onp\n\nfrom .base import OptimizerDef\n\[email protected]\nclass _LAMBHyperParams:\n learning_rate: onp.ndarray\n beta1: onp.ndarray\n beta2: onp.ndarray\n weight_decay: onp.ndarray\n eps: onp.ndarray\n\n\[email protected]\nclass _LAMBParamState:\n grad_ema: onp.ndarray\n grad_sq_ema: onp.ndarray\n\n\nclass LAMB(OptimizerDef):\n \"\"\"Layerwise adaptive moments for batch (LAMB) optimizer.\n\n See https://arxiv.org/abs/1904.00962\n \"\"\"\n\n def __init__(self, learning_rate=None, beta1=0.9, beta2=0.999, weight_decay=0,\n eps=1e-6):\n \"\"\"Constructor for the LAMB optimizer.\n\n Args:\n learning_rate: the step size used to update the parameters.\n beta1: the coefficient used for the moving average of the gradient\n (default: 0.9).\n beta2: the coefficient used for the moving average of the squared gradient\n (default: 0.999).\n weight_decay: weight decay coefficient to apply\n eps: epsilon used for Adam update computation (default: 1e-6).\n \"\"\"\n\n hyper_params = _LAMBHyperParams(\n learning_rate, beta1, beta2, weight_decay, eps)\n super().__init__(hyper_params)\n\n def init_param_state(self, param):\n return _LAMBParamState(jnp.zeros_like(param), jnp.zeros_like(param))\n\n def apply_param_gradient(self, step, hyper_params, param, state, grad):\n assert hyper_params.learning_rate is not None, 'no learning rate provided.'\n beta1 = hyper_params.beta1\n beta2 = hyper_params.beta2\n weight_decay = hyper_params.weight_decay\n learning_rate = hyper_params.learning_rate\n\n grad_sq = lax.square(grad)\n grad_ema = beta1 * state.grad_ema + (1. - beta1) * grad\n grad_sq_ema = beta2 * state.grad_sq_ema + (1. - beta2) * grad_sq\n\n t = step + 1.\n grad_ema_corr = grad_ema / (1. - beta1 ** t)\n grad_sq_ema_corr = grad_sq_ema / (1. - beta2 ** t)\n\n update = grad_ema_corr / (jnp.sqrt(grad_sq_ema_corr) + hyper_params.eps)\n\n if weight_decay != 0.0:\n update += weight_decay * param\n\n param_norm = jnp.linalg.norm(param)\n update_norm = jnp.linalg.norm(update)\n trust_ratio = jnp.where(\n param_norm + update_norm > 0., param_norm / update_norm, 1.)\n\n new_param = param - trust_ratio * learning_rate * update\n new_state = _LAMBParamState(grad_ema, grad_sq_ema)\n return new_param, new_state\n", "path": "flax/optim/lamb.py"}, {"content": "# Copyright 2021 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom .. 
import struct\n\nimport jax.numpy as jnp\nfrom jax import lax\n\nimport numpy as onp\n\nfrom .base import OptimizerDef\n\n\[email protected]\nclass _AdamHyperParams:\n learning_rate: onp.ndarray\n beta1: onp.ndarray\n beta2: onp.ndarray\n eps: onp.ndarray\n weight_decay: onp.ndarray\n\n\[email protected]\nclass _AdamParamState:\n grad_ema: onp.ndarray\n grad_sq_ema: onp.ndarray\n\n\nclass Adam(OptimizerDef):\n \"\"\"Adam optimizer.\n\n Implements Adam - a stochastic gradient descent method (SGD) that computes\n individual adaptive learning rates for different parameters from estimates of\n first- and second-order moments of the gradients. \n \n Reference: [Adam: A Method\n for Stochastic Optimization](https://arxiv.org/abs/1412.6980v8) (Kingma and\n Ba, 2014).\n\n Attributes:\n learning_rate: The learning rate \u2014 the step size used to update the\n parameters.\n beta1: The exponentian decay rate for the 1st moment estimates. The\n coefficient used to calculate the first moments of the gradients (the\n moving average of the gradient) (default: 0.9).\n beta2: The exponentian decay rate for the 2nd moment estimates. The\n coefficient used to calculate the second moments of the gradients (the\n moving average of the gradient magnitude) (default: 0.999).\n eps: A small scalar added to the gradient magnitude estimate to improve\n numerical stability (default: 1e-8).\n weight_decay: The learning rate decay (default: 0.0).\n \"\"\"\n\n def __init__(self,\n learning_rate=None,\n beta1=0.9,\n beta2=0.999,\n eps=1e-8,\n weight_decay=0.0):\n \"\"\"Constructor for the Adam optimizer.\n\n Args:\n learning_rate: The step size used to update the parameters.\n beta1: The coefficient used for the moving average of the\n gradient (default: 0.9).\n beta2: The coefficient used for the moving average of the\n gradient magnitude (default: 0.999).\n eps: The term added to the gradient magnitude estimate for\n numerical stability (default: 1e-8).\n weight_decay: AdamW style weight decay rate\n (relative to learning rate) (default: 0.0).\n \"\"\"\n hyper_params = _AdamHyperParams(learning_rate, beta1, beta2, eps,\n weight_decay)\n super().__init__(hyper_params)\n\n def init_param_state(self, param):\n return _AdamParamState(jnp.zeros_like(param), jnp.zeros_like(param))\n\n def apply_param_gradient(self, step, hyper_params, param, state, grad):\n assert hyper_params.learning_rate is not None, 'no learning rate provided.'\n beta1 = hyper_params.beta1\n beta2 = hyper_params.beta2\n weight_decay = hyper_params.weight_decay\n grad_sq = lax.square(grad)\n grad_ema = beta1 * state.grad_ema + (1. - beta1) * grad\n grad_sq_ema = beta2 * state.grad_sq_ema + (1. 
- beta2) * grad_sq\n\n # bias correction\n t = step + 1.\n grad_ema_corr = grad_ema / (1 - beta1 ** t)\n grad_sq_ema_corr = grad_sq_ema / (1 - beta2 ** t)\n\n denom = jnp.sqrt(grad_sq_ema_corr) + hyper_params.eps\n new_param = param - hyper_params.learning_rate * grad_ema_corr / denom\n new_param -= hyper_params.learning_rate * weight_decay * param\n new_state = _AdamParamState(grad_ema, grad_sq_ema)\n return new_param, new_state\n", "path": "flax/optim/adam.py"}], "after_files": [{"content": "# Copyright 2021 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom .. import struct\n\nfrom jax import lax\nimport jax.numpy as jnp\n\nimport numpy as onp\n\nfrom .base import OptimizerDef\n\[email protected]\nclass _LAMBHyperParams:\n learning_rate: onp.ndarray\n beta1: onp.ndarray\n beta2: onp.ndarray\n weight_decay: onp.ndarray\n eps: onp.ndarray\n\n\[email protected]\nclass _LAMBParamState:\n grad_ema: onp.ndarray\n grad_sq_ema: onp.ndarray\n\n\nclass LAMB(OptimizerDef):\n \"\"\"Layerwise adaptive moments for batch (LAMB) optimizer.\n\n See https://arxiv.org/abs/1904.00962\n \"\"\"\n\n def __init__(self, learning_rate=None, beta1=0.9, beta2=0.999, weight_decay=0,\n eps=1e-6):\n \"\"\"Constructor for the LAMB optimizer.\n\n Args:\n learning_rate: the step size used to update the parameters.\n beta1: the coefficient used for the moving average of the gradient\n (default: 0.9).\n beta2: the coefficient used for the moving average of the squared gradient\n (default: 0.999).\n weight_decay: weight decay coefficient to apply\n eps: epsilon used for Adam update computation (default: 1e-6).\n \"\"\"\n\n hyper_params = _LAMBHyperParams(\n learning_rate, beta1, beta2, weight_decay, eps)\n super().__init__(hyper_params)\n\n def init_param_state(self, param):\n return _LAMBParamState(jnp.zeros_like(param), jnp.zeros_like(param))\n\n def apply_param_gradient(self, step, hyper_params, param, state, grad):\n assert hyper_params.learning_rate is not None, 'no learning rate provided.'\n beta1 = hyper_params.beta1\n beta2 = hyper_params.beta2\n weight_decay = hyper_params.weight_decay\n learning_rate = hyper_params.learning_rate\n\n grad_sq = lax.square(grad)\n grad_ema = beta1 * state.grad_ema + (1. - beta1) * grad\n grad_sq_ema = beta2 * state.grad_sq_ema + (1. - beta2) * grad_sq\n\n t = jnp.array(step + 1, lax.dtype(param.dtype))\n grad_ema_corr = grad_ema / (1. - beta1 ** t)\n grad_sq_ema_corr = grad_sq_ema / (1. 
- beta2 ** t)\n\n update = grad_ema_corr / (jnp.sqrt(grad_sq_ema_corr) + hyper_params.eps)\n\n if weight_decay != 0.0:\n update += weight_decay * param\n\n param_norm = jnp.linalg.norm(param)\n update_norm = jnp.linalg.norm(update)\n trust_ratio = jnp.where(\n param_norm + update_norm > 0., param_norm / update_norm, 1.)\n\n new_param = param - trust_ratio * learning_rate * update\n new_state = _LAMBParamState(grad_ema, grad_sq_ema)\n return new_param, new_state\n", "path": "flax/optim/lamb.py"}, {"content": "# Copyright 2021 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom .. import struct\n\nimport jax.numpy as jnp\nfrom jax import lax\n\nimport numpy as onp\n\nfrom .base import OptimizerDef\n\n\[email protected]\nclass _AdamHyperParams:\n learning_rate: onp.ndarray\n beta1: onp.ndarray\n beta2: onp.ndarray\n eps: onp.ndarray\n weight_decay: onp.ndarray\n\n\[email protected]\nclass _AdamParamState:\n grad_ema: onp.ndarray\n grad_sq_ema: onp.ndarray\n\n\nclass Adam(OptimizerDef):\n \"\"\"Adam optimizer.\n\n Implements Adam - a stochastic gradient descent method (SGD) that computes\n individual adaptive learning rates for different parameters from estimates of\n first- and second-order moments of the gradients. \n \n Reference: [Adam: A Method\n for Stochastic Optimization](https://arxiv.org/abs/1412.6980v8) (Kingma and\n Ba, 2014).\n\n Attributes:\n learning_rate: The learning rate \u2014 the step size used to update the\n parameters.\n beta1: The exponentian decay rate for the 1st moment estimates. The\n coefficient used to calculate the first moments of the gradients (the\n moving average of the gradient) (default: 0.9).\n beta2: The exponentian decay rate for the 2nd moment estimates. 
The\n coefficient used to calculate the second moments of the gradients (the\n moving average of the gradient magnitude) (default: 0.999).\n eps: A small scalar added to the gradient magnitude estimate to improve\n numerical stability (default: 1e-8).\n weight_decay: The learning rate decay (default: 0.0).\n \"\"\"\n\n def __init__(self,\n learning_rate=None,\n beta1=0.9,\n beta2=0.999,\n eps=1e-8,\n weight_decay=0.0):\n \"\"\"Constructor for the Adam optimizer.\n\n Args:\n learning_rate: The step size used to update the parameters.\n beta1: The coefficient used for the moving average of the\n gradient (default: 0.9).\n beta2: The coefficient used for the moving average of the\n gradient magnitude (default: 0.999).\n eps: The term added to the gradient magnitude estimate for\n numerical stability (default: 1e-8).\n weight_decay: AdamW style weight decay rate\n (relative to learning rate) (default: 0.0).\n \"\"\"\n hyper_params = _AdamHyperParams(learning_rate, beta1, beta2, eps,\n weight_decay)\n super().__init__(hyper_params)\n\n def init_param_state(self, param):\n return _AdamParamState(jnp.zeros_like(param), jnp.zeros_like(param))\n\n def apply_param_gradient(self, step, hyper_params, param, state, grad):\n assert hyper_params.learning_rate is not None, 'no learning rate provided.'\n beta1 = hyper_params.beta1\n beta2 = hyper_params.beta2\n weight_decay = hyper_params.weight_decay\n grad_sq = lax.square(grad)\n grad_ema = beta1 * state.grad_ema + (1. - beta1) * grad\n grad_sq_ema = beta2 * state.grad_sq_ema + (1. - beta2) * grad_sq\n\n # bias correction\n t = jnp.array(step + 1, lax.dtype(param.dtype))\n grad_ema_corr = grad_ema / (1 - beta1 ** t)\n grad_sq_ema_corr = grad_sq_ema / (1 - beta2 ** t)\n\n denom = jnp.sqrt(grad_sq_ema_corr) + hyper_params.eps\n new_param = param - hyper_params.learning_rate * grad_ema_corr / denom\n new_param -= hyper_params.learning_rate * weight_decay * param\n new_state = _AdamParamState(grad_ema, grad_sq_ema)\n return new_param, new_state\n", "path": "flax/optim/adam.py"}]} | 2,862 | 315 |
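Editor's note on the Flax entry above: the one-line change in each optimizer casts the bias-correction step counter to the parameter dtype before it enters the update, which keeps JAX's scalar promotion from dragging float32 parameters up to float64 once `jax_enable_x64` is enabled. The sketch below mirrors the repro from the issue; it assumes a JAX version where `from jax.config import config` still works and where `lax.dtype` behaves as used in the patch, and the constants 0.1 and 0.9 are arbitrary stand-ins for a learning rate and beta1.

```python
# Minimal sketch of the promotion problem and the patched cast, after the issue's repro.
from jax.config import config
config.update("jax_enable_x64", True)

import jax.numpy as jnp
from jax import lax

step = jnp.array(0, dtype=jnp.int32)
param = jnp.zeros(10, dtype=jnp.float32)

print((step + 1.0).dtype)  # float64 here: int32 + Python float goes to the default (64-bit) float

t = jnp.array(step + 1, lax.dtype(param.dtype))  # the patched form
print(t.dtype)             # float32: pinned to the parameter dtype

update = param - 0.1 * (1.0 / (1.0 - 0.9 ** t))  # bias-corrected-style update stays float32
assert update.dtype == jnp.float32
```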
gh_patches_debug_31690 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-2237 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Otel Spec forbids B3 propagator from propagating `X-B3-ParentSpanId`
From the [spec](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/context/api-propagators.md#b3-inject)
> MUST NOT propagate X-B3-ParentSpanId as OpenTelemetry does not support reusing the same id for both sides of a request.
But we do it [here](https://github.com/open-telemetry/opentelemetry-python/blob/61c2d6e1508c75cce4b50ff7b28712c944b94128/propagator/opentelemetry-propagator-b3/src/opentelemetry/propagators/b3/__init__.py#L152-L158)
This should be removed as the spec forbids it.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `propagator/opentelemetry-propagator-b3/src/opentelemetry/propagators/b3/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import typing
16 from re import compile as re_compile
17
18 from deprecated import deprecated
19
20 from opentelemetry import trace
21 from opentelemetry.context import Context
22 from opentelemetry.propagators.textmap import (
23 CarrierT,
24 Getter,
25 Setter,
26 TextMapPropagator,
27 default_getter,
28 default_setter,
29 )
30 from opentelemetry.trace import format_span_id, format_trace_id
31
32
33 class B3MultiFormat(TextMapPropagator):
34 """Propagator for the B3 HTTP multi-header format.
35
36 See: https://github.com/openzipkin/b3-propagation
37 https://github.com/openzipkin/b3-propagation#multiple-headers
38 """
39
40 SINGLE_HEADER_KEY = "b3"
41 TRACE_ID_KEY = "x-b3-traceid"
42 SPAN_ID_KEY = "x-b3-spanid"
43 PARENT_SPAN_ID_KEY = "x-b3-parentspanid"
44 SAMPLED_KEY = "x-b3-sampled"
45 FLAGS_KEY = "x-b3-flags"
46 _SAMPLE_PROPAGATE_VALUES = set(["1", "True", "true", "d"])
47 _trace_id_regex = re_compile(r"[\da-fA-F]{16}|[\da-fA-F]{32}")
48 _span_id_regex = re_compile(r"[\da-fA-F]{16}")
49
50 def extract(
51 self,
52 carrier: CarrierT,
53 context: typing.Optional[Context] = None,
54 getter: Getter = default_getter,
55 ) -> Context:
56 if context is None:
57 context = Context()
58 trace_id = trace.INVALID_TRACE_ID
59 span_id = trace.INVALID_SPAN_ID
60 sampled = "0"
61 flags = None
62
63 single_header = _extract_first_element(
64 getter.get(carrier, self.SINGLE_HEADER_KEY)
65 )
66 if single_header:
67 # The b3 spec calls for the sampling state to be
68 # "deferred", which is unspecified. This concept does not
69 # translate to SpanContext, so we set it as recorded.
70 sampled = "1"
71 fields = single_header.split("-", 4)
72
73 if len(fields) == 1:
74 sampled = fields[0]
75 elif len(fields) == 2:
76 trace_id, span_id = fields
77 elif len(fields) == 3:
78 trace_id, span_id, sampled = fields
79 elif len(fields) == 4:
80 trace_id, span_id, sampled, _ = fields
81 else:
82 trace_id = (
83 _extract_first_element(getter.get(carrier, self.TRACE_ID_KEY))
84 or trace_id
85 )
86 span_id = (
87 _extract_first_element(getter.get(carrier, self.SPAN_ID_KEY))
88 or span_id
89 )
90 sampled = (
91 _extract_first_element(getter.get(carrier, self.SAMPLED_KEY))
92 or sampled
93 )
94 flags = (
95 _extract_first_element(getter.get(carrier, self.FLAGS_KEY))
96 or flags
97 )
98
99 if (
100 trace_id == trace.INVALID_TRACE_ID
101 or span_id == trace.INVALID_SPAN_ID
102 or self._trace_id_regex.fullmatch(trace_id) is None
103 or self._span_id_regex.fullmatch(span_id) is None
104 ):
105 return context
106
107 trace_id = int(trace_id, 16)
108 span_id = int(span_id, 16)
109 options = 0
110 # The b3 spec provides no defined behavior for both sample and
111 # flag values set. Since the setting of at least one implies
112 # the desire for some form of sampling, propagate if either
113 # header is set to allow.
114 if sampled in self._SAMPLE_PROPAGATE_VALUES or flags == "1":
115 options |= trace.TraceFlags.SAMPLED
116
117 return trace.set_span_in_context(
118 trace.NonRecordingSpan(
119 trace.SpanContext(
120 # trace an span ids are encoded in hex, so must be converted
121 trace_id=trace_id,
122 span_id=span_id,
123 is_remote=True,
124 trace_flags=trace.TraceFlags(options),
125 trace_state=trace.TraceState(),
126 )
127 ),
128 context,
129 )
130
131 def inject(
132 self,
133 carrier: CarrierT,
134 context: typing.Optional[Context] = None,
135 setter: Setter = default_setter,
136 ) -> None:
137 span = trace.get_current_span(context=context)
138
139 span_context = span.get_span_context()
140 if span_context == trace.INVALID_SPAN_CONTEXT:
141 return
142
143 sampled = (trace.TraceFlags.SAMPLED & span_context.trace_flags) != 0
144 setter.set(
145 carrier,
146 self.TRACE_ID_KEY,
147 format_trace_id(span_context.trace_id),
148 )
149 setter.set(
150 carrier, self.SPAN_ID_KEY, format_span_id(span_context.span_id)
151 )
152 span_parent = getattr(span, "parent", None)
153 if span_parent is not None:
154 setter.set(
155 carrier,
156 self.PARENT_SPAN_ID_KEY,
157 format_span_id(span_parent.span_id),
158 )
159 setter.set(carrier, self.SAMPLED_KEY, "1" if sampled else "0")
160
161 @property
162 def fields(self) -> typing.Set[str]:
163 return {
164 self.TRACE_ID_KEY,
165 self.SPAN_ID_KEY,
166 self.PARENT_SPAN_ID_KEY,
167 self.SAMPLED_KEY,
168 }
169
170
171 class B3SingleFormat(B3MultiFormat):
172 """Propagator for the B3 HTTP single-header format.
173
174 See: https://github.com/openzipkin/b3-propagation
175 https://github.com/openzipkin/b3-propagation#single-header
176 """
177
178 def inject(
179 self,
180 carrier: CarrierT,
181 context: typing.Optional[Context] = None,
182 setter: Setter = default_setter,
183 ) -> None:
184 span = trace.get_current_span(context=context)
185
186 span_context = span.get_span_context()
187 if span_context == trace.INVALID_SPAN_CONTEXT:
188 return
189
190 sampled = (trace.TraceFlags.SAMPLED & span_context.trace_flags) != 0
191
192 fields = [
193 format_trace_id(span_context.trace_id),
194 format_span_id(span_context.span_id),
195 "1" if sampled else "0",
196 ]
197
198 span_parent = getattr(span, "parent", None)
199 if span_parent:
200 fields.append(format_span_id(span_parent.span_id))
201
202 setter.set(carrier, self.SINGLE_HEADER_KEY, "-".join(fields))
203
204 @property
205 def fields(self) -> typing.Set[str]:
206 return {self.SINGLE_HEADER_KEY}
207
208
209 class B3Format(B3MultiFormat):
210 @deprecated(
211 version="1.2.0",
212 reason="B3Format is deprecated in favor of B3MultiFormat",
213 )
214 def __init__(self, *args, **kwargs):
215 super().__init__(*args, **kwargs)
216
217
218 def _extract_first_element(
219 items: typing.Iterable[CarrierT],
220 ) -> typing.Optional[CarrierT]:
221 if items is None:
222 return None
223 return next(iter(items), None)
224
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/propagator/opentelemetry-propagator-b3/src/opentelemetry/propagators/b3/__init__.py b/propagator/opentelemetry-propagator-b3/src/opentelemetry/propagators/b3/__init__.py
--- a/propagator/opentelemetry-propagator-b3/src/opentelemetry/propagators/b3/__init__.py
+++ b/propagator/opentelemetry-propagator-b3/src/opentelemetry/propagators/b3/__init__.py
@@ -40,7 +40,6 @@
SINGLE_HEADER_KEY = "b3"
TRACE_ID_KEY = "x-b3-traceid"
SPAN_ID_KEY = "x-b3-spanid"
- PARENT_SPAN_ID_KEY = "x-b3-parentspanid"
SAMPLED_KEY = "x-b3-sampled"
FLAGS_KEY = "x-b3-flags"
_SAMPLE_PROPAGATE_VALUES = set(["1", "True", "true", "d"])
@@ -149,13 +148,6 @@
setter.set(
carrier, self.SPAN_ID_KEY, format_span_id(span_context.span_id)
)
- span_parent = getattr(span, "parent", None)
- if span_parent is not None:
- setter.set(
- carrier,
- self.PARENT_SPAN_ID_KEY,
- format_span_id(span_parent.span_id),
- )
setter.set(carrier, self.SAMPLED_KEY, "1" if sampled else "0")
@property
@@ -163,7 +155,6 @@
return {
self.TRACE_ID_KEY,
self.SPAN_ID_KEY,
- self.PARENT_SPAN_ID_KEY,
self.SAMPLED_KEY,
}
@@ -195,10 +186,6 @@
"1" if sampled else "0",
]
- span_parent = getattr(span, "parent", None)
- if span_parent:
- fields.append(format_span_id(span_parent.span_id))
-
setter.set(carrier, self.SINGLE_HEADER_KEY, "-".join(fields))
@property
| {"golden_diff": "diff --git a/propagator/opentelemetry-propagator-b3/src/opentelemetry/propagators/b3/__init__.py b/propagator/opentelemetry-propagator-b3/src/opentelemetry/propagators/b3/__init__.py\n--- a/propagator/opentelemetry-propagator-b3/src/opentelemetry/propagators/b3/__init__.py\n+++ b/propagator/opentelemetry-propagator-b3/src/opentelemetry/propagators/b3/__init__.py\n@@ -40,7 +40,6 @@\n SINGLE_HEADER_KEY = \"b3\"\n TRACE_ID_KEY = \"x-b3-traceid\"\n SPAN_ID_KEY = \"x-b3-spanid\"\n- PARENT_SPAN_ID_KEY = \"x-b3-parentspanid\"\n SAMPLED_KEY = \"x-b3-sampled\"\n FLAGS_KEY = \"x-b3-flags\"\n _SAMPLE_PROPAGATE_VALUES = set([\"1\", \"True\", \"true\", \"d\"])\n@@ -149,13 +148,6 @@\n setter.set(\n carrier, self.SPAN_ID_KEY, format_span_id(span_context.span_id)\n )\n- span_parent = getattr(span, \"parent\", None)\n- if span_parent is not None:\n- setter.set(\n- carrier,\n- self.PARENT_SPAN_ID_KEY,\n- format_span_id(span_parent.span_id),\n- )\n setter.set(carrier, self.SAMPLED_KEY, \"1\" if sampled else \"0\")\n \n @property\n@@ -163,7 +155,6 @@\n return {\n self.TRACE_ID_KEY,\n self.SPAN_ID_KEY,\n- self.PARENT_SPAN_ID_KEY,\n self.SAMPLED_KEY,\n }\n \n@@ -195,10 +186,6 @@\n \"1\" if sampled else \"0\",\n ]\n \n- span_parent = getattr(span, \"parent\", None)\n- if span_parent:\n- fields.append(format_span_id(span_parent.span_id))\n-\n setter.set(carrier, self.SINGLE_HEADER_KEY, \"-\".join(fields))\n \n @property\n", "issue": "Otel Spec forbids B3 propagator from propagating `X-B3-ParentSpanId`\nFrom the [spec](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/context/api-propagators.md#b3-inject)\r\n\r\n> MUST NOT propagate X-B3-ParentSpanId as OpenTelemetry does not support reusing the same id for both sides of a request.\r\n\r\nBut we do it [here](https://github.com/open-telemetry/opentelemetry-python/blob/61c2d6e1508c75cce4b50ff7b28712c944b94128/propagator/opentelemetry-propagator-b3/src/opentelemetry/propagators/b3/__init__.py#L152-L158)\r\n\r\nThis should be removed as the spec forbids it. 
\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport typing\nfrom re import compile as re_compile\n\nfrom deprecated import deprecated\n\nfrom opentelemetry import trace\nfrom opentelemetry.context import Context\nfrom opentelemetry.propagators.textmap import (\n CarrierT,\n Getter,\n Setter,\n TextMapPropagator,\n default_getter,\n default_setter,\n)\nfrom opentelemetry.trace import format_span_id, format_trace_id\n\n\nclass B3MultiFormat(TextMapPropagator):\n \"\"\"Propagator for the B3 HTTP multi-header format.\n\n See: https://github.com/openzipkin/b3-propagation\n https://github.com/openzipkin/b3-propagation#multiple-headers\n \"\"\"\n\n SINGLE_HEADER_KEY = \"b3\"\n TRACE_ID_KEY = \"x-b3-traceid\"\n SPAN_ID_KEY = \"x-b3-spanid\"\n PARENT_SPAN_ID_KEY = \"x-b3-parentspanid\"\n SAMPLED_KEY = \"x-b3-sampled\"\n FLAGS_KEY = \"x-b3-flags\"\n _SAMPLE_PROPAGATE_VALUES = set([\"1\", \"True\", \"true\", \"d\"])\n _trace_id_regex = re_compile(r\"[\\da-fA-F]{16}|[\\da-fA-F]{32}\")\n _span_id_regex = re_compile(r\"[\\da-fA-F]{16}\")\n\n def extract(\n self,\n carrier: CarrierT,\n context: typing.Optional[Context] = None,\n getter: Getter = default_getter,\n ) -> Context:\n if context is None:\n context = Context()\n trace_id = trace.INVALID_TRACE_ID\n span_id = trace.INVALID_SPAN_ID\n sampled = \"0\"\n flags = None\n\n single_header = _extract_first_element(\n getter.get(carrier, self.SINGLE_HEADER_KEY)\n )\n if single_header:\n # The b3 spec calls for the sampling state to be\n # \"deferred\", which is unspecified. This concept does not\n # translate to SpanContext, so we set it as recorded.\n sampled = \"1\"\n fields = single_header.split(\"-\", 4)\n\n if len(fields) == 1:\n sampled = fields[0]\n elif len(fields) == 2:\n trace_id, span_id = fields\n elif len(fields) == 3:\n trace_id, span_id, sampled = fields\n elif len(fields) == 4:\n trace_id, span_id, sampled, _ = fields\n else:\n trace_id = (\n _extract_first_element(getter.get(carrier, self.TRACE_ID_KEY))\n or trace_id\n )\n span_id = (\n _extract_first_element(getter.get(carrier, self.SPAN_ID_KEY))\n or span_id\n )\n sampled = (\n _extract_first_element(getter.get(carrier, self.SAMPLED_KEY))\n or sampled\n )\n flags = (\n _extract_first_element(getter.get(carrier, self.FLAGS_KEY))\n or flags\n )\n\n if (\n trace_id == trace.INVALID_TRACE_ID\n or span_id == trace.INVALID_SPAN_ID\n or self._trace_id_regex.fullmatch(trace_id) is None\n or self._span_id_regex.fullmatch(span_id) is None\n ):\n return context\n\n trace_id = int(trace_id, 16)\n span_id = int(span_id, 16)\n options = 0\n # The b3 spec provides no defined behavior for both sample and\n # flag values set. 
Since the setting of at least one implies\n # the desire for some form of sampling, propagate if either\n # header is set to allow.\n if sampled in self._SAMPLE_PROPAGATE_VALUES or flags == \"1\":\n options |= trace.TraceFlags.SAMPLED\n\n return trace.set_span_in_context(\n trace.NonRecordingSpan(\n trace.SpanContext(\n # trace an span ids are encoded in hex, so must be converted\n trace_id=trace_id,\n span_id=span_id,\n is_remote=True,\n trace_flags=trace.TraceFlags(options),\n trace_state=trace.TraceState(),\n )\n ),\n context,\n )\n\n def inject(\n self,\n carrier: CarrierT,\n context: typing.Optional[Context] = None,\n setter: Setter = default_setter,\n ) -> None:\n span = trace.get_current_span(context=context)\n\n span_context = span.get_span_context()\n if span_context == trace.INVALID_SPAN_CONTEXT:\n return\n\n sampled = (trace.TraceFlags.SAMPLED & span_context.trace_flags) != 0\n setter.set(\n carrier,\n self.TRACE_ID_KEY,\n format_trace_id(span_context.trace_id),\n )\n setter.set(\n carrier, self.SPAN_ID_KEY, format_span_id(span_context.span_id)\n )\n span_parent = getattr(span, \"parent\", None)\n if span_parent is not None:\n setter.set(\n carrier,\n self.PARENT_SPAN_ID_KEY,\n format_span_id(span_parent.span_id),\n )\n setter.set(carrier, self.SAMPLED_KEY, \"1\" if sampled else \"0\")\n\n @property\n def fields(self) -> typing.Set[str]:\n return {\n self.TRACE_ID_KEY,\n self.SPAN_ID_KEY,\n self.PARENT_SPAN_ID_KEY,\n self.SAMPLED_KEY,\n }\n\n\nclass B3SingleFormat(B3MultiFormat):\n \"\"\"Propagator for the B3 HTTP single-header format.\n\n See: https://github.com/openzipkin/b3-propagation\n https://github.com/openzipkin/b3-propagation#single-header\n \"\"\"\n\n def inject(\n self,\n carrier: CarrierT,\n context: typing.Optional[Context] = None,\n setter: Setter = default_setter,\n ) -> None:\n span = trace.get_current_span(context=context)\n\n span_context = span.get_span_context()\n if span_context == trace.INVALID_SPAN_CONTEXT:\n return\n\n sampled = (trace.TraceFlags.SAMPLED & span_context.trace_flags) != 0\n\n fields = [\n format_trace_id(span_context.trace_id),\n format_span_id(span_context.span_id),\n \"1\" if sampled else \"0\",\n ]\n\n span_parent = getattr(span, \"parent\", None)\n if span_parent:\n fields.append(format_span_id(span_parent.span_id))\n\n setter.set(carrier, self.SINGLE_HEADER_KEY, \"-\".join(fields))\n\n @property\n def fields(self) -> typing.Set[str]:\n return {self.SINGLE_HEADER_KEY}\n\n\nclass B3Format(B3MultiFormat):\n @deprecated(\n version=\"1.2.0\",\n reason=\"B3Format is deprecated in favor of B3MultiFormat\",\n )\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n\ndef _extract_first_element(\n items: typing.Iterable[CarrierT],\n) -> typing.Optional[CarrierT]:\n if items is None:\n return None\n return next(iter(items), None)\n", "path": "propagator/opentelemetry-propagator-b3/src/opentelemetry/propagators/b3/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations 
under the License.\n\nimport typing\nfrom re import compile as re_compile\n\nfrom deprecated import deprecated\n\nfrom opentelemetry import trace\nfrom opentelemetry.context import Context\nfrom opentelemetry.propagators.textmap import (\n CarrierT,\n Getter,\n Setter,\n TextMapPropagator,\n default_getter,\n default_setter,\n)\nfrom opentelemetry.trace import format_span_id, format_trace_id\n\n\nclass B3MultiFormat(TextMapPropagator):\n \"\"\"Propagator for the B3 HTTP multi-header format.\n\n See: https://github.com/openzipkin/b3-propagation\n https://github.com/openzipkin/b3-propagation#multiple-headers\n \"\"\"\n\n SINGLE_HEADER_KEY = \"b3\"\n TRACE_ID_KEY = \"x-b3-traceid\"\n SPAN_ID_KEY = \"x-b3-spanid\"\n SAMPLED_KEY = \"x-b3-sampled\"\n FLAGS_KEY = \"x-b3-flags\"\n _SAMPLE_PROPAGATE_VALUES = set([\"1\", \"True\", \"true\", \"d\"])\n _trace_id_regex = re_compile(r\"[\\da-fA-F]{16}|[\\da-fA-F]{32}\")\n _span_id_regex = re_compile(r\"[\\da-fA-F]{16}\")\n\n def extract(\n self,\n carrier: CarrierT,\n context: typing.Optional[Context] = None,\n getter: Getter = default_getter,\n ) -> Context:\n if context is None:\n context = Context()\n trace_id = trace.INVALID_TRACE_ID\n span_id = trace.INVALID_SPAN_ID\n sampled = \"0\"\n flags = None\n\n single_header = _extract_first_element(\n getter.get(carrier, self.SINGLE_HEADER_KEY)\n )\n if single_header:\n # The b3 spec calls for the sampling state to be\n # \"deferred\", which is unspecified. This concept does not\n # translate to SpanContext, so we set it as recorded.\n sampled = \"1\"\n fields = single_header.split(\"-\", 4)\n\n if len(fields) == 1:\n sampled = fields[0]\n elif len(fields) == 2:\n trace_id, span_id = fields\n elif len(fields) == 3:\n trace_id, span_id, sampled = fields\n elif len(fields) == 4:\n trace_id, span_id, sampled, _ = fields\n else:\n trace_id = (\n _extract_first_element(getter.get(carrier, self.TRACE_ID_KEY))\n or trace_id\n )\n span_id = (\n _extract_first_element(getter.get(carrier, self.SPAN_ID_KEY))\n or span_id\n )\n sampled = (\n _extract_first_element(getter.get(carrier, self.SAMPLED_KEY))\n or sampled\n )\n flags = (\n _extract_first_element(getter.get(carrier, self.FLAGS_KEY))\n or flags\n )\n\n if (\n trace_id == trace.INVALID_TRACE_ID\n or span_id == trace.INVALID_SPAN_ID\n or self._trace_id_regex.fullmatch(trace_id) is None\n or self._span_id_regex.fullmatch(span_id) is None\n ):\n return context\n\n trace_id = int(trace_id, 16)\n span_id = int(span_id, 16)\n options = 0\n # The b3 spec provides no defined behavior for both sample and\n # flag values set. 
Since the setting of at least one implies\n # the desire for some form of sampling, propagate if either\n # header is set to allow.\n if sampled in self._SAMPLE_PROPAGATE_VALUES or flags == \"1\":\n options |= trace.TraceFlags.SAMPLED\n\n return trace.set_span_in_context(\n trace.NonRecordingSpan(\n trace.SpanContext(\n # trace an span ids are encoded in hex, so must be converted\n trace_id=trace_id,\n span_id=span_id,\n is_remote=True,\n trace_flags=trace.TraceFlags(options),\n trace_state=trace.TraceState(),\n )\n ),\n context,\n )\n\n def inject(\n self,\n carrier: CarrierT,\n context: typing.Optional[Context] = None,\n setter: Setter = default_setter,\n ) -> None:\n span = trace.get_current_span(context=context)\n\n span_context = span.get_span_context()\n if span_context == trace.INVALID_SPAN_CONTEXT:\n return\n\n sampled = (trace.TraceFlags.SAMPLED & span_context.trace_flags) != 0\n setter.set(\n carrier,\n self.TRACE_ID_KEY,\n format_trace_id(span_context.trace_id),\n )\n setter.set(\n carrier, self.SPAN_ID_KEY, format_span_id(span_context.span_id)\n )\n setter.set(carrier, self.SAMPLED_KEY, \"1\" if sampled else \"0\")\n\n @property\n def fields(self) -> typing.Set[str]:\n return {\n self.TRACE_ID_KEY,\n self.SPAN_ID_KEY,\n self.SAMPLED_KEY,\n }\n\n\nclass B3SingleFormat(B3MultiFormat):\n \"\"\"Propagator for the B3 HTTP single-header format.\n\n See: https://github.com/openzipkin/b3-propagation\n https://github.com/openzipkin/b3-propagation#single-header\n \"\"\"\n\n def inject(\n self,\n carrier: CarrierT,\n context: typing.Optional[Context] = None,\n setter: Setter = default_setter,\n ) -> None:\n span = trace.get_current_span(context=context)\n\n span_context = span.get_span_context()\n if span_context == trace.INVALID_SPAN_CONTEXT:\n return\n\n sampled = (trace.TraceFlags.SAMPLED & span_context.trace_flags) != 0\n\n fields = [\n format_trace_id(span_context.trace_id),\n format_span_id(span_context.span_id),\n \"1\" if sampled else \"0\",\n ]\n\n setter.set(carrier, self.SINGLE_HEADER_KEY, \"-\".join(fields))\n\n @property\n def fields(self) -> typing.Set[str]:\n return {self.SINGLE_HEADER_KEY}\n\n\nclass B3Format(B3MultiFormat):\n @deprecated(\n version=\"1.2.0\",\n reason=\"B3Format is deprecated in favor of B3MultiFormat\",\n )\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n\ndef _extract_first_element(\n items: typing.Iterable[CarrierT],\n) -> typing.Optional[CarrierT]:\n if items is None:\n return None\n return next(iter(items), None)\n", "path": "propagator/opentelemetry-propagator-b3/src/opentelemetry/propagators/b3/__init__.py"}]} | 2,691 | 475 |
gh_patches_debug_14541 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-5006 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Impossible to build with matplotlib 3.3 on Python 3.7 and 3.8
## Description of the issue
Trying a simple program main.py
```python
import matplotlib.pyplot as plt
plt.plot([0,1,2], [0,1,1])
```
Running the application created with pyinstaller --hiddenimport='pkg_resources.py2_warn' main.py
I get:
```
Traceback (most recent call last): File "main.py", line 3, in <module> File "<frozen importlib._bootstrap>", line 983, in _find_and_load File "<frozen importlib._bootstrap>", line 967, in _find_and_load_unlocked File "<frozen importlib._bootstrap>", line 677, in _load_unlocked File "c:\users\***\appdata\local\programs\python\python37\lib\site-packages\PyInstaller\loader\pyimod03_importers.py", line 623, in exec_module File "site-packages\matplotlib\__init__.py", line 898, in <module> File "site-packages\matplotlib\cbook\__init__.py", line 480, in _get_data_path File "site-packages\matplotlib\__init__.py", line 239, in wrapper File "site-packages\matplotlib\__init__.py", line 534, in get_data_path File "site-packages\matplotlib\__init__.py", line 239, in wrapper File "site-packages\matplotlib\__init__.py", line 563, in _get_data_path NameError: name 'defaultParams' is not defined [3064] Failed to execute script main
```
## Context information (for bug reports)
* Output of `pyinstaller --version`: ```3.6 and 4.0-dev0```
* Version of Python: 3.7 and 3.8
* Platform: Windows (english 10)
* Did you also try this on another platform? No
## What solves it
Reverting to matplotlib 3.2.2 resolve the issue (temporarily).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `PyInstaller/hooks/hook-matplotlib.py`
Content:
```
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2013-2020, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License (version 2
5 # or later) with exception for distributing the bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #
9 # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
10 #-----------------------------------------------------------------------------
11
12
13 from PyInstaller.utils.hooks import exec_statement
14
15 mpl_data_dir = exec_statement(
16 "import matplotlib; print(matplotlib._get_data_path())")
17
18 datas = [
19 (mpl_data_dir, "mpl-data"),
20 ]
21
```
Path: `PyInstaller/hooks/rthooks/pyi_rth_mpldata.py`
Content:
```
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2013-2020, PyInstaller Development Team.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #
9 # SPDX-License-Identifier: Apache-2.0
10 #-----------------------------------------------------------------------------
11
12
13 import os
14 import sys
15
16 os.environ["MATPLOTLIBDATA"] = os.path.join(sys._MEIPASS, "mpl-data")
17
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/PyInstaller/hooks/hook-matplotlib.py b/PyInstaller/hooks/hook-matplotlib.py
--- a/PyInstaller/hooks/hook-matplotlib.py
+++ b/PyInstaller/hooks/hook-matplotlib.py
@@ -16,5 +16,5 @@
"import matplotlib; print(matplotlib._get_data_path())")
datas = [
- (mpl_data_dir, "mpl-data"),
+ (mpl_data_dir, "matplotlib/mpl-data"),
]
diff --git a/PyInstaller/hooks/rthooks/pyi_rth_mpldata.py b/PyInstaller/hooks/rthooks/pyi_rth_mpldata.py
deleted file mode 100644
--- a/PyInstaller/hooks/rthooks/pyi_rth_mpldata.py
+++ /dev/null
@@ -1,16 +0,0 @@
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013-2020, PyInstaller Development Team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#
-# SPDX-License-Identifier: Apache-2.0
-#-----------------------------------------------------------------------------
-
-
-import os
-import sys
-
-os.environ["MATPLOTLIBDATA"] = os.path.join(sys._MEIPASS, "mpl-data")
| {"golden_diff": "diff --git a/PyInstaller/hooks/hook-matplotlib.py b/PyInstaller/hooks/hook-matplotlib.py\n--- a/PyInstaller/hooks/hook-matplotlib.py\n+++ b/PyInstaller/hooks/hook-matplotlib.py\n@@ -16,5 +16,5 @@\n \"import matplotlib; print(matplotlib._get_data_path())\")\n \n datas = [\n- (mpl_data_dir, \"mpl-data\"),\n+ (mpl_data_dir, \"matplotlib/mpl-data\"),\n ]\ndiff --git a/PyInstaller/hooks/rthooks/pyi_rth_mpldata.py b/PyInstaller/hooks/rthooks/pyi_rth_mpldata.py\ndeleted file mode 100644\n--- a/PyInstaller/hooks/rthooks/pyi_rth_mpldata.py\n+++ /dev/null\n@@ -1,16 +0,0 @@\n-#-----------------------------------------------------------------------------\n-# Copyright (c) 2013-2020, PyInstaller Development Team.\n-#\n-# Licensed under the Apache License, Version 2.0 (the \"License\");\n-# you may not use this file except in compliance with the License.\n-#\n-# The full license is in the file COPYING.txt, distributed with this software.\n-#\n-# SPDX-License-Identifier: Apache-2.0\n-#-----------------------------------------------------------------------------\n-\n-\n-import os\n-import sys\n-\n-os.environ[\"MATPLOTLIBDATA\"] = os.path.join(sys._MEIPASS, \"mpl-data\")\n", "issue": "Impossible to build with matplotlib 3.3 on Python 3.7 and 3.8\n## Description of the issue\r\n\r\nTrying a simple program main.py\r\n```python\r\nimport matplotlib.pyplot as plt\r\n\r\nplt.plot([0,1,2], [0,1,1])\r\n```\r\n\r\nRunning the application created with pyinstaller --hiddenimport='pkg_resources.py2_warn' main.py\r\n\r\nI get:\r\n\r\n```\r\nTraceback (most recent call last): File \"main.py\", line 3, in <module> File \"<frozen importlib._bootstrap>\", line 983, in _find_and_load File \"<frozen importlib._bootstrap>\", line 967, in _find_and_load_unlocked File \"<frozen importlib._bootstrap>\", line 677, in _load_unlocked File \"c:\\users\\***\\appdata\\local\\programs\\python\\python37\\lib\\site-packages\\PyInstaller\\loader\\pyimod03_importers.py\", line 623, in exec_module File \"site-packages\\matplotlib\\__init__.py\", line 898, in <module> File \"site-packages\\matplotlib\\cbook\\__init__.py\", line 480, in _get_data_path File \"site-packages\\matplotlib\\__init__.py\", line 239, in wrapper File \"site-packages\\matplotlib\\__init__.py\", line 534, in get_data_path File \"site-packages\\matplotlib\\__init__.py\", line 239, in wrapper File \"site-packages\\matplotlib\\__init__.py\", line 563, in _get_data_path NameError: name 'defaultParams' is not defined [3064] Failed to execute script main\r\n```\r\n## Context information (for bug reports)\r\n\r\n* Output of `pyinstaller --version`: ```3.6 and 4.0-dev0```\r\n* Version of Python: 3.7 and 3.8\r\n* Platform: Windows (english 10)\r\n* Did you also try this on another platform? 
No\r\n\r\n## What solves it\r\n\r\nReverting to matplotlib 3.2.2 resolve the issue (temporarily).\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2013-2020, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\n\nfrom PyInstaller.utils.hooks import exec_statement\n\nmpl_data_dir = exec_statement(\n \"import matplotlib; print(matplotlib._get_data_path())\")\n\ndatas = [\n (mpl_data_dir, \"mpl-data\"),\n]\n", "path": "PyInstaller/hooks/hook-matplotlib.py"}, {"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2013-2020, PyInstaller Development Team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: Apache-2.0\n#-----------------------------------------------------------------------------\n\n\nimport os\nimport sys\n\nos.environ[\"MATPLOTLIBDATA\"] = os.path.join(sys._MEIPASS, \"mpl-data\")\n", "path": "PyInstaller/hooks/rthooks/pyi_rth_mpldata.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2013-2020, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\n\nfrom PyInstaller.utils.hooks import exec_statement\n\nmpl_data_dir = exec_statement(\n \"import matplotlib; print(matplotlib._get_data_path())\")\n\ndatas = [\n (mpl_data_dir, \"matplotlib/mpl-data\"),\n]\n", "path": "PyInstaller/hooks/hook-matplotlib.py"}, {"content": null, "path": "PyInstaller/hooks/rthooks/pyi_rth_mpldata.py"}]} | 1,113 | 317 |
gh_patches_debug_16772 | rasdani/github-patches | git_diff | saulpw__visidata-1584 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ZSH completion fails with invalid option definition
I'm using v2.10.2 installed via the [latest Arch package](https://archlinux.org/packages/community/any/visidata/). Visidata shell completion in my ZSH is currently broken.
```
$ vd <tab>
_arguments:comparguments:327: invalid option definition: --fmt_expand_list[format str to use for names of columns expanded from list (colname, index) (default: %s[%s])]:str
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dev/zsh-completion.py`
Content:
```
1 #!/usr/bin/env python
2 from __future__ import unicode_literals
3
4 import os
5 from os.path import dirname as dirn
6 import sys
7 import re
8
9 sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))
10 from visidata import vd
11 from visidata.main import option_aliases
12
13 ZSH_COMPLETION_FILE = "_visidata"
14 ZSH_COMPLETION_TEMPLATE = "dev/zsh-completion.in"
15 pat_class = re.compile("'(.*)'")
16 pat_select = re.compile("^\([^)]*\)")
17
18
19 def generate_completion(opt):
20 prefix = "--" + opt.name
21 shortnames = [key for key, value in option_aliases.items() if value[0] == opt.name]
22 if len(shortnames):
23 if len(shortnames[0]) == 1:
24 shortname = "-" + shortnames[0]
25 else:
26 shortname = "--" + shortnames[0]
27 prefix = "{" + f"{shortname},{prefix}" + "}"
28 if isinstance(opt.value, bool):
29 completion = ""
30 else:
31 completion = ":" + pat_class.findall(str(opt.value.__class__))[0]
32 if opt.name in ["play", "output", "visidata_dir", "config"]:
33 completion += ":_files"
34 elif opt.name in ["plugins_url", "motd_url"]:
35 completion += ":_urls"
36 helpstr = opt.helpstr.replace("[", "\\[").replace("]", "\\]")
37 selections = pat_select.findall(helpstr)
38 if len(selections):
39 completion += f":{selections[0].replace('/', ' ')}"
40 # TODO: use `zstyle ':completion:*' extra-verbose true`
41 # to control the display of default value
42 helpstr = helpstr + f" (default: {opt.value})"
43 return f"{prefix}'[{helpstr}]{completion}'"
44
45
46 flags = [generate_completion(vd._options[opt]["default"]) for opt in vd._options]
47
48 with open(ZSH_COMPLETION_TEMPLATE) as f:
49 template = f.read()
50
51 template = template.replace("{{flags}}", " \\\n ".join(flags))
52
53 with open(ZSH_COMPLETION_FILE, "w") as f:
54 f.write(template)
55
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dev/zsh-completion.py b/dev/zsh-completion.py
--- a/dev/zsh-completion.py
+++ b/dev/zsh-completion.py
@@ -33,13 +33,14 @@
completion += ":_files"
elif opt.name in ["plugins_url", "motd_url"]:
completion += ":_urls"
- helpstr = opt.helpstr.replace("[", "\\[").replace("]", "\\]")
+ helpstr = opt.helpstr
selections = pat_select.findall(helpstr)
if len(selections):
completion += f":{selections[0].replace('/', ' ')}"
# TODO: use `zstyle ':completion:*' extra-verbose true`
# to control the display of default value
helpstr = helpstr + f" (default: {opt.value})"
+ helpstr = helpstr.replace("[", "\\[").replace("]", "\\]")
return f"{prefix}'[{helpstr}]{completion}'"
| {"golden_diff": "diff --git a/dev/zsh-completion.py b/dev/zsh-completion.py\n--- a/dev/zsh-completion.py\n+++ b/dev/zsh-completion.py\n@@ -33,13 +33,14 @@\n completion += \":_files\"\n elif opt.name in [\"plugins_url\", \"motd_url\"]:\n completion += \":_urls\"\n- helpstr = opt.helpstr.replace(\"[\", \"\\\\[\").replace(\"]\", \"\\\\]\")\n+ helpstr = opt.helpstr\n selections = pat_select.findall(helpstr)\n if len(selections):\n completion += f\":{selections[0].replace('/', ' ')}\"\n # TODO: use `zstyle ':completion:*' extra-verbose true`\n # to control the display of default value\n helpstr = helpstr + f\" (default: {opt.value})\"\n+ helpstr = helpstr.replace(\"[\", \"\\\\[\").replace(\"]\", \"\\\\]\")\n return f\"{prefix}'[{helpstr}]{completion}'\"\n", "issue": "ZSH completion fails with invalid option definition\nI'm using v2.10.2 installed via the [latest Arch package](https://archlinux.org/packages/community/any/visidata/). Visidata shell completion in my ZSH is currently broken.\r\n\r\n```\r\n$ vd <tab>\r\n_arguments:comparguments:327: invalid option definition: --fmt_expand_list[format str to use for names of columns expanded from list (colname, index) (default: %s[%s])]:str\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import unicode_literals\n\nimport os\nfrom os.path import dirname as dirn\nimport sys\nimport re\n\nsys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))\nfrom visidata import vd\nfrom visidata.main import option_aliases\n\nZSH_COMPLETION_FILE = \"_visidata\"\nZSH_COMPLETION_TEMPLATE = \"dev/zsh-completion.in\"\npat_class = re.compile(\"'(.*)'\")\npat_select = re.compile(\"^\\([^)]*\\)\")\n\n\ndef generate_completion(opt):\n prefix = \"--\" + opt.name\n shortnames = [key for key, value in option_aliases.items() if value[0] == opt.name]\n if len(shortnames):\n if len(shortnames[0]) == 1:\n shortname = \"-\" + shortnames[0]\n else:\n shortname = \"--\" + shortnames[0]\n prefix = \"{\" + f\"{shortname},{prefix}\" + \"}\"\n if isinstance(opt.value, bool):\n completion = \"\"\n else:\n completion = \":\" + pat_class.findall(str(opt.value.__class__))[0]\n if opt.name in [\"play\", \"output\", \"visidata_dir\", \"config\"]:\n completion += \":_files\"\n elif opt.name in [\"plugins_url\", \"motd_url\"]:\n completion += \":_urls\"\n helpstr = opt.helpstr.replace(\"[\", \"\\\\[\").replace(\"]\", \"\\\\]\")\n selections = pat_select.findall(helpstr)\n if len(selections):\n completion += f\":{selections[0].replace('/', ' ')}\"\n # TODO: use `zstyle ':completion:*' extra-verbose true`\n # to control the display of default value\n helpstr = helpstr + f\" (default: {opt.value})\"\n return f\"{prefix}'[{helpstr}]{completion}'\"\n\n\nflags = [generate_completion(vd._options[opt][\"default\"]) for opt in vd._options]\n\nwith open(ZSH_COMPLETION_TEMPLATE) as f:\n template = f.read()\n\ntemplate = template.replace(\"{{flags}}\", \" \\\\\\n \".join(flags))\n\nwith open(ZSH_COMPLETION_FILE, \"w\") as f:\n f.write(template)\n", "path": "dev/zsh-completion.py"}], "after_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import unicode_literals\n\nimport os\nfrom os.path import dirname as dirn\nimport sys\nimport re\n\nsys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))\nfrom visidata import vd\nfrom visidata.main import option_aliases\n\nZSH_COMPLETION_FILE = \"_visidata\"\nZSH_COMPLETION_TEMPLATE = \"dev/zsh-completion.in\"\npat_class = re.compile(\"'(.*)'\")\npat_select = re.compile(\"^\\([^)]*\\)\")\n\n\ndef 
generate_completion(opt):\n prefix = \"--\" + opt.name\n shortnames = [key for key, value in option_aliases.items() if value[0] == opt.name]\n if len(shortnames):\n if len(shortnames[0]) == 1:\n shortname = \"-\" + shortnames[0]\n else:\n shortname = \"--\" + shortnames[0]\n prefix = \"{\" + f\"{shortname},{prefix}\" + \"}\"\n if isinstance(opt.value, bool):\n completion = \"\"\n else:\n completion = \":\" + pat_class.findall(str(opt.value.__class__))[0]\n if opt.name in [\"play\", \"output\", \"visidata_dir\", \"config\"]:\n completion += \":_files\"\n elif opt.name in [\"plugins_url\", \"motd_url\"]:\n completion += \":_urls\"\n helpstr = opt.helpstr\n selections = pat_select.findall(helpstr)\n if len(selections):\n completion += f\":{selections[0].replace('/', ' ')}\"\n # TODO: use `zstyle ':completion:*' extra-verbose true`\n # to control the display of default value\n helpstr = helpstr + f\" (default: {opt.value})\"\n helpstr = helpstr.replace(\"[\", \"\\\\[\").replace(\"]\", \"\\\\]\")\n return f\"{prefix}'[{helpstr}]{completion}'\"\n\n\nflags = [generate_completion(vd._options[opt][\"default\"]) for opt in vd._options]\n\nwith open(ZSH_COMPLETION_TEMPLATE) as f:\n template = f.read()\n\ntemplate = template.replace(\"{{flags}}\", \" \\\\\\n \".join(flags))\n\nwith open(ZSH_COMPLETION_FILE, \"w\") as f:\n f.write(template)\n", "path": "dev/zsh-completion.py"}]} | 948 | 212 |
gh_patches_debug_13197 | rasdani/github-patches | git_diff | localstack__localstack-5700 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bug: sqs creation breaks with 0.14.1
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Behavior
Having the following cdk definition for a sqs queue
const queue = new sqs.Queue(scope, `foo-queue`, {
fifo: true,
visibilityTimeout: Duration.seconds(300),
contentBasedDeduplication: true
})
deploy works normally with localstack 0.14.0 using https://github.com/localstack/aws-cdk-local
But after upgrading to 0.14.1 we see the following error
2022-03-17T11:34:23.851:WARNING:localstack.utils.cloudformation.template_deployer: Error calling <bound method ClientCreator._create_api_method.<locals>._api_call of <botocore.client.SQS object at 0x7f11db7346d0>> with params: {'QueueName': 'FooStack-fooqueueB0475DE4-8bf04fb7', 'Attributes': {'ContentBasedDeduplication': 'True', 'FifoQueue': 'True', 'VisibilityTimeout': '300'}, 'tags': {}} for resource: {'Type': 'AWS::SQS::Queue', 'UpdateReplacePolicy': 'Delete', 'DeletionPolicy': 'Delete', 'Metadata': {'aws:cdk:path': 'FooStack/foo-queue/Resource'}, 'LogicalResourceId': 'fooqueueB0475DE4', 'Properties': {'ContentBasedDeduplication': True, 'FifoQueue': True, 'VisibilityTimeout': 300, 'QueueName': 'FooStack-fooqueueB0475DE4-8bf04fb7'}, '_state_': {}}
Only difference being the localstack upgrade, downgrading back to 0.14.0 fixes the problem.
### Expected Behavior
Sqs queue is created successfully like with 0.14.0
### How are you starting LocalStack?
With a docker-compose file
### Steps To Reproduce
cdklocal deploy
### Environment
```markdown
- LocalStack: 0.14.1
```
### Anything else?
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `localstack/services/cloudformation/models/sqs.py`
Content:
```
1 import json
2 import logging
3
4 from botocore.exceptions import ClientError
5
6 from localstack.services.cloudformation.deployment_utils import (
7 PLACEHOLDER_RESOURCE_NAME,
8 generate_default_name,
9 params_list_to_dict,
10 params_select_attributes,
11 )
12 from localstack.services.cloudformation.service_models import (
13 DependencyNotYetSatisfied,
14 GenericBaseModel,
15 )
16 from localstack.utils.aws import aws_stack
17 from localstack.utils.common import short_uid
18
19 LOG = logging.getLogger(__name__)
20
21
22 class QueuePolicy(GenericBaseModel):
23 @classmethod
24 def cloudformation_type(cls):
25 return "AWS::SQS::QueuePolicy"
26
27 @classmethod
28 def get_deploy_templates(cls):
29 def _create(resource_id, resources, resource_type, func, stack_name):
30 sqs_client = aws_stack.connect_to_service("sqs")
31 resource = cls(resources[resource_id])
32 props = resource.props
33
34 # TODO: generalize/support in get_physical_resource_id
35 resources[resource_id]["PhysicalResourceId"] = "%s-%s-%s" % (
36 stack_name,
37 resource_id,
38 short_uid(),
39 )
40
41 policy = json.dumps(props["PolicyDocument"])
42 for queue in props["Queues"]:
43 sqs_client.set_queue_attributes(QueueUrl=queue, Attributes={"Policy": policy})
44
45 def _delete(resource_id, resources, *args, **kwargs):
46 sqs_client = aws_stack.connect_to_service("sqs")
47 resource = cls(resources[resource_id])
48 props = resource.props
49
50 for queue in props["Queues"]:
51 try:
52 sqs_client.set_queue_attributes(QueueUrl=queue, Attributes={"Policy": ""})
53 except ClientError as err:
54 if "AWS.SimpleQueueService.NonExistentQueue" != err.response["Error"]["Code"]:
55 raise
56
57 return {
58 "create": {"function": _create},
59 "delete": {
60 "function": _delete,
61 },
62 }
63
64
65 class SQSQueue(GenericBaseModel):
66 @classmethod
67 def cloudformation_type(cls):
68 return "AWS::SQS::Queue"
69
70 def get_resource_name(self):
71 return self.props.get("QueueName")
72
73 def get_physical_resource_id(self, attribute=None, **kwargs):
74 queue_url = None
75 props = self.props
76 try:
77 queue_url = aws_stack.get_sqs_queue_url(props.get("QueueName"))
78 except Exception as e:
79 if "NonExistentQueue" in str(e):
80 raise DependencyNotYetSatisfied(
81 resource_ids=self.resource_id, message="Unable to get queue: %s" % e
82 )
83 if attribute == "Arn":
84 return aws_stack.sqs_queue_arn(props.get("QueueName"))
85 return queue_url
86
87 def fetch_state(self, stack_name, resources):
88 queue_name = self.resolve_refs_recursively(stack_name, self.props["QueueName"], resources)
89 sqs_client = aws_stack.connect_to_service("sqs")
90 queues = sqs_client.list_queues()
91 result = list(
92 filter(
93 lambda item:
94 # TODO possibly find a better way to compare resource_id with queue URLs
95 item.endswith("/%s" % queue_name),
96 queues.get("QueueUrls", []),
97 )
98 )
99 if not result:
100 return None
101 result = sqs_client.get_queue_attributes(QueueUrl=result[0], AttributeNames=["All"])[
102 "Attributes"
103 ]
104 result["Arn"] = result["QueueArn"]
105 return result
106
107 @staticmethod
108 def add_defaults(resource, stack_name: str):
109 role_name = resource.get("Properties", {}).get("QueueName")
110 if not role_name:
111 resource["Properties"]["QueueName"] = generate_default_name(
112 stack_name, resource["LogicalResourceId"]
113 )
114
115 @classmethod
116 def get_deploy_templates(cls):
117 def _queue_url(params, resources, resource_id, **kwargs):
118 resource = cls(resources[resource_id])
119 props = resource.props
120 queue_url = resource.physical_resource_id or props.get("QueueUrl")
121 if queue_url:
122 return queue_url
123 return aws_stack.sqs_queue_url_for_arn(props["QueueArn"])
124
125 return {
126 "create": {
127 "function": "create_queue",
128 "parameters": {
129 "QueueName": ["QueueName", PLACEHOLDER_RESOURCE_NAME],
130 "Attributes": params_select_attributes(
131 "ContentBasedDeduplication",
132 "DelaySeconds",
133 "FifoQueue",
134 "MaximumMessageSize",
135 "MessageRetentionPeriod",
136 "VisibilityTimeout",
137 "RedrivePolicy",
138 "ReceiveMessageWaitTimeSeconds",
139 ),
140 "tags": params_list_to_dict("Tags"),
141 },
142 },
143 "delete": {
144 "function": "delete_queue",
145 "parameters": {"QueueUrl": _queue_url},
146 },
147 }
148
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/localstack/services/cloudformation/models/sqs.py b/localstack/services/cloudformation/models/sqs.py
--- a/localstack/services/cloudformation/models/sqs.py
+++ b/localstack/services/cloudformation/models/sqs.py
@@ -107,10 +107,13 @@
@staticmethod
def add_defaults(resource, stack_name: str):
role_name = resource.get("Properties", {}).get("QueueName")
+
if not role_name:
resource["Properties"]["QueueName"] = generate_default_name(
stack_name, resource["LogicalResourceId"]
)
+ if resource["Properties"].get("FifoQueue"):
+ resource["Properties"]["QueueName"] += ".fifo"
@classmethod
def get_deploy_templates(cls):
| {"golden_diff": "diff --git a/localstack/services/cloudformation/models/sqs.py b/localstack/services/cloudformation/models/sqs.py\n--- a/localstack/services/cloudformation/models/sqs.py\n+++ b/localstack/services/cloudformation/models/sqs.py\n@@ -107,10 +107,13 @@\n @staticmethod\n def add_defaults(resource, stack_name: str):\n role_name = resource.get(\"Properties\", {}).get(\"QueueName\")\n+\n if not role_name:\n resource[\"Properties\"][\"QueueName\"] = generate_default_name(\n stack_name, resource[\"LogicalResourceId\"]\n )\n+ if resource[\"Properties\"].get(\"FifoQueue\"):\n+ resource[\"Properties\"][\"QueueName\"] += \".fifo\"\n \n @classmethod\n def get_deploy_templates(cls):\n", "issue": "bug: sqs creation breaks with 0.14.1\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues\n\n### Current Behavior\n\nHaving the following cdk definition for a sqs queue\r\nconst queue = new sqs.Queue(scope, `foo-queue`, {\r\n fifo: true,\r\n visibilityTimeout: Duration.seconds(300),\r\n contentBasedDeduplication: true\r\n})\r\ndeploy works normally with localstack 0.14.0 using https://github.com/localstack/aws-cdk-local\r\nBut after upgrading to 0.14.1 we see the following error\r\n2022-03-17T11:34:23.851:WARNING:localstack.utils.cloudformation.template_deployer: Error calling <bound method ClientCreator._create_api_method.<locals>._api_call of <botocore.client.SQS object at 0x7f11db7346d0>> with params: {'QueueName': 'FooStack-fooqueueB0475DE4-8bf04fb7', 'Attributes': {'ContentBasedDeduplication': 'True', 'FifoQueue': 'True', 'VisibilityTimeout': '300'}, 'tags': {}} for resource: {'Type': 'AWS::SQS::Queue', 'UpdateReplacePolicy': 'Delete', 'DeletionPolicy': 'Delete', 'Metadata': {'aws:cdk:path': 'FooStack/foo-queue/Resource'}, 'LogicalResourceId': 'fooqueueB0475DE4', 'Properties': {'ContentBasedDeduplication': True, 'FifoQueue': True, 'VisibilityTimeout': 300, 'QueueName': 'FooStack-fooqueueB0475DE4-8bf04fb7'}, '_state_': {}}\r\nOnly difference being the localstack upgrade, downgrading back to 0.14.0 fixes the problem.\r\n\n\n### Expected Behavior\n\nSqs queue is created successfully like with 0.14.0\n\n### How are you starting LocalStack?\n\nWith a docker-compose file\n\n### Steps To Reproduce\n\ncdklocal deploy\n\n### Environment\n\n```markdown\n- LocalStack: 0.14.1\n```\n\n\n### Anything else?\n\n_No response_\n", "before_files": [{"content": "import json\nimport logging\n\nfrom botocore.exceptions import ClientError\n\nfrom localstack.services.cloudformation.deployment_utils import (\n PLACEHOLDER_RESOURCE_NAME,\n generate_default_name,\n params_list_to_dict,\n params_select_attributes,\n)\nfrom localstack.services.cloudformation.service_models import (\n DependencyNotYetSatisfied,\n GenericBaseModel,\n)\nfrom localstack.utils.aws import aws_stack\nfrom localstack.utils.common import short_uid\n\nLOG = logging.getLogger(__name__)\n\n\nclass QueuePolicy(GenericBaseModel):\n @classmethod\n def cloudformation_type(cls):\n return \"AWS::SQS::QueuePolicy\"\n\n @classmethod\n def get_deploy_templates(cls):\n def _create(resource_id, resources, resource_type, func, stack_name):\n sqs_client = aws_stack.connect_to_service(\"sqs\")\n resource = cls(resources[resource_id])\n props = resource.props\n\n # TODO: generalize/support in get_physical_resource_id\n resources[resource_id][\"PhysicalResourceId\"] = \"%s-%s-%s\" % (\n stack_name,\n resource_id,\n short_uid(),\n )\n\n policy = json.dumps(props[\"PolicyDocument\"])\n for queue in props[\"Queues\"]:\n 
sqs_client.set_queue_attributes(QueueUrl=queue, Attributes={\"Policy\": policy})\n\n def _delete(resource_id, resources, *args, **kwargs):\n sqs_client = aws_stack.connect_to_service(\"sqs\")\n resource = cls(resources[resource_id])\n props = resource.props\n\n for queue in props[\"Queues\"]:\n try:\n sqs_client.set_queue_attributes(QueueUrl=queue, Attributes={\"Policy\": \"\"})\n except ClientError as err:\n if \"AWS.SimpleQueueService.NonExistentQueue\" != err.response[\"Error\"][\"Code\"]:\n raise\n\n return {\n \"create\": {\"function\": _create},\n \"delete\": {\n \"function\": _delete,\n },\n }\n\n\nclass SQSQueue(GenericBaseModel):\n @classmethod\n def cloudformation_type(cls):\n return \"AWS::SQS::Queue\"\n\n def get_resource_name(self):\n return self.props.get(\"QueueName\")\n\n def get_physical_resource_id(self, attribute=None, **kwargs):\n queue_url = None\n props = self.props\n try:\n queue_url = aws_stack.get_sqs_queue_url(props.get(\"QueueName\"))\n except Exception as e:\n if \"NonExistentQueue\" in str(e):\n raise DependencyNotYetSatisfied(\n resource_ids=self.resource_id, message=\"Unable to get queue: %s\" % e\n )\n if attribute == \"Arn\":\n return aws_stack.sqs_queue_arn(props.get(\"QueueName\"))\n return queue_url\n\n def fetch_state(self, stack_name, resources):\n queue_name = self.resolve_refs_recursively(stack_name, self.props[\"QueueName\"], resources)\n sqs_client = aws_stack.connect_to_service(\"sqs\")\n queues = sqs_client.list_queues()\n result = list(\n filter(\n lambda item:\n # TODO possibly find a better way to compare resource_id with queue URLs\n item.endswith(\"/%s\" % queue_name),\n queues.get(\"QueueUrls\", []),\n )\n )\n if not result:\n return None\n result = sqs_client.get_queue_attributes(QueueUrl=result[0], AttributeNames=[\"All\"])[\n \"Attributes\"\n ]\n result[\"Arn\"] = result[\"QueueArn\"]\n return result\n\n @staticmethod\n def add_defaults(resource, stack_name: str):\n role_name = resource.get(\"Properties\", {}).get(\"QueueName\")\n if not role_name:\n resource[\"Properties\"][\"QueueName\"] = generate_default_name(\n stack_name, resource[\"LogicalResourceId\"]\n )\n\n @classmethod\n def get_deploy_templates(cls):\n def _queue_url(params, resources, resource_id, **kwargs):\n resource = cls(resources[resource_id])\n props = resource.props\n queue_url = resource.physical_resource_id or props.get(\"QueueUrl\")\n if queue_url:\n return queue_url\n return aws_stack.sqs_queue_url_for_arn(props[\"QueueArn\"])\n\n return {\n \"create\": {\n \"function\": \"create_queue\",\n \"parameters\": {\n \"QueueName\": [\"QueueName\", PLACEHOLDER_RESOURCE_NAME],\n \"Attributes\": params_select_attributes(\n \"ContentBasedDeduplication\",\n \"DelaySeconds\",\n \"FifoQueue\",\n \"MaximumMessageSize\",\n \"MessageRetentionPeriod\",\n \"VisibilityTimeout\",\n \"RedrivePolicy\",\n \"ReceiveMessageWaitTimeSeconds\",\n ),\n \"tags\": params_list_to_dict(\"Tags\"),\n },\n },\n \"delete\": {\n \"function\": \"delete_queue\",\n \"parameters\": {\"QueueUrl\": _queue_url},\n },\n }\n", "path": "localstack/services/cloudformation/models/sqs.py"}], "after_files": [{"content": "import json\nimport logging\n\nfrom botocore.exceptions import ClientError\n\nfrom localstack.services.cloudformation.deployment_utils import (\n PLACEHOLDER_RESOURCE_NAME,\n generate_default_name,\n params_list_to_dict,\n params_select_attributes,\n)\nfrom localstack.services.cloudformation.service_models import (\n DependencyNotYetSatisfied,\n GenericBaseModel,\n)\nfrom localstack.utils.aws import 
aws_stack\nfrom localstack.utils.common import short_uid\n\nLOG = logging.getLogger(__name__)\n\n\nclass QueuePolicy(GenericBaseModel):\n @classmethod\n def cloudformation_type(cls):\n return \"AWS::SQS::QueuePolicy\"\n\n @classmethod\n def get_deploy_templates(cls):\n def _create(resource_id, resources, resource_type, func, stack_name):\n sqs_client = aws_stack.connect_to_service(\"sqs\")\n resource = cls(resources[resource_id])\n props = resource.props\n\n # TODO: generalize/support in get_physical_resource_id\n resources[resource_id][\"PhysicalResourceId\"] = \"%s-%s-%s\" % (\n stack_name,\n resource_id,\n short_uid(),\n )\n\n policy = json.dumps(props[\"PolicyDocument\"])\n for queue in props[\"Queues\"]:\n sqs_client.set_queue_attributes(QueueUrl=queue, Attributes={\"Policy\": policy})\n\n def _delete(resource_id, resources, *args, **kwargs):\n sqs_client = aws_stack.connect_to_service(\"sqs\")\n resource = cls(resources[resource_id])\n props = resource.props\n\n for queue in props[\"Queues\"]:\n try:\n sqs_client.set_queue_attributes(QueueUrl=queue, Attributes={\"Policy\": \"\"})\n except ClientError as err:\n if \"AWS.SimpleQueueService.NonExistentQueue\" != err.response[\"Error\"][\"Code\"]:\n raise\n\n return {\n \"create\": {\"function\": _create},\n \"delete\": {\n \"function\": _delete,\n },\n }\n\n\nclass SQSQueue(GenericBaseModel):\n @classmethod\n def cloudformation_type(cls):\n return \"AWS::SQS::Queue\"\n\n def get_resource_name(self):\n return self.props.get(\"QueueName\")\n\n def get_physical_resource_id(self, attribute=None, **kwargs):\n queue_url = None\n props = self.props\n try:\n queue_url = aws_stack.get_sqs_queue_url(props.get(\"QueueName\"))\n except Exception as e:\n if \"NonExistentQueue\" in str(e):\n raise DependencyNotYetSatisfied(\n resource_ids=self.resource_id, message=\"Unable to get queue: %s\" % e\n )\n if attribute == \"Arn\":\n return aws_stack.sqs_queue_arn(props.get(\"QueueName\"))\n return queue_url\n\n def fetch_state(self, stack_name, resources):\n queue_name = self.resolve_refs_recursively(stack_name, self.props[\"QueueName\"], resources)\n sqs_client = aws_stack.connect_to_service(\"sqs\")\n queues = sqs_client.list_queues()\n result = list(\n filter(\n lambda item:\n # TODO possibly find a better way to compare resource_id with queue URLs\n item.endswith(\"/%s\" % queue_name),\n queues.get(\"QueueUrls\", []),\n )\n )\n if not result:\n return None\n result = sqs_client.get_queue_attributes(QueueUrl=result[0], AttributeNames=[\"All\"])[\n \"Attributes\"\n ]\n result[\"Arn\"] = result[\"QueueArn\"]\n return result\n\n @staticmethod\n def add_defaults(resource, stack_name: str):\n role_name = resource.get(\"Properties\", {}).get(\"QueueName\")\n\n if not role_name:\n resource[\"Properties\"][\"QueueName\"] = generate_default_name(\n stack_name, resource[\"LogicalResourceId\"]\n )\n if resource[\"Properties\"].get(\"FifoQueue\"):\n resource[\"Properties\"][\"QueueName\"] += \".fifo\"\n\n @classmethod\n def get_deploy_templates(cls):\n def _queue_url(params, resources, resource_id, **kwargs):\n resource = cls(resources[resource_id])\n props = resource.props\n queue_url = resource.physical_resource_id or props.get(\"QueueUrl\")\n if queue_url:\n return queue_url\n return aws_stack.sqs_queue_url_for_arn(props[\"QueueArn\"])\n\n return {\n \"create\": {\n \"function\": \"create_queue\",\n \"parameters\": {\n \"QueueName\": [\"QueueName\", PLACEHOLDER_RESOURCE_NAME],\n \"Attributes\": params_select_attributes(\n \"ContentBasedDeduplication\",\n 
\"DelaySeconds\",\n \"FifoQueue\",\n \"MaximumMessageSize\",\n \"MessageRetentionPeriod\",\n \"VisibilityTimeout\",\n \"RedrivePolicy\",\n \"ReceiveMessageWaitTimeSeconds\",\n ),\n \"tags\": params_list_to_dict(\"Tags\"),\n },\n },\n \"delete\": {\n \"function\": \"delete_queue\",\n \"parameters\": {\"QueueUrl\": _queue_url},\n },\n }\n", "path": "localstack/services/cloudformation/models/sqs.py"}]} | 2,124 | 163 |
gh_patches_debug_6043 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-10389 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Docs: Config file deprecation warnings
* [x] Upgrade v1 docs to clearly indicate that it's removed in September 2023 and link to the v2 version
* [x] Add warning banners in various places
References https://github.com/readthedocs/readthedocs.org/issues/10342
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 """
2 Shared Sphinx configuration using sphinx-multiproject.
3
4 To build each project, the ``PROJECT`` environment variable is used.
5
6 .. code:: console
7
8 $ make html # build default project
9 $ PROJECT=dev make html # build the dev project
10
11 for more information read https://sphinx-multiproject.readthedocs.io/.
12 """
13
14 import os
15 import sys
16
17 from multiproject.utils import get_project
18
19 sys.path.append(os.path.abspath("_ext"))
20 extensions = [
21 "hoverxref.extension",
22 "multiproject",
23 "myst_parser",
24 "notfound.extension",
25 "sphinx_design",
26 "sphinx_search.extension",
27 "sphinx_tabs.tabs",
28 "sphinx-prompt",
29 "sphinx.ext.autodoc",
30 "sphinx.ext.autosectionlabel",
31 "sphinx.ext.extlinks",
32 "sphinx.ext.intersphinx",
33 "sphinxcontrib.httpdomain",
34 "sphinxcontrib.video",
35 "sphinxemoji.sphinxemoji",
36 "sphinxext.opengraph",
37 ]
38
39 multiproject_projects = {
40 "user": {
41 "use_config_file": False,
42 "config": {
43 "project": "Read the Docs user documentation",
44 },
45 },
46 "dev": {
47 "use_config_file": False,
48 "config": {
49 "project": "Read the Docs developer documentation",
50 },
51 },
52 }
53
54 docset = get_project(multiproject_projects)
55
56 ogp_site_name = "Read the Docs Documentation"
57 ogp_use_first_image = True # https://github.com/readthedocs/blog/pull/118
58 ogp_image = "https://docs.readthedocs.io/en/latest/_static/img/logo-opengraph.png"
59 # Inspired by https://github.com/executablebooks/MyST-Parser/pull/404/
60 ogp_custom_meta_tags = [
61 '<meta name="twitter:card" content="summary_large_image" />',
62 ]
63 ogp_enable_meta_description = True
64 ogp_description_length = 300
65
66 templates_path = ["_templates"]
67
68 # This may be elevated as a general issue for documentation and behavioral
69 # change to the Sphinx build:
70 # This will ensure that we use the correctly set environment for canonical URLs
71 # Old Read the Docs injections makes it point only to the default version,
72 # for instance /en/stable/
73 html_baseurl = os.environ.get("READTHEDOCS_CANONICAL_URL", "/")
74
75 master_doc = "index"
76 copyright = "Read the Docs, Inc & contributors"
77 version = "9.13.2"
78 release = version
79 exclude_patterns = ["_build", "shared", "_includes"]
80 default_role = "obj"
81 intersphinx_cache_limit = 14 # cache for 2 weeks
82 intersphinx_timeout = 3 # 3 seconds timeout
83 intersphinx_mapping = {
84 "python": ("https://docs.python.org/3.10/", None),
85 "django": (
86 "https://docs.djangoproject.com/en/stable/",
87 "https://docs.djangoproject.com/en/stable/_objects/",
88 ),
89 "sphinx": ("https://www.sphinx-doc.org/en/master/", None),
90 "pip": ("https://pip.pypa.io/en/stable/", None),
91 "nbsphinx": ("https://nbsphinx.readthedocs.io/en/latest/", None),
92 "myst-nb": ("https://myst-nb.readthedocs.io/en/stable/", None),
93 "ipywidgets": ("https://ipywidgets.readthedocs.io/en/stable/", None),
94 "jupytext": ("https://jupytext.readthedocs.io/en/stable/", None),
95 "ipyleaflet": ("https://ipyleaflet.readthedocs.io/en/latest/", None),
96 "poliastro": ("https://docs.poliastro.space/en/stable/", None),
97 "qiskit": ("https://qiskit.org/documentation/", None),
98 "myst-parser": ("https://myst-parser.readthedocs.io/en/stable/", None),
99 "writethedocs": ("https://www.writethedocs.org/", None),
100 "jupyterbook": ("https://jupyterbook.org/en/stable/", None),
101 "executablebook": ("https://executablebooks.org/en/latest/", None),
102 "rst-to-myst": ("https://rst-to-myst.readthedocs.io/en/stable/", None),
103 "rtd": ("https://docs.readthedocs.io/en/stable/", None),
104 "rtd-dev": ("https://dev.readthedocs.io/en/latest/", None),
105 "jupyter": ("https://docs.jupyter.org/en/latest/", None),
106 }
107
108 # Intersphinx: Do not try to resolve unresolved labels that aren't explicitly prefixed.
109 # The default setting for intersphinx_disabled_reftypes can cause some pretty bad
110 # breakage because we have rtd and rtd-dev stable versions in our mappings.
111 # Hence, if we refactor labels, we won't see broken references, since the
112 # currently active stable mapping keeps resolving.
113 # Recommending doing this on all projects with Intersphinx.
114 # https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html#confval-intersphinx_disabled_reftypes
115 intersphinx_disabled_reftypes = ["*"]
116
117 myst_enable_extensions = [
118 "deflist",
119 ]
120 hoverxref_intersphinx = [
121 "sphinx",
122 "pip",
123 "nbsphinx",
124 "myst-nb",
125 "ipywidgets",
126 "jupytext",
127 ]
128 htmlhelp_basename = "ReadTheDocsdoc"
129 latex_documents = [
130 (
131 "index",
132 "ReadTheDocs.tex",
133 "Read the Docs Documentation",
134 "Eric Holscher, Charlie Leifer, Bobby Grace",
135 "manual",
136 ),
137 ]
138 man_pages = [
139 (
140 "index",
141 "read-the-docs",
142 "Read the Docs Documentation",
143 ["Eric Holscher, Charlie Leifer, Bobby Grace"],
144 1,
145 )
146 ]
147
148 language = "en"
149
150 locale_dirs = [
151 f"{docset}/locale/",
152 ]
153 gettext_compact = False
154
155 html_theme = "sphinx_rtd_theme"
156 html_static_path = ["_static", f"{docset}/_static"]
157 html_css_files = ["css/custom.css", "css/sphinx_prompt_css.css"]
158 html_js_files = ["js/expand_tabs.js"]
159
160 if os.environ.get("READTHEDOCS_VERSION_TYPE") == "external":
161 html_js_files.append("js/readthedocs-doc-diff.js")
162
163 html_logo = "img/logo.svg"
164 html_theme_options = {
165 "logo_only": True,
166 "display_version": False,
167 }
168 html_context = {
169 # Fix the "edit on" links.
170 # TODO: remove once we support different rtd config
171 # files per project.
172 "conf_py_path": f"/docs/{docset}/",
173 # Use to generate the Plausible "data-domain" attribute from the template
174 "plausible_domain": f"{os.environ.get('READTHEDOCS_PROJECT')}.readthedocs.io",
175 }
176
177 hoverxref_auto_ref = True
178 hoverxref_domains = ["py"]
179 hoverxref_roles = [
180 "option",
181 # Documentation pages
182 # Not supported yet: https://github.com/readthedocs/sphinx-hoverxref/issues/18
183 "doc",
184 # Glossary terms
185 "term",
186 ]
187 hoverxref_role_types = {
188 "mod": "modal", # for Python Sphinx Domain
189 "doc": "modal", # for whole docs
190 "class": "tooltip", # for Python Sphinx Domain
191 "ref": "tooltip", # for hoverxref_auto_ref config
192 "confval": "tooltip", # for custom object
193 "term": "tooltip", # for glossaries
194 }
195
196 # See dev/style_guide.rst for documentation
197 rst_epilog = """
198 .. |org_brand| replace:: Read the Docs Community
199 .. |com_brand| replace:: Read the Docs for Business
200 .. |git_providers_and| replace:: GitHub, Bitbucket, and GitLab
201 .. |git_providers_or| replace:: GitHub, Bitbucket, or GitLab
202 """
203
204 # Activate autosectionlabel plugin
205 autosectionlabel_prefix_document = True
206
207 # sphinx-notfound-page
208 # https://github.com/readthedocs/sphinx-notfound-page
209 notfound_context = {
210 "title": "Page Not Found",
211 "body": """
212 <h1>Page Not Found</h1>
213
214 <p>Sorry, we couldn't find that page.</p>
215
216 <p>Try using the search box or go to the homepage.</p>
217 """,
218 }
219 linkcheck_retries = 2
220 linkcheck_timeout = 1
221 linkcheck_workers = 10
222 linkcheck_ignore = [
223 r"http://127\.0\.0\.1",
224 r"http://localhost",
225 r"http://community\.dev\.readthedocs\.io",
226 r"https://yourproject\.readthedocs\.io",
227 r"https?://docs\.example\.com",
228 r"https://foo\.readthedocs\.io/projects",
229 r"https://github\.com.+?#L\d+",
230 r"https://github\.com/readthedocs/readthedocs\.org/issues",
231 r"https://github\.com/readthedocs/readthedocs\.org/pull",
232 r"https://docs\.readthedocs\.io/\?rtd_search",
233 r"https://readthedocs\.org/search",
234 # This page is under login
235 r"https://readthedocs\.org/accounts/gold",
236 ]
237
238 extlinks = {
239 "rtd-issue": ("https://github.com/readthedocs/readthedocs.org/issues/%s", "#%s"),
240 }
241
242 # Disable epub mimetype warnings
243 suppress_warnings = ["epub.unknown_project_files"]
244
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -102,6 +102,7 @@
"rst-to-myst": ("https://rst-to-myst.readthedocs.io/en/stable/", None),
"rtd": ("https://docs.readthedocs.io/en/stable/", None),
"rtd-dev": ("https://dev.readthedocs.io/en/latest/", None),
+ "rtd-blog": ("https://blog.readthedocs.com/", None),
"jupyter": ("https://docs.jupyter.org/en/latest/", None),
}
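
What this one-line change buys: adding `"rtd-blog"` to `intersphinx_mapping` lets the documentation cross-reference pages published on the Read the Docs blog — presumably so the config-file deprecation notices tracked by this row have an official blog post to link to. Below is a minimal, hypothetical `conf.py` excerpt showing how the new key sits next to the existing entries (the surrounding keys are copied from the file above; the trailing `None` simply tells Sphinx to fetch `objects.inv` from that base URL, and the `"rtd-blog:"` prefix mentioned in the comment is an assumption about how the mapping would be consumed):

```python
# Hypothetical, trimmed-down conf.py fragment -- only the intersphinx bits.
intersphinx_mapping = {
    "rtd": ("https://docs.readthedocs.io/en/stable/", None),
    "rtd-dev": ("https://dev.readthedocs.io/en/latest/", None),
    "rtd-blog": ("https://blog.readthedocs.com/", None),  # the entry the patch adds
}

# The project also disables unprefixed intersphinx resolution, so references
# into the blog inventory must name the key explicitly in the .rst sources
# (e.g. a role target prefixed with "rtd-blog:").
intersphinx_disabled_reftypes = ["*"]
```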
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -102,6 +102,7 @@\n \"rst-to-myst\": (\"https://rst-to-myst.readthedocs.io/en/stable/\", None),\n \"rtd\": (\"https://docs.readthedocs.io/en/stable/\", None),\n \"rtd-dev\": (\"https://dev.readthedocs.io/en/latest/\", None),\n+ \"rtd-blog\": (\"https://blog.readthedocs.com/\", None),\n \"jupyter\": (\"https://docs.jupyter.org/en/latest/\", None),\n }\n", "issue": "Docs: Config file deprecation warnings\n* [x] Upgrade v1 docs to clearly indicate that it's removed in September 2023 and link to the v2 version\r\n* [x] Add warning banners in various places\r\n\r\nReferences https://github.com/readthedocs/readthedocs.org/issues/10342\n", "before_files": [{"content": "\"\"\"\nShared Sphinx configuration using sphinx-multiproject.\n\nTo build each project, the ``PROJECT`` environment variable is used.\n\n.. code:: console\n\n $ make html # build default project\n $ PROJECT=dev make html # build the dev project\n\nfor more information read https://sphinx-multiproject.readthedocs.io/.\n\"\"\"\n\nimport os\nimport sys\n\nfrom multiproject.utils import get_project\n\nsys.path.append(os.path.abspath(\"_ext\"))\nextensions = [\n \"hoverxref.extension\",\n \"multiproject\",\n \"myst_parser\",\n \"notfound.extension\",\n \"sphinx_design\",\n \"sphinx_search.extension\",\n \"sphinx_tabs.tabs\",\n \"sphinx-prompt\",\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosectionlabel\",\n \"sphinx.ext.extlinks\",\n \"sphinx.ext.intersphinx\",\n \"sphinxcontrib.httpdomain\",\n \"sphinxcontrib.video\",\n \"sphinxemoji.sphinxemoji\",\n \"sphinxext.opengraph\",\n]\n\nmultiproject_projects = {\n \"user\": {\n \"use_config_file\": False,\n \"config\": {\n \"project\": \"Read the Docs user documentation\",\n },\n },\n \"dev\": {\n \"use_config_file\": False,\n \"config\": {\n \"project\": \"Read the Docs developer documentation\",\n },\n },\n}\n\ndocset = get_project(multiproject_projects)\n\nogp_site_name = \"Read the Docs Documentation\"\nogp_use_first_image = True # https://github.com/readthedocs/blog/pull/118\nogp_image = \"https://docs.readthedocs.io/en/latest/_static/img/logo-opengraph.png\"\n# Inspired by https://github.com/executablebooks/MyST-Parser/pull/404/\nogp_custom_meta_tags = [\n '<meta name=\"twitter:card\" content=\"summary_large_image\" />',\n]\nogp_enable_meta_description = True\nogp_description_length = 300\n\ntemplates_path = [\"_templates\"]\n\n# This may be elevated as a general issue for documentation and behavioral\n# change to the Sphinx build:\n# This will ensure that we use the correctly set environment for canonical URLs\n# Old Read the Docs injections makes it point only to the default version,\n# for instance /en/stable/\nhtml_baseurl = os.environ.get(\"READTHEDOCS_CANONICAL_URL\", \"/\")\n\nmaster_doc = \"index\"\ncopyright = \"Read the Docs, Inc & contributors\"\nversion = \"9.13.2\"\nrelease = version\nexclude_patterns = [\"_build\", \"shared\", \"_includes\"]\ndefault_role = \"obj\"\nintersphinx_cache_limit = 14 # cache for 2 weeks\nintersphinx_timeout = 3 # 3 seconds timeout\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3.10/\", None),\n \"django\": (\n \"https://docs.djangoproject.com/en/stable/\",\n \"https://docs.djangoproject.com/en/stable/_objects/\",\n ),\n \"sphinx\": (\"https://www.sphinx-doc.org/en/master/\", None),\n \"pip\": (\"https://pip.pypa.io/en/stable/\", None),\n \"nbsphinx\": (\"https://nbsphinx.readthedocs.io/en/latest/\", None),\n \"myst-nb\": 
(\"https://myst-nb.readthedocs.io/en/stable/\", None),\n \"ipywidgets\": (\"https://ipywidgets.readthedocs.io/en/stable/\", None),\n \"jupytext\": (\"https://jupytext.readthedocs.io/en/stable/\", None),\n \"ipyleaflet\": (\"https://ipyleaflet.readthedocs.io/en/latest/\", None),\n \"poliastro\": (\"https://docs.poliastro.space/en/stable/\", None),\n \"qiskit\": (\"https://qiskit.org/documentation/\", None),\n \"myst-parser\": (\"https://myst-parser.readthedocs.io/en/stable/\", None),\n \"writethedocs\": (\"https://www.writethedocs.org/\", None),\n \"jupyterbook\": (\"https://jupyterbook.org/en/stable/\", None),\n \"executablebook\": (\"https://executablebooks.org/en/latest/\", None),\n \"rst-to-myst\": (\"https://rst-to-myst.readthedocs.io/en/stable/\", None),\n \"rtd\": (\"https://docs.readthedocs.io/en/stable/\", None),\n \"rtd-dev\": (\"https://dev.readthedocs.io/en/latest/\", None),\n \"jupyter\": (\"https://docs.jupyter.org/en/latest/\", None),\n}\n\n# Intersphinx: Do not try to resolve unresolved labels that aren't explicitly prefixed.\n# The default setting for intersphinx_disabled_reftypes can cause some pretty bad\n# breakage because we have rtd and rtd-dev stable versions in our mappings.\n# Hence, if we refactor labels, we won't see broken references, since the\n# currently active stable mapping keeps resolving.\n# Recommending doing this on all projects with Intersphinx.\n# https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html#confval-intersphinx_disabled_reftypes\nintersphinx_disabled_reftypes = [\"*\"]\n\nmyst_enable_extensions = [\n \"deflist\",\n]\nhoverxref_intersphinx = [\n \"sphinx\",\n \"pip\",\n \"nbsphinx\",\n \"myst-nb\",\n \"ipywidgets\",\n \"jupytext\",\n]\nhtmlhelp_basename = \"ReadTheDocsdoc\"\nlatex_documents = [\n (\n \"index\",\n \"ReadTheDocs.tex\",\n \"Read the Docs Documentation\",\n \"Eric Holscher, Charlie Leifer, Bobby Grace\",\n \"manual\",\n ),\n]\nman_pages = [\n (\n \"index\",\n \"read-the-docs\",\n \"Read the Docs Documentation\",\n [\"Eric Holscher, Charlie Leifer, Bobby Grace\"],\n 1,\n )\n]\n\nlanguage = \"en\"\n\nlocale_dirs = [\n f\"{docset}/locale/\",\n]\ngettext_compact = False\n\nhtml_theme = \"sphinx_rtd_theme\"\nhtml_static_path = [\"_static\", f\"{docset}/_static\"]\nhtml_css_files = [\"css/custom.css\", \"css/sphinx_prompt_css.css\"]\nhtml_js_files = [\"js/expand_tabs.js\"]\n\nif os.environ.get(\"READTHEDOCS_VERSION_TYPE\") == \"external\":\n html_js_files.append(\"js/readthedocs-doc-diff.js\")\n\nhtml_logo = \"img/logo.svg\"\nhtml_theme_options = {\n \"logo_only\": True,\n \"display_version\": False,\n}\nhtml_context = {\n # Fix the \"edit on\" links.\n # TODO: remove once we support different rtd config\n # files per project.\n \"conf_py_path\": f\"/docs/{docset}/\",\n # Use to generate the Plausible \"data-domain\" attribute from the template\n \"plausible_domain\": f\"{os.environ.get('READTHEDOCS_PROJECT')}.readthedocs.io\",\n}\n\nhoverxref_auto_ref = True\nhoverxref_domains = [\"py\"]\nhoverxref_roles = [\n \"option\",\n # Documentation pages\n # Not supported yet: https://github.com/readthedocs/sphinx-hoverxref/issues/18\n \"doc\",\n # Glossary terms\n \"term\",\n]\nhoverxref_role_types = {\n \"mod\": \"modal\", # for Python Sphinx Domain\n \"doc\": \"modal\", # for whole docs\n \"class\": \"tooltip\", # for Python Sphinx Domain\n \"ref\": \"tooltip\", # for hoverxref_auto_ref config\n \"confval\": \"tooltip\", # for custom object\n \"term\": \"tooltip\", # for glossaries\n}\n\n# See dev/style_guide.rst for 
documentation\nrst_epilog = \"\"\"\n.. |org_brand| replace:: Read the Docs Community\n.. |com_brand| replace:: Read the Docs for Business\n.. |git_providers_and| replace:: GitHub, Bitbucket, and GitLab\n.. |git_providers_or| replace:: GitHub, Bitbucket, or GitLab\n\"\"\"\n\n# Activate autosectionlabel plugin\nautosectionlabel_prefix_document = True\n\n# sphinx-notfound-page\n# https://github.com/readthedocs/sphinx-notfound-page\nnotfound_context = {\n \"title\": \"Page Not Found\",\n \"body\": \"\"\"\n<h1>Page Not Found</h1>\n\n<p>Sorry, we couldn't find that page.</p>\n\n<p>Try using the search box or go to the homepage.</p>\n\"\"\",\n}\nlinkcheck_retries = 2\nlinkcheck_timeout = 1\nlinkcheck_workers = 10\nlinkcheck_ignore = [\n r\"http://127\\.0\\.0\\.1\",\n r\"http://localhost\",\n r\"http://community\\.dev\\.readthedocs\\.io\",\n r\"https://yourproject\\.readthedocs\\.io\",\n r\"https?://docs\\.example\\.com\",\n r\"https://foo\\.readthedocs\\.io/projects\",\n r\"https://github\\.com.+?#L\\d+\",\n r\"https://github\\.com/readthedocs/readthedocs\\.org/issues\",\n r\"https://github\\.com/readthedocs/readthedocs\\.org/pull\",\n r\"https://docs\\.readthedocs\\.io/\\?rtd_search\",\n r\"https://readthedocs\\.org/search\",\n # This page is under login\n r\"https://readthedocs\\.org/accounts/gold\",\n]\n\nextlinks = {\n \"rtd-issue\": (\"https://github.com/readthedocs/readthedocs.org/issues/%s\", \"#%s\"),\n}\n\n# Disable epub mimetype warnings\nsuppress_warnings = [\"epub.unknown_project_files\"]\n", "path": "docs/conf.py"}], "after_files": [{"content": "\"\"\"\nShared Sphinx configuration using sphinx-multiproject.\n\nTo build each project, the ``PROJECT`` environment variable is used.\n\n.. code:: console\n\n $ make html # build default project\n $ PROJECT=dev make html # build the dev project\n\nfor more information read https://sphinx-multiproject.readthedocs.io/.\n\"\"\"\n\nimport os\nimport sys\n\nfrom multiproject.utils import get_project\n\nsys.path.append(os.path.abspath(\"_ext\"))\nextensions = [\n \"hoverxref.extension\",\n \"multiproject\",\n \"myst_parser\",\n \"notfound.extension\",\n \"sphinx_design\",\n \"sphinx_search.extension\",\n \"sphinx_tabs.tabs\",\n \"sphinx-prompt\",\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosectionlabel\",\n \"sphinx.ext.extlinks\",\n \"sphinx.ext.intersphinx\",\n \"sphinxcontrib.httpdomain\",\n \"sphinxcontrib.video\",\n \"sphinxemoji.sphinxemoji\",\n \"sphinxext.opengraph\",\n]\n\nmultiproject_projects = {\n \"user\": {\n \"use_config_file\": False,\n \"config\": {\n \"project\": \"Read the Docs user documentation\",\n },\n },\n \"dev\": {\n \"use_config_file\": False,\n \"config\": {\n \"project\": \"Read the Docs developer documentation\",\n },\n },\n}\n\ndocset = get_project(multiproject_projects)\n\nogp_site_name = \"Read the Docs Documentation\"\nogp_use_first_image = True # https://github.com/readthedocs/blog/pull/118\nogp_image = \"https://docs.readthedocs.io/en/latest/_static/img/logo-opengraph.png\"\n# Inspired by https://github.com/executablebooks/MyST-Parser/pull/404/\nogp_custom_meta_tags = [\n '<meta name=\"twitter:card\" content=\"summary_large_image\" />',\n]\nogp_enable_meta_description = True\nogp_description_length = 300\n\ntemplates_path = [\"_templates\"]\n\n# This may be elevated as a general issue for documentation and behavioral\n# change to the Sphinx build:\n# This will ensure that we use the correctly set environment for canonical URLs\n# Old Read the Docs injections makes it point only to the default version,\n# for 
instance /en/stable/\nhtml_baseurl = os.environ.get(\"READTHEDOCS_CANONICAL_URL\", \"/\")\n\nmaster_doc = \"index\"\ncopyright = \"Read the Docs, Inc & contributors\"\nversion = \"9.13.2\"\nrelease = version\nexclude_patterns = [\"_build\", \"shared\", \"_includes\"]\ndefault_role = \"obj\"\nintersphinx_cache_limit = 14 # cache for 2 weeks\nintersphinx_timeout = 3 # 3 seconds timeout\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3.10/\", None),\n \"django\": (\n \"https://docs.djangoproject.com/en/stable/\",\n \"https://docs.djangoproject.com/en/stable/_objects/\",\n ),\n \"sphinx\": (\"https://www.sphinx-doc.org/en/master/\", None),\n \"pip\": (\"https://pip.pypa.io/en/stable/\", None),\n \"nbsphinx\": (\"https://nbsphinx.readthedocs.io/en/latest/\", None),\n \"myst-nb\": (\"https://myst-nb.readthedocs.io/en/stable/\", None),\n \"ipywidgets\": (\"https://ipywidgets.readthedocs.io/en/stable/\", None),\n \"jupytext\": (\"https://jupytext.readthedocs.io/en/stable/\", None),\n \"ipyleaflet\": (\"https://ipyleaflet.readthedocs.io/en/latest/\", None),\n \"poliastro\": (\"https://docs.poliastro.space/en/stable/\", None),\n \"qiskit\": (\"https://qiskit.org/documentation/\", None),\n \"myst-parser\": (\"https://myst-parser.readthedocs.io/en/stable/\", None),\n \"writethedocs\": (\"https://www.writethedocs.org/\", None),\n \"jupyterbook\": (\"https://jupyterbook.org/en/stable/\", None),\n \"executablebook\": (\"https://executablebooks.org/en/latest/\", None),\n \"rst-to-myst\": (\"https://rst-to-myst.readthedocs.io/en/stable/\", None),\n \"rtd\": (\"https://docs.readthedocs.io/en/stable/\", None),\n \"rtd-dev\": (\"https://dev.readthedocs.io/en/latest/\", None),\n \"rtd-blog\": (\"https://blog.readthedocs.com/\", None),\n \"jupyter\": (\"https://docs.jupyter.org/en/latest/\", None),\n}\n\n# Intersphinx: Do not try to resolve unresolved labels that aren't explicitly prefixed.\n# The default setting for intersphinx_disabled_reftypes can cause some pretty bad\n# breakage because we have rtd and rtd-dev stable versions in our mappings.\n# Hence, if we refactor labels, we won't see broken references, since the\n# currently active stable mapping keeps resolving.\n# Recommending doing this on all projects with Intersphinx.\n# https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html#confval-intersphinx_disabled_reftypes\nintersphinx_disabled_reftypes = [\"*\"]\n\nmyst_enable_extensions = [\n \"deflist\",\n]\nhoverxref_intersphinx = [\n \"sphinx\",\n \"pip\",\n \"nbsphinx\",\n \"myst-nb\",\n \"ipywidgets\",\n \"jupytext\",\n]\nhtmlhelp_basename = \"ReadTheDocsdoc\"\nlatex_documents = [\n (\n \"index\",\n \"ReadTheDocs.tex\",\n \"Read the Docs Documentation\",\n \"Eric Holscher, Charlie Leifer, Bobby Grace\",\n \"manual\",\n ),\n]\nman_pages = [\n (\n \"index\",\n \"read-the-docs\",\n \"Read the Docs Documentation\",\n [\"Eric Holscher, Charlie Leifer, Bobby Grace\"],\n 1,\n )\n]\n\nlanguage = \"en\"\n\nlocale_dirs = [\n f\"{docset}/locale/\",\n]\ngettext_compact = False\n\nhtml_theme = \"sphinx_rtd_theme\"\nhtml_static_path = [\"_static\", f\"{docset}/_static\"]\nhtml_css_files = [\"css/custom.css\", \"css/sphinx_prompt_css.css\"]\nhtml_js_files = [\"js/expand_tabs.js\"]\n\nif os.environ.get(\"READTHEDOCS_VERSION_TYPE\") == \"external\":\n html_js_files.append(\"js/readthedocs-doc-diff.js\")\n\nhtml_logo = \"img/logo.svg\"\nhtml_theme_options = {\n \"logo_only\": True,\n \"display_version\": False,\n}\nhtml_context = {\n # Fix the \"edit on\" links.\n # TODO: remove once 
we support different rtd config\n # files per project.\n \"conf_py_path\": f\"/docs/{docset}/\",\n # Use to generate the Plausible \"data-domain\" attribute from the template\n \"plausible_domain\": f\"{os.environ.get('READTHEDOCS_PROJECT')}.readthedocs.io\",\n}\n\nhoverxref_auto_ref = True\nhoverxref_domains = [\"py\"]\nhoverxref_roles = [\n \"option\",\n # Documentation pages\n # Not supported yet: https://github.com/readthedocs/sphinx-hoverxref/issues/18\n \"doc\",\n # Glossary terms\n \"term\",\n]\nhoverxref_role_types = {\n \"mod\": \"modal\", # for Python Sphinx Domain\n \"doc\": \"modal\", # for whole docs\n \"class\": \"tooltip\", # for Python Sphinx Domain\n \"ref\": \"tooltip\", # for hoverxref_auto_ref config\n \"confval\": \"tooltip\", # for custom object\n \"term\": \"tooltip\", # for glossaries\n}\n\n# See dev/style_guide.rst for documentation\nrst_epilog = \"\"\"\n.. |org_brand| replace:: Read the Docs Community\n.. |com_brand| replace:: Read the Docs for Business\n.. |git_providers_and| replace:: GitHub, Bitbucket, and GitLab\n.. |git_providers_or| replace:: GitHub, Bitbucket, or GitLab\n\"\"\"\n\n# Activate autosectionlabel plugin\nautosectionlabel_prefix_document = True\n\n# sphinx-notfound-page\n# https://github.com/readthedocs/sphinx-notfound-page\nnotfound_context = {\n \"title\": \"Page Not Found\",\n \"body\": \"\"\"\n<h1>Page Not Found</h1>\n\n<p>Sorry, we couldn't find that page.</p>\n\n<p>Try using the search box or go to the homepage.</p>\n\"\"\",\n}\nlinkcheck_retries = 2\nlinkcheck_timeout = 1\nlinkcheck_workers = 10\nlinkcheck_ignore = [\n r\"http://127\\.0\\.0\\.1\",\n r\"http://localhost\",\n r\"http://community\\.dev\\.readthedocs\\.io\",\n r\"https://yourproject\\.readthedocs\\.io\",\n r\"https?://docs\\.example\\.com\",\n r\"https://foo\\.readthedocs\\.io/projects\",\n r\"https://github\\.com.+?#L\\d+\",\n r\"https://github\\.com/readthedocs/readthedocs\\.org/issues\",\n r\"https://github\\.com/readthedocs/readthedocs\\.org/pull\",\n r\"https://docs\\.readthedocs\\.io/\\?rtd_search\",\n r\"https://readthedocs\\.org/search\",\n # This page is under login\n r\"https://readthedocs\\.org/accounts/gold\",\n]\n\nextlinks = {\n \"rtd-issue\": (\"https://github.com/readthedocs/readthedocs.org/issues/%s\", \"#%s\"),\n}\n\n# Disable epub mimetype warnings\nsuppress_warnings = [\"epub.unknown_project_files\"]\n", "path": "docs/conf.py"}]} | 3,061 | 136 |
gh_patches_debug_36678 | rasdani/github-patches | git_diff | jazzband__pip-tools-909 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pip-compile --no-header <blank requirements.in> neither creates nor overwrites requirements.txt
If `requirements.in` is empty, running `pip-compile` will create or clobber `requirements.txt`, thereby removing all requirements. But if `--no-header` is passed to `pip-compile`, `requirements.txt` will not be affected in any way, no matter its state.
#### Environment Versions
1. Linux
1. Python version: `3.7.4`
1. pip version: `19.2.3`
1. pip-tools version: `4.1.0`
#### Steps to replicate
```bash
touch requirements.in
pip-compile --no-header
# no txt file created (unexpected)
pip-compile
# txt file created (as expected)
echo plumbum > requirements.in
pip-compile
echo > requirements.in
pip-compile --no-header
cat requirements.txt
```
full, unexpected:
```python
#
# This file is autogenerated by pip-compile
# To update, run:
#
# pip-compile
#
plumbum==1.6.7
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `piptools/writer.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import os
4 from itertools import chain
5
6 from .click import unstyle
7 from .logging import log
8 from .utils import (
9 UNSAFE_PACKAGES,
10 comment,
11 dedup,
12 format_requirement,
13 get_compile_command,
14 key_from_ireq,
15 )
16
17 MESSAGE_UNHASHED_PACKAGE = comment(
18 "# WARNING: pip install will require the following package to be hashed."
19 "\n# Consider using a hashable URL like "
20 "https://github.com/jazzband/pip-tools/archive/SOMECOMMIT.zip"
21 )
22
23 MESSAGE_UNSAFE_PACKAGES_UNPINNED = comment(
24 "# WARNING: The following packages were not pinned, but pip requires them to be"
25 "\n# pinned when the requirements file includes hashes. "
26 "Consider using the --allow-unsafe flag."
27 )
28
29 MESSAGE_UNSAFE_PACKAGES = comment(
30 "# The following packages are considered to be unsafe in a requirements file:"
31 )
32
33 MESSAGE_UNINSTALLABLE = (
34 "The generated requirements file may be rejected by pip install. "
35 "See # WARNING lines for details."
36 )
37
38
39 class OutputWriter(object):
40 def __init__(
41 self,
42 src_files,
43 dst_file,
44 click_ctx,
45 dry_run,
46 emit_header,
47 emit_index,
48 emit_trusted_host,
49 annotate,
50 generate_hashes,
51 default_index_url,
52 index_urls,
53 trusted_hosts,
54 format_control,
55 allow_unsafe,
56 find_links,
57 emit_find_links,
58 ):
59 self.src_files = src_files
60 self.dst_file = dst_file
61 self.click_ctx = click_ctx
62 self.dry_run = dry_run
63 self.emit_header = emit_header
64 self.emit_index = emit_index
65 self.emit_trusted_host = emit_trusted_host
66 self.annotate = annotate
67 self.generate_hashes = generate_hashes
68 self.default_index_url = default_index_url
69 self.index_urls = index_urls
70 self.trusted_hosts = trusted_hosts
71 self.format_control = format_control
72 self.allow_unsafe = allow_unsafe
73 self.find_links = find_links
74 self.emit_find_links = emit_find_links
75
76 def _sort_key(self, ireq):
77 return (not ireq.editable, str(ireq.req).lower())
78
79 def write_header(self):
80 if self.emit_header:
81 yield comment("#")
82 yield comment("# This file is autogenerated by pip-compile")
83 yield comment("# To update, run:")
84 yield comment("#")
85 compile_command = os.environ.get(
86 "CUSTOM_COMPILE_COMMAND"
87 ) or get_compile_command(self.click_ctx)
88 yield comment("# {}".format(compile_command))
89 yield comment("#")
90
91 def write_index_options(self):
92 if self.emit_index:
93 for index, index_url in enumerate(dedup(self.index_urls)):
94 if index_url.rstrip("/") == self.default_index_url:
95 continue
96 flag = "--index-url" if index == 0 else "--extra-index-url"
97 yield "{} {}".format(flag, index_url)
98
99 def write_trusted_hosts(self):
100 if self.emit_trusted_host:
101 for trusted_host in dedup(self.trusted_hosts):
102 yield "--trusted-host {}".format(trusted_host)
103
104 def write_format_controls(self):
105 for nb in dedup(self.format_control.no_binary):
106 yield "--no-binary {}".format(nb)
107 for ob in dedup(self.format_control.only_binary):
108 yield "--only-binary {}".format(ob)
109
110 def write_find_links(self):
111 if self.emit_find_links:
112 for find_link in dedup(self.find_links):
113 yield "--find-links {}".format(find_link)
114
115 def write_flags(self):
116 emitted = False
117 for line in chain(
118 self.write_index_options(),
119 self.write_find_links(),
120 self.write_trusted_hosts(),
121 self.write_format_controls(),
122 ):
123 emitted = True
124 yield line
125 if emitted:
126 yield ""
127
128 def _iter_lines(
129 self,
130 results,
131 unsafe_requirements=None,
132 reverse_dependencies=None,
133 primary_packages=None,
134 markers=None,
135 hashes=None,
136 ):
137 # default values
138 unsafe_requirements = unsafe_requirements or []
139 reverse_dependencies = reverse_dependencies or {}
140 primary_packages = primary_packages or []
141 markers = markers or {}
142 hashes = hashes or {}
143
144 # Check for unhashed or unpinned packages if at least one package does have
145 # hashes, which will trigger pip install's --require-hashes mode.
146 warn_uninstallable = False
147 has_hashes = hashes and any(hash for hash in hashes.values())
148
149 for line in self.write_header():
150 yield line
151 for line in self.write_flags():
152 yield line
153
154 unsafe_requirements = (
155 {r for r in results if r.name in UNSAFE_PACKAGES}
156 if not unsafe_requirements
157 else unsafe_requirements
158 )
159 packages = {r for r in results if r.name not in UNSAFE_PACKAGES}
160
161 packages = sorted(packages, key=self._sort_key)
162
163 for ireq in packages:
164 if has_hashes and not hashes.get(ireq):
165 yield MESSAGE_UNHASHED_PACKAGE
166 warn_uninstallable = True
167 line = self._format_requirement(
168 ireq,
169 reverse_dependencies,
170 primary_packages,
171 markers.get(key_from_ireq(ireq)),
172 hashes=hashes,
173 )
174 yield line
175
176 if unsafe_requirements:
177 unsafe_requirements = sorted(unsafe_requirements, key=self._sort_key)
178 yield ""
179 if has_hashes and not self.allow_unsafe:
180 yield MESSAGE_UNSAFE_PACKAGES_UNPINNED
181 warn_uninstallable = True
182 else:
183 yield MESSAGE_UNSAFE_PACKAGES
184
185 for ireq in unsafe_requirements:
186 req = self._format_requirement(
187 ireq,
188 reverse_dependencies,
189 primary_packages,
190 marker=markers.get(key_from_ireq(ireq)),
191 hashes=hashes,
192 )
193 if not self.allow_unsafe:
194 yield comment("# {}".format(req))
195 else:
196 yield req
197
198 if warn_uninstallable:
199 log.warning(MESSAGE_UNINSTALLABLE)
200
201 def write(
202 self,
203 results,
204 unsafe_requirements,
205 reverse_dependencies,
206 primary_packages,
207 markers,
208 hashes,
209 ):
210
211 for line in self._iter_lines(
212 results,
213 unsafe_requirements,
214 reverse_dependencies,
215 primary_packages,
216 markers,
217 hashes,
218 ):
219 log.info(line)
220 if not self.dry_run:
221 self.dst_file.write(unstyle(line).encode("utf-8"))
222 self.dst_file.write(os.linesep.encode("utf-8"))
223
224 def _format_requirement(
225 self, ireq, reverse_dependencies, primary_packages, marker=None, hashes=None
226 ):
227 ireq_hashes = (hashes if hashes is not None else {}).get(ireq)
228
229 line = format_requirement(ireq, marker=marker, hashes=ireq_hashes)
230
231 if not self.annotate or key_from_ireq(ireq) in primary_packages:
232 return line
233
234 # Annotate what packages this package is required by
235 required_by = reverse_dependencies.get(ireq.name.lower(), [])
236 if required_by:
237 annotation = ", ".join(sorted(required_by))
238 line = "{:24}{}{}".format(
239 line,
240 " \\\n " if ireq_hashes else " ",
241 comment("# via " + annotation),
242 )
243 return line
244
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/piptools/writer.py b/piptools/writer.py
--- a/piptools/writer.py
+++ b/piptools/writer.py
@@ -146,10 +146,14 @@
warn_uninstallable = False
has_hashes = hashes and any(hash for hash in hashes.values())
+ yielded = False
+
for line in self.write_header():
yield line
+ yielded = True
for line in self.write_flags():
yield line
+ yielded = True
unsafe_requirements = (
{r for r in results if r.name in UNSAFE_PACKAGES}
@@ -158,24 +162,26 @@
)
packages = {r for r in results if r.name not in UNSAFE_PACKAGES}
- packages = sorted(packages, key=self._sort_key)
-
- for ireq in packages:
- if has_hashes and not hashes.get(ireq):
- yield MESSAGE_UNHASHED_PACKAGE
- warn_uninstallable = True
- line = self._format_requirement(
- ireq,
- reverse_dependencies,
- primary_packages,
- markers.get(key_from_ireq(ireq)),
- hashes=hashes,
- )
- yield line
+ if packages:
+ packages = sorted(packages, key=self._sort_key)
+ for ireq in packages:
+ if has_hashes and not hashes.get(ireq):
+ yield MESSAGE_UNHASHED_PACKAGE
+ warn_uninstallable = True
+ line = self._format_requirement(
+ ireq,
+ reverse_dependencies,
+ primary_packages,
+ markers.get(key_from_ireq(ireq)),
+ hashes=hashes,
+ )
+ yield line
+ yielded = True
if unsafe_requirements:
unsafe_requirements = sorted(unsafe_requirements, key=self._sort_key)
yield ""
+ yielded = True
if has_hashes and not self.allow_unsafe:
yield MESSAGE_UNSAFE_PACKAGES_UNPINNED
warn_uninstallable = True
@@ -195,6 +201,10 @@
else:
yield req
+ # Yield even when there's no real content, so that blank files are written
+ if not yielded:
+ yield ""
+
if warn_uninstallable:
log.warning(MESSAGE_UNINSTALLABLE)
| {"golden_diff": "diff --git a/piptools/writer.py b/piptools/writer.py\n--- a/piptools/writer.py\n+++ b/piptools/writer.py\n@@ -146,10 +146,14 @@\n warn_uninstallable = False\n has_hashes = hashes and any(hash for hash in hashes.values())\n \n+ yielded = False\n+\n for line in self.write_header():\n yield line\n+ yielded = True\n for line in self.write_flags():\n yield line\n+ yielded = True\n \n unsafe_requirements = (\n {r for r in results if r.name in UNSAFE_PACKAGES}\n@@ -158,24 +162,26 @@\n )\n packages = {r for r in results if r.name not in UNSAFE_PACKAGES}\n \n- packages = sorted(packages, key=self._sort_key)\n-\n- for ireq in packages:\n- if has_hashes and not hashes.get(ireq):\n- yield MESSAGE_UNHASHED_PACKAGE\n- warn_uninstallable = True\n- line = self._format_requirement(\n- ireq,\n- reverse_dependencies,\n- primary_packages,\n- markers.get(key_from_ireq(ireq)),\n- hashes=hashes,\n- )\n- yield line\n+ if packages:\n+ packages = sorted(packages, key=self._sort_key)\n+ for ireq in packages:\n+ if has_hashes and not hashes.get(ireq):\n+ yield MESSAGE_UNHASHED_PACKAGE\n+ warn_uninstallable = True\n+ line = self._format_requirement(\n+ ireq,\n+ reverse_dependencies,\n+ primary_packages,\n+ markers.get(key_from_ireq(ireq)),\n+ hashes=hashes,\n+ )\n+ yield line\n+ yielded = True\n \n if unsafe_requirements:\n unsafe_requirements = sorted(unsafe_requirements, key=self._sort_key)\n yield \"\"\n+ yielded = True\n if has_hashes and not self.allow_unsafe:\n yield MESSAGE_UNSAFE_PACKAGES_UNPINNED\n warn_uninstallable = True\n@@ -195,6 +201,10 @@\n else:\n yield req\n \n+ # Yield even when there's no real content, so that blank files are written\n+ if not yielded:\n+ yield \"\"\n+\n if warn_uninstallable:\n log.warning(MESSAGE_UNINSTALLABLE)\n", "issue": "pip-compile --no-header <blank requirements.in> neither creates nor overwrites requirements.txt\nIf `requirements.in` is empty, running `pip-compile` will create or clobber `requirements.txt`, thereby removing all requirements, but if `--no-header` is passed to `pip-compile`, `requirements.txt` will not be affected in any way, no matter its state.\r\n\r\n#### Environment Versions\r\n\r\n1. Linux\r\n1. Python version: `3.7.4`\r\n1. pip version: `19.2.3`\r\n1. pip-tools version: `4.1.0`\r\n\r\n#### Steps to replicate\r\n\r\n```bash\r\ntouch requirements.in\r\npip-compile --no-header\r\n# no txt file created (unexpected)\r\npip-compile\r\n# txt file created (as expected)\r\necho plumbum > requirements.in\r\npip-compile\r\necho > requirements.in\r\npip-compile --no-header\r\ncat requirements.txt\r\n```\r\nfull, unexpected:\r\n```python\r\n#\r\n# This file is autogenerated by pip-compile\r\n# To update, run:\r\n#\r\n# pip-compile\r\n#\r\nplumbum==1.6.7\r\n```\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport os\nfrom itertools import chain\n\nfrom .click import unstyle\nfrom .logging import log\nfrom .utils import (\n UNSAFE_PACKAGES,\n comment,\n dedup,\n format_requirement,\n get_compile_command,\n key_from_ireq,\n)\n\nMESSAGE_UNHASHED_PACKAGE = comment(\n \"# WARNING: pip install will require the following package to be hashed.\"\n \"\\n# Consider using a hashable URL like \"\n \"https://github.com/jazzband/pip-tools/archive/SOMECOMMIT.zip\"\n)\n\nMESSAGE_UNSAFE_PACKAGES_UNPINNED = comment(\n \"# WARNING: The following packages were not pinned, but pip requires them to be\"\n \"\\n# pinned when the requirements file includes hashes. 
\"\n \"Consider using the --allow-unsafe flag.\"\n)\n\nMESSAGE_UNSAFE_PACKAGES = comment(\n \"# The following packages are considered to be unsafe in a requirements file:\"\n)\n\nMESSAGE_UNINSTALLABLE = (\n \"The generated requirements file may be rejected by pip install. \"\n \"See # WARNING lines for details.\"\n)\n\n\nclass OutputWriter(object):\n def __init__(\n self,\n src_files,\n dst_file,\n click_ctx,\n dry_run,\n emit_header,\n emit_index,\n emit_trusted_host,\n annotate,\n generate_hashes,\n default_index_url,\n index_urls,\n trusted_hosts,\n format_control,\n allow_unsafe,\n find_links,\n emit_find_links,\n ):\n self.src_files = src_files\n self.dst_file = dst_file\n self.click_ctx = click_ctx\n self.dry_run = dry_run\n self.emit_header = emit_header\n self.emit_index = emit_index\n self.emit_trusted_host = emit_trusted_host\n self.annotate = annotate\n self.generate_hashes = generate_hashes\n self.default_index_url = default_index_url\n self.index_urls = index_urls\n self.trusted_hosts = trusted_hosts\n self.format_control = format_control\n self.allow_unsafe = allow_unsafe\n self.find_links = find_links\n self.emit_find_links = emit_find_links\n\n def _sort_key(self, ireq):\n return (not ireq.editable, str(ireq.req).lower())\n\n def write_header(self):\n if self.emit_header:\n yield comment(\"#\")\n yield comment(\"# This file is autogenerated by pip-compile\")\n yield comment(\"# To update, run:\")\n yield comment(\"#\")\n compile_command = os.environ.get(\n \"CUSTOM_COMPILE_COMMAND\"\n ) or get_compile_command(self.click_ctx)\n yield comment(\"# {}\".format(compile_command))\n yield comment(\"#\")\n\n def write_index_options(self):\n if self.emit_index:\n for index, index_url in enumerate(dedup(self.index_urls)):\n if index_url.rstrip(\"/\") == self.default_index_url:\n continue\n flag = \"--index-url\" if index == 0 else \"--extra-index-url\"\n yield \"{} {}\".format(flag, index_url)\n\n def write_trusted_hosts(self):\n if self.emit_trusted_host:\n for trusted_host in dedup(self.trusted_hosts):\n yield \"--trusted-host {}\".format(trusted_host)\n\n def write_format_controls(self):\n for nb in dedup(self.format_control.no_binary):\n yield \"--no-binary {}\".format(nb)\n for ob in dedup(self.format_control.only_binary):\n yield \"--only-binary {}\".format(ob)\n\n def write_find_links(self):\n if self.emit_find_links:\n for find_link in dedup(self.find_links):\n yield \"--find-links {}\".format(find_link)\n\n def write_flags(self):\n emitted = False\n for line in chain(\n self.write_index_options(),\n self.write_find_links(),\n self.write_trusted_hosts(),\n self.write_format_controls(),\n ):\n emitted = True\n yield line\n if emitted:\n yield \"\"\n\n def _iter_lines(\n self,\n results,\n unsafe_requirements=None,\n reverse_dependencies=None,\n primary_packages=None,\n markers=None,\n hashes=None,\n ):\n # default values\n unsafe_requirements = unsafe_requirements or []\n reverse_dependencies = reverse_dependencies or {}\n primary_packages = primary_packages or []\n markers = markers or {}\n hashes = hashes or {}\n\n # Check for unhashed or unpinned packages if at least one package does have\n # hashes, which will trigger pip install's --require-hashes mode.\n warn_uninstallable = False\n has_hashes = hashes and any(hash for hash in hashes.values())\n\n for line in self.write_header():\n yield line\n for line in self.write_flags():\n yield line\n\n unsafe_requirements = (\n {r for r in results if r.name in UNSAFE_PACKAGES}\n if not unsafe_requirements\n else 
unsafe_requirements\n )\n packages = {r for r in results if r.name not in UNSAFE_PACKAGES}\n\n packages = sorted(packages, key=self._sort_key)\n\n for ireq in packages:\n if has_hashes and not hashes.get(ireq):\n yield MESSAGE_UNHASHED_PACKAGE\n warn_uninstallable = True\n line = self._format_requirement(\n ireq,\n reverse_dependencies,\n primary_packages,\n markers.get(key_from_ireq(ireq)),\n hashes=hashes,\n )\n yield line\n\n if unsafe_requirements:\n unsafe_requirements = sorted(unsafe_requirements, key=self._sort_key)\n yield \"\"\n if has_hashes and not self.allow_unsafe:\n yield MESSAGE_UNSAFE_PACKAGES_UNPINNED\n warn_uninstallable = True\n else:\n yield MESSAGE_UNSAFE_PACKAGES\n\n for ireq in unsafe_requirements:\n req = self._format_requirement(\n ireq,\n reverse_dependencies,\n primary_packages,\n marker=markers.get(key_from_ireq(ireq)),\n hashes=hashes,\n )\n if not self.allow_unsafe:\n yield comment(\"# {}\".format(req))\n else:\n yield req\n\n if warn_uninstallable:\n log.warning(MESSAGE_UNINSTALLABLE)\n\n def write(\n self,\n results,\n unsafe_requirements,\n reverse_dependencies,\n primary_packages,\n markers,\n hashes,\n ):\n\n for line in self._iter_lines(\n results,\n unsafe_requirements,\n reverse_dependencies,\n primary_packages,\n markers,\n hashes,\n ):\n log.info(line)\n if not self.dry_run:\n self.dst_file.write(unstyle(line).encode(\"utf-8\"))\n self.dst_file.write(os.linesep.encode(\"utf-8\"))\n\n def _format_requirement(\n self, ireq, reverse_dependencies, primary_packages, marker=None, hashes=None\n ):\n ireq_hashes = (hashes if hashes is not None else {}).get(ireq)\n\n line = format_requirement(ireq, marker=marker, hashes=ireq_hashes)\n\n if not self.annotate or key_from_ireq(ireq) in primary_packages:\n return line\n\n # Annotate what packages this package is required by\n required_by = reverse_dependencies.get(ireq.name.lower(), [])\n if required_by:\n annotation = \", \".join(sorted(required_by))\n line = \"{:24}{}{}\".format(\n line,\n \" \\\\\\n \" if ireq_hashes else \" \",\n comment(\"# via \" + annotation),\n )\n return line\n", "path": "piptools/writer.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport os\nfrom itertools import chain\n\nfrom .click import unstyle\nfrom .logging import log\nfrom .utils import (\n UNSAFE_PACKAGES,\n comment,\n dedup,\n format_requirement,\n get_compile_command,\n key_from_ireq,\n)\n\nMESSAGE_UNHASHED_PACKAGE = comment(\n \"# WARNING: pip install will require the following package to be hashed.\"\n \"\\n# Consider using a hashable URL like \"\n \"https://github.com/jazzband/pip-tools/archive/SOMECOMMIT.zip\"\n)\n\nMESSAGE_UNSAFE_PACKAGES_UNPINNED = comment(\n \"# WARNING: The following packages were not pinned, but pip requires them to be\"\n \"\\n# pinned when the requirements file includes hashes. \"\n \"Consider using the --allow-unsafe flag.\"\n)\n\nMESSAGE_UNSAFE_PACKAGES = comment(\n \"# The following packages are considered to be unsafe in a requirements file:\"\n)\n\nMESSAGE_UNINSTALLABLE = (\n \"The generated requirements file may be rejected by pip install. 
\"\n \"See # WARNING lines for details.\"\n)\n\n\nclass OutputWriter(object):\n def __init__(\n self,\n src_files,\n dst_file,\n click_ctx,\n dry_run,\n emit_header,\n emit_index,\n emit_trusted_host,\n annotate,\n generate_hashes,\n default_index_url,\n index_urls,\n trusted_hosts,\n format_control,\n allow_unsafe,\n find_links,\n emit_find_links,\n ):\n self.src_files = src_files\n self.dst_file = dst_file\n self.click_ctx = click_ctx\n self.dry_run = dry_run\n self.emit_header = emit_header\n self.emit_index = emit_index\n self.emit_trusted_host = emit_trusted_host\n self.annotate = annotate\n self.generate_hashes = generate_hashes\n self.default_index_url = default_index_url\n self.index_urls = index_urls\n self.trusted_hosts = trusted_hosts\n self.format_control = format_control\n self.allow_unsafe = allow_unsafe\n self.find_links = find_links\n self.emit_find_links = emit_find_links\n\n def _sort_key(self, ireq):\n return (not ireq.editable, str(ireq.req).lower())\n\n def write_header(self):\n if self.emit_header:\n yield comment(\"#\")\n yield comment(\"# This file is autogenerated by pip-compile\")\n yield comment(\"# To update, run:\")\n yield comment(\"#\")\n compile_command = os.environ.get(\n \"CUSTOM_COMPILE_COMMAND\"\n ) or get_compile_command(self.click_ctx)\n yield comment(\"# {}\".format(compile_command))\n yield comment(\"#\")\n\n def write_index_options(self):\n if self.emit_index:\n for index, index_url in enumerate(dedup(self.index_urls)):\n if index_url.rstrip(\"/\") == self.default_index_url:\n continue\n flag = \"--index-url\" if index == 0 else \"--extra-index-url\"\n yield \"{} {}\".format(flag, index_url)\n\n def write_trusted_hosts(self):\n if self.emit_trusted_host:\n for trusted_host in dedup(self.trusted_hosts):\n yield \"--trusted-host {}\".format(trusted_host)\n\n def write_format_controls(self):\n for nb in dedup(self.format_control.no_binary):\n yield \"--no-binary {}\".format(nb)\n for ob in dedup(self.format_control.only_binary):\n yield \"--only-binary {}\".format(ob)\n\n def write_find_links(self):\n if self.emit_find_links:\n for find_link in dedup(self.find_links):\n yield \"--find-links {}\".format(find_link)\n\n def write_flags(self):\n emitted = False\n for line in chain(\n self.write_index_options(),\n self.write_find_links(),\n self.write_trusted_hosts(),\n self.write_format_controls(),\n ):\n emitted = True\n yield line\n if emitted:\n yield \"\"\n\n def _iter_lines(\n self,\n results,\n unsafe_requirements=None,\n reverse_dependencies=None,\n primary_packages=None,\n markers=None,\n hashes=None,\n ):\n # default values\n unsafe_requirements = unsafe_requirements or []\n reverse_dependencies = reverse_dependencies or {}\n primary_packages = primary_packages or []\n markers = markers or {}\n hashes = hashes or {}\n\n # Check for unhashed or unpinned packages if at least one package does have\n # hashes, which will trigger pip install's --require-hashes mode.\n warn_uninstallable = False\n has_hashes = hashes and any(hash for hash in hashes.values())\n\n yielded = False\n\n for line in self.write_header():\n yield line\n yielded = True\n for line in self.write_flags():\n yield line\n yielded = True\n\n unsafe_requirements = (\n {r for r in results if r.name in UNSAFE_PACKAGES}\n if not unsafe_requirements\n else unsafe_requirements\n )\n packages = {r for r in results if r.name not in UNSAFE_PACKAGES}\n\n if packages:\n packages = sorted(packages, key=self._sort_key)\n for ireq in packages:\n if has_hashes and not hashes.get(ireq):\n yield 
MESSAGE_UNHASHED_PACKAGE\n warn_uninstallable = True\n line = self._format_requirement(\n ireq,\n reverse_dependencies,\n primary_packages,\n markers.get(key_from_ireq(ireq)),\n hashes=hashes,\n )\n yield line\n yielded = True\n\n if unsafe_requirements:\n unsafe_requirements = sorted(unsafe_requirements, key=self._sort_key)\n yield \"\"\n yielded = True\n if has_hashes and not self.allow_unsafe:\n yield MESSAGE_UNSAFE_PACKAGES_UNPINNED\n warn_uninstallable = True\n else:\n yield MESSAGE_UNSAFE_PACKAGES\n\n for ireq in unsafe_requirements:\n req = self._format_requirement(\n ireq,\n reverse_dependencies,\n primary_packages,\n marker=markers.get(key_from_ireq(ireq)),\n hashes=hashes,\n )\n if not self.allow_unsafe:\n yield comment(\"# {}\".format(req))\n else:\n yield req\n\n # Yield even when there's no real content, so that blank files are written\n if not yielded:\n yield \"\"\n\n if warn_uninstallable:\n log.warning(MESSAGE_UNINSTALLABLE)\n\n def write(\n self,\n results,\n unsafe_requirements,\n reverse_dependencies,\n primary_packages,\n markers,\n hashes,\n ):\n\n for line in self._iter_lines(\n results,\n unsafe_requirements,\n reverse_dependencies,\n primary_packages,\n markers,\n hashes,\n ):\n log.info(line)\n if not self.dry_run:\n self.dst_file.write(unstyle(line).encode(\"utf-8\"))\n self.dst_file.write(os.linesep.encode(\"utf-8\"))\n\n def _format_requirement(\n self, ireq, reverse_dependencies, primary_packages, marker=None, hashes=None\n ):\n ireq_hashes = (hashes if hashes is not None else {}).get(ireq)\n\n line = format_requirement(ireq, marker=marker, hashes=ireq_hashes)\n\n if not self.annotate or key_from_ireq(ireq) in primary_packages:\n return line\n\n # Annotate what packages this package is required by\n required_by = reverse_dependencies.get(ireq.name.lower(), [])\n if required_by:\n annotation = \", \".join(sorted(required_by))\n line = \"{:24}{}{}\".format(\n line,\n \" \\\\\\n \" if ireq_hashes else \" \",\n comment(\"# via \" + annotation),\n )\n return line\n", "path": "piptools/writer.py"}]} | 2,709 | 529 |
gh_patches_debug_57377 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-1657 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TypeError: pop() takes no arguments (1 given) with otlp exporter 0.18b0
**Describe your environment**
opentelemetry-sdk==1.0.0rc1
opentelemetry-exporter-otlp==1.0.0rc1
opentelemetry-exporter-jaeger==1.0.0rc1
opentelemetry-propagator-b3==1.0.0rc1
opentelemetry-distro==0.18b0
opentelemetry-instrumentation==0.18b0
opentelemetry-instrumentation-grpc==0.18b0
opentelemetry-instrumentation-jinja2==0.18b0
export OTEL_TRACES_EXPORTER="otlp"
export OTEL_EXPORTER_OTLP_INSECURE=true
export OTEL_EXPORTER_OTLP_ENDPOINT="markf-0398:4317"
export OTEL_RESOURCE_ATTRIBUTES="service.name=emailservice, environment=hipster_shop"
**Steps to reproduce**
I'm using this app, but I don't believe it makes any difference, given the error.
https://github.com/markfink-splunk/microservices-demo/tree/master/src/emailservice
**What is the expected behavior?**
otlp should initialize and export traces.
**What is the actual behavior?**
I get this error immediately upon executing "opentelemetry-instrument python email_server.py".
Configuration of configurator failed
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py", line 74, in _load_configurators
entry_point.load()().configure() # type: ignore
File "/usr/local/lib/python3.7/site-packages/opentelemetry/instrumentation/configurator.py", line 50, in configure
self._configure(**kwargs)
File "/usr/local/lib/python3.7/site-packages/opentelemetry/distro/__init__.py", line 168, in _configure
_initialize_components()
File "/usr/local/lib/python3.7/site-packages/opentelemetry/distro/__init__.py", line 159, in _initialize_components
exporter_names = _get_exporter_names()
File "/usr/local/lib/python3.7/site-packages/opentelemetry/distro/__init__.py", line 73, in _get_exporter_names
exporters.pop(EXPORTER_OTLP)
TypeError: pop() takes no arguments (1 given)
Failed to auto initialize opentelemetry
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py", line 84, in initialize
_load_configurators()
File "/usr/local/lib/python3.7/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py", line 78, in _load_configurators
raise exc
File "/usr/local/lib/python3.7/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py", line 74, in _load_configurators
entry_point.load()().configure() # type: ignore
File "/usr/local/lib/python3.7/site-packages/opentelemetry/instrumentation/configurator.py", line 50, in configure
self._configure(**kwargs)
File "/usr/local/lib/python3.7/site-packages/opentelemetry/distro/__init__.py", line 168, in _configure
_initialize_components()
File "/usr/local/lib/python3.7/site-packages/opentelemetry/distro/__init__.py", line 159, in _initialize_components
exporter_names = _get_exporter_names()
File "/usr/local/lib/python3.7/site-packages/opentelemetry/distro/__init__.py", line 73, in _get_exporter_names
exporters.pop(EXPORTER_OTLP)
TypeError: pop() takes no arguments (1 given)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opentelemetry-distro/src/opentelemetry/distro/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15 import os
16 from logging import getLogger
17 from os import environ
18 from typing import Sequence, Tuple
19
20 from pkg_resources import iter_entry_points
21
22 from opentelemetry import trace
23 from opentelemetry.environment_variables import (
24 OTEL_PYTHON_ID_GENERATOR,
25 OTEL_PYTHON_SERVICE_NAME,
26 OTEL_TRACES_EXPORTER,
27 )
28 from opentelemetry.instrumentation.configurator import BaseConfigurator
29 from opentelemetry.instrumentation.distro import BaseDistro
30 from opentelemetry.sdk.resources import Resource
31 from opentelemetry.sdk.trace import TracerProvider
32 from opentelemetry.sdk.trace.export import BatchSpanProcessor, SpanExporter
33 from opentelemetry.sdk.trace.id_generator import IdGenerator
34
35 logger = getLogger(__file__)
36
37
38 EXPORTER_OTLP = "otlp"
39 EXPORTER_OTLP_SPAN = "otlp_span"
40
41 RANDOM_ID_GENERATOR = "random"
42 _DEFAULT_ID_GENERATOR = RANDOM_ID_GENERATOR
43
44
45 def _get_id_generator() -> str:
46 return environ.get(OTEL_PYTHON_ID_GENERATOR, _DEFAULT_ID_GENERATOR)
47
48
49 def _get_service_name() -> str:
50 return environ.get(OTEL_PYTHON_SERVICE_NAME, "")
51
52
53 def _get_exporter_names() -> Sequence[str]:
54 trace_exporters = environ.get(OTEL_TRACES_EXPORTER)
55
56 exporters = set()
57
58 if (
59 trace_exporters is not None
60 or trace_exporters.lower().strip() != "none"
61 ):
62 exporters.update(
63 {
64 trace_exporter.strip()
65 for trace_exporter in trace_exporters.split(",")
66 }
67 )
68
69 if EXPORTER_OTLP in exporters:
70 exporters.pop(EXPORTER_OTLP)
71 exporters.add(EXPORTER_OTLP_SPAN)
72
73 return list(exporters)
74
75
76 def _init_tracing(
77 exporters: Sequence[SpanExporter], id_generator: IdGenerator
78 ):
79 service_name = _get_service_name()
80 provider = TracerProvider(
81 resource=Resource.create({"service.name": service_name}),
82 id_generator=id_generator(),
83 )
84 trace.set_tracer_provider(provider)
85
86 for exporter_name, exporter_class in exporters.items():
87 exporter_args = {}
88 if exporter_name not in [
89 EXPORTER_OTLP,
90 EXPORTER_OTLP_SPAN,
91 ]:
92 exporter_args["service_name"] = service_name
93
94 provider.add_span_processor(
95 BatchSpanProcessor(exporter_class(**exporter_args))
96 )
97
98
99 def _import_tracer_provider_config_components(
100 selected_components, entry_point_name
101 ) -> Sequence[Tuple[str, object]]:
102 component_entry_points = {
103 ep.name: ep for ep in iter_entry_points(entry_point_name)
104 }
105 component_impls = []
106 for selected_component in selected_components:
107 entry_point = component_entry_points.get(selected_component, None)
108 if not entry_point:
109 raise RuntimeError(
110 "Requested component '{}' not found in entry points for '{}'".format(
111 selected_component, entry_point_name
112 )
113 )
114
115 component_impl = entry_point.load()
116 component_impls.append((selected_component, component_impl))
117
118 return component_impls
119
120
121 def _import_exporters(
122 exporter_names: Sequence[str],
123 ) -> Sequence[SpanExporter]:
124 trace_exporters = {}
125
126 for (
127 exporter_name,
128 exporter_impl,
129 ) in _import_tracer_provider_config_components(
130 exporter_names, "opentelemetry_exporter"
131 ):
132 if issubclass(exporter_impl, SpanExporter):
133 trace_exporters[exporter_name] = exporter_impl
134 else:
135 raise RuntimeError(
136 "{0} is not a trace exporter".format(exporter_name)
137 )
138 return trace_exporters
139
140
141 def _import_id_generator(id_generator_name: str) -> IdGenerator:
142 # pylint: disable=unbalanced-tuple-unpacking
143 [
144 (id_generator_name, id_generator_impl)
145 ] = _import_tracer_provider_config_components(
146 [id_generator_name.strip()], "opentelemetry_id_generator"
147 )
148
149 if issubclass(id_generator_impl, IdGenerator):
150 return id_generator_impl
151
152 raise RuntimeError("{0} is not an IdGenerator".format(id_generator_name))
153
154
155 def _initialize_components():
156 exporter_names = _get_exporter_names()
157 trace_exporters = _import_exporters(exporter_names)
158 id_generator_name = _get_id_generator()
159 id_generator = _import_id_generator(id_generator_name)
160 _init_tracing(trace_exporters, id_generator)
161
162
163 class Configurator(BaseConfigurator):
164 def _configure(self, **kwargs):
165 _initialize_components()
166
167
168 class OpenTelemetryDistro(BaseDistro):
169 """
170 The OpenTelemetry provided Distro configures a default set of
171 configuration out of the box.
172 """
173
174 def _configure(self, **kwargs):
175 os.environ.setdefault(OTEL_TRACES_EXPORTER, "otlp_span")
176
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opentelemetry-distro/src/opentelemetry/distro/__init__.py b/opentelemetry-distro/src/opentelemetry/distro/__init__.py
--- a/opentelemetry-distro/src/opentelemetry/distro/__init__.py
+++ b/opentelemetry-distro/src/opentelemetry/distro/__init__.py
@@ -67,7 +67,7 @@
)
if EXPORTER_OTLP in exporters:
- exporters.pop(EXPORTER_OTLP)
+ exporters.remove(EXPORTER_OTLP)
exporters.add(EXPORTER_OTLP_SPAN)
return list(exporters)
| {"golden_diff": "diff --git a/opentelemetry-distro/src/opentelemetry/distro/__init__.py b/opentelemetry-distro/src/opentelemetry/distro/__init__.py\n--- a/opentelemetry-distro/src/opentelemetry/distro/__init__.py\n+++ b/opentelemetry-distro/src/opentelemetry/distro/__init__.py\n@@ -67,7 +67,7 @@\n )\n \n if EXPORTER_OTLP in exporters:\n- exporters.pop(EXPORTER_OTLP)\n+ exporters.remove(EXPORTER_OTLP)\n exporters.add(EXPORTER_OTLP_SPAN)\n \n return list(exporters)\n", "issue": "TypeError: pop() takes no arguments (1 given) with otlp exporter 0.18b0\n**Describe your environment** \r\nopentelemetry-sdk==1.0.0rc1\r\nopentelemetry-exporter-otlp==1.0.0rc1\r\nopentelemetry-exporter-jaeger==1.0.0rc1\r\nopentelemetry-propagator-b3==1.0.0rc1\r\nopentelemetry-distro==0.18b0\r\nopentelemetry-instrumentation==0.18b0\r\nopentelemetry-instrumentation-grpc==0.18b0\r\nopentelemetry-instrumentation-jinja2==0.18b0\r\n\r\nexport OTEL_TRACES_EXPORTER=\"otlp\"\r\nexport OTEL_EXPORTER_OTLP_INSECURE=true\r\nexport OTEL_EXPORTER_OTLP_ENDPOINT=\"markf-0398:4317\"\r\nexport OTEL_RESOURCE_ATTRIBUTES=\"service.name=emailservice, environment=hipster_shop\"\r\n\r\n**Steps to reproduce**\r\nI'm using this app, but I don't believe it makes any difference, given the error.\r\nhttps://github.com/markfink-splunk/microservices-demo/tree/master/src/emailservice\r\n\r\n**What is the expected behavior?**\r\notlp should initialize and export traces.\r\n\r\n**What is the actual behavior?**\r\nI get this error immediately upon executing \"opentelemetry-instrument python email_server.py\".\r\n\r\nConfiguration of configurator failed\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.7/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py\", line 74, in _load_configurators\r\n entry_point.load()().configure() # type: ignore\r\n File \"/usr/local/lib/python3.7/site-packages/opentelemetry/instrumentation/configurator.py\", line 50, in configure\r\n self._configure(**kwargs)\r\n File \"/usr/local/lib/python3.7/site-packages/opentelemetry/distro/__init__.py\", line 168, in _configure\r\n _initialize_components()\r\n File \"/usr/local/lib/python3.7/site-packages/opentelemetry/distro/__init__.py\", line 159, in _initialize_components\r\n exporter_names = _get_exporter_names()\r\n File \"/usr/local/lib/python3.7/site-packages/opentelemetry/distro/__init__.py\", line 73, in _get_exporter_names\r\n exporters.pop(EXPORTER_OTLP)\r\nTypeError: pop() takes no arguments (1 given)\r\nFailed to auto initialize opentelemetry\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.7/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py\", line 84, in initialize\r\n _load_configurators()\r\n File \"/usr/local/lib/python3.7/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py\", line 78, in _load_configurators\r\n raise exc\r\n File \"/usr/local/lib/python3.7/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py\", line 74, in _load_configurators\r\n entry_point.load()().configure() # type: ignore\r\n File \"/usr/local/lib/python3.7/site-packages/opentelemetry/instrumentation/configurator.py\", line 50, in configure\r\n self._configure(**kwargs)\r\n File \"/usr/local/lib/python3.7/site-packages/opentelemetry/distro/__init__.py\", line 168, in _configure\r\n _initialize_components()\r\n File \"/usr/local/lib/python3.7/site-packages/opentelemetry/distro/__init__.py\", line 159, in 
_initialize_components\r\n exporter_names = _get_exporter_names()\r\n File \"/usr/local/lib/python3.7/site-packages/opentelemetry/distro/__init__.py\", line 73, in _get_exporter_names\r\n exporters.pop(EXPORTER_OTLP)\r\nTypeError: pop() takes no arguments (1 given)\r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport os\nfrom logging import getLogger\nfrom os import environ\nfrom typing import Sequence, Tuple\n\nfrom pkg_resources import iter_entry_points\n\nfrom opentelemetry import trace\nfrom opentelemetry.environment_variables import (\n OTEL_PYTHON_ID_GENERATOR,\n OTEL_PYTHON_SERVICE_NAME,\n OTEL_TRACES_EXPORTER,\n)\nfrom opentelemetry.instrumentation.configurator import BaseConfigurator\nfrom opentelemetry.instrumentation.distro import BaseDistro\nfrom opentelemetry.sdk.resources import Resource\nfrom opentelemetry.sdk.trace import TracerProvider\nfrom opentelemetry.sdk.trace.export import BatchSpanProcessor, SpanExporter\nfrom opentelemetry.sdk.trace.id_generator import IdGenerator\n\nlogger = getLogger(__file__)\n\n\nEXPORTER_OTLP = \"otlp\"\nEXPORTER_OTLP_SPAN = \"otlp_span\"\n\nRANDOM_ID_GENERATOR = \"random\"\n_DEFAULT_ID_GENERATOR = RANDOM_ID_GENERATOR\n\n\ndef _get_id_generator() -> str:\n return environ.get(OTEL_PYTHON_ID_GENERATOR, _DEFAULT_ID_GENERATOR)\n\n\ndef _get_service_name() -> str:\n return environ.get(OTEL_PYTHON_SERVICE_NAME, \"\")\n\n\ndef _get_exporter_names() -> Sequence[str]:\n trace_exporters = environ.get(OTEL_TRACES_EXPORTER)\n\n exporters = set()\n\n if (\n trace_exporters is not None\n or trace_exporters.lower().strip() != \"none\"\n ):\n exporters.update(\n {\n trace_exporter.strip()\n for trace_exporter in trace_exporters.split(\",\")\n }\n )\n\n if EXPORTER_OTLP in exporters:\n exporters.pop(EXPORTER_OTLP)\n exporters.add(EXPORTER_OTLP_SPAN)\n\n return list(exporters)\n\n\ndef _init_tracing(\n exporters: Sequence[SpanExporter], id_generator: IdGenerator\n):\n service_name = _get_service_name()\n provider = TracerProvider(\n resource=Resource.create({\"service.name\": service_name}),\n id_generator=id_generator(),\n )\n trace.set_tracer_provider(provider)\n\n for exporter_name, exporter_class in exporters.items():\n exporter_args = {}\n if exporter_name not in [\n EXPORTER_OTLP,\n EXPORTER_OTLP_SPAN,\n ]:\n exporter_args[\"service_name\"] = service_name\n\n provider.add_span_processor(\n BatchSpanProcessor(exporter_class(**exporter_args))\n )\n\n\ndef _import_tracer_provider_config_components(\n selected_components, entry_point_name\n) -> Sequence[Tuple[str, object]]:\n component_entry_points = {\n ep.name: ep for ep in iter_entry_points(entry_point_name)\n }\n component_impls = []\n for selected_component in selected_components:\n entry_point = component_entry_points.get(selected_component, None)\n if not entry_point:\n raise RuntimeError(\n \"Requested component '{}' not found in entry points for '{}'\".format(\n selected_component, entry_point_name\n )\n )\n\n 
component_impl = entry_point.load()\n component_impls.append((selected_component, component_impl))\n\n return component_impls\n\n\ndef _import_exporters(\n exporter_names: Sequence[str],\n) -> Sequence[SpanExporter]:\n trace_exporters = {}\n\n for (\n exporter_name,\n exporter_impl,\n ) in _import_tracer_provider_config_components(\n exporter_names, \"opentelemetry_exporter\"\n ):\n if issubclass(exporter_impl, SpanExporter):\n trace_exporters[exporter_name] = exporter_impl\n else:\n raise RuntimeError(\n \"{0} is not a trace exporter\".format(exporter_name)\n )\n return trace_exporters\n\n\ndef _import_id_generator(id_generator_name: str) -> IdGenerator:\n # pylint: disable=unbalanced-tuple-unpacking\n [\n (id_generator_name, id_generator_impl)\n ] = _import_tracer_provider_config_components(\n [id_generator_name.strip()], \"opentelemetry_id_generator\"\n )\n\n if issubclass(id_generator_impl, IdGenerator):\n return id_generator_impl\n\n raise RuntimeError(\"{0} is not an IdGenerator\".format(id_generator_name))\n\n\ndef _initialize_components():\n exporter_names = _get_exporter_names()\n trace_exporters = _import_exporters(exporter_names)\n id_generator_name = _get_id_generator()\n id_generator = _import_id_generator(id_generator_name)\n _init_tracing(trace_exporters, id_generator)\n\n\nclass Configurator(BaseConfigurator):\n def _configure(self, **kwargs):\n _initialize_components()\n\n\nclass OpenTelemetryDistro(BaseDistro):\n \"\"\"\n The OpenTelemetry provided Distro configures a default set of\n configuration out of the box.\n \"\"\"\n\n def _configure(self, **kwargs):\n os.environ.setdefault(OTEL_TRACES_EXPORTER, \"otlp_span\")\n", "path": "opentelemetry-distro/src/opentelemetry/distro/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport os\nfrom logging import getLogger\nfrom os import environ\nfrom typing import Sequence, Tuple\n\nfrom pkg_resources import iter_entry_points\n\nfrom opentelemetry import trace\nfrom opentelemetry.environment_variables import (\n OTEL_PYTHON_ID_GENERATOR,\n OTEL_PYTHON_SERVICE_NAME,\n OTEL_TRACES_EXPORTER,\n)\nfrom opentelemetry.instrumentation.configurator import BaseConfigurator\nfrom opentelemetry.instrumentation.distro import BaseDistro\nfrom opentelemetry.sdk.resources import Resource\nfrom opentelemetry.sdk.trace import TracerProvider\nfrom opentelemetry.sdk.trace.export import BatchSpanProcessor, SpanExporter\nfrom opentelemetry.sdk.trace.id_generator import IdGenerator\n\nlogger = getLogger(__file__)\n\n\nEXPORTER_OTLP = \"otlp\"\nEXPORTER_OTLP_SPAN = \"otlp_span\"\n\nRANDOM_ID_GENERATOR = \"random\"\n_DEFAULT_ID_GENERATOR = RANDOM_ID_GENERATOR\n\n\ndef _get_id_generator() -> str:\n return environ.get(OTEL_PYTHON_ID_GENERATOR, _DEFAULT_ID_GENERATOR)\n\n\ndef _get_service_name() -> str:\n return environ.get(OTEL_PYTHON_SERVICE_NAME, \"\")\n\n\ndef _get_exporter_names() -> Sequence[str]:\n trace_exporters = environ.get(OTEL_TRACES_EXPORTER)\n\n exporters = 
set()\n\n if (\n trace_exporters is not None\n or trace_exporters.lower().strip() != \"none\"\n ):\n exporters.update(\n {\n trace_exporter.strip()\n for trace_exporter in trace_exporters.split(\",\")\n }\n )\n\n if EXPORTER_OTLP in exporters:\n exporters.remove(EXPORTER_OTLP)\n exporters.add(EXPORTER_OTLP_SPAN)\n\n return list(exporters)\n\n\ndef _init_tracing(\n exporters: Sequence[SpanExporter], id_generator: IdGenerator\n):\n service_name = _get_service_name()\n provider = TracerProvider(\n resource=Resource.create({\"service.name\": service_name}),\n id_generator=id_generator(),\n )\n trace.set_tracer_provider(provider)\n\n for exporter_name, exporter_class in exporters.items():\n exporter_args = {}\n if exporter_name not in [\n EXPORTER_OTLP,\n EXPORTER_OTLP_SPAN,\n ]:\n exporter_args[\"service_name\"] = service_name\n\n provider.add_span_processor(\n BatchSpanProcessor(exporter_class(**exporter_args))\n )\n\n\ndef _import_tracer_provider_config_components(\n selected_components, entry_point_name\n) -> Sequence[Tuple[str, object]]:\n component_entry_points = {\n ep.name: ep for ep in iter_entry_points(entry_point_name)\n }\n component_impls = []\n for selected_component in selected_components:\n entry_point = component_entry_points.get(selected_component, None)\n if not entry_point:\n raise RuntimeError(\n \"Requested component '{}' not found in entry points for '{}'\".format(\n selected_component, entry_point_name\n )\n )\n\n component_impl = entry_point.load()\n component_impls.append((selected_component, component_impl))\n\n return component_impls\n\n\ndef _import_exporters(\n exporter_names: Sequence[str],\n) -> Sequence[SpanExporter]:\n trace_exporters = {}\n\n for (\n exporter_name,\n exporter_impl,\n ) in _import_tracer_provider_config_components(\n exporter_names, \"opentelemetry_exporter\"\n ):\n if issubclass(exporter_impl, SpanExporter):\n trace_exporters[exporter_name] = exporter_impl\n else:\n raise RuntimeError(\n \"{0} is not a trace exporter\".format(exporter_name)\n )\n return trace_exporters\n\n\ndef _import_id_generator(id_generator_name: str) -> IdGenerator:\n # pylint: disable=unbalanced-tuple-unpacking\n [\n (id_generator_name, id_generator_impl)\n ] = _import_tracer_provider_config_components(\n [id_generator_name.strip()], \"opentelemetry_id_generator\"\n )\n\n if issubclass(id_generator_impl, IdGenerator):\n return id_generator_impl\n\n raise RuntimeError(\"{0} is not an IdGenerator\".format(id_generator_name))\n\n\ndef _initialize_components():\n exporter_names = _get_exporter_names()\n trace_exporters = _import_exporters(exporter_names)\n id_generator_name = _get_id_generator()\n id_generator = _import_id_generator(id_generator_name)\n _init_tracing(trace_exporters, id_generator)\n\n\nclass Configurator(BaseConfigurator):\n def _configure(self, **kwargs):\n _initialize_components()\n\n\nclass OpenTelemetryDistro(BaseDistro):\n \"\"\"\n The OpenTelemetry provided Distro configures a default set of\n configuration out of the box.\n \"\"\"\n\n def _configure(self, **kwargs):\n os.environ.setdefault(OTEL_TRACES_EXPORTER, \"otlp_span\")\n", "path": "opentelemetry-distro/src/opentelemetry/distro/__init__.py"}]} | 2,718 | 135 |
gh_patches_debug_14769 | rasdani/github-patches | git_diff | getpelican__pelican-2716 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
LOG_FILTER not working as it should?
Good evening folks,
I have a problem with LOG_FILTER. According to the docs, one can remove the `TAG_SAVE_AS is set to False` warning by configuring `LOG_FILTER` this way:
```python
import logging
LOG_FILTER = [(logging.WARN, 'TAG_SAVE_AS is set to False')]
```
So to ignore `AUTHOR_SAVE_AS is set to False` and `CATEGORY_SAVE_AS is set to False`, it must be :
```python
import logging
LOG_FILTER = [
(logging.WARN, 'AUTHOR_SAVE_AS is set to False'),
(logging.WARN, 'CATEGORY_SAVE_AS is set to False')
]
```
Right?
So this is what I did:
```bash
$ head pelicanconf.py -n 20
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
import datetime
import logging
import pelican
PELICAN_VERSION = pelican.__version__
[...]
LOG_FILTER = [
(logging.WARN, 'AUTHOR_SAVE_AS is set to False'),
(logging.WARN, 'CATEGORY_SAVE_AS is set to False')
]
AUTHOR_SAVE_AS = False
AUTHORS_SAVE_AS = False
TAG_SAVE_AS = False
TAGS_SAVE_AS = False
CATEGORY_SAVE_AS = False
CATEGORIES_SAVE_AS = False
ARCHIVES_SAVE_AS = False
$ make publish
pelican /mnt/c/Users/max/Code/mlcdf/content -o /mnt/c/Users/max/Code/mlcdf/output -s /mnt/c/Users/max/Code/mlcdf/publishconf.py
WARNING: CATEGORY_SAVE_AS is set to False
WARNING: AUTHOR_SAVE_AS is set to False
Done: Processed 2 articles, 0 drafts, 4 pages, 1 hidden page and 0 draft pages in 0.52 seconds.
```
2 things here:
- it didn't work: `AUTHOR_SAVE_AS is set to False` and `CATEGORY_SAVE_AS is set to False` are still logged.
- and, as you may have noticed, despite setting `TAG_SAVE_AS = False` and not filtering it, I did not see `TAG_SAVE_AS is set to False` in the logs.
Using the template to filter worked (but it will also filter out other log messages matching that template, which is not necessarily what you want):
```
import logging
LOG_FILTER = [
(logging.WARN, '%s is set to %s'),
]
```
* Am I missing something?
* What's the intended behaviour here? Is there an actual bug in the code? Or is the code example in the docs just wrong?
I'm a bit lost. 🙃
Maxime
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pelican/log.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 import logging
4 import os
5 import sys
6 from collections import defaultdict
7
8 __all__ = [
9 'init'
10 ]
11
12
13 class BaseFormatter(logging.Formatter):
14 def __init__(self, fmt=None, datefmt=None):
15 FORMAT = '%(customlevelname)s %(message)s'
16 super().__init__(fmt=FORMAT, datefmt=datefmt)
17
18 def format(self, record):
19 customlevel = self._get_levelname(record.levelname)
20 record.__dict__['customlevelname'] = customlevel
21 # format multiline messages 'nicely' to make it clear they are together
22 record.msg = record.msg.replace('\n', '\n | ')
23 record.args = tuple(arg.replace('\n', '\n | ') if
24 isinstance(arg, str) else
25 arg for arg in record.args)
26 return super().format(record)
27
28 def formatException(self, ei):
29 ''' prefix traceback info for better representation '''
30 s = super().formatException(ei)
31 # fancy format traceback
32 s = '\n'.join(' | ' + line for line in s.splitlines())
33 # separate the traceback from the preceding lines
34 s = ' |___\n{}'.format(s)
35 return s
36
37 def _get_levelname(self, name):
38 ''' NOOP: overridden by subclasses '''
39 return name
40
41
42 class ANSIFormatter(BaseFormatter):
43 ANSI_CODES = {
44 'red': '\033[1;31m',
45 'yellow': '\033[1;33m',
46 'cyan': '\033[1;36m',
47 'white': '\033[1;37m',
48 'bgred': '\033[1;41m',
49 'bggrey': '\033[1;100m',
50 'reset': '\033[0;m'}
51
52 LEVEL_COLORS = {
53 'INFO': 'cyan',
54 'WARNING': 'yellow',
55 'ERROR': 'red',
56 'CRITICAL': 'bgred',
57 'DEBUG': 'bggrey'}
58
59 def _get_levelname(self, name):
60 color = self.ANSI_CODES[self.LEVEL_COLORS.get(name, 'white')]
61 if name == 'INFO':
62 fmt = '{0}->{2}'
63 else:
64 fmt = '{0}{1}{2}:'
65 return fmt.format(color, name, self.ANSI_CODES['reset'])
66
67
68 class TextFormatter(BaseFormatter):
69 """
70 Convert a `logging.LogRecord' object into text.
71 """
72
73 def _get_levelname(self, name):
74 if name == 'INFO':
75 return '->'
76 else:
77 return name + ':'
78
79
80 class LimitFilter(logging.Filter):
81 """
82 Remove duplicates records, and limit the number of records in the same
83 group.
84
85 Groups are specified by the message to use when the number of records in
86 the same group hit the limit.
87 E.g.: log.warning(('43 is not the answer', 'More erroneous answers'))
88 """
89
90 LOGS_DEDUP_MIN_LEVEL = logging.WARNING
91
92 _ignore = set()
93 _raised_messages = set()
94 _threshold = 5
95 _group_count = defaultdict(int)
96
97 def filter(self, record):
98 # don't limit log messages for anything above "warning"
99 if record.levelno > self.LOGS_DEDUP_MIN_LEVEL:
100 return True
101
102 # extract group
103 group = record.__dict__.get('limit_msg', None)
104 group_args = record.__dict__.get('limit_args', ())
105
106 # ignore record if it was already raised
107 message_key = (record.levelno, record.getMessage())
108 if message_key in self._raised_messages:
109 return False
110 else:
111 self._raised_messages.add(message_key)
112
113 # ignore LOG_FILTER records by templates when "debug" isn't enabled
114 logger_level = logging.getLogger().getEffectiveLevel()
115 if logger_level > logging.DEBUG:
116 ignore_key = (record.levelno, record.msg)
117 if ignore_key in self._ignore:
118 return False
119
120 # check if we went over threshold
121 if group:
122 key = (record.levelno, group)
123 self._group_count[key] += 1
124 if self._group_count[key] == self._threshold:
125 record.msg = group
126 record.args = group_args
127 elif self._group_count[key] > self._threshold:
128 return False
129 return True
130
131
132 class LimitLogger(logging.Logger):
133 """
134 A logger which adds LimitFilter automatically
135 """
136
137 limit_filter = LimitFilter()
138
139 def __init__(self, *args, **kwargs):
140 super().__init__(*args, **kwargs)
141 self.enable_filter()
142
143 def disable_filter(self):
144 self.removeFilter(LimitLogger.limit_filter)
145
146 def enable_filter(self):
147 self.addFilter(LimitLogger.limit_filter)
148
149
150 class FatalLogger(LimitLogger):
151 warnings_fatal = False
152 errors_fatal = False
153
154 def warning(self, *args, **kwargs):
155 super().warning(*args, **kwargs)
156 if FatalLogger.warnings_fatal:
157 raise RuntimeError('Warning encountered')
158
159 def error(self, *args, **kwargs):
160 super().error(*args, **kwargs)
161 if FatalLogger.errors_fatal:
162 raise RuntimeError('Error encountered')
163
164
165 logging.setLoggerClass(FatalLogger)
166
167
168 def supports_color():
169 """
170 Returns True if the running system's terminal supports color,
171 and False otherwise.
172
173 from django.core.management.color
174 """
175 plat = sys.platform
176 supported_platform = plat != 'Pocket PC' and \
177 (plat != 'win32' or 'ANSICON' in os.environ)
178
179 # isatty is not always implemented, #6223.
180 is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
181 if not supported_platform or not is_a_tty:
182 return False
183 return True
184
185
186 def get_formatter():
187 if supports_color():
188 return ANSIFormatter()
189 else:
190 return TextFormatter()
191
192
193 def init(level=None, fatal='', handler=logging.StreamHandler(), name=None,
194 logs_dedup_min_level=None):
195 FatalLogger.warnings_fatal = fatal.startswith('warning')
196 FatalLogger.errors_fatal = bool(fatal)
197
198 logger = logging.getLogger(name)
199
200 handler.setFormatter(get_formatter())
201 logger.addHandler(handler)
202
203 if level:
204 logger.setLevel(level)
205 if logs_dedup_min_level:
206 LimitFilter.LOGS_DEDUP_MIN_LEVEL = logs_dedup_min_level
207
208
209 def log_warnings():
210 import warnings
211 logging.captureWarnings(True)
212 warnings.simplefilter("default", DeprecationWarning)
213 init(logging.DEBUG, name='py.warnings')
214
215
216 if __name__ == '__main__':
217 init(level=logging.DEBUG)
218
219 root_logger = logging.getLogger()
220 root_logger.debug('debug')
221 root_logger.info('info')
222 root_logger.warning('warning')
223 root_logger.error('error')
224 root_logger.critical('critical')
225
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pelican/log.py b/pelican/log.py
--- a/pelican/log.py
+++ b/pelican/log.py
@@ -110,11 +110,13 @@
else:
self._raised_messages.add(message_key)
- # ignore LOG_FILTER records by templates when "debug" isn't enabled
+ # ignore LOG_FILTER records by templates or messages
+ # when "debug" isn't enabled
logger_level = logging.getLogger().getEffectiveLevel()
if logger_level > logging.DEBUG:
- ignore_key = (record.levelno, record.msg)
- if ignore_key in self._ignore:
+ template_key = (record.levelno, record.msg)
+ message_key = (record.levelno, record.getMessage())
+ if (template_key in self._ignore or message_key in self._ignore):
return False
# check if we went over threshold
| {"golden_diff": "diff --git a/pelican/log.py b/pelican/log.py\n--- a/pelican/log.py\n+++ b/pelican/log.py\n@@ -110,11 +110,13 @@\n else:\n self._raised_messages.add(message_key)\n \n- # ignore LOG_FILTER records by templates when \"debug\" isn't enabled\n+ # ignore LOG_FILTER records by templates or messages\n+ # when \"debug\" isn't enabled\n logger_level = logging.getLogger().getEffectiveLevel()\n if logger_level > logging.DEBUG:\n- ignore_key = (record.levelno, record.msg)\n- if ignore_key in self._ignore:\n+ template_key = (record.levelno, record.msg)\n+ message_key = (record.levelno, record.getMessage())\n+ if (template_key in self._ignore or message_key in self._ignore):\n return False\n \n # check if we went over threshold\n", "issue": "LOG_FILTER not working as it should?\nGood evening folks,\r\n\r\nI have a problem with LOG_FILTER. According to the docs, one can remove `TAG_SAVE_AS is set to False` by configuring `LOG_FILTER` this way:\r\n\r\n```python\r\nimport logging\r\nLOG_FILTER = [(logging.WARN, 'TAG_SAVE_AS is set to False')]\r\n```\r\n\r\nSo to ignore `AUTHOR_SAVE_AS is set to False` and `CATEGORY_SAVE_AS is set to False`, it must be :\r\n\r\n```python\r\nimport logging\r\nLOG_FILTER = [\r\n (logging.WARN, 'AUTHOR_SAVE_AS is set to False'),\r\n (logging.WARN, 'CATEGORY_SAVE_AS is set to False')\r\n]\r\n```\r\nRight?\r\n\r\nSo this is what I did:\r\n```bash\r\n$ head pelicanconf.py -n 20\r\n#!/usr/bin/env python\r\n# -*- coding: utf-8 -*- #\r\nfrom __future__ import unicode_literals\r\nimport datetime\r\nimport logging\r\n\r\nimport pelican\r\n\r\nPELICAN_VERSION = pelican.__version__\r\n\r\n[...]\r\n\r\nLOG_FILTER = [\r\n (logging.WARN, 'AUTHOR_SAVE_AS is set to False'),\r\n (logging.WARN, 'CATEGORY_SAVE_AS is set to False')\r\n]\r\n\r\nAUTHOR_SAVE_AS = False\r\nAUTHORS_SAVE_AS = False\r\nTAG_SAVE_AS = False\r\nTAGS_SAVE_AS = False\r\nCATEGORY_SAVE_AS = False\r\nCATEGORIES_SAVE_AS = False\r\nARCHIVES_SAVE_AS = False\r\n\r\n$ make publish\r\npelican /mnt/c/Users/max/Code/mlcdf/content -o /mnt/c/Users/max/Code/mlcdf/output -s /mnt/c/Users/max/Code/mlcdf/publishconf.py\r\nWARNING: CATEGORY_SAVE_AS is set to False\r\nWARNING: AUTHOR_SAVE_AS is set to False\r\nDone: Processed 2 articles, 0 drafts, 4 pages, 1 hidden page and 0 draft pages in 0.52 seconds.\r\n```\r\n2 things here:\r\n- it didn't work: `AUTHOR_SAVE_AS is set to False` and `CATEGORY_SAVE_AS is set to False` are still logged.\r\n- and, you may have noticed that, despite setting `TAG_SAVE_AS = False`, and not filtering it, I did not see `TAG_SAVE_AS is set to False` in the logs.\r\n\r\nUsing the template to filter worked (but it will also filter out other logs matching the template which is not what you would necessary want):\r\n```\r\nimport logging\r\nLOG_FILTER = [\r\n (logging.WARN, '%s is set to %s'),\r\n]\r\n```\r\n\r\n* Am I missing something?\r\n* What's the intended behaviour here? Is there an actual bug in the code? Or is the code example in the docs just wrong?\r\n\r\nI'm a bit lost. 
\ud83d\ude43 \r\n\r\nMaxime\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport logging\nimport os\nimport sys\nfrom collections import defaultdict\n\n__all__ = [\n 'init'\n]\n\n\nclass BaseFormatter(logging.Formatter):\n def __init__(self, fmt=None, datefmt=None):\n FORMAT = '%(customlevelname)s %(message)s'\n super().__init__(fmt=FORMAT, datefmt=datefmt)\n\n def format(self, record):\n customlevel = self._get_levelname(record.levelname)\n record.__dict__['customlevelname'] = customlevel\n # format multiline messages 'nicely' to make it clear they are together\n record.msg = record.msg.replace('\\n', '\\n | ')\n record.args = tuple(arg.replace('\\n', '\\n | ') if\n isinstance(arg, str) else\n arg for arg in record.args)\n return super().format(record)\n\n def formatException(self, ei):\n ''' prefix traceback info for better representation '''\n s = super().formatException(ei)\n # fancy format traceback\n s = '\\n'.join(' | ' + line for line in s.splitlines())\n # separate the traceback from the preceding lines\n s = ' |___\\n{}'.format(s)\n return s\n\n def _get_levelname(self, name):\n ''' NOOP: overridden by subclasses '''\n return name\n\n\nclass ANSIFormatter(BaseFormatter):\n ANSI_CODES = {\n 'red': '\\033[1;31m',\n 'yellow': '\\033[1;33m',\n 'cyan': '\\033[1;36m',\n 'white': '\\033[1;37m',\n 'bgred': '\\033[1;41m',\n 'bggrey': '\\033[1;100m',\n 'reset': '\\033[0;m'}\n\n LEVEL_COLORS = {\n 'INFO': 'cyan',\n 'WARNING': 'yellow',\n 'ERROR': 'red',\n 'CRITICAL': 'bgred',\n 'DEBUG': 'bggrey'}\n\n def _get_levelname(self, name):\n color = self.ANSI_CODES[self.LEVEL_COLORS.get(name, 'white')]\n if name == 'INFO':\n fmt = '{0}->{2}'\n else:\n fmt = '{0}{1}{2}:'\n return fmt.format(color, name, self.ANSI_CODES['reset'])\n\n\nclass TextFormatter(BaseFormatter):\n \"\"\"\n Convert a `logging.LogRecord' object into text.\n \"\"\"\n\n def _get_levelname(self, name):\n if name == 'INFO':\n return '->'\n else:\n return name + ':'\n\n\nclass LimitFilter(logging.Filter):\n \"\"\"\n Remove duplicates records, and limit the number of records in the same\n group.\n\n Groups are specified by the message to use when the number of records in\n the same group hit the limit.\n E.g.: log.warning(('43 is not the answer', 'More erroneous answers'))\n \"\"\"\n\n LOGS_DEDUP_MIN_LEVEL = logging.WARNING\n\n _ignore = set()\n _raised_messages = set()\n _threshold = 5\n _group_count = defaultdict(int)\n\n def filter(self, record):\n # don't limit log messages for anything above \"warning\"\n if record.levelno > self.LOGS_DEDUP_MIN_LEVEL:\n return True\n\n # extract group\n group = record.__dict__.get('limit_msg', None)\n group_args = record.__dict__.get('limit_args', ())\n\n # ignore record if it was already raised\n message_key = (record.levelno, record.getMessage())\n if message_key in self._raised_messages:\n return False\n else:\n self._raised_messages.add(message_key)\n\n # ignore LOG_FILTER records by templates when \"debug\" isn't enabled\n logger_level = logging.getLogger().getEffectiveLevel()\n if logger_level > logging.DEBUG:\n ignore_key = (record.levelno, record.msg)\n if ignore_key in self._ignore:\n return False\n\n # check if we went over threshold\n if group:\n key = (record.levelno, group)\n self._group_count[key] += 1\n if self._group_count[key] == self._threshold:\n record.msg = group\n record.args = group_args\n elif self._group_count[key] > self._threshold:\n return False\n return True\n\n\nclass LimitLogger(logging.Logger):\n \"\"\"\n A logger which adds LimitFilter 
automatically\n \"\"\"\n\n limit_filter = LimitFilter()\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.enable_filter()\n\n def disable_filter(self):\n self.removeFilter(LimitLogger.limit_filter)\n\n def enable_filter(self):\n self.addFilter(LimitLogger.limit_filter)\n\n\nclass FatalLogger(LimitLogger):\n warnings_fatal = False\n errors_fatal = False\n\n def warning(self, *args, **kwargs):\n super().warning(*args, **kwargs)\n if FatalLogger.warnings_fatal:\n raise RuntimeError('Warning encountered')\n\n def error(self, *args, **kwargs):\n super().error(*args, **kwargs)\n if FatalLogger.errors_fatal:\n raise RuntimeError('Error encountered')\n\n\nlogging.setLoggerClass(FatalLogger)\n\n\ndef supports_color():\n \"\"\"\n Returns True if the running system's terminal supports color,\n and False otherwise.\n\n from django.core.management.color\n \"\"\"\n plat = sys.platform\n supported_platform = plat != 'Pocket PC' and \\\n (plat != 'win32' or 'ANSICON' in os.environ)\n\n # isatty is not always implemented, #6223.\n is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()\n if not supported_platform or not is_a_tty:\n return False\n return True\n\n\ndef get_formatter():\n if supports_color():\n return ANSIFormatter()\n else:\n return TextFormatter()\n\n\ndef init(level=None, fatal='', handler=logging.StreamHandler(), name=None,\n logs_dedup_min_level=None):\n FatalLogger.warnings_fatal = fatal.startswith('warning')\n FatalLogger.errors_fatal = bool(fatal)\n\n logger = logging.getLogger(name)\n\n handler.setFormatter(get_formatter())\n logger.addHandler(handler)\n\n if level:\n logger.setLevel(level)\n if logs_dedup_min_level:\n LimitFilter.LOGS_DEDUP_MIN_LEVEL = logs_dedup_min_level\n\n\ndef log_warnings():\n import warnings\n logging.captureWarnings(True)\n warnings.simplefilter(\"default\", DeprecationWarning)\n init(logging.DEBUG, name='py.warnings')\n\n\nif __name__ == '__main__':\n init(level=logging.DEBUG)\n\n root_logger = logging.getLogger()\n root_logger.debug('debug')\n root_logger.info('info')\n root_logger.warning('warning')\n root_logger.error('error')\n root_logger.critical('critical')\n", "path": "pelican/log.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport logging\nimport os\nimport sys\nfrom collections import defaultdict\n\n__all__ = [\n 'init'\n]\n\n\nclass BaseFormatter(logging.Formatter):\n def __init__(self, fmt=None, datefmt=None):\n FORMAT = '%(customlevelname)s %(message)s'\n super().__init__(fmt=FORMAT, datefmt=datefmt)\n\n def format(self, record):\n customlevel = self._get_levelname(record.levelname)\n record.__dict__['customlevelname'] = customlevel\n # format multiline messages 'nicely' to make it clear they are together\n record.msg = record.msg.replace('\\n', '\\n | ')\n record.args = tuple(arg.replace('\\n', '\\n | ') if\n isinstance(arg, str) else\n arg for arg in record.args)\n return super().format(record)\n\n def formatException(self, ei):\n ''' prefix traceback info for better representation '''\n s = super().formatException(ei)\n # fancy format traceback\n s = '\\n'.join(' | ' + line for line in s.splitlines())\n # separate the traceback from the preceding lines\n s = ' |___\\n{}'.format(s)\n return s\n\n def _get_levelname(self, name):\n ''' NOOP: overridden by subclasses '''\n return name\n\n\nclass ANSIFormatter(BaseFormatter):\n ANSI_CODES = {\n 'red': '\\033[1;31m',\n 'yellow': '\\033[1;33m',\n 'cyan': '\\033[1;36m',\n 'white': '\\033[1;37m',\n 'bgred': '\\033[1;41m',\n 'bggrey': 
'\\033[1;100m',\n 'reset': '\\033[0;m'}\n\n LEVEL_COLORS = {\n 'INFO': 'cyan',\n 'WARNING': 'yellow',\n 'ERROR': 'red',\n 'CRITICAL': 'bgred',\n 'DEBUG': 'bggrey'}\n\n def _get_levelname(self, name):\n color = self.ANSI_CODES[self.LEVEL_COLORS.get(name, 'white')]\n if name == 'INFO':\n fmt = '{0}->{2}'\n else:\n fmt = '{0}{1}{2}:'\n return fmt.format(color, name, self.ANSI_CODES['reset'])\n\n\nclass TextFormatter(BaseFormatter):\n \"\"\"\n Convert a `logging.LogRecord' object into text.\n \"\"\"\n\n def _get_levelname(self, name):\n if name == 'INFO':\n return '->'\n else:\n return name + ':'\n\n\nclass LimitFilter(logging.Filter):\n \"\"\"\n Remove duplicates records, and limit the number of records in the same\n group.\n\n Groups are specified by the message to use when the number of records in\n the same group hit the limit.\n E.g.: log.warning(('43 is not the answer', 'More erroneous answers'))\n \"\"\"\n\n LOGS_DEDUP_MIN_LEVEL = logging.WARNING\n\n _ignore = set()\n _raised_messages = set()\n _threshold = 5\n _group_count = defaultdict(int)\n\n def filter(self, record):\n # don't limit log messages for anything above \"warning\"\n if record.levelno > self.LOGS_DEDUP_MIN_LEVEL:\n return True\n\n # extract group\n group = record.__dict__.get('limit_msg', None)\n group_args = record.__dict__.get('limit_args', ())\n\n # ignore record if it was already raised\n message_key = (record.levelno, record.getMessage())\n if message_key in self._raised_messages:\n return False\n else:\n self._raised_messages.add(message_key)\n\n # ignore LOG_FILTER records by templates or messages\n # when \"debug\" isn't enabled\n logger_level = logging.getLogger().getEffectiveLevel()\n if logger_level > logging.DEBUG:\n template_key = (record.levelno, record.msg)\n message_key = (record.levelno, record.getMessage())\n if (template_key in self._ignore or message_key in self._ignore):\n return False\n\n # check if we went over threshold\n if group:\n key = (record.levelno, group)\n self._group_count[key] += 1\n if self._group_count[key] == self._threshold:\n record.msg = group\n record.args = group_args\n elif self._group_count[key] > self._threshold:\n return False\n return True\n\n\nclass LimitLogger(logging.Logger):\n \"\"\"\n A logger which adds LimitFilter automatically\n \"\"\"\n\n limit_filter = LimitFilter()\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.enable_filter()\n\n def disable_filter(self):\n self.removeFilter(LimitLogger.limit_filter)\n\n def enable_filter(self):\n self.addFilter(LimitLogger.limit_filter)\n\n\nclass FatalLogger(LimitLogger):\n warnings_fatal = False\n errors_fatal = False\n\n def warning(self, *args, **kwargs):\n super().warning(*args, **kwargs)\n if FatalLogger.warnings_fatal:\n raise RuntimeError('Warning encountered')\n\n def error(self, *args, **kwargs):\n super().error(*args, **kwargs)\n if FatalLogger.errors_fatal:\n raise RuntimeError('Error encountered')\n\n\nlogging.setLoggerClass(FatalLogger)\n\n\ndef supports_color():\n \"\"\"\n Returns True if the running system's terminal supports color,\n and False otherwise.\n\n from django.core.management.color\n \"\"\"\n plat = sys.platform\n supported_platform = plat != 'Pocket PC' and \\\n (plat != 'win32' or 'ANSICON' in os.environ)\n\n # isatty is not always implemented, #6223.\n is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()\n if not supported_platform or not is_a_tty:\n return False\n return True\n\n\ndef get_formatter():\n if supports_color():\n return 
ANSIFormatter()\n else:\n return TextFormatter()\n\n\ndef init(level=None, fatal='', handler=logging.StreamHandler(), name=None,\n logs_dedup_min_level=None):\n FatalLogger.warnings_fatal = fatal.startswith('warning')\n FatalLogger.errors_fatal = bool(fatal)\n\n logger = logging.getLogger(name)\n\n handler.setFormatter(get_formatter())\n logger.addHandler(handler)\n\n if level:\n logger.setLevel(level)\n if logs_dedup_min_level:\n LimitFilter.LOGS_DEDUP_MIN_LEVEL = logs_dedup_min_level\n\n\ndef log_warnings():\n import warnings\n logging.captureWarnings(True)\n warnings.simplefilter(\"default\", DeprecationWarning)\n init(logging.DEBUG, name='py.warnings')\n\n\nif __name__ == '__main__':\n init(level=logging.DEBUG)\n\n root_logger = logging.getLogger()\n root_logger.debug('debug')\n root_logger.info('info')\n root_logger.warning('warning')\n root_logger.error('error')\n root_logger.critical('critical')\n", "path": "pelican/log.py"}]} | 2,918 | 200 |
gh_patches_debug_23711 | rasdani/github-patches | git_diff | pretix__pretix-254 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Voucher-counting returning True/False
The tag-counting of vouchers seems to have two issues at the moment:
- No voucher has been used yet:
`False/<Number of Vouchers>` instead of `0/<Number of Vouchers>`
- Some other bug if some vouchers have already been redeemed (I cannot put my finger on when or why this happens):
`True/<Number of Vouchers>` instead of `<Number of used Vouchers>/<Number of Vouchers>`
The second item can currently be observed, for example, for Budapest.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pretix/control/views/vouchers.py`
Content:
```
1 import csv
2 import io
3
4 from django.conf import settings
5 from django.contrib import messages
6 from django.core.urlresolvers import resolve, reverse
7 from django.db import transaction
8 from django.db.models import Count, Q, Sum
9 from django.http import (
10 Http404, HttpResponse, HttpResponseBadRequest, HttpResponseRedirect,
11 JsonResponse,
12 )
13 from django.utils.timezone import now
14 from django.utils.translation import ugettext_lazy as _
15 from django.views.generic import (
16 CreateView, DeleteView, ListView, TemplateView, UpdateView, View,
17 )
18
19 from pretix.base.models import Voucher
20 from pretix.base.models.vouchers import _generate_random_code
21 from pretix.control.forms.vouchers import VoucherBulkForm, VoucherForm
22 from pretix.control.permissions import EventPermissionRequiredMixin
23 from pretix.control.signals import voucher_form_class
24
25
26 class VoucherList(EventPermissionRequiredMixin, ListView):
27 model = Voucher
28 context_object_name = 'vouchers'
29 paginate_by = 30
30 template_name = 'pretixcontrol/vouchers/index.html'
31 permission = 'can_view_vouchers'
32
33 def get_queryset(self):
34 qs = self.request.event.vouchers.all().select_related('item', 'variation')
35 if self.request.GET.get("search", "") != "":
36 s = self.request.GET.get("search", "")
37 qs = qs.filter(Q(code__icontains=s) | Q(tag__icontains=s) | Q(comment__icontains=s))
38 if self.request.GET.get("tag", "") != "":
39 s = self.request.GET.get("tag", "")
40 qs = qs.filter(tag__icontains=s)
41 if self.request.GET.get("status", "") != "":
42 s = self.request.GET.get("status", "")
43 if s == 'v':
44 qs = qs.filter(Q(valid_until__isnull=True) | Q(valid_until__gt=now())).filter(redeemed=False)
45 elif s == 'r':
46 qs = qs.filter(redeemed=True)
47 elif s == 'e':
48 qs = qs.filter(Q(valid_until__isnull=False) & Q(valid_until__lt=now())).filter(redeemed=False)
49 return qs
50
51 def get(self, request, *args, **kwargs):
52 if request.GET.get("download", "") == "yes":
53 return self._download_csv()
54 return super().get(request, *args, **kwargs)
55
56 def _download_csv(self):
57 output = io.StringIO()
58 writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC, delimiter=",")
59
60 headers = [
61 _('Voucher code'), _('Valid until'), _('Product'), _('Reserve quota'), _('Bypass quota'),
62 _('Price'), _('Tag'), _('Redeemed')
63 ]
64 writer.writerow(headers)
65
66 for v in self.get_queryset():
67 if v.item:
68 if v.variation:
69 prod = '%s – %s' % (str(v.item.name), str(v.variation.name))
70 else:
71 prod = '%s' % str(v.item.name)
72 elif v.quota:
73 prod = _('Any product in quota "{quota}"').format(quota=str(v.quota.name))
74 row = [
75 v.code,
76 v.valid_until.isoformat() if v.valid_until else "",
77 prod,
78 _("Yes") if v.block_quota else _("No"),
79 _("Yes") if v.allow_ignore_quota else _("No"),
80 str(v.price) if v.price else "",
81 v.tag,
82 _("Yes") if v.redeemed else _("No"),
83 ]
84 writer.writerow(row)
85
86 r = HttpResponse(output.getvalue().encode("utf-8"), content_type='text/csv')
87 r['Content-Disposition'] = 'attachment; filename="vouchers.csv"'
88 return r
89
90
91 class VoucherTags(EventPermissionRequiredMixin, TemplateView):
92 template_name = 'pretixcontrol/vouchers/tags.html'
93 permission = 'can_view_vouchers'
94
95 def get_context_data(self, **kwargs):
96 ctx = super().get_context_data(**kwargs)
97
98 tags = self.request.event.vouchers.order_by('tag').filter(tag__isnull=False).values('tag').annotate(
99 total=Count('id'),
100 redeemed=Sum('redeemed')
101 )
102 for t in tags:
103 t['percentage'] = int((t['redeemed'] / t['total']) * 100)
104
105 ctx['tags'] = tags
106 return ctx
107
108
109 class VoucherDelete(EventPermissionRequiredMixin, DeleteView):
110 model = Voucher
111 template_name = 'pretixcontrol/vouchers/delete.html'
112 permission = 'can_change_vouchers'
113 context_object_name = 'voucher'
114
115 def get_object(self, queryset=None) -> Voucher:
116 try:
117 return self.request.event.vouchers.get(
118 id=self.kwargs['voucher']
119 )
120 except Voucher.DoesNotExist:
121 raise Http404(_("The requested voucher does not exist."))
122
123 def get(self, request, *args, **kwargs):
124 if self.get_object().redeemed:
125 messages.error(request, _('A voucher can not be deleted if it already has been redeemed.'))
126 return HttpResponseRedirect(self.get_success_url())
127 return super().get(request, *args, **kwargs)
128
129 @transaction.atomic
130 def delete(self, request, *args, **kwargs):
131 self.object = self.get_object()
132 success_url = self.get_success_url()
133
134 if self.object.redeemed:
135 messages.error(request, _('A voucher can not be deleted if it already has been redeemed.'))
136 else:
137 self.object.log_action('pretix.voucher.deleted', user=self.request.user)
138 self.object.delete()
139 messages.success(request, _('The selected voucher has been deleted.'))
140 return HttpResponseRedirect(success_url)
141
142 def get_success_url(self) -> str:
143 return reverse('control:event.vouchers', kwargs={
144 'organizer': self.request.event.organizer.slug,
145 'event': self.request.event.slug,
146 })
147
148
149 class VoucherUpdate(EventPermissionRequiredMixin, UpdateView):
150 model = Voucher
151 template_name = 'pretixcontrol/vouchers/detail.html'
152 permission = 'can_change_vouchers'
153 context_object_name = 'voucher'
154
155 def get_form_class(self):
156 form_class = VoucherForm
157 for receiver, response in voucher_form_class.send(self.request.event, cls=form_class):
158 if response:
159 form_class = response
160 return form_class
161
162 def get_object(self, queryset=None) -> VoucherForm:
163 url = resolve(self.request.path_info)
164 try:
165 return self.request.event.vouchers.get(
166 id=url.kwargs['voucher']
167 )
168 except Voucher.DoesNotExist:
169 raise Http404(_("The requested voucher does not exist."))
170
171 @transaction.atomic
172 def form_valid(self, form):
173 messages.success(self.request, _('Your changes have been saved.'))
174 if form.has_changed():
175 self.object.log_action(
176 'pretix.voucher.changed', user=self.request.user, data={
177 k: form.cleaned_data.get(k) for k in form.changed_data
178 }
179 )
180 return super().form_valid(form)
181
182 def get_success_url(self) -> str:
183 return reverse('control:event.vouchers', kwargs={
184 'organizer': self.request.event.organizer.slug,
185 'event': self.request.event.slug,
186 })
187
188
189 class VoucherCreate(EventPermissionRequiredMixin, CreateView):
190 model = Voucher
191 template_name = 'pretixcontrol/vouchers/detail.html'
192 permission = 'can_change_vouchers'
193 context_object_name = 'voucher'
194
195 def get_form_class(self):
196 form_class = VoucherForm
197 for receiver, response in voucher_form_class.send(self.request.event, cls=form_class):
198 if response:
199 form_class = response
200 return form_class
201
202 def get_success_url(self) -> str:
203 return reverse('control:event.vouchers', kwargs={
204 'organizer': self.request.event.organizer.slug,
205 'event': self.request.event.slug,
206 })
207
208 def get_form_kwargs(self):
209 kwargs = super().get_form_kwargs()
210 kwargs['instance'] = Voucher(event=self.request.event)
211 return kwargs
212
213 @transaction.atomic
214 def form_valid(self, form):
215 form.instance.event = self.request.event
216 messages.success(self.request, _('The new voucher has been created.'))
217 ret = super().form_valid(form)
218 form.instance.log_action('pretix.voucher.added', data=dict(form.cleaned_data), user=self.request.user)
219 return ret
220
221 def post(self, request, *args, **kwargs):
222 # TODO: Transform this into an asynchronous call?
223 with request.event.lock():
224 return super().post(request, *args, **kwargs)
225
226
227 class VoucherBulkCreate(EventPermissionRequiredMixin, CreateView):
228 model = Voucher
229 template_name = 'pretixcontrol/vouchers/bulk.html'
230 permission = 'can_change_vouchers'
231 context_object_name = 'voucher'
232
233 def get_success_url(self) -> str:
234 return reverse('control:event.vouchers', kwargs={
235 'organizer': self.request.event.organizer.slug,
236 'event': self.request.event.slug,
237 })
238
239 def get_form_kwargs(self):
240 kwargs = super().get_form_kwargs()
241 kwargs['instance'] = Voucher(event=self.request.event)
242 return kwargs
243
244 @transaction.atomic
245 def form_valid(self, form):
246 for o in form.save(self.request.event):
247 o.log_action('pretix.voucher.added', data=form.cleaned_data, user=self.request.user)
248 messages.success(self.request, _('The new vouchers have been created.'))
249 return HttpResponseRedirect(self.get_success_url())
250
251 def get_form_class(self):
252 form_class = VoucherBulkForm
253 for receiver, response in voucher_form_class.send(self.request.event, cls=form_class):
254 if response:
255 form_class = response
256 return form_class
257
258 def get_context_data(self, **kwargs):
259 ctx = super().get_context_data(**kwargs)
260 ctx['code_length'] = settings.ENTROPY['voucher_code']
261 return ctx
262
263 def post(self, request, *args, **kwargs):
264 # TODO: Transform this into an asynchronous call?
265 with request.event.lock():
266 return super().post(request, *args, **kwargs)
267
268
269 class VoucherRNG(EventPermissionRequiredMixin, View):
270 permission = 'can_change_vouchers'
271
272 def get(self, request, *args, **kwargs):
273 codes = set()
274 try:
275 num = int(request.GET.get('num', '5'))
276 except ValueError: # NOQA
277 return HttpResponseBadRequest()
278
279 while len(codes) < num:
280 new_codes = set()
281 for i in range(min(num - len(codes), 500)): # Work around SQLite's SQLITE_MAX_VARIABLE_NUMBER
282 new_codes.add(_generate_random_code())
283 new_codes -= set([v['code'] for v in Voucher.objects.filter(code__in=new_codes).values('code')])
284 codes |= new_codes
285
286 return JsonResponse({
287 'codes': list(codes)
288 })
289
290 def get_success_url(self) -> str:
291 return reverse('control:event.vouchers', kwargs={
292 'organizer': self.request.event.organizer.slug,
293 'event': self.request.event.slug,
294 })
295
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/pretix/control/views/vouchers.py b/src/pretix/control/views/vouchers.py
--- a/src/pretix/control/views/vouchers.py
+++ b/src/pretix/control/views/vouchers.py
@@ -5,7 +5,7 @@
from django.contrib import messages
from django.core.urlresolvers import resolve, reverse
from django.db import transaction
-from django.db.models import Count, Q, Sum
+from django.db.models import Case, Count, IntegerField, Q, Sum, When
from django.http import (
Http404, HttpResponse, HttpResponseBadRequest, HttpResponseRedirect,
JsonResponse,
@@ -97,7 +97,14 @@
tags = self.request.event.vouchers.order_by('tag').filter(tag__isnull=False).values('tag').annotate(
total=Count('id'),
- redeemed=Sum('redeemed')
+ # This is a fix for this MySQL issue: https://code.djangoproject.com/ticket/24662
+ redeemed=Sum(
+ Case(
+ When(redeemed=True, then=1),
+ When(redeemed=False, then=0),
+ output_field=IntegerField()
+ )
+ )
)
for t in tags:
t['percentage'] = int((t['redeemed'] / t['total']) * 100)
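
A brief note on the pattern in the fix: `Sum('redeemed')` aggregates a `BooleanField` directly, which on some backends (the comment in the patch points to MySQL and Django ticket 24662) comes back as `True`/`False` instead of an integer — exactly the `False/<total>` and `True/<total>` output described in the issue. Wrapping each row in `Case`/`When` with an `IntegerField` output forces a 1/0 sum. On Django 2.0+ the same count could also be written as a filtered aggregate; the snippet below is only an illustrative alternative under that version assumption, not what the patch uses, and `event` stands in for an Event instance:

```python
from django.db.models import Count, Q

# Hypothetical sketch (requires Django >= 2.0): count redeemed vouchers per tag
# as a filtered aggregate instead of a conditional Sum.
tags = (
    event.vouchers
    .filter(tag__isnull=False)
    .values('tag')
    .annotate(
        total=Count('id'),
        redeemed=Count('id', filter=Q(redeemed=True)),
    )
)
```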
| {"golden_diff": "diff --git a/src/pretix/control/views/vouchers.py b/src/pretix/control/views/vouchers.py\n--- a/src/pretix/control/views/vouchers.py\n+++ b/src/pretix/control/views/vouchers.py\n@@ -5,7 +5,7 @@\n from django.contrib import messages\n from django.core.urlresolvers import resolve, reverse\n from django.db import transaction\n-from django.db.models import Count, Q, Sum\n+from django.db.models import Case, Count, IntegerField, Q, Sum, When\n from django.http import (\n Http404, HttpResponse, HttpResponseBadRequest, HttpResponseRedirect,\n JsonResponse,\n@@ -97,7 +97,14 @@\n \n tags = self.request.event.vouchers.order_by('tag').filter(tag__isnull=False).values('tag').annotate(\n total=Count('id'),\n- redeemed=Sum('redeemed')\n+ # This is a fix for this MySQL issue: https://code.djangoproject.com/ticket/24662\n+ redeemed=Sum(\n+ Case(\n+ When(redeemed=True, then=1),\n+ When(redeemed=False, then=0),\n+ output_field=IntegerField()\n+ )\n+ )\n )\n for t in tags:\n t['percentage'] = int((t['redeemed'] / t['total']) * 100)\n", "issue": "Voucher-counting returning True/False\nThe tag-counting of vouchers seems two have two issues at the moment:\n- No voucher has been used yet:\n `False/<Number of Vouchers>` instead of `0/<Number of Vouchers>`\n- Some other bug, if some vouchers have already been redeemed (cannot put my finger on, when this happens/why this happens):\n `True/<Number of Vouchers>` instead of `<Number of used Vouchers>/<Number of Vouchers>`\n\nSecond item can be observed at the moment for example for budapest.\n\n", "before_files": [{"content": "import csv\nimport io\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.core.urlresolvers import resolve, reverse\nfrom django.db import transaction\nfrom django.db.models import Count, Q, Sum\nfrom django.http import (\n Http404, HttpResponse, HttpResponseBadRequest, HttpResponseRedirect,\n JsonResponse,\n)\nfrom django.utils.timezone import now\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.generic import (\n CreateView, DeleteView, ListView, TemplateView, UpdateView, View,\n)\n\nfrom pretix.base.models import Voucher\nfrom pretix.base.models.vouchers import _generate_random_code\nfrom pretix.control.forms.vouchers import VoucherBulkForm, VoucherForm\nfrom pretix.control.permissions import EventPermissionRequiredMixin\nfrom pretix.control.signals import voucher_form_class\n\n\nclass VoucherList(EventPermissionRequiredMixin, ListView):\n model = Voucher\n context_object_name = 'vouchers'\n paginate_by = 30\n template_name = 'pretixcontrol/vouchers/index.html'\n permission = 'can_view_vouchers'\n\n def get_queryset(self):\n qs = self.request.event.vouchers.all().select_related('item', 'variation')\n if self.request.GET.get(\"search\", \"\") != \"\":\n s = self.request.GET.get(\"search\", \"\")\n qs = qs.filter(Q(code__icontains=s) | Q(tag__icontains=s) | Q(comment__icontains=s))\n if self.request.GET.get(\"tag\", \"\") != \"\":\n s = self.request.GET.get(\"tag\", \"\")\n qs = qs.filter(tag__icontains=s)\n if self.request.GET.get(\"status\", \"\") != \"\":\n s = self.request.GET.get(\"status\", \"\")\n if s == 'v':\n qs = qs.filter(Q(valid_until__isnull=True) | Q(valid_until__gt=now())).filter(redeemed=False)\n elif s == 'r':\n qs = qs.filter(redeemed=True)\n elif s == 'e':\n qs = qs.filter(Q(valid_until__isnull=False) & Q(valid_until__lt=now())).filter(redeemed=False)\n return qs\n\n def get(self, request, *args, **kwargs):\n if request.GET.get(\"download\", \"\") 
== \"yes\":\n return self._download_csv()\n return super().get(request, *args, **kwargs)\n\n def _download_csv(self):\n output = io.StringIO()\n writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC, delimiter=\",\")\n\n headers = [\n _('Voucher code'), _('Valid until'), _('Product'), _('Reserve quota'), _('Bypass quota'),\n _('Price'), _('Tag'), _('Redeemed')\n ]\n writer.writerow(headers)\n\n for v in self.get_queryset():\n if v.item:\n if v.variation:\n prod = '%s \u2013 %s' % (str(v.item.name), str(v.variation.name))\n else:\n prod = '%s' % str(v.item.name)\n elif v.quota:\n prod = _('Any product in quota \"{quota}\"').format(quota=str(v.quota.name))\n row = [\n v.code,\n v.valid_until.isoformat() if v.valid_until else \"\",\n prod,\n _(\"Yes\") if v.block_quota else _(\"No\"),\n _(\"Yes\") if v.allow_ignore_quota else _(\"No\"),\n str(v.price) if v.price else \"\",\n v.tag,\n _(\"Yes\") if v.redeemed else _(\"No\"),\n ]\n writer.writerow(row)\n\n r = HttpResponse(output.getvalue().encode(\"utf-8\"), content_type='text/csv')\n r['Content-Disposition'] = 'attachment; filename=\"vouchers.csv\"'\n return r\n\n\nclass VoucherTags(EventPermissionRequiredMixin, TemplateView):\n template_name = 'pretixcontrol/vouchers/tags.html'\n permission = 'can_view_vouchers'\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n\n tags = self.request.event.vouchers.order_by('tag').filter(tag__isnull=False).values('tag').annotate(\n total=Count('id'),\n redeemed=Sum('redeemed')\n )\n for t in tags:\n t['percentage'] = int((t['redeemed'] / t['total']) * 100)\n\n ctx['tags'] = tags\n return ctx\n\n\nclass VoucherDelete(EventPermissionRequiredMixin, DeleteView):\n model = Voucher\n template_name = 'pretixcontrol/vouchers/delete.html'\n permission = 'can_change_vouchers'\n context_object_name = 'voucher'\n\n def get_object(self, queryset=None) -> Voucher:\n try:\n return self.request.event.vouchers.get(\n id=self.kwargs['voucher']\n )\n except Voucher.DoesNotExist:\n raise Http404(_(\"The requested voucher does not exist.\"))\n\n def get(self, request, *args, **kwargs):\n if self.get_object().redeemed:\n messages.error(request, _('A voucher can not be deleted if it already has been redeemed.'))\n return HttpResponseRedirect(self.get_success_url())\n return super().get(request, *args, **kwargs)\n\n @transaction.atomic\n def delete(self, request, *args, **kwargs):\n self.object = self.get_object()\n success_url = self.get_success_url()\n\n if self.object.redeemed:\n messages.error(request, _('A voucher can not be deleted if it already has been redeemed.'))\n else:\n self.object.log_action('pretix.voucher.deleted', user=self.request.user)\n self.object.delete()\n messages.success(request, _('The selected voucher has been deleted.'))\n return HttpResponseRedirect(success_url)\n\n def get_success_url(self) -> str:\n return reverse('control:event.vouchers', kwargs={\n 'organizer': self.request.event.organizer.slug,\n 'event': self.request.event.slug,\n })\n\n\nclass VoucherUpdate(EventPermissionRequiredMixin, UpdateView):\n model = Voucher\n template_name = 'pretixcontrol/vouchers/detail.html'\n permission = 'can_change_vouchers'\n context_object_name = 'voucher'\n\n def get_form_class(self):\n form_class = VoucherForm\n for receiver, response in voucher_form_class.send(self.request.event, cls=form_class):\n if response:\n form_class = response\n return form_class\n\n def get_object(self, queryset=None) -> VoucherForm:\n url = resolve(self.request.path_info)\n try:\n return 
self.request.event.vouchers.get(\n id=url.kwargs['voucher']\n )\n except Voucher.DoesNotExist:\n raise Http404(_(\"The requested voucher does not exist.\"))\n\n @transaction.atomic\n def form_valid(self, form):\n messages.success(self.request, _('Your changes have been saved.'))\n if form.has_changed():\n self.object.log_action(\n 'pretix.voucher.changed', user=self.request.user, data={\n k: form.cleaned_data.get(k) for k in form.changed_data\n }\n )\n return super().form_valid(form)\n\n def get_success_url(self) -> str:\n return reverse('control:event.vouchers', kwargs={\n 'organizer': self.request.event.organizer.slug,\n 'event': self.request.event.slug,\n })\n\n\nclass VoucherCreate(EventPermissionRequiredMixin, CreateView):\n model = Voucher\n template_name = 'pretixcontrol/vouchers/detail.html'\n permission = 'can_change_vouchers'\n context_object_name = 'voucher'\n\n def get_form_class(self):\n form_class = VoucherForm\n for receiver, response in voucher_form_class.send(self.request.event, cls=form_class):\n if response:\n form_class = response\n return form_class\n\n def get_success_url(self) -> str:\n return reverse('control:event.vouchers', kwargs={\n 'organizer': self.request.event.organizer.slug,\n 'event': self.request.event.slug,\n })\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['instance'] = Voucher(event=self.request.event)\n return kwargs\n\n @transaction.atomic\n def form_valid(self, form):\n form.instance.event = self.request.event\n messages.success(self.request, _('The new voucher has been created.'))\n ret = super().form_valid(form)\n form.instance.log_action('pretix.voucher.added', data=dict(form.cleaned_data), user=self.request.user)\n return ret\n\n def post(self, request, *args, **kwargs):\n # TODO: Transform this into an asynchronous call?\n with request.event.lock():\n return super().post(request, *args, **kwargs)\n\n\nclass VoucherBulkCreate(EventPermissionRequiredMixin, CreateView):\n model = Voucher\n template_name = 'pretixcontrol/vouchers/bulk.html'\n permission = 'can_change_vouchers'\n context_object_name = 'voucher'\n\n def get_success_url(self) -> str:\n return reverse('control:event.vouchers', kwargs={\n 'organizer': self.request.event.organizer.slug,\n 'event': self.request.event.slug,\n })\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['instance'] = Voucher(event=self.request.event)\n return kwargs\n\n @transaction.atomic\n def form_valid(self, form):\n for o in form.save(self.request.event):\n o.log_action('pretix.voucher.added', data=form.cleaned_data, user=self.request.user)\n messages.success(self.request, _('The new vouchers have been created.'))\n return HttpResponseRedirect(self.get_success_url())\n\n def get_form_class(self):\n form_class = VoucherBulkForm\n for receiver, response in voucher_form_class.send(self.request.event, cls=form_class):\n if response:\n form_class = response\n return form_class\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n ctx['code_length'] = settings.ENTROPY['voucher_code']\n return ctx\n\n def post(self, request, *args, **kwargs):\n # TODO: Transform this into an asynchronous call?\n with request.event.lock():\n return super().post(request, *args, **kwargs)\n\n\nclass VoucherRNG(EventPermissionRequiredMixin, View):\n permission = 'can_change_vouchers'\n\n def get(self, request, *args, **kwargs):\n codes = set()\n try:\n num = int(request.GET.get('num', '5'))\n except ValueError: # NOQA\n return 
HttpResponseBadRequest()\n\n while len(codes) < num:\n new_codes = set()\n for i in range(min(num - len(codes), 500)): # Work around SQLite's SQLITE_MAX_VARIABLE_NUMBER\n new_codes.add(_generate_random_code())\n new_codes -= set([v['code'] for v in Voucher.objects.filter(code__in=new_codes).values('code')])\n codes |= new_codes\n\n return JsonResponse({\n 'codes': list(codes)\n })\n\n def get_success_url(self) -> str:\n return reverse('control:event.vouchers', kwargs={\n 'organizer': self.request.event.organizer.slug,\n 'event': self.request.event.slug,\n })\n", "path": "src/pretix/control/views/vouchers.py"}], "after_files": [{"content": "import csv\nimport io\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.core.urlresolvers import resolve, reverse\nfrom django.db import transaction\nfrom django.db.models import Case, Count, IntegerField, Q, Sum, When\nfrom django.http import (\n Http404, HttpResponse, HttpResponseBadRequest, HttpResponseRedirect,\n JsonResponse,\n)\nfrom django.utils.timezone import now\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.generic import (\n CreateView, DeleteView, ListView, TemplateView, UpdateView, View,\n)\n\nfrom pretix.base.models import Voucher\nfrom pretix.base.models.vouchers import _generate_random_code\nfrom pretix.control.forms.vouchers import VoucherBulkForm, VoucherForm\nfrom pretix.control.permissions import EventPermissionRequiredMixin\nfrom pretix.control.signals import voucher_form_class\n\n\nclass VoucherList(EventPermissionRequiredMixin, ListView):\n model = Voucher\n context_object_name = 'vouchers'\n paginate_by = 30\n template_name = 'pretixcontrol/vouchers/index.html'\n permission = 'can_view_vouchers'\n\n def get_queryset(self):\n qs = self.request.event.vouchers.all().select_related('item', 'variation')\n if self.request.GET.get(\"search\", \"\") != \"\":\n s = self.request.GET.get(\"search\", \"\")\n qs = qs.filter(Q(code__icontains=s) | Q(tag__icontains=s) | Q(comment__icontains=s))\n if self.request.GET.get(\"tag\", \"\") != \"\":\n s = self.request.GET.get(\"tag\", \"\")\n qs = qs.filter(tag__icontains=s)\n if self.request.GET.get(\"status\", \"\") != \"\":\n s = self.request.GET.get(\"status\", \"\")\n if s == 'v':\n qs = qs.filter(Q(valid_until__isnull=True) | Q(valid_until__gt=now())).filter(redeemed=False)\n elif s == 'r':\n qs = qs.filter(redeemed=True)\n elif s == 'e':\n qs = qs.filter(Q(valid_until__isnull=False) & Q(valid_until__lt=now())).filter(redeemed=False)\n return qs\n\n def get(self, request, *args, **kwargs):\n if request.GET.get(\"download\", \"\") == \"yes\":\n return self._download_csv()\n return super().get(request, *args, **kwargs)\n\n def _download_csv(self):\n output = io.StringIO()\n writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC, delimiter=\",\")\n\n headers = [\n _('Voucher code'), _('Valid until'), _('Product'), _('Reserve quota'), _('Bypass quota'),\n _('Price'), _('Tag'), _('Redeemed')\n ]\n writer.writerow(headers)\n\n for v in self.get_queryset():\n if v.item:\n if v.variation:\n prod = '%s \u2013 %s' % (str(v.item.name), str(v.variation.name))\n else:\n prod = '%s' % str(v.item.name)\n elif v.quota:\n prod = _('Any product in quota \"{quota}\"').format(quota=str(v.quota.name))\n row = [\n v.code,\n v.valid_until.isoformat() if v.valid_until else \"\",\n prod,\n _(\"Yes\") if v.block_quota else _(\"No\"),\n _(\"Yes\") if v.allow_ignore_quota else _(\"No\"),\n str(v.price) if v.price else \"\",\n v.tag,\n _(\"Yes\") if 
v.redeemed else _(\"No\"),\n ]\n writer.writerow(row)\n\n r = HttpResponse(output.getvalue().encode(\"utf-8\"), content_type='text/csv')\n r['Content-Disposition'] = 'attachment; filename=\"vouchers.csv\"'\n return r\n\n\nclass VoucherTags(EventPermissionRequiredMixin, TemplateView):\n template_name = 'pretixcontrol/vouchers/tags.html'\n permission = 'can_view_vouchers'\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n\n tags = self.request.event.vouchers.order_by('tag').filter(tag__isnull=False).values('tag').annotate(\n total=Count('id'),\n # This is a fix for this MySQL issue: https://code.djangoproject.com/ticket/24662\n redeemed=Sum(\n Case(\n When(redeemed=True, then=1),\n When(redeemed=False, then=0),\n output_field=IntegerField()\n )\n )\n )\n for t in tags:\n t['percentage'] = int((t['redeemed'] / t['total']) * 100)\n\n ctx['tags'] = tags\n return ctx\n\n\nclass VoucherDelete(EventPermissionRequiredMixin, DeleteView):\n model = Voucher\n template_name = 'pretixcontrol/vouchers/delete.html'\n permission = 'can_change_vouchers'\n context_object_name = 'voucher'\n\n def get_object(self, queryset=None) -> Voucher:\n try:\n return self.request.event.vouchers.get(\n id=self.kwargs['voucher']\n )\n except Voucher.DoesNotExist:\n raise Http404(_(\"The requested voucher does not exist.\"))\n\n def get(self, request, *args, **kwargs):\n if self.get_object().redeemed:\n messages.error(request, _('A voucher can not be deleted if it already has been redeemed.'))\n return HttpResponseRedirect(self.get_success_url())\n return super().get(request, *args, **kwargs)\n\n @transaction.atomic\n def delete(self, request, *args, **kwargs):\n self.object = self.get_object()\n success_url = self.get_success_url()\n\n if self.object.redeemed:\n messages.error(request, _('A voucher can not be deleted if it already has been redeemed.'))\n else:\n self.object.log_action('pretix.voucher.deleted', user=self.request.user)\n self.object.delete()\n messages.success(request, _('The selected voucher has been deleted.'))\n return HttpResponseRedirect(success_url)\n\n def get_success_url(self) -> str:\n return reverse('control:event.vouchers', kwargs={\n 'organizer': self.request.event.organizer.slug,\n 'event': self.request.event.slug,\n })\n\n\nclass VoucherUpdate(EventPermissionRequiredMixin, UpdateView):\n model = Voucher\n template_name = 'pretixcontrol/vouchers/detail.html'\n permission = 'can_change_vouchers'\n context_object_name = 'voucher'\n\n def get_form_class(self):\n form_class = VoucherForm\n for receiver, response in voucher_form_class.send(self.request.event, cls=form_class):\n if response:\n form_class = response\n return form_class\n\n def get_object(self, queryset=None) -> VoucherForm:\n url = resolve(self.request.path_info)\n try:\n return self.request.event.vouchers.get(\n id=url.kwargs['voucher']\n )\n except Voucher.DoesNotExist:\n raise Http404(_(\"The requested voucher does not exist.\"))\n\n @transaction.atomic\n def form_valid(self, form):\n messages.success(self.request, _('Your changes have been saved.'))\n if form.has_changed():\n self.object.log_action(\n 'pretix.voucher.changed', user=self.request.user, data={\n k: form.cleaned_data.get(k) for k in form.changed_data\n }\n )\n return super().form_valid(form)\n\n def get_success_url(self) -> str:\n return reverse('control:event.vouchers', kwargs={\n 'organizer': self.request.event.organizer.slug,\n 'event': self.request.event.slug,\n })\n\n\nclass VoucherCreate(EventPermissionRequiredMixin, 
CreateView):\n model = Voucher\n template_name = 'pretixcontrol/vouchers/detail.html'\n permission = 'can_change_vouchers'\n context_object_name = 'voucher'\n\n def get_form_class(self):\n form_class = VoucherForm\n for receiver, response in voucher_form_class.send(self.request.event, cls=form_class):\n if response:\n form_class = response\n return form_class\n\n def get_success_url(self) -> str:\n return reverse('control:event.vouchers', kwargs={\n 'organizer': self.request.event.organizer.slug,\n 'event': self.request.event.slug,\n })\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['instance'] = Voucher(event=self.request.event)\n return kwargs\n\n @transaction.atomic\n def form_valid(self, form):\n form.instance.event = self.request.event\n messages.success(self.request, _('The new voucher has been created.'))\n ret = super().form_valid(form)\n form.instance.log_action('pretix.voucher.added', data=dict(form.cleaned_data), user=self.request.user)\n return ret\n\n def post(self, request, *args, **kwargs):\n # TODO: Transform this into an asynchronous call?\n with request.event.lock():\n return super().post(request, *args, **kwargs)\n\n\nclass VoucherBulkCreate(EventPermissionRequiredMixin, CreateView):\n model = Voucher\n template_name = 'pretixcontrol/vouchers/bulk.html'\n permission = 'can_change_vouchers'\n context_object_name = 'voucher'\n\n def get_success_url(self) -> str:\n return reverse('control:event.vouchers', kwargs={\n 'organizer': self.request.event.organizer.slug,\n 'event': self.request.event.slug,\n })\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['instance'] = Voucher(event=self.request.event)\n return kwargs\n\n @transaction.atomic\n def form_valid(self, form):\n for o in form.save(self.request.event):\n o.log_action('pretix.voucher.added', data=form.cleaned_data, user=self.request.user)\n messages.success(self.request, _('The new vouchers have been created.'))\n return HttpResponseRedirect(self.get_success_url())\n\n def get_form_class(self):\n form_class = VoucherBulkForm\n for receiver, response in voucher_form_class.send(self.request.event, cls=form_class):\n if response:\n form_class = response\n return form_class\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n ctx['code_length'] = settings.ENTROPY['voucher_code']\n return ctx\n\n def post(self, request, *args, **kwargs):\n # TODO: Transform this into an asynchronous call?\n with request.event.lock():\n return super().post(request, *args, **kwargs)\n\n\nclass VoucherRNG(EventPermissionRequiredMixin, View):\n permission = 'can_change_vouchers'\n\n def get(self, request, *args, **kwargs):\n codes = set()\n try:\n num = int(request.GET.get('num', '5'))\n except ValueError: # NOQA\n return HttpResponseBadRequest()\n\n while len(codes) < num:\n new_codes = set()\n for i in range(min(num - len(codes), 500)): # Work around SQLite's SQLITE_MAX_VARIABLE_NUMBER\n new_codes.add(_generate_random_code())\n new_codes -= set([v['code'] for v in Voucher.objects.filter(code__in=new_codes).values('code')])\n codes |= new_codes\n\n return JsonResponse({\n 'codes': list(codes)\n })\n\n def get_success_url(self) -> str:\n return reverse('control:event.vouchers', kwargs={\n 'organizer': self.request.event.organizer.slug,\n 'event': self.request.event.slug,\n })\n", "path": "src/pretix/control/views/vouchers.py"}]} | 3,546 | 293 |
gh_patches_debug_8893 | rasdani/github-patches | git_diff | vacanza__python-holidays-806 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Swaziland deprecation warning
Resolves #793.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `holidays/__init__.py`
Content:
```
1 # python-holidays
2 # ---------------
3 # A fast, efficient Python library for generating country, province and state
4 # specific sets of holidays on the fly. It aims to make determining whether a
5 # specific date is a holiday as fast and flexible as possible.
6 #
7 # Authors: dr-prodigy <[email protected]> (c) 2017-2022
8 # ryanss <[email protected]> (c) 2014-2017
9 # Website: https://github.com/dr-prodigy/python-holidays
10 # License: MIT (see LICENSE file)
11 from holidays.constants import (
12 MON,
13 TUE,
14 WED,
15 THU,
16 FRI,
17 SAT,
18 SUN,
19 WEEKEND,
20 JAN,
21 FEB,
22 MAR,
23 APR,
24 MAY,
25 JUN,
26 JUL,
27 AUG,
28 SEP,
29 OCT,
30 NOV,
31 DEC,
32 )
33 from holidays.countries import *
34 from holidays.financial import *
35 from holidays.holiday_base import * # * import required for IDE docstrings
36 from holidays.utils import (
37 CountryHoliday,
38 country_holidays,
39 financial_holidays,
40 list_supported_countries,
41 list_supported_financial,
42 )
43
44 __version__ = "0.17"
45
```
Path: `holidays/countries/eswatini.py`
Content:
```
1 # python-holidays
2 # ---------------
3 # A fast, efficient Python library for generating country, province and state
4 # specific sets of holidays on the fly. It aims to make determining whether a
5 # specific date is a holiday as fast and flexible as possible.
6 #
7 # Authors: dr-prodigy <[email protected]> (c) 2017-2022
8 # ryanss <[email protected]> (c) 2014-2017
9 # Website: https://github.com/dr-prodigy/python-holidays
10 # License: MIT (see LICENSE file)
11
12 import warnings
13 from datetime import date
14
15 from dateutil.easter import easter
16 from dateutil.relativedelta import relativedelta as rd
17
18 from holidays.constants import SUN, JAN, APR, MAY, JUL, SEP, DEC
19 from holidays.holiday_base import HolidayBase
20
21
22 class Eswatini(HolidayBase):
23 """
24 https://swazilii.org/sz/legislation/act/1938/71
25 https://www.officeholidays.com/countries/swaziland
26 """
27
28 country = "SZ"
29
30 def _populate(self, year):
31 super()._populate(year)
32
33 # Observed since 1938
34 if year > 1938:
35 self[date(year, JAN, 1)] = "New Year's Day"
36
37 e = easter(year)
38 good_friday = e - rd(days=2)
39 easter_monday = e + rd(days=1)
40 ascension_day = e + rd(days=39)
41 self[good_friday] = "Good Friday"
42 self[easter_monday] = "Easter Monday"
43 self[ascension_day] = "Ascension Day"
44
45 if year > 1968:
46 self[date(year, APR, 25)] = "National Flag Day"
47
48 if year > 1982:
49 # https://www.officeholidays.com/holidays/swaziland/birthday-of-late-king-sobhuza
50 self[date(year, JUL, 22)] = "Birthday of Late King Sobhuza"
51
52 if year > 1986:
53 # https://www.officeholidays.com/holidays/swaziland/birthday-of-king-mswati-iii
54 self[date(year, APR, 19)] = "King's Birthday"
55
56 self[date(year, MAY, 1)] = "Worker's Day"
57 self[date(year, SEP, 6)] = "Independence Day"
58 self[date(year, DEC, 25)] = "Christmas Day"
59 self[date(year, DEC, 26)] = "Boxing Day"
60
61 # Once-off public holidays
62 y2k = "Y2K changeover"
63
64 if year == 1999:
65 # https://mg.co.za/article/1999-12-09-swaziland-declares-bank-holidays/
66 self[date(1999, DEC, 31)] = y2k
67 if year == 2000:
68 self[date(2000, JAN, 3)] = y2k
69
70 # As of 2021/1/1, whenever a public holiday falls on a
71 # Sunday
72 # it rolls over to the following Monday
73 for k, v in list(self.items()):
74
75 if self.observed and k.weekday() == SUN and k.year == year:
76 add_days = 1
77 while self.get(k + rd(days=add_days)) is not None:
78 add_days += 1
79 self[k + rd(days=add_days)] = v + " (Day Off)"
80
81
82 class Swaziland(Eswatini):
83 warnings.warn(
84 "Swaziland is deprecated, use Eswatini instead.",
85 DeprecationWarning,
86 )
87 pass
88
89
90 class SZ(Eswatini):
91 pass
92
93
94 class SZW(Eswatini):
95 pass
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/holidays/__init__.py b/holidays/__init__.py
--- a/holidays/__init__.py
+++ b/holidays/__init__.py
@@ -41,4 +41,4 @@
list_supported_financial,
)
-__version__ = "0.17"
+__version__ = "0.17.1"
diff --git a/holidays/countries/eswatini.py b/holidays/countries/eswatini.py
--- a/holidays/countries/eswatini.py
+++ b/holidays/countries/eswatini.py
@@ -80,11 +80,13 @@
class Swaziland(Eswatini):
- warnings.warn(
- "Swaziland is deprecated, use Eswatini instead.",
- DeprecationWarning,
- )
- pass
+ def __init__(self, *args, **kwargs) -> None:
+ warnings.warn(
+ "Swaziland is deprecated, use Eswatini instead.",
+ DeprecationWarning,
+ )
+
+ super().__init__(*args, **kwargs)
class SZ(Eswatini):
| {"golden_diff": "diff --git a/holidays/__init__.py b/holidays/__init__.py\n--- a/holidays/__init__.py\n+++ b/holidays/__init__.py\n@@ -41,4 +41,4 @@\n list_supported_financial,\n )\n \n-__version__ = \"0.17\"\n+__version__ = \"0.17.1\"\ndiff --git a/holidays/countries/eswatini.py b/holidays/countries/eswatini.py\n--- a/holidays/countries/eswatini.py\n+++ b/holidays/countries/eswatini.py\n@@ -80,11 +80,13 @@\n \n \n class Swaziland(Eswatini):\n- warnings.warn(\n- \"Swaziland is deprecated, use Eswatini instead.\",\n- DeprecationWarning,\n- )\n- pass\n+ def __init__(self, *args, **kwargs) -> None:\n+ warnings.warn(\n+ \"Swaziland is deprecated, use Eswatini instead.\",\n+ DeprecationWarning,\n+ )\n+\n+ super().__init__(*args, **kwargs)\n \n \n class SZ(Eswatini):\n", "issue": "Swaziland deprecation warning\nResolves #793.\n", "before_files": [{"content": "# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Authors: dr-prodigy <[email protected]> (c) 2017-2022\n# ryanss <[email protected]> (c) 2014-2017\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\nfrom holidays.constants import (\n MON,\n TUE,\n WED,\n THU,\n FRI,\n SAT,\n SUN,\n WEEKEND,\n JAN,\n FEB,\n MAR,\n APR,\n MAY,\n JUN,\n JUL,\n AUG,\n SEP,\n OCT,\n NOV,\n DEC,\n)\nfrom holidays.countries import *\nfrom holidays.financial import *\nfrom holidays.holiday_base import * # * import required for IDE docstrings\nfrom holidays.utils import (\n CountryHoliday,\n country_holidays,\n financial_holidays,\n list_supported_countries,\n list_supported_financial,\n)\n\n__version__ = \"0.17\"\n", "path": "holidays/__init__.py"}, {"content": "# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. 
It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Authors: dr-prodigy <[email protected]> (c) 2017-2022\n# ryanss <[email protected]> (c) 2014-2017\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\n\nimport warnings\nfrom datetime import date\n\nfrom dateutil.easter import easter\nfrom dateutil.relativedelta import relativedelta as rd\n\nfrom holidays.constants import SUN, JAN, APR, MAY, JUL, SEP, DEC\nfrom holidays.holiday_base import HolidayBase\n\n\nclass Eswatini(HolidayBase):\n \"\"\"\n https://swazilii.org/sz/legislation/act/1938/71\n https://www.officeholidays.com/countries/swaziland\n \"\"\"\n\n country = \"SZ\"\n\n def _populate(self, year):\n super()._populate(year)\n\n # Observed since 1938\n if year > 1938:\n self[date(year, JAN, 1)] = \"New Year's Day\"\n\n e = easter(year)\n good_friday = e - rd(days=2)\n easter_monday = e + rd(days=1)\n ascension_day = e + rd(days=39)\n self[good_friday] = \"Good Friday\"\n self[easter_monday] = \"Easter Monday\"\n self[ascension_day] = \"Ascension Day\"\n\n if year > 1968:\n self[date(year, APR, 25)] = \"National Flag Day\"\n\n if year > 1982:\n # https://www.officeholidays.com/holidays/swaziland/birthday-of-late-king-sobhuza\n self[date(year, JUL, 22)] = \"Birthday of Late King Sobhuza\"\n\n if year > 1986:\n # https://www.officeholidays.com/holidays/swaziland/birthday-of-king-mswati-iii\n self[date(year, APR, 19)] = \"King's Birthday\"\n\n self[date(year, MAY, 1)] = \"Worker's Day\"\n self[date(year, SEP, 6)] = \"Independence Day\"\n self[date(year, DEC, 25)] = \"Christmas Day\"\n self[date(year, DEC, 26)] = \"Boxing Day\"\n\n # Once-off public holidays\n y2k = \"Y2K changeover\"\n\n if year == 1999:\n # https://mg.co.za/article/1999-12-09-swaziland-declares-bank-holidays/\n self[date(1999, DEC, 31)] = y2k\n if year == 2000:\n self[date(2000, JAN, 3)] = y2k\n\n # As of 2021/1/1, whenever a public holiday falls on a\n # Sunday\n # it rolls over to the following Monday\n for k, v in list(self.items()):\n\n if self.observed and k.weekday() == SUN and k.year == year:\n add_days = 1\n while self.get(k + rd(days=add_days)) is not None:\n add_days += 1\n self[k + rd(days=add_days)] = v + \" (Day Off)\"\n\n\nclass Swaziland(Eswatini):\n warnings.warn(\n \"Swaziland is deprecated, use Eswatini instead.\",\n DeprecationWarning,\n )\n pass\n\n\nclass SZ(Eswatini):\n pass\n\n\nclass SZW(Eswatini):\n pass\n", "path": "holidays/countries/eswatini.py"}], "after_files": [{"content": "# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. 
It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Authors: dr-prodigy <[email protected]> (c) 2017-2022\n# ryanss <[email protected]> (c) 2014-2017\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\nfrom holidays.constants import (\n MON,\n TUE,\n WED,\n THU,\n FRI,\n SAT,\n SUN,\n WEEKEND,\n JAN,\n FEB,\n MAR,\n APR,\n MAY,\n JUN,\n JUL,\n AUG,\n SEP,\n OCT,\n NOV,\n DEC,\n)\nfrom holidays.countries import *\nfrom holidays.financial import *\nfrom holidays.holiday_base import * # * import required for IDE docstrings\nfrom holidays.utils import (\n CountryHoliday,\n country_holidays,\n financial_holidays,\n list_supported_countries,\n list_supported_financial,\n)\n\n__version__ = \"0.17.1\"\n", "path": "holidays/__init__.py"}, {"content": "# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Authors: dr-prodigy <[email protected]> (c) 2017-2022\n# ryanss <[email protected]> (c) 2014-2017\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\n\nimport warnings\nfrom datetime import date\n\nfrom dateutil.easter import easter\nfrom dateutil.relativedelta import relativedelta as rd\n\nfrom holidays.constants import SUN, JAN, APR, MAY, JUL, SEP, DEC\nfrom holidays.holiday_base import HolidayBase\n\n\nclass Eswatini(HolidayBase):\n \"\"\"\n https://swazilii.org/sz/legislation/act/1938/71\n https://www.officeholidays.com/countries/swaziland\n \"\"\"\n\n country = \"SZ\"\n\n def _populate(self, year):\n super()._populate(year)\n\n # Observed since 1938\n if year > 1938:\n self[date(year, JAN, 1)] = \"New Year's Day\"\n\n e = easter(year)\n good_friday = e - rd(days=2)\n easter_monday = e + rd(days=1)\n ascension_day = e + rd(days=39)\n self[good_friday] = \"Good Friday\"\n self[easter_monday] = \"Easter Monday\"\n self[ascension_day] = \"Ascension Day\"\n\n if year > 1968:\n self[date(year, APR, 25)] = \"National Flag Day\"\n\n if year > 1982:\n # https://www.officeholidays.com/holidays/swaziland/birthday-of-late-king-sobhuza\n self[date(year, JUL, 22)] = \"Birthday of Late King Sobhuza\"\n\n if year > 1986:\n # https://www.officeholidays.com/holidays/swaziland/birthday-of-king-mswati-iii\n self[date(year, APR, 19)] = \"King's Birthday\"\n\n self[date(year, MAY, 1)] = \"Worker's Day\"\n self[date(year, SEP, 6)] = \"Independence Day\"\n self[date(year, DEC, 25)] = \"Christmas Day\"\n self[date(year, DEC, 26)] = \"Boxing Day\"\n\n # Once-off public holidays\n y2k = \"Y2K changeover\"\n\n if year == 1999:\n # https://mg.co.za/article/1999-12-09-swaziland-declares-bank-holidays/\n self[date(1999, DEC, 31)] = y2k\n if year == 2000:\n self[date(2000, JAN, 3)] = y2k\n\n # As of 2021/1/1, whenever a public holiday falls on a\n # Sunday\n # it rolls over to the following Monday\n for k, v in list(self.items()):\n\n if self.observed and k.weekday() == SUN and k.year == year:\n add_days = 1\n while self.get(k + rd(days=add_days)) is not None:\n add_days += 1\n self[k + rd(days=add_days)] = v + \" (Day Off)\"\n\n\nclass Swaziland(Eswatini):\n def __init__(self, *args, **kwargs) -> None:\n warnings.warn(\n \"Swaziland is deprecated, use Eswatini instead.\",\n DeprecationWarning,\n )\n\n super().__init__(*args, **kwargs)\n\n\nclass SZ(Eswatini):\n pass\n\n\nclass 
SZW(Eswatini):\n pass\n", "path": "holidays/countries/eswatini.py"}]} | 1,746 | 251 |
gh_patches_debug_27065 | rasdani/github-patches | git_diff | mdn__kuma-7869 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Refactorings for welcome HTML email
Based on https://github.com/mdn/kuma/pull/7866
we'll drop the `utm` query string things and we'll get rid of the plain text email template.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kuma/users/tasks.py`
Content:
```
1 import logging
2
3 from celery import task
4 from django.conf import settings
5 from django.contrib.auth import get_user_model
6 from django.utils import translation
7 from django.utils.translation import gettext_lazy as _
8
9 from kuma.core.decorators import skip_in_maintenance_mode
10 from kuma.core.email_utils import render_email
11 from kuma.core.utils import (
12 EmailMultiAlternativesRetrying,
13 send_mail_retrying,
14 strings_are_translated,
15 )
16
17 log = logging.getLogger("kuma.users.tasks")
18
19
20 WELCOME_EMAIL_STRINGS = [
21 "Like words?",
22 "Don't be shy, if you have any doubt, problems, questions: contact us! We are here to help.",
23 ]
24
25
26 @task
27 @skip_in_maintenance_mode
28 def send_recovery_email(user_pk, email, locale=None):
29 user = get_user_model().objects.get(pk=user_pk)
30 locale = locale or settings.WIKI_DEFAULT_LANGUAGE
31 url = settings.SITE_URL + user.get_recovery_url()
32 context = {"recovery_url": url, "username": user.username}
33 with translation.override(locale):
34 subject = render_email("users/email/recovery/subject.ltxt", context)
35 # Email subject *must not* contain newlines
36 subject = "".join(subject.splitlines())
37 plain = render_email("users/email/recovery/plain.ltxt", context)
38 send_mail_retrying(subject, plain, settings.DEFAULT_FROM_EMAIL, [email])
39
40
41 @task
42 @skip_in_maintenance_mode
43 def send_welcome_email(user_pk, locale):
44 user = get_user_model().objects.get(pk=user_pk)
45 if locale == settings.WIKI_DEFAULT_LANGUAGE or strings_are_translated(
46 WELCOME_EMAIL_STRINGS, locale
47 ):
48 context = {"username": user.username}
49 log.debug("Using the locale %s to send the welcome email", locale)
50 with translation.override(locale):
51 content_plain = render_email("users/email/welcome/plain.ltxt", context)
52 content_html = render_email("users/email/welcome/html.ltxt", context)
53
54 email = EmailMultiAlternativesRetrying(
55 _("Getting started with your new MDN account"),
56 content_plain,
57 settings.WELCOME_EMAIL_FROM,
58 [user.email],
59 )
60 email.attach_alternative(content_html, "text/html")
61 email.send()
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kuma/users/tasks.py b/kuma/users/tasks.py
--- a/kuma/users/tasks.py
+++ b/kuma/users/tasks.py
@@ -5,6 +5,7 @@
from django.contrib.auth import get_user_model
from django.utils import translation
from django.utils.translation import gettext_lazy as _
+from pyquery import PyQuery as pq
from kuma.core.decorators import skip_in_maintenance_mode
from kuma.core.email_utils import render_email
@@ -48,8 +49,18 @@
context = {"username": user.username}
log.debug("Using the locale %s to send the welcome email", locale)
with translation.override(locale):
- content_plain = render_email("users/email/welcome/plain.ltxt", context)
content_html = render_email("users/email/welcome/html.ltxt", context)
+ doc = pq(content_html)
+ urls = []
+ for i, link in enumerate(doc("body a[href]").items()):
+ link.text(f"{link.text()}[{i + 1}]")
+ urls.append((i + 1, link.attr("href")))
+
+ content_plain = doc("body").text().replace("\n", "\n\n")
+ if urls:
+ content_plain += "\n\n"
+ for i, url in urls:
+ content_plain += f"[{i}] {url}\n"
email = EmailMultiAlternativesRetrying(
_("Getting started with your new MDN account"),
| {"golden_diff": "diff --git a/kuma/users/tasks.py b/kuma/users/tasks.py\n--- a/kuma/users/tasks.py\n+++ b/kuma/users/tasks.py\n@@ -5,6 +5,7 @@\n from django.contrib.auth import get_user_model\n from django.utils import translation\n from django.utils.translation import gettext_lazy as _\n+from pyquery import PyQuery as pq\n \n from kuma.core.decorators import skip_in_maintenance_mode\n from kuma.core.email_utils import render_email\n@@ -48,8 +49,18 @@\n context = {\"username\": user.username}\n log.debug(\"Using the locale %s to send the welcome email\", locale)\n with translation.override(locale):\n- content_plain = render_email(\"users/email/welcome/plain.ltxt\", context)\n content_html = render_email(\"users/email/welcome/html.ltxt\", context)\n+ doc = pq(content_html)\n+ urls = []\n+ for i, link in enumerate(doc(\"body a[href]\").items()):\n+ link.text(f\"{link.text()}[{i + 1}]\")\n+ urls.append((i + 1, link.attr(\"href\")))\n+\n+ content_plain = doc(\"body\").text().replace(\"\\n\", \"\\n\\n\")\n+ if urls:\n+ content_plain += \"\\n\\n\"\n+ for i, url in urls:\n+ content_plain += f\"[{i}] {url}\\n\"\n \n email = EmailMultiAlternativesRetrying(\n _(\"Getting started with your new MDN account\"),\n", "issue": "Refactorings for welcome HTML email \nBased on https://github.com/mdn/kuma/pull/7866 \r\nwe'll drop the `utm` query string things and we'll get rid of the plain text email template. \n", "before_files": [{"content": "import logging\n\nfrom celery import task\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.utils import translation\nfrom django.utils.translation import gettext_lazy as _\n\nfrom kuma.core.decorators import skip_in_maintenance_mode\nfrom kuma.core.email_utils import render_email\nfrom kuma.core.utils import (\n EmailMultiAlternativesRetrying,\n send_mail_retrying,\n strings_are_translated,\n)\n\nlog = logging.getLogger(\"kuma.users.tasks\")\n\n\nWELCOME_EMAIL_STRINGS = [\n \"Like words?\",\n \"Don't be shy, if you have any doubt, problems, questions: contact us! 
We are here to help.\",\n]\n\n\n@task\n@skip_in_maintenance_mode\ndef send_recovery_email(user_pk, email, locale=None):\n user = get_user_model().objects.get(pk=user_pk)\n locale = locale or settings.WIKI_DEFAULT_LANGUAGE\n url = settings.SITE_URL + user.get_recovery_url()\n context = {\"recovery_url\": url, \"username\": user.username}\n with translation.override(locale):\n subject = render_email(\"users/email/recovery/subject.ltxt\", context)\n # Email subject *must not* contain newlines\n subject = \"\".join(subject.splitlines())\n plain = render_email(\"users/email/recovery/plain.ltxt\", context)\n send_mail_retrying(subject, plain, settings.DEFAULT_FROM_EMAIL, [email])\n\n\n@task\n@skip_in_maintenance_mode\ndef send_welcome_email(user_pk, locale):\n user = get_user_model().objects.get(pk=user_pk)\n if locale == settings.WIKI_DEFAULT_LANGUAGE or strings_are_translated(\n WELCOME_EMAIL_STRINGS, locale\n ):\n context = {\"username\": user.username}\n log.debug(\"Using the locale %s to send the welcome email\", locale)\n with translation.override(locale):\n content_plain = render_email(\"users/email/welcome/plain.ltxt\", context)\n content_html = render_email(\"users/email/welcome/html.ltxt\", context)\n\n email = EmailMultiAlternativesRetrying(\n _(\"Getting started with your new MDN account\"),\n content_plain,\n settings.WELCOME_EMAIL_FROM,\n [user.email],\n )\n email.attach_alternative(content_html, \"text/html\")\n email.send()\n", "path": "kuma/users/tasks.py"}], "after_files": [{"content": "import logging\n\nfrom celery import task\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.utils import translation\nfrom django.utils.translation import gettext_lazy as _\nfrom pyquery import PyQuery as pq\n\nfrom kuma.core.decorators import skip_in_maintenance_mode\nfrom kuma.core.email_utils import render_email\nfrom kuma.core.utils import (\n EmailMultiAlternativesRetrying,\n send_mail_retrying,\n strings_are_translated,\n)\n\nlog = logging.getLogger(\"kuma.users.tasks\")\n\n\nWELCOME_EMAIL_STRINGS = [\n \"Like words?\",\n \"Don't be shy, if you have any doubt, problems, questions: contact us! 
We are here to help.\",\n]\n\n\n@task\n@skip_in_maintenance_mode\ndef send_recovery_email(user_pk, email, locale=None):\n user = get_user_model().objects.get(pk=user_pk)\n locale = locale or settings.WIKI_DEFAULT_LANGUAGE\n url = settings.SITE_URL + user.get_recovery_url()\n context = {\"recovery_url\": url, \"username\": user.username}\n with translation.override(locale):\n subject = render_email(\"users/email/recovery/subject.ltxt\", context)\n # Email subject *must not* contain newlines\n subject = \"\".join(subject.splitlines())\n plain = render_email(\"users/email/recovery/plain.ltxt\", context)\n send_mail_retrying(subject, plain, settings.DEFAULT_FROM_EMAIL, [email])\n\n\n@task\n@skip_in_maintenance_mode\ndef send_welcome_email(user_pk, locale):\n user = get_user_model().objects.get(pk=user_pk)\n if locale == settings.WIKI_DEFAULT_LANGUAGE or strings_are_translated(\n WELCOME_EMAIL_STRINGS, locale\n ):\n context = {\"username\": user.username}\n log.debug(\"Using the locale %s to send the welcome email\", locale)\n with translation.override(locale):\n content_html = render_email(\"users/email/welcome/html.ltxt\", context)\n doc = pq(content_html)\n urls = []\n for i, link in enumerate(doc(\"body a[href]\").items()):\n link.text(f\"{link.text()}[{i + 1}]\")\n urls.append((i + 1, link.attr(\"href\")))\n\n content_plain = doc(\"body\").text().replace(\"\\n\", \"\\n\\n\")\n if urls:\n content_plain += \"\\n\\n\"\n for i, url in urls:\n content_plain += f\"[{i}] {url}\\n\"\n\n email = EmailMultiAlternativesRetrying(\n _(\"Getting started with your new MDN account\"),\n content_plain,\n settings.WELCOME_EMAIL_FROM,\n [user.email],\n )\n email.attach_alternative(content_html, \"text/html\")\n email.send()\n", "path": "kuma/users/tasks.py"}]} | 902 | 316 |
gh_patches_debug_38488 | rasdani/github-patches | git_diff | larq__larq-356 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make the HyperparameterScheduler compatible with the CaseOptimizer
### Feature motivation
The HyperparameterScheduler is not compatible with the CaseOptimizer since the hyperparameters are attributes of the optimizers inside the CaseOptimizer.
### Feature description
I propose one of the two possible solutions:
Either we could give HyperparameterScheduler the optimizer as an argument. It could be called via ``` HyperparameterScheduler(schedule, hyperparameter, optimizer, verbose=0) ``` and the right optimizer inside the CaseOptimizer can be addressed. (@koenhelwegen)
My second proposal would be to search the CaseOptimizer for optimizers that have the hyperparameter as attribute. Then the schedule can be applied to this optimizer only. The downside of this would be that in case there are two optimizers inside the CaseOptimizer that have a hyperparameter with the same name the schedule would be applied to both of them. I do not think this would happen very often but it could definitively be an issue. See code below for my second proposal.
### Feature implementation
``` python
class HyperparameterScheduler(tf.keras.callbacks.Callback):
"""Generic hyperparameter scheduler.
# Arguments
schedule: a function that takes an epoch index as input
(integer, indexed from 0) and returns a new hyperparameter as output.
hyperparameter: str. the name of the hyperparameter to be scheduled.
verbose: int. 0: quiet, 1: update messages.
"""
def __init__(self, schedule, hyperparameter, verbose=0):
super(HyperparameterScheduler, self).__init__()
self.schedule = schedule
self.hyperparameter = hyperparameter
self.verbose = verbose
def on_epoch_begin(self, epoch, logs=None):
for op in self.model.optimizer.optimizers:
if hasattr(op, self.hyperparameter):
hp = getattr(op, self.hyperparameter)
try: # new API
hyperparameter_val = tf.keras.backend.get_value(hp)
hyperparameter_val = self.schedule(epoch, hyperparameter_val)
except TypeError: # Support for old API for backward compatibility
hyperparameter_val = self.schedule(epoch)
tf.keras.backend.set_value(hp, hyperparameter_val)
if self.verbose > 0:
print(
f"Epoch {epoch + 1}: {self.hyperparameter} changning to {tf.keras.backend.get_value(hp)}."
)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
for op in self.model.optimizer.optimizers:
if hasattr(op, self.hyperparameter):
hp = getattr(op, self.hyperparameter)
logs[self.hyperparameter] = tf.keras.backend.get_value(hp)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `larq/callbacks.py`
Content:
```
1 import tensorflow as tf
2
3
4 class HyperparameterScheduler(tf.keras.callbacks.Callback):
5 """Generic hyperparameter scheduler.
6
7 # Arguments
8 schedule: a function that takes an epoch index as input
9 (integer, indexed from 0) and returns a new hyperparameter as output.
10 hyperparameter: str. the name of the hyperparameter to be scheduled.
11 verbose: int. 0: quiet, 1: update messages.
12 """
13
14 def __init__(self, schedule, hyperparameter, verbose=0):
15 super(HyperparameterScheduler, self).__init__()
16 self.schedule = schedule
17 self.hyperparameter = hyperparameter
18 self.verbose = verbose
19
20 def on_epoch_begin(self, epoch, logs=None):
21 if not hasattr(self.model.optimizer, self.hyperparameter):
22 raise ValueError(
23 f'Optimizer must have a "{self.hyperparameter}" attribute.'
24 )
25
26 hp = getattr(self.model.optimizer, self.hyperparameter)
27 try: # new API
28 hyperparameter_val = tf.keras.backend.get_value(hp)
29 hyperparameter_val = self.schedule(epoch, hyperparameter_val)
30 except TypeError: # Support for old API for backward compatibility
31 hyperparameter_val = self.schedule(epoch)
32
33 tf.keras.backend.set_value(hp, hyperparameter_val)
34
35 if self.verbose > 0:
36 print(
37 f"Epoch {epoch + 1}: {self.hyperparameter} changning to {tf.keras.backend.get_value(hp)}."
38 )
39
40 def on_epoch_end(self, epoch, logs=None):
41 logs = logs or {}
42 hp = getattr(self.model.optimizer, self.hyperparameter)
43 logs[self.hyperparameter] = tf.keras.backend.get_value(hp)
44
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/larq/callbacks.py b/larq/callbacks.py
--- a/larq/callbacks.py
+++ b/larq/callbacks.py
@@ -4,26 +4,40 @@
class HyperparameterScheduler(tf.keras.callbacks.Callback):
"""Generic hyperparameter scheduler.
+ !!! example
+ ```python
+ bop = lq.optimizers.Bop(threshold=1e-6, gamma=1e-3)
+ adam = tf.keras.optimizers.Adam(0.01)
+ optimizer = lq.optimizers.CaseOptimizer(
+ (lq.optimizers.Bop.is_binary_variable, bop), default_optimizer=adam,
+ )
+ callbacks = [
+ HyperparameterScheduler(lambda x: 0.001 * (0.1 ** (x // 30)), "gamma", bop)
+ ]
+ ```
# Arguments
+ optimizer: the optimizer that contains the hyperparameter that will be scheduled.
+ Defaults to `self.model.optimizer` if `optimizer == None`.
schedule: a function that takes an epoch index as input
(integer, indexed from 0) and returns a new hyperparameter as output.
hyperparameter: str. the name of the hyperparameter to be scheduled.
verbose: int. 0: quiet, 1: update messages.
"""
- def __init__(self, schedule, hyperparameter, verbose=0):
+ def __init__(self, schedule, hyperparameter, optimizer=None, verbose=0):
super(HyperparameterScheduler, self).__init__()
+ self.optimizer = optimizer if optimizer else self.model.optimizer
self.schedule = schedule
self.hyperparameter = hyperparameter
self.verbose = verbose
def on_epoch_begin(self, epoch, logs=None):
- if not hasattr(self.model.optimizer, self.hyperparameter):
+ if not hasattr(self.optimizer, self.hyperparameter):
raise ValueError(
f'Optimizer must have a "{self.hyperparameter}" attribute.'
)
- hp = getattr(self.model.optimizer, self.hyperparameter)
+ hp = getattr(self.optimizer, self.hyperparameter)
try: # new API
hyperparameter_val = tf.keras.backend.get_value(hp)
hyperparameter_val = self.schedule(epoch, hyperparameter_val)
@@ -34,10 +48,10 @@
if self.verbose > 0:
print(
- f"Epoch {epoch + 1}: {self.hyperparameter} changning to {tf.keras.backend.get_value(hp)}."
+ f"Epoch {epoch + 1}: {self.hyperparameter} changing to {tf.keras.backend.get_value(hp)}."
)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
- hp = getattr(self.model.optimizer, self.hyperparameter)
+ hp = getattr(self.optimizer, self.hyperparameter)
logs[self.hyperparameter] = tf.keras.backend.get_value(hp)
| {"golden_diff": "diff --git a/larq/callbacks.py b/larq/callbacks.py\n--- a/larq/callbacks.py\n+++ b/larq/callbacks.py\n@@ -4,26 +4,40 @@\n class HyperparameterScheduler(tf.keras.callbacks.Callback):\n \"\"\"Generic hyperparameter scheduler.\n \n+ !!! example\n+ ```python\n+ bop = lq.optimizers.Bop(threshold=1e-6, gamma=1e-3)\n+ adam = tf.keras.optimizers.Adam(0.01)\n+ optimizer = lq.optimizers.CaseOptimizer(\n+ (lq.optimizers.Bop.is_binary_variable, bop), default_optimizer=adam,\n+ )\n+ callbacks = [\n+ HyperparameterScheduler(lambda x: 0.001 * (0.1 ** (x // 30)), \"gamma\", bop)\n+ ]\n+ ```\n # Arguments\n+ optimizer: the optimizer that contains the hyperparameter that will be scheduled.\n+ Defaults to `self.model.optimizer` if `optimizer == None`.\n schedule: a function that takes an epoch index as input\n (integer, indexed from 0) and returns a new hyperparameter as output.\n hyperparameter: str. the name of the hyperparameter to be scheduled.\n verbose: int. 0: quiet, 1: update messages.\n \"\"\"\n \n- def __init__(self, schedule, hyperparameter, verbose=0):\n+ def __init__(self, schedule, hyperparameter, optimizer=None, verbose=0):\n super(HyperparameterScheduler, self).__init__()\n+ self.optimizer = optimizer if optimizer else self.model.optimizer\n self.schedule = schedule\n self.hyperparameter = hyperparameter\n self.verbose = verbose\n \n def on_epoch_begin(self, epoch, logs=None):\n- if not hasattr(self.model.optimizer, self.hyperparameter):\n+ if not hasattr(self.optimizer, self.hyperparameter):\n raise ValueError(\n f'Optimizer must have a \"{self.hyperparameter}\" attribute.'\n )\n \n- hp = getattr(self.model.optimizer, self.hyperparameter)\n+ hp = getattr(self.optimizer, self.hyperparameter)\n try: # new API\n hyperparameter_val = tf.keras.backend.get_value(hp)\n hyperparameter_val = self.schedule(epoch, hyperparameter_val)\n@@ -34,10 +48,10 @@\n \n if self.verbose > 0:\n print(\n- f\"Epoch {epoch + 1}: {self.hyperparameter} changning to {tf.keras.backend.get_value(hp)}.\"\n+ f\"Epoch {epoch + 1}: {self.hyperparameter} changing to {tf.keras.backend.get_value(hp)}.\"\n )\n \n def on_epoch_end(self, epoch, logs=None):\n logs = logs or {}\n- hp = getattr(self.model.optimizer, self.hyperparameter)\n+ hp = getattr(self.optimizer, self.hyperparameter)\n logs[self.hyperparameter] = tf.keras.backend.get_value(hp)\n", "issue": "Make the HyperparameterScheduler compatible with the CaseOptimizer\n### Feature motivation\r\nThe HyperparameterScheduler is not compatible with the CaseOptimizer since the hyperparameters are attributes of the optimizers inside the CaseOptimizer. \r\n\r\n### Feature description\r\nI propose one of the two possible solutions: \r\nEither we could give HyperparameterScheduler the optimizer as an argument. It could be called via ``` HyperparameterScheduler(schedule, hyperparameter, optimizer, verbose=0) ``` and the right optimizer inside the CaseOptimizer can be addressed. (@koenhelwegen) \r\n\r\nMy second proposal would be to search the CaseOptimizer for optimizers that have the hyperparameter as attribute. Then the schedule can be applied to this optimizer only. The downside of this would be that in case there are two optimizers inside the CaseOptimizer that have a hyperparameter with the same name the schedule would be applied to both of them. I do not think this would happen very often but it could definitively be an issue. See code below for my second proposal. 
\r\n### Feature implementation\r\n``` python \r\nclass HyperparameterScheduler(tf.keras.callbacks.Callback):\r\n \"\"\"Generic hyperparameter scheduler.\r\n # Arguments\r\n schedule: a function that takes an epoch index as input\r\n (integer, indexed from 0) and returns a new hyperparameter as output.\r\n hyperparameter: str. the name of the hyperparameter to be scheduled.\r\n verbose: int. 0: quiet, 1: update messages.\r\n \"\"\"\r\n\r\n def __init__(self, schedule, hyperparameter, verbose=0):\r\n super(HyperparameterScheduler, self).__init__()\r\n self.schedule = schedule\r\n self.hyperparameter = hyperparameter\r\n self.verbose = verbose\r\n\r\n def on_epoch_begin(self, epoch, logs=None):\r\n for op in self.model.optimizer.optimizers:\r\n if hasattr(op, self.hyperparameter):\r\n\r\n hp = getattr(op, self.hyperparameter)\r\n try: # new API\r\n hyperparameter_val = tf.keras.backend.get_value(hp)\r\n hyperparameter_val = self.schedule(epoch, hyperparameter_val)\r\n except TypeError: # Support for old API for backward compatibility\r\n hyperparameter_val = self.schedule(epoch)\r\n\r\n tf.keras.backend.set_value(hp, hyperparameter_val)\r\n\r\n if self.verbose > 0:\r\n print(\r\n f\"Epoch {epoch + 1}: {self.hyperparameter} changning to {tf.keras.backend.get_value(hp)}.\"\r\n )\r\n\r\n def on_epoch_end(self, epoch, logs=None):\r\n logs = logs or {}\r\n for op in self.model.optimizer.optimizers:\r\n if hasattr(op, self.hyperparameter):\r\n hp = getattr(op, self.hyperparameter)\r\n logs[self.hyperparameter] = tf.keras.backend.get_value(hp)\r\n```\n", "before_files": [{"content": "import tensorflow as tf\n\n\nclass HyperparameterScheduler(tf.keras.callbacks.Callback):\n \"\"\"Generic hyperparameter scheduler.\n\n # Arguments\n schedule: a function that takes an epoch index as input\n (integer, indexed from 0) and returns a new hyperparameter as output.\n hyperparameter: str. the name of the hyperparameter to be scheduled.\n verbose: int. 0: quiet, 1: update messages.\n \"\"\"\n\n def __init__(self, schedule, hyperparameter, verbose=0):\n super(HyperparameterScheduler, self).__init__()\n self.schedule = schedule\n self.hyperparameter = hyperparameter\n self.verbose = verbose\n\n def on_epoch_begin(self, epoch, logs=None):\n if not hasattr(self.model.optimizer, self.hyperparameter):\n raise ValueError(\n f'Optimizer must have a \"{self.hyperparameter}\" attribute.'\n )\n\n hp = getattr(self.model.optimizer, self.hyperparameter)\n try: # new API\n hyperparameter_val = tf.keras.backend.get_value(hp)\n hyperparameter_val = self.schedule(epoch, hyperparameter_val)\n except TypeError: # Support for old API for backward compatibility\n hyperparameter_val = self.schedule(epoch)\n\n tf.keras.backend.set_value(hp, hyperparameter_val)\n\n if self.verbose > 0:\n print(\n f\"Epoch {epoch + 1}: {self.hyperparameter} changning to {tf.keras.backend.get_value(hp)}.\"\n )\n\n def on_epoch_end(self, epoch, logs=None):\n logs = logs or {}\n hp = getattr(self.model.optimizer, self.hyperparameter)\n logs[self.hyperparameter] = tf.keras.backend.get_value(hp)\n", "path": "larq/callbacks.py"}], "after_files": [{"content": "import tensorflow as tf\n\n\nclass HyperparameterScheduler(tf.keras.callbacks.Callback):\n \"\"\"Generic hyperparameter scheduler.\n\n !!! 
example\n ```python\n bop = lq.optimizers.Bop(threshold=1e-6, gamma=1e-3)\n adam = tf.keras.optimizers.Adam(0.01)\n optimizer = lq.optimizers.CaseOptimizer(\n (lq.optimizers.Bop.is_binary_variable, bop), default_optimizer=adam,\n )\n callbacks = [\n HyperparameterScheduler(lambda x: 0.001 * (0.1 ** (x // 30)), \"gamma\", bop)\n ]\n ```\n # Arguments\n optimizer: the optimizer that contains the hyperparameter that will be scheduled.\n Defaults to `self.model.optimizer` if `optimizer == None`.\n schedule: a function that takes an epoch index as input\n (integer, indexed from 0) and returns a new hyperparameter as output.\n hyperparameter: str. the name of the hyperparameter to be scheduled.\n verbose: int. 0: quiet, 1: update messages.\n \"\"\"\n\n def __init__(self, schedule, hyperparameter, optimizer=None, verbose=0):\n super(HyperparameterScheduler, self).__init__()\n self.optimizer = optimizer if optimizer else self.model.optimizer\n self.schedule = schedule\n self.hyperparameter = hyperparameter\n self.verbose = verbose\n\n def on_epoch_begin(self, epoch, logs=None):\n if not hasattr(self.optimizer, self.hyperparameter):\n raise ValueError(\n f'Optimizer must have a \"{self.hyperparameter}\" attribute.'\n )\n\n hp = getattr(self.optimizer, self.hyperparameter)\n try: # new API\n hyperparameter_val = tf.keras.backend.get_value(hp)\n hyperparameter_val = self.schedule(epoch, hyperparameter_val)\n except TypeError: # Support for old API for backward compatibility\n hyperparameter_val = self.schedule(epoch)\n\n tf.keras.backend.set_value(hp, hyperparameter_val)\n\n if self.verbose > 0:\n print(\n f\"Epoch {epoch + 1}: {self.hyperparameter} changing to {tf.keras.backend.get_value(hp)}.\"\n )\n\n def on_epoch_end(self, epoch, logs=None):\n logs = logs or {}\n hp = getattr(self.optimizer, self.hyperparameter)\n logs[self.hyperparameter] = tf.keras.backend.get_value(hp)\n", "path": "larq/callbacks.py"}]} | 1,273 | 650 |
gh_patches_debug_118 | rasdani/github-patches | git_diff | librosa__librosa-1738 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release new version to fix scipy tests
https://github.com/librosa/librosa/commit/12dee8eabed7df14c5622b52c05393ddfeb11f4b fixed compatibility with scipy in tests but it's not included in any release.
We rely as downstream packagers on tests to ensure all python dependencies play well together.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `librosa/version.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 """Version info"""
4
5 import sys
6 import importlib
7
8 short_version = "0.10"
9 version = "0.10.1dev"
10
11
12 def __get_mod_version(modname):
13 try:
14 if modname in sys.modules:
15 mod = sys.modules[modname]
16 else:
17 mod = importlib.import_module(modname)
18 try:
19 return mod.__version__
20 except AttributeError:
21 return "installed, no version number available"
22
23 except ImportError:
24 return None
25
26
27 def show_versions() -> None:
28 """Return the version information for all librosa dependencies."""
29 core_deps = [
30 "audioread",
31 "numpy",
32 "scipy",
33 "sklearn",
34 "joblib",
35 "decorator",
36 "numba",
37 "soundfile",
38 "pooch",
39 "soxr",
40 "typing_extensions",
41 "lazy_loader",
42 "msgpack",
43 ]
44
45 extra_deps = [
46 "numpydoc",
47 "sphinx",
48 "sphinx_rtd_theme",
49 "matplotlib",
50 "sphinx_multiversion",
51 "sphinx_gallery",
52 "mir_eval",
53 "ipython",
54 "sphinxcontrib.rsvgconverter",
55 "pytest",
56 "pytest_mpl",
57 "pytest_cov",
58 "samplerate",
59 "resampy",
60 "presets",
61 "packaging",
62 ]
63
64 print("INSTALLED VERSIONS")
65 print("------------------")
66 print(f"python: {sys.version}\n")
67 print(f"librosa: {version}\n")
68 for dep in core_deps:
69 print("{}: {}".format(dep, __get_mod_version(dep)))
70 print("")
71 for dep in extra_deps:
72 print("{}: {}".format(dep, __get_mod_version(dep)))
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/librosa/version.py b/librosa/version.py
--- a/librosa/version.py
+++ b/librosa/version.py
@@ -6,7 +6,7 @@
import importlib
short_version = "0.10"
-version = "0.10.1dev"
+version = "0.10.1"
def __get_mod_version(modname):
| {"golden_diff": "diff --git a/librosa/version.py b/librosa/version.py\n--- a/librosa/version.py\n+++ b/librosa/version.py\n@@ -6,7 +6,7 @@\n import importlib\n \n short_version = \"0.10\"\n-version = \"0.10.1dev\"\n+version = \"0.10.1\"\n \n \n def __get_mod_version(modname):\n", "issue": "Release new version to fix scipy tests\nhttps://github.com/librosa/librosa/commit/12dee8eabed7df14c5622b52c05393ddfeb11f4b fixed compatibility with scipy in tests but it's not included in any release.\r\nWe rely as downstream packagers on tests to ensure all python dependencies play well together.\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Version info\"\"\"\n\nimport sys\nimport importlib\n\nshort_version = \"0.10\"\nversion = \"0.10.1dev\"\n\n\ndef __get_mod_version(modname):\n try:\n if modname in sys.modules:\n mod = sys.modules[modname]\n else:\n mod = importlib.import_module(modname)\n try:\n return mod.__version__\n except AttributeError:\n return \"installed, no version number available\"\n\n except ImportError:\n return None\n\n\ndef show_versions() -> None:\n \"\"\"Return the version information for all librosa dependencies.\"\"\"\n core_deps = [\n \"audioread\",\n \"numpy\",\n \"scipy\",\n \"sklearn\",\n \"joblib\",\n \"decorator\",\n \"numba\",\n \"soundfile\",\n \"pooch\",\n \"soxr\",\n \"typing_extensions\",\n \"lazy_loader\",\n \"msgpack\",\n ]\n\n extra_deps = [\n \"numpydoc\",\n \"sphinx\",\n \"sphinx_rtd_theme\",\n \"matplotlib\",\n \"sphinx_multiversion\",\n \"sphinx_gallery\",\n \"mir_eval\",\n \"ipython\",\n \"sphinxcontrib.rsvgconverter\",\n \"pytest\",\n \"pytest_mpl\",\n \"pytest_cov\",\n \"samplerate\",\n \"resampy\",\n \"presets\",\n \"packaging\",\n ]\n\n print(\"INSTALLED VERSIONS\")\n print(\"------------------\")\n print(f\"python: {sys.version}\\n\")\n print(f\"librosa: {version}\\n\")\n for dep in core_deps:\n print(\"{}: {}\".format(dep, __get_mod_version(dep)))\n print(\"\")\n for dep in extra_deps:\n print(\"{}: {}\".format(dep, __get_mod_version(dep)))\n", "path": "librosa/version.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Version info\"\"\"\n\nimport sys\nimport importlib\n\nshort_version = \"0.10\"\nversion = \"0.10.1\"\n\n\ndef __get_mod_version(modname):\n try:\n if modname in sys.modules:\n mod = sys.modules[modname]\n else:\n mod = importlib.import_module(modname)\n try:\n return mod.__version__\n except AttributeError:\n return \"installed, no version number available\"\n\n except ImportError:\n return None\n\n\ndef show_versions() -> None:\n \"\"\"Return the version information for all librosa dependencies.\"\"\"\n core_deps = [\n \"audioread\",\n \"numpy\",\n \"scipy\",\n \"sklearn\",\n \"joblib\",\n \"decorator\",\n \"numba\",\n \"soundfile\",\n \"pooch\",\n \"soxr\",\n \"typing_extensions\",\n \"lazy_loader\",\n \"msgpack\",\n ]\n\n extra_deps = [\n \"numpydoc\",\n \"sphinx\",\n \"sphinx_rtd_theme\",\n \"matplotlib\",\n \"sphinx_multiversion\",\n \"sphinx_gallery\",\n \"mir_eval\",\n \"ipython\",\n \"sphinxcontrib.rsvgconverter\",\n \"pytest\",\n \"pytest_mpl\",\n \"pytest_cov\",\n \"samplerate\",\n \"resampy\",\n \"presets\",\n \"packaging\",\n ]\n\n print(\"INSTALLED VERSIONS\")\n print(\"------------------\")\n print(f\"python: {sys.version}\\n\")\n print(f\"librosa: {version}\\n\")\n for dep in core_deps:\n print(\"{}: {}\".format(dep, __get_mod_version(dep)))\n print(\"\")\n for dep in extra_deps:\n print(\"{}: {}\".format(dep, __get_mod_version(dep)))\n", 
"path": "librosa/version.py"}]} | 882 | 86 |
gh_patches_debug_12127 | rasdani/github-patches | git_diff | elastic__ecs-1148 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Documentation Suggestion: anchor links on ECS fields
This is really pretty simple and I'm not sure where else to properly capture it.
The existing ECS documentation should have anchor tags on each field name in the ECS guide so you can easily deep-link directly to the relevant field.
When collaborating, I'll often say "Look at this field (such as rule.id)". I can link to the field's [category page](https://www.elastic.co/guide/en/ecs/current/ecs-rule.html), but I can't deep-link directly to the field under discussion.
Just a small quality of life change that could speed up collaboration.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/schema/finalizer.py`
Content:
```
1 import copy
2 import re
3
4 from schema import visitor
5
6 # This script takes the fleshed out deeply nested fields dictionary as emitted by
7 # cleaner.py, and performs field reuse in two phases.
8 #
9 # Phase 1 performs field reuse across field sets. E.g. `group` fields should also be under `user`.
10 # This type of reuse is then carried around if the receiving field set is also reused.
11 # In other words, user.group.* will be in other places where user is nested:
12 # source.user.* will contain source.user.group.*
13
14 # Phase 2 performs field reuse where field sets are reused within themselves, with a different name.
15 # Examples are nesting `process` within itself, as `process.parent.*`,
16 # or nesting `user` within itself at `user.target.*`.
17 # This second kind of nesting is not carried around everywhere else the receiving field set is reused.
18 # So `user.target.*` is *not* carried over to `source.user.target*` when we reuse `user` under `source`.
19
20
21 def finalize(fields):
22 '''Intended entrypoint of the finalizer.'''
23 perform_reuse(fields)
24 calculate_final_values(fields)
25
26
27 def order_reuses(fields):
28 foreign_reuses = {}
29 self_nestings = {}
30 for schema_name, schema in fields.items():
31 if not 'reusable' in schema['schema_details']:
32 continue
33 reuse_order = schema['schema_details']['reusable']['order']
34 for reuse_entry in schema['schema_details']['reusable']['expected']:
35 destination_schema_name = reuse_entry['full'].split('.')[0]
36 if destination_schema_name == schema_name:
37 # Accumulate self-nestings for phase 2.
38 self_nestings.setdefault(destination_schema_name, [])
39 self_nestings[destination_schema_name].extend([reuse_entry])
40 else:
41 # Group foreign reuses by 'order' attribute.
42 foreign_reuses.setdefault(reuse_order, {})
43 foreign_reuses[reuse_order].setdefault(schema_name, [])
44 foreign_reuses[reuse_order][schema_name].extend([reuse_entry])
45 return foreign_reuses, self_nestings
46
47
48 def perform_reuse(fields):
49 '''Performs field reuse in two phases'''
50 foreign_reuses, self_nestings = order_reuses(fields)
51
52 # Phase 1: foreign reuse
53 # These are done respecting the reusable.order attribute.
54 # This lets us force the order for chained reuses (e.g. group => user, then user => many places)
55 for order in sorted(foreign_reuses.keys()):
56 for schema_name, reuse_entries in foreign_reuses[order].items():
57 schema = fields[schema_name]
58 for reuse_entry in reuse_entries:
59 # print(order, "{} => {}".format(schema_name, reuse_entry['full']))
60 nest_as = reuse_entry['as']
61 destination_schema_name = reuse_entry['full'].split('.')[0]
62 destination_schema = fields[destination_schema_name]
63 ensure_valid_reuse(schema, destination_schema)
64
65 new_field_details = copy.deepcopy(schema['field_details'])
66 new_field_details['name'] = nest_as
67 new_field_details['original_fieldset'] = schema_name
68 new_field_details['intermediate'] = True
69 reused_fields = copy.deepcopy(schema['fields'])
70 set_original_fieldset(reused_fields, schema_name)
71 destination_fields = field_group_at_path(reuse_entry['at'], fields)
72 destination_fields[nest_as] = {
73 'field_details': new_field_details,
74 'fields': reused_fields,
75 }
76 append_reused_here(schema, reuse_entry, destination_schema)
77
78 # Phase 2: self-nesting
79 for schema_name, reuse_entries in self_nestings.items():
80 schema = fields[schema_name]
81 ensure_valid_reuse(schema)
82 # Since we're about self-nest more fields within these, make a pristine copy first
83 reused_fields = copy.deepcopy(schema['fields'])
84 set_original_fieldset(reused_fields, schema_name)
85 for reuse_entry in reuse_entries:
86 # print("x {} => {}".format(schema_name, reuse_entry['full']))
87 nest_as = reuse_entry['as']
88 new_field_details = copy.deepcopy(schema['field_details'])
89 new_field_details['name'] = nest_as
90 new_field_details['original_fieldset'] = schema_name
91 new_field_details['intermediate'] = True
92 destination_fields = schema['fields']
93 destination_fields[nest_as] = {
94 'field_details': new_field_details,
95 # Make a new copy of the pristine copy
96 'fields': copy.deepcopy(reused_fields),
97 }
98 append_reused_here(schema, reuse_entry, fields[schema_name])
99
100
101 def ensure_valid_reuse(reused_schema, destination_schema=None):
102 '''
103 Raise if either the reused schema or destination schema have root=true.
104
105 Second param is optional, if testing for a self-nesting (where source=destination).
106 '''
107 if reused_schema['schema_details']['root']:
108 msg = "Schema {} has attribute root=true and therefore cannot be reused.".format(
109 reused_schema['field_details']['name'])
110 raise ValueError(msg)
111 elif destination_schema and destination_schema['schema_details']['root']:
112 msg = "Schema {} has attribute root=true and therefore cannot have other field sets reused inside it.".format(
113 destination_schema['field_details']['name'])
114 raise ValueError(msg)
115
116
117 def append_reused_here(reused_schema, reuse_entry, destination_schema):
118 '''Captures two ways of denoting what field sets are reused under a given field set'''
119 # Legacy, too limited
120 destination_schema['schema_details'].setdefault('nestings', [])
121 destination_schema['schema_details']['nestings'] = sorted(
122 destination_schema['schema_details']['nestings'] + [reuse_entry['full']]
123 )
124 # New roomier way: we could eventually include contextual description here
125 destination_schema['schema_details'].setdefault('reused_here', [])
126 reused_here_entry = {
127 'schema_name': reused_schema['field_details']['name'],
128 'full': reuse_entry['full'],
129 'short': reused_schema['field_details']['short'],
130 }
131 # Check for beta attribute
132 if 'beta' in reuse_entry:
133 reused_here_entry['beta'] = reuse_entry['beta']
134 destination_schema['schema_details']['reused_here'].extend([reused_here_entry])
135
136
137 def set_original_fieldset(fields, original_fieldset):
138 '''Recursively set the 'original_fieldset' attribute for all fields in a group of fields'''
139 def func(details):
140 # Don't override if already set (e.g. 'group' for user.group.* fields)
141 details['field_details'].setdefault('original_fieldset', original_fieldset)
142 visitor.visit_fields(fields, field_func=func)
143
144
145 def field_group_at_path(dotted_path, fields):
146 '''Returns the ['fields'] hash at the dotted_path.'''
147 path = dotted_path.split('.')
148 nesting = fields
149 for next_field in path:
150 field = nesting.get(next_field, None)
151 if not field:
152 raise ValueError("Field {} not found, failed to find {}".format(dotted_path, next_field))
153 nesting = field.get('fields', None)
154 if not nesting:
155 field_type = field['field_details']['type']
156 if field_type in ['object', 'group', 'nested']:
157 nesting = field['fields'] = {}
158 else:
159 raise ValueError("Field {} (type {}) already exists and cannot have nested fields".format(
160 dotted_path, field_type))
161 return nesting
162
163
164 def calculate_final_values(fields):
165 '''
166 This function navigates all fields recursively.
167
168 It populates a few more values for the fields, especially path-based values
169 like flat_name.
170 '''
171 visitor.visit_fields_with_path(fields, field_finalizer)
172
173
174 def field_finalizer(details, path):
175 '''This is the function called by the visitor to perform the work of calculate_final_values'''
176 name_array = path + [details['field_details']['node_name']]
177 flat_name = '.'.join(name_array)
178 details['field_details']['flat_name'] = flat_name
179 details['field_details']['dashed_name'] = re.sub('[@_\.]', '-', flat_name)
180 if 'multi_fields' in details['field_details']:
181 for mf in details['field_details']['multi_fields']:
182 mf['flat_name'] = flat_name + '.' + mf['name']
183
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scripts/schema/finalizer.py b/scripts/schema/finalizer.py
--- a/scripts/schema/finalizer.py
+++ b/scripts/schema/finalizer.py
@@ -176,7 +176,7 @@
name_array = path + [details['field_details']['node_name']]
flat_name = '.'.join(name_array)
details['field_details']['flat_name'] = flat_name
- details['field_details']['dashed_name'] = re.sub('[@_\.]', '-', flat_name)
+ details['field_details']['dashed_name'] = re.sub('[_\.]', '-', flat_name).replace('@', '')
if 'multi_fields' in details['field_details']:
for mf in details['field_details']['multi_fields']:
mf['flat_name'] = flat_name + '.' + mf['name']
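Editorial aside (not part of the dataset record above): the `dashed_name` value changed by this golden diff appears to be what the ECS doc build uses for per-field anchors, so the regex tweak decides what anchor a name such as `@timestamp` gets — underscores and dots still become dashes, while a leading `@` is now dropped instead of becoming a leading dash. A minimal sketch of that behavioural difference (the helper names and sample field names are illustrative only; raw strings are used here where the original file uses plain literals):

```python
import re

def dashed_name_before(flat_name):
    # original expression: '@', '_' and '.' all map to '-'
    return re.sub(r'[@_\.]', '-', flat_name)

def dashed_name_after(flat_name):
    # patched expression: '_' and '.' map to '-', '@' is simply removed
    return re.sub(r'[_\.]', '-', flat_name).replace('@', '')

print(dashed_name_before('@timestamp'))    # -timestamp  (leading dash)
print(dashed_name_after('@timestamp'))     # timestamp
print(dashed_name_after('log.file.path'))  # log-file-path (same result as before the patch)
```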
| {"golden_diff": "diff --git a/scripts/schema/finalizer.py b/scripts/schema/finalizer.py\n--- a/scripts/schema/finalizer.py\n+++ b/scripts/schema/finalizer.py\n@@ -176,7 +176,7 @@\n name_array = path + [details['field_details']['node_name']]\n flat_name = '.'.join(name_array)\n details['field_details']['flat_name'] = flat_name\n- details['field_details']['dashed_name'] = re.sub('[@_\\.]', '-', flat_name)\n+ details['field_details']['dashed_name'] = re.sub('[_\\.]', '-', flat_name).replace('@', '')\n if 'multi_fields' in details['field_details']:\n for mf in details['field_details']['multi_fields']:\n mf['flat_name'] = flat_name + '.' + mf['name']\n", "issue": "Documentation Suggestion: anchor links on ECS fields\nThis is really pretty simple and I'm not sure where else to properly capture it. \r\n\r\nThe existing ECS documentation should have anchor tags on each field name in the ECS guide so you can easily deep-link directly to the relevant field. \r\n\r\nWhen collaborating, often I'll say \"Look at this field(such as rule.id)\". I can link the fields [category page](https://www.elastic.co/guide/en/ecs/current/ecs-rule.html), but I can't deep-link directly to the field of discussion. \r\n\r\nJust a small quality of life change that could speed up collaboration. \r\n\n", "before_files": [{"content": "import copy\nimport re\n\nfrom schema import visitor\n\n# This script takes the fleshed out deeply nested fields dictionary as emitted by\n# cleaner.py, and performs field reuse in two phases.\n#\n# Phase 1 performs field reuse across field sets. E.g. `group` fields should also be under `user`.\n# This type of reuse is then carried around if the receiving field set is also reused.\n# In other words, user.group.* will be in other places where user is nested:\n# source.user.* will contain source.user.group.*\n\n# Phase 2 performs field reuse where field sets are reused within themselves, with a different name.\n# Examples are nesting `process` within itself, as `process.parent.*`,\n# or nesting `user` within itself at `user.target.*`.\n# This second kind of nesting is not carried around everywhere else the receiving field set is reused.\n# So `user.target.*` is *not* carried over to `source.user.target*` when we reuse `user` under `source`.\n\n\ndef finalize(fields):\n '''Intended entrypoint of the finalizer.'''\n perform_reuse(fields)\n calculate_final_values(fields)\n\n\ndef order_reuses(fields):\n foreign_reuses = {}\n self_nestings = {}\n for schema_name, schema in fields.items():\n if not 'reusable' in schema['schema_details']:\n continue\n reuse_order = schema['schema_details']['reusable']['order']\n for reuse_entry in schema['schema_details']['reusable']['expected']:\n destination_schema_name = reuse_entry['full'].split('.')[0]\n if destination_schema_name == schema_name:\n # Accumulate self-nestings for phase 2.\n self_nestings.setdefault(destination_schema_name, [])\n self_nestings[destination_schema_name].extend([reuse_entry])\n else:\n # Group foreign reuses by 'order' attribute.\n foreign_reuses.setdefault(reuse_order, {})\n foreign_reuses[reuse_order].setdefault(schema_name, [])\n foreign_reuses[reuse_order][schema_name].extend([reuse_entry])\n return foreign_reuses, self_nestings\n\n\ndef perform_reuse(fields):\n '''Performs field reuse in two phases'''\n foreign_reuses, self_nestings = order_reuses(fields)\n\n # Phase 1: foreign reuse\n # These are done respecting the reusable.order attribute.\n # This lets us force the order for chained reuses (e.g. 
group => user, then user => many places)\n for order in sorted(foreign_reuses.keys()):\n for schema_name, reuse_entries in foreign_reuses[order].items():\n schema = fields[schema_name]\n for reuse_entry in reuse_entries:\n # print(order, \"{} => {}\".format(schema_name, reuse_entry['full']))\n nest_as = reuse_entry['as']\n destination_schema_name = reuse_entry['full'].split('.')[0]\n destination_schema = fields[destination_schema_name]\n ensure_valid_reuse(schema, destination_schema)\n\n new_field_details = copy.deepcopy(schema['field_details'])\n new_field_details['name'] = nest_as\n new_field_details['original_fieldset'] = schema_name\n new_field_details['intermediate'] = True\n reused_fields = copy.deepcopy(schema['fields'])\n set_original_fieldset(reused_fields, schema_name)\n destination_fields = field_group_at_path(reuse_entry['at'], fields)\n destination_fields[nest_as] = {\n 'field_details': new_field_details,\n 'fields': reused_fields,\n }\n append_reused_here(schema, reuse_entry, destination_schema)\n\n # Phase 2: self-nesting\n for schema_name, reuse_entries in self_nestings.items():\n schema = fields[schema_name]\n ensure_valid_reuse(schema)\n # Since we're about self-nest more fields within these, make a pristine copy first\n reused_fields = copy.deepcopy(schema['fields'])\n set_original_fieldset(reused_fields, schema_name)\n for reuse_entry in reuse_entries:\n # print(\"x {} => {}\".format(schema_name, reuse_entry['full']))\n nest_as = reuse_entry['as']\n new_field_details = copy.deepcopy(schema['field_details'])\n new_field_details['name'] = nest_as\n new_field_details['original_fieldset'] = schema_name\n new_field_details['intermediate'] = True\n destination_fields = schema['fields']\n destination_fields[nest_as] = {\n 'field_details': new_field_details,\n # Make a new copy of the pristine copy\n 'fields': copy.deepcopy(reused_fields),\n }\n append_reused_here(schema, reuse_entry, fields[schema_name])\n\n\ndef ensure_valid_reuse(reused_schema, destination_schema=None):\n '''\n Raise if either the reused schema or destination schema have root=true.\n\n Second param is optional, if testing for a self-nesting (where source=destination).\n '''\n if reused_schema['schema_details']['root']:\n msg = \"Schema {} has attribute root=true and therefore cannot be reused.\".format(\n reused_schema['field_details']['name'])\n raise ValueError(msg)\n elif destination_schema and destination_schema['schema_details']['root']:\n msg = \"Schema {} has attribute root=true and therefore cannot have other field sets reused inside it.\".format(\n destination_schema['field_details']['name'])\n raise ValueError(msg)\n\n\ndef append_reused_here(reused_schema, reuse_entry, destination_schema):\n '''Captures two ways of denoting what field sets are reused under a given field set'''\n # Legacy, too limited\n destination_schema['schema_details'].setdefault('nestings', [])\n destination_schema['schema_details']['nestings'] = sorted(\n destination_schema['schema_details']['nestings'] + [reuse_entry['full']]\n )\n # New roomier way: we could eventually include contextual description here\n destination_schema['schema_details'].setdefault('reused_here', [])\n reused_here_entry = {\n 'schema_name': reused_schema['field_details']['name'],\n 'full': reuse_entry['full'],\n 'short': reused_schema['field_details']['short'],\n }\n # Check for beta attribute\n if 'beta' in reuse_entry:\n reused_here_entry['beta'] = reuse_entry['beta']\n 
destination_schema['schema_details']['reused_here'].extend([reused_here_entry])\n\n\ndef set_original_fieldset(fields, original_fieldset):\n '''Recursively set the 'original_fieldset' attribute for all fields in a group of fields'''\n def func(details):\n # Don't override if already set (e.g. 'group' for user.group.* fields)\n details['field_details'].setdefault('original_fieldset', original_fieldset)\n visitor.visit_fields(fields, field_func=func)\n\n\ndef field_group_at_path(dotted_path, fields):\n '''Returns the ['fields'] hash at the dotted_path.'''\n path = dotted_path.split('.')\n nesting = fields\n for next_field in path:\n field = nesting.get(next_field, None)\n if not field:\n raise ValueError(\"Field {} not found, failed to find {}\".format(dotted_path, next_field))\n nesting = field.get('fields', None)\n if not nesting:\n field_type = field['field_details']['type']\n if field_type in ['object', 'group', 'nested']:\n nesting = field['fields'] = {}\n else:\n raise ValueError(\"Field {} (type {}) already exists and cannot have nested fields\".format(\n dotted_path, field_type))\n return nesting\n\n\ndef calculate_final_values(fields):\n '''\n This function navigates all fields recursively.\n\n It populates a few more values for the fields, especially path-based values\n like flat_name.\n '''\n visitor.visit_fields_with_path(fields, field_finalizer)\n\n\ndef field_finalizer(details, path):\n '''This is the function called by the visitor to perform the work of calculate_final_values'''\n name_array = path + [details['field_details']['node_name']]\n flat_name = '.'.join(name_array)\n details['field_details']['flat_name'] = flat_name\n details['field_details']['dashed_name'] = re.sub('[@_\\.]', '-', flat_name)\n if 'multi_fields' in details['field_details']:\n for mf in details['field_details']['multi_fields']:\n mf['flat_name'] = flat_name + '.' + mf['name']\n", "path": "scripts/schema/finalizer.py"}], "after_files": [{"content": "import copy\nimport re\n\nfrom schema import visitor\n\n# This script takes the fleshed out deeply nested fields dictionary as emitted by\n# cleaner.py, and performs field reuse in two phases.\n#\n# Phase 1 performs field reuse across field sets. E.g. 
`group` fields should also be under `user`.\n# This type of reuse is then carried around if the receiving field set is also reused.\n# In other words, user.group.* will be in other places where user is nested:\n# source.user.* will contain source.user.group.*\n\n# Phase 2 performs field reuse where field sets are reused within themselves, with a different name.\n# Examples are nesting `process` within itself, as `process.parent.*`,\n# or nesting `user` within itself at `user.target.*`.\n# This second kind of nesting is not carried around everywhere else the receiving field set is reused.\n# So `user.target.*` is *not* carried over to `source.user.target*` when we reuse `user` under `source`.\n\n\ndef finalize(fields):\n '''Intended entrypoint of the finalizer.'''\n perform_reuse(fields)\n calculate_final_values(fields)\n\n\ndef order_reuses(fields):\n foreign_reuses = {}\n self_nestings = {}\n for schema_name, schema in fields.items():\n if not 'reusable' in schema['schema_details']:\n continue\n reuse_order = schema['schema_details']['reusable']['order']\n for reuse_entry in schema['schema_details']['reusable']['expected']:\n destination_schema_name = reuse_entry['full'].split('.')[0]\n if destination_schema_name == schema_name:\n # Accumulate self-nestings for phase 2.\n self_nestings.setdefault(destination_schema_name, [])\n self_nestings[destination_schema_name].extend([reuse_entry])\n else:\n # Group foreign reuses by 'order' attribute.\n foreign_reuses.setdefault(reuse_order, {})\n foreign_reuses[reuse_order].setdefault(schema_name, [])\n foreign_reuses[reuse_order][schema_name].extend([reuse_entry])\n return foreign_reuses, self_nestings\n\n\ndef perform_reuse(fields):\n '''Performs field reuse in two phases'''\n foreign_reuses, self_nestings = order_reuses(fields)\n\n # Phase 1: foreign reuse\n # These are done respecting the reusable.order attribute.\n # This lets us force the order for chained reuses (e.g. 
group => user, then user => many places)\n for order in sorted(foreign_reuses.keys()):\n for schema_name, reuse_entries in foreign_reuses[order].items():\n schema = fields[schema_name]\n for reuse_entry in reuse_entries:\n # print(order, \"{} => {}\".format(schema_name, reuse_entry['full']))\n nest_as = reuse_entry['as']\n destination_schema_name = reuse_entry['full'].split('.')[0]\n destination_schema = fields[destination_schema_name]\n ensure_valid_reuse(schema, destination_schema)\n\n new_field_details = copy.deepcopy(schema['field_details'])\n new_field_details['name'] = nest_as\n new_field_details['original_fieldset'] = schema_name\n new_field_details['intermediate'] = True\n reused_fields = copy.deepcopy(schema['fields'])\n set_original_fieldset(reused_fields, schema_name)\n destination_fields = field_group_at_path(reuse_entry['at'], fields)\n destination_fields[nest_as] = {\n 'field_details': new_field_details,\n 'fields': reused_fields,\n }\n append_reused_here(schema, reuse_entry, destination_schema)\n\n # Phase 2: self-nesting\n for schema_name, reuse_entries in self_nestings.items():\n schema = fields[schema_name]\n ensure_valid_reuse(schema)\n # Since we're about self-nest more fields within these, make a pristine copy first\n reused_fields = copy.deepcopy(schema['fields'])\n set_original_fieldset(reused_fields, schema_name)\n for reuse_entry in reuse_entries:\n # print(\"x {} => {}\".format(schema_name, reuse_entry['full']))\n nest_as = reuse_entry['as']\n new_field_details = copy.deepcopy(schema['field_details'])\n new_field_details['name'] = nest_as\n new_field_details['original_fieldset'] = schema_name\n new_field_details['intermediate'] = True\n destination_fields = schema['fields']\n destination_fields[nest_as] = {\n 'field_details': new_field_details,\n # Make a new copy of the pristine copy\n 'fields': copy.deepcopy(reused_fields),\n }\n append_reused_here(schema, reuse_entry, fields[schema_name])\n\n\ndef ensure_valid_reuse(reused_schema, destination_schema=None):\n '''\n Raise if either the reused schema or destination schema have root=true.\n\n Second param is optional, if testing for a self-nesting (where source=destination).\n '''\n if reused_schema['schema_details']['root']:\n msg = \"Schema {} has attribute root=true and therefore cannot be reused.\".format(\n reused_schema['field_details']['name'])\n raise ValueError(msg)\n elif destination_schema and destination_schema['schema_details']['root']:\n msg = \"Schema {} has attribute root=true and therefore cannot have other field sets reused inside it.\".format(\n destination_schema['field_details']['name'])\n raise ValueError(msg)\n\n\ndef append_reused_here(reused_schema, reuse_entry, destination_schema):\n '''Captures two ways of denoting what field sets are reused under a given field set'''\n # Legacy, too limited\n destination_schema['schema_details'].setdefault('nestings', [])\n destination_schema['schema_details']['nestings'] = sorted(\n destination_schema['schema_details']['nestings'] + [reuse_entry['full']]\n )\n # New roomier way: we could eventually include contextual description here\n destination_schema['schema_details'].setdefault('reused_here', [])\n reused_here_entry = {\n 'schema_name': reused_schema['field_details']['name'],\n 'full': reuse_entry['full'],\n 'short': reused_schema['field_details']['short'],\n }\n # Check for beta attribute\n if 'beta' in reuse_entry:\n reused_here_entry['beta'] = reuse_entry['beta']\n 
destination_schema['schema_details']['reused_here'].extend([reused_here_entry])\n\n\ndef set_original_fieldset(fields, original_fieldset):\n '''Recursively set the 'original_fieldset' attribute for all fields in a group of fields'''\n def func(details):\n # Don't override if already set (e.g. 'group' for user.group.* fields)\n details['field_details'].setdefault('original_fieldset', original_fieldset)\n visitor.visit_fields(fields, field_func=func)\n\n\ndef field_group_at_path(dotted_path, fields):\n '''Returns the ['fields'] hash at the dotted_path.'''\n path = dotted_path.split('.')\n nesting = fields\n for next_field in path:\n field = nesting.get(next_field, None)\n if not field:\n raise ValueError(\"Field {} not found, failed to find {}\".format(dotted_path, next_field))\n nesting = field.get('fields', None)\n if not nesting:\n field_type = field['field_details']['type']\n if field_type in ['object', 'group', 'nested']:\n nesting = field['fields'] = {}\n else:\n raise ValueError(\"Field {} (type {}) already exists and cannot have nested fields\".format(\n dotted_path, field_type))\n return nesting\n\n\ndef calculate_final_values(fields):\n '''\n This function navigates all fields recursively.\n\n It populates a few more values for the fields, especially path-based values\n like flat_name.\n '''\n visitor.visit_fields_with_path(fields, field_finalizer)\n\n\ndef field_finalizer(details, path):\n '''This is the function called by the visitor to perform the work of calculate_final_values'''\n name_array = path + [details['field_details']['node_name']]\n flat_name = '.'.join(name_array)\n details['field_details']['flat_name'] = flat_name\n details['field_details']['dashed_name'] = re.sub('[_\\.]', '-', flat_name).replace('@', '')\n if 'multi_fields' in details['field_details']:\n for mf in details['field_details']['multi_fields']:\n mf['flat_name'] = flat_name + '.' + mf['name']\n", "path": "scripts/schema/finalizer.py"}]} | 2,605 | 177 |
gh_patches_debug_11399 | rasdani/github-patches | git_diff | ethereum__web3.py-407 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove shh from default list of modules
The whisper protocol is not standardized enough to be in the default list.
Also, the docs should note the currently fractured nature of whisper.
See #384
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `web3/main.py`
Content:
```
1 from __future__ import absolute_import
2
3 from eth_utils import (
4 apply_to_return_value,
5 add_0x_prefix,
6 from_wei,
7 is_address,
8 is_checksum_address,
9 keccak,
10 remove_0x_prefix,
11 to_checksum_address,
12 to_wei,
13 )
14
15 from web3.admin import Admin
16 from web3.eth import Eth
17 from web3.iban import Iban
18 from web3.miner import Miner
19 from web3.net import Net
20 from web3.personal import Personal
21 from web3.shh import Shh
22 from web3.testing import Testing
23 from web3.txpool import TxPool
24 from web3.version import Version
25
26 from web3.providers.ipc import (
27 IPCProvider,
28 )
29 from web3.providers.rpc import (
30 HTTPProvider,
31 )
32 from web3.providers.tester import (
33 TestRPCProvider,
34 EthereumTesterProvider,
35 )
36
37 from web3.manager import (
38 RequestManager,
39 )
40
41 from web3.utils.datastructures import (
42 HexBytes,
43 )
44 from web3.utils.encoding import (
45 hex_encode_abi_type,
46 to_bytes,
47 to_int,
48 to_hex,
49 to_text,
50 )
51
52
53 def get_default_modules():
54 return {
55 "eth": Eth,
56 "shh": Shh,
57 "net": Net,
58 "personal": Personal,
59 "version": Version,
60 "txpool": TxPool,
61 "miner": Miner,
62 "admin": Admin,
63 "testing": Testing,
64 }
65
66
67 class Web3(object):
68 # Providers
69 HTTPProvider = HTTPProvider
70 IPCProvider = IPCProvider
71 TestRPCProvider = TestRPCProvider
72 EthereumTesterProvider = EthereumTesterProvider
73
74 # Managers
75 RequestManager = RequestManager
76
77 # Iban
78 Iban = Iban
79
80 # Encoding and Decoding
81 toBytes = staticmethod(to_bytes)
82 toInt = staticmethod(to_int)
83 toHex = staticmethod(to_hex)
84 toText = staticmethod(to_text)
85
86 # Currency Utility
87 toWei = staticmethod(to_wei)
88 fromWei = staticmethod(from_wei)
89
90 # Address Utility
91 isAddress = staticmethod(is_address)
92 isChecksumAddress = staticmethod(is_checksum_address)
93 toChecksumAddress = staticmethod(to_checksum_address)
94
95 def __init__(self, providers, middlewares=None, modules=None):
96 self.manager = RequestManager(self, providers, middlewares)
97
98 if modules is None:
99 modules = get_default_modules()
100
101 for module_name, module_class in modules.items():
102 module_class.attach(self, module_name)
103
104 @property
105 def middleware_stack(self):
106 return self.manager.middleware_stack
107
108 @property
109 def providers(self):
110 return self.manager.providers
111
112 def setProviders(self, providers):
113 self.manager.setProvider(providers)
114
115 @staticmethod
116 @apply_to_return_value(HexBytes)
117 def sha3(primitive=None, text=None, hexstr=None):
118 if isinstance(primitive, (bytes, int, type(None))):
119 input_bytes = to_bytes(primitive, hexstr=hexstr, text=text)
120 return keccak(input_bytes)
121
122 raise TypeError(
123 "You called sha3 with first arg %r and keywords %r. You must call it with one of "
124 "these approaches: sha3(text='txt'), sha3(hexstr='0x747874'), "
125 "sha3(b'\\x74\\x78\\x74'), or sha3(0x747874)." % (
126 primitive,
127 {'text': text, 'hexstr': hexstr}
128 )
129 )
130
131 @classmethod
132 def soliditySha3(cls, abi_types, values):
133 """
134 Executes sha3 (keccak256) exactly as Solidity does.
135 Takes list of abi_types as inputs -- `[uint24, int8[], bool]`
136 and list of corresponding values -- `[20, [-1, 5, 0], True]`
137 """
138 if len(abi_types) != len(values):
139 raise ValueError(
140 "Length mismatch between provided abi types and values. Got "
141 "{0} types and {1} values.".format(len(abi_types), len(values))
142 )
143
144 hex_string = add_0x_prefix(''.join(
145 remove_0x_prefix(hex_encode_abi_type(abi_type, value))
146 for abi_type, value
147 in zip(abi_types, values)
148 ))
149 return cls.sha3(hexstr=hex_string)
150
151 def isConnected(self):
152 for provider in self.providers:
153 if provider.isConnected():
154 return True
155 else:
156 return False
157
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/web3/main.py b/web3/main.py
--- a/web3/main.py
+++ b/web3/main.py
@@ -18,7 +18,6 @@
from web3.miner import Miner
from web3.net import Net
from web3.personal import Personal
-from web3.shh import Shh
from web3.testing import Testing
from web3.txpool import TxPool
from web3.version import Version
@@ -53,7 +52,6 @@
def get_default_modules():
return {
"eth": Eth,
- "shh": Shh,
"net": Net,
"personal": Personal,
"version": Version,
| {"golden_diff": "diff --git a/web3/main.py b/web3/main.py\n--- a/web3/main.py\n+++ b/web3/main.py\n@@ -18,7 +18,6 @@\n from web3.miner import Miner\n from web3.net import Net\n from web3.personal import Personal\n-from web3.shh import Shh\n from web3.testing import Testing\n from web3.txpool import TxPool\n from web3.version import Version\n@@ -53,7 +52,6 @@\n def get_default_modules():\n return {\n \"eth\": Eth,\n- \"shh\": Shh,\n \"net\": Net,\n \"personal\": Personal,\n \"version\": Version,\n", "issue": "Remove shh from default list of modules\nThe whisper protocol is not standardized enough to be in the default list.\r\n\r\nAlso, note in the docs the current fractured nature of whisper.\r\n\r\nSee #384 \n", "before_files": [{"content": "from __future__ import absolute_import\n\nfrom eth_utils import (\n apply_to_return_value,\n add_0x_prefix,\n from_wei,\n is_address,\n is_checksum_address,\n keccak,\n remove_0x_prefix,\n to_checksum_address,\n to_wei,\n)\n\nfrom web3.admin import Admin\nfrom web3.eth import Eth\nfrom web3.iban import Iban\nfrom web3.miner import Miner\nfrom web3.net import Net\nfrom web3.personal import Personal\nfrom web3.shh import Shh\nfrom web3.testing import Testing\nfrom web3.txpool import TxPool\nfrom web3.version import Version\n\nfrom web3.providers.ipc import (\n IPCProvider,\n)\nfrom web3.providers.rpc import (\n HTTPProvider,\n)\nfrom web3.providers.tester import (\n TestRPCProvider,\n EthereumTesterProvider,\n)\n\nfrom web3.manager import (\n RequestManager,\n)\n\nfrom web3.utils.datastructures import (\n HexBytes,\n)\nfrom web3.utils.encoding import (\n hex_encode_abi_type,\n to_bytes,\n to_int,\n to_hex,\n to_text,\n)\n\n\ndef get_default_modules():\n return {\n \"eth\": Eth,\n \"shh\": Shh,\n \"net\": Net,\n \"personal\": Personal,\n \"version\": Version,\n \"txpool\": TxPool,\n \"miner\": Miner,\n \"admin\": Admin,\n \"testing\": Testing,\n }\n\n\nclass Web3(object):\n # Providers\n HTTPProvider = HTTPProvider\n IPCProvider = IPCProvider\n TestRPCProvider = TestRPCProvider\n EthereumTesterProvider = EthereumTesterProvider\n\n # Managers\n RequestManager = RequestManager\n\n # Iban\n Iban = Iban\n\n # Encoding and Decoding\n toBytes = staticmethod(to_bytes)\n toInt = staticmethod(to_int)\n toHex = staticmethod(to_hex)\n toText = staticmethod(to_text)\n\n # Currency Utility\n toWei = staticmethod(to_wei)\n fromWei = staticmethod(from_wei)\n\n # Address Utility\n isAddress = staticmethod(is_address)\n isChecksumAddress = staticmethod(is_checksum_address)\n toChecksumAddress = staticmethod(to_checksum_address)\n\n def __init__(self, providers, middlewares=None, modules=None):\n self.manager = RequestManager(self, providers, middlewares)\n\n if modules is None:\n modules = get_default_modules()\n\n for module_name, module_class in modules.items():\n module_class.attach(self, module_name)\n\n @property\n def middleware_stack(self):\n return self.manager.middleware_stack\n\n @property\n def providers(self):\n return self.manager.providers\n\n def setProviders(self, providers):\n self.manager.setProvider(providers)\n\n @staticmethod\n @apply_to_return_value(HexBytes)\n def sha3(primitive=None, text=None, hexstr=None):\n if isinstance(primitive, (bytes, int, type(None))):\n input_bytes = to_bytes(primitive, hexstr=hexstr, text=text)\n return keccak(input_bytes)\n\n raise TypeError(\n \"You called sha3 with first arg %r and keywords %r. 
You must call it with one of \"\n \"these approaches: sha3(text='txt'), sha3(hexstr='0x747874'), \"\n \"sha3(b'\\\\x74\\\\x78\\\\x74'), or sha3(0x747874).\" % (\n primitive,\n {'text': text, 'hexstr': hexstr}\n )\n )\n\n @classmethod\n def soliditySha3(cls, abi_types, values):\n \"\"\"\n Executes sha3 (keccak256) exactly as Solidity does.\n Takes list of abi_types as inputs -- `[uint24, int8[], bool]`\n and list of corresponding values -- `[20, [-1, 5, 0], True]`\n \"\"\"\n if len(abi_types) != len(values):\n raise ValueError(\n \"Length mismatch between provided abi types and values. Got \"\n \"{0} types and {1} values.\".format(len(abi_types), len(values))\n )\n\n hex_string = add_0x_prefix(''.join(\n remove_0x_prefix(hex_encode_abi_type(abi_type, value))\n for abi_type, value\n in zip(abi_types, values)\n ))\n return cls.sha3(hexstr=hex_string)\n\n def isConnected(self):\n for provider in self.providers:\n if provider.isConnected():\n return True\n else:\n return False\n", "path": "web3/main.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nfrom eth_utils import (\n apply_to_return_value,\n add_0x_prefix,\n from_wei,\n is_address,\n is_checksum_address,\n keccak,\n remove_0x_prefix,\n to_checksum_address,\n to_wei,\n)\n\nfrom web3.admin import Admin\nfrom web3.eth import Eth\nfrom web3.iban import Iban\nfrom web3.miner import Miner\nfrom web3.net import Net\nfrom web3.personal import Personal\nfrom web3.testing import Testing\nfrom web3.txpool import TxPool\nfrom web3.version import Version\n\nfrom web3.providers.ipc import (\n IPCProvider,\n)\nfrom web3.providers.rpc import (\n HTTPProvider,\n)\nfrom web3.providers.tester import (\n TestRPCProvider,\n EthereumTesterProvider,\n)\n\nfrom web3.manager import (\n RequestManager,\n)\n\nfrom web3.utils.datastructures import (\n HexBytes,\n)\nfrom web3.utils.encoding import (\n hex_encode_abi_type,\n to_bytes,\n to_int,\n to_hex,\n to_text,\n)\n\n\ndef get_default_modules():\n return {\n \"eth\": Eth,\n \"net\": Net,\n \"personal\": Personal,\n \"version\": Version,\n \"txpool\": TxPool,\n \"miner\": Miner,\n \"admin\": Admin,\n \"testing\": Testing,\n }\n\n\nclass Web3(object):\n # Providers\n HTTPProvider = HTTPProvider\n IPCProvider = IPCProvider\n TestRPCProvider = TestRPCProvider\n EthereumTesterProvider = EthereumTesterProvider\n\n # Managers\n RequestManager = RequestManager\n\n # Iban\n Iban = Iban\n\n # Encoding and Decoding\n toBytes = staticmethod(to_bytes)\n toInt = staticmethod(to_int)\n toHex = staticmethod(to_hex)\n toText = staticmethod(to_text)\n\n # Currency Utility\n toWei = staticmethod(to_wei)\n fromWei = staticmethod(from_wei)\n\n # Address Utility\n isAddress = staticmethod(is_address)\n isChecksumAddress = staticmethod(is_checksum_address)\n toChecksumAddress = staticmethod(to_checksum_address)\n\n def __init__(self, providers, middlewares=None, modules=None):\n self.manager = RequestManager(self, providers, middlewares)\n\n if modules is None:\n modules = get_default_modules()\n\n for module_name, module_class in modules.items():\n module_class.attach(self, module_name)\n\n @property\n def middleware_stack(self):\n return self.manager.middleware_stack\n\n @property\n def providers(self):\n return self.manager.providers\n\n def setProviders(self, providers):\n self.manager.setProvider(providers)\n\n @staticmethod\n @apply_to_return_value(HexBytes)\n def sha3(primitive=None, text=None, hexstr=None):\n if isinstance(primitive, (bytes, int, type(None))):\n input_bytes = to_bytes(primitive, 
hexstr=hexstr, text=text)\n return keccak(input_bytes)\n\n raise TypeError(\n \"You called sha3 with first arg %r and keywords %r. You must call it with one of \"\n \"these approaches: sha3(text='txt'), sha3(hexstr='0x747874'), \"\n \"sha3(b'\\\\x74\\\\x78\\\\x74'), or sha3(0x747874).\" % (\n primitive,\n {'text': text, 'hexstr': hexstr}\n )\n )\n\n @classmethod\n def soliditySha3(cls, abi_types, values):\n \"\"\"\n Executes sha3 (keccak256) exactly as Solidity does.\n Takes list of abi_types as inputs -- `[uint24, int8[], bool]`\n and list of corresponding values -- `[20, [-1, 5, 0], True]`\n \"\"\"\n if len(abi_types) != len(values):\n raise ValueError(\n \"Length mismatch between provided abi types and values. Got \"\n \"{0} types and {1} values.\".format(len(abi_types), len(values))\n )\n\n hex_string = add_0x_prefix(''.join(\n remove_0x_prefix(hex_encode_abi_type(abi_type, value))\n for abi_type, value\n in zip(abi_types, values)\n ))\n return cls.sha3(hexstr=hex_string)\n\n def isConnected(self):\n for provider in self.providers:\n if provider.isConnected():\n return True\n else:\n return False\n", "path": "web3/main.py"}]} | 1,668 | 148 |
gh_patches_debug_22671 | rasdani/github-patches | git_diff | rucio__rucio-2844 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Distance exporter fails
Motivation
----------
There is a bug in https://github.com/rucio/rucio/blob/master/lib/rucio/api/exporter.py#L40
(it raises `ValueError: too many values to unpack`) which does not show up in the test.
Modification
------------
Fix the loop where the ValueError occurs, and modify the test in tests/test_import_export.py so that it has distances to export.
--- END ISSUE ---
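Editorial aside (not part of the original issue): the reported `ValueError` is what you get when a dictionary is iterated directly while unpacking two names — `for src_id, tmp in data['distances']:` walks over the keys only, and unpacking a key that is a plain string of more than two characters into two variables raises exactly that error. A minimal reproduction with a made-up two-level dictionary standing in for the exported distances:

```python
distances = {'rse_id_a': {'rse_id_b': {'distance': 1}}}

try:
    for src_id, tmp in distances:      # iterates over keys, then unpacks each key
        pass
except ValueError as err:
    print(err)                         # too many values to unpack (expected 2)

# The fix applied later in this record iterates over the keys and indexes explicitly:
for src_id in distances:
    dests = distances[src_id]
    print(src_id, dests)
```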
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/rucio/api/exporter.py`
Content:
```
1 '''
2 Copyright European Organization for Nuclear Research (CERN)
3
4 Licensed under the Apache License, Version 2.0 (the "License");
5 You may not use this file except in compliance with the License.
6 You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
7
8 Authors:
9 - Hannes Hansen, <[email protected]>, 2018
10 - Andrew Lister, <[email protected]>, 2019
11
12 PY3K COMPATIBLE
13 '''
14
15 from rucio.api import permission
16 from rucio.common import exception
17 from rucio.core import exporter
18 from rucio.core.rse import get_rse_name
19
20
21 def export_data(issuer):
22 """
23 Export data from Rucio.
24
25 :param issuer: the issuer.
26 """
27 kwargs = {'issuer': issuer}
28 if not permission.has_permission(issuer=issuer, action='export', kwargs=kwargs):
29 raise exception.AccessDenied('Account %s can not export data' % issuer)
30
31 data = exporter.export_data()
32 rses = {}
33 distances = {}
34
35 for rse_id in data['rses']:
36 rse = data['rses'][rse_id]
37 rses[get_rse_name(rse_id=rse_id)] = rse
38 data['rses'] = rses
39
40 for src_id, tmp in data['distances']:
41 src = get_rse_name(rse_id=src_id)
42 distances[src] = {}
43 for dst_id, dists in tmp:
44 dst = get_rse_name(rse_id=dst_id)
45 distances[src][dst] = dists
46 data['distances'] = distances
47 return data
48
```
Path: `lib/rucio/core/distance.py`
Content:
```
1 """
2 Copyright European Organization for Nuclear Research (CERN)
3
4 Licensed under the Apache License, Version 2.0 (the "License");
5 You may not use this file except in compliance with the License.
6 You may obtain a copy of the License at
7 http://www.apache.org/licenses/LICENSE-2.0
8
9 Authors:
10 - Wen Guan, <[email protected]>, 2015-2016
11 - Cedric Serfon, <[email protected]>, 2017
12 - Andrew Lister, <[email protected]>, 2019
13
14 PY3K COMPATIBLE
15 """
16
17 from sqlalchemy.exc import DatabaseError, IntegrityError
18 from sqlalchemy.orm import aliased
19
20 from rucio.common import exception
21 from rucio.core.rse import get_rse_name
22 from rucio.db.sqla.models import Distance, RSE
23 from rucio.db.sqla.session import transactional_session, read_session
24
25
26 @transactional_session
27 def add_distance(src_rse_id, dest_rse_id, ranking=None, agis_distance=None, geoip_distance=None,
28 active=None, submitted=None, finished=None, failed=None, transfer_speed=None, session=None):
29 """
30 Add a src-dest distance.
31
32 :param src_rse_id: The source RSE ID.
33 :param dest_rse_id: The destination RSE ID.
34 :param ranking: Ranking as an integer.
35 :param agis_distance: AGIS Distance as an integer.
36 :param geoip_distance: GEOIP Distance as an integer.
37 :param active: Active FTS transfers as an integer.
38 :param submitted: Submitted FTS transfers as an integer.
39 :param finished: Finished FTS transfers as an integer.
40 :param failed: Failed FTS transfers as an integer.
41 :param transfer_speed: FTS transfer speed as an integer.
42 :param session: The database session to use.
43 """
44
45 try:
46 new_distance = Distance(src_rse_id=src_rse_id, dest_rse_id=dest_rse_id, ranking=ranking, agis_distance=agis_distance, geoip_distance=geoip_distance,
47 active=active, submitted=submitted, finished=finished, failed=failed, transfer_speed=transfer_speed)
48 new_distance.save(session=session)
49 except IntegrityError:
50 raise exception.Duplicate('Distance from %s to %s already exists!' % (get_rse_name(rse_id=src_rse_id, session=session), get_rse_name(rse_id=dest_rse_id, session=session)))
51 except DatabaseError as error:
52 raise exception.RucioException(error.args)
53
54
55 @transactional_session
56 def add_distance_short(src_rse_id, dest_rse_id, distance=None, session=None):
57 """
58 Add a src-dest distance.
59
60 :param src_rse_id: The source RSE ID.
61 :param dest_rse_id: The destination RSE ID.
62 :param distance: A dictionary with different values.
63 """
64
65 add_distance(src_rse_id, dest_rse_id, ranking=distance.get('ranking', None), agis_distance=distance.get('agis_distance', None),
66 geoip_distance=distance.get('geoip_distance', None), active=distance.get('active', None), submitted=distance.get('submitted', None),
67 finished=distance.get('finished', None), failed=distance.get('failed', None), transfer_speed=distance.get('transfer_speed', None),
68 session=session)
69
70
71 @read_session
72 def get_distances(src_rse_id=None, dest_rse_id=None, session=None):
73 """
74 Get distances between rses.
75
76 :param src_rse_id: The source RSE ID.
77 :param dest_rse_id: The destination RSE ID.
78 :param session: The database session to use.
79
80 :returns distance: List of dictionaries.
81 """
82
83 try:
84 query = session.query(Distance)
85 if src_rse_id:
86 query = query.filter(Distance.src_rse_id == src_rse_id)
87 if dest_rse_id:
88 query = query.filter(Distance.dest_rse_id == dest_rse_id)
89
90 distances = []
91 tmp = query.all()
92 if tmp:
93 for t in tmp:
94 t2 = dict(t)
95 t2['distance'] = t2['agis_distance']
96 t2.pop('_sa_instance_state')
97 distances.append(t2)
98 return distances
99 except IntegrityError as error:
100 raise exception.RucioException(error.args)
101
102
103 @transactional_session
104 def delete_distances(src_rse_id=None, dest_rse_id=None, session=None):
105 """
106 Delete distances with the given RSE ids.
107
108 :param src_rse_id: The source RSE ID.
109 :param dest_rse_id: The destination RSE ID.
110 :param session: The database session to use.
111 """
112
113 try:
114 query = session.query(Distance)
115
116 if src_rse_id:
117 query = query.filter(Distance.src_rse_id == src_rse_id)
118 if dest_rse_id:
119 query = query.filter(Distance.dest_rse_id == dest_rse_id)
120
121 query.delete()
122 except IntegrityError as error:
123 raise exception.RucioException(error.args)
124
125
126 @transactional_session
127 def update_distances(src_rse_id=None, dest_rse_id=None, parameters=None, session=None):
128 """
129 Update distances with the given RSE ids.
130
131 :param src_rse_id: The source RSE ID.
132 :param dest_rse_id: The destination RSE ID.
133 :param parameters: A dictionnary with property
134 :param session: The database session to use.
135 """
136 params = {}
137 for key in parameters:
138 if key in ['ranking', 'agis_distance', 'geoip_distance', 'active', 'submitted', 'finished', 'failed', 'transfer_speed', 'packet_loss', 'latency', 'mbps_file', 'mbps_link', 'queued_total', 'done_1h', 'done_6h']:
139 params[key] = parameters[key]
140 try:
141 query = session.query(Distance)
142 if src_rse_id:
143 query = query.filter(Distance.src_rse_id == src_rse_id)
144 if dest_rse_id:
145 query = query.filter(Distance.dest_rse_id == dest_rse_id)
146 query.update(params)
147 except IntegrityError as error:
148 raise exception.RucioException(error.args)
149
150
151 @read_session
152 def list_distances(filter={}, session=None):
153 """
154 Get distances between all the RSEs.
155
156 :param filter: dictionary to filter distances.
157 :param session: The database session in use.
158 """
159 return [distance.to_dict() for distance in session.query(Distance).all()]
160
161
162 @read_session
163 def export_distances(session=None):
164 """
165 Export distances between all the RSEs using RSE ids.
166 :param session: The database session to use.
167 :returns distance: dictionary of dictionaries with all the distances.
168 """
169
170 distances = {}
171 try:
172 rse_src = aliased(RSE)
173 rse_dest = aliased(RSE)
174 query = session.query(Distance, rse_src.id, rse_dest.id)\
175 .join(rse_src, rse_src.id == Distance.src_rse_id)\
176 .join(rse_dest, rse_dest.id == Distance.dest_rse_id)
177 for result in query.all():
178 distance = result[0]
179 src_id = result[1]
180 dst_id = result[2]
181 if src_id not in distances:
182 distances[src_id] = {}
183 distances[src_id][dst_id] = {}
184 distances[src_id][dst_id] = distance.to_dict()
185 del distances[src_id][dst_id]['_sa_instance_state']
186 return distances
187 except IntegrityError as error:
188 raise exception.RucioException(error.args)
189
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/rucio/api/exporter.py b/lib/rucio/api/exporter.py
--- a/lib/rucio/api/exporter.py
+++ b/lib/rucio/api/exporter.py
@@ -37,11 +37,12 @@
rses[get_rse_name(rse_id=rse_id)] = rse
data['rses'] = rses
- for src_id, tmp in data['distances']:
+ for src_id in data['distances']:
+ dests = data['distances'][src_id]
src = get_rse_name(rse_id=src_id)
distances[src] = {}
- for dst_id, dists in tmp:
- dst = get_rse_name(rse_id=dst_id)
- distances[src][dst] = dists
+ for dest_id in dests:
+ dest = get_rse_name(rse_id=dest_id)
+ distances[src][dest] = dests[dest_id]
data['distances'] = distances
return data
diff --git a/lib/rucio/core/distance.py b/lib/rucio/core/distance.py
--- a/lib/rucio/core/distance.py
+++ b/lib/rucio/core/distance.py
@@ -181,6 +181,7 @@
if src_id not in distances:
distances[src_id] = {}
distances[src_id][dst_id] = {}
+ distance['distance'] = distance['agis_distance']
distances[src_id][dst_id] = distance.to_dict()
del distances[src_id][dst_id]['_sa_instance_state']
return distances
| {"golden_diff": "diff --git a/lib/rucio/api/exporter.py b/lib/rucio/api/exporter.py\n--- a/lib/rucio/api/exporter.py\n+++ b/lib/rucio/api/exporter.py\n@@ -37,11 +37,12 @@\n rses[get_rse_name(rse_id=rse_id)] = rse\n data['rses'] = rses\n \n- for src_id, tmp in data['distances']:\n+ for src_id in data['distances']:\n+ dests = data['distances'][src_id]\n src = get_rse_name(rse_id=src_id)\n distances[src] = {}\n- for dst_id, dists in tmp:\n- dst = get_rse_name(rse_id=dst_id)\n- distances[src][dst] = dists\n+ for dest_id in dests:\n+ dest = get_rse_name(rse_id=dest_id)\n+ distances[src][dest] = dests[dest_id]\n data['distances'] = distances\n return data\ndiff --git a/lib/rucio/core/distance.py b/lib/rucio/core/distance.py\n--- a/lib/rucio/core/distance.py\n+++ b/lib/rucio/core/distance.py\n@@ -181,6 +181,7 @@\n if src_id not in distances:\n distances[src_id] = {}\n distances[src_id][dst_id] = {}\n+ distance['distance'] = distance['agis_distance']\n distances[src_id][dst_id] = distance.to_dict()\n del distances[src_id][dst_id]['_sa_instance_state']\n return distances\n", "issue": "Distance exporter fails\nMotivation\r\n----------\r\nThere is a bug in https://github.com/rucio/rucio/blob/master/lib/rucio/api/exporter.py#L40\r\n(ValueError: too many values to unpack) which does not show up in the test.\r\n\r\n\r\nModification\r\n------------\r\nFix the loop where the ValueError occurs and modify the test in tests/test_import_export.py to have distances to export \r\n\r\n\n", "before_files": [{"content": "'''\n Copyright European Organization for Nuclear Research (CERN)\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n You may not use this file except in compliance with the License.\n You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n\n Authors:\n - Hannes Hansen, <[email protected]>, 2018\n - Andrew Lister, <[email protected]>, 2019\n\n PY3K COMPATIBLE\n'''\n\nfrom rucio.api import permission\nfrom rucio.common import exception\nfrom rucio.core import exporter\nfrom rucio.core.rse import get_rse_name\n\n\ndef export_data(issuer):\n \"\"\"\n Export data from Rucio.\n\n :param issuer: the issuer.\n \"\"\"\n kwargs = {'issuer': issuer}\n if not permission.has_permission(issuer=issuer, action='export', kwargs=kwargs):\n raise exception.AccessDenied('Account %s can not export data' % issuer)\n\n data = exporter.export_data()\n rses = {}\n distances = {}\n\n for rse_id in data['rses']:\n rse = data['rses'][rse_id]\n rses[get_rse_name(rse_id=rse_id)] = rse\n data['rses'] = rses\n\n for src_id, tmp in data['distances']:\n src = get_rse_name(rse_id=src_id)\n distances[src] = {}\n for dst_id, dists in tmp:\n dst = get_rse_name(rse_id=dst_id)\n distances[src][dst] = dists\n data['distances'] = distances\n return data\n", "path": "lib/rucio/api/exporter.py"}, {"content": "\"\"\"\n Copyright European Organization for Nuclear Research (CERN)\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n You may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n\n Authors:\n - Wen Guan, <[email protected]>, 2015-2016\n - Cedric Serfon, <[email protected]>, 2017\n - Andrew Lister, <[email protected]>, 2019\n\n PY3K COMPATIBLE\n\"\"\"\n\nfrom sqlalchemy.exc import DatabaseError, IntegrityError\nfrom sqlalchemy.orm import aliased\n\nfrom rucio.common import exception\nfrom rucio.core.rse import get_rse_name\nfrom rucio.db.sqla.models import Distance, 
RSE\nfrom rucio.db.sqla.session import transactional_session, read_session\n\n\n@transactional_session\ndef add_distance(src_rse_id, dest_rse_id, ranking=None, agis_distance=None, geoip_distance=None,\n active=None, submitted=None, finished=None, failed=None, transfer_speed=None, session=None):\n \"\"\"\n Add a src-dest distance.\n\n :param src_rse_id: The source RSE ID.\n :param dest_rse_id: The destination RSE ID.\n :param ranking: Ranking as an integer.\n :param agis_distance: AGIS Distance as an integer.\n :param geoip_distance: GEOIP Distance as an integer.\n :param active: Active FTS transfers as an integer.\n :param submitted: Submitted FTS transfers as an integer.\n :param finished: Finished FTS transfers as an integer.\n :param failed: Failed FTS transfers as an integer.\n :param transfer_speed: FTS transfer speed as an integer.\n :param session: The database session to use.\n \"\"\"\n\n try:\n new_distance = Distance(src_rse_id=src_rse_id, dest_rse_id=dest_rse_id, ranking=ranking, agis_distance=agis_distance, geoip_distance=geoip_distance,\n active=active, submitted=submitted, finished=finished, failed=failed, transfer_speed=transfer_speed)\n new_distance.save(session=session)\n except IntegrityError:\n raise exception.Duplicate('Distance from %s to %s already exists!' % (get_rse_name(rse_id=src_rse_id, session=session), get_rse_name(rse_id=dest_rse_id, session=session)))\n except DatabaseError as error:\n raise exception.RucioException(error.args)\n\n\n@transactional_session\ndef add_distance_short(src_rse_id, dest_rse_id, distance=None, session=None):\n \"\"\"\n Add a src-dest distance.\n\n :param src_rse_id: The source RSE ID.\n :param dest_rse_id: The destination RSE ID.\n :param distance: A dictionary with different values.\n \"\"\"\n\n add_distance(src_rse_id, dest_rse_id, ranking=distance.get('ranking', None), agis_distance=distance.get('agis_distance', None),\n geoip_distance=distance.get('geoip_distance', None), active=distance.get('active', None), submitted=distance.get('submitted', None),\n finished=distance.get('finished', None), failed=distance.get('failed', None), transfer_speed=distance.get('transfer_speed', None),\n session=session)\n\n\n@read_session\ndef get_distances(src_rse_id=None, dest_rse_id=None, session=None):\n \"\"\"\n Get distances between rses.\n\n :param src_rse_id: The source RSE ID.\n :param dest_rse_id: The destination RSE ID.\n :param session: The database session to use.\n\n :returns distance: List of dictionaries.\n \"\"\"\n\n try:\n query = session.query(Distance)\n if src_rse_id:\n query = query.filter(Distance.src_rse_id == src_rse_id)\n if dest_rse_id:\n query = query.filter(Distance.dest_rse_id == dest_rse_id)\n\n distances = []\n tmp = query.all()\n if tmp:\n for t in tmp:\n t2 = dict(t)\n t2['distance'] = t2['agis_distance']\n t2.pop('_sa_instance_state')\n distances.append(t2)\n return distances\n except IntegrityError as error:\n raise exception.RucioException(error.args)\n\n\n@transactional_session\ndef delete_distances(src_rse_id=None, dest_rse_id=None, session=None):\n \"\"\"\n Delete distances with the given RSE ids.\n\n :param src_rse_id: The source RSE ID.\n :param dest_rse_id: The destination RSE ID.\n :param session: The database session to use.\n \"\"\"\n\n try:\n query = session.query(Distance)\n\n if src_rse_id:\n query = query.filter(Distance.src_rse_id == src_rse_id)\n if dest_rse_id:\n query = query.filter(Distance.dest_rse_id == dest_rse_id)\n\n query.delete()\n except IntegrityError as error:\n raise 
exception.RucioException(error.args)\n\n\n@transactional_session\ndef update_distances(src_rse_id=None, dest_rse_id=None, parameters=None, session=None):\n \"\"\"\n Update distances with the given RSE ids.\n\n :param src_rse_id: The source RSE ID.\n :param dest_rse_id: The destination RSE ID.\n :param parameters: A dictionnary with property\n :param session: The database session to use.\n \"\"\"\n params = {}\n for key in parameters:\n if key in ['ranking', 'agis_distance', 'geoip_distance', 'active', 'submitted', 'finished', 'failed', 'transfer_speed', 'packet_loss', 'latency', 'mbps_file', 'mbps_link', 'queued_total', 'done_1h', 'done_6h']:\n params[key] = parameters[key]\n try:\n query = session.query(Distance)\n if src_rse_id:\n query = query.filter(Distance.src_rse_id == src_rse_id)\n if dest_rse_id:\n query = query.filter(Distance.dest_rse_id == dest_rse_id)\n query.update(params)\n except IntegrityError as error:\n raise exception.RucioException(error.args)\n\n\n@read_session\ndef list_distances(filter={}, session=None):\n \"\"\"\n Get distances between all the RSEs.\n\n :param filter: dictionary to filter distances.\n :param session: The database session in use.\n \"\"\"\n return [distance.to_dict() for distance in session.query(Distance).all()]\n\n\n@read_session\ndef export_distances(session=None):\n \"\"\"\n Export distances between all the RSEs using RSE ids.\n :param session: The database session to use.\n :returns distance: dictionary of dictionaries with all the distances.\n \"\"\"\n\n distances = {}\n try:\n rse_src = aliased(RSE)\n rse_dest = aliased(RSE)\n query = session.query(Distance, rse_src.id, rse_dest.id)\\\n .join(rse_src, rse_src.id == Distance.src_rse_id)\\\n .join(rse_dest, rse_dest.id == Distance.dest_rse_id)\n for result in query.all():\n distance = result[0]\n src_id = result[1]\n dst_id = result[2]\n if src_id not in distances:\n distances[src_id] = {}\n distances[src_id][dst_id] = {}\n distances[src_id][dst_id] = distance.to_dict()\n del distances[src_id][dst_id]['_sa_instance_state']\n return distances\n except IntegrityError as error:\n raise exception.RucioException(error.args)\n", "path": "lib/rucio/core/distance.py"}], "after_files": [{"content": "'''\n Copyright European Organization for Nuclear Research (CERN)\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n You may not use this file except in compliance with the License.\n You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n\n Authors:\n - Hannes Hansen, <[email protected]>, 2018\n - Andrew Lister, <[email protected]>, 2019\n\n PY3K COMPATIBLE\n'''\n\nfrom rucio.api import permission\nfrom rucio.common import exception\nfrom rucio.core import exporter\nfrom rucio.core.rse import get_rse_name\n\n\ndef export_data(issuer):\n \"\"\"\n Export data from Rucio.\n\n :param issuer: the issuer.\n \"\"\"\n kwargs = {'issuer': issuer}\n if not permission.has_permission(issuer=issuer, action='export', kwargs=kwargs):\n raise exception.AccessDenied('Account %s can not export data' % issuer)\n\n data = exporter.export_data()\n rses = {}\n distances = {}\n\n for rse_id in data['rses']:\n rse = data['rses'][rse_id]\n rses[get_rse_name(rse_id=rse_id)] = rse\n data['rses'] = rses\n\n for src_id in data['distances']:\n dests = data['distances'][src_id]\n src = get_rse_name(rse_id=src_id)\n distances[src] = {}\n for dest_id in dests:\n dest = get_rse_name(rse_id=dest_id)\n distances[src][dest] = dests[dest_id]\n data['distances'] = distances\n return data\n", 
"path": "lib/rucio/api/exporter.py"}, {"content": "\"\"\"\n Copyright European Organization for Nuclear Research (CERN)\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n You may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n\n Authors:\n - Wen Guan, <[email protected]>, 2015-2016\n - Cedric Serfon, <[email protected]>, 2017\n - Andrew Lister, <[email protected]>, 2019\n\n PY3K COMPATIBLE\n\"\"\"\n\nfrom sqlalchemy.exc import DatabaseError, IntegrityError\nfrom sqlalchemy.orm import aliased\n\nfrom rucio.common import exception\nfrom rucio.core.rse import get_rse_name\nfrom rucio.db.sqla.models import Distance, RSE\nfrom rucio.db.sqla.session import transactional_session, read_session\n\n\n@transactional_session\ndef add_distance(src_rse_id, dest_rse_id, ranking=None, agis_distance=None, geoip_distance=None,\n active=None, submitted=None, finished=None, failed=None, transfer_speed=None, session=None):\n \"\"\"\n Add a src-dest distance.\n\n :param src_rse_id: The source RSE ID.\n :param dest_rse_id: The destination RSE ID.\n :param ranking: Ranking as an integer.\n :param agis_distance: AGIS Distance as an integer.\n :param geoip_distance: GEOIP Distance as an integer.\n :param active: Active FTS transfers as an integer.\n :param submitted: Submitted FTS transfers as an integer.\n :param finished: Finished FTS transfers as an integer.\n :param failed: Failed FTS transfers as an integer.\n :param transfer_speed: FTS transfer speed as an integer.\n :param session: The database session to use.\n \"\"\"\n\n try:\n new_distance = Distance(src_rse_id=src_rse_id, dest_rse_id=dest_rse_id, ranking=ranking, agis_distance=agis_distance, geoip_distance=geoip_distance,\n active=active, submitted=submitted, finished=finished, failed=failed, transfer_speed=transfer_speed)\n new_distance.save(session=session)\n except IntegrityError:\n raise exception.Duplicate('Distance from %s to %s already exists!' 
% (get_rse_name(rse_id=src_rse_id, session=session), get_rse_name(rse_id=dest_rse_id, session=session)))\n except DatabaseError as error:\n raise exception.RucioException(error.args)\n\n\n@transactional_session\ndef add_distance_short(src_rse_id, dest_rse_id, distance=None, session=None):\n \"\"\"\n Add a src-dest distance.\n\n :param src_rse_id: The source RSE ID.\n :param dest_rse_id: The destination RSE ID.\n :param distance: A dictionary with different values.\n \"\"\"\n\n add_distance(src_rse_id, dest_rse_id, ranking=distance.get('ranking', None), agis_distance=distance.get('agis_distance', None),\n geoip_distance=distance.get('geoip_distance', None), active=distance.get('active', None), submitted=distance.get('submitted', None),\n finished=distance.get('finished', None), failed=distance.get('failed', None), transfer_speed=distance.get('transfer_speed', None),\n session=session)\n\n\n@read_session\ndef get_distances(src_rse_id=None, dest_rse_id=None, session=None):\n \"\"\"\n Get distances between rses.\n\n :param src_rse_id: The source RSE ID.\n :param dest_rse_id: The destination RSE ID.\n :param session: The database session to use.\n\n :returns distance: List of dictionaries.\n \"\"\"\n\n try:\n query = session.query(Distance)\n if src_rse_id:\n query = query.filter(Distance.src_rse_id == src_rse_id)\n if dest_rse_id:\n query = query.filter(Distance.dest_rse_id == dest_rse_id)\n\n distances = []\n tmp = query.all()\n if tmp:\n for t in tmp:\n t2 = dict(t)\n t2['distance'] = t2['agis_distance']\n t2.pop('_sa_instance_state')\n distances.append(t2)\n return distances\n except IntegrityError as error:\n raise exception.RucioException(error.args)\n\n\n@transactional_session\ndef delete_distances(src_rse_id=None, dest_rse_id=None, session=None):\n \"\"\"\n Delete distances with the given RSE ids.\n\n :param src_rse_id: The source RSE ID.\n :param dest_rse_id: The destination RSE ID.\n :param session: The database session to use.\n \"\"\"\n\n try:\n query = session.query(Distance)\n\n if src_rse_id:\n query = query.filter(Distance.src_rse_id == src_rse_id)\n if dest_rse_id:\n query = query.filter(Distance.dest_rse_id == dest_rse_id)\n\n query.delete()\n except IntegrityError as error:\n raise exception.RucioException(error.args)\n\n\n@transactional_session\ndef update_distances(src_rse_id=None, dest_rse_id=None, parameters=None, session=None):\n \"\"\"\n Update distances with the given RSE ids.\n\n :param src_rse_id: The source RSE ID.\n :param dest_rse_id: The destination RSE ID.\n :param parameters: A dictionnary with property\n :param session: The database session to use.\n \"\"\"\n params = {}\n for key in parameters:\n if key in ['ranking', 'agis_distance', 'geoip_distance', 'active', 'submitted', 'finished', 'failed', 'transfer_speed', 'packet_loss', 'latency', 'mbps_file', 'mbps_link', 'queued_total', 'done_1h', 'done_6h']:\n params[key] = parameters[key]\n try:\n query = session.query(Distance)\n if src_rse_id:\n query = query.filter(Distance.src_rse_id == src_rse_id)\n if dest_rse_id:\n query = query.filter(Distance.dest_rse_id == dest_rse_id)\n query.update(params)\n except IntegrityError as error:\n raise exception.RucioException(error.args)\n\n\n@read_session\ndef list_distances(filter={}, session=None):\n \"\"\"\n Get distances between all the RSEs.\n\n :param filter: dictionary to filter distances.\n :param session: The database session in use.\n \"\"\"\n return [distance.to_dict() for distance in session.query(Distance).all()]\n\n\n@read_session\ndef 
export_distances(session=None):\n \"\"\"\n Export distances between all the RSEs using RSE ids.\n :param session: The database session to use.\n :returns distance: dictionary of dictionaries with all the distances.\n \"\"\"\n\n distances = {}\n try:\n rse_src = aliased(RSE)\n rse_dest = aliased(RSE)\n query = session.query(Distance, rse_src.id, rse_dest.id)\\\n .join(rse_src, rse_src.id == Distance.src_rse_id)\\\n .join(rse_dest, rse_dest.id == Distance.dest_rse_id)\n for result in query.all():\n distance = result[0]\n src_id = result[1]\n dst_id = result[2]\n if src_id not in distances:\n distances[src_id] = {}\n distances[src_id][dst_id] = {}\n distance['distance'] = distance['agis_distance']\n distances[src_id][dst_id] = distance.to_dict()\n del distances[src_id][dst_id]['_sa_instance_state']\n return distances\n except IntegrityError as error:\n raise exception.RucioException(error.args)\n", "path": "lib/rucio/core/distance.py"}]} | 3,017 | 359 |
gh_patches_debug_2327 | rasdani/github-patches | git_diff | encode__httpx-194 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Missing py.typed declaration?
`mypy` is complaining about not being able to find type annotations for `httpx`:
`error: Cannot find module named 'httpx'`
I'm somewhat new to using type annotations/static type checking in Python, but from the mypy documentation [here](https://mypy.readthedocs.io/en/latest/installed_packages.html#making-pep-561-compatible-packages) it looks like there may be a missing declaration in `setup.py`?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 import os
5 import re
6
7 from setuptools import setup
8
9
10 def get_version(package):
11 """
12 Return package version as listed in `__version__` in `init.py`.
13 """
14 with open(os.path.join(package, "__version__.py")) as f:
15 return re.search("__version__ = ['\"]([^'\"]+)['\"]", f.read()).group(1)
16
17
18 def get_long_description():
19 """
20 Return the README.
21 """
22 with open("README.md", encoding="utf8") as f:
23 return f.read()
24
25
26 def get_packages(package):
27 """
28 Return root package and all sub-packages.
29 """
30 return [
31 dirpath
32 for dirpath, dirnames, filenames in os.walk(package)
33 if os.path.exists(os.path.join(dirpath, "__init__.py"))
34 ]
35
36
37 setup(
38 name="httpx",
39 python_requires=">=3.6",
40 version=get_version("httpx"),
41 url="https://github.com/encode/httpx",
42 license="BSD",
43 description="The next generation HTTP client.",
44 long_description=get_long_description(),
45 long_description_content_type="text/markdown",
46 author="Tom Christie",
47 author_email="[email protected]",
48 packages=get_packages("httpx"),
49 install_requires=[
50 "certifi",
51 "chardet==3.*",
52 "h11==0.8.*",
53 "h2==3.*",
54 "hstspreload",
55 "idna==2.*",
56 "rfc3986==1.*",
57 ],
58 classifiers=[
59 "Development Status :: 3 - Alpha",
60 "Environment :: Web Environment",
61 "Intended Audience :: Developers",
62 "License :: OSI Approved :: BSD License",
63 "Operating System :: OS Independent",
64 "Topic :: Internet :: WWW/HTTP",
65 "Programming Language :: Python :: 3",
66 "Programming Language :: Python :: 3.6",
67 "Programming Language :: Python :: 3.7",
68 "Programming Language :: Python :: 3.8",
69 ],
70 )
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -45,6 +45,7 @@
long_description_content_type="text/markdown",
author="Tom Christie",
author_email="[email protected]",
+ package_data={"httpx": ["py.typed"]},
packages=get_packages("httpx"),
install_requires=[
"certifi",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -45,6 +45,7 @@\n long_description_content_type=\"text/markdown\",\n author=\"Tom Christie\",\n author_email=\"[email protected]\",\n+ package_data={\"httpx\": [\"py.typed\"]},\n packages=get_packages(\"httpx\"),\n install_requires=[\n \"certifi\",\n", "issue": "Missing py.typed declaration?\n`mypy` is complaining about not being able to find type annotations for `httpx`: \r\n\r\n`error: Cannot find module named 'httpx'`\r\n\r\nI'm somewhat new to using type annotations/static type checking in Python, but from the mypy documentation [here](https://mypy.readthedocs.io/en/latest/installed_packages.html#making-pep-561-compatible-packages) it looks like there may be a missing declaration in `setup.py`?\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n with open(os.path.join(package, \"__version__.py\")) as f:\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", f.read()).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n with open(\"README.md\", encoding=\"utf8\") as f:\n return f.read()\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [\n dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, \"__init__.py\"))\n ]\n\n\nsetup(\n name=\"httpx\",\n python_requires=\">=3.6\",\n version=get_version(\"httpx\"),\n url=\"https://github.com/encode/httpx\",\n license=\"BSD\",\n description=\"The next generation HTTP client.\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Tom Christie\",\n author_email=\"[email protected]\",\n packages=get_packages(\"httpx\"),\n install_requires=[\n \"certifi\",\n \"chardet==3.*\",\n \"h11==0.8.*\",\n \"h2==3.*\",\n \"hstspreload\",\n \"idna==2.*\",\n \"rfc3986==1.*\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n with open(os.path.join(package, \"__version__.py\")) as f:\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", f.read()).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n with open(\"README.md\", encoding=\"utf8\") as f:\n return f.read()\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [\n dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, \"__init__.py\"))\n ]\n\n\nsetup(\n name=\"httpx\",\n python_requires=\">=3.6\",\n version=get_version(\"httpx\"),\n url=\"https://github.com/encode/httpx\",\n license=\"BSD\",\n description=\"The next generation HTTP 
client.\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Tom Christie\",\n author_email=\"[email protected]\",\n package_data={\"httpx\": [\"py.typed\"]},\n packages=get_packages(\"httpx\"),\n install_requires=[\n \"certifi\",\n \"chardet==3.*\",\n \"h11==0.8.*\",\n \"h2==3.*\",\n \"hstspreload\",\n \"idna==2.*\",\n \"rfc3986==1.*\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n)\n", "path": "setup.py"}]} | 947 | 90 |
gh_patches_debug_4090 | rasdani/github-patches | git_diff | enthought__chaco-883 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
NameError: name 'gc' is not defined
**Problem Description**
NameError: name 'gc' is not defined in chaco/chaco/plots/polar_line_renderer.py
**Reproduction Steps:**
python chaco/examples/demo/simple_polar.py
[Steps to reproduce issue here.]
```python
# Code to reproduce issue here
```
**Expected behavior:**
[MacOS, python3.8 (EDM)]
**OS, Python version:** [Enter OS name and Python version]
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chaco/plots/polar_line_renderer.py`
Content:
```
1 # (C) Copyright 2005-2021 Enthought, Inc., Austin, TX
2 # All rights reserved.
3 #
4 # This software is provided without warranty under the terms of the BSD
5 # license included in LICENSE.txt and may be redistributed only under
6 # the conditions described in the aforementioned license. The license
7 # is also available online at http://www.enthought.com/licenses/BSD.txt
8 #
9 # Thanks for using Enthought open source!
10
11 """ Defines the PolarLineRenderer class.
12 """
13
14
15 # Major library imports
16 from numpy import array, cos, empty, pi, sin, transpose
17
18 # Enthought library imports
19 from enable.api import black_color_trait, LineStyle
20 from traits.api import Float
21
22 # Local, relative imports
23 from chaco.abstract_plot_renderer import AbstractPlotRenderer
24
25
26 class PolarLineRenderer(AbstractPlotRenderer):
27 """A renderer for polar line plots."""
28
29 # ------------------------------------------------------------------------
30 # Appearance-related traits
31 # ------------------------------------------------------------------------
32
33 # The color of the origin axis.
34 origin_axis_color_ = (0, 0, 0, 1)
35 # The width of the origin axis.
36 origin_axis_width = 2.0
37 # The origin axis is visible.
38 origin_axis_visible = True
39 # The grid is visible.
40 grid_visible = True
41 # The orientation of the plot is horizontal; for any other value, it is
42 # transposed
43 orientation = "h"
44 # The color of the line.
45 color = black_color_trait
46 # The width of the line.
47 line_width = Float(1.0)
48 # The style of the line.
49 line_style = LineStyle("solid")
50 # The style of the grid lines.
51 grid_style = LineStyle("dot")
52
53 def _gather_points(self):
54 """
55 Collects the data points that are within the plot bounds and caches them
56 """
57 # This is just a stub for now. We should really find the lines only
58 # inside the screen range here.
59
60 x = self.index.get_data()
61 y = self.value.get_data()
62 rad = min(self.width / 2.0, self.height / 2.0)
63 sx = x * rad + self.x + self.width / 2.0
64 sy = y * rad + self.y + self.height / 2.0
65
66 points = transpose(array((sx, sy)))
67 self._cached_data_pts = points
68 self._cache_valid = True
69
70 def _data_changed(self):
71 self._cache_valid = False
72
73 def _update_mappers(self):
74 # Dunno if there is anything else to do here
75 self._cache_valid = False
76
77 def _render(self, gc, points):
78 """Actually draw the plot."""
79 with gc:
80 gc.set_antialias(True)
81 self._draw_default_axes(gc)
82 self._draw_default_grid(gc)
83 if len(points) > 0:
84 gc.clip_to_rect(self.x, self.y, self.width, self.height)
85 gc.set_stroke_color(self.color_)
86 gc.set_line_width(self.line_width)
87 gc.set_line_dash(self.line_style_)
88
89 gc.begin_path()
90 gc.lines(points)
91 gc.stroke_path()
92
93 def map_screen(self, data_array):
94 """Maps an array of data points into screen space and returns it as
95 an array.
96
97 Implements the AbstractPlotRenderer interface.
98 """
99
100 if len(data_array) == 0:
101 return empty(shape=(0, 2))
102 elif len(data_array) == 1:
103 xtmp, ytmp = transpose(data_array)
104 x_ary = xtmp
105 y_ary = ytmp
106 else:
107 x_ary, y_ary = transpose(data_array)
108
109 sx = self.index_mapper.map_screen(x_ary)
110 sy = self.value_mapper.map_screen(y_ary)
111
112 if self.orientation == "h":
113 return transpose(array((sx, sy)))
114 else:
115 return transpose(array((sy, sx)))
116
117 def map_data(self, screen_pt):
118 """Maps a screen space point into the "index" space of the plot.
119
120 Implements the AbstractPlotRenderer interface.
121 """
122 if self.orientation == "h":
123 x, y = screen_pt
124 else:
125 y, x = screen_pt
126 return array(
127 (self.index_mapper.map_data(x), self.value_mapper.map_data(y))
128 )
129
130 def _downsample(self):
131 return self.map_screen(self._cached_data_pts)
132
133 def _draw_plot(self, *args, **kw):
134 """Draws the 'plot' layer."""
135 self._gather_points()
136 self._render(gc, self._cached_data_pts)
137
138 def _bounds_changed(self, old, new):
139 super()._bounds_changed(old, new)
140 self._update_mappers()
141
142 def _bounds_items_changed(self, event):
143 super()._bounds_items_changed(event)
144 self._update_mappers()
145
146 def _draw_default_axes(self, gc):
147 if not self.origin_axis_visible:
148 return
149
150 with gc:
151 gc.set_stroke_color(self.origin_axis_color_)
152 gc.set_line_width(self.origin_axis_width)
153 gc.set_line_dash(self.grid_style_)
154 x_data, y_data = transpose(self._cached_data_pts)
155 x_center = self.x + self.width / 2.0
156 y_center = self.y + self.height / 2.0
157
158 for theta in range(12):
159 r = min(self.width / 2.0, self.height / 2.0)
160 x = r * cos(theta * pi / 6) + x_center
161 y = r * sin(theta * pi / 6) + y_center
162 data_pts = array([[x_center, y_center], [x, y]])
163 start, end = data_pts
164 gc.move_to(int(start[0]), int(start[1]))
165 gc.line_to(int(end[0]), int(end[1]))
166 gc.stroke_path()
167
168 def _draw_default_grid(self, gc):
169 if not self.grid_visible:
170 return
171
172 with gc:
173 gc.set_stroke_color(self.origin_axis_color_)
174 gc.set_line_width(self.origin_axis_width)
175 gc.set_line_dash(self.grid_style_)
176 x_data, y_data = transpose(self._cached_data_pts)
177 x_center = self.x + self.width / 2.0
178 y_center = self.y + self.height / 2.0
179 rad = min(self.width / 2.0, self.height / 2.0)
180 for r_part in range(1, 5):
181 r = rad * r_part / 4
182 gc.arc(x_center, y_center, r, 0, 2 * pi)
183 gc.stroke_path()
184
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/chaco/plots/polar_line_renderer.py b/chaco/plots/polar_line_renderer.py
--- a/chaco/plots/polar_line_renderer.py
+++ b/chaco/plots/polar_line_renderer.py
@@ -130,7 +130,7 @@
def _downsample(self):
return self.map_screen(self._cached_data_pts)
- def _draw_plot(self, *args, **kw):
+ def _draw_plot(self, gc, *args, **kw):
"""Draws the 'plot' layer."""
self._gather_points()
self._render(gc, self._cached_data_pts)
| {"golden_diff": "diff --git a/chaco/plots/polar_line_renderer.py b/chaco/plots/polar_line_renderer.py\n--- a/chaco/plots/polar_line_renderer.py\n+++ b/chaco/plots/polar_line_renderer.py\n@@ -130,7 +130,7 @@\n def _downsample(self):\n return self.map_screen(self._cached_data_pts)\n \n- def _draw_plot(self, *args, **kw):\n+ def _draw_plot(self, gc, *args, **kw):\n \"\"\"Draws the 'plot' layer.\"\"\"\n self._gather_points()\n self._render(gc, self._cached_data_pts)\n", "issue": "NameError: name 'gc' is not defined\n**Problem Description**\nNameError: name 'gc' is not defined in chaco/chaco/plots/polar_line_renderer.py\n\n**Reproduction Steps:**\npython chaco/examples/demo/simple_polar.py\n\n[Steps to reproduce issue here.]\n\n```python\n# Code to reproduce issue here\n```\n\n**Expected behavior:** \n\n[MacOS, python3.8 (EDM)]\n\n**OS, Python version:** [Enter OS name and Python version]\n\n", "before_files": [{"content": "# (C) Copyright 2005-2021 Enthought, Inc., Austin, TX\n# All rights reserved.\n#\n# This software is provided without warranty under the terms of the BSD\n# license included in LICENSE.txt and may be redistributed only under\n# the conditions described in the aforementioned license. The license\n# is also available online at http://www.enthought.com/licenses/BSD.txt\n#\n# Thanks for using Enthought open source!\n\n\"\"\" Defines the PolarLineRenderer class.\n\"\"\"\n\n\n# Major library imports\nfrom numpy import array, cos, empty, pi, sin, transpose\n\n# Enthought library imports\nfrom enable.api import black_color_trait, LineStyle\nfrom traits.api import Float\n\n# Local, relative imports\nfrom chaco.abstract_plot_renderer import AbstractPlotRenderer\n\n\nclass PolarLineRenderer(AbstractPlotRenderer):\n \"\"\"A renderer for polar line plots.\"\"\"\n\n # ------------------------------------------------------------------------\n # Appearance-related traits\n # ------------------------------------------------------------------------\n\n # The color of the origin axis.\n origin_axis_color_ = (0, 0, 0, 1)\n # The width of the origin axis.\n origin_axis_width = 2.0\n # The origin axis is visible.\n origin_axis_visible = True\n # The grid is visible.\n grid_visible = True\n # The orientation of the plot is horizontal; for any other value, it is\n # transposed\n orientation = \"h\"\n # The color of the line.\n color = black_color_trait\n # The width of the line.\n line_width = Float(1.0)\n # The style of the line.\n line_style = LineStyle(\"solid\")\n # The style of the grid lines.\n grid_style = LineStyle(\"dot\")\n\n def _gather_points(self):\n \"\"\"\n Collects the data points that are within the plot bounds and caches them\n \"\"\"\n # This is just a stub for now. 
We should really find the lines only\n # inside the screen range here.\n\n x = self.index.get_data()\n y = self.value.get_data()\n rad = min(self.width / 2.0, self.height / 2.0)\n sx = x * rad + self.x + self.width / 2.0\n sy = y * rad + self.y + self.height / 2.0\n\n points = transpose(array((sx, sy)))\n self._cached_data_pts = points\n self._cache_valid = True\n\n def _data_changed(self):\n self._cache_valid = False\n\n def _update_mappers(self):\n # Dunno if there is anything else to do here\n self._cache_valid = False\n\n def _render(self, gc, points):\n \"\"\"Actually draw the plot.\"\"\"\n with gc:\n gc.set_antialias(True)\n self._draw_default_axes(gc)\n self._draw_default_grid(gc)\n if len(points) > 0:\n gc.clip_to_rect(self.x, self.y, self.width, self.height)\n gc.set_stroke_color(self.color_)\n gc.set_line_width(self.line_width)\n gc.set_line_dash(self.line_style_)\n\n gc.begin_path()\n gc.lines(points)\n gc.stroke_path()\n\n def map_screen(self, data_array):\n \"\"\"Maps an array of data points into screen space and returns it as\n an array.\n\n Implements the AbstractPlotRenderer interface.\n \"\"\"\n\n if len(data_array) == 0:\n return empty(shape=(0, 2))\n elif len(data_array) == 1:\n xtmp, ytmp = transpose(data_array)\n x_ary = xtmp\n y_ary = ytmp\n else:\n x_ary, y_ary = transpose(data_array)\n\n sx = self.index_mapper.map_screen(x_ary)\n sy = self.value_mapper.map_screen(y_ary)\n\n if self.orientation == \"h\":\n return transpose(array((sx, sy)))\n else:\n return transpose(array((sy, sx)))\n\n def map_data(self, screen_pt):\n \"\"\"Maps a screen space point into the \"index\" space of the plot.\n\n Implements the AbstractPlotRenderer interface.\n \"\"\"\n if self.orientation == \"h\":\n x, y = screen_pt\n else:\n y, x = screen_pt\n return array(\n (self.index_mapper.map_data(x), self.value_mapper.map_data(y))\n )\n\n def _downsample(self):\n return self.map_screen(self._cached_data_pts)\n\n def _draw_plot(self, *args, **kw):\n \"\"\"Draws the 'plot' layer.\"\"\"\n self._gather_points()\n self._render(gc, self._cached_data_pts)\n\n def _bounds_changed(self, old, new):\n super()._bounds_changed(old, new)\n self._update_mappers()\n\n def _bounds_items_changed(self, event):\n super()._bounds_items_changed(event)\n self._update_mappers()\n\n def _draw_default_axes(self, gc):\n if not self.origin_axis_visible:\n return\n\n with gc:\n gc.set_stroke_color(self.origin_axis_color_)\n gc.set_line_width(self.origin_axis_width)\n gc.set_line_dash(self.grid_style_)\n x_data, y_data = transpose(self._cached_data_pts)\n x_center = self.x + self.width / 2.0\n y_center = self.y + self.height / 2.0\n\n for theta in range(12):\n r = min(self.width / 2.0, self.height / 2.0)\n x = r * cos(theta * pi / 6) + x_center\n y = r * sin(theta * pi / 6) + y_center\n data_pts = array([[x_center, y_center], [x, y]])\n start, end = data_pts\n gc.move_to(int(start[0]), int(start[1]))\n gc.line_to(int(end[0]), int(end[1]))\n gc.stroke_path()\n\n def _draw_default_grid(self, gc):\n if not self.grid_visible:\n return\n\n with gc:\n gc.set_stroke_color(self.origin_axis_color_)\n gc.set_line_width(self.origin_axis_width)\n gc.set_line_dash(self.grid_style_)\n x_data, y_data = transpose(self._cached_data_pts)\n x_center = self.x + self.width / 2.0\n y_center = self.y + self.height / 2.0\n rad = min(self.width / 2.0, self.height / 2.0)\n for r_part in range(1, 5):\n r = rad * r_part / 4\n gc.arc(x_center, y_center, r, 0, 2 * pi)\n gc.stroke_path()\n", "path": "chaco/plots/polar_line_renderer.py"}], "after_files": 
[{"content": "# (C) Copyright 2005-2021 Enthought, Inc., Austin, TX\n# All rights reserved.\n#\n# This software is provided without warranty under the terms of the BSD\n# license included in LICENSE.txt and may be redistributed only under\n# the conditions described in the aforementioned license. The license\n# is also available online at http://www.enthought.com/licenses/BSD.txt\n#\n# Thanks for using Enthought open source!\n\n\"\"\" Defines the PolarLineRenderer class.\n\"\"\"\n\n\n# Major library imports\nfrom numpy import array, cos, empty, pi, sin, transpose\n\n# Enthought library imports\nfrom enable.api import black_color_trait, LineStyle\nfrom traits.api import Float\n\n# Local, relative imports\nfrom chaco.abstract_plot_renderer import AbstractPlotRenderer\n\n\nclass PolarLineRenderer(AbstractPlotRenderer):\n \"\"\"A renderer for polar line plots.\"\"\"\n\n # ------------------------------------------------------------------------\n # Appearance-related traits\n # ------------------------------------------------------------------------\n\n # The color of the origin axis.\n origin_axis_color_ = (0, 0, 0, 1)\n # The width of the origin axis.\n origin_axis_width = 2.0\n # The origin axis is visible.\n origin_axis_visible = True\n # The grid is visible.\n grid_visible = True\n # The orientation of the plot is horizontal; for any other value, it is\n # transposed\n orientation = \"h\"\n # The color of the line.\n color = black_color_trait\n # The width of the line.\n line_width = Float(1.0)\n # The style of the line.\n line_style = LineStyle(\"solid\")\n # The style of the grid lines.\n grid_style = LineStyle(\"dot\")\n\n def _gather_points(self):\n \"\"\"\n Collects the data points that are within the plot bounds and caches them\n \"\"\"\n # This is just a stub for now. 
We should really find the lines only\n # inside the screen range here.\n\n x = self.index.get_data()\n y = self.value.get_data()\n rad = min(self.width / 2.0, self.height / 2.0)\n sx = x * rad + self.x + self.width / 2.0\n sy = y * rad + self.y + self.height / 2.0\n\n points = transpose(array((sx, sy)))\n self._cached_data_pts = points\n self._cache_valid = True\n\n def _data_changed(self):\n self._cache_valid = False\n\n def _update_mappers(self):\n # Dunno if there is anything else to do here\n self._cache_valid = False\n\n def _render(self, gc, points):\n \"\"\"Actually draw the plot.\"\"\"\n with gc:\n gc.set_antialias(True)\n self._draw_default_axes(gc)\n self._draw_default_grid(gc)\n if len(points) > 0:\n gc.clip_to_rect(self.x, self.y, self.width, self.height)\n gc.set_stroke_color(self.color_)\n gc.set_line_width(self.line_width)\n gc.set_line_dash(self.line_style_)\n\n gc.begin_path()\n gc.lines(points)\n gc.stroke_path()\n\n def map_screen(self, data_array):\n \"\"\"Maps an array of data points into screen space and returns it as\n an array.\n\n Implements the AbstractPlotRenderer interface.\n \"\"\"\n\n if len(data_array) == 0:\n return empty(shape=(0, 2))\n elif len(data_array) == 1:\n xtmp, ytmp = transpose(data_array)\n x_ary = xtmp\n y_ary = ytmp\n else:\n x_ary, y_ary = transpose(data_array)\n\n sx = self.index_mapper.map_screen(x_ary)\n sy = self.value_mapper.map_screen(y_ary)\n\n if self.orientation == \"h\":\n return transpose(array((sx, sy)))\n else:\n return transpose(array((sy, sx)))\n\n def map_data(self, screen_pt):\n \"\"\"Maps a screen space point into the \"index\" space of the plot.\n\n Implements the AbstractPlotRenderer interface.\n \"\"\"\n if self.orientation == \"h\":\n x, y = screen_pt\n else:\n y, x = screen_pt\n return array(\n (self.index_mapper.map_data(x), self.value_mapper.map_data(y))\n )\n\n def _downsample(self):\n return self.map_screen(self._cached_data_pts)\n\n def _draw_plot(self, gc, *args, **kw):\n \"\"\"Draws the 'plot' layer.\"\"\"\n self._gather_points()\n self._render(gc, self._cached_data_pts)\n\n def _bounds_changed(self, old, new):\n super()._bounds_changed(old, new)\n self._update_mappers()\n\n def _bounds_items_changed(self, event):\n super()._bounds_items_changed(event)\n self._update_mappers()\n\n def _draw_default_axes(self, gc):\n if not self.origin_axis_visible:\n return\n\n with gc:\n gc.set_stroke_color(self.origin_axis_color_)\n gc.set_line_width(self.origin_axis_width)\n gc.set_line_dash(self.grid_style_)\n x_data, y_data = transpose(self._cached_data_pts)\n x_center = self.x + self.width / 2.0\n y_center = self.y + self.height / 2.0\n\n for theta in range(12):\n r = min(self.width / 2.0, self.height / 2.0)\n x = r * cos(theta * pi / 6) + x_center\n y = r * sin(theta * pi / 6) + y_center\n data_pts = array([[x_center, y_center], [x, y]])\n start, end = data_pts\n gc.move_to(int(start[0]), int(start[1]))\n gc.line_to(int(end[0]), int(end[1]))\n gc.stroke_path()\n\n def _draw_default_grid(self, gc):\n if not self.grid_visible:\n return\n\n with gc:\n gc.set_stroke_color(self.origin_axis_color_)\n gc.set_line_width(self.origin_axis_width)\n gc.set_line_dash(self.grid_style_)\n x_data, y_data = transpose(self._cached_data_pts)\n x_center = self.x + self.width / 2.0\n y_center = self.y + self.height / 2.0\n rad = min(self.width / 2.0, self.height / 2.0)\n for r_part in range(1, 5):\n r = rad * r_part / 4\n gc.arc(x_center, y_center, r, 0, 2 * pi)\n gc.stroke_path()\n", "path": "chaco/plots/polar_line_renderer.py"}]} | 2,258 | 
139 |
gh_patches_debug_9383 | rasdani/github-patches | git_diff | buildbot__buildbot-4099 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Workers configured_on gets corrupted for multi-master
https://github.com/buildbot/buildbot/issues/3981 fixed the issue of Workers configured_on getting corrupted. It works for single master scenario. However, for multi-master scenario, this bug is still present.
I have two masters (running 1.1.1):
1: handling the web-server
2: handling workers communication, builds and everything else
If I restart 1 and then 2, then it's fine. However, if I restart 2 and then 1, then the workers configured_on is corrupted. The issue reproduce immediately on restarting 1.
If I restart only 2, then the issue doesn't happen. However, restarting 1 consistently reproduces the issue.
I suspect that the HouseKeeping which is done while starting the masters might be causing the issue.
https://github.com/buildbot/buildbot/blob/master/master/buildbot/master.py#L300
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `master/buildbot/data/masters.py`
Content:
```
1 # This file is part of Buildbot. Buildbot is free software: you can
2 # redistribute it and/or modify it under the terms of the GNU General Public
3 # License as published by the Free Software Foundation, version 2.
4 #
5 # This program is distributed in the hope that it will be useful, but WITHOUT
6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
8 # details.
9 #
10 # You should have received a copy of the GNU General Public License along with
11 # this program; if not, write to the Free Software Foundation, Inc., 51
12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
13 #
14 # Copyright Buildbot Team Members
15
16 from __future__ import absolute_import
17 from __future__ import print_function
18
19 from twisted.internet import defer
20 from twisted.internet import reactor
21 from twisted.python import log
22
23 from buildbot.data import base
24 from buildbot.data import resultspec
25 from buildbot.data import types
26 from buildbot.process.results import RETRY
27 from buildbot.util import epoch2datetime
28
29 # time, in minutes, after which a master that hasn't checked in will be
30 # marked as inactive
31 EXPIRE_MINUTES = 10
32
33
34 def _db2data(master):
35 return dict(masterid=master['id'],
36 name=master['name'],
37 active=master['active'],
38 last_active=master['last_active'])
39
40
41 class MasterEndpoint(base.Endpoint):
42
43 isCollection = False
44 pathPatterns = """
45 /masters/n:masterid
46 /builders/n:builderid/masters/n:masterid
47 """
48
49 @defer.inlineCallbacks
50 def get(self, resultSpec, kwargs):
51 # if a builder is given, only return the master if it's associated with
52 # this builder
53 if 'builderid' in kwargs:
54 builder = yield self.master.db.builders.getBuilder(
55 builderid=kwargs['builderid'])
56 if not builder or kwargs['masterid'] not in builder['masterids']:
57 defer.returnValue(None)
58 return
59 m = yield self.master.db.masters.getMaster(kwargs['masterid'])
60 defer.returnValue(_db2data(m) if m else None)
61
62
63 class MastersEndpoint(base.Endpoint):
64
65 isCollection = True
66 pathPatterns = """
67 /masters
68 /builders/n:builderid/masters
69 """
70 rootLinkName = 'masters'
71
72 @defer.inlineCallbacks
73 def get(self, resultSpec, kwargs):
74 masterlist = yield self.master.db.masters.getMasters()
75 if 'builderid' in kwargs:
76 builder = yield self.master.db.builders.getBuilder(
77 builderid=kwargs['builderid'])
78 if builder:
79 masterids = set(builder['masterids'])
80 masterlist = [m for m in masterlist if m['id'] in masterids]
81 else:
82 masterlist = []
83 defer.returnValue([_db2data(m) for m in masterlist])
84
85
86 class Master(base.ResourceType):
87
88 name = "master"
89 plural = "masters"
90 endpoints = [MasterEndpoint, MastersEndpoint]
91 eventPathPatterns = """
92 /masters/:masterid
93 """
94
95 class EntityType(types.Entity):
96 masterid = types.Integer()
97 name = types.String()
98 active = types.Boolean()
99 last_active = types.DateTime()
100 entityType = EntityType(name)
101
102 @base.updateMethod
103 @defer.inlineCallbacks
104 def masterActive(self, name, masterid, _reactor=reactor):
105 activated = yield self.master.db.masters.setMasterState(
106 masterid=masterid, active=True, _reactor=_reactor)
107 if activated:
108 self.produceEvent(
109 dict(masterid=masterid, name=name, active=True),
110 'started')
111
112 @base.updateMethod
113 @defer.inlineCallbacks
114 def expireMasters(self, forceHouseKeeping=False, _reactor=reactor):
115 too_old = epoch2datetime(_reactor.seconds() - 60 * EXPIRE_MINUTES)
116 masters = yield self.master.db.masters.getMasters()
117 for m in masters:
118 if not forceHouseKeeping and m['last_active'] is not None and m['last_active'] >= too_old:
119 continue
120
121 # mark the master inactive, and send a message on its behalf
122 deactivated = yield self.master.db.masters.setMasterState(
123 masterid=m['id'], active=False, _reactor=_reactor)
124 if deactivated:
125 yield self._masterDeactivated(m['id'], m['name'])
126 elif forceHouseKeeping:
127 yield self._masterDeactivatedHousekeeping(m['id'], m['name'])
128
129 @base.updateMethod
130 @defer.inlineCallbacks
131 def masterStopped(self, name, masterid):
132 deactivated = yield self.master.db.masters.setMasterState(
133 masterid=masterid, active=False)
134 if deactivated:
135 yield self._masterDeactivated(masterid, name)
136
137 @defer.inlineCallbacks
138 def _masterDeactivatedHousekeeping(self, masterid, name):
139 log.msg("doing housekeeping for master %d %s" % (masterid, name))
140
141 # common code for deactivating a master
142 yield self.master.data.rtypes.worker._masterDeactivated(
143 masterid=masterid)
144 yield self.master.data.rtypes.builder._masterDeactivated(
145 masterid=masterid)
146 yield self.master.data.rtypes.scheduler._masterDeactivated(
147 masterid=masterid)
148 yield self.master.data.rtypes.changesource._masterDeactivated(
149 masterid=masterid)
150
151 # for each build running on that instance..
152 builds = yield self.master.data.get(('builds',),
153 filters=[resultspec.Filter('masterid', 'eq', [masterid]),
154 resultspec.Filter('complete', 'eq', [False])])
155 for build in builds:
156 # stop any running steps..
157 steps = yield self.master.data.get(
158 ('builds', build['buildid'], 'steps'),
159 filters=[resultspec.Filter('results', 'eq', [None])])
160 for step in steps:
161 # finish remaining logs for those steps..
162 logs = yield self.master.data.get(
163 ('steps', step['stepid'], 'logs'),
164 filters=[resultspec.Filter('complete', 'eq',
165 [False])])
166 for _log in logs:
167 yield self.master.data.updates.finishLog(
168 logid=_log['logid'])
169 yield self.master.data.updates.finishStep(
170 stepid=step['stepid'], results=RETRY, hidden=False)
171 # then stop the build itself
172 yield self.master.data.updates.finishBuild(
173 buildid=build['buildid'], results=RETRY)
174
175 # unclaim all of the build requests owned by the deactivated instance
176 buildrequests = yield self.master.db.buildrequests.getBuildRequests(
177 complete=False, claimed=masterid)
178 yield self.master.db.buildrequests.unclaimBuildRequests(
179 brids=[br['buildrequestid'] for br in buildrequests])
180
181 @defer.inlineCallbacks
182 def _masterDeactivated(self, masterid, name):
183 yield self._masterDeactivatedHousekeeping(masterid, name)
184
185 self.produceEvent(
186 dict(masterid=masterid, name=name, active=False),
187 'stopped')
188
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/master/buildbot/data/masters.py b/master/buildbot/data/masters.py
--- a/master/buildbot/data/masters.py
+++ b/master/buildbot/data/masters.py
@@ -115,7 +115,7 @@
too_old = epoch2datetime(_reactor.seconds() - 60 * EXPIRE_MINUTES)
masters = yield self.master.db.masters.getMasters()
for m in masters:
- if not forceHouseKeeping and m['last_active'] is not None and m['last_active'] >= too_old:
+ if m['last_active'] is not None and m['last_active'] >= too_old:
continue
# mark the master inactive, and send a message on its behalf
| {"golden_diff": "diff --git a/master/buildbot/data/masters.py b/master/buildbot/data/masters.py\n--- a/master/buildbot/data/masters.py\n+++ b/master/buildbot/data/masters.py\n@@ -115,7 +115,7 @@\n too_old = epoch2datetime(_reactor.seconds() - 60 * EXPIRE_MINUTES)\n masters = yield self.master.db.masters.getMasters()\n for m in masters:\n- if not forceHouseKeeping and m['last_active'] is not None and m['last_active'] >= too_old:\n+ if m['last_active'] is not None and m['last_active'] >= too_old:\n continue\n \n # mark the master inactive, and send a message on its behalf\n", "issue": "Workers configured_on gets corrupted for multi-master\nhttps://github.com/buildbot/buildbot/issues/3981 fixed the issue of Workers configured_on getting corrupted. It works for single master scenario. However, for multi-master scenario, this bug is still present.\r\n\r\nI have two masters (running 1.1.1):\r\n1: handling the web-server\r\n2: handling workers communication, builds and everything else\r\n\r\nIf I restart 1 and then 2, then it's fine. However, if I restart 2 and then 1, then the workers configured_on is corrupted. The issue reproduce immediately on restarting 1.\r\n\r\nIf I restart only 2, then the issue doesn't happen. However, restarting 1 consistently reproduces the issue.\r\n\r\nI suspect that the HouseKeeping which is done while starting the masters might be causing the issue.\r\n\r\nhttps://github.com/buildbot/buildbot/blob/master/master/buildbot/master.py#L300\r\n\n", "before_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nfrom twisted.internet import defer\nfrom twisted.internet import reactor\nfrom twisted.python import log\n\nfrom buildbot.data import base\nfrom buildbot.data import resultspec\nfrom buildbot.data import types\nfrom buildbot.process.results import RETRY\nfrom buildbot.util import epoch2datetime\n\n# time, in minutes, after which a master that hasn't checked in will be\n# marked as inactive\nEXPIRE_MINUTES = 10\n\n\ndef _db2data(master):\n return dict(masterid=master['id'],\n name=master['name'],\n active=master['active'],\n last_active=master['last_active'])\n\n\nclass MasterEndpoint(base.Endpoint):\n\n isCollection = False\n pathPatterns = \"\"\"\n /masters/n:masterid\n /builders/n:builderid/masters/n:masterid\n \"\"\"\n\n @defer.inlineCallbacks\n def get(self, resultSpec, kwargs):\n # if a builder is given, only return the master if it's associated with\n # this builder\n if 'builderid' in kwargs:\n builder = yield self.master.db.builders.getBuilder(\n builderid=kwargs['builderid'])\n if not builder or kwargs['masterid'] not in builder['masterids']:\n defer.returnValue(None)\n return\n m = yield self.master.db.masters.getMaster(kwargs['masterid'])\n defer.returnValue(_db2data(m) if m else None)\n\n\nclass MastersEndpoint(base.Endpoint):\n\n isCollection = True\n pathPatterns = \"\"\"\n /masters\n /builders/n:builderid/masters\n \"\"\"\n rootLinkName = 'masters'\n\n @defer.inlineCallbacks\n def get(self, resultSpec, kwargs):\n masterlist = yield self.master.db.masters.getMasters()\n if 'builderid' in kwargs:\n builder = yield self.master.db.builders.getBuilder(\n builderid=kwargs['builderid'])\n if builder:\n masterids = set(builder['masterids'])\n masterlist = [m for m in masterlist if m['id'] in masterids]\n else:\n masterlist = []\n defer.returnValue([_db2data(m) for m in masterlist])\n\n\nclass Master(base.ResourceType):\n\n name = \"master\"\n plural = \"masters\"\n endpoints = [MasterEndpoint, MastersEndpoint]\n eventPathPatterns = \"\"\"\n /masters/:masterid\n \"\"\"\n\n class EntityType(types.Entity):\n masterid = types.Integer()\n name = types.String()\n active = types.Boolean()\n last_active = types.DateTime()\n entityType = EntityType(name)\n\n @base.updateMethod\n @defer.inlineCallbacks\n def masterActive(self, name, masterid, _reactor=reactor):\n activated = yield self.master.db.masters.setMasterState(\n masterid=masterid, active=True, _reactor=_reactor)\n if activated:\n self.produceEvent(\n dict(masterid=masterid, name=name, active=True),\n 'started')\n\n @base.updateMethod\n @defer.inlineCallbacks\n def expireMasters(self, forceHouseKeeping=False, _reactor=reactor):\n too_old = epoch2datetime(_reactor.seconds() - 60 * EXPIRE_MINUTES)\n masters = yield self.master.db.masters.getMasters()\n for m in masters:\n if not forceHouseKeeping and m['last_active'] is not None and m['last_active'] >= too_old:\n continue\n\n # mark the master inactive, and send a message on its behalf\n deactivated = yield self.master.db.masters.setMasterState(\n masterid=m['id'], active=False, _reactor=_reactor)\n if deactivated:\n yield self._masterDeactivated(m['id'], m['name'])\n elif forceHouseKeeping:\n yield 
self._masterDeactivatedHousekeeping(m['id'], m['name'])\n\n @base.updateMethod\n @defer.inlineCallbacks\n def masterStopped(self, name, masterid):\n deactivated = yield self.master.db.masters.setMasterState(\n masterid=masterid, active=False)\n if deactivated:\n yield self._masterDeactivated(masterid, name)\n\n @defer.inlineCallbacks\n def _masterDeactivatedHousekeeping(self, masterid, name):\n log.msg(\"doing housekeeping for master %d %s\" % (masterid, name))\n\n # common code for deactivating a master\n yield self.master.data.rtypes.worker._masterDeactivated(\n masterid=masterid)\n yield self.master.data.rtypes.builder._masterDeactivated(\n masterid=masterid)\n yield self.master.data.rtypes.scheduler._masterDeactivated(\n masterid=masterid)\n yield self.master.data.rtypes.changesource._masterDeactivated(\n masterid=masterid)\n\n # for each build running on that instance..\n builds = yield self.master.data.get(('builds',),\n filters=[resultspec.Filter('masterid', 'eq', [masterid]),\n resultspec.Filter('complete', 'eq', [False])])\n for build in builds:\n # stop any running steps..\n steps = yield self.master.data.get(\n ('builds', build['buildid'], 'steps'),\n filters=[resultspec.Filter('results', 'eq', [None])])\n for step in steps:\n # finish remaining logs for those steps..\n logs = yield self.master.data.get(\n ('steps', step['stepid'], 'logs'),\n filters=[resultspec.Filter('complete', 'eq',\n [False])])\n for _log in logs:\n yield self.master.data.updates.finishLog(\n logid=_log['logid'])\n yield self.master.data.updates.finishStep(\n stepid=step['stepid'], results=RETRY, hidden=False)\n # then stop the build itself\n yield self.master.data.updates.finishBuild(\n buildid=build['buildid'], results=RETRY)\n\n # unclaim all of the build requests owned by the deactivated instance\n buildrequests = yield self.master.db.buildrequests.getBuildRequests(\n complete=False, claimed=masterid)\n yield self.master.db.buildrequests.unclaimBuildRequests(\n brids=[br['buildrequestid'] for br in buildrequests])\n\n @defer.inlineCallbacks\n def _masterDeactivated(self, masterid, name):\n yield self._masterDeactivatedHousekeeping(masterid, name)\n\n self.produceEvent(\n dict(masterid=masterid, name=name, active=False),\n 'stopped')\n", "path": "master/buildbot/data/masters.py"}], "after_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nfrom twisted.internet import defer\nfrom twisted.internet import reactor\nfrom twisted.python import log\n\nfrom buildbot.data import base\nfrom buildbot.data import resultspec\nfrom buildbot.data import types\nfrom buildbot.process.results import RETRY\nfrom buildbot.util import epoch2datetime\n\n# time, in minutes, after which a master that hasn't checked in will be\n# marked as inactive\nEXPIRE_MINUTES = 10\n\n\ndef _db2data(master):\n return dict(masterid=master['id'],\n name=master['name'],\n active=master['active'],\n last_active=master['last_active'])\n\n\nclass MasterEndpoint(base.Endpoint):\n\n isCollection = False\n pathPatterns = \"\"\"\n /masters/n:masterid\n /builders/n:builderid/masters/n:masterid\n \"\"\"\n\n @defer.inlineCallbacks\n def get(self, resultSpec, kwargs):\n # if a builder is given, only return the master if it's associated with\n # this builder\n if 'builderid' in kwargs:\n builder = yield self.master.db.builders.getBuilder(\n builderid=kwargs['builderid'])\n if not builder or kwargs['masterid'] not in builder['masterids']:\n defer.returnValue(None)\n return\n m = yield self.master.db.masters.getMaster(kwargs['masterid'])\n defer.returnValue(_db2data(m) if m else None)\n\n\nclass MastersEndpoint(base.Endpoint):\n\n isCollection = True\n pathPatterns = \"\"\"\n /masters\n /builders/n:builderid/masters\n \"\"\"\n rootLinkName = 'masters'\n\n @defer.inlineCallbacks\n def get(self, resultSpec, kwargs):\n masterlist = yield self.master.db.masters.getMasters()\n if 'builderid' in kwargs:\n builder = yield self.master.db.builders.getBuilder(\n builderid=kwargs['builderid'])\n if builder:\n masterids = set(builder['masterids'])\n masterlist = [m for m in masterlist if m['id'] in masterids]\n else:\n masterlist = []\n defer.returnValue([_db2data(m) for m in masterlist])\n\n\nclass Master(base.ResourceType):\n\n name = \"master\"\n plural = \"masters\"\n endpoints = [MasterEndpoint, MastersEndpoint]\n eventPathPatterns = \"\"\"\n /masters/:masterid\n \"\"\"\n\n class EntityType(types.Entity):\n masterid = types.Integer()\n name = types.String()\n active = types.Boolean()\n last_active = types.DateTime()\n entityType = EntityType(name)\n\n @base.updateMethod\n @defer.inlineCallbacks\n def masterActive(self, name, masterid, _reactor=reactor):\n activated = yield self.master.db.masters.setMasterState(\n masterid=masterid, active=True, _reactor=_reactor)\n if activated:\n self.produceEvent(\n dict(masterid=masterid, name=name, active=True),\n 'started')\n\n @base.updateMethod\n @defer.inlineCallbacks\n def expireMasters(self, forceHouseKeeping=False, _reactor=reactor):\n too_old = epoch2datetime(_reactor.seconds() - 60 * EXPIRE_MINUTES)\n masters = yield self.master.db.masters.getMasters()\n for m in masters:\n if m['last_active'] is not None and m['last_active'] >= too_old:\n continue\n\n # mark the master inactive, and send a message on its behalf\n deactivated = yield self.master.db.masters.setMasterState(\n masterid=m['id'], active=False, _reactor=_reactor)\n if deactivated:\n yield self._masterDeactivated(m['id'], m['name'])\n elif forceHouseKeeping:\n yield 
self._masterDeactivatedHousekeeping(m['id'], m['name'])\n\n @base.updateMethod\n @defer.inlineCallbacks\n def masterStopped(self, name, masterid):\n deactivated = yield self.master.db.masters.setMasterState(\n masterid=masterid, active=False)\n if deactivated:\n yield self._masterDeactivated(masterid, name)\n\n @defer.inlineCallbacks\n def _masterDeactivatedHousekeeping(self, masterid, name):\n log.msg(\"doing housekeeping for master %d %s\" % (masterid, name))\n\n # common code for deactivating a master\n yield self.master.data.rtypes.worker._masterDeactivated(\n masterid=masterid)\n yield self.master.data.rtypes.builder._masterDeactivated(\n masterid=masterid)\n yield self.master.data.rtypes.scheduler._masterDeactivated(\n masterid=masterid)\n yield self.master.data.rtypes.changesource._masterDeactivated(\n masterid=masterid)\n\n # for each build running on that instance..\n builds = yield self.master.data.get(('builds',),\n filters=[resultspec.Filter('masterid', 'eq', [masterid]),\n resultspec.Filter('complete', 'eq', [False])])\n for build in builds:\n # stop any running steps..\n steps = yield self.master.data.get(\n ('builds', build['buildid'], 'steps'),\n filters=[resultspec.Filter('results', 'eq', [None])])\n for step in steps:\n # finish remaining logs for those steps..\n logs = yield self.master.data.get(\n ('steps', step['stepid'], 'logs'),\n filters=[resultspec.Filter('complete', 'eq',\n [False])])\n for _log in logs:\n yield self.master.data.updates.finishLog(\n logid=_log['logid'])\n yield self.master.data.updates.finishStep(\n stepid=step['stepid'], results=RETRY, hidden=False)\n # then stop the build itself\n yield self.master.data.updates.finishBuild(\n buildid=build['buildid'], results=RETRY)\n\n # unclaim all of the build requests owned by the deactivated instance\n buildrequests = yield self.master.db.buildrequests.getBuildRequests(\n complete=False, claimed=masterid)\n yield self.master.db.buildrequests.unclaimBuildRequests(\n brids=[br['buildrequestid'] for br in buildrequests])\n\n @defer.inlineCallbacks\n def _masterDeactivated(self, masterid, name):\n yield self._masterDeactivatedHousekeeping(masterid, name)\n\n self.produceEvent(\n dict(masterid=masterid, name=name, active=False),\n 'stopped')\n", "path": "master/buildbot/data/masters.py"}]} | 2,494 | 160 |
gh_patches_debug_25818 | rasdani/github-patches | git_diff | conda__conda-build-1105 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
conda-build crashes when building PEP420 namespace packages
There is a very trivial bug in `conda_build.utils.copy_into()`, causing it to crash when building PEP420 namespace packages. To reproduce the issue, all of the following conditions must be met:
- your package must be a native PEP420 namespace package (`python >=3.3`)
- your package must depend on (and therefore install) other packages mapping to the same namespace
- you must build all of these packages with `recipe['build']['preserve_egg_dir'] = False`
- your namespace must be a multi-level directory
This is easier to explain by example:
```
found egg dir: C:\ANACONDA\envs\_build\Lib\site-packages\example-4.3.0-py3.5-win-amd64.egg
...
Traceback (most recent call last):
File "C:\ANACONDA\Scripts\conda-build-script.py", line 4, in <module>
sys.exit(main())
File "C:\ANACONDA\lib\site-packages\conda_build\main_build.py", line 144, in main
args_func(args, p)
File "C:\ANACONDA\lib\site-packages\conda_build\main_build.py", line 389, in args_func
args.func(args, p)
File "C:\ANACONDA\lib\site-packages\conda_build\main_build.py", line 332, in execute
dirty=args.dirty, activate=args.activate)
File "C:\ANACONDA\lib\site-packages\conda_build\build.py", line 606, in build
preserve_egg_dir=bool(m.get_value('build/preserve_egg_dir')))
File "C:\ANACONDA\lib\site-packages\conda_build\post.py", line 162, in post_process
remove_easy_install_pth(files, preserve_egg_dir=preserve_egg_dir)
File "C:\ANACONDA\lib\site-packages\conda_build\post.py", line 119, in remove_easy_install_pth
utils.copy_into(join(egg_path, fn), join(sp_dir, fn))
File "C:\ANACONDA\lib\site-packages\conda_build\utils.py", line 54, in copy_into
shutil.copytree(srcname, dstname)
File "C:\ANACONDA\lib\shutil.py", line 177, in copytree
os.makedirs(dst)
File "C:\ANACONDA\lib\os.py", line 157, in makedirs
mkdir(name, mode)
WindowsError: [Error 183] Cannot create a file when that file already exists: 'C:\ANACONDA\envs\_build\Lib\site-packages\ns\sub'
```
Here `ns` is the top level namespace directory and `sub` is a sub-package, but also a namespace package:
```
+ ns
|
+ sub
|
module.py
```
The problem is that conda is trying to copy `ns\sub` to site-packages at build time. However, this directory already exists because another package -- a runtime dependency -- is already installed in site packages providing the same namespace directory layout:
```
+ ns
|
+ sub
|
dependency.py
```
`conda-build` needs to be able to "merge" the `sub` directories coming from all namespace packages. Deleting or skipping directories if they exist will corrupt the namespace and produce incorrect results. Here is the quickest way to fix this:
``` python
from distutils.dir_util import copy_tree
def copy_into(src, dst):
"Copy all the files and directories in src to the directory dst"
if not isdir(src):
tocopy = [src]
else:
tocopy = os.listdir(src)
for afile in tocopy:
srcname = os.path.join(src, afile)
dstname = os.path.join(dst, afile)
if os.path.isdir(srcname):
copy_tree(srcname, dstname)
else:
shutil.copy2(srcname, dstname)
```
i.e. just replace `shutil.copytree()` with `distutils.dir_util.copy_tree()`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda_build/utils.py`
Content:
```
1 from __future__ import absolute_import, division, print_function
2
3 import fnmatch
4 import os
5 import sys
6 import shutil
7 import tarfile
8 import zipfile
9 import subprocess
10 import operator
11 from os.path import dirname, getmtime, getsize, isdir, join
12 from collections import defaultdict
13
14 from conda.utils import md5_file, unix_path_to_win
15 from conda.compat import PY3, iteritems
16
17 from conda_build import external
18
19 # Backwards compatibility import. Do not remove.
20 from conda.install import rm_rf
21 rm_rf
22
23
24 def find_recipe(path):
25 """recurse through a folder, locating meta.yaml. Raises error if more than one is found.
26
27 Returns folder containing meta.yaml, to be built.
28
29 If we have a base level meta.yaml and other supplemental ones, use that first"""
30 results = rec_glob(path, ["meta.yaml", "conda.yaml"])
31 if len(results) > 1:
32 base_recipe = os.path.join(path, "meta.yaml")
33 if base_recipe in results:
34 return os.path.dirname(base_recipe)
35 else:
36 raise IOError("More than one meta.yaml files found in %s" % path)
37 elif not results:
38 raise IOError("No meta.yaml files found in %s" % path)
39 return os.path.dirname(results[0])
40
41
42 def copy_into(src, dst):
43 "Copy all the files and directories in src to the directory dst"
44
45 if not isdir(src):
46 tocopy = [src]
47 else:
48 tocopy = os.listdir(src)
49 for afile in tocopy:
50 srcname = os.path.join(src, afile)
51 dstname = os.path.join(dst, afile)
52
53 if os.path.isdir(srcname):
54 shutil.copytree(srcname, dstname)
55 else:
56 shutil.copy2(srcname, dstname)
57
58
59 def relative(f, d='lib'):
60 assert not f.startswith('/'), f
61 assert not d.startswith('/'), d
62 d = d.strip('/').split('/')
63 if d == ['.']:
64 d = []
65 f = dirname(f).split('/')
66 if f == ['']:
67 f = []
68 while d and f and d[0] == f[0]:
69 d.pop(0)
70 f.pop(0)
71 return '/'.join(((['..'] * len(f)) if f else ['.']) + d)
72
73
74 def _check_call(args, **kwargs):
75 try:
76 subprocess.check_call(args, **kwargs)
77 except subprocess.CalledProcessError:
78 sys.exit('Command failed: %s' % ' '.join(args))
79
80
81 def tar_xf(tarball, dir_path, mode='r:*'):
82 if tarball.lower().endswith('.tar.z'):
83 uncompress = external.find_executable('uncompress')
84 if not uncompress:
85 uncompress = external.find_executable('gunzip')
86 if not uncompress:
87 sys.exit("""\
88 uncompress (or gunzip) is required to unarchive .z source files.
89 """)
90 subprocess.check_call([uncompress, '-f', tarball])
91 tarball = tarball[:-2]
92 if not PY3 and tarball.endswith('.tar.xz'):
93 unxz = external.find_executable('unxz')
94 if not unxz:
95 sys.exit("""\
96 unxz is required to unarchive .xz source files.
97 """)
98
99 subprocess.check_call([unxz, '-f', '-k', tarball])
100 tarball = tarball[:-3]
101 t = tarfile.open(tarball, mode)
102 t.extractall(path=dir_path)
103 t.close()
104
105
106 def unzip(zip_path, dir_path):
107 z = zipfile.ZipFile(zip_path)
108 for name in z.namelist():
109 if name.endswith('/'):
110 continue
111 path = join(dir_path, *name.split('/'))
112 dp = dirname(path)
113 if not isdir(dp):
114 os.makedirs(dp)
115 with open(path, 'wb') as fo:
116 fo.write(z.read(name))
117 z.close()
118
119
120 def file_info(path):
121 return {'size': getsize(path),
122 'md5': md5_file(path),
123 'mtime': getmtime(path)}
124
125 # Taken from toolz
126
127
128 def groupby(key, seq):
129 """ Group a collection by a key function
130 >>> names = ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank']
131 >>> groupby(len, names) # doctest: +SKIP
132 {3: ['Bob', 'Dan'], 5: ['Alice', 'Edith', 'Frank'], 7: ['Charlie']}
133 >>> iseven = lambda x: x % 2 == 0
134 >>> groupby(iseven, [1, 2, 3, 4, 5, 6, 7, 8]) # doctest: +SKIP
135 {False: [1, 3, 5, 7], True: [2, 4, 6, 8]}
136 Non-callable keys imply grouping on a member.
137 >>> groupby('gender', [{'name': 'Alice', 'gender': 'F'},
138 ... {'name': 'Bob', 'gender': 'M'},
139 ... {'name': 'Charlie', 'gender': 'M'}]) # doctest:+SKIP
140 {'F': [{'gender': 'F', 'name': 'Alice'}],
141 'M': [{'gender': 'M', 'name': 'Bob'},
142 {'gender': 'M', 'name': 'Charlie'}]}
143 See Also:
144 countby
145 """
146 if not callable(key):
147 key = getter(key)
148 d = defaultdict(lambda: [].append)
149 for item in seq:
150 d[key(item)](item)
151 rv = {}
152 for k, v in iteritems(d):
153 rv[k] = v.__self__
154 return rv
155
156
157 def getter(index):
158 if isinstance(index, list):
159 if len(index) == 1:
160 index = index[0]
161 return lambda x: (x[index],)
162 elif index:
163 return operator.itemgetter(*index)
164 else:
165 return lambda x: ()
166 else:
167 return operator.itemgetter(index)
168
169
170 def comma_join(items):
171 """
172 Like ', '.join(items) but with and
173
174 Examples:
175
176 >>> comma_join(['a'])
177 'a'
178 >>> comma_join(['a', 'b'])
179 'a and b'
180 >>> comma_join(['a', 'b', 'c])
181 'a, b, and c'
182 """
183 return ' and '.join(items) if len(items) <= 2 else ', '.join(items[:-1]) + ', and ' + items[-1]
184
185
186 def safe_print_unicode(*args, **kwargs):
187 """
188 prints unicode strings to stdout using configurable `errors` handler for
189 encoding errors
190
191 :param args: unicode strings to print to stdout
192 :param sep: separator (defaults to ' ')
193 :param end: ending character (defaults to '\n')
194 :param errors: error handler for encoding errors (defaults to 'replace')
195 """
196 sep = kwargs.pop('sep', u' ')
197 end = kwargs.pop('end', u'\n')
198 errors = kwargs.pop('errors', 'replace')
199 if PY3:
200 func = sys.stdout.buffer.write
201 else:
202 func = sys.stdout.write
203 line = sep.join(args) + end
204 encoding = sys.stdout.encoding or 'utf8'
205 func(line.encode(encoding, errors))
206
207
208 def rec_glob(path, patterns):
209 result = []
210 for d_f in os.walk(path):
211 m = []
212 for pattern in patterns:
213 m.extend(fnmatch.filter(d_f[2], pattern))
214 if m:
215 result.extend([os.path.join(d_f[0], f) for f in m])
216 return result
217
218
219 def convert_unix_path_to_win(path):
220 if external.find_executable('cygpath'):
221 cmd = "cygpath -w {0}".format(path)
222 if PY3:
223 path = subprocess.getoutput(cmd)
224 else:
225 path = subprocess.check_output(cmd.split()).rstrip().rstrip("\\")
226
227 else:
228 path = unix_path_to_win(path)
229 return path
230
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conda_build/utils.py b/conda_build/utils.py
--- a/conda_build/utils.py
+++ b/conda_build/utils.py
@@ -8,8 +8,9 @@
import zipfile
import subprocess
import operator
-from os.path import dirname, getmtime, getsize, isdir, join
+from os.path import dirname, getmtime, getsize, isdir, isfile, join
from collections import defaultdict
+from distutils.dir_util import copy_tree
from conda.utils import md5_file, unix_path_to_win
from conda.compat import PY3, iteritems
@@ -51,11 +52,29 @@
dstname = os.path.join(dst, afile)
if os.path.isdir(srcname):
- shutil.copytree(srcname, dstname)
+ merge_tree(srcname, dstname)
else:
shutil.copy2(srcname, dstname)
+def merge_tree(src, dst):
+ """
+ Merge src into dst recursively by copying all files from src into dst.
+ Return a list of all files copied.
+
+ Like copy_tree(src, dst), but raises an error if merging the two trees
+ would overwrite any files.
+ """
+ new_files = copy_tree(src, dst, dry_run=True)
+ existing = [f for f in new_files if isfile(f)]
+
+ if existing:
+ raise IOError("Can't merge {0} into {1}: file exists: "
+ "{2}".format(src, dst, existing[0]))
+
+ return copy_tree(src, dst)
+
+
def relative(f, d='lib'):
assert not f.startswith('/'), f
assert not d.startswith('/'), d
| {"golden_diff": "diff --git a/conda_build/utils.py b/conda_build/utils.py\n--- a/conda_build/utils.py\n+++ b/conda_build/utils.py\n@@ -8,8 +8,9 @@\n import zipfile\n import subprocess\n import operator\n-from os.path import dirname, getmtime, getsize, isdir, join\n+from os.path import dirname, getmtime, getsize, isdir, isfile, join\n from collections import defaultdict\n+from distutils.dir_util import copy_tree\n \n from conda.utils import md5_file, unix_path_to_win\n from conda.compat import PY3, iteritems\n@@ -51,11 +52,29 @@\n dstname = os.path.join(dst, afile)\n \n if os.path.isdir(srcname):\n- shutil.copytree(srcname, dstname)\n+ merge_tree(srcname, dstname)\n else:\n shutil.copy2(srcname, dstname)\n \n \n+def merge_tree(src, dst):\n+ \"\"\"\n+ Merge src into dst recursively by copying all files from src into dst.\n+ Return a list of all files copied.\n+\n+ Like copy_tree(src, dst), but raises an error if merging the two trees\n+ would overwrite any files.\n+ \"\"\"\n+ new_files = copy_tree(src, dst, dry_run=True)\n+ existing = [f for f in new_files if isfile(f)]\n+\n+ if existing:\n+ raise IOError(\"Can't merge {0} into {1}: file exists: \"\n+ \"{2}\".format(src, dst, existing[0]))\n+\n+ return copy_tree(src, dst)\n+\n+\n def relative(f, d='lib'):\n assert not f.startswith('/'), f\n assert not d.startswith('/'), d\n", "issue": "conda-build crashes when building PEP420 namespace packages\nThere is a very trivial bug in `conda_build.utils.copy_into()`, causing it to crash when building PEP420 namespace packages. To reproduce the issue, all of the following conditions must be met:\n- your package must be a native PEP420 namespace package (`python >=3.3`)\n- your package must depend on (and therefore install) other packages mapping to the same namespace\n- you must build all of these packages with `recipe['build']['preserve_egg_dir'] = False`\n- your namespace must be a multi-level directory \n\nThis is easier to explain by example:\n\n```\nfound egg dir: C:\\ANACONDA\\envs\\_build\\Lib\\site-packages\\example-4.3.0-py3.5-win-amd64.egg\n...\nTraceback (most recent call last):\n File \"C:\\ANACONDA\\Scripts\\conda-build-script.py\", line 4, in <module>\n sys.exit(main())\n File \"C:\\ANACONDA\\lib\\site-packages\\conda_build\\main_build.py\", line 144, in main\n args_func(args, p)\n File \"C:\\ANACONDA\\lib\\site-packages\\conda_build\\main_build.py\", line 389, in args_func\n args.func(args, p)\n File \"C:\\ANACONDA\\lib\\site-packages\\conda_build\\main_build.py\", line 332, in execute\n dirty=args.dirty, activate=args.activate)\n File \"C:\\ANACONDA\\lib\\site-packages\\conda_build\\build.py\", line 606, in build\n preserve_egg_dir=bool(m.get_value('build/preserve_egg_dir')))\n File \"C:\\ANACONDA\\lib\\site-packages\\conda_build\\post.py\", line 162, in post_process\n remove_easy_install_pth(files, preserve_egg_dir=preserve_egg_dir)\n File \"C:\\ANACONDA\\lib\\site-packages\\conda_build\\post.py\", line 119, in remove_easy_install_pth\n utils.copy_into(join(egg_path, fn), join(sp_dir, fn))\n File \"C:\\ANACONDA\\lib\\site-packages\\conda_build\\utils.py\", line 54, in copy_into\n shutil.copytree(srcname, dstname)\n File \"C:\\ANACONDA\\lib\\shutil.py\", line 177, in copytree\n os.makedirs(dst)\n File \"C:\\ANACONDA\\lib\\os.py\", line 157, in makedirs\n mkdir(name, mode)\nWindowsError: [Error 183] Cannot create a file when that file already exists: 'C:\\ANACONDA\\envs\\_build\\Lib\\site-packages\\ns\\sub'\n```\n\nHere `ns` is the top level namespace directory and `sub` is a 
sub-package, but also a namespace package:\n\n```\n+ ns\n |\n + sub\n |\n module.py\n```\n\nThe problem is that conda is trying to copy `ns\\sub` to site-packages at build time. However, this directory already exists because another package -- a runtime dependency -- is already installed in site packages providing the same namespace directory layout:\n\n```\n+ ns\n |\n + sub\n |\n dependency.py\n```\n\n`conda-build` needs to be able to \"merge\" the `sub` directories coming from all namespace packages. Deleting or skipping directories if they exist will corrupt the namespace and produce incorrect results. Here is the quickest way to fix this:\n\n``` python\nfrom distutils.dir_util import copy_tree\n\ndef copy_into(src, dst):\n \"Copy all the files and directories in src to the directory dst\"\n\n if not isdir(src):\n tocopy = [src]\n else:\n tocopy = os.listdir(src)\n for afile in tocopy:\n srcname = os.path.join(src, afile)\n dstname = os.path.join(dst, afile)\n\n if os.path.isdir(srcname):\n copy_tree(srcname, dstname)\n else:\n shutil.copy2(srcname, dstname)\n```\n\ni.e. just replace `shutil.copytree()` with `distutils.dir_util.copy_tree()`.\n\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport fnmatch\nimport os\nimport sys\nimport shutil\nimport tarfile\nimport zipfile\nimport subprocess\nimport operator\nfrom os.path import dirname, getmtime, getsize, isdir, join\nfrom collections import defaultdict\n\nfrom conda.utils import md5_file, unix_path_to_win\nfrom conda.compat import PY3, iteritems\n\nfrom conda_build import external\n\n# Backwards compatibility import. Do not remove.\nfrom conda.install import rm_rf\nrm_rf\n\n\ndef find_recipe(path):\n \"\"\"recurse through a folder, locating meta.yaml. 
Raises error if more than one is found.\n\n Returns folder containing meta.yaml, to be built.\n\n If we have a base level meta.yaml and other supplemental ones, use that first\"\"\"\n results = rec_glob(path, [\"meta.yaml\", \"conda.yaml\"])\n if len(results) > 1:\n base_recipe = os.path.join(path, \"meta.yaml\")\n if base_recipe in results:\n return os.path.dirname(base_recipe)\n else:\n raise IOError(\"More than one meta.yaml files found in %s\" % path)\n elif not results:\n raise IOError(\"No meta.yaml files found in %s\" % path)\n return os.path.dirname(results[0])\n\n\ndef copy_into(src, dst):\n \"Copy all the files and directories in src to the directory dst\"\n\n if not isdir(src):\n tocopy = [src]\n else:\n tocopy = os.listdir(src)\n for afile in tocopy:\n srcname = os.path.join(src, afile)\n dstname = os.path.join(dst, afile)\n\n if os.path.isdir(srcname):\n shutil.copytree(srcname, dstname)\n else:\n shutil.copy2(srcname, dstname)\n\n\ndef relative(f, d='lib'):\n assert not f.startswith('/'), f\n assert not d.startswith('/'), d\n d = d.strip('/').split('/')\n if d == ['.']:\n d = []\n f = dirname(f).split('/')\n if f == ['']:\n f = []\n while d and f and d[0] == f[0]:\n d.pop(0)\n f.pop(0)\n return '/'.join(((['..'] * len(f)) if f else ['.']) + d)\n\n\ndef _check_call(args, **kwargs):\n try:\n subprocess.check_call(args, **kwargs)\n except subprocess.CalledProcessError:\n sys.exit('Command failed: %s' % ' '.join(args))\n\n\ndef tar_xf(tarball, dir_path, mode='r:*'):\n if tarball.lower().endswith('.tar.z'):\n uncompress = external.find_executable('uncompress')\n if not uncompress:\n uncompress = external.find_executable('gunzip')\n if not uncompress:\n sys.exit(\"\"\"\\\nuncompress (or gunzip) is required to unarchive .z source files.\n\"\"\")\n subprocess.check_call([uncompress, '-f', tarball])\n tarball = tarball[:-2]\n if not PY3 and tarball.endswith('.tar.xz'):\n unxz = external.find_executable('unxz')\n if not unxz:\n sys.exit(\"\"\"\\\nunxz is required to unarchive .xz source files.\n\"\"\")\n\n subprocess.check_call([unxz, '-f', '-k', tarball])\n tarball = tarball[:-3]\n t = tarfile.open(tarball, mode)\n t.extractall(path=dir_path)\n t.close()\n\n\ndef unzip(zip_path, dir_path):\n z = zipfile.ZipFile(zip_path)\n for name in z.namelist():\n if name.endswith('/'):\n continue\n path = join(dir_path, *name.split('/'))\n dp = dirname(path)\n if not isdir(dp):\n os.makedirs(dp)\n with open(path, 'wb') as fo:\n fo.write(z.read(name))\n z.close()\n\n\ndef file_info(path):\n return {'size': getsize(path),\n 'md5': md5_file(path),\n 'mtime': getmtime(path)}\n\n# Taken from toolz\n\n\ndef groupby(key, seq):\n \"\"\" Group a collection by a key function\n >>> names = ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank']\n >>> groupby(len, names) # doctest: +SKIP\n {3: ['Bob', 'Dan'], 5: ['Alice', 'Edith', 'Frank'], 7: ['Charlie']}\n >>> iseven = lambda x: x % 2 == 0\n >>> groupby(iseven, [1, 2, 3, 4, 5, 6, 7, 8]) # doctest: +SKIP\n {False: [1, 3, 5, 7], True: [2, 4, 6, 8]}\n Non-callable keys imply grouping on a member.\n >>> groupby('gender', [{'name': 'Alice', 'gender': 'F'},\n ... {'name': 'Bob', 'gender': 'M'},\n ... 
{'name': 'Charlie', 'gender': 'M'}]) # doctest:+SKIP\n {'F': [{'gender': 'F', 'name': 'Alice'}],\n 'M': [{'gender': 'M', 'name': 'Bob'},\n {'gender': 'M', 'name': 'Charlie'}]}\n See Also:\n countby\n \"\"\"\n if not callable(key):\n key = getter(key)\n d = defaultdict(lambda: [].append)\n for item in seq:\n d[key(item)](item)\n rv = {}\n for k, v in iteritems(d):\n rv[k] = v.__self__\n return rv\n\n\ndef getter(index):\n if isinstance(index, list):\n if len(index) == 1:\n index = index[0]\n return lambda x: (x[index],)\n elif index:\n return operator.itemgetter(*index)\n else:\n return lambda x: ()\n else:\n return operator.itemgetter(index)\n\n\ndef comma_join(items):\n \"\"\"\n Like ', '.join(items) but with and\n\n Examples:\n\n >>> comma_join(['a'])\n 'a'\n >>> comma_join(['a', 'b'])\n 'a and b'\n >>> comma_join(['a', 'b', 'c])\n 'a, b, and c'\n \"\"\"\n return ' and '.join(items) if len(items) <= 2 else ', '.join(items[:-1]) + ', and ' + items[-1]\n\n\ndef safe_print_unicode(*args, **kwargs):\n \"\"\"\n prints unicode strings to stdout using configurable `errors` handler for\n encoding errors\n\n :param args: unicode strings to print to stdout\n :param sep: separator (defaults to ' ')\n :param end: ending character (defaults to '\\n')\n :param errors: error handler for encoding errors (defaults to 'replace')\n \"\"\"\n sep = kwargs.pop('sep', u' ')\n end = kwargs.pop('end', u'\\n')\n errors = kwargs.pop('errors', 'replace')\n if PY3:\n func = sys.stdout.buffer.write\n else:\n func = sys.stdout.write\n line = sep.join(args) + end\n encoding = sys.stdout.encoding or 'utf8'\n func(line.encode(encoding, errors))\n\n\ndef rec_glob(path, patterns):\n result = []\n for d_f in os.walk(path):\n m = []\n for pattern in patterns:\n m.extend(fnmatch.filter(d_f[2], pattern))\n if m:\n result.extend([os.path.join(d_f[0], f) for f in m])\n return result\n\n\ndef convert_unix_path_to_win(path):\n if external.find_executable('cygpath'):\n cmd = \"cygpath -w {0}\".format(path)\n if PY3:\n path = subprocess.getoutput(cmd)\n else:\n path = subprocess.check_output(cmd.split()).rstrip().rstrip(\"\\\\\")\n\n else:\n path = unix_path_to_win(path)\n return path\n", "path": "conda_build/utils.py"}], "after_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport fnmatch\nimport os\nimport sys\nimport shutil\nimport tarfile\nimport zipfile\nimport subprocess\nimport operator\nfrom os.path import dirname, getmtime, getsize, isdir, isfile, join\nfrom collections import defaultdict\nfrom distutils.dir_util import copy_tree\n\nfrom conda.utils import md5_file, unix_path_to_win\nfrom conda.compat import PY3, iteritems\n\nfrom conda_build import external\n\n# Backwards compatibility import. Do not remove.\nfrom conda.install import rm_rf\nrm_rf\n\n\ndef find_recipe(path):\n \"\"\"recurse through a folder, locating meta.yaml. 
Raises error if more than one is found.\n\n Returns folder containing meta.yaml, to be built.\n\n If we have a base level meta.yaml and other supplemental ones, use that first\"\"\"\n results = rec_glob(path, [\"meta.yaml\", \"conda.yaml\"])\n if len(results) > 1:\n base_recipe = os.path.join(path, \"meta.yaml\")\n if base_recipe in results:\n return os.path.dirname(base_recipe)\n else:\n raise IOError(\"More than one meta.yaml files found in %s\" % path)\n elif not results:\n raise IOError(\"No meta.yaml files found in %s\" % path)\n return os.path.dirname(results[0])\n\n\ndef copy_into(src, dst):\n \"Copy all the files and directories in src to the directory dst\"\n\n if not isdir(src):\n tocopy = [src]\n else:\n tocopy = os.listdir(src)\n for afile in tocopy:\n srcname = os.path.join(src, afile)\n dstname = os.path.join(dst, afile)\n\n if os.path.isdir(srcname):\n merge_tree(srcname, dstname)\n else:\n shutil.copy2(srcname, dstname)\n\n\ndef merge_tree(src, dst):\n \"\"\"\n Merge src into dst recursively by copying all files from src into dst.\n Return a list of all files copied.\n\n Like copy_tree(src, dst), but raises an error if merging the two trees\n would overwrite any files.\n \"\"\"\n new_files = copy_tree(src, dst, dry_run=True)\n existing = [f for f in new_files if isfile(f)]\n\n if existing:\n raise IOError(\"Can't merge {0} into {1}: file exists: \"\n \"{2}\".format(src, dst, existing[0]))\n\n return copy_tree(src, dst)\n\n\ndef relative(f, d='lib'):\n assert not f.startswith('/'), f\n assert not d.startswith('/'), d\n d = d.strip('/').split('/')\n if d == ['.']:\n d = []\n f = dirname(f).split('/')\n if f == ['']:\n f = []\n while d and f and d[0] == f[0]:\n d.pop(0)\n f.pop(0)\n return '/'.join(((['..'] * len(f)) if f else ['.']) + d)\n\n\ndef _check_call(args, **kwargs):\n try:\n subprocess.check_call(args, **kwargs)\n except subprocess.CalledProcessError:\n sys.exit('Command failed: %s' % ' '.join(args))\n\n\ndef tar_xf(tarball, dir_path, mode='r:*'):\n if tarball.lower().endswith('.tar.z'):\n uncompress = external.find_executable('uncompress')\n if not uncompress:\n uncompress = external.find_executable('gunzip')\n if not uncompress:\n sys.exit(\"\"\"\\\nuncompress (or gunzip) is required to unarchive .z source files.\n\"\"\")\n subprocess.check_call([uncompress, '-f', tarball])\n tarball = tarball[:-2]\n if not PY3 and tarball.endswith('.tar.xz'):\n unxz = external.find_executable('unxz')\n if not unxz:\n sys.exit(\"\"\"\\\nunxz is required to unarchive .xz source files.\n\"\"\")\n\n subprocess.check_call([unxz, '-f', '-k', tarball])\n tarball = tarball[:-3]\n t = tarfile.open(tarball, mode)\n t.extractall(path=dir_path)\n t.close()\n\n\ndef unzip(zip_path, dir_path):\n z = zipfile.ZipFile(zip_path)\n for name in z.namelist():\n if name.endswith('/'):\n continue\n path = join(dir_path, *name.split('/'))\n dp = dirname(path)\n if not isdir(dp):\n os.makedirs(dp)\n with open(path, 'wb') as fo:\n fo.write(z.read(name))\n z.close()\n\n\ndef file_info(path):\n return {'size': getsize(path),\n 'md5': md5_file(path),\n 'mtime': getmtime(path)}\n\n# Taken from toolz\n\n\ndef groupby(key, seq):\n \"\"\" Group a collection by a key function\n >>> names = ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank']\n >>> groupby(len, names) # doctest: +SKIP\n {3: ['Bob', 'Dan'], 5: ['Alice', 'Edith', 'Frank'], 7: ['Charlie']}\n >>> iseven = lambda x: x % 2 == 0\n >>> groupby(iseven, [1, 2, 3, 4, 5, 6, 7, 8]) # doctest: +SKIP\n {False: [1, 3, 5, 7], True: [2, 4, 6, 8]}\n Non-callable keys 
imply grouping on a member.\n >>> groupby('gender', [{'name': 'Alice', 'gender': 'F'},\n ... {'name': 'Bob', 'gender': 'M'},\n ... {'name': 'Charlie', 'gender': 'M'}]) # doctest:+SKIP\n {'F': [{'gender': 'F', 'name': 'Alice'}],\n 'M': [{'gender': 'M', 'name': 'Bob'},\n {'gender': 'M', 'name': 'Charlie'}]}\n See Also:\n countby\n \"\"\"\n if not callable(key):\n key = getter(key)\n d = defaultdict(lambda: [].append)\n for item in seq:\n d[key(item)](item)\n rv = {}\n for k, v in iteritems(d):\n rv[k] = v.__self__\n return rv\n\n\ndef getter(index):\n if isinstance(index, list):\n if len(index) == 1:\n index = index[0]\n return lambda x: (x[index],)\n elif index:\n return operator.itemgetter(*index)\n else:\n return lambda x: ()\n else:\n return operator.itemgetter(index)\n\n\ndef comma_join(items):\n \"\"\"\n Like ', '.join(items) but with and\n\n Examples:\n\n >>> comma_join(['a'])\n 'a'\n >>> comma_join(['a', 'b'])\n 'a and b'\n >>> comma_join(['a', 'b', 'c])\n 'a, b, and c'\n \"\"\"\n return ' and '.join(items) if len(items) <= 2 else ', '.join(items[:-1]) + ', and ' + items[-1]\n\n\ndef safe_print_unicode(*args, **kwargs):\n \"\"\"\n prints unicode strings to stdout using configurable `errors` handler for\n encoding errors\n\n :param args: unicode strings to print to stdout\n :param sep: separator (defaults to ' ')\n :param end: ending character (defaults to '\\n')\n :param errors: error handler for encoding errors (defaults to 'replace')\n \"\"\"\n sep = kwargs.pop('sep', u' ')\n end = kwargs.pop('end', u'\\n')\n errors = kwargs.pop('errors', 'replace')\n if PY3:\n func = sys.stdout.buffer.write\n else:\n func = sys.stdout.write\n line = sep.join(args) + end\n encoding = sys.stdout.encoding or 'utf8'\n func(line.encode(encoding, errors))\n\n\ndef rec_glob(path, patterns):\n result = []\n for d_f in os.walk(path):\n m = []\n for pattern in patterns:\n m.extend(fnmatch.filter(d_f[2], pattern))\n if m:\n result.extend([os.path.join(d_f[0], f) for f in m])\n return result\n\n\ndef convert_unix_path_to_win(path):\n if external.find_executable('cygpath'):\n cmd = \"cygpath -w {0}\".format(path)\n if PY3:\n path = subprocess.getoutput(cmd)\n else:\n path = subprocess.check_output(cmd.split()).rstrip().rstrip(\"\\\\\")\n\n else:\n path = unix_path_to_win(path)\n return path\n", "path": "conda_build/utils.py"}]} | 3,536 | 374 |
gh_patches_debug_19453 | rasdani/github-patches | git_diff | psf__black-2343 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bad formatting of error messages about EOF in multi-line statements
**Describe the bug**
"EOF in multi-line statement" error is shown to the user as the raw tuple it was returned as, rather than in a nicely formatted message.
**To Reproduce**
For example:
1. Take this (invalid) file:
```py
print(
```
2. Run _Black_ on it with no arguments
3. See a message with the EOF error being shown as a raw tuple representation:
```
error: cannot format test_black.py: ('EOF in multi-line statement', (2, 0))
Oh no! 💥 💔 💥
1 file failed to reformat.
```
**Expected behavior**
A bit more nicely formatted error message.
**Environment (please complete the following information):**
- Version: <!-- e.g. [main] -->
- OS and Python version: <!-- e.g. [Linux/Python 3.7.4rc1] -->
**Does this bug also happen on main?**
Yes.
**Additional context**
For comparison, here's how I'm assuming the error should look like:
```
error: cannot format test_black.py: Cannot parse: 1:7: print([)
Oh no! 💥 💔 💥
1 file failed to reformat.
```
which is what you get in output when you try to format:
```py
print([)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/black/parsing.py`
Content:
```
1 """
2 Parse Python code and perform AST validation.
3 """
4 import ast
5 import platform
6 import sys
7 from typing import Any, Iterable, Iterator, List, Set, Tuple, Type, Union
8
9 if sys.version_info < (3, 8):
10 from typing_extensions import Final
11 else:
12 from typing import Final
13
14 # lib2to3 fork
15 from blib2to3.pytree import Node, Leaf
16 from blib2to3 import pygram
17 from blib2to3.pgen2 import driver
18 from blib2to3.pgen2.grammar import Grammar
19 from blib2to3.pgen2.parse import ParseError
20
21 from black.mode import TargetVersion, Feature, supports_feature
22 from black.nodes import syms
23
24 ast3: Any
25 ast27: Any
26
27 _IS_PYPY = platform.python_implementation() == "PyPy"
28
29 try:
30 from typed_ast import ast3, ast27
31 except ImportError:
32 # Either our python version is too low, or we're on pypy
33 if sys.version_info < (3, 7) or (sys.version_info < (3, 8) and not _IS_PYPY):
34 print(
35 "The typed_ast package is required but not installed.\n"
36 "You can upgrade to Python 3.8+ or install typed_ast with\n"
37 "`python3 -m pip install typed-ast`.",
38 file=sys.stderr,
39 )
40 sys.exit(1)
41 else:
42 ast3 = ast27 = ast
43
44
45 class InvalidInput(ValueError):
46 """Raised when input source code fails all parse attempts."""
47
48
49 def get_grammars(target_versions: Set[TargetVersion]) -> List[Grammar]:
50 if not target_versions:
51 # No target_version specified, so try all grammars.
52 return [
53 # Python 3.7+
54 pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords,
55 # Python 3.0-3.6
56 pygram.python_grammar_no_print_statement_no_exec_statement,
57 # Python 2.7 with future print_function import
58 pygram.python_grammar_no_print_statement,
59 # Python 2.7
60 pygram.python_grammar,
61 ]
62
63 if all(version.is_python2() for version in target_versions):
64 # Python 2-only code, so try Python 2 grammars.
65 return [
66 # Python 2.7 with future print_function import
67 pygram.python_grammar_no_print_statement,
68 # Python 2.7
69 pygram.python_grammar,
70 ]
71
72 # Python 3-compatible code, so only try Python 3 grammar.
73 grammars = []
74 if supports_feature(target_versions, Feature.PATTERN_MATCHING):
75 # Python 3.10+
76 grammars.append(pygram.python_grammar_soft_keywords)
77 # If we have to parse both, try to parse async as a keyword first
78 if not supports_feature(
79 target_versions, Feature.ASYNC_IDENTIFIERS
80 ) and not supports_feature(target_versions, Feature.PATTERN_MATCHING):
81 # Python 3.7-3.9
82 grammars.append(
83 pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords
84 )
85 if not supports_feature(target_versions, Feature.ASYNC_KEYWORDS):
86 # Python 3.0-3.6
87 grammars.append(pygram.python_grammar_no_print_statement_no_exec_statement)
88 # At least one of the above branches must have been taken, because every Python
89 # version has exactly one of the two 'ASYNC_*' flags
90 return grammars
91
92
93 def lib2to3_parse(src_txt: str, target_versions: Iterable[TargetVersion] = ()) -> Node:
94 """Given a string with source, return the lib2to3 Node."""
95 if not src_txt.endswith("\n"):
96 src_txt += "\n"
97
98 for grammar in get_grammars(set(target_versions)):
99 drv = driver.Driver(grammar)
100 try:
101 result = drv.parse_string(src_txt, True)
102 break
103
104 except ParseError as pe:
105 lineno, column = pe.context[1]
106 lines = src_txt.splitlines()
107 try:
108 faulty_line = lines[lineno - 1]
109 except IndexError:
110 faulty_line = "<line number missing in source>"
111 exc = InvalidInput(f"Cannot parse: {lineno}:{column}: {faulty_line}")
112 else:
113 raise exc from None
114
115 if isinstance(result, Leaf):
116 result = Node(syms.file_input, [result])
117 return result
118
119
120 def lib2to3_unparse(node: Node) -> str:
121 """Given a lib2to3 node, return its string representation."""
122 code = str(node)
123 return code
124
125
126 def parse_single_version(
127 src: str, version: Tuple[int, int]
128 ) -> Union[ast.AST, ast3.AST, ast27.AST]:
129 filename = "<unknown>"
130 # typed_ast is needed because of feature version limitations in the builtin ast
131 if sys.version_info >= (3, 8) and version >= (3,):
132 return ast.parse(src, filename, feature_version=version)
133 elif version >= (3,):
134 if _IS_PYPY:
135 return ast3.parse(src, filename)
136 else:
137 return ast3.parse(src, filename, feature_version=version[1])
138 elif version == (2, 7):
139 return ast27.parse(src)
140 raise AssertionError("INTERNAL ERROR: Tried parsing unsupported Python version!")
141
142
143 def parse_ast(src: str) -> Union[ast.AST, ast3.AST, ast27.AST]:
144 # TODO: support Python 4+ ;)
145 versions = [(3, minor) for minor in range(3, sys.version_info[1] + 1)]
146
147 if ast27.__name__ != "ast":
148 versions.append((2, 7))
149
150 first_error = ""
151 for version in sorted(versions, reverse=True):
152 try:
153 return parse_single_version(src, version)
154 except SyntaxError as e:
155 if not first_error:
156 first_error = str(e)
157
158 raise SyntaxError(first_error)
159
160
161 ast3_AST: Final[Type[ast3.AST]] = ast3.AST
162 ast27_AST: Final[Type[ast27.AST]] = ast27.AST
163
164
165 def stringify_ast(
166 node: Union[ast.AST, ast3.AST, ast27.AST], depth: int = 0
167 ) -> Iterator[str]:
168 """Simple visitor generating strings to compare ASTs by content."""
169
170 node = fixup_ast_constants(node)
171
172 yield f"{' ' * depth}{node.__class__.__name__}("
173
174 type_ignore_classes: Tuple[Type[Any], ...]
175 for field in sorted(node._fields): # noqa: F402
176 # TypeIgnore will not be present using pypy < 3.8, so need for this
177 if not (_IS_PYPY and sys.version_info < (3, 8)):
178 # TypeIgnore has only one field 'lineno' which breaks this comparison
179 type_ignore_classes = (ast3.TypeIgnore, ast27.TypeIgnore)
180 if sys.version_info >= (3, 8):
181 type_ignore_classes += (ast.TypeIgnore,)
182 if isinstance(node, type_ignore_classes):
183 break
184
185 try:
186 value = getattr(node, field)
187 except AttributeError:
188 continue
189
190 yield f"{' ' * (depth+1)}{field}="
191
192 if isinstance(value, list):
193 for item in value:
194 # Ignore nested tuples within del statements, because we may insert
195 # parentheses and they change the AST.
196 if (
197 field == "targets"
198 and isinstance(node, (ast.Delete, ast3.Delete, ast27.Delete))
199 and isinstance(item, (ast.Tuple, ast3.Tuple, ast27.Tuple))
200 ):
201 for item in item.elts:
202 yield from stringify_ast(item, depth + 2)
203
204 elif isinstance(item, (ast.AST, ast3.AST, ast27.AST)):
205 yield from stringify_ast(item, depth + 2)
206
207 # Note that we are referencing the typed-ast ASTs via global variables and not
208 # direct module attribute accesses because that breaks mypyc. It's probably
209 # something to do with the ast3 / ast27 variables being marked as Any leading
210 # mypy to think this branch is always taken, leaving the rest of the code
211 # unanalyzed. Tighting up the types for the typed-ast AST types avoids the
212 # mypyc crash.
213 elif isinstance(value, (ast.AST, ast3_AST, ast27_AST)):
214 yield from stringify_ast(value, depth + 2)
215
216 else:
217 # Constant strings may be indented across newlines, if they are
218 # docstrings; fold spaces after newlines when comparing. Similarly,
219 # trailing and leading space may be removed.
220 # Note that when formatting Python 2 code, at least with Windows
221 # line-endings, docstrings can end up here as bytes instead of
222 # str so make sure that we handle both cases.
223 if (
224 isinstance(node, ast.Constant)
225 and field == "value"
226 and isinstance(value, (str, bytes))
227 ):
228 lineend = "\n" if isinstance(value, str) else b"\n"
229 # To normalize, we strip any leading and trailing space from
230 # each line...
231 stripped = [line.strip() for line in value.splitlines()]
232 normalized = lineend.join(stripped) # type: ignore[attr-defined]
233 # ...and remove any blank lines at the beginning and end of
234 # the whole string
235 normalized = normalized.strip()
236 else:
237 normalized = value
238 yield f"{' ' * (depth+2)}{normalized!r}, # {value.__class__.__name__}"
239
240 yield f"{' ' * depth}) # /{node.__class__.__name__}"
241
242
243 def fixup_ast_constants(
244 node: Union[ast.AST, ast3.AST, ast27.AST]
245 ) -> Union[ast.AST, ast3.AST, ast27.AST]:
246 """Map ast nodes deprecated in 3.8 to Constant."""
247 if isinstance(node, (ast.Str, ast3.Str, ast27.Str, ast.Bytes, ast3.Bytes)):
248 return ast.Constant(value=node.s)
249
250 if isinstance(node, (ast.Num, ast3.Num, ast27.Num)):
251 return ast.Constant(value=node.n)
252
253 if isinstance(node, (ast.NameConstant, ast3.NameConstant)):
254 return ast.Constant(value=node.value)
255
256 return node
257
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/black/parsing.py b/src/black/parsing.py
--- a/src/black/parsing.py
+++ b/src/black/parsing.py
@@ -17,6 +17,7 @@
from blib2to3.pgen2 import driver
from blib2to3.pgen2.grammar import Grammar
from blib2to3.pgen2.parse import ParseError
+from blib2to3.pgen2.tokenize import TokenError
from black.mode import TargetVersion, Feature, supports_feature
from black.nodes import syms
@@ -109,6 +110,12 @@
except IndexError:
faulty_line = "<line number missing in source>"
exc = InvalidInput(f"Cannot parse: {lineno}:{column}: {faulty_line}")
+
+ except TokenError as te:
+ # In edge cases these are raised; and typically don't have a "faulty_line".
+ lineno, column = te.args[1]
+ exc = InvalidInput(f"Cannot parse: {lineno}:{column}: {te.args[0]}")
+
else:
raise exc from None
| {"golden_diff": "diff --git a/src/black/parsing.py b/src/black/parsing.py\n--- a/src/black/parsing.py\n+++ b/src/black/parsing.py\n@@ -17,6 +17,7 @@\n from blib2to3.pgen2 import driver\n from blib2to3.pgen2.grammar import Grammar\n from blib2to3.pgen2.parse import ParseError\n+from blib2to3.pgen2.tokenize import TokenError\n \n from black.mode import TargetVersion, Feature, supports_feature\n from black.nodes import syms\n@@ -109,6 +110,12 @@\n except IndexError:\n faulty_line = \"<line number missing in source>\"\n exc = InvalidInput(f\"Cannot parse: {lineno}:{column}: {faulty_line}\")\n+\n+ except TokenError as te:\n+ # In edge cases these are raised; and typically don't have a \"faulty_line\".\n+ lineno, column = te.args[1]\n+ exc = InvalidInput(f\"Cannot parse: {lineno}:{column}: {te.args[0]}\")\n+\n else:\n raise exc from None\n", "issue": "Bad formatting of error messages about EOF in multi-line statements\n**Describe the bug**\r\n\r\n\"EOF in multi-line statement\" error is shown to the user as the raw tuple it was returned as, rather than in a nicely formatted message.\r\n\r\n**To Reproduce**\r\n\r\nFor example:\r\n1. Take this (invalid) file:\r\n```py\r\nprint(\r\n\r\n```\r\n2. Run _Black_ on it with no arguments\r\n3. See a message with the EOF error being shown as a raw tuple representation:\r\n```\r\nerror: cannot format test_black.py: ('EOF in multi-line statement', (2, 0))\r\nOh no! \ud83d\udca5 \ud83d\udc94 \ud83d\udca5\r\n1 file failed to reformat.\r\n```\r\n\r\n**Expected behavior**\r\n\r\nA bit more nicely formatted error message.\r\n\r\n**Environment (please complete the following information):**\r\n\r\n- Version: <!-- e.g. [main] -->\r\n- OS and Python version: <!-- e.g. [Linux/Python 3.7.4rc1] -->\r\n\r\n**Does this bug also happen on main?**\r\n\r\nYes.\r\n\r\n**Additional context**\r\n\r\nFor comparison, here's how I'm assuming the error should look like:\r\n```\r\nerror: cannot format test_black.py: Cannot parse: 1:7: print([)\r\nOh no! 
\ud83d\udca5 \ud83d\udc94 \ud83d\udca5\r\n1 file failed to reformat.\r\n```\r\nwhich is what you get in output when you try to format:\r\n```py\r\nprint([)\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\nParse Python code and perform AST validation.\n\"\"\"\nimport ast\nimport platform\nimport sys\nfrom typing import Any, Iterable, Iterator, List, Set, Tuple, Type, Union\n\nif sys.version_info < (3, 8):\n from typing_extensions import Final\nelse:\n from typing import Final\n\n# lib2to3 fork\nfrom blib2to3.pytree import Node, Leaf\nfrom blib2to3 import pygram\nfrom blib2to3.pgen2 import driver\nfrom blib2to3.pgen2.grammar import Grammar\nfrom blib2to3.pgen2.parse import ParseError\n\nfrom black.mode import TargetVersion, Feature, supports_feature\nfrom black.nodes import syms\n\nast3: Any\nast27: Any\n\n_IS_PYPY = platform.python_implementation() == \"PyPy\"\n\ntry:\n from typed_ast import ast3, ast27\nexcept ImportError:\n # Either our python version is too low, or we're on pypy\n if sys.version_info < (3, 7) or (sys.version_info < (3, 8) and not _IS_PYPY):\n print(\n \"The typed_ast package is required but not installed.\\n\"\n \"You can upgrade to Python 3.8+ or install typed_ast with\\n\"\n \"`python3 -m pip install typed-ast`.\",\n file=sys.stderr,\n )\n sys.exit(1)\n else:\n ast3 = ast27 = ast\n\n\nclass InvalidInput(ValueError):\n \"\"\"Raised when input source code fails all parse attempts.\"\"\"\n\n\ndef get_grammars(target_versions: Set[TargetVersion]) -> List[Grammar]:\n if not target_versions:\n # No target_version specified, so try all grammars.\n return [\n # Python 3.7+\n pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords,\n # Python 3.0-3.6\n pygram.python_grammar_no_print_statement_no_exec_statement,\n # Python 2.7 with future print_function import\n pygram.python_grammar_no_print_statement,\n # Python 2.7\n pygram.python_grammar,\n ]\n\n if all(version.is_python2() for version in target_versions):\n # Python 2-only code, so try Python 2 grammars.\n return [\n # Python 2.7 with future print_function import\n pygram.python_grammar_no_print_statement,\n # Python 2.7\n pygram.python_grammar,\n ]\n\n # Python 3-compatible code, so only try Python 3 grammar.\n grammars = []\n if supports_feature(target_versions, Feature.PATTERN_MATCHING):\n # Python 3.10+\n grammars.append(pygram.python_grammar_soft_keywords)\n # If we have to parse both, try to parse async as a keyword first\n if not supports_feature(\n target_versions, Feature.ASYNC_IDENTIFIERS\n ) and not supports_feature(target_versions, Feature.PATTERN_MATCHING):\n # Python 3.7-3.9\n grammars.append(\n pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords\n )\n if not supports_feature(target_versions, Feature.ASYNC_KEYWORDS):\n # Python 3.0-3.6\n grammars.append(pygram.python_grammar_no_print_statement_no_exec_statement)\n # At least one of the above branches must have been taken, because every Python\n # version has exactly one of the two 'ASYNC_*' flags\n return grammars\n\n\ndef lib2to3_parse(src_txt: str, target_versions: Iterable[TargetVersion] = ()) -> Node:\n \"\"\"Given a string with source, return the lib2to3 Node.\"\"\"\n if not src_txt.endswith(\"\\n\"):\n src_txt += \"\\n\"\n\n for grammar in get_grammars(set(target_versions)):\n drv = driver.Driver(grammar)\n try:\n result = drv.parse_string(src_txt, True)\n break\n\n except ParseError as pe:\n lineno, column = pe.context[1]\n lines = src_txt.splitlines()\n try:\n faulty_line = lines[lineno - 1]\n except 
IndexError:\n faulty_line = \"<line number missing in source>\"\n exc = InvalidInput(f\"Cannot parse: {lineno}:{column}: {faulty_line}\")\n else:\n raise exc from None\n\n if isinstance(result, Leaf):\n result = Node(syms.file_input, [result])\n return result\n\n\ndef lib2to3_unparse(node: Node) -> str:\n \"\"\"Given a lib2to3 node, return its string representation.\"\"\"\n code = str(node)\n return code\n\n\ndef parse_single_version(\n src: str, version: Tuple[int, int]\n) -> Union[ast.AST, ast3.AST, ast27.AST]:\n filename = \"<unknown>\"\n # typed_ast is needed because of feature version limitations in the builtin ast\n if sys.version_info >= (3, 8) and version >= (3,):\n return ast.parse(src, filename, feature_version=version)\n elif version >= (3,):\n if _IS_PYPY:\n return ast3.parse(src, filename)\n else:\n return ast3.parse(src, filename, feature_version=version[1])\n elif version == (2, 7):\n return ast27.parse(src)\n raise AssertionError(\"INTERNAL ERROR: Tried parsing unsupported Python version!\")\n\n\ndef parse_ast(src: str) -> Union[ast.AST, ast3.AST, ast27.AST]:\n # TODO: support Python 4+ ;)\n versions = [(3, minor) for minor in range(3, sys.version_info[1] + 1)]\n\n if ast27.__name__ != \"ast\":\n versions.append((2, 7))\n\n first_error = \"\"\n for version in sorted(versions, reverse=True):\n try:\n return parse_single_version(src, version)\n except SyntaxError as e:\n if not first_error:\n first_error = str(e)\n\n raise SyntaxError(first_error)\n\n\nast3_AST: Final[Type[ast3.AST]] = ast3.AST\nast27_AST: Final[Type[ast27.AST]] = ast27.AST\n\n\ndef stringify_ast(\n node: Union[ast.AST, ast3.AST, ast27.AST], depth: int = 0\n) -> Iterator[str]:\n \"\"\"Simple visitor generating strings to compare ASTs by content.\"\"\"\n\n node = fixup_ast_constants(node)\n\n yield f\"{' ' * depth}{node.__class__.__name__}(\"\n\n type_ignore_classes: Tuple[Type[Any], ...]\n for field in sorted(node._fields): # noqa: F402\n # TypeIgnore will not be present using pypy < 3.8, so need for this\n if not (_IS_PYPY and sys.version_info < (3, 8)):\n # TypeIgnore has only one field 'lineno' which breaks this comparison\n type_ignore_classes = (ast3.TypeIgnore, ast27.TypeIgnore)\n if sys.version_info >= (3, 8):\n type_ignore_classes += (ast.TypeIgnore,)\n if isinstance(node, type_ignore_classes):\n break\n\n try:\n value = getattr(node, field)\n except AttributeError:\n continue\n\n yield f\"{' ' * (depth+1)}{field}=\"\n\n if isinstance(value, list):\n for item in value:\n # Ignore nested tuples within del statements, because we may insert\n # parentheses and they change the AST.\n if (\n field == \"targets\"\n and isinstance(node, (ast.Delete, ast3.Delete, ast27.Delete))\n and isinstance(item, (ast.Tuple, ast3.Tuple, ast27.Tuple))\n ):\n for item in item.elts:\n yield from stringify_ast(item, depth + 2)\n\n elif isinstance(item, (ast.AST, ast3.AST, ast27.AST)):\n yield from stringify_ast(item, depth + 2)\n\n # Note that we are referencing the typed-ast ASTs via global variables and not\n # direct module attribute accesses because that breaks mypyc. It's probably\n # something to do with the ast3 / ast27 variables being marked as Any leading\n # mypy to think this branch is always taken, leaving the rest of the code\n # unanalyzed. 
Tighting up the types for the typed-ast AST types avoids the\n # mypyc crash.\n elif isinstance(value, (ast.AST, ast3_AST, ast27_AST)):\n yield from stringify_ast(value, depth + 2)\n\n else:\n # Constant strings may be indented across newlines, if they are\n # docstrings; fold spaces after newlines when comparing. Similarly,\n # trailing and leading space may be removed.\n # Note that when formatting Python 2 code, at least with Windows\n # line-endings, docstrings can end up here as bytes instead of\n # str so make sure that we handle both cases.\n if (\n isinstance(node, ast.Constant)\n and field == \"value\"\n and isinstance(value, (str, bytes))\n ):\n lineend = \"\\n\" if isinstance(value, str) else b\"\\n\"\n # To normalize, we strip any leading and trailing space from\n # each line...\n stripped = [line.strip() for line in value.splitlines()]\n normalized = lineend.join(stripped) # type: ignore[attr-defined]\n # ...and remove any blank lines at the beginning and end of\n # the whole string\n normalized = normalized.strip()\n else:\n normalized = value\n yield f\"{' ' * (depth+2)}{normalized!r}, # {value.__class__.__name__}\"\n\n yield f\"{' ' * depth}) # /{node.__class__.__name__}\"\n\n\ndef fixup_ast_constants(\n node: Union[ast.AST, ast3.AST, ast27.AST]\n) -> Union[ast.AST, ast3.AST, ast27.AST]:\n \"\"\"Map ast nodes deprecated in 3.8 to Constant.\"\"\"\n if isinstance(node, (ast.Str, ast3.Str, ast27.Str, ast.Bytes, ast3.Bytes)):\n return ast.Constant(value=node.s)\n\n if isinstance(node, (ast.Num, ast3.Num, ast27.Num)):\n return ast.Constant(value=node.n)\n\n if isinstance(node, (ast.NameConstant, ast3.NameConstant)):\n return ast.Constant(value=node.value)\n\n return node\n", "path": "src/black/parsing.py"}], "after_files": [{"content": "\"\"\"\nParse Python code and perform AST validation.\n\"\"\"\nimport ast\nimport platform\nimport sys\nfrom typing import Any, Iterable, Iterator, List, Set, Tuple, Type, Union\n\nif sys.version_info < (3, 8):\n from typing_extensions import Final\nelse:\n from typing import Final\n\n# lib2to3 fork\nfrom blib2to3.pytree import Node, Leaf\nfrom blib2to3 import pygram\nfrom blib2to3.pgen2 import driver\nfrom blib2to3.pgen2.grammar import Grammar\nfrom blib2to3.pgen2.parse import ParseError\nfrom blib2to3.pgen2.tokenize import TokenError\n\nfrom black.mode import TargetVersion, Feature, supports_feature\nfrom black.nodes import syms\n\nast3: Any\nast27: Any\n\n_IS_PYPY = platform.python_implementation() == \"PyPy\"\n\ntry:\n from typed_ast import ast3, ast27\nexcept ImportError:\n # Either our python version is too low, or we're on pypy\n if sys.version_info < (3, 7) or (sys.version_info < (3, 8) and not _IS_PYPY):\n print(\n \"The typed_ast package is required but not installed.\\n\"\n \"You can upgrade to Python 3.8+ or install typed_ast with\\n\"\n \"`python3 -m pip install typed-ast`.\",\n file=sys.stderr,\n )\n sys.exit(1)\n else:\n ast3 = ast27 = ast\n\n\nclass InvalidInput(ValueError):\n \"\"\"Raised when input source code fails all parse attempts.\"\"\"\n\n\ndef get_grammars(target_versions: Set[TargetVersion]) -> List[Grammar]:\n if not target_versions:\n # No target_version specified, so try all grammars.\n return [\n # Python 3.7+\n pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords,\n # Python 3.0-3.6\n pygram.python_grammar_no_print_statement_no_exec_statement,\n # Python 2.7 with future print_function import\n pygram.python_grammar_no_print_statement,\n # Python 2.7\n pygram.python_grammar,\n ]\n\n if 
all(version.is_python2() for version in target_versions):\n # Python 2-only code, so try Python 2 grammars.\n return [\n # Python 2.7 with future print_function import\n pygram.python_grammar_no_print_statement,\n # Python 2.7\n pygram.python_grammar,\n ]\n\n # Python 3-compatible code, so only try Python 3 grammar.\n grammars = []\n if supports_feature(target_versions, Feature.PATTERN_MATCHING):\n # Python 3.10+\n grammars.append(pygram.python_grammar_soft_keywords)\n # If we have to parse both, try to parse async as a keyword first\n if not supports_feature(\n target_versions, Feature.ASYNC_IDENTIFIERS\n ) and not supports_feature(target_versions, Feature.PATTERN_MATCHING):\n # Python 3.7-3.9\n grammars.append(\n pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords\n )\n if not supports_feature(target_versions, Feature.ASYNC_KEYWORDS):\n # Python 3.0-3.6\n grammars.append(pygram.python_grammar_no_print_statement_no_exec_statement)\n # At least one of the above branches must have been taken, because every Python\n # version has exactly one of the two 'ASYNC_*' flags\n return grammars\n\n\ndef lib2to3_parse(src_txt: str, target_versions: Iterable[TargetVersion] = ()) -> Node:\n \"\"\"Given a string with source, return the lib2to3 Node.\"\"\"\n if not src_txt.endswith(\"\\n\"):\n src_txt += \"\\n\"\n\n for grammar in get_grammars(set(target_versions)):\n drv = driver.Driver(grammar)\n try:\n result = drv.parse_string(src_txt, True)\n break\n\n except ParseError as pe:\n lineno, column = pe.context[1]\n lines = src_txt.splitlines()\n try:\n faulty_line = lines[lineno - 1]\n except IndexError:\n faulty_line = \"<line number missing in source>\"\n exc = InvalidInput(f\"Cannot parse: {lineno}:{column}: {faulty_line}\")\n\n except TokenError as te:\n # In edge cases these are raised; and typically don't have a \"faulty_line\".\n lineno, column = te.args[1]\n exc = InvalidInput(f\"Cannot parse: {lineno}:{column}: {te.args[0]}\")\n\n else:\n raise exc from None\n\n if isinstance(result, Leaf):\n result = Node(syms.file_input, [result])\n return result\n\n\ndef lib2to3_unparse(node: Node) -> str:\n \"\"\"Given a lib2to3 node, return its string representation.\"\"\"\n code = str(node)\n return code\n\n\ndef parse_single_version(\n src: str, version: Tuple[int, int]\n) -> Union[ast.AST, ast3.AST, ast27.AST]:\n filename = \"<unknown>\"\n # typed_ast is needed because of feature version limitations in the builtin ast\n if sys.version_info >= (3, 8) and version >= (3,):\n return ast.parse(src, filename, feature_version=version)\n elif version >= (3,):\n if _IS_PYPY:\n return ast3.parse(src, filename)\n else:\n return ast3.parse(src, filename, feature_version=version[1])\n elif version == (2, 7):\n return ast27.parse(src)\n raise AssertionError(\"INTERNAL ERROR: Tried parsing unsupported Python version!\")\n\n\ndef parse_ast(src: str) -> Union[ast.AST, ast3.AST, ast27.AST]:\n # TODO: support Python 4+ ;)\n versions = [(3, minor) for minor in range(3, sys.version_info[1] + 1)]\n\n if ast27.__name__ != \"ast\":\n versions.append((2, 7))\n\n first_error = \"\"\n for version in sorted(versions, reverse=True):\n try:\n return parse_single_version(src, version)\n except SyntaxError as e:\n if not first_error:\n first_error = str(e)\n\n raise SyntaxError(first_error)\n\n\nast3_AST: Final[Type[ast3.AST]] = ast3.AST\nast27_AST: Final[Type[ast27.AST]] = ast27.AST\n\n\ndef stringify_ast(\n node: Union[ast.AST, ast3.AST, ast27.AST], depth: int = 0\n) -> Iterator[str]:\n \"\"\"Simple visitor 
generating strings to compare ASTs by content.\"\"\"\n\n node = fixup_ast_constants(node)\n\n yield f\"{' ' * depth}{node.__class__.__name__}(\"\n\n type_ignore_classes: Tuple[Type[Any], ...]\n for field in sorted(node._fields): # noqa: F402\n # TypeIgnore will not be present using pypy < 3.8, so need for this\n if not (_IS_PYPY and sys.version_info < (3, 8)):\n # TypeIgnore has only one field 'lineno' which breaks this comparison\n type_ignore_classes = (ast3.TypeIgnore, ast27.TypeIgnore)\n if sys.version_info >= (3, 8):\n type_ignore_classes += (ast.TypeIgnore,)\n if isinstance(node, type_ignore_classes):\n break\n\n try:\n value = getattr(node, field)\n except AttributeError:\n continue\n\n yield f\"{' ' * (depth+1)}{field}=\"\n\n if isinstance(value, list):\n for item in value:\n # Ignore nested tuples within del statements, because we may insert\n # parentheses and they change the AST.\n if (\n field == \"targets\"\n and isinstance(node, (ast.Delete, ast3.Delete, ast27.Delete))\n and isinstance(item, (ast.Tuple, ast3.Tuple, ast27.Tuple))\n ):\n for item in item.elts:\n yield from stringify_ast(item, depth + 2)\n\n elif isinstance(item, (ast.AST, ast3.AST, ast27.AST)):\n yield from stringify_ast(item, depth + 2)\n\n # Note that we are referencing the typed-ast ASTs via global variables and not\n # direct module attribute accesses because that breaks mypyc. It's probably\n # something to do with the ast3 / ast27 variables being marked as Any leading\n # mypy to think this branch is always taken, leaving the rest of the code\n # unanalyzed. Tighting up the types for the typed-ast AST types avoids the\n # mypyc crash.\n elif isinstance(value, (ast.AST, ast3_AST, ast27_AST)):\n yield from stringify_ast(value, depth + 2)\n\n else:\n # Constant strings may be indented across newlines, if they are\n # docstrings; fold spaces after newlines when comparing. Similarly,\n # trailing and leading space may be removed.\n # Note that when formatting Python 2 code, at least with Windows\n # line-endings, docstrings can end up here as bytes instead of\n # str so make sure that we handle both cases.\n if (\n isinstance(node, ast.Constant)\n and field == \"value\"\n and isinstance(value, (str, bytes))\n ):\n lineend = \"\\n\" if isinstance(value, str) else b\"\\n\"\n # To normalize, we strip any leading and trailing space from\n # each line...\n stripped = [line.strip() for line in value.splitlines()]\n normalized = lineend.join(stripped) # type: ignore[attr-defined]\n # ...and remove any blank lines at the beginning and end of\n # the whole string\n normalized = normalized.strip()\n else:\n normalized = value\n yield f\"{' ' * (depth+2)}{normalized!r}, # {value.__class__.__name__}\"\n\n yield f\"{' ' * depth}) # /{node.__class__.__name__}\"\n\n\ndef fixup_ast_constants(\n node: Union[ast.AST, ast3.AST, ast27.AST]\n) -> Union[ast.AST, ast3.AST, ast27.AST]:\n \"\"\"Map ast nodes deprecated in 3.8 to Constant.\"\"\"\n if isinstance(node, (ast.Str, ast3.Str, ast27.Str, ast.Bytes, ast3.Bytes)):\n return ast.Constant(value=node.s)\n\n if isinstance(node, (ast.Num, ast3.Num, ast27.Num)):\n return ast.Constant(value=node.n)\n\n if isinstance(node, (ast.NameConstant, ast3.NameConstant)):\n return ast.Constant(value=node.value)\n\n return node\n", "path": "src/black/parsing.py"}]} | 3,543 | 242 |
gh_patches_debug_8332 | rasdani/github-patches | git_diff | sopel-irc__sopel-1380 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
.title command should say an error if no title is found
Example: http://zimage.com/~ant/temp/notitle.html
Sopel should say there was no web page title found.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sopel/modules/url.py`
Content:
```
1 # coding=utf-8
2 """URL title module"""
3 # Copyright 2010-2011, Michael Yanovich, yanovich.net, Kenneth Sham
4 # Copyright 2012-2013 Elsie Powell
5 # Copyright 2013 Lior Ramati ([email protected])
6 # Copyright © 2014 Elad Alfassa <[email protected]>
7 # Licensed under the Eiffel Forum License 2.
8 from __future__ import unicode_literals, absolute_import, print_function, division
9
10 import re
11 from sopel import web, tools, __version__
12 from sopel.module import commands, rule, example
13 from sopel.config.types import ValidatedAttribute, ListAttribute, StaticSection
14
15 import requests
16
17 USER_AGENT = 'Sopel/{} (https://sopel.chat)'.format(__version__)
18 default_headers = {'User-Agent': USER_AGENT}
19 url_finder = None
20 # These are used to clean up the title tag before actually parsing it. Not the
21 # world's best way to do this, but it'll do for now.
22 title_tag_data = re.compile('<(/?)title( [^>]+)?>', re.IGNORECASE)
23 quoted_title = re.compile('[\'"]<title>[\'"]', re.IGNORECASE)
24 # This is another regex that presumably does something important.
25 re_dcc = re.compile(r'(?i)dcc\ssend')
26 # This sets the maximum number of bytes that should be read in order to find
27 # the title. We don't want it too high, or a link to a big file/stream will
28 # just keep downloading until there's no more memory. 640k ought to be enough
29 # for anybody.
30 max_bytes = 655360
31
32
33 class UrlSection(StaticSection):
34 # TODO some validation rules maybe?
35 exclude = ListAttribute('exclude')
36 exclusion_char = ValidatedAttribute('exclusion_char', default='!')
37
38
39 def configure(config):
40 config.define_section('url', UrlSection)
41 config.url.configure_setting(
42 'exclude',
43 'Enter regular expressions for each URL you would like to exclude.'
44 )
45 config.url.configure_setting(
46 'exclusion_char',
47 'Enter a character which can be prefixed to suppress URL titling'
48 )
49
50
51 def setup(bot):
52 global url_finder
53
54 bot.config.define_section('url', UrlSection)
55
56 if bot.config.url.exclude:
57 regexes = [re.compile(s) for s in bot.config.url.exclude]
58 else:
59 regexes = []
60
61 # We're keeping these in their own list, rather than putting then in the
62 # callbacks list because 1, it's easier to deal with modules that are still
63 # using this list, and not the newer callbacks list and 2, having a lambda
64 # just to pass is kinda ugly.
65 if not bot.memory.contains('url_exclude'):
66 bot.memory['url_exclude'] = regexes
67 else:
68 exclude = bot.memory['url_exclude']
69 if regexes:
70 exclude.extend(regexes)
71 bot.memory['url_exclude'] = exclude
72
73 # Ensure that url_callbacks and last_seen_url are in memory
74 if not bot.memory.contains('url_callbacks'):
75 bot.memory['url_callbacks'] = tools.SopelMemory()
76 if not bot.memory.contains('last_seen_url'):
77 bot.memory['last_seen_url'] = tools.SopelMemory()
78
79 url_finder = re.compile(r'(?u)(%s?(?:http|https|ftp)(?:://\S+))' %
80 (bot.config.url.exclusion_char), re.IGNORECASE)
81
82
83 @commands('title')
84 @example('.title http://google.com', '[ Google ] - google.com')
85 def title_command(bot, trigger):
86 """
87 Show the title or URL information for the given URL, or the last URL seen
88 in this channel.
89 """
90 if not trigger.group(2):
91 if trigger.sender not in bot.memory['last_seen_url']:
92 return
93 matched = check_callbacks(bot, trigger,
94 bot.memory['last_seen_url'][trigger.sender],
95 True)
96 if matched:
97 return
98 else:
99 urls = [bot.memory['last_seen_url'][trigger.sender]]
100 else:
101 urls = re.findall(url_finder, trigger)
102
103 results = process_urls(bot, trigger, urls)
104 for title, domain in results[:4]:
105 bot.reply('[ %s ] - %s' % (title, domain))
106
107
108 @rule('(?u).*(https?://\S+).*')
109 def title_auto(bot, trigger):
110 """
111 Automatically show titles for URLs. For shortened URLs/redirects, find
112 where the URL redirects to and show the title for that (or call a function
113 from another module to give more information).
114 """
115 if re.match(bot.config.core.prefix + 'title', trigger):
116 return
117
118 # Avoid fetching known malicious links
119 if 'safety_cache' in bot.memory and trigger in bot.memory['safety_cache']:
120 if bot.memory['safety_cache'][trigger]['positives'] > 1:
121 return
122
123 urls = re.findall(url_finder, trigger)
124 if len(urls) == 0:
125 return
126
127 results = process_urls(bot, trigger, urls)
128 bot.memory['last_seen_url'][trigger.sender] = urls[-1]
129
130 for title, domain in results[:4]:
131 message = '[ %s ] - %s' % (title, domain)
132 # Guard against responding to other instances of this bot.
133 if message != trigger:
134 bot.say(message)
135
136
137 def process_urls(bot, trigger, urls):
138 """
139 For each URL in the list, ensure that it isn't handled by another module.
140 If not, find where it redirects to, if anywhere. If that redirected URL
141 should be handled by another module, dispatch the callback for it.
142 Return a list of (title, hostname) tuples for each URL which is not handled by
143 another module.
144 """
145
146 results = []
147 for url in urls:
148 if not url.startswith(bot.config.url.exclusion_char):
149 # Magic stuff to account for international domain names
150 try:
151 url = web.iri_to_uri(url)
152 except Exception: # TODO: Be specific
153 pass
154 # First, check that the URL we got doesn't match
155 matched = check_callbacks(bot, trigger, url, False)
156 if matched:
157 continue
158 # Finally, actually show the URL
159 title = find_title(url, verify=bot.config.core.verify_ssl)
160 if title:
161 results.append((title, get_hostname(url)))
162 return results
163
164
165 def check_callbacks(bot, trigger, url, run=True):
166 """
167 Check the given URL against the callbacks list. If it matches, and ``run``
168 is given as ``True``, run the callback function, otherwise pass. Returns
169 ``True`` if the url matched anything in the callbacks list.
170 """
171 # Check if it matches the exclusion list first
172 matched = any(regex.search(url) for regex in bot.memory['url_exclude'])
173 # Then, check if there's anything in the callback list
174 for regex, function in tools.iteritems(bot.memory['url_callbacks']):
175 match = regex.search(url)
176 if match:
177 # Always run ones from @url; they don't run on their own.
178 if run or hasattr(function, 'url_regex'):
179 function(bot, trigger, match)
180 matched = True
181 return matched
182
183
184 def find_title(url, verify=True):
185 """Return the title for the given URL."""
186 try:
187 response = requests.get(url, stream=True, verify=verify,
188 headers=default_headers)
189 content = b''
190 for byte in response.iter_content(chunk_size=512):
191 content += byte
192 if b'</title>' in content or len(content) > max_bytes:
193 break
194 content = content.decode('utf-8', errors='ignore')
195 # Need to close the connection because we have not read all
196 # the data
197 response.close()
198 except requests.exceptions.ConnectionError:
199 return None
200
201 # Some cleanup that I don't really grok, but was in the original, so
202 # we'll keep it (with the compiled regexes made global) for now.
203 content = title_tag_data.sub(r'<\1title>', content)
204 content = quoted_title.sub('', content)
205
206 start = content.rfind('<title>')
207 end = content.rfind('</title>')
208 if start == -1 or end == -1:
209 return
210 title = web.decode(content[start + 7:end])
211 title = title.strip()[:200]
212
213 title = ' '.join(title.split()) # cleanly remove multiple spaces
214
215 # More cryptic regex substitutions. This one looks to be myano's invention.
216 title = re_dcc.sub('', title)
217
218 return title or None
219
220
221 def get_hostname(url):
222 idx = 7
223 if url.startswith('https://'):
224 idx = 8
225 elif url.startswith('ftp://'):
226 idx = 6
227 hostname = url[idx:]
228 slash = hostname.find('/')
229 if slash != -1:
230 hostname = hostname[:slash]
231 return hostname
232
233
234 if __name__ == "__main__":
235 from sopel.test_tools import run_example_tests
236 run_example_tests(__file__)
237
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sopel/modules/url.py b/sopel/modules/url.py
--- a/sopel/modules/url.py
+++ b/sopel/modules/url.py
@@ -104,6 +104,13 @@
for title, domain in results[:4]:
bot.reply('[ %s ] - %s' % (title, domain))
+ # Nice to have different failure messages for one-and-only requested URL
+ # failed vs. one-of-many failed.
+ if len(urls) == 1 and not results:
+ bot.reply('Sorry, fetching that title failed. Make sure the site is working.')
+ elif len(urls) > len(results):
+ bot.reply('I couldn\'t get all of the titles, but I fetched what I could!')
+
@rule('(?u).*(https?://\S+).*')
def title_auto(bot, trigger):
| {"golden_diff": "diff --git a/sopel/modules/url.py b/sopel/modules/url.py\n--- a/sopel/modules/url.py\n+++ b/sopel/modules/url.py\n@@ -104,6 +104,13 @@\n for title, domain in results[:4]:\n bot.reply('[ %s ] - %s' % (title, domain))\n \n+ # Nice to have different failure messages for one-and-only requested URL\n+ # failed vs. one-of-many failed.\n+ if len(urls) == 1 and not results:\n+ bot.reply('Sorry, fetching that title failed. Make sure the site is working.')\n+ elif len(urls) > len(results):\n+ bot.reply('I couldn\\'t get all of the titles, but I fetched what I could!')\n+\n \n @rule('(?u).*(https?://\\S+).*')\n def title_auto(bot, trigger):\n", "issue": ".title command should say an error if no title is found\nExample: http://zimage.com/~ant/temp/notitle.html\r\n\r\nSopel should say there was no web page title found.\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"URL title module\"\"\"\n# Copyright 2010-2011, Michael Yanovich, yanovich.net, Kenneth Sham\n# Copyright 2012-2013 Elsie Powell\n# Copyright 2013 Lior Ramati ([email protected])\n# Copyright \u00a9 2014 Elad Alfassa <[email protected]>\n# Licensed under the Eiffel Forum License 2.\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport re\nfrom sopel import web, tools, __version__\nfrom sopel.module import commands, rule, example\nfrom sopel.config.types import ValidatedAttribute, ListAttribute, StaticSection\n\nimport requests\n\nUSER_AGENT = 'Sopel/{} (https://sopel.chat)'.format(__version__)\ndefault_headers = {'User-Agent': USER_AGENT}\nurl_finder = None\n# These are used to clean up the title tag before actually parsing it. Not the\n# world's best way to do this, but it'll do for now.\ntitle_tag_data = re.compile('<(/?)title( [^>]+)?>', re.IGNORECASE)\nquoted_title = re.compile('[\\'\"]<title>[\\'\"]', re.IGNORECASE)\n# This is another regex that presumably does something important.\nre_dcc = re.compile(r'(?i)dcc\\ssend')\n# This sets the maximum number of bytes that should be read in order to find\n# the title. We don't want it too high, or a link to a big file/stream will\n# just keep downloading until there's no more memory. 
640k ought to be enough\n# for anybody.\nmax_bytes = 655360\n\n\nclass UrlSection(StaticSection):\n # TODO some validation rules maybe?\n exclude = ListAttribute('exclude')\n exclusion_char = ValidatedAttribute('exclusion_char', default='!')\n\n\ndef configure(config):\n config.define_section('url', UrlSection)\n config.url.configure_setting(\n 'exclude',\n 'Enter regular expressions for each URL you would like to exclude.'\n )\n config.url.configure_setting(\n 'exclusion_char',\n 'Enter a character which can be prefixed to suppress URL titling'\n )\n\n\ndef setup(bot):\n global url_finder\n\n bot.config.define_section('url', UrlSection)\n\n if bot.config.url.exclude:\n regexes = [re.compile(s) for s in bot.config.url.exclude]\n else:\n regexes = []\n\n # We're keeping these in their own list, rather than putting then in the\n # callbacks list because 1, it's easier to deal with modules that are still\n # using this list, and not the newer callbacks list and 2, having a lambda\n # just to pass is kinda ugly.\n if not bot.memory.contains('url_exclude'):\n bot.memory['url_exclude'] = regexes\n else:\n exclude = bot.memory['url_exclude']\n if regexes:\n exclude.extend(regexes)\n bot.memory['url_exclude'] = exclude\n\n # Ensure that url_callbacks and last_seen_url are in memory\n if not bot.memory.contains('url_callbacks'):\n bot.memory['url_callbacks'] = tools.SopelMemory()\n if not bot.memory.contains('last_seen_url'):\n bot.memory['last_seen_url'] = tools.SopelMemory()\n\n url_finder = re.compile(r'(?u)(%s?(?:http|https|ftp)(?:://\\S+))' %\n (bot.config.url.exclusion_char), re.IGNORECASE)\n\n\n@commands('title')\n@example('.title http://google.com', '[ Google ] - google.com')\ndef title_command(bot, trigger):\n \"\"\"\n Show the title or URL information for the given URL, or the last URL seen\n in this channel.\n \"\"\"\n if not trigger.group(2):\n if trigger.sender not in bot.memory['last_seen_url']:\n return\n matched = check_callbacks(bot, trigger,\n bot.memory['last_seen_url'][trigger.sender],\n True)\n if matched:\n return\n else:\n urls = [bot.memory['last_seen_url'][trigger.sender]]\n else:\n urls = re.findall(url_finder, trigger)\n\n results = process_urls(bot, trigger, urls)\n for title, domain in results[:4]:\n bot.reply('[ %s ] - %s' % (title, domain))\n\n\n@rule('(?u).*(https?://\\S+).*')\ndef title_auto(bot, trigger):\n \"\"\"\n Automatically show titles for URLs. For shortened URLs/redirects, find\n where the URL redirects to and show the title for that (or call a function\n from another module to give more information).\n \"\"\"\n if re.match(bot.config.core.prefix + 'title', trigger):\n return\n\n # Avoid fetching known malicious links\n if 'safety_cache' in bot.memory and trigger in bot.memory['safety_cache']:\n if bot.memory['safety_cache'][trigger]['positives'] > 1:\n return\n\n urls = re.findall(url_finder, trigger)\n if len(urls) == 0:\n return\n\n results = process_urls(bot, trigger, urls)\n bot.memory['last_seen_url'][trigger.sender] = urls[-1]\n\n for title, domain in results[:4]:\n message = '[ %s ] - %s' % (title, domain)\n # Guard against responding to other instances of this bot.\n if message != trigger:\n bot.say(message)\n\n\ndef process_urls(bot, trigger, urls):\n \"\"\"\n For each URL in the list, ensure that it isn't handled by another module.\n If not, find where it redirects to, if anywhere. 
If that redirected URL\n should be handled by another module, dispatch the callback for it.\n Return a list of (title, hostname) tuples for each URL which is not handled by\n another module.\n \"\"\"\n\n results = []\n for url in urls:\n if not url.startswith(bot.config.url.exclusion_char):\n # Magic stuff to account for international domain names\n try:\n url = web.iri_to_uri(url)\n except Exception: # TODO: Be specific\n pass\n # First, check that the URL we got doesn't match\n matched = check_callbacks(bot, trigger, url, False)\n if matched:\n continue\n # Finally, actually show the URL\n title = find_title(url, verify=bot.config.core.verify_ssl)\n if title:\n results.append((title, get_hostname(url)))\n return results\n\n\ndef check_callbacks(bot, trigger, url, run=True):\n \"\"\"\n Check the given URL against the callbacks list. If it matches, and ``run``\n is given as ``True``, run the callback function, otherwise pass. Returns\n ``True`` if the url matched anything in the callbacks list.\n \"\"\"\n # Check if it matches the exclusion list first\n matched = any(regex.search(url) for regex in bot.memory['url_exclude'])\n # Then, check if there's anything in the callback list\n for regex, function in tools.iteritems(bot.memory['url_callbacks']):\n match = regex.search(url)\n if match:\n # Always run ones from @url; they don't run on their own.\n if run or hasattr(function, 'url_regex'):\n function(bot, trigger, match)\n matched = True\n return matched\n\n\ndef find_title(url, verify=True):\n \"\"\"Return the title for the given URL.\"\"\"\n try:\n response = requests.get(url, stream=True, verify=verify,\n headers=default_headers)\n content = b''\n for byte in response.iter_content(chunk_size=512):\n content += byte\n if b'</title>' in content or len(content) > max_bytes:\n break\n content = content.decode('utf-8', errors='ignore')\n # Need to close the connection because we have not read all\n # the data\n response.close()\n except requests.exceptions.ConnectionError:\n return None\n\n # Some cleanup that I don't really grok, but was in the original, so\n # we'll keep it (with the compiled regexes made global) for now.\n content = title_tag_data.sub(r'<\\1title>', content)\n content = quoted_title.sub('', content)\n\n start = content.rfind('<title>')\n end = content.rfind('</title>')\n if start == -1 or end == -1:\n return\n title = web.decode(content[start + 7:end])\n title = title.strip()[:200]\n\n title = ' '.join(title.split()) # cleanly remove multiple spaces\n\n # More cryptic regex substitutions. 
This one looks to be myano's invention.\n title = re_dcc.sub('', title)\n\n return title or None\n\n\ndef get_hostname(url):\n idx = 7\n if url.startswith('https://'):\n idx = 8\n elif url.startswith('ftp://'):\n idx = 6\n hostname = url[idx:]\n slash = hostname.find('/')\n if slash != -1:\n hostname = hostname[:slash]\n return hostname\n\n\nif __name__ == \"__main__\":\n from sopel.test_tools import run_example_tests\n run_example_tests(__file__)\n", "path": "sopel/modules/url.py"}], "after_files": [{"content": "# coding=utf-8\n\"\"\"URL title module\"\"\"\n# Copyright 2010-2011, Michael Yanovich, yanovich.net, Kenneth Sham\n# Copyright 2012-2013 Elsie Powell\n# Copyright 2013 Lior Ramati ([email protected])\n# Copyright \u00a9 2014 Elad Alfassa <[email protected]>\n# Licensed under the Eiffel Forum License 2.\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport re\nfrom sopel import web, tools, __version__\nfrom sopel.module import commands, rule, example\nfrom sopel.config.types import ValidatedAttribute, ListAttribute, StaticSection\n\nimport requests\n\nUSER_AGENT = 'Sopel/{} (https://sopel.chat)'.format(__version__)\ndefault_headers = {'User-Agent': USER_AGENT}\nurl_finder = None\n# These are used to clean up the title tag before actually parsing it. Not the\n# world's best way to do this, but it'll do for now.\ntitle_tag_data = re.compile('<(/?)title( [^>]+)?>', re.IGNORECASE)\nquoted_title = re.compile('[\\'\"]<title>[\\'\"]', re.IGNORECASE)\n# This is another regex that presumably does something important.\nre_dcc = re.compile(r'(?i)dcc\\ssend')\n# This sets the maximum number of bytes that should be read in order to find\n# the title. We don't want it too high, or a link to a big file/stream will\n# just keep downloading until there's no more memory. 
640k ought to be enough\n# for anybody.\nmax_bytes = 655360\n\n\nclass UrlSection(StaticSection):\n # TODO some validation rules maybe?\n exclude = ListAttribute('exclude')\n exclusion_char = ValidatedAttribute('exclusion_char', default='!')\n\n\ndef configure(config):\n config.define_section('url', UrlSection)\n config.url.configure_setting(\n 'exclude',\n 'Enter regular expressions for each URL you would like to exclude.'\n )\n config.url.configure_setting(\n 'exclusion_char',\n 'Enter a character which can be prefixed to suppress URL titling'\n )\n\n\ndef setup(bot):\n global url_finder\n\n bot.config.define_section('url', UrlSection)\n\n if bot.config.url.exclude:\n regexes = [re.compile(s) for s in bot.config.url.exclude]\n else:\n regexes = []\n\n # We're keeping these in their own list, rather than putting then in the\n # callbacks list because 1, it's easier to deal with modules that are still\n # using this list, and not the newer callbacks list and 2, having a lambda\n # just to pass is kinda ugly.\n if not bot.memory.contains('url_exclude'):\n bot.memory['url_exclude'] = regexes\n else:\n exclude = bot.memory['url_exclude']\n if regexes:\n exclude.extend(regexes)\n bot.memory['url_exclude'] = exclude\n\n # Ensure that url_callbacks and last_seen_url are in memory\n if not bot.memory.contains('url_callbacks'):\n bot.memory['url_callbacks'] = tools.SopelMemory()\n if not bot.memory.contains('last_seen_url'):\n bot.memory['last_seen_url'] = tools.SopelMemory()\n\n url_finder = re.compile(r'(?u)(%s?(?:http|https|ftp)(?:://\\S+))' %\n (bot.config.url.exclusion_char), re.IGNORECASE)\n\n\n@commands('title')\n@example('.title http://google.com', '[ Google ] - google.com')\ndef title_command(bot, trigger):\n \"\"\"\n Show the title or URL information for the given URL, or the last URL seen\n in this channel.\n \"\"\"\n if not trigger.group(2):\n if trigger.sender not in bot.memory['last_seen_url']:\n return\n matched = check_callbacks(bot, trigger,\n bot.memory['last_seen_url'][trigger.sender],\n True)\n if matched:\n return\n else:\n urls = [bot.memory['last_seen_url'][trigger.sender]]\n else:\n urls = re.findall(url_finder, trigger)\n\n results = process_urls(bot, trigger, urls)\n for title, domain in results[:4]:\n bot.reply('[ %s ] - %s' % (title, domain))\n\n # Nice to have different failure messages for one-and-only requested URL\n # failed vs. one-of-many failed.\n if len(urls) == 1 and not results:\n bot.reply('Sorry, fetching that title failed. Make sure the site is working.')\n elif len(urls) > len(results):\n bot.reply('I couldn\\'t get all of the titles, but I fetched what I could!')\n\n\n@rule('(?u).*(https?://\\S+).*')\ndef title_auto(bot, trigger):\n \"\"\"\n Automatically show titles for URLs. 
For shortened URLs/redirects, find\n where the URL redirects to and show the title for that (or call a function\n from another module to give more information).\n \"\"\"\n if re.match(bot.config.core.prefix + 'title', trigger):\n return\n\n # Avoid fetching known malicious links\n if 'safety_cache' in bot.memory and trigger in bot.memory['safety_cache']:\n if bot.memory['safety_cache'][trigger]['positives'] > 1:\n return\n\n urls = re.findall(url_finder, trigger)\n if len(urls) == 0:\n return\n\n results = process_urls(bot, trigger, urls)\n bot.memory['last_seen_url'][trigger.sender] = urls[-1]\n\n for title, domain in results[:4]:\n message = '[ %s ] - %s' % (title, domain)\n # Guard against responding to other instances of this bot.\n if message != trigger:\n bot.say(message)\n\n\ndef process_urls(bot, trigger, urls):\n \"\"\"\n For each URL in the list, ensure that it isn't handled by another module.\n If not, find where it redirects to, if anywhere. If that redirected URL\n should be handled by another module, dispatch the callback for it.\n Return a list of (title, hostname) tuples for each URL which is not handled by\n another module.\n \"\"\"\n\n results = []\n for url in urls:\n if not url.startswith(bot.config.url.exclusion_char):\n # Magic stuff to account for international domain names\n try:\n url = web.iri_to_uri(url)\n except Exception: # TODO: Be specific\n pass\n # First, check that the URL we got doesn't match\n matched = check_callbacks(bot, trigger, url, False)\n if matched:\n continue\n # Finally, actually show the URL\n title = find_title(url, verify=bot.config.core.verify_ssl)\n if title:\n results.append((title, get_hostname(url)))\n return results\n\n\ndef check_callbacks(bot, trigger, url, run=True):\n \"\"\"\n Check the given URL against the callbacks list. If it matches, and ``run``\n is given as ``True``, run the callback function, otherwise pass. Returns\n ``True`` if the url matched anything in the callbacks list.\n \"\"\"\n # Check if it matches the exclusion list first\n matched = any(regex.search(url) for regex in bot.memory['url_exclude'])\n # Then, check if there's anything in the callback list\n for regex, function in tools.iteritems(bot.memory['url_callbacks']):\n match = regex.search(url)\n if match:\n # Always run ones from @url; they don't run on their own.\n if run or hasattr(function, 'url_regex'):\n function(bot, trigger, match)\n matched = True\n return matched\n\n\ndef find_title(url, verify=True):\n \"\"\"Return the title for the given URL.\"\"\"\n try:\n response = requests.get(url, stream=True, verify=verify,\n headers=default_headers)\n content = b''\n for byte in response.iter_content(chunk_size=512):\n content += byte\n if b'</title>' in content or len(content) > max_bytes:\n break\n content = content.decode('utf-8', errors='ignore')\n # Need to close the connection because we have not read all\n # the data\n response.close()\n except requests.exceptions.ConnectionError:\n return None\n\n # Some cleanup that I don't really grok, but was in the original, so\n # we'll keep it (with the compiled regexes made global) for now.\n content = title_tag_data.sub(r'<\\1title>', content)\n content = quoted_title.sub('', content)\n\n start = content.rfind('<title>')\n end = content.rfind('</title>')\n if start == -1 or end == -1:\n return\n title = web.decode(content[start + 7:end])\n title = title.strip()[:200]\n\n title = ' '.join(title.split()) # cleanly remove multiple spaces\n\n # More cryptic regex substitutions. 
This one looks to be myano's invention.\n title = re_dcc.sub('', title)\n\n return title or None\n\n\ndef get_hostname(url):\n idx = 7\n if url.startswith('https://'):\n idx = 8\n elif url.startswith('ftp://'):\n idx = 6\n hostname = url[idx:]\n slash = hostname.find('/')\n if slash != -1:\n hostname = hostname[:slash]\n return hostname\n\n\nif __name__ == \"__main__\":\n from sopel.test_tools import run_example_tests\n run_example_tests(__file__)\n", "path": "sopel/modules/url.py"}]} | 2,912 | 194 |